tauto - vacate cros_host
Expose generic host
BUG=None
TEST=dummy_Pass
Change-Id: I88b4f7037e615e19fe5b2ba49358ec5074136fcc
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/tauto/+/3125896
Tested-by: Derek Beckett <dbeckett@chromium.org>
Reviewed-by: C Shapiro <shapiroc@chromium.org>
diff --git a/server/hosts/__init__.py b/server/hosts/__init__.py
index bab69d1..ffb5ad3 100644
--- a/server/hosts/__init__.py
+++ b/server/hosts/__init__.py
@@ -11,12 +11,6 @@
from autotest_lib.server.hosts.base_classes import Host
from autotest_lib.server.hosts.remote import RemoteHost
-# host implementation classes
-from autotest_lib.server.hosts.ssh_host import SSHHost
-from autotest_lib.server.hosts.cros_host import CrosHost
-from autotest_lib.server.hosts.chameleon_host import ChameleonHost
-from autotest_lib.server.hosts.servo_host import ServoHost
-
# factory function
from autotest_lib.server.hosts.factory import create_host
from autotest_lib.server.hosts.factory import create_target_machine
diff --git a/server/hosts/abstract_ssh.py b/server/hosts/abstract_ssh.py
index dc7c31d..c228a41 100644
--- a/server/hosts/abstract_ssh.py
+++ b/server/hosts/abstract_ssh.py
@@ -22,7 +22,6 @@
from autotest_lib.server.hosts import remote
from autotest_lib.server.hosts import rpc_server_tracker
from autotest_lib.server.hosts import ssh_multiplex
-from autotest_lib.server.hosts.tls_client import exec_dut_command
import six
from six.moves import filter
@@ -36,11 +35,7 @@
type=bool,
default=False)
-ENABLE_EXEC_DUT_COMMAND = get_value('AUTOSERV',
- 'enable_tls',
- type=bool,
- default=False)
-
+ENABLE_EXEC_DUT_COMMAND = False
# Number of seconds to use the cached up status.
_DEFAULT_UP_STATUS_EXPIRATION_SECONDS = 300
_DEFAULT_SSH_PORT = 22
@@ -1082,9 +1077,7 @@
# If the TLS connection is alive, create a new client.
if self.tls_connection is None:
return None
- return exec_dut_command.TLSExecDutCommandClient(
- tlsconnection=self.tls_connection,
- hostname=self.hostname)
+ return None
def clear_known_hosts(self):
"""Clears out the temporary ssh known_hosts file.
diff --git a/server/hosts/afe_store.py b/server/hosts/afe_store.py
deleted file mode 100644
index a01d13f..0000000
--- a/server/hosts/afe_store.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-
-import common
diff --git a/server/hosts/base_servohost.py b/server/hosts/base_servohost.py
deleted file mode 100644
index 277477b..0000000
--- a/server/hosts/base_servohost.py
+++ /dev/null
@@ -1,622 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# Expects to be run in an environment with sudo and no interactive password
-# prompt, such as within the Chromium OS development chroot.
-
-
-"""This is a base host class for servohost and labstation."""
-
-
-import logging
-import time
-import os
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import autotest_enum
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import lsbrelease_utils
-from autotest_lib.client.common_lib.cros import dev_server
-from autotest_lib.client.common_lib.cros import kernel_utils
-from autotest_lib.client.cros import constants as client_constants
-from autotest_lib.server import autotest
-from autotest_lib.server import site_utils as server_utils
-from autotest_lib.server.hosts import ssh_host
-
-
-class BaseServoHost(ssh_host.SSHHost):
- """Base host class for a host that manage servo(s).
- E.g. beaglebone, labstation.
- """
- REBOOT_CMD = 'sleep 5; reboot & sleep 10; reboot -f'
-
- TEMP_FILE_DIR = '/var/lib/servod/'
-
- LOCK_FILE_POSTFIX = '_in_use'
- REBOOT_FILE_POSTFIX = '_reboot'
-
- # Time to wait a rebooting servohost, in seconds.
- REBOOT_TIMEOUT = 240
-
- # Timeout value to power cycle a servohost, in seconds.
- BOOT_TIMEOUT = 240
-
- # Constants that reflect current host update state.
- UPDATE_STATE = autotest_enum.AutotestEnum('IDLE', 'RUNNING',
- 'PENDING_REBOOT')
-
- def _initialize(self,
- hostname,
- is_in_lab=None,
- servo_host_ssh_port=None,
- *args,
- **dargs):
- """Construct a BaseServoHost object.
-
- @param is_in_lab: True if the servo host is in Cros Lab. Default is set
- to None, for which utils.host_is_in_lab_zone will be
- called to check if the servo host is in Cros lab.
-
- """
- if servo_host_ssh_port is not None:
- dargs['port'] = int(servo_host_ssh_port)
-
- super(BaseServoHost, self)._initialize(hostname=hostname,
- *args, **dargs)
-
- self._is_containerized_servod = self.hostname.endswith('docker_servod')
-
- self._is_localhost = (self.hostname == 'localhost'
- and servo_host_ssh_port is None)
- if self._is_localhost or self._is_containerized_servod:
- self._is_in_lab = False
- elif is_in_lab is None:
- self._is_in_lab = utils.host_is_in_lab_zone(self.hostname)
- else:
- self._is_in_lab = is_in_lab
-
- # Commands on the servo host must be run by the superuser.
- # Our account on a remote host is root, but if our target is
- # localhost then we might be running unprivileged. If so,
- # `sudo` will have to be added to the commands.
- if self._is_localhost:
- self._sudo_required = utils.system_output('id -u') != '0'
- else:
- self._sudo_required = False
-
- self._is_labstation = None
- self._dut_host_info = None
- self._dut_hostname = None
-
-
- def get_board(self):
- """Determine the board for this servo host. E.g. fizz-labstation
-
- @returns a string representing this labstation's board or None if
- target host is not using a ChromeOS image(e.g. test in chroot).
- """
- output = self.run('cat /etc/lsb-release', ignore_status=True).stdout
- return lsbrelease_utils.get_current_board(lsb_release_content=output)
-
-
- def set_dut_host_info(self, dut_host_info):
- """
- @param dut_host_info: A HostInfo object.
- """
- logging.info('setting dut_host_info field to (%s)', dut_host_info)
- self._dut_host_info = dut_host_info
-
-
- def get_dut_host_info(self):
- """
- @return A HostInfo object.
- """
- return self._dut_host_info
-
-
- def set_dut_hostname(self, dut_hostname):
- """
- @param dut_hostname: hostname of the DUT that connected to this servo.
- """
- logging.info('setting dut_hostname as (%s)', dut_hostname)
- self._dut_hostname = dut_hostname
-
-
- def get_dut_hostname(self):
- """
- @returns hostname of the DUT that connected to this servo.
- """
- return self._dut_hostname
-
-
- def is_labstation(self):
- """Determine if the host is a labstation
-
- @returns True if ths host is a labstation otherwise False.
- """
- if self._is_labstation is None:
- board = self.get_board()
- self._is_labstation = board is not None and 'labstation' in board
-
- return self._is_labstation
-
-
- def _get_lsb_release_content(self):
- """Return the content of lsb-release file of host."""
- return self.run(
- 'cat "%s"' % client_constants.LSB_RELEASE).stdout.strip()
-
-
- def get_release_version(self):
- """Get the value of attribute CHROMEOS_RELEASE_VERSION from lsb-release.
-
- @returns The version string in lsb-release, under attribute
- CHROMEOS_RELEASE_VERSION(e.g. 12900.0.0). None on fail.
- """
- return lsbrelease_utils.get_chromeos_release_version(
- lsb_release_content=self._get_lsb_release_content()
- )
-
-
- def get_full_release_path(self):
- """Get full release path from servohost as string.
-
- @returns full release path as a string
- (e.g. fizz-labstation-release/R82.12900.0.0). None on fail.
- """
- return lsbrelease_utils.get_chromeos_release_builder_path(
- lsb_release_content=self._get_lsb_release_content()
- )
-
-
- def _check_update_status(self):
- """ Check servohost's current update state.
-
- @returns: one of below state of from self.UPDATE_STATE
- IDLE -- if the target host is not currently updating and not
- pending on a reboot.
- RUNNING -- if there is another updating process that running on
- target host(note: we don't expect to hit this scenario).
- PENDING_REBOOT -- if the target host had an update and pending
- on reboot.
- """
- result = self.run('pgrep -f quick-provision | grep -v $$',
- ignore_status=True)
- # We don't expect any output unless there are another quick
- # provision process is running.
- if result.exit_status == 0:
- return self.UPDATE_STATE.RUNNING
-
- # Determine if we have an update that pending on reboot by check if
- # the current inactive kernel has priority for the next boot.
- try:
- inactive_kernel = kernel_utils.get_kernel_state(self)[1]
- next_kernel = kernel_utils.get_next_kernel(self)
- if inactive_kernel == next_kernel:
- return self.UPDATE_STATE.PENDING_REBOOT
- except Exception as e:
- logging.error('Unexpected error while checking kernel info; %s', e)
- return self.UPDATE_STATE.IDLE
-
-
- def is_in_lab(self):
- """Check whether the servo host is a lab device.
-
- @returns: True if the servo host is in Cros Lab, otherwise False.
-
- """
- return self._is_in_lab
-
-
- def is_localhost(self):
- """Checks whether the servo host points to localhost.
-
- @returns: True if it points to localhost, otherwise False.
-
- """
- return self._is_localhost
-
-
- def is_containerized_servod(self):
- """Checks whether the servo host is a containerized servod.
-
- @returns: True if using containerized servod, otherwise False.
-
- """
- return self._is_containerized_servod
-
- def is_cros_host(self):
- """Check if a servo host is running chromeos.
-
- @return: True if the servo host is running chromeos.
- False if it isn't, or we don't have enough information.
- """
- try:
- result = self.run('grep -q CHROMEOS /etc/lsb-release',
- ignore_status=True, timeout=10)
- except (error.AutoservRunError, error.AutoservSSHTimeout):
- return False
- return result.exit_status == 0
-
-
- def prepare_for_update(self):
- """Prepares the DUT for an update.
- Subclasses may override this to perform any special actions
- required before updating.
- """
- pass
-
-
- def reboot(self, *args, **dargs):
- """Reboot using special servo host reboot command."""
- super(BaseServoHost, self).reboot(reboot_cmd=self.REBOOT_CMD,
- *args, **dargs)
-
-
- def update_image(self, stable_version=None):
- """Update the image on the servo host, if needed.
-
- This method recognizes the following cases:
- * If the Host is not running Chrome OS, do nothing.
- * If a previously triggered update is now complete, reboot
- to the new version.
- * If the host is processing an update do nothing.
- * If the host has an update that pending on reboot, do nothing.
- * If the host is running a version of Chrome OS different
- from the default for servo Hosts, start an update.
-
- @stable_version the target build number.(e.g. R82-12900.0.0)
-
- @raises dev_server.DevServerException: If all the devservers are down.
- @raises site_utils.ParseBuildNameException: If the devserver returns
- an invalid build name.
- """
- # servod could be running in a Ubuntu workstation.
- if not self.is_cros_host():
- logging.info('Not attempting an update, either %s is not running '
- 'chromeos or we cannot find enough information about '
- 'the host.', self.hostname)
- return
-
- if lsbrelease_utils.is_moblab():
- logging.info('Not attempting an update, %s is running moblab.',
- self.hostname)
- return
-
- if not stable_version:
- logging.debug("BaseServoHost::update_image attempting to get"
- " servo cros stable version")
- try:
- stable_version = (self.get_dut_host_info().
- servo_cros_stable_version)
- except AttributeError:
- logging.error("BaseServoHost::update_image failed to get"
- " servo cros stable version.")
-
- target_build = "%s-release/%s" % (self.get_board(), stable_version)
- target_build_number = server_utils.ParseBuildName(
- target_build)[3]
- current_build_number = self.get_release_version()
-
- if current_build_number == target_build_number:
- logging.info('servo host %s does not require an update.',
- self.hostname)
- return
-
- status = self._check_update_status()
- if status == self.UPDATE_STATE.RUNNING:
- logging.info('servo host %s already processing an update',
- self.hostname)
- return
- if status == self.UPDATE_STATE.PENDING_REBOOT:
- # Labstation reboot is handled separately here as it require
- # synchronized reboot among all managed DUTs. For servo_v3, we'll
- # reboot when initialize Servohost, if there is a update pending.
- logging.info('An update has been completed and pending reboot.')
- return
-
- ds = dev_server.ImageServer.resolve(self.hostname,
- hostname=self.hostname)
- url = ds.get_update_url(target_build)
- # TODO dbeckett@, strip this out in favor of services.
- # cros_provisioner = provisioner.ChromiumOSProvisioner(update_url=url,
- # host=self,
- # is_servohost=True)
- # logging.info('Using devserver url: %s to trigger update on '
- # 'servo host %s, from %s to %s', url, self.hostname,
- # current_build_number, target_build_number)
- # cros_provisioner.run_provision()
-
-
- def has_power(self):
- """Return whether or not the servo host is powered by PoE or RPM."""
- # TODO(fdeng): See crbug.com/302791
- # For now, assume all servo hosts in the lab have power.
- return self.is_in_lab()
-
-
- def _post_update_reboot(self):
- """ Reboot servohost after an quick provision.
-
- We need to do some specifal cleanup before and after reboot
- when there is an update pending.
- """
- # Regarding the 'crossystem' command below: In some cases,
- # the update flow puts the TPM into a state such that it
- # fails verification. We don't know why. However, this
- # call papers over the problem by clearing the TPM during
- # the reboot.
- #
- # We ignore failures from 'crossystem'. Although failure
- # here is unexpected, and could signal a bug, the point of
- # the exercise is to paper over problems; allowing this to
- # fail would defeat the purpose.
-
- # Preserve critical files before reboot since post-provision
- # clobbering will wipe the stateful partition.
- # TODO(xianuowang@) Remove this logic once we have updated to
- # a image with https://crrev.com/c/2485908.
- path_to_preserve = [
- '/var/lib/servod',
- '/var/lib/device_health_profile',
- ]
- safe_location = '/mnt/stateful_partition/unencrypted/preserve/'
- for item in path_to_preserve:
- dest = os.path.join(safe_location, item.split('/')[-1])
- self.run('rm -rf %s' % dest, ignore_status=True)
- self.run('mv %s %s' % (item, safe_location), ignore_status=True)
-
- self.run('crossystem clear_tpm_owner_request=1', ignore_status=True)
- self._servo_host_reboot()
- logging.debug('Cleaning up autotest directories if exist.')
- try:
- installed_autodir = autotest.Autotest.get_installed_autodir(self)
- self.run('rm -rf ' + installed_autodir)
- except autotest.AutodirNotFoundError:
- logging.debug('No autotest installed directory found.')
-
- # Recover preserved files to original location.
- # TODO(xianuowang@) Remove this logic once we have updated to
- # a image with https://crrev.com/c/2485908.
- for item in path_to_preserve:
- src = os.path.join(safe_location, item.split('/')[-1])
- dest = '/'.join(item.split('/')[:-1])
- self.run('mv %s %s' % (src, dest), ignore_status=True)
-
- def power_cycle(self):
- """Cycle power to this host via PoE(servo v3) or RPM(labstation)
- if it is a lab device.
-
- @raises AutoservRepairError if it fails to power cycle the
- servo host.
-
- """
- if self.has_power():
- # TODO b/195443964: Re-wire as needed once TLW is available.
- logging.warning("Need TLW rpm_controller wiring.")
-
- def _servo_host_reboot(self):
- """Reboot this servo host because a reboot is requested."""
- try:
- # TODO(otabek) remove if found the fix for b/174514811
- # The default factory firmware remember the latest chromeboxes
- # status after power off. If box was in sleep mode before the
- # break, the box will stay at sleep mode after power on.
- # Disable power manager has make chromebox to boot always when
- # we deliver the power to the device.
- logging.info('Stoping powerd service on device')
- self.run('stop powerd', ignore_status=True, timeout=30)
- except Exception as e:
- logging.debug('(Not critical) Fail to stop powerd; %s', e)
-
- logging.info('Rebooting servo host %s from build %s', self.hostname,
- self.get_release_version())
- # Tell the reboot() call not to wait for completion.
- # Otherwise, the call will log reboot failure if servo does
- # not come back. The logged reboot failure will lead to
- # test job failure. If the test does not require servo, we
- # don't want servo failure to fail the test with error:
- # `Host did not return from reboot` in status.log.
- self.reboot(fastsync=True, wait=False)
-
- # We told the reboot() call not to wait, but we need to wait
- # for the reboot before we continue. Alas. The code from
- # here below is basically a copy of Host.wait_for_restart(),
- # with the logging bits ripped out, so that they can't cause
- # the failure logging problem described above.
- #
- # The stain that this has left on my soul can never be
- # erased.
- old_boot_id = self.get_boot_id()
- if not self.wait_down(timeout=self.WAIT_DOWN_REBOOT_TIMEOUT,
- warning_timer=self.WAIT_DOWN_REBOOT_WARNING,
- old_boot_id=old_boot_id):
- raise error.AutoservHostError(
- 'servo host %s failed to shut down.' %
- self.hostname)
- if self.wait_up(timeout=self.REBOOT_TIMEOUT):
- logging.info('servo host %s back from reboot, with build %s',
- self.hostname, self.get_release_version())
- else:
- raise error.AutoservHostError(
- 'servo host %s failed to come back from reboot.' %
- self.hostname)
-
-
- def make_ssh_command(self, user='root', port=22, opts='', hosts_file=None,
- connect_timeout=None, alive_interval=None, alive_count_max=None,
- connection_attempts=None):
- """Override default make_ssh_command to use tuned options.
-
- Tuning changes:
- - ConnectTimeout=30; maximum of 30 seconds allowed for an SSH
- connection failure. Consistency with remote_access.py.
-
- - ServerAliveInterval=180; which causes SSH to ping connection every
- 180 seconds. In conjunction with ServerAliveCountMax ensures
- that if the connection dies, Autotest will bail out quickly.
-
- - ServerAliveCountMax=3; consistency with remote_access.py.
-
- - ConnectAttempts=4; reduce flakiness in connection errors;
- consistency with remote_access.py.
-
- - UserKnownHostsFile=/dev/null; we don't care about the keys.
-
- - SSH protocol forced to 2; needed for ServerAliveInterval.
-
- @param user User name to use for the ssh connection.
- @param port Port on the target host to use for ssh connection.
- @param opts Additional options to the ssh command.
- @param hosts_file Ignored.
- @param connect_timeout Ignored.
- @param alive_interval Ignored.
- @param alive_count_max Ignored.
- @param connection_attempts Ignored.
-
- @returns: An ssh command with the requested settings.
-
- """
- options = ' '.join([opts, '-o Protocol=2'])
- return super(BaseServoHost, self).make_ssh_command(
- user=user, port=port, opts=options, hosts_file='/dev/null',
- connect_timeout=30, alive_interval=180, alive_count_max=3,
- connection_attempts=4)
-
-
- def _make_scp_cmd(self, sources, dest):
- """Format scp command.
-
- Given a list of source paths and a destination path, produces the
- appropriate scp command for encoding it. Remote paths must be
- pre-encoded. Overrides _make_scp_cmd in AbstractSSHHost
- to allow additional ssh options.
-
- @param sources: A list of source paths to copy from.
- @param dest: Destination path to copy to.
-
- @returns: An scp command that copies |sources| on local machine to
- |dest| on the remote servo host.
-
- """
- command = ('scp -rq %s -o BatchMode=yes -o StrictHostKeyChecking=no '
- '-o UserKnownHostsFile=/dev/null -P %d %s "%s"')
- return command % (self._main_ssh.ssh_option,
- self.port, sources, dest)
-
-
- def run(self, command, timeout=3600, ignore_status=False,
- stdout_tee=utils.TEE_TO_LOGS, stderr_tee=utils.TEE_TO_LOGS,
- connect_timeout=30, ssh_failure_retry_ok=False,
- options='', stdin=None, verbose=True, args=()):
- """Run a command on the servo host.
-
- Extends method `run` in SSHHost. If the servo host is a remote device,
- it will call `run` in SSHost without changing anything.
- If the servo host is 'localhost', it will call utils.system_output.
-
- @param command: The command line string.
- @param timeout: Time limit in seconds before attempting to
- kill the running process. The run() function
- will take a few seconds longer than 'timeout'
- to complete if it has to kill the process.
- @param ignore_status: Do not raise an exception, no matter
- what the exit code of the command is.
- @param stdout_tee/stderr_tee: Where to tee the stdout/stderr.
- @param connect_timeout: SSH connection timeout (in seconds)
- Ignored if host is 'localhost'.
- @param options: String with additional ssh command options
- Ignored if host is 'localhost'.
- @param ssh_failure_retry_ok: when True and ssh connection failure is
- suspected, OK to retry command (but not
- compulsory, and likely not needed here)
- @param stdin: Stdin to pass (a string) to the executed command.
- @param verbose: Log the commands.
- @param args: Sequence of strings to pass as arguments to command by
- quoting them in " and escaping their contents if necessary.
-
- @returns: A utils.CmdResult object.
-
- @raises AutoservRunError if the command failed.
- @raises AutoservSSHTimeout SSH connection has timed out. Only applies
- when servo host is not 'localhost'.
-
- """
- run_args = {
- 'command' : command,
- 'timeout' : timeout,
- 'ignore_status' : ignore_status,
- 'stdout_tee' : stdout_tee,
- 'stderr_tee' : stderr_tee,
- # connect_timeout n/a for localhost
- # options n/a for localhost
- # ssh_failure_retry_ok n/a for localhost
- 'stdin' : stdin,
- 'verbose' : verbose,
- 'args' : args,
- }
- if self.is_localhost():
- if self._sudo_required:
- run_args['command'] = 'sudo -n sh -c "%s"' % utils.sh_escape(
- command)
- try:
- return utils.run(**run_args)
- except error.CmdError as e:
- logging.error(e)
- raise error.AutoservRunError('command execution error',
- e.result_obj)
- else:
- run_args['connect_timeout'] = connect_timeout
- run_args['options'] = options
- run_args['ssh_failure_retry_ok'] = ssh_failure_retry_ok
- return super(BaseServoHost, self).run(**run_args)
-
- def _mount_drive(self, src_path, dst_path):
- """Mount an external drive on servohost.
-
- @param: src_path the drive path to mount(e.g. /dev/sda3).
- @param: dst_path the destination directory on servohost to mount
- the drive.
-
- @returns: True if mount success otherwise False.
- """
- # Make sure the dst dir exists.
- self.run('mkdir -p %s' % dst_path)
-
- result = self.run('mount -o ro %s %s' % (src_path, dst_path),
- ignore_status=True)
- return result.exit_status == 0
-
- def _unmount_drive(self, mount_path):
- """Unmount a drive from servohost.
-
- @param: mount_path the path on servohost to unmount.
-
- @returns: True if unmount success otherwise False.
- """
- result = self.run('umount %s' % mount_path, ignore_status=True)
- return result.exit_status == 0
-
- def wait_ready(self, required_uptime=300):
- """Wait ready for a servohost if it has been rebooted recently.
-
- It may take a few minutes until all servos and their componments
- re-enumerated and become ready after a servohost(especially labstation
- as it supports multiple servos) reboot, so we need to make sure the
- servohost has been up for a given a mount of time before trying to
- start any actions.
-
- @param required_uptime: Minimum uptime in seconds that we can
- consdier a servohost be ready.
- """
- uptime = float(self.check_uptime())
- # To prevent unexpected output from check_uptime() that causes long
- # sleep, make sure the maximum wait time <= required_uptime.
- diff = min(required_uptime - uptime, required_uptime)
- if diff > 0:
- logging.info(
- 'The servohost was just rebooted, wait %s'
- ' seconds for it to become ready', diff)
- time.sleep(diff)
diff --git a/server/hosts/chameleon_host.py b/server/hosts/chameleon_host.py
deleted file mode 100644
index a0c849e..0000000
--- a/server/hosts/chameleon_host.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-
-"""This file provides core logic for connecting a Chameleon Daemon."""
-
-import logging
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.cros.chameleon import chameleon
-from autotest_lib.server.cros import dnsname_mangler
-from autotest_lib.server.hosts import ssh_host
-
-
-# Names of the host attributes in the database that represent the values for
-# the chameleon_host and chameleon_port for a servo connected to the DUT.
-CHAMELEON_HOST_ATTR = 'chameleon_host'
-CHAMELEON_PORT_ATTR = 'chameleon_port'
-
-_CONFIG = global_config.global_config
-ENABLE_SSH_TUNNEL_FOR_CHAMELEON = _CONFIG.get_config_value(
- 'CROS', 'enable_ssh_tunnel_for_chameleon', type=bool, default=False)
-
-class ChameleonHostError(Exception):
- """Error in ChameleonHost."""
- pass
-
-
-class ChameleonHost(ssh_host.SSHHost):
- """Host class for a host that controls a Chameleon."""
-
- # Chameleond process name.
- CHAMELEOND_PROCESS = 'chameleond'
-
-
- # TODO(waihong): Add verify and repair logic which are required while
- # deploying to Cros Lab.
-
-
- def _initialize(self, chameleon_host='localhost', chameleon_port=9992,
- *args, **dargs):
- """Initialize a ChameleonHost instance.
-
- A ChameleonHost instance represents a host that controls a Chameleon.
-
- @param chameleon_host: Name of the host where the chameleond process
- is running.
- If this is passed in by IP address, it will be
- treated as not in lab.
- @param chameleon_port: Port the chameleond process is listening on.
-
- """
- super(ChameleonHost, self)._initialize(hostname=chameleon_host,
- *args, **dargs)
-
- self._is_in_lab = None
- self._check_if_is_in_lab()
-
- self._chameleon_port = chameleon_port
- self._local_port = None
- self._tunneling_process = None
-
- try:
- if self._is_in_lab and not ENABLE_SSH_TUNNEL_FOR_CHAMELEON:
- self._chameleon_connection = chameleon.ChameleonConnection(
- self.hostname, chameleon_port)
- else:
- # A proxy generator is passed as an argument so that a proxy
- # could be re-created on demand in ChameleonConnection
- # whenever needed, e.g., after a reboot.
- proxy_generator = (
- lambda: self.rpc_server_tracker.xmlrpc_connect(
- None, chameleon_port,
- ready_test_name=chameleon.CHAMELEON_READY_TEST,
- timeout_seconds=60))
- self._chameleon_connection = chameleon.ChameleonConnection(
- None, proxy_generator=proxy_generator)
-
- except Exception as e:
- raise ChameleonHostError('Can not connect to Chameleon: %s(%s)',
- e.__class__, e)
-
-
- def _check_if_is_in_lab(self):
- """Checks if Chameleon host is in lab and set self._is_in_lab.
-
- If self.hostname is an IP address, we treat it as is not in lab zone.
-
- """
- self._is_in_lab = (False if dnsname_mangler.is_ip_address(self.hostname)
- else utils.host_is_in_lab_zone(self.hostname))
-
-
- def is_in_lab(self):
- """Check whether the chameleon host is a lab device.
-
- @returns: True if the chameleon host is in Cros Lab, otherwise False.
-
- """
- return self._is_in_lab
-
-
- def get_wait_up_processes(self):
- """Get the list of local processes to wait for in wait_up.
-
- Override get_wait_up_processes in
- autotest_lib.client.common_lib.hosts.base_classes.Host.
- Wait for chameleond process to go up. Called by base class when
- rebooting the device.
-
- """
- processes = [self.CHAMELEOND_PROCESS]
- return processes
-
-
- def create_chameleon_board(self):
- """Create a ChameleonBoard object with error recovery.
-
- This function will reboot the chameleon board once and retry if we can't
- create chameleon board.
-
- @return A ChameleonBoard object.
- """
- # TODO(waihong): Add verify and repair logic which are required while
- # deploying to Cros Lab.
- chameleon_board = None
- try:
- chameleon_board = chameleon.ChameleonBoard(
- self._chameleon_connection, self)
- return chameleon_board
- except:
- self.reboot()
- chameleon_board = chameleon.ChameleonBoard(
- self._chameleon_connection, self)
- return chameleon_board
-
-
-def create_chameleon_host(dut, chameleon_args):
- """Create a ChameleonHost object.
-
- There three possible cases:
- 1) If the DUT is in Cros Lab and has a chameleon board, then create
- a ChameleonHost object pointing to the board. chameleon_args
- is ignored.
- 2) If not case 1) and chameleon_args is neither None nor empty, then
- create a ChameleonHost object using chameleon_args.
- 3) If neither case 1) or 2) applies, return None.
-
- @param dut: host name of the host that chameleon connects. It can be used
- to lookup the chameleon in test lab using naming convention.
- If dut is an IP address, it can not be used to lookup the
- chameleon in test lab.
- @param chameleon_args: A dictionary that contains args for creating
- a ChameleonHost object,
- e.g. {'chameleon_host': '172.11.11.112',
- 'chameleon_port': 9992}.
-
- @returns: A ChameleonHost object or None.
-
- """
- if not utils.is_in_container():
- is_moblab = utils.is_moblab()
- else:
- is_moblab = _CONFIG.get_config_value(
- 'SSP', 'is_moblab', type=bool, default=False)
-
- dut_is_hostname = not dnsname_mangler.is_ip_address(dut)
- if dut_is_hostname:
- chameleon_hostname = chameleon.make_chameleon_hostname(dut)
- if utils.host_is_in_lab_zone(chameleon_hostname):
- # Be more tolerant on chameleon in the lab because
- # we don't want dead chameleon blocks non-chameleon tests.
- if utils.ping(chameleon_hostname, deadline=3):
- logging.warning(
- 'Chameleon %s is not accessible. Please file a bug'
- ' to test lab', chameleon_hostname)
- return None
- return ChameleonHost(chameleon_host=chameleon_hostname)
- if chameleon_args:
- return ChameleonHost(**chameleon_args)
- else:
- return None
-
-
-def create_btpeer_host(dut, btpeer_args_list):
- """Create a ChameleonHost object for a Bluetooth peer
-
- This is similar to create_chameleon_host but unlike chameleon board
- there can be multiple btpeers with a single DUT
-
- There four possible cases:
- 1) If the DUT is in Cros Lab then assume that it can have up to 4 bluetooth
- peers. Ping the url and create a Chameleon host for each Bluetooth peer
- present. btpeer_args_list is ignored.
- 2) If not case 1) and btpeer_args_list is not empty, then
- create a BtpeerHost object for each host specified in btpeer_args_list.
- 3) If neither case 1) or 2) applies, return None.
- 4) This DUT is controlled by moblab. This case is not implemented.
-
-
- @param dut: host name of the host that btpeer connects. It can be used
- to lookup the btpeer in test lab using naming convention.
- If dut is an IP address, it can not be used to lookup the
- btpeer in test lab. Naming convention in the lab is
- <hostname>-btpeer[1-4]
- @param btpeer_args_list: A list of dictionaries that contains args for
- creating a BtpeerHost object,
- e.g. {'btpeer_host': '172.11.11.112',
- 'btpeer_port': 9992}.
-
- @returns: A list of BtpeerHost objects
-
- """
- def _convert_btpeer_args(args):
- """Convert btpeer args to format accepted by ChameleonHost."""
- ret_args = {}
- if 'btpeer_host' in args:
- ret_args['chameleon_host'] = args['btpeer_host']
- if 'btpeer_port' in args:
- ret_args['chameleon_port'] = args['btpeer_port']
- if 'btpeer_ssh_port' in args:
- ret_args['port'] = int(args['btpeer_ssh_port'])
- return ret_args
-
- if not utils.is_in_container():
- is_moblab = utils.is_moblab()
- else:
- is_moblab = _CONFIG.get_config_value(
- 'SSP', 'is_moblab', type=bool, default=False)
-
- btpeer_hosts = []
-
- if not is_moblab:
- if (not dnsname_mangler.is_ip_address(dut) and
- utils.host_is_in_lab_zone(dut)):
- # This is a device in the lab. Ignore any arguments passed and
- # derive peer hostnames from the DUT hostname
- btpeer_hostnames = chameleon.make_btpeer_hostnames(dut)
- for btpeer_hostname in btpeer_hostnames:
- # Not all test bed have 4 Bluetooth peers
- if utils.ping(btpeer_hostname, deadline=3):
- logging.warning('Btpeer %s is not accessible. This maybe '
- 'expected or it maybe an issue with the '
- 'Bluetooth peer. Please Check the test bed.'
- , btpeer_hostname)
- continue
- else:
- logging.debug("Creating btpeer from %s",btpeer_hostname)
- btpeer_hosts.append(
- ChameleonHost(chameleon_host=btpeer_hostname))
- return btpeer_hosts
- else:
- # IP address given or DNS address is not in lab.
- # Create the Bluetooth peers from the arguments passed
- return [ ChameleonHost(**_convert_btpeer_args(btpeer_args))
- for btpeer_args in btpeer_args_list]
- else:
- # TODO(b:149606762)
- # moblab still create Bluetooth peer from chameleon_args
- afe = frontend_wrappers.RetryingAFE(timeout_min=5, delay_sec=10)
- hosts = afe.get_hosts(hostname=dut)
- if hosts and CHAMELEON_HOST_ATTR in hosts[0].attributes:
- return [ChameleonHost(
- chameleon_host=hosts[0].attributes[CHAMELEON_HOST_ATTR],
- chameleon_port=hosts[0].attributes.get(
- CHAMELEON_PORT_ATTR, 9992)
- )]
- else:
- return []
diff --git a/server/hosts/cros_constants.py b/server/hosts/cros_constants.py
deleted file mode 100644
index 0815d91..0000000
--- a/server/hosts/cros_constants.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) 2020 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Device is not fixable due issues with hardware and has to be replaced
-DEVICE_STATE_NEEDS_REPLACEMENT = 'needs_replacement'
-# Device required manual attention to be fixed
-DEVICE_STATE_NEEDS_MANUAL_REPAIR = 'needs_manual_repair'
-# Device required re-deployment to be fixed
-DEVICE_STATE_NEEDS_DEPLOY = 'needs_deploy'
-
-# Timeout for verifiers.
-SHORT_VERIFY_TIMEOUT_SEC = 60
-VERIFY_TIMEOUT_SEC = 60 * 5
-LONG_VERIFY_TIMEOUT_SEC = 60 * 20
-
-# Timeout for repair actions.
-SHORT_REPAIR_TIMEOUT_SEC = 60
-REPAIR_TIMEOUT_SEC = 60 * 10
-LONG_REPAIR_TIMEOUT_SEC = 60 * 30
-
-# Minimum battery level for the good DUT.
-MIN_BATTERY_LEVEL = 80.0
diff --git a/server/hosts/cros_firmware.py b/server/hosts/cros_firmware.py
deleted file mode 100644
index 3e0c04f..0000000
--- a/server/hosts/cros_firmware.py
+++ /dev/null
@@ -1,420 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Repair actions and verifiers relating to CrOS firmware.
-
-This contains the repair actions and verifiers need to find problems
-with the firmware installed on Chrome OS DUTs, and when necessary, to
-fix problems by updating or re-installing the firmware.
-
-The operations in the module support two distinct use cases:
- * DUTs used for FAFT tests can in some cases have problems with
- corrupted firmware. The module supplies `FirmwareStatusVerifier`
- to check for corruption, and supplies `FaftFirmwareRepair` to
- re-install firmware of current faft stable_version via servo
- when needed.
- * DUTs used for general testing normally should be running a
- designated "stable" firmware version. This module supplies
- `FirmwareVersionVerifier` to detect and automatically update
- firmware that is out-of-date from the designated version. This model
- also supplys `GeneralFirmwareRepair` to re-install firmware that
- tied with current stable_version image via servo when needed.
-
-For purposes of the operations in the module, we distinguish three kinds
-of DUT, based on pool assignments:
- * DUTs used for general testing. These DUTs automatically check for
- and install the stable firmware using `FirmwareVersionVerifier`.
- * DUTs in pools used for FAFT testing. These check for bad firmware
- builds with `FirmwareStatusVerifier`, and will fix problems using
- `FirmwareRepair`. These DUTs don't check for or install the
- stable firmware.
- * DUTs not in general pools, and not used for FAFT. These DUTs
- are expected to be managed by separate processes and are excluded
- from all of the verification and repair code in this module.
-"""
-
-# pylint: disable=missing-docstring
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import json
-import logging
-
-import common
-from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.common_lib import hosts
-from autotest_lib.server import afe_utils
-from autotest_lib.server import tauto_warnings
-from autotest_lib.server.hosts import cros_constants
-
-from autotest_lib.utils.frozen_chromite.lib import timeout_util
-import six
-
-
-# _FIRMWARE_REPAIR_POOLS - The set of pools that should be
-# managed by `FirmwareStatusVerifier` and `FirmwareRepair`.
-#
-_FIRMWARE_REPAIR_POOLS = set(
- global_config.global_config.get_config_value(
- 'CROS',
- 'pools_support_firmware_repair',
- type=str).split(','))
-
-
-def _is_firmware_testing_device(host):
- """
- check if a host is dedicated for firmware testing.
-
- When this function returns true, the DUT should be managed by
- `FirmwareStatusVerifier` and `FaftFirmwareRepair`, but not
- `FirmwareVersionVerifier` and `GeneralFirmwareRepair.
-
- @return A true value if the host should use `FirmwareStatusVerifier`
- and `FaftFirmwareRepair`; a false value otherwise.
- """
- info = host.host_info_store.get()
- return bool(info.pools & _FIRMWARE_REPAIR_POOLS)
-
-
-def _is_firmware_update_supported(host):
- """
- Return whether a DUT should be running the standard firmware.
-
- In the test lab, DUTs used for general testing, (e.g. the `bvt`
- pool) need their firmware kept up-to-date with
- `FirmwareVersionVerifier`. However, some pools have alternative
- policies for firmware management. This returns whether a given DUT
- should be updated via the standard stable version update, or
- managed by some other procedure.
-
- @param host The host to be checked for update policy.
- @return A true value if the host should use
- `FirmwareVersionVerifier`; a false value otherwise.
- """
- return not _is_firmware_testing_device(host)
-
-
-def _get_available_firmware(host, model):
- """Get the available RW firmware version given the model.
-
- @param host The host to get available firmware for.
- @param model The model name to get corresponding firmware version.
- @return The available RW firmware version if found, else, None.
- """
- result = host.run('chromeos-firmwareupdate --manifest', ignore_status=True)
-
- if result.exit_status != 0:
- return None
-
- # The manifest is a JSON in .model.host.versions.rw
- data = json.loads(result.stdout) or {}
- key = model if len(data) > 1 else next(six.iterkeys(data), '')
- key += '.host.versions.rw'
- for k in key.split('.'):
- data = data.get(k, {})
- return data or None
-
-
-class FirmwareStatusVerifier(hosts.Verifier):
- """
- Verify that a host's firmware is in a good state.
-
- For DUTs that run firmware tests, it's possible that the firmware
- on the DUT can get corrupted. This verifier checks whether it
- appears that firmware should be re-flashed using servo.
- """
-
- @timeout_util.TimeoutDecorator(cros_constants.VERIFY_TIMEOUT_SEC)
- def verify(self, host):
- if not _is_firmware_testing_device(host):
- return
- try:
- # Read the AP firmware and dump the sections that we're
- # interested in.
- cmd = ('mkdir /tmp/verify_firmware; '
- 'cd /tmp/verify_firmware; '
- 'for section in VBLOCK_A VBLOCK_B FW_MAIN_A FW_MAIN_B; '
- 'do flashrom -p host -r -i $section:$section; '
- 'done')
- host.run(cmd)
-
- # Verify the firmware blocks A and B.
- cmd = ('vbutil_firmware --verify /tmp/verify_firmware/VBLOCK_%c'
- ' --signpubkey /usr/share/vboot/devkeys/root_key.vbpubk'
- ' --fv /tmp/verify_firmware/FW_MAIN_%c')
- for c in ('A', 'B'):
- rv = host.run(cmd % (c, c), ignore_status=True)
- if rv.exit_status:
- raise hosts.AutoservVerifyError(
- 'Firmware %c is in a bad state.' % c)
- finally:
- # Remove the temporary files.
- host.run('rm -rf /tmp/verify_firmware')
-
- @property
- def description(self):
- return 'Firmware on this DUT is clean'
-
-
-class FirmwareRepair(hosts.RepairAction):
- """
- Reinstall the firmware image using servo.
-
- This repair function attempts to use servo to install the DUT's
- designated "stable firmware version".
-
- This repair method only applies to DUTs used for FAFT.
- """
-
- def _get_faft_stable_build(self, host):
- info = host.host_info_store.get()
- return afe_utils.get_stable_faft_version_v2(info)
-
- def _get_os_stable_build(self, host):
- # Use firmware in current stable os build.
- return host.get_cros_repair_image_name()
-
- def _run_faft_repair(self, host, build):
- host.firmware_install(build)
-
- def _run_general_repair(self, host, build):
- # As GeneralFirmwareRepair is the last repair action, we expect
- # stable_version os image is loaded on usbkey during other repair
- # action runs. And there is also no point to repeat and waste time if
- # download image to usbkey failed in other repair actions.
- if host._servo_host.validate_image_usbkey() != build:
- raise hosts.AutoservRepairError('%s is expected to be preloaded,'
- 'however it\'s not found on the usbkey' % build,
- 'image not loaded on usbkey')
- ec_image, bios_image = host._servo_host.prepare_repair_firmware_image()
-
- # For EVT device with signed variant exists we skip this repair
- # as it's hard to decide which image to use if DUT do not boot.
- info = host.host_info_store.get()
- phase = info.get_label_value('phase')
- if 'signed' in bios_image and phase.lower() in ('evt', 'dvt', ''):
- raise hosts.AutoservRepairError(
- 'Could not determine which firmware image to use'
- ' due to signed firmware image variant exists but'
- ' DUT phase is earlier than PVT or missing; Phase'
- ' from inventory: %s' % phase,
- 'Can not determine variant for EVT device')
-
- # Before flash firmware we want update the build into health profile.
- if host.health_profile:
- host.health_profile.set_firmware_stable_version(build)
-
- if ec_image:
- logging.info('Attempting to flash ec firmware...')
- host.servo.program_ec(ec_image, copy_image=False)
- if bios_image:
- logging.info('Attempting to flash bios firmware...')
- host._servo_host.flash_ap_firmware_via_servo(bios_image)
-
- logging.info('Cold resetting DUT through servo...')
- host.servo.get_power_state_controller().reset()
- host.wait_up(timeout=host.BOOT_TIMEOUT)
- # flash firmware via servo will turn DUT into dev mode, so disable
- # dev mode and reset gbb flag here.
- host.run('/usr/share/vboot/bin/set_gbb_flags.sh 0', ignore_status=True)
- host.run('crossystem disable_dev_request=1', ignore_status=True)
- host.reboot()
-
-
-class FaftFirmwareRepair(FirmwareRepair):
- """
- Reinstall the firmware for DUTs in faft related pool.
- """
-
- def repair(self, host):
- tauto_warnings.lab_services_warn_and_error("Require servo", err=False)
- build = self._get_faft_stable_build(host)
- if build:
- self._run_faft_repair(host, build)
- else:
- logging.info('Cannot find faft stable_version, falling back to'
- ' use firmware on OS stable_version.')
- build = self._get_os_stable_build(host)
- if not build:
- raise hosts.AutoservRepairError(
- 'Failed to find stable_version from host_info.',
- 'cannot find stable_version')
- self._run_general_repair(host, build)
-
- def _is_applicable(self, host):
- return _is_firmware_testing_device(host)
-
- @property
- def description(self):
- return 'Re-install the stable firmware(faft) via servo'
-
-
-class FirmwareVersionVerifier(hosts.Verifier):
- """
- Check for a firmware update, and apply it if appropriate.
-
- This verifier checks to ensure that either the firmware on the DUT
- is up-to-date, or that the target firmware can be installed from the
- currently running build.
-
- Failure occurs when all of the following apply:
- 1. The DUT is not excluded from updates. For example, DUTs used
- for FAFT testing use `FirmwareRepair` instead.
- 2. The DUT's board has an assigned stable firmware version.
- 3. The DUT is not running the assigned stable firmware.
- 4. The firmware supplied in the running OS build is not the
- assigned stable firmware.
-
- If the DUT needs an upgrade and the currently running OS build
- supplies the necessary firmware, the verifier installs the new
- firmware using `chromeos-firmwareupdate`. Failure to install will
- cause the verifier to fail.
-
- This verifier nominally breaks the rule that "verifiers must succeed
- quickly", since it can invoke `reboot()` during the success code
- path. We're doing it anyway for two reasons:
- * The time between updates will typically be measured in months,
- so the amortized cost is low.
- * The reason we distinguish repair from verify is to allow
- rescheduling work immediately while the expensive repair happens
- out-of-band. But a firmware update will likely hit all DUTs at
- once, so it's pointless to pass the buck to repair.
-
- N.B. This verifier is a trigger for all repair actions that install
- the stable repair image. If the firmware is out-of-date, but the
- stable repair image does *not* contain the proper firmware version,
- _the target DUT will fail repair, and will be unable to fix itself_.
- """
-
- @staticmethod
- def _get_rw_firmware(host):
- result = host.run('crossystem fwid', ignore_status=True)
- if result.exit_status == 0:
- return result.stdout
- else:
- return None
-
- @staticmethod
- def _check_hardware_match(version_a, version_b):
- """
- Check that two firmware versions identify the same hardware.
-
- Firmware version strings look like this:
- Google_Gnawty.5216.239.34
- The part before the numbers identifies the hardware for which
- the firmware was built. This function checks that the hardware
- identified by `version_a` and `version_b` is the same.
-
- This is a confidence check to protect us from installing the wrong
- firmware on a DUT when a board label has somehow gone astray.
-
- @param version_a First firmware version for the comparison.
- @param version_b Second firmware version for the comparison.
- """
- hardware_a = version_a.split('.')[0]
- hardware_b = version_b.split('.')[0]
- if hardware_a != hardware_b:
- message = 'Hardware/Firmware mismatch updating %s to %s'
- raise hosts.AutoservVerifyError(
- message % (version_a, version_b))
-
- def _is_stable_image_installed(self, host):
- """Verify that ChromeOS image on host is a stable version.
-
- This check verify that device booted from stable image to protect us
- from installing the firmware from bad/broken/no-tested image. Bad
- image can have broken updater or corrupted firmware.
-
- The representation version looks like:
- nocturne-release/R89-13728.0.0
- Check compare version from host to version provide as stable image
- from host-info file.
-
- @param host CrosHost instance.
- """
- os_from_host = host.get_release_builder_path()
- os_from_host_info = host.get_cros_repair_image_name()
- if os_from_host != os_from_host_info:
- raise hosts.AutoservNonCriticalVerifyError(
- 'Firmware update can be run only from stable image.'
- ' Expected version:"%s", actually: "%s"' %
- (os_from_host_info, os_from_host))
-
- @timeout_util.TimeoutDecorator(cros_constants.VERIFY_TIMEOUT_SEC)
- def verify(self, host):
- # Test 1 - The DUT is not excluded from updates.
- if not _is_firmware_update_supported(host):
- return
- # Test 2 - The DUT has an assigned stable firmware version.
- info = host.host_info_store.get()
- if info.model is None:
- raise hosts.AutoservVerifyError(
- 'Can not verify firmware version. '
- 'No model label value found')
-
- stable_firmware = None
- try:
- stable_firmware = afe_utils.get_stable_firmware_version_v2(info)
- except Exception as e:
- logging.exception('Failed lookup to AFE for stable fw version '
- ' with exception: %s', e)
-
- if stable_firmware is None:
- logging.debug('Expected FW version not found')
- # This DUT doesn't have a firmware update target
- return
- logging.debug('Expected FW version: %s', stable_firmware)
- # For tests 3 and 4: If the output from `crossystem` or
- # `chromeos-firmwareupdate` isn't what we expect, we log an
- # error, but don't fail: We don't want DUTs unable to test a
- # build merely because of a bug or change in either of those
- # commands.
-
- # Test 3 - The DUT is not running the target stable firmware.
- current_firmware = self._get_rw_firmware(host)
- if current_firmware is None:
- logging.error('DUT firmware version can\'t be determined.')
- return
- logging.debug('Current FW version: %s', current_firmware)
- if current_firmware == stable_firmware:
- return
- # Test 4 - The firmware supplied in the running OS build is not
- # the assigned stable firmware.
- available_firmware = _get_available_firmware(host, info.model)
- if available_firmware is None:
- logging.error('Supplied firmware version in OS can\'t be '
- 'determined.')
- return
- self._is_stable_image_installed(host)
- if available_firmware != stable_firmware:
- raise hosts.AutoservVerifyError(
- 'DUT firmware requires update from %s to %s' %
- (current_firmware, stable_firmware))
- # Time to update the firmware.
- logging.info('Updating firmware from %s to %s',
- current_firmware, stable_firmware)
- self._check_hardware_match(current_firmware, stable_firmware)
- try:
- host.run('chromeos-firmwareupdate --mode=autoupdate')
- host.reboot()
- except Exception as e:
- message = ('chromeos-firmwareupdate failed: from '
- '%s to %s')
- logging.exception(message, current_firmware, stable_firmware)
- raise hosts.AutoservVerifyError(
- message % (current_firmware, stable_firmware))
- final_firmware = self._get_rw_firmware(host)
- if final_firmware != stable_firmware:
- message = ('chromeos-firmwareupdate failed: tried upgrade '
- 'to %s, now running %s instead')
- raise hosts.AutoservVerifyError(
- message % (stable_firmware, final_firmware))
-
- @property
- def description(self):
- return 'The firmware on this DUT is up-to-date'
diff --git a/server/hosts/cros_firmware_unittest.py b/server/hosts/cros_firmware_unittest.py
deleted file mode 100644
index 899b335..0000000
--- a/server/hosts/cros_firmware_unittest.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import mock
-import unittest
-
-import common
-from autotest_lib.server import utils
-from server.hosts import cros_firmware
-
-
-VERSION_OUTPUT = """
-{
- "any-model": {
- "host": { "versions": { "ro": "Google_Kukui.12573.13.0", "rw": "Google_Kukui.12573.13.0" },
- "keys": { "root": "b11d74edd286c144e1135b49e7f0bc20cf041f10", "recovery": "c14bd720b70d97394257e3e826bd8f43de48d4ed" },
- "image": "images/bios-kukui.ro-12573-13-0.rw-12573-13-0.bin" },
- "ec": { "versions": { "ro": "kukui_v2.0.2352-5c2c3c7a0", "rw": "kukui_v2.0.2352-5c2c3c7a0" },
- "image": "images/ec-kukui.ro-2-0-2352.rw-2-0-2352.bin" },
- "signature_id": "kukui"
- }
-}
-"""
-
-NO_VERSION_OUTPUT = """
-{
-}
-"""
-
-UNIBUILD_VERSION_OUTPUT = """
-{
- "kukui": {
- "host": { "versions": { "ro": "Google_Kukui.12573.13.0", "rw": "Google_Kukui.12573.13.0" },
- "keys": { "root": "b11d74edd286c144e1135b49e7f0bc20cf041f10", "recovery": "c14bd720b70d97394257e3e826bd8f43de48d4ed" },
- "image": "images/bios-kukui.ro-12573-13-0.rw-12573-13-0.bin" },
- "ec": { "versions": { "ro": "kukui_v2.0.2352-5c2c3c7a0", "rw": "kukui_v2.0.2352-5c2c3c7a0" },
- "image": "images/ec-kukui.ro-2-0-2352.rw-2-0-2352.bin" },
- "signature_id": "kukui"
- },
- "kodama": {
- "host": { "versions": { "ro": "Google_Kodama.12573.14.0", "rw": "Google_Kodama.12573.15.0" },
- "keys": { "root": "b11d74edd286c144e1135b49e7f0bc20cf041f10", "recovery": "c14bd720b70d97394257e3e826bd8f43de48d4ed" },
- "image": "images/bios-kodama.ro-12573-14-0.rw-12573-15-0.bin" },
- "ec": { "versions": { "ro": "kodama_v2.0.2354-8c3c92f29", "rw": "kodama_v2.0.2354-8c3c92f29" },
- "image": "images/ec-kodama.ro-2-0-2354.rw-2-0-2354.bin" },
- "signature_id": "kodama"
- },
- "krane": {
- "host": { "versions": { "ro": "Google_Krane.12573.13.0", "rw": "Google_Krane.12573.13.0" },
- "keys": { "root": "b11d74edd286c144e1135b49e7f0bc20cf041f10", "recovery": "c14bd720b70d97394257e3e826bd8f43de48d4ed" },
- "image": "images/bios-krane.ro-12573-13-0.rw-12573-13-0.bin" },
- "ec": { "versions": { "ro": "krane_v2.0.2352-5c2c3c7a0", "rw": "krane_v2.0.2352-5c2c3c7a0" },
- "image": "images/ec-krane.ro-2-0-2352.rw-2-0-2352.bin" },
- "signature_id": "krane"
- }
-}
-"""
-
-
-class FirmwareVersionVerifierTest(unittest.TestCase):
- """Tests for FirmwareVersionVerifier."""
-
- def test_get_available_firmware_on_update_with_failure(self):
- """Test _get_available_firmware when update script exit_status=1."""
- result = utils.CmdResult(exit_status=1)
- host = mock.Mock()
- host.run.return_value = result
-
- fw = cros_firmware._get_available_firmware(host, 'lumpy')
- self.assertIsNone(fw)
-
- def test_get_available_firmware_returns_version(self):
- """_get_available_firmware returns BIOS version."""
- result = utils.CmdResult(stdout=VERSION_OUTPUT, exit_status=0)
- host = mock.Mock()
- host.run.return_value = result
-
- fw = cros_firmware._get_available_firmware(host, 'kukui')
- self.assertEqual(fw, 'Google_Kukui.12573.13.0')
-
- def test_get_available_firmware_returns_none(self):
- """_get_available_firmware returns None."""
- result = utils.CmdResult(stdout=NO_VERSION_OUTPUT, exit_status=0)
- host = mock.Mock()
- host.run.return_value = result
-
- fw = cros_firmware._get_available_firmware(host, 'kukui')
- self.assertIsNone(fw)
-
- def test_get_available_firmware_unibuild(self):
- """_get_available_firmware on unibuild board with multiple models."""
- result = utils.CmdResult(stdout=UNIBUILD_VERSION_OUTPUT,
- exit_status=0)
- host = mock.Mock()
- host.run.return_value = result
-
- fw = cros_firmware._get_available_firmware(host, 'kukui')
- self.assertEqual(fw, 'Google_Kukui.12573.13.0')
-
- fw = cros_firmware._get_available_firmware(host, 'kodama')
- self.assertEqual(fw, 'Google_Kodama.12573.15.0')
-
- fw = cros_firmware._get_available_firmware(host, 'flapjack')
- self.assertIsNone(fw)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/server/hosts/cros_host.py b/server/hosts/cros_host.py
deleted file mode 100644
index d2aae7f..0000000
--- a/server/hosts/cros_host.py
+++ /dev/null
@@ -1,2464 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import logging
-import os
-import re
-import sys
-import time
-
-import common
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import autotemp
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import global_config
-from autotest_lib.client.common_lib import lsbrelease_utils
-from autotest_lib.client.common_lib.cros import cros_config
-from autotest_lib.client.common_lib.cros import dev_server
-from autotest_lib.client.common_lib.cros import retry
-from autotest_lib.client.cros import constants as client_constants
-from autotest_lib.client.cros import cros_ui
-from autotest_lib.server import utils as server_utils
-from autotest_lib.server import tauto_warnings
-from autotest_lib.server.consts import consts
-from autotest_lib.server.cros.dynamic_suite import constants as ds_constants
-from autotest_lib.server.cros.dynamic_suite import tools
-from autotest_lib.server.cros.device_health_profile import device_health_profile
-from autotest_lib.server.cros.device_health_profile import profile_constants
-from autotest_lib.server.cros.servo import pdtester
-from autotest_lib.server.hosts import abstract_ssh
-from autotest_lib.server.hosts import base_label
-from autotest_lib.server.hosts import chameleon_host
-from autotest_lib.server.hosts import cros_constants
-from autotest_lib.server.hosts import cros_label
-from autotest_lib.server.hosts import pdtester_host
-from autotest_lib.server.hosts import servo_host
-from autotest_lib.server.hosts import servo_constants
-from autotest_lib.site_utils.admin_audit import constants as audit_const
-from six.moves import zip
-
-
-CONFIG = global_config.global_config
-
-class FactoryImageCheckerException(error.AutoservError):
- """Exception raised when an image is a factory image."""
- pass
-
-
-class CrosHost(abstract_ssh.AbstractSSHHost):
- """Chromium OS specific subclass of Host."""
-
- VERSION_PREFIX = consts.CROS_VERSION_PREFIX
-
-
- # Timeout values (in seconds) associated with various Chrome OS
- # state changes.
- #
- # In general, a good rule of thumb is that the timeout can be up
- # to twice the typical measured value on the slowest platform.
- # The times here have not necessarily been empirically tested to
- # meet this criterion.
- #
- # SLEEP_TIMEOUT: Time to allow for suspend to memory.
- # RESUME_TIMEOUT: Time to allow for resume after suspend, plus
- # time to restart the netwowrk.
- # SHUTDOWN_TIMEOUT: Time to allow for shut down.
- # BOOT_TIMEOUT: Time to allow for boot from power off. Among
- # other things, this must account for the 30 second dev-mode
- # screen delay, time to start the network on the DUT, and the
- # ssh timeout of 120 seconds.
- # USB_BOOT_TIMEOUT: Time to allow for boot from a USB device,
- # including the 30 second dev-mode delay and time to start the
- # network.
- # INSTALL_TIMEOUT: Time to allow for chromeos-install.
- # ADMIN_INSTALL_TIMEOUT: Time to allow for chromeos-install
- # used by admin tasks.
- # POWERWASH_BOOT_TIMEOUT: Time to allow for a reboot that
- # includes powerwash.
-
- SLEEP_TIMEOUT = 2
- RESUME_TIMEOUT = 10
- SHUTDOWN_TIMEOUT = 10
- BOOT_TIMEOUT = 150
- USB_BOOT_TIMEOUT = 300
- INSTALL_TIMEOUT = 480
- ADMIN_INSTALL_TIMEOUT = 600
- POWERWASH_BOOT_TIMEOUT = 60
-
- # Minimum OS version that supports server side packaging. Older builds may
- # not have server side package built or with Autotest code change to support
- # server-side packaging.
- MIN_VERSION_SUPPORT_SSP = CONFIG.get_config_value(
- 'AUTOSERV', 'min_version_support_ssp', type=int)
-
- USE_FSFREEZE = CONFIG.get_config_value(
- 'CROS', 'enable_fs_freeze', type=bool, default=False)
-
- # REBOOT_TIMEOUT: How long to wait for a reboot.
- #
- # We have a long timeout to ensure we don't flakily fail due to other
- # issues. Shorter timeouts are vetted in platform_RebootAfterUpdate.
- # TODO(sbasi - crbug.com/276094) Restore to 5 mins once the 'host did not
- # return from reboot' bug is solved.
- REBOOT_TIMEOUT = 480
-
- # _USB_POWER_TIMEOUT: Time to allow for USB to power toggle ON and OFF.
- # _POWER_CYCLE_TIMEOUT: Time to allow for manual power cycle.
- # _CHANGE_SERVO_ROLE_TIMEOUT: Time to allow DUT regain network connection
- # since changing servo role will reset USB state
- # and causes temporary ethernet drop.
- _USB_POWER_TIMEOUT = 5
- _POWER_CYCLE_TIMEOUT = 10
- _CHANGE_SERVO_ROLE_TIMEOUT = 180
-
- _RPM_HOSTNAME_REGEX = ('chromeos(\d+)(-row(\d+))?-rack(\d+[a-z]*)'
- '-host(\d+)')
-
- # Constants used in ping_wait_up() and ping_wait_down().
- #
- # _PING_WAIT_COUNT is the approximate number of polling
- # cycles to use when waiting for a host state change.
- #
- # _PING_STATUS_DOWN and _PING_STATUS_UP are names used
- # for arguments to the internal _ping_wait_for_status()
- # method.
- _PING_WAIT_COUNT = 40
- _PING_STATUS_DOWN = False
- _PING_STATUS_UP = True
-
- # Allowed values for the power_method argument.
-
- # POWER_CONTROL_RPM: Used in power_off/on/cycle() methods, default for all
- # DUTs except those with servo_v4 CCD.
- # POWER_CONTROL_CCD: Used in power_off/on/cycle() methods, default for all
- # DUTs with servo_v4 CCD.
- # POWER_CONTROL_SERVO: Used in set_power() and power_cycle() methods.
- # POWER_CONTROL_MANUAL: Used in set_power() and power_cycle() methods.
- POWER_CONTROL_RPM = 'RPM'
- POWER_CONTROL_CCD = 'CCD'
- POWER_CONTROL_SERVO = 'servoj10'
- POWER_CONTROL_MANUAL = 'manual'
-
- POWER_CONTROL_VALID_ARGS = (POWER_CONTROL_RPM,
- POWER_CONTROL_CCD,
- POWER_CONTROL_SERVO,
- POWER_CONTROL_MANUAL)
-
- _RPM_OUTLET_CHANGED = 'outlet_changed'
-
- # URL pattern to download firmware image.
- _FW_IMAGE_URL_PATTERN = CONFIG.get_config_value(
- 'CROS', 'firmware_url_pattern', type=str)
-
- # Regular expression for extracting EC version string
- _EC_REGEX = '(%s_\w*[-\.]\w*[-\.]\w*[-\.]\w*)'
-
- # Regular expression for extracting BIOS version string
- _BIOS_REGEX = '(%s\.\w*\.\w*\.\w*)'
-
- # Command to update firmware located on DUT
- _FW_UPDATE_CMD = 'chromeos-firmwareupdate --mode=recovery %s'
-
- @staticmethod
- def check_host(host, timeout=10):
- """
- Check if the given host is a chrome-os host.
-
- @param host: An ssh host representing a device.
- @param timeout: The timeout for the run command.
-
- @return: True if the host device is chromeos.
-
- """
- try:
- result = host.run(
- 'grep -q CHROMEOS /etc/lsb-release && '
- '! grep -q moblab /etc/lsb-release && '
- '! grep -q labstation /etc/lsb-release &&'
- ' grep CHROMEOS_RELEASE_BOARD /etc/lsb-release',
- ignore_status=True,
- timeout=timeout).stdout
- if result:
- return not (
- lsbrelease_utils.is_jetstream(
- lsb_release_content=result) or
- lsbrelease_utils.is_gce_board(
- lsb_release_content=result))
-
- except (error.AutoservRunError, error.AutoservSSHTimeout):
- return False
-
- return False
-
-
- @staticmethod
- def get_chameleon_arguments(args_dict):
- """Extract chameleon options from `args_dict` and return the result.
-
- Recommended usage:
- ~~~~~~~~
- args_dict = utils.args_to_dict(args)
- chameleon_args = hosts.CrosHost.get_chameleon_arguments(args_dict)
- host = hosts.create_host(machine, chameleon_args=chameleon_args)
- ~~~~~~~~
-
- @param args_dict Dictionary from which to extract the chameleon
- arguments.
- """
- chameleon_args = {key: args_dict[key]
- for key in ('chameleon_host', 'chameleon_port')
- if key in args_dict}
- if 'chameleon_ssh_port' in args_dict:
- chameleon_args['port'] = int(args_dict['chameleon_ssh_port'])
- return chameleon_args
-
- @staticmethod
- def get_btpeer_arguments(args_dict):
- """Extract btpeer options from `args_dict` and return the result.
-
- This is used to parse details of Bluetooth peer.
- Recommended usage:
- ~~~~~~~~
- args_dict = utils.args_to_dict(args)
- btpeer_args = hosts.CrosHost.get_btpeer_arguments(args_dict)
- host = hosts.create_host(machine, btpeer_args=btpeer_args)
- ~~~~~~~~
-
- @param args_dict: Dictionary from which to extract the btpeer
- arguments.
- """
- if 'btpeer_host_list' in args_dict:
- result = []
- for btpeer in args_dict['btpeer_host_list'].split(','):
- # IPv6 addresses including a port number should be enclosed in
- # square brackets.
- delimiter = ']:' if re.search(r':.*:', btpeer) else ':'
- result.append({key: value for key,value in
- zip(('btpeer_host','btpeer_port'),
- btpeer.strip('[]').split(delimiter))})
- return result
- else:
- return {key: args_dict[key]
- for key in ('btpeer_host', 'btpeer_port', 'btpeer_ssh_port')
- if key in args_dict}
-
-
- @staticmethod
- def get_pdtester_arguments(args_dict):
- """Extract chameleon options from `args_dict` and return the result.
-
- Recommended usage:
- ~~~~~~~~
- args_dict = utils.args_to_dict(args)
- pdtester_args = hosts.CrosHost.get_pdtester_arguments(args_dict)
- host = hosts.create_host(machine, pdtester_args=pdtester_args)
- ~~~~~~~~
-
- @param args_dict Dictionary from which to extract the pdtester
- arguments.
- """
- return {key: args_dict[key]
- for key in ('pdtester_host', 'pdtester_port')
- if key in args_dict}
-
-
- @staticmethod
- def get_servo_arguments(args_dict):
- """Extract servo options from `args_dict` and return the result.
-
- Recommended usage:
- ~~~~~~~~
- args_dict = utils.args_to_dict(args)
- servo_args = hosts.CrosHost.get_servo_arguments(args_dict)
- host = hosts.create_host(machine, servo_args=servo_args)
- ~~~~~~~~
-
- @param args_dict Dictionary from which to extract the servo
- arguments.
- """
- servo_attrs = (servo_constants.SERVO_HOST_ATTR,
- servo_constants.SERVO_HOST_SSH_PORT_ATTR,
- servo_constants.SERVO_PORT_ATTR,
- servo_constants.SERVO_SERIAL_ATTR,
- servo_constants.SERVO_BOARD_ATTR,
- servo_constants.SERVO_MODEL_ATTR)
- servo_args = {key: args_dict[key]
- for key in servo_attrs
- if key in args_dict}
- return (
- None
- if servo_constants.SERVO_HOST_ATTR in servo_args
- and not servo_args[servo_constants.SERVO_HOST_ATTR]
- else servo_args)
-
-
- def _initialize(self,
- hostname,
- chameleon_args=None,
- servo_args=None,
- pdtester_args=None,
- try_lab_servo=False,
- try_servo_repair=False,
- ssh_verbosity_flag='',
- ssh_options='',
- try_servo_recovery=False,
- *args,
- **dargs):
- """Initialize superclasses, |self.chameleon|, and |self.servo|.
-
- This method will attempt to create the test-assistant object
- (chameleon/servo) when it is needed by the test. Check
- the docstring of chameleon_host.create_chameleon_host and
- servo_host.create_servo_host for how this is determined.
-
- @param hostname: Hostname of the dut.
- @param chameleon_args: A dictionary that contains args for creating
- a ChameleonHost. See chameleon_host for details.
- @param servo_args: A dictionary that contains args for creating
- a ServoHost object. See servo_host for details.
- @param try_lab_servo: When true, indicates that an attempt should
- be made to create a ServoHost for a DUT in
- the test lab, even if not required by
- `servo_args`. See servo_host for details.
- @param try_servo_repair: If a servo host is created, check it
- with `repair()` rather than `verify()`.
- See servo_host for details.
- @param ssh_verbosity_flag: String, to pass to the ssh command to control
- verbosity.
- @param ssh_options: String, other ssh options to pass to the ssh
- command.
- @param try_servo_recovery: When True, start servod in recovery mode.
- See servo_host for details.
- """
- super(CrosHost, self)._initialize(hostname=hostname, *args, **dargs)
- # hold special dut_state for repair process
- self._device_repair_state = None
- self.labels = base_label.LabelRetriever(cros_label.CROS_LABELS)
- # self.env is a dictionary of environment variable settings
- # to be exported for commands run on the host.
- # LIBC_FATAL_STDERR_ can be useful for diagnosing certain
- # errors that might happen.
- self.env['LIBC_FATAL_STDERR_'] = '1'
- self._ssh_verbosity_flag = ssh_verbosity_flag
- self._ssh_options = ssh_options
- self.health_profile = None
- self._default_power_method = None
- dut_health_profile = device_health_profile.DeviceHealthProfile(
- hostname=self.hostname,
- host_info=self.host_info_store.get(),
- result_dir=self.get_result_dir())
-
- # TODO(otabek@): remove when b/171414073 closed
- if self.use_icmp:
- pingable_before_servo = self.is_up_fast(count=1)
- if pingable_before_servo:
- logging.info('DUT is pingable before init Servo.')
- else:
- logging.info('Skipping ping to DUT before init Servo.')
- _servo_host, servo_state = servo_host.create_servo_host(
- dut=self,
- servo_args=servo_args,
- try_lab_servo=try_lab_servo,
- try_servo_repair=try_servo_repair,
- try_servo_recovery=try_servo_recovery,
- dut_host_info=self.host_info_store.get(),
- dut_health_profile=dut_health_profile)
- if dut_health_profile.is_loaded():
- logging.info('Device health profile loaded.')
- # The device profile is located in the servo_host which make it
- # dependency. If profile is not loaded yet then we do not have it
- # TODO(otabek@) persist device provide out of servo-host.
- self.health_profile = dut_health_profile
- self.set_servo_host(_servo_host, servo_state)
-
- # TODO(waihong): Do the simplication on Chameleon too.
- self._chameleon_host = chameleon_host.create_chameleon_host(
- dut=self.hostname,
- chameleon_args=chameleon_args)
- if self._chameleon_host:
- self.chameleon = self._chameleon_host.create_chameleon_board()
- else:
- self.chameleon = None
-
- # Bluetooth peers will be populated by the test if needed
- self._btpeer_host_list = []
- self.btpeer_list = []
- self.btpeer = None
-
- # Add pdtester host if pdtester args were added on command line
- self._pdtester_host = pdtester_host.create_pdtester_host(
- pdtester_args, self._servo_host)
-
- if self._pdtester_host:
- self.pdtester_servo = self._pdtester_host.get_servo()
- logging.info('pdtester_servo: %r', self.pdtester_servo)
- # Create the pdtester object used to access the ec uart
- self.pdtester = pdtester.PDTester(self.pdtester_servo,
- self._pdtester_host.get_servod_server_proxy())
- else:
- self.pdtester = None
-
-
- def initialize_btpeer(self, btpeer_args=[]):
- """ Initialize the Bluetooth peers
-
- Initialize Bluetooth peer devices given in the arguments. Bluetooth peer
- is chameleon host on Raspberry Pi.
- @param btpeer_args: A dictionary that contains args for creating
- a ChameleonHost. See chameleon_host for details.
-
- """
- logging.debug('Attempting to initialize bluetooth peers if available')
- try:
- if type(btpeer_args) is list:
- btpeer_args_list = btpeer_args
- else:
- btpeer_args_list = [btpeer_args]
-
- self._btpeer_host_list = chameleon_host.create_btpeer_host(
- dut=self.hostname, btpeer_args_list=btpeer_args_list)
- logging.debug('Bluetooth peer hosts are %s',
- self._btpeer_host_list)
- self.btpeer_list = [_host.create_chameleon_board() for _host in
- self._btpeer_host_list if _host is not None]
-
- if len(self.btpeer_list) > 0:
- self.btpeer = self.btpeer_list[0]
-
- logging.debug('After initialize_btpeer btpeer_list %s '
- 'btpeer_host_list is %s and btpeer is %s',
- self.btpeer_list, self._btpeer_host_list,
- self.btpeer)
- except Exception as e:
- logging.error('Exception %s in initialize_btpeer', str(e))
-
-
- def host_version_prefix(self, image):
- """Return version label prefix.
-
- In case the CrOS provisioning version is something other than the
- standard CrOS version e.g. CrOS TH version, this function will
- find the prefix from consts.py.
-
- @param image: The image name to find its version prefix.
- @returns: A prefix string for the image type.
- """
- return consts.get_version_label_prefix(image)
-
- def stage_build_to_usb(self, build):
- """Stage the current ChromeOS image on the USB stick connected to the
- servo.
-
- @param build: The build to download and send to USB.
- """
- if not self.servo:
- raise error.TestError('Host %s does not have servo.' %
- self.hostname)
-
- _, update_url = self.stage_image_for_servo(build)
-
- try:
- self.servo.image_to_servo_usb(update_url)
- finally:
- # servo.image_to_servo_usb turned the DUT off, so turn it back on
- logging.debug('Turn DUT power back on.')
- self.servo.get_power_state_controller().power_on()
-
- logging.debug('ChromeOS image %s is staged on the USB stick.',
- build)
-
- def verify_job_repo_url(self, tag=''):
- """
- Make sure job_repo_url of this host is valid.
-
- Eg: The job_repo_url "http://lmn.cd.ab.xyx:8080/static/\
- lumpy-release/R29-4279.0.0/autotest/packages" claims to have the
- autotest package for lumpy-release/R29-4279.0.0. If this isn't the case,
- download and extract it. If the devserver embedded in the url is
- unresponsive, update the job_repo_url of the host after staging it on
- another devserver.
-
- @param job_repo_url: A url pointing to the devserver where the autotest
- package for this build should be staged.
- @param tag: The tag from the server job, in the format
- <job_id>-<user>/<hostname>, or <hostless> for a server job.
-
- @raises DevServerException: If we could not resolve a devserver.
- @raises AutoservError: If we're unable to save the new job_repo_url as
- a result of choosing a new devserver because the old one failed to
- respond to a health check.
- @raises urllib2.URLError: If the devserver embedded in job_repo_url
- doesn't respond within the timeout.
- """
- info = self.host_info_store.get()
- job_repo_url = info.attributes.get(ds_constants.JOB_REPO_URL, '')
- if not job_repo_url:
- logging.warning('No job repo url set on host %s', self.hostname)
- return
-
- logging.info('Verifying job repo url %s', job_repo_url)
- devserver_url, image_name = tools.get_devserver_build_from_package_url(
- job_repo_url)
-
- ds = dev_server.ImageServer(devserver_url)
-
- logging.info('Staging autotest artifacts for %s on devserver %s',
- image_name, ds.url())
-
- start_time = time.time()
- ds.stage_artifacts(image_name, ['autotest_packages'])
- stage_time = time.time() - start_time
-
- # Record how much of the verification time comes from a devserver
- # restage. If we're doing things right we should not see multiple
- # devservers for a given board/build/branch path.
- try:
- board, build_type, branch = server_utils.ParseBuildName(
- image_name)[:3]
- except server_utils.ParseBuildNameException:
- pass
- else:
- devserver = devserver_url[
- devserver_url.find('/') + 2:devserver_url.rfind(':')]
- stats_key = {
- 'board': board,
- 'build_type': build_type,
- 'branch': branch,
- 'devserver': devserver.replace('.', '_'),
- }
-
- monarch_fields = {
- 'board': board,
- 'build_type': build_type,
- 'branch': branch,
- 'dev_server': devserver,
- }
-
-
- def stage_image_for_servo(self, image_name=None, artifact='test_image'):
- """Stage a build on a devserver and return the update_url.
-
- @param image_name: a name like lumpy-release/R27-3837.0.0
- @param artifact: a string like 'test_image'. Requests
- appropriate image to be staged.
- @returns a tuple of (image_name, URL) like
- (lumpy-release/R27-3837.0.0,
- http://172.22.50.205:8082/update/lumpy-release/R27-3837.0.0)
- """
- if not image_name:
- image_name = self.get_cros_repair_image_name()
- logging.info('Staging build for servo install: %s', image_name)
- devserver = dev_server.ImageServer.resolve(image_name, self.hostname)
- devserver.stage_artifacts(image_name, [artifact])
- if artifact == 'test_image':
- return image_name, devserver.get_test_image_url(image_name)
- elif artifact == 'recovery_image':
- return image_name, devserver.get_recovery_image_url(image_name)
- else:
- raise error.AutoservError("Bad artifact!")
-
- def prepare_for_update(self):
- """Prepares the DUT for an update.
-
- Subclasses may override this to perform any special actions
- required before updating.
- """
- pass
-
-
- def _clear_fw_version_labels(self, rw_only):
- """Clear firmware version labels from the machine.
-
- @param rw_only: True to only clear fwrw_version; otherewise, clear
- both fwro_version and fwrw_version.
- """
- info = self.host_info_store.get()
- info.clear_version_labels(consts.FW_RW_VERSION_PREFIX)
- if not rw_only:
- info.clear_version_labels(consts.FW_RO_VERSION_PREFIX)
- self.host_info_store.commit(info)
-
-
- def _add_fw_version_label(self, build, rw_only):
- """Add firmware version label to the machine.
-
- @param build: Build of firmware.
- @param rw_only: True to only add fwrw_version; otherwise, add both
- fwro_version and fwrw_version.
-
- """
- info = self.host_info_store.get()
- info.set_version_label(consts.FW_RW_VERSION_PREFIX, build)
- if not rw_only:
- info.set_version_label(consts.FW_RO_VERSION_PREFIX, build)
- self.host_info_store.commit(info)
-
- @staticmethod
- def get_version_from_image(image, version_regex):
- """Get version string from binary image using regular expression.
-
- @param image: Binary image to search
- @param version_regex: Regular expression to search for
-
- @return Version string
-
- @raises TestFail if no version string is found in image
- """
- with open(image, 'rb') as f:
- image_data = f.read()
- match = re.findall(version_regex,
- image_data.decode('ISO-8859-1', errors='ignore'))
- if match:
- return match[0]
- else:
- raise error.TestFail('Failed to read version from %s.' % image)
-
-
- def firmware_install(self, build, rw_only=False, dest=None,
- local_tarball=None, verify_version=False,
- try_scp=False, install_ec=True, install_bios=True,
- board_as=None):
- """Install firmware to the DUT.
-
- Use stateful update if the DUT is already running the same build.
- Stateful update does not update kernel and tends to run much faster
- than a full reimage. If the DUT is running a different build, or it
- failed to do a stateful update, full update, including kernel update,
- will be applied to the DUT.
-
- Once a host enters firmware_install its fw[ro|rw]_version label will
- be removed. After the firmware is updated successfully, a new
- fw[ro|rw]_version label will be added to the host.
-
- @param build: The build version to which we want to provision the
- firmware of the machine,
- e.g. 'link-firmware/R22-2695.1.144'.
- @param rw_only: True to only install firmware to its RW portions. Keep
- the RO portions unchanged.
- @param dest: Directory to store the firmware in.
- @param local_tarball: Path to local firmware image for installing
- without devserver.
- @param verify_version: True to verify EC and BIOS versions after
- programming firmware, default is False.
- @param try_scp: False to always program using servo, true to try copying
- the firmware and programming from the DUT.
- @param install_ec: True to install EC FW, and False to skip it.
- @param install_bios: True to install BIOS, and False to skip it.
- @param board_as: A board name to force to use.
-
- TODO(dshi): After bug 381718 is fixed, update here with corresponding
- exceptions that could be raised.
-
- """
- if not self.servo:
- raise error.TestError('Host %s does not have servo.' %
- self.hostname)
-
- info = self.host_info_store.get()
- board = info.board
- model = info.model
-
- if board is None or board == '':
- board = self.servo.get_board()
-
- # if board_as argument is passed, then use it instead of the original
- # board name.
- if board_as:
- board = board_as
-
- if model is None or model == '':
- try:
- model = self.get_platform()
- except Exception as e:
- logging.warn('Dut is unresponsive: %s', str(e))
-
- # If local firmware path not provided fetch it from the dev server
- tmpd = None
- if not local_tarball:
- logging.info('Will install firmware from build %s.', build)
-
- try:
- ds = dev_server.ImageServer.resolve(build, self.hostname)
- ds.stage_artifacts(build, ['firmware'])
-
- if not dest:
- tmpd = autotemp.tempdir(unique_id='fwimage')
- dest = tmpd.name
-
- # Download firmware image
- fwurl = self._FW_IMAGE_URL_PATTERN % (ds.url(), build)
- local_tarball = os.path.join(dest, os.path.basename(fwurl))
- ds.download_file(fwurl, local_tarball)
- except Exception as e:
- raise error.TestError('Failed to download firmware package: %s'
- % str(e))
-
- ec_image = None
- if install_ec:
- # Extract EC image from tarball
- logging.info('Extracting EC image.')
- ec_image = self.servo.extract_ec_image(board, model, local_tarball)
- logging.info('Extracted: %s', ec_image)
-
- bios_image = None
- if install_bios:
- # Extract BIOS image from tarball
- logging.info('Extracting BIOS image.')
- bios_image = self.servo.extract_bios_image(board, model,
- local_tarball)
- logging.info('Extracted: %s', bios_image)
-
- if not bios_image and not ec_image:
- raise error.TestError('No firmware installation was processed.')
-
- # Clear firmware version labels
- self._clear_fw_version_labels(rw_only)
-
- # Install firmware from local tarball
- try:
- # Check if copying to DUT is enabled and DUT is available
- if try_scp and self.is_up():
- # DUT is available, make temp firmware directory to store images
- logging.info('Making temp folder.')
- dest_folder = '/tmp/firmware'
- self.run('mkdir -p ' + dest_folder)
-
- fw_cmd = self._FW_UPDATE_CMD % ('--wp=1' if rw_only else '')
-
- if bios_image:
- # Send BIOS firmware image to DUT
- logging.info('Sending BIOS firmware.')
- dest_bios_path = os.path.join(dest_folder,
- os.path.basename(bios_image))
- self.send_file(bios_image, dest_bios_path)
-
- # Initialize firmware update command for BIOS image
- fw_cmd += ' -i %s' % dest_bios_path
-
- # Send EC firmware image to DUT when EC image was found
- if ec_image:
- logging.info('Sending EC firmware.')
- dest_ec_path = os.path.join(dest_folder,
- os.path.basename(ec_image))
- self.send_file(ec_image, dest_ec_path)
-
- # Add EC image to firmware update command
- fw_cmd += ' -e %s' % dest_ec_path
-
- # Make sure command is allowed to finish even if ssh fails.
- fw_cmd = "trap '' SIGHUP; %s" % fw_cmd
-
- # Update firmware on DUT
- logging.info('Updating firmware.')
- try:
- self.run(fw_cmd, options="-o LogLevel=verbose")
- except error.AutoservRunError as e:
- if e.result_obj.exit_status != 255:
- raise
- elif ec_image:
- logging.warn("DUT network dropped during update"
- " (often caused by EC resetting USB)")
- else:
- logging.error("DUT network dropped during update"
- " (unexpected, since no EC image)")
- raise
- else:
- # Host is not available, program firmware using servo
- if ec_image:
- self.servo.program_ec(ec_image, rw_only)
- if bios_image:
- self.servo.program_bios(bios_image, rw_only)
- if utils.host_is_in_lab_zone(self.hostname):
- self._add_fw_version_label(build, rw_only)
-
- # Reboot and wait for DUT after installing firmware
- logging.info('Rebooting DUT.')
- self.servo.get_power_state_controller().reset()
- time.sleep(self.servo.BOOT_DELAY)
- self.test_wait_for_boot()
-
- # When enabled verify EC and BIOS firmware version after programming
- if verify_version:
- # Check programmed EC firmware when EC image was found
- if ec_image:
- logging.info('Checking EC firmware version.')
- dest_ec_version = self.get_ec_version()
- ec_version_prefix = dest_ec_version.split('_', 1)[0]
- ec_regex = self._EC_REGEX % ec_version_prefix
- image_ec_version = self.get_version_from_image(ec_image,
- ec_regex)
- if dest_ec_version != image_ec_version:
- raise error.TestFail(
- 'Failed to update EC firmware, version %s '
- '(expected %s)' % (dest_ec_version,
- image_ec_version))
-
- if bios_image:
- # Check programmed BIOS firmware against expected version
- logging.info('Checking BIOS firmware version.')
- dest_bios_version = self.get_firmware_version()
- bios_version_prefix = dest_bios_version.split('.', 1)[0]
- bios_regex = self._BIOS_REGEX % bios_version_prefix
- image_bios_version = self.get_version_from_image(bios_image,
- bios_regex)
- if dest_bios_version != image_bios_version:
- raise error.TestFail(
- 'Failed to update BIOS, version %s '
- '(expected %s)' % (dest_bios_version,
- image_bios_version))
- finally:
- if tmpd:
- tmpd.clean()
-
- def set_servo_host(self, host, servo_state=None):
- """Set our servo host member, and associated servo.
-
- @param host Our new `ServoHost`.
- """
- self._servo_host = host
- self.servo_pwr_supported = None
- if self._servo_host is not None:
- self.servo = self._servo_host.get_servo()
- servo_state = self._servo_host.get_servo_state()
- self._set_smart_usbhub_label(self._servo_host.smart_usbhub)
- try:
- self.servo_pwr_supported = self.servo.has_control('power_state')
- except Exception as e:
- logging.debug(
- "Could not get servo power state due to {}".format(e))
- else:
- self.servo = None
- self.servo_pwr_supported = False
- self.set_servo_type()
- self.set_servo_state(servo_state)
- self._set_servo_topology()
-
- def set_servo_type(self):
- """Set servo info labels to dut host_info"""
- if not self.servo:
- logging.debug('Servo is not initialized to get servo_type.')
- return
- if not self.is_servo_in_working_state():
- logging.debug('Servo is not good, skip update servo_type.')
- return
- servo_type = self.servo.get_servo_type()
- if not servo_type:
- logging.debug('Cannot collect servo_type from servo'
- ' by `dut-control servo_type`! Please file a bug'
- ' and inform infra team as we are not expected '
- ' to reach this point.')
- return
- host_info = self.host_info_store.get()
- prefix = servo_constants.SERVO_TYPE_LABEL_PREFIX
- old_type = host_info.get_label_value(prefix)
- if old_type == servo_type:
- # do not need update
- return
- host_info.set_version_label(prefix, servo_type)
- self.host_info_store.commit(host_info)
- logging.info('ServoHost: servo_type updated to %s '
- '(previous: %s)', servo_type, old_type)
-
-
- def set_servo_state(self, servo_state):
- """Set servo info labels to dut host_info"""
- if servo_state is not None:
- host_info = self.host_info_store.get()
- servo_state_prefix = servo_constants.SERVO_STATE_LABEL_PREFIX
- old_state = host_info.get_label_value(servo_state_prefix)
- if old_state == servo_state:
- # do not need update
- return
- host_info.set_version_label(servo_state_prefix, servo_state)
- self.host_info_store.commit(host_info)
- logging.info('ServoHost: servo_state updated to %s (previous: %s)',
- servo_state, old_state)
-
-
- def get_servo_state(self):
- host_info = self.host_info_store.get()
- servo_state_prefix = servo_constants.SERVO_STATE_LABEL_PREFIX
- return host_info.get_label_value(servo_state_prefix)
-
- def is_servo_in_working_state(self):
- """Validate servo is in WORKING state."""
- servo_state = self.get_servo_state()
- return servo_state == servo_constants.SERVO_STATE_WORKING
-
- def get_servo_usb_state(self):
- """Get the label value indicating the health of the USB drive.
-
- @return: The label value if defined, otherwise '' (empty string).
- @rtype: str
- """
- host_info = self.host_info_store.get()
- servo_usb_state_prefix = audit_const.SERVO_USB_STATE_PREFIX
- return host_info.get_label_value(servo_usb_state_prefix)
-
- def is_servo_usb_usable(self):
- """Check if the servo USB storage device is usable for FAFT.
-
- @return: False if the label indicates a state that will break FAFT.
- True if state is okay, or if state is not defined.
- @rtype: bool
- """
- usb_state = self.get_servo_usb_state()
- return usb_state in ('', audit_const.HW_STATE_ACCEPTABLE,
- audit_const.HW_STATE_NORMAL,
- audit_const.HW_STATE_UNKNOWN)
-
- def _set_smart_usbhub_label(self, smart_usbhub_detected):
- if smart_usbhub_detected is None:
- # skip the label update here as this indicate we wasn't able
- # to confirm usbhub type.
- return
- host_info = self.host_info_store.get()
- if (smart_usbhub_detected ==
- (servo_constants.SMART_USBHUB_LABEL in host_info.labels)):
- # skip label update if current label match the truth.
- return
- if smart_usbhub_detected:
- logging.info('Adding %s label to host %s',
- servo_constants.SMART_USBHUB_LABEL,
- self.hostname)
- host_info.labels.append(servo_constants.SMART_USBHUB_LABEL)
- else:
- logging.info('Removing %s label from host %s',
- servo_constants.SMART_USBHUB_LABEL,
- self.hostname)
- host_info.labels.remove(servo_constants.SMART_USBHUB_LABEL)
- self.host_info_store.commit(host_info)
-
- def repair(self):
- """Attempt to repair the DUT."""
- # TODO b/195447992: Wire this into lab services (If needed?).
- tauto_warnings.lab_services_warn_and_error("Cros repair")
-
- def get_verifier_state(self, tag):
- """Return the state of host verifier by tag.
-
- @returns: bool or None
- """
- # TODO b/195447992: Wire this into lab services.
- tauto_warnings.lab_services_warn_and_error("Cros verify state")
-
- def get_repair_strategy_node(self, tag):
- """Return the instance of verifier/repair node for host by tag.
-
- @returns: _DependencyNode or None
- """
- # TODO b/195447992: Wire this into lab services.
- tauto_warnings.lab_services_warn_and_error("Cros node")
-
- def close(self):
- """Close connection."""
- super(CrosHost, self).close()
-
- if self._chameleon_host:
- self._chameleon_host.close()
-
- if self.health_profile:
- try:
- self.health_profile.close()
- except Exception as e:
- logging.warning(
- 'Failed to finalize device health profile; %s', e)
-
- if self._servo_host:
- self._servo_host.close()
-
- def get_power_supply_info(self):
- """Get the output of power_supply_info.
-
- power_supply_info outputs the info of each power supply, e.g.,
- Device: Line Power
- online: no
- type: Mains
- voltage (V): 0
- current (A): 0
- Device: Battery
- state: Discharging
- percentage: 95.9276
- technology: Li-ion
-
- Above output shows two devices, Line Power and Battery, with details of
- each device listed. This function parses the output into a dictionary,
- with key being the device name, and value being a dictionary of details
- of the device info.
-
- @return: The dictionary of power_supply_info, e.g.,
- {'Line Power': {'online': 'yes', 'type': 'main'},
- 'Battery': {'vendor': 'xyz', 'percentage': '100'}}
- @raise error.AutoservRunError if power_supply_info tool is not found in
- the DUT. Caller should handle this error to avoid false failure
- on verification.
- """
- result = self.run('power_supply_info').stdout.strip()
- info = {}
- device_name = None
- device_info = {}
- for line in result.split('\n'):
- pair = [v.strip() for v in line.split(':')]
- if len(pair) != 2:
- continue
- if pair[0] == 'Device':
- if device_name:
- info[device_name] = device_info
- device_name = pair[1]
- device_info = {}
- else:
- device_info[pair[0]] = pair[1]
- if device_name and not device_name in info:
- info[device_name] = device_info
- return info
-
-
- def get_battery_percentage(self):
- """Get the battery percentage.
-
- @return: The percentage of battery level, value range from 0-100. Return
- None if the battery info cannot be retrieved.
- """
- try:
- info = self.get_power_supply_info()
- logging.info(info)
- return float(info['Battery']['percentage'])
- except (KeyError, ValueError, error.AutoservRunError):
- return None
-
-
- def get_battery_state(self):
- """Get the battery charging state.
-
- @return: A string representing the battery charging state. It can be
- 'Charging', 'Fully charged', or 'Discharging'.
- """
- try:
- info = self.get_power_supply_info()
- logging.info(info)
- return info['Battery']['state']
- except (KeyError, ValueError, error.AutoservRunError):
- return None
-
-
- def get_battery_display_percentage(self):
- """Get the battery display percentage.
-
- @return: The display percentage of battery level, value range from
- 0-100. Return None if the battery info cannot be retrieved.
- """
- try:
- info = self.get_power_supply_info()
- logging.info(info)
- return float(info['Battery']['display percentage'])
- except (KeyError, ValueError, error.AutoservRunError):
- return None
-
-
- def is_ac_connected(self):
- """Check if the dut has power adapter connected and charging.
-
- @return: True if power adapter is connected and charging.
- """
- try:
- info = self.get_power_supply_info()
- return info['Line Power']['online'] == 'yes'
- except (KeyError, error.AutoservRunError):
- return None
-
-
- def _cleanup_poweron(self):
- """Special cleanup method to make sure hosts always get power back."""
- info = self.host_info_store.get()
- if self._RPM_OUTLET_CHANGED not in info.attributes:
- return
- logging.debug('This host has recently interacted with the RPM'
- ' Infrastructure. Ensuring power is on.')
- try:
- self.power_on()
- self._remove_rpm_changed_tag()
- except Exception:
- # TODO b/195443964: Re-wire as needed once TLW is available.
- logging.error('Failed to turn Power On for this host after '
- 'cleanup through the RPM Infrastructure.')
-
- battery_percentage = self.get_battery_percentage()
- if (
- battery_percentage
- and battery_percentage < cros_constants.MIN_BATTERY_LEVEL):
- raise
- elif self.is_ac_connected():
- logging.info('The device has power adapter connected and '
- 'charging. No need to try to turn RPM on '
- 'again.')
- self._remove_rpm_changed_tag()
- logging.info('Battery level is now at %s%%. The device may '
- 'still have enough power to run test, so no '
- 'exception will be raised.', battery_percentage)
-
-
- def _remove_rpm_changed_tag(self):
- # TODO b/195443964: Re-wire as needed once TLW is available.
- pass
-
-
- def _add_rpm_changed_tag(self):
- # TODO b/195443964: Re-wire as needed once TLW is available.
- pass
-
-
- def _is_factory_image(self):
- """Checks if the image on the DUT is a factory image.
-
- @return: True if the image on the DUT is a factory image.
- False otherwise.
- """
- result = self.run('[ -f /root/.factory_test ]', ignore_status=True)
- return result.exit_status == 0
-
-
- def _restart_ui(self):
- """Restart the Chrome UI.
-
- @raises: FactoryImageCheckerException for factory images, since
- we cannot attempt to restart ui on them.
- error.AutoservRunError for any other type of error that
- occurs while restarting ui.
- """
- if self._is_factory_image():
- raise FactoryImageCheckerException('Cannot restart ui on factory '
- 'images')
-
- # TODO(jrbarnette): The command to stop/start the ui job
- # should live inside cros_ui, too. However that would seem
- # to imply interface changes to the existing start()/restart()
- # functions, which is a bridge too far (for now).
- prompt = cros_ui.get_chrome_session_ident(self)
- self.run('stop ui; start ui')
- cros_ui.wait_for_chrome_ready(prompt, self)
-
-
- def _start_powerd_if_needed(self):
- """Start powerd if it isn't already running."""
- self.run('start powerd', ignore_status=True)
-
- def _read_arc_prop_file(self, filename):
- for path in [
- '/usr/share/arcvm/properties/', '/usr/share/arc/properties/'
- ]:
- if self.path_exists(path + filename):
- return utils.parse_cmd_output('cat ' + path + filename,
- run_method=self.run)
- return None
-
- def _get_arc_build_info(self):
- """Returns a dictionary mapping build properties to their values."""
- build_info = None
- for filename in ['build.prop', 'vendor_build.prop']:
- properties = self._read_arc_prop_file(filename)
- if properties:
- if build_info:
- build_info.update(properties)
- else:
- build_info = properties
- else:
- logging.error('Failed to find %s in device.', filename)
- return build_info
-
- def get_arc_primary_abi(self):
- """Returns the primary abi of the host."""
- return self._get_arc_build_info().get('ro.product.cpu.abi')
-
- def get_arc_security_patch(self):
- """Returns the security patch of the host."""
- return self._get_arc_build_info().get('ro.build.version.security_patch')
-
- def get_arc_first_api_level(self):
- """Returns the security patch of the host."""
- return self._get_arc_build_info().get('ro.product.first_api_level')
-
- def _get_lsb_release_content(self):
- """Return the content of lsb-release file of host."""
- return self.run(
- 'cat "%s"' % client_constants.LSB_RELEASE).stdout.strip()
-
-
- def get_release_version(self):
- """Get the value of attribute CHROMEOS_RELEASE_VERSION from lsb-release.
-
- @returns The version string in lsb-release, under attribute
- CHROMEOS_RELEASE_VERSION.
- """
- return lsbrelease_utils.get_chromeos_release_version(
- lsb_release_content=self._get_lsb_release_content())
-
-
- def get_release_builder_path(self):
- """Get the value of CHROMEOS_RELEASE_BUILDER_PATH from lsb-release.
-
- @returns The version string in lsb-release, under attribute
- CHROMEOS_RELEASE_BUILDER_PATH.
- """
- return lsbrelease_utils.get_chromeos_release_builder_path(
- lsb_release_content=self._get_lsb_release_content())
-
-
- def get_chromeos_release_milestone(self):
- """Get the value of attribute CHROMEOS_RELEASE_BUILD_TYPE
- from lsb-release.
-
- @returns The version string in lsb-release, under attribute
- CHROMEOS_RELEASE_BUILD_TYPE.
- """
- return lsbrelease_utils.get_chromeos_release_milestone(
- lsb_release_content=self._get_lsb_release_content())
-
- def cleanup_services(self):
- """Reinitializes the device for cleanup.
-
- Subclasses may override this to customize the cleanup method.
-
- To indicate failure of the reset, the implementation may raise
- any of:
- error.AutoservRunError
- error.AutotestRunError
- FactoryImageCheckerException
-
- @raises error.AutoservRunError
- @raises error.AutotestRunError
- @raises error.FactoryImageCheckerException
- """
- self._restart_ui()
- self._start_powerd_if_needed()
-
-
- def cleanup(self):
- """Cleanup state on device."""
- self.run('rm -f %s' % client_constants.CLEANUP_LOGS_PAUSED_FILE)
- try:
- self.cleanup_services()
- except (error.AutotestRunError, error.AutoservRunError,
- FactoryImageCheckerException):
- logging.warning('Unable to restart ui.')
-
- # cleanup routines, i.e. reboot the machine.
- super(CrosHost, self).cleanup()
-
- # Check if the rpm outlet was manipulated.
- if self.has_power():
- self._cleanup_poweron()
-
-
- def reboot(self, **dargs):
- """
- This function reboots the site host. The more generic
- RemoteHost.reboot() performs sync and sleeps for 5
- seconds. This is not necessary for Chrome OS devices as the
- sync should be finished in a short time during the reboot
- command.
- """
- if 'reboot_cmd' not in dargs:
- reboot_timeout = dargs.get('reboot_timeout', 10)
- dargs['reboot_cmd'] = ('sleep 1; '
- 'reboot & sleep %d; '
- 'reboot -f' % reboot_timeout)
- # Enable fastsync to avoid running extra sync commands before reboot.
- if 'fastsync' not in dargs:
- dargs['fastsync'] = True
-
- dargs['board'] = self.host_info_store.get().board
- # Record who called us
- orig = sys._getframe(1).f_code
- metric_fields = {'board' : dargs['board'],
- 'dut_host_name' : self.hostname,
- 'success' : True}
- metric_debug_fields = {'board' : dargs['board'],
- 'caller' : "%s:%s" % (orig.co_filename,
- orig.co_name),
- 'success' : True,
- 'error' : ''}
-
- try:
- super(CrosHost, self).reboot(**dargs)
- except Exception as e:
- metric_fields['success'] = False
- metric_debug_fields['success'] = False
- metric_debug_fields['error'] = type(e).__name__
- raise
-
- def suspend(self, suspend_time=60, delay_seconds=0,
- suspend_cmd=None, allow_early_resume=False):
- """
- This function suspends the site host.
-
- @param suspend_time: How long to suspend as integer seconds.
- @param suspend_cmd: Suspend command to execute.
- @param allow_early_resume: If False and if device resumes before
- |suspend_time|, throw an error.
-
- @exception AutoservSuspendError Host resumed earlier than
- |suspend_time|.
- """
-
- if suspend_cmd is None:
- suspend_cmd = ' && '.join([
- 'echo 0 > /sys/class/rtc/rtc0/wakealarm',
- 'echo +%d > /sys/class/rtc/rtc0/wakealarm' % suspend_time,
- 'powerd_dbus_suspend --delay=%d' % delay_seconds])
- super(CrosHost, self).suspend(suspend_time, suspend_cmd,
- allow_early_resume);
-
-
- def upstart_status(self, service_name):
- """Check the status of an upstart init script.
-
- @param service_name: Service to look up.
-
- @returns True if the service is running, False otherwise.
- """
- return 'start/running' in self.run('status %s' % service_name,
- ignore_status=True).stdout
-
- def upstart_stop(self, service_name):
- """Stops an upstart job if it's running.
-
- @param service_name: Service to stop
-
- @returns True if service has been stopped or was already stopped
- False otherwise.
- """
- if not self.upstart_status(service_name):
- return True
-
- result = self.run('stop %s' % service_name, ignore_status=True)
- if result.exit_status != 0:
- return False
- return True
-
- def upstart_restart(self, service_name):
- """Restarts (or starts) an upstart job.
-
- @param service_name: Service to start/restart
-
- @returns True if service has been started/restarted, False otherwise.
- """
- cmd = 'start'
- if self.upstart_status(service_name):
- cmd = 'restart'
- cmd = cmd + ' %s' % service_name
- result = self.run(cmd)
- if result.exit_status != 0:
- return False
- return True
-
- def verify_software(self):
- """Verify working software on a Chrome OS system.
-
- Tests for the following conditions:
- 1. All conditions tested by the parent version of this
- function.
- 2. Sufficient space in /mnt/stateful_partition.
- 3. Sufficient space in /mnt/stateful_partition/encrypted.
- 4. update_engine answers a simple status request over DBus.
-
- """
- super(CrosHost, self).verify_software()
- default_kilo_inodes_required = CONFIG.get_config_value(
- 'SERVER', 'kilo_inodes_required', type=int, default=100)
- board = self.get_board().replace(ds_constants.BOARD_PREFIX, '')
- kilo_inodes_required = CONFIG.get_config_value(
- 'SERVER', 'kilo_inodes_required_%s' % board,
- type=int, default=default_kilo_inodes_required)
- self.check_inodes('/mnt/stateful_partition', kilo_inodes_required)
- self.check_diskspace(
- '/mnt/stateful_partition',
- CONFIG.get_config_value(
- 'SERVER', 'gb_diskspace_required', type=float,
- default=20.0))
- encrypted_stateful_path = '/mnt/stateful_partition/encrypted'
- # Not all targets build with encrypted stateful support.
- if self.path_exists(encrypted_stateful_path):
- self.check_diskspace(
- encrypted_stateful_path,
- CONFIG.get_config_value(
- 'SERVER', 'gb_encrypted_diskspace_required', type=float,
- default=0.1))
-
- self.wait_for_system_services()
-
- # Factory images don't run update engine,
- # goofy controls dbus on these DUTs.
- if not self._is_factory_image():
- self.run('update_engine_client --status')
-
-
- @retry.retry(error.AutoservError, timeout_min=5, delay_sec=10)
- def wait_for_service(self, service_name):
- """Wait for target status of an upstart init script.
-
- @param service_name: Service to wait for.
- """
- if not self.upstart_status(service_name):
- raise error.AutoservError('Service %s not running.' % service_name)
-
- def wait_for_system_services(self):
- """Waits for system-services to be running.
-
- Sometimes, update_engine will take a while to update firmware, so we
- should give this some time to finish. See crbug.com/765686#c38 for
- details.
- """
- self.wait_for_service('system-services')
-
-
- def verify(self):
- """Verify Chrome OS system is in good state."""
- message = 'Beginning verify for host %s board %s model %s'
- info = self.host_info_store.get()
- message %= (self.hostname, info.board, info.model)
- self.record('INFO', None, None, message)
- # TODO b/195447992: Wire this into lab services.
- tauto_warnings.lab_services_warn_and_error("Repair")
-
-
- def make_ssh_command(self, user='root', port=22, opts='', hosts_file=None,
- connect_timeout=None, alive_interval=None,
- alive_count_max=None, connection_attempts=None):
- """Override default make_ssh_command to use options tuned for Chrome OS.
-
- Tuning changes:
- - ConnectTimeout=30; maximum of 30 seconds allowed for an SSH
- connection failure. Consistency with remote_access.sh.
-
- - ServerAliveInterval=900; which causes SSH to ping connection every
- 900 seconds. In conjunction with ServerAliveCountMax ensures
- that if the connection dies, Autotest will bail out.
- Originally tried 60 secs, but saw frequent job ABORTS where
- the test completed successfully. Later increased from 180 seconds to
- 900 seconds to account for tests where the DUT is suspended for
- longer periods of time.
-
- - ServerAliveCountMax=3; consistency with remote_access.sh.
-
- - ConnectAttempts=4; reduce flakiness in connection errors;
- consistency with remote_access.sh.
-
- - UserKnownHostsFile=/dev/null; we don't care about the keys.
- Host keys change with every new installation, don't waste
- memory/space saving them.
-
- - SSH protocol forced to 2; needed for ServerAliveInterval.
-
- @param user User name to use for the ssh connection.
- @param port Port on the target host to use for ssh connection.
- @param opts Additional options to the ssh command.
- @param hosts_file Ignored.
- @param connect_timeout Ignored.
- @param alive_interval Ignored.
- @param alive_count_max Ignored.
- @param connection_attempts Ignored.
- """
- options = ' '.join([opts, '-o Protocol=2'])
- return super(CrosHost, self).make_ssh_command(
- user=user, port=port, opts=options, hosts_file='/dev/null',
- connect_timeout=30, alive_interval=900, alive_count_max=3,
- connection_attempts=4)
-
-
- def syslog(self, message, tag='autotest'):
- """Logs a message to syslog on host.
-
- @param message String message to log into syslog
- @param tag String tag prefix for syslog
-
- """
- self.run('logger -t "%s" "%s"' % (tag, message))
-
-
- def _ping_check_status(self, status):
- """Ping the host once, and return whether it has a given status.
-
- @param status Check the ping status against this value.
- @return True iff `status` and the result of ping are the same
- (i.e. both True or both False).
-
- """
- ping_val = utils.ping(self.hostname,
- tries=1,
- deadline=1,
- timeout=2,
- ignore_timeout=True)
- return not (status ^ (ping_val == 0))
-
- def _ping_wait_for_status(self, status, timeout):
- """Wait for the host to have a given status (UP or DOWN).
-
- Status is checked by polling. Polling will not last longer
- than the number of seconds in `timeout`. The polling
- interval will be long enough that only approximately
- _PING_WAIT_COUNT polling cycles will be executed, subject
- to a maximum interval of about one minute.
-
- @param status Waiting will stop immediately if `ping` of the
- host returns this status.
- @param timeout Poll for at most this many seconds.
- @return True iff the host status from `ping` matched the
- requested status at the time of return.
-
- """
- # _ping_check_status() takes about 1 second, hence the
- # "- 1" in the formula below.
- # FIXME: if the ping command errors then _ping_check_status()
- # returns instantly. If timeout is also smaller than twice
- # _PING_WAIT_COUNT then the while loop below forks many
- # thousands of ping commands (see /tmp/test_that_results_XXXXX/
- # /results-1-logging_YYY.ZZZ/debug/autoserv.DEBUG) and hogs one
- # CPU core for 60 seconds.
- poll_interval = min(int(timeout / self._PING_WAIT_COUNT), 60) - 1
- end_time = time.time() + timeout
- while time.time() <= end_time:
- if self._ping_check_status(status):
- return True
- if poll_interval > 0:
- time.sleep(poll_interval)
-
- # The last thing we did was sleep(poll_interval), so it may
- # have been too long since the last `ping`. Check one more
- # time, just to be sure.
- return self._ping_check_status(status)
-
- def ping_wait_up(self, timeout):
- """Wait for the host to respond to `ping`.
-
- N.B. This method is not a reliable substitute for
- `wait_up()`, because a host that responds to ping will not
- necessarily respond to ssh. This method should only be used
- if the target DUT can be considered functional even if it
- can't be reached via ssh.
-
- @param timeout Minimum time to allow before declaring the
- host to be non-responsive.
- @return True iff the host answered to ping before the timeout.
-
- """
- if self.use_icmp:
- return self._ping_wait_for_status(self._PING_STATUS_UP, timeout)
- else:
- logging.debug('Using SSH instead of ICMP for ping_wait_up.')
- return self.wait_up(timeout)
-
- def ping_wait_down(self, timeout):
- """Wait until the host no longer responds to `ping`.
-
- This function can be used as a slightly faster version of
- `wait_down()`, by avoiding potentially long ssh timeouts.
-
- @param timeout Minimum time to allow for the host to become
- non-responsive.
- @return True iff the host quit answering ping before the
- timeout.
-
- """
- if self.use_icmp:
- return self._ping_wait_for_status(self._PING_STATUS_DOWN, timeout)
- else:
- logging.debug('Using SSH instead of ICMP for ping_wait_down.')
- return self.wait_down(timeout)
-
- def _is_host_port_forwarded(self):
- """Checks if the dut is connected over port forwarding.
-
- N.B. This method does not detect all situations where port forwarding is
- occurring. Namely, running autotest on the dut may result in a
- false-positive, and port forwarding using a different machine on the
- same network will be a false-negative.
-
- @return True if the dut is connected over port forwarding
- False otherwise
- """
- is_localhost = self.hostname in ['localhost', '127.0.0.1']
- is_forwarded = is_localhost and not self.is_default_port
- if is_forwarded:
- logging.info('Detected DUT connected by port forwarding')
- return is_forwarded
-
- def test_wait_for_boot(self, old_boot_id=None):
- """Wait for the client to boot from cold power.
-
- The `old_boot_id` parameter should be the value from
- `get_boot_id()` obtained prior to shutting down. A
- `TestFail` exception is raised if the boot id does not
- change. The boot id test is omitted if `old_boot_id` is not
- specified.
-
- See @ref test_wait_for_shutdown for more on this function's
- usage.
-
- @param old_boot_id A boot id value obtained before the
- shut down.
-
- @exception TestFail The host did not respond within the
- allowed time.
- @exception TestFail The host responded, but the boot id test
- indicated that there was no reboot.
- """
- if not self.wait_up(timeout=self.REBOOT_TIMEOUT):
- raise error.TestFail(
- 'client failed to reboot after %d seconds' %
- self.REBOOT_TIMEOUT)
- elif old_boot_id:
- if self.get_boot_id() == old_boot_id:
- logging.error('client not rebooted (boot %s)',
- old_boot_id)
- raise error.TestFail(
- 'client is back up, but did not reboot')
-
-
- @staticmethod
- def check_for_rpm_support(hostname):
- """For a given hostname, return whether or not it is powered by an RPM.
-
- @param hostname: hostname to check for rpm support.
-
- @return None if this host does not follows the defined naming format
- for RPM powered DUT's in the lab. If it does follow the format,
- it returns a regular expression MatchObject instead.
- """
- return re.match(CrosHost._RPM_HOSTNAME_REGEX, hostname)
-
-
- def has_power(self):
- """For this host, return whether or not it is powered by an RPM.
-
- @return True if this host is in the CROS lab and follows the defined
- naming format.
- """
- return CrosHost.check_for_rpm_support(self.hostname)
-
-
- def _set_power(self, state, power_method):
- """Sets the power to the host via RPM, CCD, Servo or manual.
-
- @param state Specifies which power state to set to DUT
- @param power_method Specifies which method of power control to
- use. By default "RPM" or "CCD" will be used based
- on servo type. Valid values from
- POWER_CONTROL_VALID_ARGS, or None to use default.
-
- """
- ACCEPTABLE_STATES = ['ON', 'OFF']
-
- if not power_method:
- power_method = self.get_default_power_method()
-
- state = state.upper()
- if state not in ACCEPTABLE_STATES:
- raise error.TestError('State must be one of: %s.'
- % (ACCEPTABLE_STATES,))
-
- if power_method == self.POWER_CONTROL_SERVO:
- logging.info('Setting servo port J10 to %s', state)
- self.servo.set('prtctl3_pwren', state.lower())
- time.sleep(self._USB_POWER_TIMEOUT)
- elif power_method == self.POWER_CONTROL_MANUAL:
- logging.info('You have %d seconds to set the AC power to %s.',
- self._POWER_CYCLE_TIMEOUT, state)
- time.sleep(self._POWER_CYCLE_TIMEOUT)
- elif power_method == self.POWER_CONTROL_CCD:
- servo_role = 'src' if state == 'ON' else 'snk'
- logging.info('servo ccd power pass through detected,'
- ' changing servo_role to %s.', servo_role)
- self.servo.set_servo_v4_role(servo_role)
- if not self.ping_wait_up(timeout=self._CHANGE_SERVO_ROLE_TIMEOUT):
- # Make sure we don't leave DUT with no power(servo_role=snk)
- # when DUT is not pingable, as we raise a exception here
- # that may break a power cycle in the middle.
- self.servo.set_servo_v4_role('src')
- raise error.AutoservError(
- 'DUT failed to regain network connection after %d seconds.'
- % self._CHANGE_SERVO_ROLE_TIMEOUT)
- else:
- if not self.has_power():
- raise error.TestFail('DUT does not have RPM connected.')
- self._add_rpm_changed_tag()
- # TODO b/195443964: Re-wire as needed once TLW is available.
-
-
- def power_off(self, power_method=None):
- """Turn off power to this host via RPM, CCD, Servo or manual.
-
- @param power_method Specifies which method of power control to
- use. By default "RPM" or "CCD" will be used based
- on servo type. Valid values from
- POWER_CONTROL_VALID_ARGS, or None to use default.
-
- """
- self._sync_if_up()
- self._set_power('OFF', power_method)
-
- def _check_supported(self):
- """Throw an error if dts mode control is not supported."""
- if not self.servo_pwr_supported:
- raise error.TestFail('power_state controls not supported')
-
- def _sync_if_up(self):
- """Run sync on the DUT and wait for completion if the DUT is up.
-
- Additionally, try to sync and ignore status if its not up.
-
- Useful prior to reboots to ensure files are written to disc.
-
- """
- if self.is_up_fast():
- self.run("sync")
- return
- # If it is not up, attempt to sync in the rare event the DUT is up but
- # doesn't respond to a ping. Ignore any errors.
- try:
- self.run("sync", ignore_status=True, timeout=1)
- except Exception:
- pass
-
- def power_off_via_servo(self):
- """Force the DUT to power off.
-
- The DUT is guaranteed to be off at the end of this call,
- regardless of its previous state, provided that there is
- working EC and boot firmware. There is no requirement for
- working OS software.
-
- """
- self._check_supported()
- self._sync_if_up()
- self.servo.set_nocheck('power_state', 'off')
-
- def power_on_via_servo(self, rec_mode='on'):
- """Force the DUT to power on.
-
- Prior to calling this function, the DUT must be powered off,
- e.g. with a call to `power_off()`.
-
- At power on, recovery mode is set as specified by the
- corresponding argument. When booting with recovery mode on, it
- is the caller's responsibility to unplug/plug in a bootable
- external storage device.
-
- If the DUT requires a delay after powering on but before
- processing inputs such as USB stick insertion, the delay is
- handled by this method; the caller is not responsible for such
- delays.
-
- @param rec_mode Setting of recovery mode to be applied at
- power on. default: REC_OFF aka 'off'
-
- """
- self._check_supported()
- self.servo.set_nocheck('power_state', rec_mode)
-
- def reset_via_servo(self):
- """Force the DUT to reset.
-
- The DUT is guaranteed to be on at the end of this call,
- regardless of its previous state, provided that there is
- working OS software. This also guarantees that the EC has
- been restarted.
-
- """
- self._check_supported()
- self._sync_if_up()
- self.servo.set_nocheck('power_state', 'reset')
-
-
- def power_on(self, power_method=None):
- """Turn on power to this host via RPM, CCD, Servo or manual.
-
- @param power_method Specifies which method of power control to
- use. By default "RPM" or "CCD" will be used based
- on servo type. Valid values from
- POWER_CONTROL_VALID_ARGS, or None to use default.
-
- """
- self._set_power('ON', power_method)
-
-
- def power_cycle(self, power_method=None):
- """Cycle power to this host by turning it OFF, then ON.
-
- @param power_method Specifies which method of power control to
- use. By default "RPM" or "CCD" will be used based
- on servo type. Valid values from
- POWER_CONTROL_VALID_ARGS, or None to use default.
-
- """
- if not power_method:
- power_method = self.get_default_power_method()
-
- if power_method in (self.POWER_CONTROL_SERVO,
- self.POWER_CONTROL_MANUAL,
- self.POWER_CONTROL_CCD):
- self.power_off(power_method=power_method)
- time.sleep(self._POWER_CYCLE_TIMEOUT)
- self.power_on(power_method=power_method)
- else:
- self._add_rpm_changed_tag()
- # TODO b/195443964: Re-wire as needed once TLW is available.
-
-
- def get_platform_from_fwid(self):
- """Determine the platform from the crossystem fwid.
-
- @returns a string representing this host's platform.
- """
- # Look at the firmware for non-unibuild cases or if cros_config fails.
- crossystem = utils.Crossystem(self)
- crossystem.init()
- # Extract fwid value and use the leading part as the platform id.
- # fwid generally follow the format of {platform}.{firmware version}
- # Example: Alex.X.YYY.Z or Google_Alex.X.YYY.Z
- platform = crossystem.fwid().split('.')[0].lower()
- # Newer platforms start with 'Google_' while the older ones do not.
- return platform.replace('google_', '')
-
-
- def get_platform(self):
- """Determine the correct platform label for this host.
-
- @returns a string representing this host's platform.
- """
- release_info = utils.parse_cmd_output('cat /etc/lsb-release',
- run_method=self.run)
- platform = ''
- if release_info.get('CHROMEOS_RELEASE_UNIBUILD') == '1':
- platform = self.get_model_from_cros_config()
- return platform if platform else self.get_platform_from_fwid()
-
-
- def get_model_from_cros_config(self):
- """Get the host model from cros_config command.
-
- @returns a string representing this host's model.
- """
- return cros_config.call_cros_config_get_output('/ name',
- self.run, ignore_status=True)
-
-
- def get_architecture(self):
- """Determine the correct architecture label for this host.
-
- @returns a string representing this host's architecture.
- """
- crossystem = utils.Crossystem(self)
- crossystem.init()
- return crossystem.arch()
-
-
- def get_chrome_version(self):
- """Gets the Chrome version number and milestone as strings.
-
- Invokes "chrome --version" to get the version number and milestone.
-
- @return A tuple (chrome_ver, milestone) where "chrome_ver" is the
- current Chrome version number as a string (in the form "W.X.Y.Z")
- and "milestone" is the first component of the version number
- (the "W" from "W.X.Y.Z"). If the version number cannot be parsed
- in the "W.X.Y.Z" format, the "chrome_ver" will be the full output
- of "chrome --version" and the milestone will be the empty string.
-
- """
- version_string = self.run(client_constants.CHROME_VERSION_COMMAND).stdout
- return utils.parse_chrome_version(version_string)
-
-
- def get_ec_version(self):
- """Get the ec version as strings.
-
- @returns a string representing this host's ec version.
- """
- command = 'mosys ec info -s fw_version'
- result = self.run(command, ignore_status=True)
- if result.exit_status != 0:
- return ''
- return result.stdout.strip()
-
-
- def get_firmware_version(self):
- """Get the firmware version as strings.
-
- @returns a string representing this host's firmware version.
- """
- crossystem = utils.Crossystem(self)
- crossystem.init()
- return crossystem.fwid()
-
-
- def get_hardware_id(self):
- """Get hardware id as strings.
-
- @returns a string representing this host's hardware id.
- """
- crossystem = utils.Crossystem(self)
- crossystem.init()
- return crossystem.hwid()
-
- def get_hardware_revision(self):
- """Get the hardware revision as strings.
-
- @returns a string representing this host's hardware revision.
- """
- command = 'mosys platform version'
- result = self.run(command, ignore_status=True)
- if result.exit_status != 0:
- return ''
- return result.stdout.strip()
-
-
- def get_kernel_version(self):
- """Get the kernel version as strings.
-
- @returns a string representing this host's kernel version.
- """
- return self.run('uname -r').stdout.strip()
-
-
- def get_cpu_name(self):
- """Get the cpu name as strings.
-
- @returns a string representing this host's cpu name.
- """
-
- # Try get cpu name from device tree first
- if self.path_exists('/proc/device-tree/compatible'):
- command = ' | '.join(
- ["sed -e 's/\\x0/\\n/g' /proc/device-tree/compatible",
- 'tail -1'])
- return self.run(command).stdout.strip().replace(',', ' ')
-
- # Get cpu name from uname -p
- command = 'uname -p'
- ret = self.run(command).stdout.strip()
-
- # 'uname -p' return variant of unknown or amd64 or x86_64 or i686
- # Try get cpu name from /proc/cpuinfo instead
- if re.match("unknown|amd64|[ix][0-9]?86(_64)?", ret, re.IGNORECASE):
- command = "grep model.name /proc/cpuinfo | cut -f 2 -d: | head -1"
- self = self.run(command).stdout.strip()
-
- # Remove bloat from CPU name, for example
- # Intel(R) Core(TM) i5-7Y57 CPU @ 1.20GHz -> Intel Core i5-7Y57
- # Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz -> Intel Xeon E5-2690 v4
- # AMD A10-7850K APU with Radeon(TM) R7 Graphics -> AMD A10-7850K
- # AMD GX-212JC SOC with Radeon(TM) R2E Graphics -> AMD GX-212JC
- trim_re = r' (@|processor|apu|soc|radeon).*|\(.*?\)| cpu'
- return re.sub(trim_re, '', ret, flags=re.IGNORECASE)
-
-
- def get_screen_resolution(self):
- """Get the screen(s) resolution as strings.
- In case of more than 1 monitor, return resolution for each monitor
- separate with plus sign.
-
- @returns a string representing this host's screen(s) resolution.
- """
- command = 'for f in /sys/class/drm/*/*/modes; do head -1 $f; done'
- ret = self.run(command, ignore_status=True)
- # We might have Chromebox without a screen
- if ret.exit_status != 0:
- return ''
- return ret.stdout.strip().replace('\n', '+')
-
-
- def get_mem_total_gb(self):
- """Get total memory available in the system in GiB (2^20).
-
- @returns an integer representing total memory
- """
- mem_total_kb = self.read_from_meminfo('MemTotal')
- kb_in_gb = float(2 ** 20)
- return int(round(mem_total_kb / kb_in_gb))
-
-
- def get_disk_size_gb(self):
- """Get size of disk in GB (10^9)
-
- @returns an integer representing size of disk, 0 in Error Case
- """
- command = 'grep $(rootdev -s -d | cut -f3 -d/)$ /proc/partitions'
- result = self.run(command, ignore_status=True)
- if result.exit_status != 0:
- return 0
- _, _, block, _ = re.split(r' +', result.stdout.strip())
- byte_per_block = 1024.0
- disk_kb_in_gb = 1e9
- return int(int(block) * byte_per_block / disk_kb_in_gb + 0.5)
-
-
- def get_battery_size(self):
- """Get size of battery in Watt-hour via sysfs
-
- This method assumes that battery support voltage_min_design and
- charge_full_design sysfs.
-
- @returns a float representing Battery size, 0 if error.
- """
- # sysfs report data in micro scale
- battery_scale = 1e6
-
- command = 'cat /sys/class/power_supply/*/voltage_min_design'
- result = self.run(command, ignore_status=True)
- if result.exit_status != 0:
- return 0
- voltage = float(result.stdout.strip()) / battery_scale
-
- command = 'cat /sys/class/power_supply/*/charge_full_design'
- result = self.run(command, ignore_status=True)
- if result.exit_status != 0:
- return 0
- amphereHour = float(result.stdout.strip()) / battery_scale
-
- return voltage * amphereHour
-
-
- def get_low_battery_shutdown_percent(self):
- """Get the percent-based low-battery shutdown threshold.
-
- @returns a float representing low-battery shutdown percent, 0 if error.
- """
- ret = 0.0
- try:
- command = 'check_powerd_config --low_battery_shutdown_percent'
- ret = float(self.run(command).stdout)
- except error.CmdError:
- logging.debug("Can't run %s", command)
- except ValueError:
- logging.debug("Didn't get number from %s", command)
-
- return ret
-
-
- def has_hammer(self):
- """Check whether DUT has hammer device or not.
-
- @returns boolean whether device has hammer or not
- """
- command = 'grep Hammer /sys/bus/usb/devices/*/product'
- return self.run(command, ignore_status=True).exit_status == 0
-
-
- def is_chrome_switch_present(self, switch):
- """Returns True if the specified switch was provided to Chrome.
-
- @param switch The chrome switch to search for.
- """
-
- command = 'pgrep -x -f -c "/opt/google/chrome/chrome.*%s.*"' % switch
- return self.run(command, ignore_status=True).exit_status == 0
-
-
- def oobe_triggers_update(self):
- """Returns True if this host has an OOBE flow during which
- it will perform an update check and perhaps an update.
- One example of such a flow is Hands-Off Zero-Touch Enrollment.
- As more such flows are developed, code handling them needs
- to be added here.
-
- @return Boolean indicating whether this host's OOBE triggers an update.
- """
- return self.is_chrome_switch_present(
- '--enterprise-enable-zero-touch-enrollment=hands-off')
-
-
- # TODO(kevcheng): change this to just return the board without the
- # 'board:' prefix and fix up all the callers. Also look into removing the
- # need for this method.
- def get_board(self):
- """Determine the correct board label for this host.
-
- @returns a string representing this host's board.
- """
- release_info = utils.parse_cmd_output('cat /etc/lsb-release',
- run_method=self.run)
- return (ds_constants.BOARD_PREFIX +
- release_info['CHROMEOS_RELEASE_BOARD'])
-
- def get_channel(self):
- """Determine the correct channel label for this host.
-
- @returns: a string represeting this host's build channel.
- (stable, dev, beta). None on fail.
- """
- return lsbrelease_utils.get_chromeos_channel(
- lsb_release_content=self._get_lsb_release_content())
-
- def get_power_supply(self):
- """
- Determine what type of power supply the host has
-
- @returns a string representing this host's power supply.
- 'power:battery' when the device has a battery intended for
- extended use
- 'power:AC_primary' when the device has a battery not intended
- for extended use (for moving the machine, etc)
- 'power:AC_only' when the device has no battery at all.
- """
- psu = self.run(command='cros_config /hardware-properties psu-type',
- ignore_status=True)
- if psu.exit_status:
- # Assume battery if unspecified in cros_config.
- return 'power:battery'
-
- psu_str = psu.stdout.strip()
- if psu_str == 'unknown':
- return None
-
- return 'power:%s' % psu_str
-
-
- def has_battery(self):
- """Determine if DUT has a battery.
-
- Returns:
- Boolean, False if known not to have battery, True otherwise.
- """
- return self.get_power_supply() == 'power:battery'
-
-
- def get_servo(self):
- """Determine if the host has a servo attached.
-
- If the host has a working servo attached, it should have a servo label.
-
- @return: string 'servo' if the host has servo attached. Otherwise,
- returns None.
- """
- return 'servo' if self._servo_host else None
-
-
- def has_internal_display(self):
- """Determine if the device under test is equipped with an internal
- display.
-
- @return: 'internal_display' if one is present; None otherwise.
- """
- from autotest_lib.client.cros.graphics import graphics_utils
- from autotest_lib.client.common_lib import utils as common_utils
-
- def __system_output(cmd):
- return self.run(cmd).stdout
-
- def __read_file(remote_path):
- return self.run('cat %s' % remote_path).stdout
-
- # Hijack the necessary client functions so that we can take advantage
- # of the client lib here.
- # FIXME: find a less hacky way than this
- original_system_output = utils.system_output
- original_read_file = common_utils.read_file
- utils.system_output = __system_output
- common_utils.read_file = __read_file
- try:
- return ('internal_display' if graphics_utils.has_internal_display()
- else None)
- finally:
- utils.system_output = original_system_output
- common_utils.read_file = original_read_file
-
-
- def is_boot_from_usb(self):
- """Check if DUT is boot from USB.
-
- @return: True if DUT is boot from usb.
- """
- device = self.run('rootdev -s -d').stdout.strip()
- removable = int(self.run('cat /sys/block/%s/removable' %
- os.path.basename(device)).stdout.strip())
- return removable == 1
-
- def is_boot_from_external_device(self):
- """Check if DUT is boot from external storage.
-
- @return: True if DUT is boot from external storage.
- """
- boot_device = self.run('rootdev -s -d', ignore_status=True,
- timeout=60).stdout.strip()
- if not boot_device:
- logging.debug('Boot storage not detected on the host.')
- return False
- main_storage_cmd = ('. /usr/sbin/write_gpt.sh;'
- ' . /usr/share/misc/chromeos-common.sh;'
- ' load_base_vars; get_fixed_dst_drive')
- main_storage = self.run(main_storage_cmd,
- ignore_status=True,
- timeout=60).stdout.strip()
- if not main_storage or boot_device != main_storage:
- logging.debug('Device booted from external storage storage.')
- return True
- logging.debug('Device booted from main storage.')
- return False
-
- def read_from_meminfo(self, key):
- """Return the memory info from /proc/meminfo
-
- @param key: meminfo requested
-
- @return the memory value as a string
-
- """
- meminfo = self.run('grep %s /proc/meminfo' % key).stdout.strip()
- logging.debug('%s', meminfo)
- return int(re.search(r'\d+', meminfo).group(0))
-
-
- def get_cpu_arch(self):
- """Returns CPU arch of the device.
-
- @return CPU architecture of the DUT.
- """
- # Add CPUs by following logic in client/bin/utils.py.
- if self.run("grep '^flags.*:.* lm .*' /proc/cpuinfo",
- ignore_status=True).stdout:
- return 'x86_64'
- if self.run("grep -Ei 'ARM|CPU implementer' /proc/cpuinfo",
- ignore_status=True).stdout:
- return 'arm'
- return 'i386'
-
-
- def get_board_type(self):
- """
- Get the DUT's device type / form factor from cros_config. It can be one
- of CHROMEBOX, CHROMEBASE, CHROMEBOOK, or CHROMEBIT.
-
- @return form factor value from cros_config.
- """
-
- device_type = self.run('cros_config /hardware-properties form-factor',
- ignore_status=True).stdout
- if device_type:
- return device_type
-
- # TODO: remove lsb-release fallback once cros_config works everywhere
- device_type = self.run('grep DEVICETYPE /etc/lsb-release',
- ignore_status=True).stdout
- if device_type:
- return device_type.split('=')[-1].strip()
- return ''
-
-
- def get_arc_version(self):
- """Return ARC version installed on the DUT.
-
- @returns ARC version as string if the CrOS build has ARC, else None.
- """
- arc_version = self.run('grep CHROMEOS_ARC_VERSION /etc/lsb-release',
- ignore_status=True).stdout
- if arc_version:
- return arc_version.split('=')[-1].strip()
- return None
-
-
- def get_os_type(self):
- return 'cros'
-
-
- def get_labels(self):
- """Return the detected labels on the host."""
- return self.labels.get_labels(self)
-
-
- def get_default_power_method(self):
- """
- Get the default power method for power_on/off/cycle() methods.
- @return POWER_CONTROL_RPM or POWER_CONTROL_CCD
- """
- if not self._default_power_method:
- self._default_power_method = self.POWER_CONTROL_RPM
- if self.servo and self.servo.supports_built_in_pd_control():
- self._default_power_method = self.POWER_CONTROL_CCD
- else:
- logging.debug('Either servo is unitialized or the servo '
- 'setup does not support pd controls. Falling '
- 'back to default RPM method.')
- return self._default_power_method
-
-
- def find_usb_devices(self, idVendor, idProduct):
- """
- Get usb device sysfs name for specific device.
-
- @param idVendor Vendor ID to search in sysfs directory.
- @param idProduct Product ID to search in sysfs directory.
-
- @return Usb node names in /sys/bus/usb/drivers/usb/ that match.
- """
- # Look for matching file and cut at position 7 to get dir name.
- grep_cmd = 'grep {} /sys/bus/usb/drivers/usb/*/{} | cut -f 7 -d /'
-
- vendor_cmd = grep_cmd.format(idVendor, 'idVendor')
- product_cmd = grep_cmd.format(idProduct, 'idProduct')
-
- # Use uniq -d to print duplicate line from both command
- cmd = 'sort <({}) <({}) | uniq -d'.format(vendor_cmd, product_cmd)
-
- return self.run(cmd, ignore_status=True).stdout.strip().split('\n')
-
-
- def bind_usb_device(self, usb_node):
- """
- Bind usb device
-
- @param usb_node Node name in /sys/bus/usb/drivers/usb/
- """
- cmd = 'echo {} > /sys/bus/usb/drivers/usb/bind'.format(usb_node)
- self.run(cmd, ignore_status=True)
-
-
- def unbind_usb_device(self, usb_node):
- """
- Unbind usb device
-
- @param usb_node Node name in /sys/bus/usb/drivers/usb/
- """
- cmd = 'echo {} > /sys/bus/usb/drivers/usb/unbind'.format(usb_node)
- self.run(cmd, ignore_status=True)
-
-
- def get_wlan_ip(self):
- """
- Get ip address of wlan interface.
-
- @return ip address of wlan or empty string if wlan is not connected.
- """
- cmds = [
- 'iw dev', # List wlan physical device
- 'grep Interface', # Grep only interface name
- 'cut -f 2 -d" "', # Cut the name part
- 'xargs ifconfig', # Feed it to ifconfig to get ip
- 'grep -oE "inet [0-9.]+"', # Grep only ipv4
- 'cut -f 2 -d " "' # Cut the ip part
- ]
- return self.run(' | '.join(cmds), ignore_status=True).stdout.strip()
-
- def connect_to_wifi(self, ssid, passphrase=None, security=None):
- """
- Connect to wifi network
-
- @param ssid SSID of the wifi network.
- @param passphrase Passphrase of the wifi network. None if not existed.
- @param security Security of the wifi network. Default to "psk" if
- passphase is given without security. Possible values
- are "none", "psk", "802_1x".
-
- @return True if succeed, False if not.
- """
- cmd = '/usr/local/autotest/cros/scripts/wifi connect ' + ssid
- if passphrase:
- cmd += ' ' + passphrase
- if security:
- cmd += ' ' + security
- return self.run(cmd, ignore_status=True).exit_status == 0
-
- def get_device_repair_state(self):
- """Get device repair state"""
- return self._device_repair_state
-
- def is_file_system_writable(self, testdirs=None):
- """Check is the file systems are writable.
-
- The standard linux response to certain unexpected file system errors
- (including hardware errors in block devices) is to change the file
- system status to read-only. This checks that that hasn't happened.
-
- @param testdirs: List of directories to check. If no data provided
- then '/mnt/stateful_partition' and '/var/tmp'
- directories will be checked.
-
- @returns boolean whether file-system writable.
- """
- def _check_dir(testdir):
- # check if we can create a file
- filename = os.path.join(testdir, 'writable_my_test_file')
- command = 'touch %s && rm %s' % (filename, filename)
- rv = self.run(command=command,
- timeout=30,
- ignore_status=True)
- is_writable = rv.exit_status == 0
- if not is_writable:
- logging.info('Cannot create a file in "%s"!'
- ' Probably the FS is read-only', testdir)
- logging.info("FileSystem is not writable!")
- return False
- return True
-
- if not testdirs or len(testdirs) == 0:
- # N.B. Order matters here: Encrypted stateful is loop-mounted
- # from a file in unencrypted stateful, so we don't test for
- # errors in encrypted stateful if unencrypted fails.
- testdirs = ['/mnt/stateful_partition', '/var/tmp']
-
- for dir in testdirs:
- # loop will be stopped if any directory fill fail the check
- try:
- if not _check_dir(dir):
- return False
- except Exception as e:
- # here expected only timeout error, all other will
- # be catch by 'ignore_status=True'
- logging.debug('Fail to check %s to write in it', dir)
- return False
- return True
-
- def blocking_sync(self, freeze_for_reset=False):
- """Sync root device and internal device, via script.
-
- The actual calls end up logged by the run() call, since they're printed
- to stdout/stderr in the script.
-
- @param freeze_for_reset: if True, prepare for reset by blocking writes
- (only if enable_fs_sync_fsfreeze=True)
- """
-
- if freeze_for_reset and self.USE_FSFREEZE:
- logging.info('Blocking sync and freeze')
- elif freeze_for_reset:
- logging.info('Blocking sync for reset')
- else:
- logging.info('Blocking sync')
-
- # client/bin is installed on the DUT as /usr/local/autotest/bin
- sync_cmd = '/usr/local/autotest/bin/fs_sync.py'
- if freeze_for_reset and self.USE_FSFREEZE:
- sync_cmd += ' --freeze'
- return self.run(sync_cmd)
-
- def set_health_profile_dut_state(self, state):
- if not self.health_profile:
- logging.debug('Device health profile is not initialized, skip'
- ' set dut state.')
- return
- reset_counters = state in profile_constants.STATES_NEED_RESET_COUNTER
- self.health_profile.update_dut_state(state, reset_counters)
-
- def _set_servo_topology(self):
- """Set servo-topology info to the host-info."""
- logging.debug('Try to save servo topology to host-info.')
- if not self._servo_host:
- logging.debug('Servo host is not initialized.')
- return
- if not self.is_servo_in_working_state():
- logging.debug('Is servo is not in working state then'
- ' update topology is not allowed.')
- return
- if not self._servo_host.is_servo_topology_supported():
- logging.debug('Servo-topology is not supported.')
- return
- servo_topology = self._servo_host.get_topology()
- if not servo_topology or servo_topology.is_empty():
- logging.debug('Servo topology is empty')
- return
- servo_topology.save(self.host_info_store)
diff --git a/server/hosts/cros_host_unittest.py b/server/hosts/cros_host_unittest.py
deleted file mode 100755
index 4d0689a..0000000
--- a/server/hosts/cros_host_unittest.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/python3
-# pylint: disable=missing-docstring
-
-import mock
-import unittest
-
-import common
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.server.cros.servo import servo
-from autotest_lib.server.hosts import cros_host
-from autotest_lib.server.hosts import servo_constants
-from autotest_lib.server.hosts import host_info
-
-CROSSYSTEM_RESULT = '''
-fwb_tries = 0 # Fake comment
-fw_vboot2 = 1 # Fake comment
-fwid = Google_Reef.9933.0.0 # Fake comment
-fwupdate_tries = 0 #
-fw_tried = B #
-fw_try_count = 0 #
-'''
-
-NON_UNI_LSB_RELEASE_OUTPUT = '''
-CHROMEOS_RELEASE_BOARD=reef
-'''
-
-UNI_LSB_RELEASE_OUTPUT = '''
-CHROMEOS_RELEASE_BOARD=coral
-CHROMEOS_RELEASE_UNIBUILD=1
-'''
-
-SERVO_STATE_PREFIX = servo_constants.SERVO_STATE_LABEL_PREFIX
-
-
-class MockCmd(object):
- """Simple mock command with base command and results"""
-
- def __init__(self, cmd, exit_status, stdout):
- self.cmd = cmd
- self.stdout = stdout
- self.exit_status = exit_status
-
-
-class MockHost(cros_host.CrosHost):
- """Simple host for running mock'd host commands"""
-
- def __init__(self, *args):
- self._mock_cmds = {c.cmd: c for c in args}
- self.hostname = 'MockHost'
-
- def run(self, command, **kwargs):
- """Finds the matching result by command value"""
- mock_cmd = self._mock_cmds[command]
- file_out = kwargs.get('stdout_tee', None)
- if file_out:
- file_out.write(mock_cmd.stdout)
- return mock_cmd
-
-
-class GetPlatformModelTests(unittest.TestCase):
- """Unit tests for CrosHost.get_platform_model"""
-
- def test_cros_config_succeeds(self):
- host = MockHost(
- MockCmd('cat /etc/lsb-release', 0, UNI_LSB_RELEASE_OUTPUT),
- MockCmd('cros_config / name', 0, 'coral'))
- self.assertEqual(host.get_platform(), 'coral')
-
- def test_cros_config_resorts_to_fallback(self):
- host = MockHost(
- MockCmd('cat /etc/lsb-release', 0, UNI_LSB_RELEASE_OUTPUT),
- MockCmd('cros_config / name', 1, ''),
- MockCmd('mosys platform model', 0, 'coral'))
- self.assertEqual(host.get_platform(), 'coral')
-
- def test_cros_config_fails(self):
- host = MockHost(
- MockCmd('cat /etc/lsb-release', 0, UNI_LSB_RELEASE_OUTPUT),
- MockCmd('cros_config / name', 1, ''),
- MockCmd('mosys platform model', 1, ''),
- MockCmd('crossystem', 0, CROSSYSTEM_RESULT))
- self.assertEqual(host.get_platform(), 'reef')
-
- def test_non_unibuild(self):
- host = MockHost(
- MockCmd('cat /etc/lsb-release', 0, NON_UNI_LSB_RELEASE_OUTPUT),
- MockCmd('crossystem', 0, CROSSYSTEM_RESULT))
- self.assertEqual(host.get_platform(), 'reef')
-
- def test_cat_lsb_fails(self):
- host = MockHost(
- MockCmd('cat /etc/lsb-release', 1, ''),
- MockCmd('crossystem', 0, CROSSYSTEM_RESULT))
- self.assertEqual(host.get_platform(), 'reef')
-
-
-class DictFilteringTestCase(unittest.TestCase):
- """Tests for dict filtering methods on CrosHost."""
-
- def test_get_chameleon_arguments(self):
- got = cros_host.CrosHost.get_chameleon_arguments({
- 'chameleon_host': 'host',
- 'spam': 'eggs',
- })
- self.assertEqual(got, {'chameleon_host': 'host'})
-
- def test_get_pdtester_arguments(self):
- got = cros_host.CrosHost.get_pdtester_arguments({
- 'pdtester_host': 'host',
- 'spam': 'eggs',
- })
- self.assertEqual(got, {'pdtester_host': 'host'})
-
- def test_get_servo_arguments(self):
- got = cros_host.CrosHost.get_servo_arguments({
- servo_constants.SERVO_HOST_ATTR: 'host',
- 'spam': 'eggs',
- })
- self.assertEqual(got, {servo_constants.SERVO_HOST_ATTR: 'host'})
-
-
-class DictFilteringTestCase(unittest.TestCase):
- """Test to verify servo_state was set-up as label in host_info_store"""
-
- def create_host(self):
- host = MockHost()
- host.servo = None
- host._servo_host = mock.Mock()
- servo = mock.Mock()
- servo.get_servo_type.return_value = None
- host._servo_host.get_servo.return_value = servo
- host._servo_host.get_servo_state.return_value = 'SOME_STATE'
- host.host_info_store = host_info.InMemoryHostInfoStore()
- return host
-
- def test_do_not_update_label_when_servo_host_is_not_inited(self):
- host = self.create_host()
- host._servo_host = None
-
- host.set_servo_state('some_status')
- self.assertEqual(host.host_info_store.get().get_label_value(SERVO_STATE_PREFIX), 'some_status')
-
- def test_do_not_update_label_when_servo_state_is_None(self):
- host = self.create_host()
-
- host.set_servo_state(None)
- host._servo_host.get_servo_state.assert_not_called()
- self.assertEqual(host.host_info_store.get().get_label_value(SERVO_STATE_PREFIX), '')
-
- def test_set_servo_host_use_passed_servo_state_when_host_is_None(self):
- host = self.create_host()
-
- host.set_servo_host(None, 'passed_State')
- self.assertEqual(host.host_info_store.get().get_label_value(SERVO_STATE_PREFIX), 'passed_State')
-
- def test_set_servo_host_use_servo_state_from_host_when_host_is_passed(self):
- host = self.create_host()
- servo_host = mock.Mock()
- servo = mock.Mock()
- servo.get_servo_type.return_value = None
- servo_host.get_servo.return_value = servo
- servo_host.get_servo_state.return_value = 'state_of_host'
-
- host.set_servo_host(servo_host)
- self.assertEqual(host.host_info_store.get().get_label_value(SERVO_STATE_PREFIX), 'state_of_host')
-
- servo_host.get_servo_state.return_value = 'state_of_host2'
- host.set_servo_host(servo_host, 'passed_State')
- self.assertEqual(host.host_info_store.get().get_label_value(SERVO_STATE_PREFIX), 'state_of_host2')
-
-class CrosHostTestCase(unittest.TestCase):
- """Tests to verify CrosHost."""
-
- class TestCrosHost(cros_host.CrosHost):
- def __init__(self, *args, **kwargs):
- self.hostname = 'hostname'
- self.servo = mock.create_autospec(servo.Servo)
-
- @mock.patch('autotest_lib.server.hosts.cros_host.dev_server')
- def test_stage_build_to_usb(self, devserver_mock):
- host = self.TestCrosHost()
- image_server = mock.MagicMock()
- devserver_mock.ImageServer.resolve.return_value = image_server
- image_server.get_test_image_url.return_value = 'image_url'
-
- host.stage_build_to_usb('board/version')
-
- image_server.stage_artifacts.assert_called_with('board/version', ['test_image'])
- host.servo.image_to_servo_usb.assert_called_with('image_url')
-
- host.servo.get_power_state_controller.return_value.power_on.assert_called()
-
- @mock.patch('autotest_lib.server.hosts.cros_host.dev_server')
- def test_stage_build_to_usb_failure(self, devserver_mock):
- host = self.TestCrosHost()
- image_server = mock.MagicMock()
- devserver_mock.ImageServer.resolve.return_value = image_server
- image_server.get_test_image_url.return_value = 'image_url'
- host.servo.image_to_servo_usb.side_effect = error.AutotestError('download')
-
- with self.assertRaises(error.AutotestError):
- host.stage_build_to_usb('board/version')
-
- host.servo.get_power_state_controller.return_value.power_on.assert_called()
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/server/hosts/cros_label.py b/server/hosts/cros_label.py
deleted file mode 100644
index 689bf4a..0000000
--- a/server/hosts/cros_label.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This class defines the CrosHost Label class."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import logging
-import re
-
-import common
-
-from autotest_lib.client.bin import utils
-from autotest_lib.server.cros.dynamic_suite import constants as ds_constants
-from autotest_lib.server.hosts import base_label
-from autotest_lib.server.hosts import common_label
-from autotest_lib.server.hosts import servo_constants
-from six.moves import zip
-
-# pylint: disable=missing-docstring
-LsbOutput = collections.namedtuple('LsbOutput', ['unibuild', 'board'])
-
-# Repair and Deploy taskName
-REPAIR_TASK_NAME = 'repair'
-DEPLOY_TASK_NAME = 'deploy'
-
-
-def _parse_lsb_output(host):
- """Parses the LSB output and returns key data points for labeling.
-
- @param host: Host that the command will be executed against
- @returns: LsbOutput with the result of parsing the /etc/lsb-release output
- """
- release_info = utils.parse_cmd_output('cat /etc/lsb-release',
- run_method=host.run)
-
- unibuild = release_info.get('CHROMEOS_RELEASE_UNIBUILD') == '1'
- return LsbOutput(unibuild, release_info['CHROMEOS_RELEASE_BOARD'])
-
-
-class DeviceSkuLabel(base_label.StringPrefixLabel):
- """Determine the correct device_sku label for the device."""
-
- _NAME = ds_constants.DEVICE_SKU_LABEL
-
- def generate_labels(self, host):
- device_sku = host.host_info_store.get().device_sku
- if device_sku:
- return [device_sku]
-
- mosys_cmd = 'mosys platform sku'
- result = host.run(command=mosys_cmd, ignore_status=True)
- if result.exit_status == 0:
- return [result.stdout.strip()]
-
- return []
-
- def update_for_task(self, task_name):
- # This label is stored in the lab config.
- return task_name in (DEPLOY_TASK_NAME, REPAIR_TASK_NAME, '')
-
-
-class BrandCodeLabel(base_label.StringPrefixLabel):
- """Determine the correct brand_code (aka RLZ-code) for the device."""
-
- _NAME = ds_constants.BRAND_CODE_LABEL
-
- def generate_labels(self, host):
- brand_code = host.host_info_store.get().brand_code
- if brand_code:
- return [brand_code]
-
- cros_config_cmd = 'cros_config / brand-code'
- result = host.run(command=cros_config_cmd, ignore_status=True)
- if result.exit_status == 0:
- return [result.stdout.strip()]
-
- return []
-
-
-class BluetoothPeerLabel(base_label.StringPrefixLabel):
- """Return the Bluetooth peer labels.
-
- working_bluetooth_btpeer label is applied if a Raspberry Pi Bluetooth peer
- is detected.There can be up to 4 Bluetooth peers. Labels
- working_bluetooth_btpeer:[1-4] will be assigned depending on the number of
- peers present.
-
- """
-
- _NAME = 'working_bluetooth_btpeer'
-
- def exists(self, host):
- return len(host._btpeer_host_list) > 0
-
- def generate_labels(self, host):
- labels_list = []
- count = 1
-
- for (btpeer, btpeer_host) in \
- zip(host.btpeer_list, host._btpeer_host_list):
- try:
- # Initialize one device type to make sure the peer is working
- bt_hid_device = btpeer.get_bluetooth_hid_mouse()
- if bt_hid_device.CheckSerialConnection():
- labels_list.append(str(count))
- count += 1
- except Exception as e:
- logging.error('Error with initializing bt_hid_mouse on '
- 'btpeer %s %s', btpeer_host.hostname, e)
-
- logging.info('Bluetooth Peer labels are %s', labels_list)
- return labels_list
-
- def update_for_task(self, task_name):
- # This label is stored in the state config, so only repair tasks update
- # it or when no task name is mentioned.
- return task_name in (REPAIR_TASK_NAME, '')
-
-
-class Cr50Label(base_label.StringPrefixLabel):
- """Label indicating the cr50 image type."""
-
- _NAME = 'cr50'
-
- def __init__(self):
- self.ver = None
-
- def exists(self, host):
- # Make sure the gsctool version command runs ok
- self.ver = host.run('gsctool -a -f', ignore_status=True)
- return self.ver.exit_status == 0
-
- def _get_version(self, region):
- """Get the version number of the given region"""
- return re.search(region + ' (\d+\.\d+\.\d+)', self.ver.stdout).group(1)
-
- def generate_labels(self, host):
- # Check the major version to determine prePVT vs PVT
- version = self._get_version('RW')
- major_version = int(version.split('.')[1])
- # PVT images have a odd major version prePVT have even
- return ['pvt' if (major_version % 2) else 'prepvt']
-
- def update_for_task(self, task_name):
- # This label is stored in the state config, so only repair tasks update
- # it or when no task name is mentioned.
- return task_name in (REPAIR_TASK_NAME, '')
-
-
-class Cr50RWKeyidLabel(Cr50Label):
- """Label indicating the cr50 RW version."""
- _REGION = 'RW'
- _NAME = 'cr50-rw-keyid'
-
- def _get_keyid_info(self, region):
- """Get the keyid of the given region."""
- match = re.search('keyids:.*%s (\S+)' % region, self.ver.stdout)
- keyid = match.group(1).rstrip(',')
- is_prod = int(keyid, 16) & (1 << 2)
- return [keyid, 'prod' if is_prod else 'dev']
-
- def generate_labels(self, host):
- """Get the key type."""
- return self._get_keyid_info(self._REGION)
-
-
-class Cr50ROKeyidLabel(Cr50RWKeyidLabel):
- """Label indicating the RO key type."""
- _REGION = 'RO'
- _NAME = 'cr50-ro-keyid'
-
-
-class ChameleonLabel(base_label.BaseLabel):
- """Determine if a Chameleon is connected to this host."""
-
- _NAME = 'chameleon'
-
- def exists(self, host):
- # See crbug.com/1004500#2 for details.
- has_chameleon = host._chameleon_host is not None
- # TODO(crbug.com/995900) -- debug why chameleon label is flipping
- try:
- logging.info("has_chameleon %s", has_chameleon)
- logging.info("_chameleon_host %s",
- getattr(host, "_chameleon_host", "NO_ATTRIBUTE"))
- logging.info("chameleon %s",
- getattr(host, "chameleon", "NO_ATTRIBUTE"))
- except:
- pass
- return has_chameleon
-
- def update_for_task(self, task_name):
- # This label is stored in the state config, so only repair tasks update
- # it or when no task name is mentioned.
- return task_name in (REPAIR_TASK_NAME, '')
-
-
-class ChameleonConnectionLabel(base_label.StringPrefixLabel):
- """Return the Chameleon connection label."""
-
- _NAME = 'chameleon'
-
- def exists(self, host):
- return host._chameleon_host is not None
-
- def generate_labels(self, host):
- return [host.chameleon.get_label()]
-
- def update_for_task(self, task_name):
- # This label is stored in the lab config, so only deploy tasks update it
- # or when no task name is mentioned.
- return task_name in (DEPLOY_TASK_NAME, '')
-
-
-class AudioLoopbackDongleLabel(base_label.BaseLabel):
- """Return the label if an audio loopback dongle is plugged in."""
-
- _NAME = 'audio_loopback_dongle'
-
- def exists(self, host):
- # Based on crbug.com/991285, AudioLoopbackDongle sometimes flips.
- # Ensure that AudioLoopbackDongle.exists returns True
- # forever, after it returns True *once*.
- if self._cached_exists(host):
- # If the current state is True, return it, don't run the command on
- # the DUT and potentially flip the state.
- return True
- # If the current state is not True, run the command on
- # the DUT. The new state will be set to whatever the command
- # produces.
- return self._host_run_exists(host)
-
- def _node_type_is_plugged(self, node_type, nodes_info):
- """Determine if there is any node of node_type plugged.
-
- This method is used in the AudioLoopbackDongleLabel class, where the
- call is executed on autotest server. Use get_cras_nodes instead if
- the call can be executed on Cros device.
-
- Since Cras only reports the plugged node in GetNodes, we can
- parse the return value to see if there is any node with the given type.
- For example, if INTERNAL_MIC is of intereset, the pattern we are
- looking for is:
-
- dict entry(
- string "Type"
- variant string "INTERNAL_MIC"
- )
-
- @param node_type: A str representing node type defined in CRAS_NODE_TYPES.
- @param nodes_info: A str containing output of command get_nodes_cmd.
-
- @returns: True if there is any node of node_type plugged. False otherwise.
-
- """
- match = re.search(r'string "Type"\s+variant\s+string "%s"' % node_type,
- nodes_info)
- return True if match else False
-
- def _cached_exists(self, host):
- """Get the state of AudioLoopbackDongle in the data store"""
- info = host.host_info_store.get()
- for label in info.labels:
- if label.startswith(self._NAME):
- return True
- return False
-
- def _host_run_exists(self, host):
- """Detect presence of audio_loopback_dongle by physically
- running a command on the DUT."""
- cras_cmd = ('dbus-send --system --type=method_call --print-reply '
- '--dest=org.chromium.cras /org/chromium/cras '
- 'org.chromium.cras.Control.GetNodes')
- nodes_info = host.run(command=cras_cmd, ignore_status=True).stdout
- if (self._node_type_is_plugged('HEADPHONE', nodes_info)
- and self._node_type_is_plugged('MIC', nodes_info)):
- return True
- return False
-
- def update_for_task(self, task_name):
- # This label is stored in the state config, so only repair tasks update
- # it or when no task name is mentioned.
- return task_name in (REPAIR_TASK_NAME, '')
-
-
-class ServoTypeLabel(base_label.StringPrefixLabel):
- _NAME = servo_constants.SERVO_TYPE_LABEL_PREFIX
-
- def generate_labels(self, host):
- info = host.host_info_store.get()
-
- servo_type = self._get_from_labels(info)
- if servo_type != '':
- logging.info("Using servo_type: %s from cache!", servo_type)
- return [servo_type]
-
- if host.servo is not None:
- try:
- servo_type = host.servo.get_servo_version()
- if servo_type != '':
- return [servo_type]
- logging.warning('Cannot collect servo_type from servo'
- ' by `dut-control servo_type`! Please file a bug'
- ' and inform infra team as we are not expected '
- ' to reach this point.')
- except Exception as e:
- # We don't want fail the label and break DUTs here just
- # because of servo issue.
- logging.error("Failed to update servo_type, %s", str(e))
- return []
-
- def _get_from_labels(self, info):
- prefix = self._NAME + ':'
- for label in info.labels:
- if label.startswith(prefix):
- suffix_length = len(prefix)
- return label[suffix_length:]
- return ''
-
- def update_for_task(self, task_name):
- # This label is stored in the lab config,
- # only deploy and repair tasks update it
- # or when no task name is mentioned.
- return task_name in (DEPLOY_TASK_NAME, '')
-
-
-def _parse_hwid_labels(hwid_info_list):
- if len(hwid_info_list) == 0:
- return hwid_info_list
-
- res = []
- # See crbug.com/997816#c7 for details of two potential formats of returns
- # from HWID server.
- if isinstance(hwid_info_list[0], dict):
- # Format of hwid_info:
- # [{u'name': u'sku', u'value': u'xxx'}, ..., ]
- for hwid_info in hwid_info_list:
- value = hwid_info.get('value', '')
- name = hwid_info.get('name', '')
- # There should always be a name but just in case there is not.
- if name:
- new_label = name if not value else '%s:%s' % (name, value)
- res.append(new_label)
- else:
- # Format of hwid_info:
- # [<DUTLabel name: 'sku' value: u'xxx'>, ..., ]
- for hwid_info in hwid_info_list:
- new_label = str(hwid_info)
- logging.info('processing hwid label: %s', new_label)
- res.append(new_label)
-
- return res
-
-
-CROS_LABELS = [
- AudioLoopbackDongleLabel(), #STATECONFIG
- BluetoothPeerLabel(), #STATECONFIG
- ChameleonConnectionLabel(), #LABCONFIG
- ChameleonLabel(), #STATECONFIG
- common_label.OSLabel(),
- DeviceSkuLabel(), #LABCONFIG
- ServoTypeLabel(), #LABCONFIG
- # Temporarily add back as there's no way to reference cr50 configs.
- # See crbug.com/1057145 for the root cause.
- # See crbug.com/1057719 for future tracking.
- Cr50Label(),
- Cr50ROKeyidLabel(),
-]
-
-LABSTATION_LABELS = [
- common_label.OSLabel(),
-]
diff --git a/server/hosts/cros_label_unittest.py b/server/hosts/cros_label_unittest.py
deleted file mode 100755
index 0f0006e..0000000
--- a/server/hosts/cros_label_unittest.py
+++ /dev/null
@@ -1,298 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import unittest
-import mock
-
-import common
-
-from autotest_lib.server import utils
-from autotest_lib.server.hosts import cros_label
-from autotest_lib.server.hosts.cros_label import BrandCodeLabel
-from autotest_lib.server.hosts.cros_label import Cr50Label
-from autotest_lib.server.hosts.cros_label import Cr50ROKeyidLabel
-from autotest_lib.server.hosts.cros_label import Cr50RWKeyidLabel
-from autotest_lib.server.hosts.cros_label import DeviceSkuLabel
-from autotest_lib.server.hosts.cros_label import AudioLoopbackDongleLabel
-from autotest_lib.server.hosts.cros_label import ChameleonConnectionLabel
-from autotest_lib.server.hosts.cros_label import ChameleonLabel
-from autotest_lib.server.hosts.cros_label import ServoTypeLabel
-from autotest_lib.server.hosts import host_info
-
-# pylint: disable=missing-docstring
-
-NON_UNI_LSB_RELEASE_OUTPUT = """
-CHROMEOS_RELEASE_APPID={63A9F698-C1CA-4A75-95E7-6B90181B3718}
-CHROMEOS_BOARD_APPID={63A9F698-C1CA-4A75-95E7-6B90181B3718}
-CHROMEOS_CANARY_APPID={90F229CE-83E2-4FAF-8479-E368A34938B1}
-DEVICETYPE=CHROMEBOOK
-CHROMEOS_ARC_VERSION=4234098
-CHROMEOS_ARC_ANDROID_SDK_VERSION=25
-GOOGLE_RELEASE=9798.0.2017_08_02_1022
-CHROMEOS_DEVSERVER=http://shapiroc3.bld.corp.google.com:8080
-CHROMEOS_RELEASE_BOARD=pyro
-CHROMEOS_RELEASE_BUILD_NUMBER=9798
-CHROMEOS_RELEASE_BRANCH_NUMBER=0
-CHROMEOS_RELEASE_CHROME_MILESTONE=62
-CHROMEOS_RELEASE_PATCH_NUMBER=2017_08_02_1022
-CHROMEOS_RELEASE_TRACK=testimage-channel
-CHROMEOS_RELEASE_DESCRIPTION=9798.0.2017_08_02_1022 (Test Build)
-CHROMEOS_RELEASE_BUILD_TYPE=Test Build
-CHROMEOS_RELEASE_NAME=Chromium OS
-CHROMEOS_RELEASE_VERSION=9798.0.2017_08_02_1022
-CHROMEOS_AUSERVER=http://someserver.bld.corp.google.com:8080/update
-"""
-
-UNI_LSB_RELEASE_OUTPUT = """
-CHROMEOS_RELEASE_APPID={5A3AB642-2A67-470A-8F37-37E737A53CFC}
-CHROMEOS_BOARD_APPID={5A3AB642-2A67-470A-8F37-37E737A53CFC}
-CHROMEOS_CANARY_APPID={90F229CE-83E2-4FAF-8479-E368A34938B1}
-DEVICETYPE=CHROMEBOOK
-CHROMEOS_ARC_VERSION=4340813
-CHROMEOS_ARC_ANDROID_SDK_VERSION=25
-GOOGLE_RELEASE=9953.0.2017_09_18_1334
-CHROMEOS_DEVSERVER=http://server.bld.corp.google.com:8080
-CHROMEOS_RELEASE_BOARD=coral
-CHROMEOS_RELEASE_BUILD_NUMBER=9953
-CHROMEOS_RELEASE_BRANCH_NUMBER=0
-CHROMEOS_RELEASE_CHROME_MILESTONE=63
-CHROMEOS_RELEASE_PATCH_NUMBER=2017_09_18_1334
-CHROMEOS_RELEASE_TRACK=testimage-channel
-CHROMEOS_RELEASE_DESCRIPTION=9953.0.2017_09_18_1334 (Test Build)
-CHROMEOS_RELEASE_BUILD_TYPE=Test Build
-CHROMEOS_RELEASE_NAME=Chromium OS
-CHROMEOS_RELEASE_UNIBUILD=1
-CHROMEOS_RELEASE_VERSION=9953.0.2017_09_18_1334
-CHROMEOS_AUSERVER=http://server.bld.corp.google.com:8080/update
-CHROMEOS_RELEASE_MODELS=coral astronaut blue bruce lava nasher
-"""
-
-GSCTOOL_OUTPUT_PVT = """
-start
-target running protocol version 6
-keyids: RO 0xaa66150f, RW 0xde88588d
-offsets: backup RO at 0x40000, backup RW at 0x44000
-Current versions:
-RO 0.0.10
-RW 0.3.14
-"""
-
-GSCTOOL_OUTPUT_PREPVT = """
-start
-target running protocol version 6
-keyids: RO 0xaa66150f, RW 0xde88588d
-offsets: backup RO at 0x40000, backup RW at 0x44000
-Current versions:
-RO 0.0.10
-RW 0.4.15
-"""
-
-GSCTOOL_OUTPUT_DEV_RO = """
-start
-target running protocol version 6
-keyids: RO 0x3716ee6b, RW 0xde88588d
-offsets: backup RO at 0x40000, backup RW at 0x44000
-Current versions:
-RO 0.0.10
-RW 0.4.15
-"""
-
-GSCTOOL_OUTPUT_DEV_RW = """
-start
-target running protocol version 6
-keyids: RO 0xaa66150f, RW 0xb93d6539
-offsets: backup RO at 0x40000, backup RW at 0x44000
-Current versions:
-RO 0.0.10
-RW 0.4.15
-"""
-
-
-class MockCmd(object):
- """Simple mock command with base command and results"""
-
- def __init__(self, cmd, exit_status, stdout):
- self.cmd = cmd
- self.stdout = stdout
- self.exit_status = exit_status
-
-
-class MockAFEHost(utils.EmptyAFEHost):
-
- def __init__(self, labels=[], attributes={}):
- self.labels = labels
- self.attributes = attributes
-
-
-class MockHost(object):
- """Simple host for running mock'd host commands"""
-
- def __init__(self, labels, *args):
- self._afe_host = MockAFEHost(labels)
- self.mock_cmds = {c.cmd: c for c in args}
- info = host_info.HostInfo(labels=labels)
- self.host_info_store = host_info.InMemoryHostInfoStore(info)
-
- def run(self, command, **kwargs):
- """Finds the matching result by command value"""
- return self.mock_cmds[command]
-
- def is_up(self, **args):
- return True
-
-
-class MockHostWithoutAFE(MockHost):
-
- def __init__(self, labels, *args):
- super(MockHostWithoutAFE, self).__init__(labels, *args)
- self._afe_host = utils.EmptyAFEHost()
-
-
-class DeviceSkuLabelTests(unittest.TestCase):
- """Unit tests for DeviceSkuLabel"""
-
- def test_new_label(self):
- mosys_cmd = 'mosys platform sku'
- host = MockHost([], MockCmd(mosys_cmd, 0, '27\n'))
- self.assertEqual(DeviceSkuLabel().generate_labels(host), ['27'])
-
- def test_new_label_mosys_fails(self):
- mosys_cmd = 'mosys platform sku'
- host = MockHost([], MockCmd(mosys_cmd, 1, '27\n'))
- self.assertEqual(DeviceSkuLabel().generate_labels(host), [])
-
- def test_existing_label(self):
- host = MockHost(['device-sku:48'])
- self.assertEqual(DeviceSkuLabel().generate_labels(host), ['48'])
-
- def test_update_for_task(self):
- self.assertTrue(DeviceSkuLabel().update_for_task(''))
- self.assertTrue(DeviceSkuLabel().update_for_task('repair'))
- self.assertTrue(DeviceSkuLabel().update_for_task('deploy'))
-
-
-class BrandCodeLabelTests(unittest.TestCase):
- """Unit tests for DeviceSkuLabel"""
-
- def test_new_label(self):
- cros_config_cmd = 'cros_config / brand-code'
- host = MockHost([], MockCmd(cros_config_cmd, 0, 'XXYZ\n'))
- self.assertEqual(BrandCodeLabel().generate_labels(host), ['XXYZ'])
-
- def test_new_label_cros_config_fails(self):
- cros_config_cmd = 'cros_config / brand-code'
- host = MockHost([], MockCmd(cros_config_cmd, 1, 'XXYZ\n'))
- self.assertEqual(BrandCodeLabel().generate_labels(host), [])
-
- def test_existing_label(self):
- host = MockHost(['brand-code:ABCD'])
- self.assertEqual(BrandCodeLabel().generate_labels(host), ['ABCD'])
-
-
-class Cr50Tests(unittest.TestCase):
- """Unit tests for Cr50Label"""
-
- def test_cr50_pvt(self):
- host = MockHost([], MockCmd('gsctool -a -f', 0, GSCTOOL_OUTPUT_PVT))
- self.assertEqual(Cr50Label().get(host), ['cr50:pvt'])
-
- def test_cr50_prepvt(self):
- host = MockHost([], MockCmd('gsctool -a -f', 0, GSCTOOL_OUTPUT_PREPVT))
- self.assertEqual(Cr50Label().get(host), ['cr50:prepvt'])
-
- def test_gsctool_fails(self):
- host = MockHost([], MockCmd('gsctool -a -f', 1, ''))
- self.assertEqual(Cr50Label().get(host), [])
-
-
-class Cr50RWKeyidTests(unittest.TestCase):
- """Unit tests for Cr50RWKeyidLabel"""
-
- def test_cr50_prod_rw(self):
- host = MockHost([], MockCmd('gsctool -a -f', 0, GSCTOOL_OUTPUT_PVT))
- self.assertEqual(Cr50RWKeyidLabel().get(host),
- ['cr50-rw-keyid:0xde88588d', 'cr50-rw-keyid:prod'])
-
- def test_cr50_dev_rw(self):
- host = MockHost([], MockCmd('gsctool -a -f', 0, GSCTOOL_OUTPUT_DEV_RW))
- self.assertEqual(Cr50RWKeyidLabel().get(host),
- ['cr50-rw-keyid:0xb93d6539', 'cr50-rw-keyid:dev'])
-
- def test_gsctool_fails(self):
- host = MockHost([], MockCmd('gsctool -a -f', 1, ''))
- self.assertEqual(Cr50RWKeyidLabel().get(host), [])
-
-
-class Cr50ROKeyidTests(unittest.TestCase):
- """Unit tests for Cr50ROKeyidLabel"""
-
- def test_cr50_prod_ro(self):
- host = MockHost([], MockCmd('gsctool -a -f', 0, GSCTOOL_OUTPUT_PREPVT))
- self.assertEqual(Cr50ROKeyidLabel().get(host),
- ['cr50-ro-keyid:0xaa66150f', 'cr50-ro-keyid:prod'])
-
- def test_cr50_dev_ro(self):
- host = MockHost([], MockCmd('gsctool -a -f', 0, GSCTOOL_OUTPUT_DEV_RO))
- self.assertEqual(Cr50ROKeyidLabel().get(host),
- ['cr50-ro-keyid:0x3716ee6b', 'cr50-ro-keyid:dev'])
-
- def test_gsctool_fails(self):
- host = MockHost([], MockCmd('gsctool -a -f', 1, ''))
- self.assertEqual(Cr50ROKeyidLabel().get(host), [])
-
-
-class AudioLoopbackDongleLabelTests(unittest.TestCase):
- def test_update_for_task(self):
- self.assertTrue(AudioLoopbackDongleLabel().update_for_task(''))
- self.assertTrue(AudioLoopbackDongleLabel().update_for_task('repair'))
- self.assertFalse(AudioLoopbackDongleLabel().update_for_task('deploy'))
-
-
-class ChameleonConnectionLabelTests(unittest.TestCase):
- def test_update_for_task(self):
- self.assertTrue(ChameleonConnectionLabel().update_for_task(''))
- self.assertFalse(ChameleonConnectionLabel().update_for_task('repair'))
- self.assertTrue(ChameleonConnectionLabel().update_for_task('deploy'))
-
-
-class ChameleonLabelTests(unittest.TestCase):
- def test_update_for_task(self):
- self.assertTrue(ChameleonLabel().update_for_task(''))
- self.assertTrue(ChameleonLabel().update_for_task('repair'))
- self.assertFalse(ChameleonLabel().update_for_task('deploy'))
-
-
-class ServoTypeLabelTests(unittest.TestCase):
- """Unit tests for ServoTypeLabel"""
- def test_update_for_task(self):
- self.assertTrue(ServoTypeLabel().update_for_task(''))
- self.assertFalse(ServoTypeLabel().update_for_task('repair'))
- self.assertTrue(ServoTypeLabel().update_for_task('deploy'))
-
- def test_generate_labels_return_value_from_labels(self):
- host = MockHost(['servo_type:Some_interesting'])
- servo = ServoTypeLabel()
- self.assertEqual(servo.get(host), ['servo_type:Some_interesting'])
- self.assertEqual(servo.generate_labels(host), ['Some_interesting'])
-
- def test_generate_labels_from_cache_when_servo_is_none(self):
- host = MockHost(['servo_state:Some_interesting'])
- host.servo = None
- servo = ServoTypeLabel()
- self.assertEqual(servo.get(host), [])
- self.assertEqual(servo.generate_labels(host), [])
-
- def test_generate_labels_not_from_cache_when_servo_exist(self):
- host = MockHost(['servo_type'])
- host.servo = mock.Mock()
- host.servo.get_servo_version.return_value = 'servo_v3'
- servo = ServoTypeLabel()
- self.assertEqual(servo.get(host), ['servo_type:servo_v3'])
- self.assertEqual(servo.generate_labels(host), ['servo_v3'])
- host.servo.get_servo_version.assert_called()
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/server/hosts/factory.py b/server/hosts/factory.py
index 57bf8e6..752718e 100644
--- a/server/hosts/factory.py
+++ b/server/hosts/factory.py
@@ -16,7 +16,7 @@
from autotest_lib.client.common_lib import global_config
from autotest_lib.server import utils as server_utils
from autotest_lib.server.cros.dynamic_suite import constants
-from autotest_lib.server.hosts import cros_host
+from autotest_lib.server.hosts import generic_host
from autotest_lib.server.hosts import host_info
from autotest_lib.server.hosts import ssh_host
from autotest_lib.server.hosts import file_store
@@ -37,11 +37,11 @@
# A list of all the possible host types, ordered according to frequency of
# host types in the lab, so the more common hosts don't incur a repeated ssh
# overhead in checking for less common host types.
-host_types = [cros_host.CrosHost]
-OS_HOST_DICT = {'cros': cros_host.CrosHost}
+host_types = [generic_host.GenericHost]
+OS_HOST_DICT = {'generic': generic_host.GenericHost}
LOOKUP_DICT = {
- 'CrosHost': cros_host.CrosHost,
+    'GenericHost': generic_host.GenericHost,
}
# Timeout for early connectivity check to the host, in seconds.
@@ -129,7 +129,7 @@
logging.warning('Unable to apply conventional host detection methods, '
'defaulting to chromeos host.')
- return cros_host.CrosHost
+ return generic_host.GenericHost
def _preset_host(hostname):
@@ -251,11 +251,12 @@
except (error.AutoservRunError, error.AutoservSSHTimeout):
logging.exception('Failed to verify connectivity to host.'
' Skipping host auto detection logic.')
- host_class = cros_host.CrosHost
- logging.debug('Defaulting to CrosHost.')
+ host_class = generic_host.GenericHost
+ logging.debug('Defaulting to GenericHost.')
# create a custom host class for this machine and return an instance of it
classes = (host_class, connectivity_class)
+    logging.debug('Creating custom host class from: %s', classes)
custom_host_class = type("%s_host" % hostname, classes, {})
host_instance = custom_host_class(hostname, **args)
diff --git a/server/hosts/generic_host.py b/server/hosts/generic_host.py
new file mode 100644
index 0000000..2eb6f91
--- /dev/null
+++ b/server/hosts/generic_host.py
@@ -0,0 +1,26 @@
+# Lint as: python2, python3
+# Copyright (c) 2021 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import common
+
+from autotest_lib.server.hosts import abstract_ssh
+
+
+class GenericHost(abstract_ssh.AbstractSSHHost):
+    """Generic host for SSH-accessible machines with no OS-specific logic."""
+
+ @staticmethod
+ def check_host(host, timeout=10):
+ """Check if the host is alive."""
+ try:
+ host.run('test -e')
+
+ return True
+ except Exception:
+ return False
+
+ def _initialize(self, hostname, *args, **dargs):
+ """Initialize superclasses."""
+ super(GenericHost, self)._initialize(hostname=hostname, *args, **dargs)
diff --git a/server/hosts/pdtester_host.py b/server/hosts/pdtester_host.py
deleted file mode 100644
index af3bb97..0000000
--- a/server/hosts/pdtester_host.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# Expects to be run in an environment with sudo and no interactive password
-# prompt, such as within the Chromium OS development chroot.
-
-
-"""This file provides core logic for pdtester verify/repair process."""
-
-import logging
-
-from autotest_lib.server.hosts import servo_host
-
-
-# Names of the host attributes in the database that represent the values for
-# the pdtester_host and pdtester_port for a PD tester connected to the DUT.
-PDTESTER_HOST_ATTR = 'pdtester_host'
-PDTESTER_PORT_ATTR = 'pdtester_port'
-
-
-def make_pdtester_hostname(dut_hostname):
- """Given a DUT's hostname, return the hostname of its PD tester.
-
- @param dut_hostname: hostname of a DUT.
-
- @return hostname of the DUT's PD tester.
-
- """
- host_parts = dut_hostname.split('.')
- host_parts[0] = host_parts[0] + '-pdtester'
- return '.'.join(host_parts)
-
-
-class PDTesterHost(servo_host.ServoHost):
- """Host class for a host that controls a PDTester object."""
-
-
- def _initialize(self, pdtester_host='localhost', pdtester_port=9999,
- *args, **dargs):
- """Initialize a PDTesterHost instance.
-
- A PDTesterHost instance represents a host that controls a PD tester.
-
- @param pdtester_host: Name of the host where the servod process
- is running.
- @param pdtester_port: Port the servod process is listening on.
-
- """
- super(PDTesterHost, self)._initialize(pdtester_host, pdtester_port,
- *args, **dargs)
- self.connect_servo()
-
-
-def create_pdtester_host(pdtester_args, servo_host):
- """Create a PDTesterHost object used to access pdtester servo
-
- The `pdtester_args` parameter is a dictionary specifying optional
- PDTester client parameter overrides (i.e. a specific host or port).
- When specified, the caller requires that an exception be raised
- unless both the PDTesterHost and the PDTester are successfully
- created.
-
- @param pdtester_args: A dictionary that contains args for creating
- a PDTesterHost object,
- e.g. {'pdtester_host': '172.11.11.111',
- 'pdtester_port': 9999}.
- @param servo_host: If PDTester and Servo are the same, this
- servo_host object will be returned.
- @returns: A PDTesterHost object or None.
-
- """
- # None means PDTester is not required to run a test.
- if pdtester_args is None:
- return None
-
- # If an user doesn't pass the PDTester info, fall back to use the servo
- # info. Usually we use Servo v4 as PDTester, so make it default.
- if PDTESTER_HOST_ATTR not in pdtester_args:
- logging.debug('%s not specified, reuse the same hostname as servo: %s',
- PDTESTER_HOST_ATTR, servo_host.hostname)
- pdtester_args[PDTESTER_HOST_ATTR] = servo_host.hostname
-
- if PDTESTER_PORT_ATTR not in pdtester_args:
- logging.debug('%s not specified, reuse the same port as servo: %s',
- PDTESTER_PORT_ATTR, servo_host.servo_port)
- pdtester_args[PDTESTER_PORT_ATTR] = servo_host.servo_port
-
- # Just return the servo_host object, if the hostname and the port are the
- # same as servo_host.
- if (pdtester_args[PDTESTER_HOST_ATTR] == servo_host.hostname and
- pdtester_args[PDTESTER_PORT_ATTR] == servo_host.servo_port):
- logging.debug('Return the servo_host directly as PDTester and Servo '
- 'are the same.')
- return servo_host
-
- return PDTesterHost(**pdtester_args)
diff --git a/server/hosts/remote.py b/server/hosts/remote.py
index febbde2..6df4192 100644
--- a/server/hosts/remote.py
+++ b/server/hosts/remote.py
@@ -4,10 +4,10 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+
import os, logging, time
-import six
-from six.moves import urllib
import re
+from six.moves import urllib
import common
@@ -15,7 +15,6 @@
from autotest_lib.client.common_lib.global_config import global_config
from autotest_lib.server import utils
from autotest_lib.server.hosts import base_classes
-from autotest_lib.server.hosts.tls_client.connection import TLSConnection
class RemoteHost(base_classes.Host):
@@ -54,7 +53,7 @@
self.tls_connection = None
try:
- self.tls_connection = TLSConnection()
+ self.tls_connection = None
except Exception as e:
logging.warning("Could not establish TLS connection %s", e)
diff --git a/server/hosts/servo_constants.py b/server/hosts/servo_constants.py
deleted file mode 100644
index 9cfe90c..0000000
--- a/server/hosts/servo_constants.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2020 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from autotest_lib.client.common_lib import global_config
-_CONFIG = global_config.global_config
-
-# Names of the host attributes in the database that represent the values for
-# the servo_host and servo_port for a servo connected to the DUT.
-SERVO_HOST_ATTR = 'servo_host'
-SERVO_HOST_SSH_PORT_ATTR = 'servo_host_ssh_port'
-SERVO_PORT_ATTR = 'servo_port'
-SERVO_BOARD_ATTR = 'servo_board'
-# Model is inferred from host labels.
-SERVO_MODEL_ATTR = 'servo_model'
-SERVO_SERIAL_ATTR = 'servo_serial'
-# Indicates what type of servo setup, example value: REGULAR or DUAL_V4.
-SERVO_SETUP_ATTR = 'servo_setup'
-SERVO_FW_CHANNEL_ATTR = 'servo_fw_channel'
-SERVO_SETUP_VALUE_DUAL_V4 = 'DUAL_V4'
-SERVO_RECOVERY_MODE = 'servo_recovery'
-SERVO_ATTR_KEYS = (
- SERVO_BOARD_ATTR,
- SERVO_HOST_ATTR,
- SERVO_HOST_SSH_PORT_ATTR,
- SERVO_PORT_ATTR,
- SERVO_SERIAL_ATTR,
- SERVO_SETUP_ATTR,
- SERVO_FW_CHANNEL_ATTR,
-)
-
-# Additional args that will be appended to servod start command.
-ADDITIONAL_SERVOD_ARGS = 'additional_servod_args'
-
-# Timeout value for stop/start servod process.
-SERVOD_TEARDOWN_TIMEOUT = 3
-SERVOD_QUICK_STARTUP_TIMEOUT = 20
-SERVOD_STARTUP_TIMEOUT = 60
-
-# Prefix of pools that require servod cr50 config.
-CR50_CONFIG_POOL_PREFIX = 'faft-cr50'
-
-ENABLE_SSH_TUNNEL_FOR_SERVO = _CONFIG.get_config_value(
- 'CROS', 'enable_ssh_tunnel_for_servo', type=bool, default=False)
-
-SERVO_TYPE_LABEL_PREFIX = 'servo_type'
-SERVO_STATE_LABEL_PREFIX = 'servo_state'
-
-# constants to support whole list of states for servo
-SERVO_STATE_UNKNOWN = 'UNKNOWN'
-SERVO_STATE_MISSING_CONFIG = 'MISSING_CONFIG'
-SERVO_STATE_WRONG_CONFIG = 'WRONG_CONFIG'
-SERVO_STATE_NO_SSH = 'NO_SSH'
-SERVO_STATE_SERVO_HOST_ISSUE = 'SERVO_HOST_ISSUE'
-SERVO_STATE_NOT_CONNECTED = 'NOT_CONNECTED'
-SERVO_STATE_SERIAL_MISMATCH = 'SERVO_SERIAL_MISMATCH'
-SERVO_STATE_SERVO_UPDATER_ISSUE = 'SERVO_UPDATER_ISSUE'
-SERVO_STATE_NEED_REPLACEMENT = 'NEED_REPLACEMENT'
-SERVO_STATE_CR50_CONSOLE_MISSING = 'CR50_CONSOLE_MISSING'
-SERVO_STATE_CCD_TESTLAB_ISSUE = 'CCD_TESTLAB_ISSUE'
-SERVO_STATE_SERVOD_ISSUE = 'SERVOD_ISSUE'
-SERVO_STATE_SERVOD_PROXY_ISSUE = 'SERVOD_PROXY_ISSUE'
-SERVO_STATE_LID_OPEN_FAILED = 'LID_OPEN_FAILED'
-SERVO_STATE_BAD_RIBBON_CABLE = 'BAD_RIBBON_CABLE'
-SERVO_STATE_TOPOLOGY_ISSUE = 'TOPOLOGY_ISSUE'
-SERVO_STATE_SBU_LOW_VOLTAGE = 'SBU_LOW_VOLTAGE'
-SERVO_STATE_CR50_NOT_ENUMERATED = 'CR50_NOT_ENUMERATED'
-SERVO_STATE_DUT_NOT_CONNECTED = 'DUT_NOT_CONNECTED'
-SERVO_STATE_EC_BROKEN = 'EC_BROKEN'
-SERVO_STATE_BROKEN = 'BROKEN'
-SERVO_STATE_WORKING = 'WORKING'
-
-# constants to support reset servo via smart usbhub.
-SMART_USBHUB_LABEL = 'smart_usbhub'
-SERVO_RESET_TIMEOUT_SECONDS = 20
-ERROR_MESSAGE_USB_HUB_NOT_COMPATIBLE = ('Be sure the hub is a supported'
- ' smart hub')
-# a servo serial number should be fed to this constant
-ERROR_MESSAGE_DEVICE_NOT_FOUND = 'Device with serial \'%s\' not found.'
\ No newline at end of file
diff --git a/server/hosts/servo_host.py b/server/hosts/servo_host.py
deleted file mode 100644
index a95be30..0000000
--- a/server/hosts/servo_host.py
+++ /dev/null
@@ -1,1721 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# Expects to be run in an environment with sudo and no interactive password
-# prompt, such as within the Chromium OS development chroot.
-
-
-"""This file provides core logic for servo verify/repair process."""
-
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import logging
-import os
-import re
-import tarfile
-import threading
-import json
-import time
-import six
-import six.moves.xmlrpc_client
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import hosts
-from autotest_lib.client.common_lib import lsbrelease_utils
-from autotest_lib.client.common_lib.cros import retry
-from autotest_lib.server import crashcollect
-from autotest_lib.server import tauto_warnings
-from autotest_lib.server.cros.servo import servo
-from autotest_lib.server.hosts import base_servohost
-from autotest_lib.server.hosts import servo_constants
-from autotest_lib.server.cros.faft.utils import config
-from autotest_lib.client.common_lib import global_config
-from autotest_lib.server.cros.servo.topology import servo_topology
-
-
-_CONFIG = global_config.global_config
-
-
-class ServoHost(base_servohost.BaseServoHost):
- """Host class for a servo host(e.g. beaglebone, labstation)
- that with a servo instance for a specific port.
-
- @type _servo: servo.Servo | None
- """
-
- DEFAULT_PORT = int(os.getenv('SERVOD_PORT', '9999'))
-
- # Timeout for initializing servo signals.
- INITIALIZE_SERVO_TIMEOUT_SECS = 60
-
- # Default timeout for run terminal command.
- DEFAULT_TERMINAL_TIMEOUT = 30
-
- # Ready test function
- SERVO_READY_METHOD = 'get_version'
-
- # Directory prefix on the servo host where the servod logs are stored.
- SERVOD_LOG_PREFIX = '/var/log/servod'
-
- # Exit code to use when symlinks for servod logs are not found.
- NO_SYMLINKS_CODE = 9
-
- # Directory in the job's results directory to dump the logs into.
- LOG_DIR = 'servod'
-
- # Prefix for joint loglevel files in the logs.
- JOINT_LOG_PREFIX = 'log'
-
- # Regex group to extract timestamp from logfile name.
- TS_GROUP = 'ts'
-
- # This regex is used to extract the timestamp from servod logs.
- # files always start with log.
- TS_RE = (r'log.'
- # The timestamp is of format %Y-%m-%d--%H-%M-%S.MS
- r'(?P<%s>\d{4}(\-\d{2}){2}\-(-\d{2}){3}.\d{3})'
- # The loglevel is optional depending on labstation version.
- r'(.(INFO|DEBUG|WARNING))?' % TS_GROUP)
- TS_EXTRACTOR = re.compile(TS_RE)
-
- # Regex group to extract MCU name from logline in servod logs.
- MCU_GROUP = 'mcu'
-
- # Regex group to extract logline from MCU logline in servod logs.
- LINE_GROUP = 'line'
-
- # This regex is used to extract the mcu and the line content from an
- # MCU logline in servod logs. e.g. EC or servo_v4 console logs.
- # Here is an example log-line:
- #
- # 2020-01-23 13:15:12,223 - servo_v4 - EC3PO.Console - DEBUG -
- # console.py:219:LogConsoleOutput - /dev/pts/9 - cc polarity: cc1
- #
- # Here is conceptually how they are formatted:
- #
- # <time> - <MCU> - EC3PO.Console - <LVL> - <file:line:func> - <pts> -
- # <output>
- #
- # The log format starts with a timestamp
- MCU_RE = (r'[\d\-]+ [\d:,]+ '
- # The mcu that is logging this is next.
- r'- (?P<%s>\w+) - '
- # Next, we have more log outputs before the actual line.
- # Information about the file line, logging function etc.
- # Anchor on EC3PO Console, LogConsoleOutput and dev/pts.
- # NOTE: if the log format changes, this regex needs to be
- # adjusted.
- r'EC3PO\.Console[\s\-\w\d:.]+LogConsoleOutput - /dev/pts/\d+ - '
- # Lastly, we get the MCU's console line.
- r'(?P<%s>.+$)' % (MCU_GROUP, LINE_GROUP))
- MCU_EXTRACTOR = re.compile(MCU_RE)
-
- # Regex to detect timeout messages when USBC pigtail has timeout issue.
- # e.g.: [475635.427072 PD TMOUT RX 1/1]
- USBC_PIGTAIL_TIMEOUT_RE = r'\[[\d \.]{1,20}(PD TMOUT RX 1\/1)\]'
-
- # Suffix to identify compressed logfiles.
- COMPRESSION_SUFFIX = '.tbz2'
-
- # A suffix to mark servod log directories that came from instance that
- # ran during this servo_host, but are not the last one running e.g. when
- # an instance (on purpose, or due to a bug) restarted in the middle of the
- # run.
- OLD_LOG_SUFFIX = 'old'
-
- def _init_attributes(self):
- self._servo_state = None
- self.servo_port = None
- self.servo_board = None
- self.servo_model = None
- self.servo_serial = None
- self.servo_setup = None
- self.servo_recovery = None
- self.servo_fw_channel = None
- self.additional_servod_args = None
- self._dut_health_profile = None
- # The flag that indicate if a servo is connected to a smart usbhub.
- # TODO(xianuowang@) remove this flag once all usbhubs in the lab
- # get replaced.
- self.smart_usbhub = None
- self._servo = None
- self._topology = None
- self._tunnel_proxy = None
- self._tunnel_proxy_lock = threading.Lock()
- self._initial_instance_ts = None
- # Flag to make sure that multiple calls to close do not result in the
- # logic executing multiple times.
- self._closed = False
- # Per-thread local data
- self._local = threading.local()
-
- def _initialize(self,
- servo_host='localhost',
- servo_port=DEFAULT_PORT,
- servo_board=None,
- servo_model=None,
- servo_serial=None,
- servo_setup=None,
- servo_recovery=None,
- servo_fw_channel=None,
- additional_servod_args=None,
- is_in_lab=None,
- *args,
- **dargs):
- """Initialize a ServoHost instance.
-
- A ServoHost instance represents a host that controls a servo.
-
- @param servo_host: Name of the host where the servod process
- is running.
- @param servo_port: Port the servod process is listening on. Defaults
- to the SERVOD_PORT environment variable if set,
- otherwise 9999.
- @param servo_board: Board that the servo is connected to.
- @param servo_model: Model that the servo is connected to.
- @param servo_serial: Serial number of the servo device.
- @param servo_setup: Type of servo setup, e.g. REGULAR or DUAL_V4.
- @param additional_servod_args: Additional args that will append to
- servod start command.
- @param is_in_lab: True if the servo host is in Cros Lab. Default is set
- to None, for which utils.host_is_in_lab_zone will be
- called to check if the servo host is in Cros lab.
-
- """
- super(ServoHost, self)._initialize(hostname=servo_host,
- is_in_lab=is_in_lab, *args, **dargs)
- self._init_attributes()
- self.servo_port = int(servo_port)
- self.servo_board = servo_board
- self.servo_model = servo_model
- self.servo_serial = servo_serial
- self.servo_setup = servo_setup
- self.servo_recovery = servo_recovery
- self.servo_fw_channel = servo_fw_channel
- self.additional_servod_args = additional_servod_args
-
- # The location of the log files on the servo host for this instance.
- self.remote_log_dir = '%s_%s' % (self.SERVOD_LOG_PREFIX,
- self.servo_port)
- # Path of the servo host lock file.
- self._lock_file = (self.TEMP_FILE_DIR + str(self.servo_port) +
- self.LOCK_FILE_POSTFIX)
- # File path to declare a reboot request.
- self._reboot_file = (self.TEMP_FILE_DIR + str(self.servo_port) +
- self.REBOOT_FILE_POSTFIX)
-
- # Lock the servo host if it's an in-lab labstation to prevent other
- # task to reboot it until current task completes. We also wait and
- # make sure the labstation is up here, in the case of the labstation is
- # in the middle of reboot.
- self._is_locked = False
- if (self.wait_up(self.REBOOT_TIMEOUT) and self.is_in_lab()
- and self.is_labstation()):
- self._lock()
- try:
- self.wait_ready()
- except Exception as e:
- logging.info(
- 'Unexpected error while ensure labstation'
- ' readiness; %s', str(e))
-
- self._repair_strategy = None
-
- def __str__(self):
- return "<%s '%s:%s'>" % (
- type(self).__name__, self.hostname, self.servo_port)
-
- def connect_servo(self):
- """ Initialize and setup servo for later use.
- """
- self.initialize_servo()
- self.initialize_dut_for_servo()
-
- def initialize_servo(self):
- """Establish a connection to the servod server on this host.
-
- Initializes `self._servo` and then verifies that all network
- connections are working. This will create an ssh tunnel if
- it's required.
- """
- self._servo = servo.Servo(servo_host=self,
- servo_serial=self.servo_serial)
-
- def initialize_dut_for_servo(self):
- """This method will do some setup for dut control, e.g. setup
- main servo_v4 device, and also testing the connection between servo
- and DUT. As a side effect of testing the connection, all signals on
- the target servo are reset to default values, and the USB stick is
- set to the neutral (off) position.
- """
- if not self._servo:
- raise hosts.AutoservVerifyError('Servo object needs to be'
- ' initialized before initialize'
- ' DUT.')
- timeout, _ = retry.timeout(
- self._servo.initialize_dut,
- timeout_sec=self.INITIALIZE_SERVO_TIMEOUT_SECS)
- if timeout:
- raise hosts.AutoservVerifyError('Initialize dut for servo timed'
- ' out.')
-
- def disconnect_servo(self):
- """Disconnect our servo if it exists.
-
- If we've previously successfully connected to our servo,
- disconnect any established ssh tunnel, and set `self._servo`
- back to `None`.
- """
- if self._servo:
- # N.B. This call is safe even without a tunnel:
- # rpc_server_tracker.disconnect() silently ignores
- # unknown ports.
- self.rpc_server_tracker.disconnect(self.servo_port)
- self._servo = None
-
- def _maybe_create_servod_ssh_tunnel_proxy(self):
- """Create a xmlrpc proxy for use with a ssh tunnel.
- A lock is used to safely create a singleton proxy.
- """
- with self._tunnel_proxy_lock:
- if self._tunnel_proxy is None:
- self._tunnel_proxy = self.rpc_server_tracker.xmlrpc_connect(
- None,
- self.servo_port,
- ready_test_name=self.SERVO_READY_METHOD,
- timeout_seconds=60,
- request_timeout_seconds=3600,
- server_desc=str(self))
-
- def get_servod_server_proxy(self):
- """Return a proxy if it exists; otherwise, create a new one.
- A proxy can either be a ssh tunnel based proxy, or a httplib
- based proxy.
-
- @returns: An xmlrpclib.ServerProxy that is connected to the servod
- server on the host.
- """
- if (servo_constants.ENABLE_SSH_TUNNEL_FOR_SERVO
- and not self.is_localhost()):
- # Check for existing ssh tunnel proxy.
- if self._tunnel_proxy is None:
- self._maybe_create_servod_ssh_tunnel_proxy()
- return self._tunnel_proxy
- else:
- # xmlrpc/httplib is not thread-safe, so each thread must have its
- # own separate proxy connection.
- if not hasattr(self._local, "_per_thread_proxy"):
- remote = 'http://%s:%s' % (self.hostname, self.servo_port)
- self._local._per_thread_proxy = six.moves.xmlrpc_client.ServerProxy(remote)
- return self._local._per_thread_proxy
-
- def verify(self, silent=False):
- """Update the servo host and verify it's in a good state.
-
- @param silent If true, suppress logging in `status.log`.
- """
- message = 'Beginning verify for servo host %s port %s serial %s'
- message %= (self.hostname, self.servo_port, self.servo_serial)
- self.record('INFO', None, None, message)
- tauto_warnings.lab_services_warn_and_error("Verify Servo state")
-
- def _get_default_usbkey_mount_path(self):
- return '/media/servo_usb/%s' % self.servo_port
-
- def get_image_name_from_usbkey(self, usbkey_dev):
- """Mount usb drive and check ChromeOS image name on it if there is
- one. This method assumes the image_usbkey_direction is already set
- to servo side.
-
- @param usbkey_dev: usbkey dev path(e.g. /dev/sdb).
-
- @returns: image_name on the usbkey, e.g. nami-release/R82.10138.0.0,
- or empty string if no test image detected, or unexpected
- error occurred.
- """
- logging.info('Checking ChromeOS image name on usbkey.')
- mount_dst = self._get_default_usbkey_mount_path()
- # Unmount if there is an existing stale mount.
- self._unmount_drive(mount_dst)
- # ChromeOS root fs is in /dev/sdx3
- mount_src = usbkey_dev + '3'
- try:
- if not self._mount_drive(mount_src, mount_dst):
- logging.debug('Unexpected error occurred on mount usb drive.')
- return ''
-
- release_content = self.run(
- 'cat %s/etc/lsb-release' % mount_dst,
- ignore_status=True).stdout.strip()
-
- if not re.search(r'RELEASE_TRACK=.*test', release_content):
- logging.info('The image on usbkey is not a test image')
- return ''
-
- return lsbrelease_utils.get_chromeos_release_builder_path(
- lsb_release_content=release_content)
- finally:
- logging.debug('Image check compeleted, unmounting the usb drive.')
- self._unmount_drive(mount_dst)
-
- def _extract_firmware_image_from_usbkey(self, fw_dst):
- """Extract firmware images from the usbkey on servo, this method
- assumes there is already a ChromeOS test image staged on servo.
-
- @param fw_dst: the path that we'll copy firmware images to.
-
- @returns: a json format string of firmware manifest data.
- """
- usbkey_dev = self._probe_and_validate_usb_dev()
- if not usbkey_dev:
- raise hosts.AutoservRepairError('Unexpected error occurred when'
- ' probe usbkey dev path, please check logs for detail.')
-
- mount_dst = self._get_default_usbkey_mount_path()
- # Unmount if there is an existing stale mount.
- self._unmount_drive(mount_dst)
- # ChromeOS root fs is in /dev/sdx3
- mount_src = usbkey_dev + '3'
- try:
- if not self._mount_drive(mount_src, mount_dst):
- raise hosts.AutoservRepairError('Failed to extract firmware'
- ' image; Unable to mount %s.' % usbkey_dev,
- 'unable to mount usbkey')
- updater_bin = os.path.join(mount_dst,
- 'usr/sbin/chromeos-firmwareupdate')
- self.run('%s --unpack %s' % (updater_bin, fw_dst))
- return self.run('%s --manifest' % updater_bin).stdout
- finally:
- self._unmount_drive(mount_dst)
-
- def prepare_repair_firmware_image(self, fw_dst=None):
- """Prepare firmware image on the servohost for auto repair process
- to consume.
-
- @param fw_dst: the path that we want to store firmware image on
- the servohost.
-
- @returns: A tuple that containes ec firmware image path and bios
- firmware image path on the servohost, or None if type of
- image is not available based on manifest and dut's model.
- """
- model = self.servo_model or self._dut_host_info.model
- if not model:
- raise hosts.AutoservRepairError(
- 'Could not determine DUT\'s model.',
- 'model infomation unknown')
-
- if not fw_dst:
- fw_dst = '/tmp/firmware_image/%s' % self.servo_port
- # Cleanup and re-create dst path to have a fresh start.
- self.run('rm -rf %s' % fw_dst)
- self.run('mkdir -p %s' % fw_dst)
-
- manifest = json.loads(self._extract_firmware_image_from_usbkey(fw_dst))
- # For models that have packed $MODEL_signed variant, we want use the
- # 'signed' variant once we get DVT devices, so try to read manifest
- # from $MODEL_signed first.
- build = manifest.get('%s_signed' % model) or manifest.get(model)
- if not build:
- raise hosts.AutoservRepairError('Could not find firmware manifest'
- ' for model:%s' % model, 'model manifest not found')
- try:
- ec_image = os.path.join(fw_dst, build['ec']['image'])
- except KeyError:
- ec_image = None
- try:
- bios_image = os.path.join(fw_dst, build['host']['image'])
- except KeyError:
- bios_image = None
- if not ec_image and not bios_image:
- raise hosts.AutoservRepairError('Could not find any firmware image'
- ' for model:%s' % model, 'cannot find firmware image')
- return ec_image, bios_image
-
- def flash_ap_firmware_via_servo(self, image):
- """Flash AP firmware by use a provided image.
-
- This is will be a short term enhanment for infra repair use, it use
- 'futility update' which will automatically determine various parameters
- needed for flashrom, and will preserve the GBB, VPD, and HWID for
- AP firmware update.
- @TODO(xianuowang@) Remove this method once b/148403277 implemented.
-
- @param image: the firmware image path on servohost.
- """
- cmd = 'futility update -i %s --servo_port=%s'
- self.run(cmd % (image, self.servo_port), timeout=900)
-
- def _probe_and_validate_usb_dev(self):
- """This method probe the usb dev path by talking to servo, and then
- validate the dev path is valid block device to servohost.
- Possible output:
- 1. Encounter error during probe usb dev, returns empty string.
- 2. probe usb dev completed without error but cannot find usb dev,
- raise AutoservRepairError.
- 3. probe usb dev find a usb dev path, but failed validation in this
- method, raise AutoservRepairError.
-
- @returns: A string of usb dev path(e.g. '/dev/sdb'), or empty string
- if unexpected error occurred during probe.
- @raises: AutoservRepairError if servo couldn't probe the usb dev path
- (servo.probe_host_usb_dev() returns empty string), or the dev path is
- not valid block device to servohost.
- """
- logging.info('Validating image usbkey on servo.')
- try:
- usb_dev = self._servo.probe_host_usb_dev()
- except Exception as e:
- # We don't want any unexpected or transient servo communicating
- # failure block usb repair, so capture all errors here.
- logging.error(e, exc_info=True)
- logging.error('Unexpected error occurred on get usbkey dev path,'
- ' skipping usbkey validation.')
- return ''
-
- if usb_dev:
- # probe_host_usb_dev() sometimes return stale record,
- # so we need to make sure the path exists in fdisk.
- validate_cmd = 'fdisk -l %s' % usb_dev
- try:
- resp = self.run(validate_cmd, ignore_status=True, timeout=30)
- if resp.exit_status == 0:
- return usb_dev
- logging.error('%s is reported from "image_usbkey_dev" control'
- ' but not detected by fdisk!', usb_dev)
- except error.AutoservRunError as e:
- if 'Timeout encountered' in str(e):
- logging.warning('Timeout encountered during fdisk run,'
- ' skipping usbkey validation.')
- return ''
- raise
-
- raise hosts.AutoservRepairError(
- 'No usbkey detected on servo, the usbkey may be either missing'
- ' or broken. Please replace usbkey on the servo and retry.',
- 'missing usbkey')
-
- def is_ec_supported(self):
- """Check if ec is supported on the servo_board"""
- if self.servo_board:
- try:
- frm_config = config.Config(self.servo_board, self.servo_model)
- return frm_config.chrome_ec
- except Exception as e:
- logging.error('Unexpected error when read from firmware'
- ' configs; %s', str(e))
- return False
-
- def validate_image_usbkey(self):
- """This method first validate if there is a recover usbkey on servo
- that accessible to servohost, and second check if a ChromeOS image is
- already on the usb drive and return the image_name so we can avoid
- unnecessary download and flash to the recover usbkey on servo.
-
- Please note that, there is special error handling logic here:
- 1. If unexpected error happens, we return empty string. So repair
- actions will not get blocked.
- 2. If no working usbkey present on servo, but no errors, we'll raise
- AutoservRepairError here.
-
- @returns: image_name on the usbkey, e.g. nami-release/R82.10138.0.0,
- or empty string if no test image detected, or unexpected
- error occurred.
- @raises: AutoservRepairError if the usbkey is not detected on servo.
- """
- usb_dev = self._probe_and_validate_usb_dev()
- if usb_dev:
- return self.get_image_name_from_usbkey(usb_dev)
- else:
- return ''
-
- def repair(self, silent=False):
- """Attempt to repair servo host.
-
- @param silent If true, suppress logging in `status.log`.
- """
- message = 'Beginning repair for servo host %s port %s serial %s'
- message %= (self.hostname, self.servo_port, self.servo_serial)
- self.record('INFO', None, None, message)
- tauto_warnings.lab_services_warn_and_error("Servo repair")
-
- def _is_critical_error(self, error):
- if (isinstance(error, hosts.AutoservVerifyDependencyError)
- and not error.is_critical()):
- logging.warning('Non-critical verify failure(s) detected during'
- ' verify/repair servo, servo connection will'
- ' still be up but may not be fully functional.'
- ' Some repair actions and servo dependent'
- ' tests may not run.')
- return False
- logging.info(
- 'Critical verify failure(s) detected during repair/verify '
- 'servo. Disconnecting servo and running `stop servod`, all'
- ' repair actions and tests that depends on servo will not '
- 'run.')
- return True
-
- def get_servo(self):
- """Get the cached servo.Servo object.
-
- @return: a servo.Servo object.
- @rtype: autotest_lib.server.cros.servo.servo.Servo
- """
- return self._servo
-
- def request_reboot(self):
- """Request servohost to be rebooted when it's safe to by touch a file.
- """
- logging.debug('Request to reboot servohost %s has been created by '
- 'servo with port # %s', self.hostname, self.servo_port)
- self.run('touch %s' % self._reboot_file, ignore_status=True)
-
- def withdraw_reboot_request(self):
- """Withdraw a servohost reboot request if exists by remove the flag
- file.
- """
- logging.debug('Withdrawing request to reboot servohost %s that created'
- ' by servo with port # %s if exists.',
- self.hostname, self.servo_port)
- self.run('rm -f %s' % self._reboot_file, ignore_status=True)
-
- def start_servod(self, quick_startup=False):
- """Start the servod process on servohost.
- """
- # Skip if running on the localhost.(crbug.com/1038168)
- if self.is_localhost():
- logging.debug("Servohost is a localhost, skipping start servod.")
- return
-
- cmd = 'start servod'
- if self.servo_board:
- cmd += ' BOARD=%s' % self.servo_board
- if self.servo_model:
- cmd += ' MODEL=%s' % self.servo_model
- else:
- logging.warning('Board for DUT is unknown; starting servod'
- ' assuming a pre-configured board.')
-
- cmd += ' PORT=%d' % self.servo_port
- if self.servo_serial:
- cmd += ' SERIAL=%s' % self.servo_serial
-
- # Start servod with dual_v4 based on servo_setup.
- if self.is_dual_setup():
- cmd += ' DUAL_V4=1'
-
- # Start servod with CONFIG=cr50.xml which required for some pools.
- if self._require_cr50_servod_config():
- cmd += ' CONFIG=cr50.xml'
-
- if self.servo_recovery == True:
- cmd += ' REC_MODE=1'
-
- # Adding customized args if any.
- if self.additional_servod_args:
- cmd += ' ' + self.additional_servod_args
-
- # Remove the symbolic links from the logs. This helps ensure that
- # a failed servod instantiation does not cause us to grab old logs
- # by mistake.
- self.remove_latest_log_symlinks()
- self.run(cmd, timeout=60)
-
- # There's a lag between when `start servod` completes and when
- # the _ServodConnectionVerifier trigger can actually succeed.
- # The call to time.sleep() below gives time to make sure that
- # the trigger won't fail after we return.
-
- # Normally servod on servo_v3 and labstation take ~10 seconds to ready,
- # But in the rare case all servo on a labstation are in heavy use they
- # may take ~30 seconds. So the timeout value will double these value,
- # and we'll try quick start up when first time initialize servohost,
- # and use standard start up timeout in repair.
- if quick_startup:
- timeout = servo_constants.SERVOD_QUICK_STARTUP_TIMEOUT
- else:
- timeout = servo_constants.SERVOD_STARTUP_TIMEOUT
- logging.debug('Wait %s seconds for servod process fully up.', timeout)
- time.sleep(timeout)
- # Cache the initial instance timestamp to check against servod restarts
- self._initial_instance_ts = self.get_instance_logs_ts()
-
- def stop_servod(self):
- """Stop the servod process on servohost.
- """
- # Skip if running on the localhost.(crbug.com/1038168)
- if self.is_localhost():
- logging.debug("Servohost is a localhost, skipping stop servod.")
- return
-
- logging.debug('Stopping servod on port %s', self.servo_port)
- self.run('stop servod PORT=%d' % self.servo_port,
- timeout=60, ignore_status=True)
- logging.debug('Wait %s seconds for servod process fully teardown.',
- servo_constants.SERVOD_TEARDOWN_TIMEOUT)
- time.sleep(servo_constants.SERVOD_TEARDOWN_TIMEOUT)
-
- def restart_servod(self, quick_startup=False):
- """Restart the servod process on servohost.
- """
- self.stop_servod()
- self.start_servod(quick_startup)
-
- def _process_servodtool_error(self, response):
- """Helper function to handle non-zero servodtool response.
- """
- if re.search(servo_constants.ERROR_MESSAGE_USB_HUB_NOT_COMPATIBLE,
- response.stdout):
- logging.error('The servo is not plugged on a usb hub that supports'
- ' power-cycle!')
- # change the flag so we can update this label in later process.
- self.smart_usbhub = False
- return
-
- if re.search(servo_constants.ERROR_MESSAGE_DEVICE_NOT_FOUND %
- self.servo_serial, response.stdout):
- logging.error('No servo with serial %s found!', self.servo_serial)
- return
-
- logging.error('Unexpected error occurred from usbhub control, please'
- ' file a bug and inform chrome-fleet-software@ team!')
-
- def get_main_servo_usb_path(self):
- """Helper function to collect current usb-path to main servo.
-
- The usb-path is path to the folder where usb-device was enumerated.
- If fail then will return an empty string ('').
-
- @returns: string, usb-path to the main servo device.
- e.g.: '/sys/bus/usb/devices/1-6.1.3.1'
- """
- # TODO remove try-except when fix crbug.com/1087964
- try:
- cmd = 'servodtool device -s %s usb-path' % self.servo_serial
- resp = self.run(cmd, ignore_status=True, timeout=30)
- except Exception as e:
- # Here we catch only timeout errors.
- # Other errors is filtered by ignore_status=True
- logging.debug('Attempt to get servo usb-path failed due to '
- 'timeout; %s', e)
- return ''
- if resp.exit_status != 0:
- self._process_servodtool_error(resp)
- return ''
- usb_path = resp.stdout.strip()
- logging.info('Usb path of servo %s is %s', self.servo_serial, usb_path)
- return usb_path
-
- def _get_servo_usb_devnum(self):
- """Helper function to collect current usb devnum of servo."""
- usb_path = self.get_main_servo_usb_path()
- if not usb_path:
- return ''
- resp = self.run('cat %s/devnum' % usb_path, ignore_status=True)
- if resp.exit_status != 0:
- self._process_servodtool_error(resp)
- return ''
- return resp.stdout.strip()
-
- def reboot_servo_v3_on_need(self):
- """Check and reboot servo_v3 based on below conditions.
- 1. If there is an update pending on reboot.
- 2. Servo_v3 has been up for more than 96 hours.
- """
- if self.get_board() != 'beaglebone_servo':
- logging.info('Servo reboot is only applicable for servo V3.')
- return
-
- update_pending_reboot = (self._check_update_status() ==
- self.UPDATE_STATE.PENDING_REBOOT)
- uptime_hours = float(self.check_uptime())/3600
- logging.info('Uptime of servo_v3: %s hour(s)', uptime_hours)
- long_up_time = uptime_hours > 96
-
- # Skip reboot if neither condition are met.
- if not (update_pending_reboot or long_up_time):
- return
-
- if update_pending_reboot:
- message = 'Starting reboot servo_v3 because an update is pending.'
- reboot_method = self._post_update_reboot
- elif long_up_time:
- message = 'Starting reboot servo_v3 because uptime > 96 hours.'
- reboot_method = self._servo_host_reboot
- self.record('INFO', None, None, message)
- logging.info(message)
- try:
- reboot_method()
- message = 'Servo_v3 reboot completed successfully.'
- except Exception as e:
- logging.debug("Fail to reboot servo_v3; %s", e)
- message = ('Servo_v3 reboot failed, please check debug log '
- 'for details.')
- logging.info(message)
- self.record('INFO', None, None, message)
-
- def _reset_servo(self):
- logging.info('Resetting servo through smart usbhub.')
- # TODO remove try-except when fix crbug.com/1087964
- try:
- resp = self.run('servodtool device -s %s power-cycle' %
- self.servo_serial, ignore_status=True,
- timeout=30)
- if resp.exit_status != 0:
- self._process_servodtool_error(resp)
- return False
- except Exception as e:
- # Here we catch only timeout errors.
- # Other errors is filtered by ignore_status=True
- logging.debug('Attempt to reset servo failed due to timeout;'
- ' %s', e)
- return False
-
- logging.debug('Wait %s seconds for servo to come back from reset.',
- servo_constants.SERVO_RESET_TIMEOUT_SECONDS)
- time.sleep(servo_constants.SERVO_RESET_TIMEOUT_SECONDS)
- # change the flag so we can update this label in later process.
- self.smart_usbhub = True
- return True
-
- def reset_servo(self):
- """Reset(power-cycle) the servo via smart usbhub.
- """
- if not self.is_labstation():
- logging.info('Servo reset is not applicable to servo_v3.')
- return
-
- pre_reset_devnum = self._get_servo_usb_devnum()
- logging.info('Servo usb devnum before reset: %s', pre_reset_devnum)
- result = self._reset_servo()
- if not result:
- message = ('Failed to reset servo with serial: %s. (Please ignore'
- ' this error if the DUT is not connected to a smart'
- ' usbhub).' % self.servo_serial)
- logging.warning(message)
- self.record('INFO', None, None, message)
- return
-
- post_reset_devnum = self._get_servo_usb_devnum()
- logging.info('Servo usb devnum after reset: %s', post_reset_devnum)
- if not (pre_reset_devnum and post_reset_devnum):
- message = ('Servo reset completed but unable to verify'
- ' devnum change!')
- elif pre_reset_devnum != post_reset_devnum:
- message = ('Reset servo with serial %s completed successfully!'
- % self.servo_serial)
- else:
- message = 'Servo reset completed but devnum is still not changed!'
- logging.info(message)
- self.record('INFO', None, None, message)
-
- def _extract_compressed_logs(self, logdir, relevant_files):
- """Decompress servod logs in |logdir|.
-
- @param logdir: directory containing compressed servod logs.
- @param relevant_files: list of files in |logdir| to consider.
-
- @returns: tuple, (tarfiles, files) where
- tarfiles: list of the compressed filenames that have been
- extracted and deleted
- files: list of the uncompressed files that were generated
- """
- # For all tar-files, first extract them to the directory, and
- # then let the common flow handle them.
- tarfiles = [cf for cf in relevant_files if
- cf.endswith(self.COMPRESSION_SUFFIX)]
- files = []
- for f in tarfiles:
- norm_name = os.path.basename(f)[:-len(self.COMPRESSION_SUFFIX)]
- with tarfile.open(f) as tf:
- # Each tarfile has only one member, as
- # that's the compressed log.
- member = tf.members[0]
- # Manipulate so that it only extracts the basename, and not
- # the directories etc.
- member.name = norm_name
- files.append(os.path.join(logdir, member.name))
- tf.extract(member, logdir)
- # File has been extracted: remove the compressed file.
- os.remove(f)
- return tarfiles, files
-
- def _extract_mcu_logs(self, log_subdir):
- """Extract MCU (EC, Cr50, etc) console output from servod debug logs.
-
- Using the MCU_EXTRACTOR regex (above) extract and split out MCU console
- lines from the logs to generate invidiual console logs e.g. after
- this method, you can find an ec.txt and servo_v4.txt in |log_dir| if
- those MCUs had any console input/output.
-
- @param log_subdir: directory with log.DEBUG.txt main servod debug logs.
- """
- # Extract the MCU for each one. The MCU logs are only in the .DEBUG
- # files
- mcu_lines_file = os.path.join(log_subdir, 'log.DEBUG.txt')
- if not os.path.exists(mcu_lines_file):
- logging.info('No DEBUG logs found to extract MCU logs from.')
- return
- mcu_files = {}
- mcu_file_template = '%s.txt'
- with open(mcu_lines_file, 'r') as f:
- for line in f:
- match = self.MCU_EXTRACTOR.match(line)
- if match:
- mcu = match.group(self.MCU_GROUP).lower()
- line = match.group(self.LINE_GROUP)
- if mcu not in mcu_files:
- mcu_file = os.path.join(log_subdir,
- mcu_file_template % mcu)
- mcu_files[mcu] = open(mcu_file, 'a')
- fd = mcu_files[mcu]
- fd.write(line + '\n')
- for f in mcu_files:
- mcu_files[f].close()
-
- def remove_latest_log_symlinks(self):
- """Remove the conveninence symlinks 'latest' servod logs."""
- symlink_wildcard = '%s/latest*' % self.remote_log_dir
- cmd = 'rm ' + symlink_wildcard
- self.run(cmd, stderr_tee=None, ignore_status=True)
-
- def probe_servod_restart(self, instance_ts, outdir):
- """Grab servod logs from previous instances if part of this session.
-
- If since the last time this host called start_servod() servod crashed
- and restarted, this helper finds those logs as well, and stores them
- with the |OLD_LOG_SUFFIX| to investigate if necessary.
-
- It also issues a panicinfo command to servo devices after the restart
- to try and collect reboot information for debugging.
-
- @param instance_ts: the log timestamp that the current instance uses
- @param outdir: directory to create a subdirectory into to place the
- servod logs into.
- """
- if self._initial_instance_ts is None:
- logging.info('No log timestamp grabbed successfully on servod '
- 'startup. Cannot check device restarts. Ignoring.')
- return
- if instance_ts == self._initial_instance_ts:
- logging.debug('Servod appears to have run without restarting')
- return
- # Servod seems to have restarted (at least once). |_initial_instance_ts|
- # is the first timestamp, and instance_ts is the current timestamp. Find
- # all timestamps in between them, and grab the logs for each.
- tss = self._find_instance_timestamps_between(self._initial_instance_ts,
- instance_ts)
- logging.info('Servod has restarted %d times between the start and the '
- 'end of this servo_host.', len(tss))
- logging.info('This might be an issue. Will extract all logs from each '
- 'instance.')
- logging.info('Logs that are not the currently running (about to turn '
- 'down) instance are maked with a .%s in their folder.',
- self.OLD_LOG_SUFFIX)
- for ts in tss:
- self.get_instance_logs(ts, outdir, old=True)
- # Lastly, servod has restarted due to a potential issue. Try to get
- # panic information from servo micro and servo v4 for the current logs.
- # This can only happen if the |_servo| attribute is initialized.
- if self._servo:
- for mcu in ['servo_micro', 'servo_v4', 'servo_v4p1']:
- ctrl = '%s_uart_cmd' % mcu
- if self._servo.has_control(ctrl):
- logging.info('Trying to retrieve %r panicinfo into logs',
- mcu)
- try:
- self._servo.set_nocheck(ctrl, 'panicinfo')
- except error.TestFail as e:
- logging.error('Failed to generate panicinfo for %r '
- 'logs. %s', mcu, str(e))
-
- def _find_instance_timestamps_between(self, start_ts, end_ts):
- """Find all log timestamps between [start_ts, end_ts).
-
- @param start_ts: str, earliest log timestamp of interest
- @param end_ts: str, latest log timestamp of interest
-
- @returns: list, all timestamps between start_ts and end_ts, end_ts
- exclusive, on the servo_host. An empty list on errors
- """
- # Simply get all timestamp, and then sort and remove
- cmd = 'ls %s' % self.remote_log_dir
- res = self.run(cmd, stderr_tee=None, ignore_status=True)
- if res.exit_status != 0:
- # Here we failed to find anything.
- logging.info('Failed to find remote servod logs. Ignoring.')
- return []
- logfiles = res.stdout.strip().split()
- timestamps = set()
- for logfile in logfiles:
- ts_match = self.TS_EXTRACTOR.match(logfile)
- if not ts_match:
- # Simply ignore files that fail the check. It might be the
- # 'latest' symlinks or random files.
- continue
- timestamps.add(ts_match.group(self.TS_GROUP))
- # At this point we have all unique timestamps.
- timestamps = sorted(timestamps)
- for ts in [start_ts, end_ts]:
- if ts not in timestamps:
- logging.error('Timestamp %r not in servod logs. Cannot query '
- 'for timestamps in between %r and %r', ts,
- start_ts, end_ts)
- return []
- return timestamps[timestamps.index(start_ts):timestamps.index(end_ts)]
-
- def get_instance_logs_ts(self):
- """Retrieve the currently running servod instance's log timestamp
-
- @returns: str, timestamp for current instance, or None on failure
- """
- # First, extract the timestamp. This cmd gives the real filename of
- # the latest aka current log file.
- cmd = ('if [ -f %(dir)s/latest.DEBUG ];'
- 'then realpath %(dir)s/latest.DEBUG;'
- 'elif [ -f %(dir)s/latest ];'
- 'then realpath %(dir)s/latest;'
- 'else exit %(code)d;'
- 'fi' % {'dir': self.remote_log_dir,
- 'code': self.NO_SYMLINKS_CODE})
- res = self.run(cmd, stderr_tee=None, ignore_status=True)
- if res.exit_status != 0:
- if res.exit_status == self.NO_SYMLINKS_CODE:
- logging.warning('servod log latest symlinks not found. '
- 'This is likely due to an error starting up '
- 'servod. Ignoring..')
- else:
- logging.warning('Failed to find servod logs on servo host.')
- logging.warning(res.stderr.strip())
- return None
- fname = os.path.basename(res.stdout.strip())
- # From the fname, ought to extract the timestamp using the TS_EXTRACTOR
- ts_match = self.TS_EXTRACTOR.match(fname)
- if not ts_match:
- logging.warning('Failed to extract timestamp from servod log file '
- '%r. Skipping. The servo host is using outdated '
- 'servod logging and needs to be updated.', fname)
- return None
- return ts_match.group(self.TS_GROUP)
-
- def get_servohost_logs(self, outdir):
- """Get logs that can help debugging servo/servod problem from
- the servohost
- """
- log_dir = os.path.join(outdir, 'servohost_%s' % self.hostname)
- if os.path.isdir(log_dir):
- # In multi-DUTs testing, each DUTs will may their own servohost
- # instance, where could cause duplicate efforts if they share a
- # same servohost, so we can just skip the collect if the log
- # dir already exists.
- logging.info(
- 'Skip dmesg and messages logs collecting as %s'
- ' already exists.', log_dir)
- return
- logging.info('Collecting dmesg and messages from servohost %s',
- self.hostname)
- os.mkdir(log_dir)
- logging.info('Saving servohost logs to %s.', log_dir)
- # First collect dmesg from the servohost.
- crashcollect.collect_command(self, 'dmesg -H',
- os.path.join(log_dir, 'dmesg'))
- # Collect messages log from the servohost.
- try:
- self.get_file('/var/log/messages', log_dir, try_rsync=False)
- except error.AutoservRunError as e:
- logging.warning('Failed to collect messages log from servohost.')
-
- def get_instance_logs(self, instance_ts, outdir, old=False):
- """Collect all logs with |instance_ts| and dump into a dir in |outdir|
-
- This method first collects all logs on the servo_host side pertaining
- to this servod instance (port, instatiation). It glues them together
- into combined log.[level].txt files and extracts all available MCU
- console I/O from the logs into individual files e.g. servo_v4.txt
-
- All the output can be found in a directory inside |outdir| that
- this generates based on |LOG_DIR|, the servod port, and the instance
- timestamp on the servo_host side.
-
- @param instance_ts: log timestamp to grab logfiles for
- @param outdir: directory to create a subdirectory into to place the
- servod logs into.
- @param old: bool, whether to append |OLD_LOG_SUFFIX| to output dir
- """
- # Create the local results log dir.
- log_dir = os.path.join(outdir, '%s_%s.%s' % (self.LOG_DIR,
- str(self.servo_port),
- instance_ts))
- if old:
- log_dir = '%s.%s' % (log_dir, self.OLD_LOG_SUFFIX)
- logging.info('Saving servod logs to %r.', log_dir)
- os.mkdir(log_dir)
- # Now, get all files with that timestamp.
- cmd = 'find %s -maxdepth 1 -name "log.%s*"' % (self.remote_log_dir,
- instance_ts)
- res = self.run(cmd, stderr_tee=None, ignore_status=True)
- files = res.stdout.strip().split()
- try:
- self.get_file(files, log_dir, try_rsync=False)
- if not os.listdir(log_dir):
- logging.info('No servod logs retrieved. Ignoring, and removing '
- '%r again.', log_dir)
- os.rmdir(log_dir)
- return
- except error.AutoservRunError as e:
- result = e.result_obj
- if result.exit_status != 0:
- stderr = result.stderr.strip()
- logging.warning("Couldn't retrieve servod logs. Ignoring: %s",
- stderr or '\n%s' % result)
- # Remove the log_dir as nothing was added to it.
- os.rmdir(log_dir)
- return
- local_files = [os.path.join(log_dir, f) for f in os.listdir(log_dir)]
- # TODO(crrev.com/c/1793030): remove no-level case once CL is pushed
- for level_name in ('DEBUG', 'INFO', 'WARNING', ''):
- # Create the joint files for each loglevel. i.e log.DEBUG
- joint_file = self.JOINT_LOG_PREFIX
- if level_name:
- joint_file = '%s.%s' % (self.JOINT_LOG_PREFIX, level_name)
- # This helps with some online tools to avoid complaints about an
- # unknown filetype.
- joint_file = joint_file + '.txt'
- joint_path = os.path.join(log_dir, joint_file)
- files = [f for f in local_files if level_name in f]
- if not files:
- # TODO(crrev.com/c/1793030): remove no-level case once CL
- # is pushed
- continue
- # Extract compressed logs if any.
- compressed, extracted = self._extract_compressed_logs(log_dir,
- files)
- files = list(set(files) - set(compressed))
- files.extend(extracted)
- # Need to sort. As they all share the same timestamp, and
- # loglevel, the index itself is sufficient. The highest index
- # is the oldest file, therefore we need a descending sort.
- def sortkey(f, level=level_name):
- """Custom sortkey to sort based on rotation number int."""
- if f.endswith(level_name): return 0
- return int(f.split('.')[-1])
-
- files.sort(reverse=True, key=sortkey)
- # Just rename the first file rather than building from scratch.
- os.rename(files[0], joint_path)
- with open(joint_path, 'a') as joint_f:
- for logfile in files[1:]:
- # Transfer the file to the joint file line by line.
- with open(logfile, 'r') as log_f:
- for line in log_f:
- joint_f.write(line)
- # File has been written over. Delete safely.
- os.remove(logfile)
- # Need to remove all files form |local_files| so we don't
- # analyze them again.
- local_files = list(set(local_files) - set(files) - set(compressed))
- # Lastly, extract MCU logs from the joint logs.
- self._extract_mcu_logs(log_dir)
-
- def _lock(self):
- """lock servohost by touching a file.
- """
- logging.debug('Locking servohost %s by touching %s file',
- self.hostname, self._lock_file)
- self.run('touch %s' % self._lock_file, ignore_status=True)
- self._is_locked = True
-
- def _unlock(self):
- """Unlock servohost by removing the lock file.
- """
- logging.debug('Unlocking servohost by removing %s file',
- self._lock_file)
- self.run('rm %s' % self._lock_file, ignore_status=True)
- self._is_locked = False
-
- def close(self):
- """Close the associated servo and the host object."""
- # NOTE: throughout this method there are multiple attempts to catch
- # all errors. This is WAI as log grabbing should not fail tests.
- # However, the goal is to catch and handle/process all errors, thus
- # we print the traceback and ask for a bug.
- if self._closed:
- logging.debug('ServoHost is already closed.')
- return
-
- # Only attempt ssh related actions if servohost is sshable. We call
- # check_cached_up_status() first because it's lightweighted and return
- # much faster in the case servohost is down, however, we still want
- # to call is_up() later since check_cached_up_status() is ping based check
- # and not guarantee the servohost is sshable.
- servo_host_ready = self.check_cached_up_status() and self.is_up()
-
- if servo_host_ready:
- instance_ts = self.get_instance_logs_ts()
- else:
- logging.info('Servohost is down, will skip servod log collecting.')
- instance_ts = None
- # TODO(crbug.com/1011516): once enabled, remove the check against
- # localhost and instead check against log-rotiation enablement.
- logs_available = (instance_ts is not None and
- self.job and
- not self.is_localhost())
- if logs_available:
- # Probe whether there was a servod restart, and grab those old
- # logs as well.
- try:
- self.probe_servod_restart(instance_ts, self.job.resultdir)
- except (error.AutoservRunError, error.TestFail) as e:
- logging.info('Failed to grab servo logs due to: %s. '
- 'This error is forgiven.', str(e))
- except Exception as e:
- logging.error('Unexpected error probing for old logs. %s. '
- 'Forgiven. Please file a bug and fix or catch '
- 'in log probing function', str(e),
- exc_info=True)
- if self._servo:
- outdir = None if not self.job else self.job.resultdir
- # In some cases when we run as lab-tools, the job object is None.
- self._servo.close(outdir)
-
- if logs_available:
- # Grab current (not old like above) logs after the servo instance
- # was closed out.
- try:
- self.get_servohost_logs(self.job.resultdir)
- self.get_instance_logs(instance_ts, self.job.resultdir)
- except error.AutoservRunError as e:
- logging.info('Failed to grab servo logs due to: %s. '
- 'This error is forgiven.', str(e))
- except Exception as e:
- logging.error('Unexpected error grabbing servod logs. %s. '
- 'Forgiven. Please file a bug and fix or catch '
- 'in log grabbing function', str(e), exc_info=True)
-
- if self._is_locked and servo_host_ready:
- # Remove the lock if the servohost has been locked.
- try:
- self._unlock()
- except error.AutoservSSHTimeout:
- logging.error('Unlock servohost failed due to ssh timeout.'
- ' It may caused by servohost went down during'
- ' the task.')
- # We want always stop servod after task to minimum the impact of bad
- # servod process interfere other servods.(see crbug.com/1028665)
- if servo_host_ready:
- try:
- self.stop_servod()
- except error.AutoservRunError as e:
- logging.info(
- "Failed to stop servod due to:\n%s\n"
- "This error is forgiven.", str(e))
-
- super(ServoHost, self).close()
- # Mark closed.
- self._closed = True
-
- def get_servo_state(self):
- return self._servo_state
-
- def is_servo_board_present_on_servo_v3(self):
- """Check if servo board is detected on servo_v3"""
- logging.debug('Started to detect servo board on servo_v3')
- vid_pids = ['18d1:5004', '0403:6014']
- not_detected = 'The servo board is not detected on servo_v3'
- try:
- cmd = 'lsusb | grep "%s"' % "\|".join(vid_pids)
- result = self.run(cmd, ignore_status=True, timeout=30)
- if result.exit_status == 0 and result.stdout.strip():
- logging.debug('The servo board is detected on servo_v3')
- return True
- logging.debug('%s; %s', not_detected, result)
- return False
- except Exception as e:
- # can be triggered by timeout issue due running the script
- logging.error('%s; %s', not_detected, str(e))
- return None
-
- def _require_cr50_servod_config(self):
- """Check whether we need start servod with CONFIG=cr50.xml"""
- dut_host_info = self.get_dut_host_info()
- if not dut_host_info:
- return False
- for pool in dut_host_info.pools:
- if pool.startswith(servo_constants.CR50_CONFIG_POOL_PREFIX):
- return True
- return False
-
- def get_verifier_state(self, tag):
- """Return the state of servo verifier.
-
- @returns: bool or None
- """
- tauto_warnings.lab_services_warn_and_error("Servo verify state")
-
- def get_repair_strategy_node(self, tag):
- """Return the instance of verifier/repair node for host by tag.
-
- @returns: _DependencyNode or None
- """
- tauto_warnings.lab_services_warn_and_error("Servo node")
-
- def determine_servo_state(self):
- """Determine servo state based on the failed verifier.
-
- @returns: servo state value
- The state detecting based on first fail verifier or collecting of
- them.
- """
- ssh = self.get_verifier_state('servo_ssh')
- servo_root_present = self.get_verifier_state('servo_root_present')
- servo_root_present_node = self.get_repair_strategy_node(
- 'servo_root_present')
- servo_v3_present = self.get_verifier_state('servo_v3_root_present')
- servo_fw = self.get_verifier_state('servo_fw')
- servo_fw_update = self.get_repair_strategy_node('servo_fw_update')
- disk_space = self.get_verifier_state('servo_disk_space')
- start_servod = self.get_verifier_state('start_servod')
- servod_started = self.get_verifier_state('servod_started')
- servod_echo = self.get_verifier_state('servod_echo')
- create_servo = self.get_verifier_state('servod_connection')
- init_servo = self.get_verifier_state('servod_control')
- cr50_low_sbu = self.get_verifier_state('servo_cr50_low_sbu')
- cr50_off = self.get_verifier_state('servo_cr50_off')
- servo_topology = self.get_verifier_state('servo_topology')
- dut_connected = self.get_verifier_state('servo_dut_connected')
- hub_connected = self.get_verifier_state('servo_hub_connected')
- pwr_button = self.get_verifier_state('servo_pwr_button')
- lid_open = self.get_verifier_state('servo_lid_open')
- ec_board = self.get_verifier_state('servo_ec_board')
- cr50_console = self.get_verifier_state('servo_cr50_console')
- ccd_testlab = self.get_verifier_state('servo_ccd_testlab')
-
- if not ssh:
- return servo_constants.SERVO_STATE_NO_SSH
- if start_servod == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_SERVO_HOST_ISSUE
- if servo_root_present == hosts.VERIFY_FAILED:
- if not self.servo_serial:
- return servo_constants.SERVO_STATE_WRONG_CONFIG
- if hasattr(servo_root_present_node, 'serial_mismatch'):
- return servo_constants.SERVO_STATE_SERIAL_MISMATCH
- return servo_constants.SERVO_STATE_NOT_CONNECTED
- if servo_v3_present == hosts.VERIFY_FAILED:
- # if we cannot find required board on servo_v3
- return servo_constants.SERVO_STATE_NEED_REPLACEMENT
- if servo_fw == hosts.VERIFY_FAILED:
- logging.info(servo_fw_update)
- if hasattr(servo_fw_update, 'servo_updater_issue_detected'):
- return servo_constants.SERVO_STATE_SERVO_UPDATER_ISSUE
- return servo_constants.SERVO_STATE_NEED_REPLACEMENT
-
- if dut_connected == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_DUT_NOT_CONNECTED
- if hub_connected == hosts.VERIFY_FAILED:
- logging.info('Servo HUB not connected')
- return servo_constants.SERVO_STATE_DUT_NOT_CONNECTED
-
- if cr50_low_sbu == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_SBU_LOW_VOLTAGE
- if cr50_off == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_CR50_NOT_ENUMERATED
-
- if servo_topology == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_TOPOLOGY_ISSUE
-
- if (servod_started == hosts.VERIFY_FAILED
- or servod_echo == hosts.VERIFY_FAILED):
- return servo_constants.SERVO_STATE_SERVOD_ISSUE
-
- # one of the reason why servo can not initialized
- if cr50_console == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_CR50_CONSOLE_MISSING
- if ccd_testlab == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_CCD_TESTLAB_ISSUE
-
- if (create_servo == hosts.VERIFY_FAILED
- or init_servo == hosts.VERIFY_FAILED):
- return servo_constants.SERVO_STATE_SERVOD_PROXY_ISSUE
-
- if ec_board == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_EC_BROKEN
- if pwr_button == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_BAD_RIBBON_CABLE
- if lid_open == hosts.VERIFY_FAILED:
- return servo_constants.SERVO_STATE_LID_OPEN_FAILED
-
- logging.info('We do not have special state for this failure yet :)')
- return servo_constants.SERVO_STATE_BROKEN
-
- def is_servo_topology_supported(self):
- """Check if servo_topology is supported."""
- if not self.is_up_fast():
- logging.info('Servo-Host is not reachable.')
- return False
- if not self.is_labstation():
- logging.info('Servo-topology supported only for labstation.')
- return False
- if not self.servo_serial:
- logging.info('Servo-topology required a servo serial.')
- return False
- return True
-
- def get_topology(self):
- """Get servo topology."""
- if not self._topology:
- self._topology = servo_topology.ServoTopology(self)
- return self._topology
-
- def is_dual_setup(self):
- """Check is servo will run in dual setup.
-
- Dual setup used only for servo_v4 when used ccd_cr50 and servo_micro
- at the same time.
- """
- return self.servo_setup == servo_constants.SERVO_SETUP_VALUE_DUAL_V4
-
- def set_dut_health_profile(self, dut_health_profile):
- """
- @param dut_health_profile: A DeviceHealthProfile object.
- """
- logging.debug('setting dut_health_profile field to (%s)',
- dut_health_profile)
- self._dut_health_profile = dut_health_profile
-
- def get_dut_health_profile(self):
- """
- @return A DeviceHealthProfile object.
- """
- return self._dut_health_profile
-
- def print_all_servo_of_host(self):
- """Print all servos detected on the host."""
- try:
- logging.info('\tDevices detected on the host:')
- devices = self.get_topology().get_list_available_servos()
- for device in devices:
- logging.info('\t%s', device)
- except Exception as e:
- logging.debug('(Not critical) Fail list all servos: %s', e)
-
-
-def make_servo_hostname(dut_hostname):
- """Given a DUT's hostname, return the hostname of its servo.
-
- @param dut_hostname: hostname of a DUT.
-
- @return hostname of the DUT's servo.
-
- """
- host_parts = dut_hostname.split('.')
- host_parts[0] = host_parts[0] + '-servo'
- return '.'.join(host_parts)
-
-
-def _map_afe_board_to_servo_board(afe_board):
- """Map a board we get from the AFE to a servo appropriate value.
-
- Many boards are identical to other boards for servo's purposes.
- This function makes that mapping.
-
- @param afe_board string board name received from AFE.
- @return board we expect servo to have.
-
- """
- KNOWN_SUFFIXES = ['-freon', '_freon', '_moblab', '-cheets']
- BOARD_MAP = {'gizmo': 'panther'}
- mapped_board = afe_board
- if afe_board in BOARD_MAP:
- mapped_board = BOARD_MAP[afe_board]
- else:
- for suffix in KNOWN_SUFFIXES:
- if afe_board.endswith(suffix):
- mapped_board = afe_board[0:-len(suffix)]
- break
- if mapped_board != afe_board:
- logging.info('Mapping AFE board=%s to %s', afe_board, mapped_board)
- return mapped_board
-
-
-def get_servo_args_for_host(dut_host):
- """Return servo data associated with a given DUT.
-
- @param dut_host Instance of `Host` on which to find the servo
- attributes.
- @return `servo_args` dict with host and an optional port.
- """
- info = dut_host.host_info_store.get()
- servo_args = {k: v for k, v in six.iteritems(info.attributes)
- if k in servo_constants.SERVO_ATTR_KEYS}
-
- if servo_constants.SERVO_HOST_SSH_PORT_ATTR in servo_args:
- try:
- servo_args[servo_constants.SERVO_HOST_SSH_PORT_ATTR] = int(
- servo_args[servo_constants.SERVO_HOST_SSH_PORT_ATTR])
- except ValueError:
- logging.error('servo host port is not an int: %s',
- servo_args[servo_constants.SERVO_HOST_SSH_PORT_ATTR])
- # Reset servo_args because we don't want to use an invalid port.
- servo_args.pop(servo_constants.SERVO_HOST_SSH_PORT_ATTR, None)
-
- if servo_constants.SERVO_PORT_ATTR in servo_args:
- try:
- servo_args[servo_constants.SERVO_PORT_ATTR] = int(
- servo_args[servo_constants.SERVO_PORT_ATTR])
- except ValueError:
- logging.error('servo port is not an int: %s',
- servo_args[servo_constants.SERVO_PORT_ATTR])
- # Reset servo_args because we don't want to use an invalid port.
- servo_args.pop(servo_constants.SERVO_HOST_ATTR, None)
-
- if info.board:
- servo_board = _map_afe_board_to_servo_board(info.board)
- servo_args[servo_constants.SERVO_BOARD_ATTR] = servo_board
- if info.model:
- servo_args[servo_constants.SERVO_MODEL_ATTR] = info.model
- return servo_args if servo_constants.SERVO_HOST_ATTR in servo_args else None
-
-
-def _tweak_args_for_ssp_moblab(servo_args):
- if (servo_args[servo_constants.SERVO_HOST_ATTR]
- in ['localhost', '127.0.0.1']):
- servo_args[servo_constants.SERVO_HOST_ATTR] = _CONFIG.get_config_value(
- 'SSP', 'host_container_ip', type=str, default=None)
-
-
-def create_servo_host(dut,
- servo_args,
- try_lab_servo=False,
- try_servo_repair=False,
- try_servo_recovery=True,
- dut_host_info=None,
- dut_health_profile=None):
- """Create a ServoHost object for a given DUT, if appropriate.
-
- This function attempts to create and verify or repair a `ServoHost`
- object for a servo connected to the given `dut`, subject to various
- constraints imposed by the parameters:
- * When the `servo_args` parameter is not `None`, a servo
- host must be created, and must be checked with `repair()`.
- * Otherwise, if a servo exists in the lab and `try_lab_servo` is
- true:
- * If `try_servo_repair` is true, then create a servo host and
- check it with `repair()`.
- * Otherwise, if the servo responds to `ping` then create a
- servo host and check it with `verify()`.
-
- In cases where `servo_args` was not `None`, repair failure
- exceptions are passed back to the caller; otherwise, exceptions
- are logged and then discarded. Note that this only happens in cases
- where we're called from a test (not special task) control file that
- has an explicit dependency on servo. In that case, we require that
- repair not write to `status.log`, so as to avoid polluting test
- results.
-
- TODO(jrbarnette): The special handling for servo in test control
- files is a thorn in my flesh; I dearly hope to see it cut out before
- my retirement.
-
- Parameters for a servo host consist of a host name, port number, and
- DUT board, and are determined from one of these sources, in order of
- priority:
- * Servo attributes from the `dut` parameter take precedence over
- all other sources of information.
- * If a DNS entry for the servo based on the DUT hostname exists in
- the CrOS lab network, that hostname is used with the default
- port and the DUT's board.
- * If no other options are found, the parameters will be taken
- from the `servo_args` dict passed in from the caller.
-
- @param dut: An instance of `Host` from which to take
- servo parameters (if available).
- @param servo_args: A dictionary with servo parameters to use if
- they can't be found from `dut`. If this
- argument is supplied, unrepaired exceptions
- from `verify()` will be passed back to the
- caller.
- @param try_lab_servo: If not true, servo host creation will be
- skipped unless otherwise required by the
- caller.
- @param try_servo_repair: If true, check a servo host with
- `repair()` instead of `verify()`.
- @param try_servo_recovery: If true, start servod in recovery mode.
- Default value is True.
- @param dut_host_info: A HostInfo object of the DUT that connected
- to this servo.
- @param dut_health_profile: DUT repair info with history.
-
- @returns: A ServoHost object or None. See comments above.
-
- """
- # We are explicitly looking for if servo_args is None here(which means
- # servo not needed), as servo_args == {} means servo is needed and
- # we expect load servo_args from host_info_store.
- if servo_args is None:
- servo_dependency = False
- local_run = False
- else:
- servo_dependency = True
- # If servo_args pass in directly, then this is a local test run.
- local_run = servo_constants.SERVO_HOST_ATTR in servo_args
-
- if local_run:
- logging.warning('User input servo_args detected, will attempt'
- ' to start servod and initialize servo conncetion'
- ' directly. All servo/servohost verify and repair'
- ' steps will be skipped.')
-
- # Loading servo args from host_info_store.
- if dut is not None and (try_lab_servo or servo_dependency):
- servo_args_override = get_servo_args_for_host(dut)
- if servo_args_override is not None:
- if utils.in_moblab_ssp():
- _tweak_args_for_ssp_moblab(servo_args_override)
- logging.debug(
- 'Overriding provided servo_args (%s) with arguments'
- ' determined from the host (%s)',
- servo_args,
- servo_args_override,
- )
- servo_args = servo_args_override
-
- if not servo_args:
- logging.debug('No servo_args provided, and failed to find overrides.')
- if try_lab_servo or servo_dependency:
- return None, servo_constants.SERVO_STATE_MISSING_CONFIG
- else:
- # For regular test case which not required the servo
- return None, None
-
- servo_hostname = servo_args.get(servo_constants.SERVO_HOST_ATTR)
- servo_port = servo_args.get(servo_constants.SERVO_PORT_ATTR)
- if not local_run:
- if not _is_servo_host_information_exist(servo_hostname, servo_port):
- logging.debug(
- 'Servo connection info missed hostname: %s , port: %s',
- servo_hostname, servo_port)
- return None, servo_constants.SERVO_STATE_MISSING_CONFIG
- if not is_servo_host_information_valid(servo_hostname, servo_port):
- logging.debug(
- 'Servo connection info is incorrect hostname: %s , port: %s',
- servo_hostname, servo_port)
- return None, servo_constants.SERVO_STATE_WRONG_CONFIG
-
- if try_servo_recovery == True:
- servo_args[servo_constants.SERVO_RECOVERY_MODE] = True
-
- newhost = ServoHost(**servo_args)
- if local_run:
- try:
- newhost.start_servod()
- except:
- # If we failed to start servod here, we can assume the servod
- # either already started or the test is running against a
- # non-standard servohost so the user will resiponsble for ensure
- # servod is running.
- pass
- try:
- newhost.initialize_servo()
- newhost.initialize_dut_for_servo()
- newhost._servo_state = servo_constants.SERVO_STATE_WORKING
- return newhost, newhost.get_servo_state()
- except Exception as e:
- logging.error('Failed to initialize servo. %s', e)
- return None, servo_constants.SERVO_STATE_BROKEN
-
- if newhost.use_icmp and not newhost.is_up_fast(count=3):
- # ServoHost has internal check to wait if servo-host is in reboot
- # process. If servo-host still is not available this check will stop
- # further attempts as we do not have any option to recover servo_host.
- return None, servo_constants.SERVO_STATE_NO_SSH
-
- # Reset or reboot servo device only during AdminRepair tasks.
- if try_servo_repair:
- if newhost._is_locked:
- # Print available servos on the host for debugging.
- newhost.print_all_servo_of_host()
- # Reset servo if the servo is locked, as we check if the servohost
- # is up, if the servohost is labstation and if the servohost is in
- # lab inside the locking logic.
- newhost.reset_servo()
- else:
- try:
- newhost.reboot_servo_v3_on_need()
- except Exception as e:
- logging.info('[Non-critical] Unexpected error while trying to'
- ' reboot servo_v3, skipping the reboot; %s', e)
-
- if dut:
- newhost.set_dut_hostname(dut.hostname)
- if dut_host_info:
- newhost.set_dut_host_info(dut_host_info)
- if dut_health_profile and (try_lab_servo or try_servo_repair):
- try:
- if newhost.is_localhost():
- logging.info('Servohost is a localhost, skip device'
- ' health profile setup...')
- else:
- dut_health_profile.init_profile(newhost)
- newhost.set_dut_health_profile(dut_health_profile)
- except Exception as e:
- logging.info(
- '[Non-critical] Unexpected error while trying to'
- ' load device health profile; %s', e)
-
- # Note that the logic of repair() includes everything done
- # by verify(). It's sufficient to call one or the other;
- # we don't need both.
- if servo_dependency:
- newhost.repair(silent=True)
- return newhost, newhost.get_servo_state()
-
- if try_servo_repair:
- try:
- newhost.repair()
- except Exception:
- logging.exception('servo repair failed for %s', newhost.hostname)
- else:
- try:
- newhost.verify()
- except Exception:
- logging.exception('servo verify failed for %s', newhost.hostname)
- return newhost, newhost.get_servo_state()
-
-
-def _is_servo_host_information_exist(hostname, port):
- if hostname is None or len(hostname.strip()) == 0:
- return False
- if port is None:
- return False
- if not type(port) is int:
- try:
- int(port)
- except ValueError:
- return False
- return True
-
-
-def is_servo_host_information_valid(hostname, port):
- """Check if provided servo attributes are valid.
-
- @param hostname Hostname of the servohost.
- @param port servo port number.
-
- @returns: A bool value to indicate if provided servo attribute valid.
- """
- if not _is_servo_host_information_exist(hostname, port):
- return False
- # checking range and correct of the port
- port_int = int(port)
- if port_int < 1 or port_int > 65000:
- return False
- # we expecting host contain only latters, digits and '-' or '_'
- if not re.match('[a-zA-Z0-9-_\.:]*$', hostname) or len(hostname) < 5:
- return False
- return True
diff --git a/server/hosts/servo_host_unittest.py b/server/hosts/servo_host_unittest.py
deleted file mode 100644
index f327b26..0000000
--- a/server/hosts/servo_host_unittest.py
+++ /dev/null
@@ -1,168 +0,0 @@
-import unittest
-import re
-
-import common
-
-from autotest_lib.server.hosts import servo_host
-
-
-class MockCmd(object):
- """Simple mock command with base command and results"""
-
- def __init__(self, cmd, exit_status, stdout):
- self.cmd = cmd
- self.stdout = stdout
- self.exit_status = exit_status
-
-
-class MockHost(servo_host.ServoHost):
- """Simple host for running mock'd host commands"""
-
- def __init__(self, *args):
- self._mock_cmds = {c.cmd: c for c in args}
- self._init_attributes()
- self.hostname = "chromeos1-row1-rack1-host1"
- self._dut_hostname = 'dut-' + self.hostname
- self.servo_port = '9991'
- self._is_localhost = False
- self._use_icmp = True
-
- def run(self, command, **kwargs):
- """Finds the matching result by command value"""
- mock_cmd = self._mock_cmds[command]
- file_out = kwargs.get('stdout_tee', None)
- if file_out:
- file_out.write(mock_cmd.stdout)
- return mock_cmd
-
-
-class ServoHostServoStateTestCase(unittest.TestCase):
- """Tests to verify changing the servo_state"""
- def test_return_none_if_state_not_defined(self):
- host = MockHost()
- self.assertIsNotNone(host)
- self.assertIsNone(host._servo_state)
- self.assertIsNone(host.get_servo_state())
- self.assertEqual(host._servo_state, None)
-
-
-class ServoHostInformationValidator(unittest.TestCase):
- """Tests to verify logic in servo host data"""
- def test_true_when_host_and_port_is_correct(self):
- port = 9999
- hostname = 'chromeos1-rack1-row1-host1-servo'
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, port))
- hostname = 'CHROMEOS1-RACK1-ROW1-host1-SERVO'
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, port))
- hostname = '96.120.0.567'
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, port))
- hostname = 'locathost'
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, port))
- hostname = 'my.dut-1'
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, port))
- hostname = '192.168.0.1:8022'
- self.assertTrue(
- servo_host.is_servo_host_information_valid(hostname, port))
- # diff ports
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, 7000))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, 1234))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, 1))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, 15000))
- # port as string in case of local testing
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, '7000'))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, '1234'))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, '1'))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, ' 15000'))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, ' 07000'))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, ' 01234 '))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, ' 01 '))
- self.assertTrue(servo_host.is_servo_host_information_valid(hostname, '015000'))
-
- def test_false_when_host_is_incorrect_and_port_is_correct(self):
- port = '9991'
- self.assertFalse(
- servo_host.is_servo_host_information_valid('ch1%ra1$r1.h1.servo', port))
- self.assertFalse(
- servo_host.is_servo_host_information_valid('[undefined]', port))
- self.assertFalse(
- servo_host.is_servo_host_information_valid('None', port))
- self.assertFalse(
- servo_host.is_servo_host_information_valid('', port))
- self.assertFalse(
- servo_host.is_servo_host_information_valid(None, port))
-
- def test_false_when_port_is_incorrect_and_host_is_correct(self):
- hostname = 'Some_host-my'
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, None))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, -1))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, 0))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, None))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, 'a1234'))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, 'o1234'))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, '71234'))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, '71.24'))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, '71-24'))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, '-234'))
- self.assertFalse(servo_host.is_servo_host_information_valid(hostname, '-234.9'))
-
-
-class ServoHostInformationExistor(unittest.TestCase):
- """Tests to verify logic in servo host present"""
- def test_true_when_host_is_correct(self):
- port = 9999
- hostname = 'chromeos1-rack1-row1-host1-servo'
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, port))
- hostname = 'CHROMEOS1'
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, port))
- hostname = '!@#$%^&*()'
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, port))
-
- def test_true_when_port_is_correct(self):
- hostname = 'chromeos1-rack1-row1-host1-servo'
- port = 9999
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, 9999))
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, '9999'))
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, 1234))
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, '1234'))
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, ' 1234 '))
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, '01234'))
- self.assertTrue(servo_host._is_servo_host_information_exist(hostname, '01234'))
-
- def test_false_when_port_was_not_set_up(self):
- hostname = 'chromeos1%rack1$row1.host1.servo'
- self.assertFalse(servo_host._is_servo_host_information_exist(hostname, ''))
- self.assertFalse(servo_host._is_servo_host_information_exist(hostname, None))
- self.assertFalse(servo_host._is_servo_host_information_exist(hostname, ""))
-
- def test_false_when_host_was_not_set_up(self):
- port = 1234
- self.assertFalse(servo_host._is_servo_host_information_exist('', port))
- self.assertFalse(servo_host._is_servo_host_information_exist(None, port))
- self.assertFalse(servo_host._is_servo_host_information_exist(' ', port))
-
-
-class ValidateUSBCPigtailRegex(unittest.TestCase):
- """Tests to verify logic in servo host present"""
- def test_good_cases(self):
- host = MockHost()
- message = "[475635.476044 PD TMOUT RX 1/1]"
- self.assertTrue(bool(re.match(host.USBC_PIGTAIL_TIMEOUT_RE, message)))
- message = "[475635.476044654 PD TMOUT RX 1/1]"
- self.assertTrue(bool(re.match(host.USBC_PIGTAIL_TIMEOUT_RE, message)))
- message = "475635.476044654 PD TMOUT RX 1/1"
- self.assertFalse(bool(re.match(host.USBC_PIGTAIL_TIMEOUT_RE, message)))
-
- def test_bad_cases(self):
- host = MockHost()
- message = "PD TMOUT RX 1/1"
- self.assertFalse(bool(re.match(host.USBC_PIGTAIL_TIMEOUT_RE, message)))
- message = "[PD TMOUT RX 1/1]"
- self.assertFalse(bool(re.match(host.USBC_PIGTAIL_TIMEOUT_RE, message)))
- message = "PD TMOUT RX"
- self.assertFalse(bool(re.match(host.USBC_PIGTAIL_TIMEOUT_RE, message)))
- message = "something other"
- self.assertFalse(bool(re.match(host.USBC_PIGTAIL_TIMEOUT_RE, message)))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/server/hosts/tls_client/OWNERS b/server/hosts/tls_client/OWNERS
deleted file mode 100644
index bf13e9b..0000000
--- a/server/hosts/tls_client/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-# ChromeOS Core Automation
-dbeckett@chromium.org
\ No newline at end of file
diff --git a/server/hosts/tls_client/README b/server/hosts/tls_client/README
deleted file mode 100644
index 3d4a08e..0000000
--- a/server/hosts/tls_client/README
+++ /dev/null
@@ -1,10 +0,0 @@
-Protos in this dir are generated using buildprotos.py and manually checked
-into the tree. The source of the protos is
-src/config/proto/chromiumos/config/api/test/tls/commontls.proto, and
-src/config/proto/chromiumos/config/api/test/tls/dependencies/longrunning/operations.proto
-relative to a standard repo.
-(https://chromium.git.corp.google.com/chromiumos/config/+/HEAD/proto/chromiumos/config/api/test/tls/commontls.proto)
-If there are updates to src proto, the protos here must be re-generated. Run the
-script "buildprotos.py" to automatically rebuild them from the source. You must
-manually repo sync prior to this. If there are changes to the file structure
-in the source, buildprotos.py will need to be updated for this.
diff --git a/server/hosts/tls_client/__init__.py b/server/hosts/tls_client/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/server/hosts/tls_client/__init__.py
+++ /dev/null
diff --git a/server/hosts/tls_client/autotest_common.proto b/server/hosts/tls_client/autotest_common.proto
deleted file mode 100644
index 45242b5..0000000
--- a/server/hosts/tls_client/autotest_common.proto
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2019 The Chromium OS Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-syntax = "proto3";
-
-package chromiumos.config.api.test.tls;
-
-option go_package = "go.chromium.org/chromiumos/config/go/api/test/tls";
-
-import "google/protobuf/empty.proto";
-
-import "dependencies/longrunning/operations.proto";
-
-// Common lab services implemented on top of the wiring APIs.
-//
-// The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
-// NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and
-// "OPTIONAL" in this document are to be interpreted as described in
-// RFC 2119.
-//
-// All clients SHOULD pass the gRPC metadata key request_trace_id with one
-// value. The value is a unique string that is associated with the method call
-// in metrics. Clients that do not pass request_trace_id MAY be rejected so that
-// they can be fixed.
-service Common {
- // ExecDutCommand runs a command on a DUT.
- //
- // The working directory is /.
- // A tty is not spawned for the command.
- // The user and group is root.
- // All signals have their default dispositions and are not masked.
- // The umask is set to 0.
- //
- // The environment contains:
- //
- // TERM=dumb
- // PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin
- // LANG=en_US.UTF-8
- // USER=root
- // HOME=/root
- //
- // The environment MAY also contain SSH client variables.
- // The environment SHALL NOT contain variables not mentioned above.
- //
- // If the stream is interrupted, the implementation MAY attempt to
- // stop the command by sending SIGINT, SIGHUP, SIGTERM, or SIGKILL.
- rpc ExecDutCommand(ExecDutCommandRequest)
- returns (stream ExecDutCommandResponse);
-
- // ProvisionDut installs a specified version of Chrome OS on the DUT, along
- // with any specified DLCs.
- //
- // If the DUT is already on the specified version of Chrome OS, the OS will
- // not be provisioned.
- //
- // If the DUT already has the specified list of DLCs, only the missing DLCs
- // will be provisioned.
- rpc ProvisionDut(ProvisionDutRequest) returns (google.longrunning.Operation) {
- option (google.longrunning.operation_info) = {
- response_type: "ProvisionDutResponse",
- metadata_type: "ProvisionDutMetadata"
- };
- }
-
- // ProvisionLacros installs a specified version of Lacros on the DUT.
- //
- // If the DUT already has the specified version of Lacros, Lacros will not be
- // provisioned.
- rpc ProvisionLacros(ProvisionLacrosRequest) returns (google.longrunning.Operation) {
- option (google.longrunning.operation_info) = {
- response_type: "ProvisionLacrosResponse",
- metadata_type: "ProvisionLacrosMetadata"
- };
- }
-
- // FetchCrashes gets a stream of all crash reports currently on the DUT.
- //
- // The stream returned may split up a crash over multiple
- // `FetchCrashesResponse` protos. See the definition of that proto for
- // details.
- //
- // This call is read-only: it doesn't delete the crashes that it reads.
- rpc FetchCrashes(FetchCrashesRequest) returns (stream FetchCrashesResponse);
-
- // CreateFakeOmaha starts a fake Omaha service on TLS and exposes the
- // listened port to the DUT.
- rpc CreateFakeOmaha(CreateFakeOmahaRequest) returns (FakeOmaha);
- // DeleteFakeOmaha deletes the specified fake Omaha resource created by
- // CreateFakeOmaha.
- rpc DeleteFakeOmaha(DeleteFakeOmahaRequest) returns (google.protobuf.Empty);
-}
-
-message ExecDutCommandRequest {
- // name is the resource name for the DUT.
- // The DUT name is passed to the RTD when the RTD is started.
- // It is not specified whether the name is the DUT hostname.
- string name = 1;
- // command is the command to run.
- // If this contains no slashes, it is resolved using PATH.
- // If this starts with /, it is used as an absolute path to the
- // program to run.
- // Otherwise, this is treated as a path relative to the working
- // directory.
- string command = 2;
- // args are the arguments to pass to the command.
- repeated string args = 3;
- // stdin is passed to the command as the program's stdin.
- // The stream does not support seeking.
- // An empty bytes is not treated specially; if the command reads
- // from stdin, it will receive zero bytes.
- bytes stdin = 4;
- // stdout indicates how to handle the command's stdout.
- Output stdout = 5;
- // stderr indicates how to handle the command's stderr.
- Output stderr = 6;
-}
-message ExecDutCommandResponse {
- message ExitInfo {
- // status provides information about how the command process
- // terminated.
- //
- // If the command failed to start, status is set to an arbitrary
- // non-zero value.
- //
- // If signaled is set, status is set to the signal that caused
- // the command to terminate.
- //
- // Otherwise, status is set to the exit status of the process.
- // Exit statuses outside of 0 to 255 inclusive are not supported;
- // they will be mapped to an arbitrary non-zero value.
- //
- // status is zero if and only if the process was successfully
- // started and exited with a zero status.
- int32 status = 1;
- // signaled indicates whether the command exited due to a signal.
- // If set, status contains the signal.
- bool signaled = 2;
- // started indicates whether the command was started.
- bool started = 3;
- // error_message provides a human readable explanation for some errors.
- // This MUST NOT be inspected by programs.
- string error_message = 4;
- }
- // exit_info contains exit information.
- // This is set when the command has exited or failed to start.
- // This is set on the last message in the response stream.
- ExitInfo exit_info = 1;
- // stdout contains the shell command's stdout output since the last
- // response in the stream.
- // The implementation MAY batch or delay output to later
- // responses in the stream.
- bytes stdout = 2;
- // stderr contains the shell command's stderr output since the last
- // response in the stream.
- // The implementation MAY batch or delay output to later
- // responses in the stream.
- bytes stderr = 3;
-}
-
-// Output enumeration for ExecDutCommandRequest.
-enum Output {
- // OUTPUT_PIPE means to collect output and return it.
- OUTPUT_PIPE = 0;
- // OUTPUT_STDOUT is a special value for stderr which means to merge stderr
- // into stdout.
- OUTPUT_STDOUT = 1;
-}
-
-message ProvisionDutRequest {
- // name is the resource name for the DUT.
- // The DUT name is passed to the RTD when the RTD is started.
- // It is not specified whether the name is the DUT hostname.
- string name = 1;
-
- // TODO(crbug.com/1155247) Deprecate this nested message and replace with
- // top level ChromeOsImage.
- message ChromeOSImage {
- oneof path_oneof {
- // gs_path_prefix is the GS path to where kernel, rootfs, and stateful
- // images are located. If DLCs are to be provisioned, it must be a GS path
- // that also has the dlc directory.
- // Only gs://chromeos-image-archive bucket is supported.
- // For example the format should be:
- // - gs://chromeos-image-archive/eve-release/R86-13380.0.0
- string gs_path_prefix = 1;
- }
- }
- // image specifies the Chrome OS image with which to provision the DUT.
- ChromeOSImage image = 2;
-
- // Reference DLCs developer documentation:
- // https://source.corp.google.com/chromeos_public/src/platform2/dlcservice/docs/developer.md
- message DLCSpec {
- // id is the DLC ID which is a unique identifier.
- // The DLC ID must follow a specific format that can be found in the DLC
- // developer doc below.
- string id = 1;
- }
- // dlc_specs specifies which DLCs to install on the DUT after provisioning.
- repeated DLCSpec dlc_specs = 3;
- // preserve_stateful specifies whether the stateful partition should be preserved during
- // provisioning. If preserve_stateful is not set to true, the stateful partition is
- // block-level wiped and reset during provisioning.
- bool preserve_stateful = 4;
-}
-
-message ProvisionDutResponse {
- // When the status code is other than OK, details in Status message should be
- // parsed for ErrorInfo message with the following Reasons as the reason.
- enum Reason {
- // status code: INVALID_ARGUMENT
- REASON_INVALID_REQUEST = 0;
- // status code: FAILED_PRECONDITION
- REASON_DUT_UNREACHABLE_PRE_PROVISION = 1;
- // status code: FAILED_PRECONDITION
- REASON_DOWNLOADING_IMAGE_FAILED = 2;
- // status code: DEADLINE_EXCEEDED
- REASON_PROVISIONING_TIMEDOUT = 3;
- // status code: ABORTED
- REASON_PROVISIONING_FAILED = 4;
- // status code: ABORTED
- REASON_DUT_UNREACHABLE_POST_PROVISION = 5;
- }
-}
-
-message ProvisionDutMetadata {
-}
-
-message ProvisionLacrosRequest {
- // name is the resource name for the DUT.
- // The DUT name is passed to the RTD when the RTD is started.
- // It is not specified whether the name is the DUT hostname.
- string name = 1;
-
- message LacrosImage {
- oneof path_oneof {
- // gs_path_prefix is the GS path prefix to where Lacros is located.
- string gs_path_prefix = 1;
- }
- }
- // image specifies the Lacros image with which to provision the DUT.
- LacrosImage image = 2;
-}
-
-message ProvisionLacrosResponse {
- // When the status code is other than OK, details in Status message should be
- // parsed for ErrorInfo message with the following Reasons as the reason.
- enum Reason {
- // Failed as the ProvisionLacros request is invalid.
- REASON_INVALID_REQUEST = 0;
- // Failed to connect to the DUT prior to provisioning Lacros.
- REASON_DUT_UNREACHABLE_PRE_PROVISION = 1;
- // Failed to download the Lacros image or a timeout during download.
- REASON_DOWNLOADING_IMAGE_FAILED = 2;
- // Failed due to a timeout during the main Lacros provisioning.
- // Excludes timeout during other steps.
- REASON_PROVISIONING_TIMEDOUT = 3;
- // General failure in Lacros provisioning.
- REASON_PROVISIONING_FAILED = 4;
- }
-}
-
-message ProvisionLacrosMetadata {
-}
-
-message FetchCrashesRequest {
- // dut is the resource name for the DUT from which to fetch crashes.
- // The DUT name is passed to the RTD when the RTD is started.
- // It is not specified whether the name is the DUT hostname.
- string dut = 1;
- // If true, fetch the core file.
- // For uploads to the crash server, that should generally be false.
- // If the crash file is likely to be used for manual debugging (e.g. on
- // a manually-invoked test suite run), this might be true.
- // Coredumps can be extremely large (even gigabytes), so if resource usage
- // is a concern, this should probably be false.
- bool fetch_core = 2;
-}
-
-// When this response is streamed, the first proto with a given crash ID will
-// always contain the CrashInfo.
-// Files and core dumps (if present) may be streamed. If they are,
-// subsequent protos with the same crash ID will follow, each containing a chunk
-// of file/coredump. To reassemble these, concatenate the bytes received from
-// each subsequent proto with a matching crash_id (concatenate blobs that have
-// matching crash_ids and keys).
-// Additional crashes may be reported in the same stream with a new crash ID.
-message FetchCrashesResponse {
- // Crash id. unique only within responses to a single FetchCrashes request.
- // Used to assemble multiple streamed |FetchCrashesResponse| protos into a
- // single crash report.
- int64 crash_id = 1;
- oneof data {
- // Full details of crash report.
- CrashInfo crash = 2;
- // Misc file (e.g. minidump, large binary log, etc)
- CrashBlob blob = 3;
- // Coredump. Present iff fetch_core was true in FetchCrashesRequest and
- // the crash has a coredump. (kernel warnings, for example, do not have
- // one).
- bytes core = 4;
- }
-}
-
-// The data in this proto matches the metadata from crash-reporter's meta files.
-// Sender::CreateCrashFormData puts this data into crash upload POST requests.
-// (See src/platform2/crash-reporter/crash_sender_util.cc.)
-// The names in this proto MUST match the names that crash-reporter uses so
-// that, when crashes are uploaded to the crash server, they are interpreted
-// as they are when crash-reporter uploads them.
-// Similarly, when this proto is converted into a POST request to send to the
-// crash server, the names must not be altered.
-message CrashInfo {
- // Name of executable that crashed (e.g. "chrome")
- string exec_name = 1;
- // Product name (e.g. "Chrome_ChromeOS" or "ChromeOS")
- string prod = 2;
- // Product version (e.g. "12345.0.0")
- string ver = 3;
- // Crash signature (may not be populated for all crashes)
- string sig = 4;
- // The name of the integration test that was running when this crash
- // happened, if any.
- string in_progress_integration_test = 5;
- // The name of the collector (e.g. chrome_collector, arc_collector)
- string collector = 6;
- // Additional key-value pairs of metadata (e.g. "crash_loop_mode = true").
- // These should be included in any POSTs to the crash server in a standard
- // POST form, as seen in CreateCrashFormData.
- // (despite the fact that this message is a subfield, it should be a flat
- // structure in any POSTs).
- repeated CrashMetadata fields = 7;
-}
-
-// Arbitrary text-only key-value pair corresponding to the key-value pairs in
-// crash report metadata files.
-message CrashMetadata {
- // This value is a UTF8, human-readable, description of the data.
- string key = 1;
- // The value will be a human-readable string (e.g. "12345.0.0"), which must
- // be valid UTF-8.
- string text = 2;
-};
-
-// Arbitrary non-UTF8 key-value pair from crash report metadata files.
-message CrashBlob {
- // This value is a UTF8, human-readable, description of the data.
- // This should be passed as the 'name' to the crash server.
- // For instance, upload_file_fake_payload
- string key = 1;
- // The value is a blob (e.g. a file from sysfs or a minidump), which need
- // not be valid UTF-8, and may be large.
- bytes blob = 2;
- // The basename of the file. Must be specified as the filename in data
- // uploaded to the crash server.
- // e.g. foo_binary.20201027.102345.0.dmp
- string filename = 3;
-};
-
-message ChromeOsImage {
- oneof path_oneof {
- // gs_path_prefix is the GS path to where the payloads are located. For
- // example the format MAY be:
- // gs://chromeos-image-archive/eve-release/R86-13380.0.0
- string gs_path_prefix = 1;
- }
-}
-
-message FakeOmaha {
- // name is the resource name of the fake Omaha service.
- // Format: fakeOmaha/{fake-omaha-id}
- // The implementation MUST set it after creating the fake Omaha service.
- // Clients SHOULD NOT set it.
- string name = 1;
- // dut is the resource name for the DUT.
- // The DUT name is passed to the RTD when the RTD is started.
- // It is not specified whether the name is the DUT hostname.
- string dut = 2;
-
- // target_build is the ChromeOS build that the fake Omaha service will serve
- // payloads for.
- ChromeOsImage target_build = 3;
-
- message Payload {
- enum Type {
- TYPE_UNSPECIFIED = 0;
- FULL = 1;
- DELTA = 2;
- }
- // id is the id of the payload. It MAY be "ROOTFS" or a DLC id, etc.
- string id = 1;
- // type is the payload type, e.g. TYPE_FULL or TYPE_DELTA.
- Type type = 2;
- }
- // payloads is the payloads can be served by the fake Omaha service.
- repeated Payload payloads = 4;
- // exposed_via_proxy indicates that the fake Omaha service is exposed to a
- // DUT via a proxy server, instead of exposing to the DUT directly. So the
- // service exposing won't be impacted by rebooting the DUT, disconnecting the
- // DUT network, etc.
- bool exposed_via_proxy = 5;
- // critical_update instructs the fake Omaha created that the update is
- // critical if set.
- bool critical_update = 6;
- // return_noupdate_starting indicates from which update check to start returning noupdate.
- // It MUST be 0 or greater.
- // When set to 0 (the default value), disables returning noupdate.
- // If set to positive N, returns noupdate for the Nth check and for every
- // check thereafter.
- // For example, if set to 1, returns noupdate starting from the first check,
- // i.e., always returns noupdate.
- int32 return_noupdate_starting = 7;
- // omaha_url is the current fake Omaha service URL which is reachable from
- // the specified DUT.
- // The URL can be used as input of the update engine client of the DUT.
- // The implementation MUST set it after creating the fake Omaha service.
- // Clients SHOULD NOT set it.
- string omaha_url = 8;
-}
-
-message CreateFakeOmahaRequest {
- // fake_omaha is the fake omaha service to be created.
- FakeOmaha fake_omaha = 1;
-}
-
-message DeleteFakeOmahaRequest {
- // The resource name of the fake Omaha service to stop.
- // Format: fakeOmahaServices/{fake-omaha-id}
- string name = 1;
-}
diff --git a/server/hosts/tls_client/autotest_common_pb2.py b/server/hosts/tls_client/autotest_common_pb2.py
deleted file mode 100644
index 6ad02bd..0000000
--- a/server/hosts/tls_client/autotest_common_pb2.py
+++ /dev/null
@@ -1,1353 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: autotest_common.proto
-
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
-
-import common
-from autotest_lib.server.hosts.tls_client.dependencies.longrunning import operations_pb2 as dependencies_dot_longrunning_dot_operations__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='autotest_common.proto',
- package='chromiumos.config.api.test.tls',
- syntax='proto3',
- serialized_options=b'Z1go.chromium.org/chromiumos/config/go/api/test/tls',
- serialized_pb=b'\n\x15\x61utotest_common.proto\x12\x1e\x63hromiumos.config.api.test.tls\x1a\x1bgoogle/protobuf/empty.proto\x1a)dependencies/longrunning/operations.proto\"\xc3\x01\n\x15\x45xecDutCommandRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x03 \x03(\t\x12\r\n\x05stdin\x18\x04 \x01(\x0c\x12\x36\n\x06stdout\x18\x05 \x01(\x0e\x32&.chromiumos.config.api.test.tls.Output\x12\x36\n\x06stderr\x18\x06 \x01(\x0e\x32&.chromiumos.config.api.test.tls.Output\"\xe2\x01\n\x16\x45xecDutCommandResponse\x12R\n\texit_info\x18\x01 \x01(\x0b\x32?.chromiumos.config.api.test.tls.ExecDutCommandResponse.ExitInfo\x12\x0e\n\x06stdout\x18\x02 \x01(\x0c\x12\x0e\n\x06stderr\x18\x03 \x01(\x0c\x1aT\n\x08\x45xitInfo\x12\x0e\n\x06status\x18\x01 \x01(\x05\x12\x10\n\x08signaled\x18\x02 \x01(\x08\x12\x0f\n\x07started\x18\x03 \x01(\x08\x12\x15\n\rerror_message\x18\x04 \x01(\t\"\xb0\x02\n\x13ProvisionDutRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12P\n\x05image\x18\x02 \x01(\x0b\x32\x41.chromiumos.config.api.test.tls.ProvisionDutRequest.ChromeOSImage\x12N\n\tdlc_specs\x18\x03 \x03(\x0b\x32;.chromiumos.config.api.test.tls.ProvisionDutRequest.DLCSpec\x12\x19\n\x11preserve_stateful\x18\x04 \x01(\x08\x1a\x37\n\rChromeOSImage\x12\x18\n\x0egs_path_prefix\x18\x01 \x01(\tH\x00\x42\x0c\n\npath_oneof\x1a\x15\n\x07\x44LCSpec\x12\n\n\x02id\x18\x01 \x01(\t\"\xf9\x01\n\x14ProvisionDutResponse\"\xe0\x01\n\x06Reason\x12\x1a\n\x16REASON_INVALID_REQUEST\x10\x00\x12(\n$REASON_DUT_UNREACHABLE_PRE_PROVISION\x10\x01\x12#\n\x1fREASON_DOWNLOADING_IMAGE_FAILED\x10\x02\x12 \n\x1cREASON_PROVISIONING_TIMEDOUT\x10\x03\x12\x1e\n\x1aREASON_PROVISIONING_FAILED\x10\x04\x12)\n%REASON_DUT_UNREACHABLE_POST_PROVISION\x10\x05\"\x16\n\x14ProvisionDutMetadata\"\xb0\x01\n\x16ProvisionLacrosRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Q\n\x05image\x18\x02 
\x01(\x0b\x32\x42.chromiumos.config.api.test.tls.ProvisionLacrosRequest.LacrosImage\x1a\x35\n\x0bLacrosImage\x12\x18\n\x0egs_path_prefix\x18\x01 \x01(\tH\x00\x42\x0c\n\npath_oneof\"\xd1\x01\n\x17ProvisionLacrosResponse\"\xb5\x01\n\x06Reason\x12\x1a\n\x16REASON_INVALID_REQUEST\x10\x00\x12(\n$REASON_DUT_UNREACHABLE_PRE_PROVISION\x10\x01\x12#\n\x1fREASON_DOWNLOADING_IMAGE_FAILED\x10\x02\x12 \n\x1cREASON_PROVISIONING_TIMEDOUT\x10\x03\x12\x1e\n\x1aREASON_PROVISIONING_FAILED\x10\x04\"\x19\n\x17ProvisionLacrosMetadata\"6\n\x13\x46\x65tchCrashesRequest\x12\x0b\n\x03\x64ut\x18\x01 \x01(\t\x12\x12\n\nfetch_core\x18\x02 \x01(\x08\"\xb7\x01\n\x14\x46\x65tchCrashesResponse\x12\x10\n\x08\x63rash_id\x18\x01 \x01(\x03\x12:\n\x05\x63rash\x18\x02 \x01(\x0b\x32).chromiumos.config.api.test.tls.CrashInfoH\x00\x12\x39\n\x04\x62lob\x18\x03 \x01(\x0b\x32).chromiumos.config.api.test.tls.CrashBlobH\x00\x12\x0e\n\x04\x63ore\x18\x04 \x01(\x0cH\x00\x42\x06\n\x04\x64\x61ta\"\xbe\x01\n\tCrashInfo\x12\x11\n\texec_name\x18\x01 \x01(\t\x12\x0c\n\x04prod\x18\x02 \x01(\t\x12\x0b\n\x03ver\x18\x03 \x01(\t\x12\x0b\n\x03sig\x18\x04 \x01(\t\x12$\n\x1cin_progress_integration_test\x18\x05 \x01(\t\x12\x11\n\tcollector\x18\x06 \x01(\t\x12=\n\x06\x66ields\x18\x07 \x03(\x0b\x32-.chromiumos.config.api.test.tls.CrashMetadata\"*\n\rCrashMetadata\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0c\n\x04text\x18\x02 \x01(\t\"8\n\tCrashBlob\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x0c\n\x04\x62lob\x18\x02 \x01(\x0c\x12\x10\n\x08\x66ilename\x18\x03 \x01(\t\"7\n\rChromeOsImage\x12\x18\n\x0egs_path_prefix\x18\x01 \x01(\tH\x00\x42\x0c\n\npath_oneof\"\xaa\x03\n\tFakeOmaha\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03\x64ut\x18\x02 \x01(\t\x12\x43\n\x0ctarget_build\x18\x03 \x01(\x0b\x32-.chromiumos.config.api.test.tls.ChromeOsImage\x12\x43\n\x08payloads\x18\x04 \x03(\x0b\x32\x31.chromiumos.config.api.test.tls.FakeOmaha.Payload\x12\x19\n\x11\x65xposed_via_proxy\x18\x05 \x01(\x08\x12\x17\n\x0f\x63ritical_update\x18\x06 \x01(\x08\x12 
\n\x18return_noupdate_starting\x18\x07 \x01(\x05\x12\x11\n\tomaha_url\x18\x08 \x01(\t\x1a\x8e\x01\n\x07Payload\x12\n\n\x02id\x18\x01 \x01(\t\x12\x44\n\x04type\x18\x02 \x01(\x0e\x32\x36.chromiumos.config.api.test.tls.FakeOmaha.Payload.Type\"1\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04\x46ULL\x10\x01\x12\t\n\x05\x44\x45LTA\x10\x02\"W\n\x16\x43reateFakeOmahaRequest\x12=\n\nfake_omaha\x18\x01 \x01(\x0b\x32).chromiumos.config.api.test.tls.FakeOmaha\"&\n\x16\x44\x65leteFakeOmahaRequest\x12\x0c\n\x04name\x18\x01 \x01(\t*,\n\x06Output\x12\x0f\n\x0bOUTPUT_PIPE\x10\x00\x12\x11\n\rOUTPUT_STDOUT\x10\x01\x32\x9a\x06\n\x06\x43ommon\x12\x81\x01\n\x0e\x45xecDutCommand\x12\x35.chromiumos.config.api.test.tls.ExecDutCommandRequest\x1a\x36.chromiumos.config.api.test.tls.ExecDutCommandResponse0\x01\x12\x93\x01\n\x0cProvisionDut\x12\x33.chromiumos.config.api.test.tls.ProvisionDutRequest\x1a\x1d.google.longrunning.Operation\"/\xca\x41,\n\x14ProvisionDutResponse\x12\x14ProvisionDutMetadata\x12\x9f\x01\n\x0fProvisionLacros\x12\x36.chromiumos.config.api.test.tls.ProvisionLacrosRequest\x1a\x1d.google.longrunning.Operation\"5\xca\x41\x32\n\x17ProvisionLacrosResponse\x12\x17ProvisionLacrosMetadata\x12{\n\x0c\x46\x65tchCrashes\x12\x33.chromiumos.config.api.test.tls.FetchCrashesRequest\x1a\x34.chromiumos.config.api.test.tls.FetchCrashesResponse0\x01\x12t\n\x0f\x43reateFakeOmaha\x12\x36.chromiumos.config.api.test.tls.CreateFakeOmahaRequest\x1a).chromiumos.config.api.test.tls.FakeOmaha\x12\x61\n\x0f\x44\x65leteFakeOmaha\x12\x36.chromiumos.config.api.test.tls.DeleteFakeOmahaRequest\x1a\x16.google.protobuf.EmptyB3Z1go.chromium.org/chromiumos/config/go/api/test/tlsb\x06proto3'
- ,
- dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,dependencies_dot_longrunning_dot_operations__pb2.DESCRIPTOR,])
-
-_OUTPUT = _descriptor.EnumDescriptor(
- name='Output',
- full_name='chromiumos.config.api.test.tls.Output',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='OUTPUT_PIPE', index=0, number=0,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='OUTPUT_STDOUT', index=1, number=1,
- serialized_options=None,
- type=None),
- ],
- containing_type=None,
- serialized_options=None,
- serialized_start=2709,
- serialized_end=2753,
-)
-_sym_db.RegisterEnumDescriptor(_OUTPUT)
-
-Output = enum_type_wrapper.EnumTypeWrapper(_OUTPUT)
-OUTPUT_PIPE = 0
-OUTPUT_STDOUT = 1
-
-
-_PROVISIONDUTRESPONSE_REASON = _descriptor.EnumDescriptor(
- name='Reason',
- full_name='chromiumos.config.api.test.tls.ProvisionDutResponse.Reason',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='REASON_INVALID_REQUEST', index=0, number=0,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_DUT_UNREACHABLE_PRE_PROVISION', index=1, number=1,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_DOWNLOADING_IMAGE_FAILED', index=2, number=2,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_PROVISIONING_TIMEDOUT', index=3, number=3,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_PROVISIONING_FAILED', index=4, number=4,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_DUT_UNREACHABLE_POST_PROVISION', index=5, number=5,
- serialized_options=None,
- type=None),
- ],
- containing_type=None,
- serialized_options=None,
- serialized_start=889,
- serialized_end=1113,
-)
-_sym_db.RegisterEnumDescriptor(_PROVISIONDUTRESPONSE_REASON)
-
-_PROVISIONLACROSRESPONSE_REASON = _descriptor.EnumDescriptor(
- name='Reason',
- full_name='chromiumos.config.api.test.tls.ProvisionLacrosResponse.Reason',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='REASON_INVALID_REQUEST', index=0, number=0,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_DUT_UNREACHABLE_PRE_PROVISION', index=1, number=1,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_DOWNLOADING_IMAGE_FAILED', index=2, number=2,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_PROVISIONING_TIMEDOUT', index=3, number=3,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='REASON_PROVISIONING_FAILED', index=4, number=4,
- serialized_options=None,
- type=None),
- ],
- containing_type=None,
- serialized_options=None,
- serialized_start=889,
- serialized_end=1070,
-)
-_sym_db.RegisterEnumDescriptor(_PROVISIONLACROSRESPONSE_REASON)
-
-_FAKEOMAHA_PAYLOAD_TYPE = _descriptor.EnumDescriptor(
- name='Type',
- full_name='chromiumos.config.api.test.tls.FakeOmaha.Payload.Type',
- filename=None,
- file=DESCRIPTOR,
- values=[
- _descriptor.EnumValueDescriptor(
- name='TYPE_UNSPECIFIED', index=0, number=0,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='FULL', index=1, number=1,
- serialized_options=None,
- type=None),
- _descriptor.EnumValueDescriptor(
- name='DELTA', index=2, number=2,
- serialized_options=None,
- type=None),
- ],
- containing_type=None,
- serialized_options=None,
- serialized_start=2529,
- serialized_end=2578,
-)
-_sym_db.RegisterEnumDescriptor(_FAKEOMAHA_PAYLOAD_TYPE)
-
-
-_EXECDUTCOMMANDREQUEST = _descriptor.Descriptor(
- name='ExecDutCommandRequest',
- full_name='chromiumos.config.api.test.tls.ExecDutCommandRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='chromiumos.config.api.test.tls.ExecDutCommandRequest.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='command', full_name='chromiumos.config.api.test.tls.ExecDutCommandRequest.command', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='args', full_name='chromiumos.config.api.test.tls.ExecDutCommandRequest.args', index=2,
- number=3, type=9, cpp_type=9, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='stdin', full_name='chromiumos.config.api.test.tls.ExecDutCommandRequest.stdin', index=3,
- number=4, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=b"",
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='stdout', full_name='chromiumos.config.api.test.tls.ExecDutCommandRequest.stdout', index=4,
- number=5, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='stderr', full_name='chromiumos.config.api.test.tls.ExecDutCommandRequest.stderr', index=5,
- number=6, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=130,
- serialized_end=325,
-)
-
-
-_EXECDUTCOMMANDRESPONSE_EXITINFO = _descriptor.Descriptor(
- name='ExitInfo',
- full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse.ExitInfo',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='status', full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse.ExitInfo.status', index=0,
- number=1, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='signaled', full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse.ExitInfo.signaled', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='started', full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse.ExitInfo.started', index=2,
- number=3, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='error_message', full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse.ExitInfo.error_message', index=3,
- number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=470,
- serialized_end=554,
-)
-
-_EXECDUTCOMMANDRESPONSE = _descriptor.Descriptor(
- name='ExecDutCommandResponse',
- full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='exit_info', full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse.exit_info', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='stdout', full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse.stdout', index=1,
- number=2, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=b"",
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='stderr', full_name='chromiumos.config.api.test.tls.ExecDutCommandResponse.stderr', index=2,
- number=3, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=b"",
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[_EXECDUTCOMMANDRESPONSE_EXITINFO, ],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=328,
- serialized_end=554,
-)
-
-
-_PROVISIONDUTREQUEST_CHROMEOSIMAGE = _descriptor.Descriptor(
- name='ChromeOSImage',
- full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.ChromeOSImage',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='gs_path_prefix', full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.ChromeOSImage.gs_path_prefix', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='path_oneof', full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.ChromeOSImage.path_oneof',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=783,
- serialized_end=838,
-)
-
-_PROVISIONDUTREQUEST_DLCSPEC = _descriptor.Descriptor(
- name='DLCSpec',
- full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.DLCSpec',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='id', full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.DLCSpec.id', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=840,
- serialized_end=861,
-)
-
-_PROVISIONDUTREQUEST = _descriptor.Descriptor(
- name='ProvisionDutRequest',
- full_name='chromiumos.config.api.test.tls.ProvisionDutRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='image', full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.image', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='dlc_specs', full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.dlc_specs', index=2,
- number=3, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='preserve_stateful', full_name='chromiumos.config.api.test.tls.ProvisionDutRequest.preserve_stateful', index=3,
- number=4, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[_PROVISIONDUTREQUEST_CHROMEOSIMAGE, _PROVISIONDUTREQUEST_DLCSPEC, ],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=557,
- serialized_end=861,
-)
-
-
-_PROVISIONDUTRESPONSE = _descriptor.Descriptor(
- name='ProvisionDutResponse',
- full_name='chromiumos.config.api.test.tls.ProvisionDutResponse',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- _PROVISIONDUTRESPONSE_REASON,
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=864,
- serialized_end=1113,
-)
-
-
-_PROVISIONDUTMETADATA = _descriptor.Descriptor(
- name='ProvisionDutMetadata',
- full_name='chromiumos.config.api.test.tls.ProvisionDutMetadata',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1115,
- serialized_end=1137,
-)
-
-
-_PROVISIONLACROSREQUEST_LACROSIMAGE = _descriptor.Descriptor(
- name='LacrosImage',
- full_name='chromiumos.config.api.test.tls.ProvisionLacrosRequest.LacrosImage',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='gs_path_prefix', full_name='chromiumos.config.api.test.tls.ProvisionLacrosRequest.LacrosImage.gs_path_prefix', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='path_oneof', full_name='chromiumos.config.api.test.tls.ProvisionLacrosRequest.LacrosImage.path_oneof',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=1263,
- serialized_end=1316,
-)
-
-_PROVISIONLACROSREQUEST = _descriptor.Descriptor(
- name='ProvisionLacrosRequest',
- full_name='chromiumos.config.api.test.tls.ProvisionLacrosRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='chromiumos.config.api.test.tls.ProvisionLacrosRequest.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='image', full_name='chromiumos.config.api.test.tls.ProvisionLacrosRequest.image', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[_PROVISIONLACROSREQUEST_LACROSIMAGE, ],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1140,
- serialized_end=1316,
-)
-
-
-_PROVISIONLACROSRESPONSE = _descriptor.Descriptor(
- name='ProvisionLacrosResponse',
- full_name='chromiumos.config.api.test.tls.ProvisionLacrosResponse',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- _PROVISIONLACROSRESPONSE_REASON,
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1319,
- serialized_end=1528,
-)
-
-
-_PROVISIONLACROSMETADATA = _descriptor.Descriptor(
- name='ProvisionLacrosMetadata',
- full_name='chromiumos.config.api.test.tls.ProvisionLacrosMetadata',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1530,
- serialized_end=1555,
-)
-
-
-_FETCHCRASHESREQUEST = _descriptor.Descriptor(
- name='FetchCrashesRequest',
- full_name='chromiumos.config.api.test.tls.FetchCrashesRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='dut', full_name='chromiumos.config.api.test.tls.FetchCrashesRequest.dut', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='fetch_core', full_name='chromiumos.config.api.test.tls.FetchCrashesRequest.fetch_core', index=1,
- number=2, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1557,
- serialized_end=1611,
-)
-
-
-_FETCHCRASHESRESPONSE = _descriptor.Descriptor(
- name='FetchCrashesResponse',
- full_name='chromiumos.config.api.test.tls.FetchCrashesResponse',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='crash_id', full_name='chromiumos.config.api.test.tls.FetchCrashesResponse.crash_id', index=0,
- number=1, type=3, cpp_type=2, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='crash', full_name='chromiumos.config.api.test.tls.FetchCrashesResponse.crash', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='blob', full_name='chromiumos.config.api.test.tls.FetchCrashesResponse.blob', index=2,
- number=3, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='core', full_name='chromiumos.config.api.test.tls.FetchCrashesResponse.core', index=3,
- number=4, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=b"",
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='data', full_name='chromiumos.config.api.test.tls.FetchCrashesResponse.data',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=1614,
- serialized_end=1797,
-)
-
-
-_CRASHINFO = _descriptor.Descriptor(
- name='CrashInfo',
- full_name='chromiumos.config.api.test.tls.CrashInfo',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='exec_name', full_name='chromiumos.config.api.test.tls.CrashInfo.exec_name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='prod', full_name='chromiumos.config.api.test.tls.CrashInfo.prod', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='ver', full_name='chromiumos.config.api.test.tls.CrashInfo.ver', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='sig', full_name='chromiumos.config.api.test.tls.CrashInfo.sig', index=3,
- number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='in_progress_integration_test', full_name='chromiumos.config.api.test.tls.CrashInfo.in_progress_integration_test', index=4,
- number=5, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='collector', full_name='chromiumos.config.api.test.tls.CrashInfo.collector', index=5,
- number=6, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='fields', full_name='chromiumos.config.api.test.tls.CrashInfo.fields', index=6,
- number=7, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1800,
- serialized_end=1990,
-)
-
-
-_CRASHMETADATA = _descriptor.Descriptor(
- name='CrashMetadata',
- full_name='chromiumos.config.api.test.tls.CrashMetadata',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='key', full_name='chromiumos.config.api.test.tls.CrashMetadata.key', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='text', full_name='chromiumos.config.api.test.tls.CrashMetadata.text', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=1992,
- serialized_end=2034,
-)
-
-
-_CRASHBLOB = _descriptor.Descriptor(
- name='CrashBlob',
- full_name='chromiumos.config.api.test.tls.CrashBlob',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='key', full_name='chromiumos.config.api.test.tls.CrashBlob.key', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='blob', full_name='chromiumos.config.api.test.tls.CrashBlob.blob', index=1,
- number=2, type=12, cpp_type=9, label=1,
- has_default_value=False, default_value=b"",
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='filename', full_name='chromiumos.config.api.test.tls.CrashBlob.filename', index=2,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=2036,
- serialized_end=2092,
-)
-
-
-_CHROMEOSIMAGE = _descriptor.Descriptor(
- name='ChromeOsImage',
- full_name='chromiumos.config.api.test.tls.ChromeOsImage',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='gs_path_prefix', full_name='chromiumos.config.api.test.tls.ChromeOsImage.gs_path_prefix', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='path_oneof', full_name='chromiumos.config.api.test.tls.ChromeOsImage.path_oneof',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=2094,
- serialized_end=2149,
-)
-
-
-_FAKEOMAHA_PAYLOAD = _descriptor.Descriptor(
- name='Payload',
- full_name='chromiumos.config.api.test.tls.FakeOmaha.Payload',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='id', full_name='chromiumos.config.api.test.tls.FakeOmaha.Payload.id', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='type', full_name='chromiumos.config.api.test.tls.FakeOmaha.Payload.type', index=1,
- number=2, type=14, cpp_type=8, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- _FAKEOMAHA_PAYLOAD_TYPE,
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=2436,
- serialized_end=2578,
-)
-
-_FAKEOMAHA = _descriptor.Descriptor(
- name='FakeOmaha',
- full_name='chromiumos.config.api.test.tls.FakeOmaha',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='chromiumos.config.api.test.tls.FakeOmaha.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='dut', full_name='chromiumos.config.api.test.tls.FakeOmaha.dut', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='target_build', full_name='chromiumos.config.api.test.tls.FakeOmaha.target_build', index=2,
- number=3, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='payloads', full_name='chromiumos.config.api.test.tls.FakeOmaha.payloads', index=3,
- number=4, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='exposed_via_proxy', full_name='chromiumos.config.api.test.tls.FakeOmaha.exposed_via_proxy', index=4,
- number=5, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='critical_update', full_name='chromiumos.config.api.test.tls.FakeOmaha.critical_update', index=5,
- number=6, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='return_noupdate_starting', full_name='chromiumos.config.api.test.tls.FakeOmaha.return_noupdate_starting', index=6,
- number=7, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='omaha_url', full_name='chromiumos.config.api.test.tls.FakeOmaha.omaha_url', index=7,
- number=8, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[_FAKEOMAHA_PAYLOAD, ],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=2152,
- serialized_end=2578,
-)
-
-
-_CREATEFAKEOMAHAREQUEST = _descriptor.Descriptor(
- name='CreateFakeOmahaRequest',
- full_name='chromiumos.config.api.test.tls.CreateFakeOmahaRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='fake_omaha', full_name='chromiumos.config.api.test.tls.CreateFakeOmahaRequest.fake_omaha', index=0,
- number=1, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=2580,
- serialized_end=2667,
-)
-
-
-_DELETEFAKEOMAHAREQUEST = _descriptor.Descriptor(
- name='DeleteFakeOmahaRequest',
- full_name='chromiumos.config.api.test.tls.DeleteFakeOmahaRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='chromiumos.config.api.test.tls.DeleteFakeOmahaRequest.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=2669,
- serialized_end=2707,
-)
-
-_EXECDUTCOMMANDREQUEST.fields_by_name['stdout'].enum_type = _OUTPUT
-_EXECDUTCOMMANDREQUEST.fields_by_name['stderr'].enum_type = _OUTPUT
-_EXECDUTCOMMANDRESPONSE_EXITINFO.containing_type = _EXECDUTCOMMANDRESPONSE
-_EXECDUTCOMMANDRESPONSE.fields_by_name['exit_info'].message_type = _EXECDUTCOMMANDRESPONSE_EXITINFO
-_PROVISIONDUTREQUEST_CHROMEOSIMAGE.containing_type = _PROVISIONDUTREQUEST
-_PROVISIONDUTREQUEST_CHROMEOSIMAGE.oneofs_by_name['path_oneof'].fields.append(
- _PROVISIONDUTREQUEST_CHROMEOSIMAGE.fields_by_name['gs_path_prefix'])
-_PROVISIONDUTREQUEST_CHROMEOSIMAGE.fields_by_name['gs_path_prefix'].containing_oneof = _PROVISIONDUTREQUEST_CHROMEOSIMAGE.oneofs_by_name['path_oneof']
-_PROVISIONDUTREQUEST_DLCSPEC.containing_type = _PROVISIONDUTREQUEST
-_PROVISIONDUTREQUEST.fields_by_name['image'].message_type = _PROVISIONDUTREQUEST_CHROMEOSIMAGE
-_PROVISIONDUTREQUEST.fields_by_name['dlc_specs'].message_type = _PROVISIONDUTREQUEST_DLCSPEC
-_PROVISIONDUTRESPONSE_REASON.containing_type = _PROVISIONDUTRESPONSE
-_PROVISIONLACROSREQUEST_LACROSIMAGE.containing_type = _PROVISIONLACROSREQUEST
-_PROVISIONLACROSREQUEST_LACROSIMAGE.oneofs_by_name['path_oneof'].fields.append(
- _PROVISIONLACROSREQUEST_LACROSIMAGE.fields_by_name['gs_path_prefix'])
-_PROVISIONLACROSREQUEST_LACROSIMAGE.fields_by_name['gs_path_prefix'].containing_oneof = _PROVISIONLACROSREQUEST_LACROSIMAGE.oneofs_by_name['path_oneof']
-_PROVISIONLACROSREQUEST.fields_by_name['image'].message_type = _PROVISIONLACROSREQUEST_LACROSIMAGE
-_PROVISIONLACROSRESPONSE_REASON.containing_type = _PROVISIONLACROSRESPONSE
-_FETCHCRASHESRESPONSE.fields_by_name['crash'].message_type = _CRASHINFO
-_FETCHCRASHESRESPONSE.fields_by_name['blob'].message_type = _CRASHBLOB
-_FETCHCRASHESRESPONSE.oneofs_by_name['data'].fields.append(
- _FETCHCRASHESRESPONSE.fields_by_name['crash'])
-_FETCHCRASHESRESPONSE.fields_by_name['crash'].containing_oneof = _FETCHCRASHESRESPONSE.oneofs_by_name['data']
-_FETCHCRASHESRESPONSE.oneofs_by_name['data'].fields.append(
- _FETCHCRASHESRESPONSE.fields_by_name['blob'])
-_FETCHCRASHESRESPONSE.fields_by_name['blob'].containing_oneof = _FETCHCRASHESRESPONSE.oneofs_by_name['data']
-_FETCHCRASHESRESPONSE.oneofs_by_name['data'].fields.append(
- _FETCHCRASHESRESPONSE.fields_by_name['core'])
-_FETCHCRASHESRESPONSE.fields_by_name['core'].containing_oneof = _FETCHCRASHESRESPONSE.oneofs_by_name['data']
-_CRASHINFO.fields_by_name['fields'].message_type = _CRASHMETADATA
-_CHROMEOSIMAGE.oneofs_by_name['path_oneof'].fields.append(
- _CHROMEOSIMAGE.fields_by_name['gs_path_prefix'])
-_CHROMEOSIMAGE.fields_by_name['gs_path_prefix'].containing_oneof = _CHROMEOSIMAGE.oneofs_by_name['path_oneof']
-_FAKEOMAHA_PAYLOAD.fields_by_name['type'].enum_type = _FAKEOMAHA_PAYLOAD_TYPE
-_FAKEOMAHA_PAYLOAD.containing_type = _FAKEOMAHA
-_FAKEOMAHA_PAYLOAD_TYPE.containing_type = _FAKEOMAHA_PAYLOAD
-_FAKEOMAHA.fields_by_name['target_build'].message_type = _CHROMEOSIMAGE
-_FAKEOMAHA.fields_by_name['payloads'].message_type = _FAKEOMAHA_PAYLOAD
-_CREATEFAKEOMAHAREQUEST.fields_by_name['fake_omaha'].message_type = _FAKEOMAHA
-DESCRIPTOR.message_types_by_name['ExecDutCommandRequest'] = _EXECDUTCOMMANDREQUEST
-DESCRIPTOR.message_types_by_name['ExecDutCommandResponse'] = _EXECDUTCOMMANDRESPONSE
-DESCRIPTOR.message_types_by_name['ProvisionDutRequest'] = _PROVISIONDUTREQUEST
-DESCRIPTOR.message_types_by_name['ProvisionDutResponse'] = _PROVISIONDUTRESPONSE
-DESCRIPTOR.message_types_by_name['ProvisionDutMetadata'] = _PROVISIONDUTMETADATA
-DESCRIPTOR.message_types_by_name['ProvisionLacrosRequest'] = _PROVISIONLACROSREQUEST
-DESCRIPTOR.message_types_by_name['ProvisionLacrosResponse'] = _PROVISIONLACROSRESPONSE
-DESCRIPTOR.message_types_by_name['ProvisionLacrosMetadata'] = _PROVISIONLACROSMETADATA
-DESCRIPTOR.message_types_by_name['FetchCrashesRequest'] = _FETCHCRASHESREQUEST
-DESCRIPTOR.message_types_by_name['FetchCrashesResponse'] = _FETCHCRASHESRESPONSE
-DESCRIPTOR.message_types_by_name['CrashInfo'] = _CRASHINFO
-DESCRIPTOR.message_types_by_name['CrashMetadata'] = _CRASHMETADATA
-DESCRIPTOR.message_types_by_name['CrashBlob'] = _CRASHBLOB
-DESCRIPTOR.message_types_by_name['ChromeOsImage'] = _CHROMEOSIMAGE
-DESCRIPTOR.message_types_by_name['FakeOmaha'] = _FAKEOMAHA
-DESCRIPTOR.message_types_by_name['CreateFakeOmahaRequest'] = _CREATEFAKEOMAHAREQUEST
-DESCRIPTOR.message_types_by_name['DeleteFakeOmahaRequest'] = _DELETEFAKEOMAHAREQUEST
-DESCRIPTOR.enum_types_by_name['Output'] = _OUTPUT
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-ExecDutCommandRequest = _reflection.GeneratedProtocolMessageType('ExecDutCommandRequest', (_message.Message,), {
- 'DESCRIPTOR' : _EXECDUTCOMMANDREQUEST,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ExecDutCommandRequest)
- })
-_sym_db.RegisterMessage(ExecDutCommandRequest)
-
-ExecDutCommandResponse = _reflection.GeneratedProtocolMessageType('ExecDutCommandResponse', (_message.Message,), {
-
- 'ExitInfo' : _reflection.GeneratedProtocolMessageType('ExitInfo', (_message.Message,), {
- 'DESCRIPTOR' : _EXECDUTCOMMANDRESPONSE_EXITINFO,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ExecDutCommandResponse.ExitInfo)
- })
- ,
- 'DESCRIPTOR' : _EXECDUTCOMMANDRESPONSE,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ExecDutCommandResponse)
- })
-_sym_db.RegisterMessage(ExecDutCommandResponse)
-_sym_db.RegisterMessage(ExecDutCommandResponse.ExitInfo)
-
-ProvisionDutRequest = _reflection.GeneratedProtocolMessageType('ProvisionDutRequest', (_message.Message,), {
-
- 'ChromeOSImage' : _reflection.GeneratedProtocolMessageType('ChromeOSImage', (_message.Message,), {
- 'DESCRIPTOR' : _PROVISIONDUTREQUEST_CHROMEOSIMAGE,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionDutRequest.ChromeOSImage)
- })
- ,
-
- 'DLCSpec' : _reflection.GeneratedProtocolMessageType('DLCSpec', (_message.Message,), {
- 'DESCRIPTOR' : _PROVISIONDUTREQUEST_DLCSPEC,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionDutRequest.DLCSpec)
- })
- ,
- 'DESCRIPTOR' : _PROVISIONDUTREQUEST,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionDutRequest)
- })
-_sym_db.RegisterMessage(ProvisionDutRequest)
-_sym_db.RegisterMessage(ProvisionDutRequest.ChromeOSImage)
-_sym_db.RegisterMessage(ProvisionDutRequest.DLCSpec)
-
-ProvisionDutResponse = _reflection.GeneratedProtocolMessageType('ProvisionDutResponse', (_message.Message,), {
- 'DESCRIPTOR' : _PROVISIONDUTRESPONSE,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionDutResponse)
- })
-_sym_db.RegisterMessage(ProvisionDutResponse)
-
-ProvisionDutMetadata = _reflection.GeneratedProtocolMessageType('ProvisionDutMetadata', (_message.Message,), {
- 'DESCRIPTOR' : _PROVISIONDUTMETADATA,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionDutMetadata)
- })
-_sym_db.RegisterMessage(ProvisionDutMetadata)
-
-ProvisionLacrosRequest = _reflection.GeneratedProtocolMessageType('ProvisionLacrosRequest', (_message.Message,), {
-
- 'LacrosImage' : _reflection.GeneratedProtocolMessageType('LacrosImage', (_message.Message,), {
- 'DESCRIPTOR' : _PROVISIONLACROSREQUEST_LACROSIMAGE,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionLacrosRequest.LacrosImage)
- })
- ,
- 'DESCRIPTOR' : _PROVISIONLACROSREQUEST,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionLacrosRequest)
- })
-_sym_db.RegisterMessage(ProvisionLacrosRequest)
-_sym_db.RegisterMessage(ProvisionLacrosRequest.LacrosImage)
-
-ProvisionLacrosResponse = _reflection.GeneratedProtocolMessageType('ProvisionLacrosResponse', (_message.Message,), {
- 'DESCRIPTOR' : _PROVISIONLACROSRESPONSE,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionLacrosResponse)
- })
-_sym_db.RegisterMessage(ProvisionLacrosResponse)
-
-ProvisionLacrosMetadata = _reflection.GeneratedProtocolMessageType('ProvisionLacrosMetadata', (_message.Message,), {
- 'DESCRIPTOR' : _PROVISIONLACROSMETADATA,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ProvisionLacrosMetadata)
- })
-_sym_db.RegisterMessage(ProvisionLacrosMetadata)
-
-FetchCrashesRequest = _reflection.GeneratedProtocolMessageType('FetchCrashesRequest', (_message.Message,), {
- 'DESCRIPTOR' : _FETCHCRASHESREQUEST,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.FetchCrashesRequest)
- })
-_sym_db.RegisterMessage(FetchCrashesRequest)
-
-FetchCrashesResponse = _reflection.GeneratedProtocolMessageType('FetchCrashesResponse', (_message.Message,), {
- 'DESCRIPTOR' : _FETCHCRASHESRESPONSE,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.FetchCrashesResponse)
- })
-_sym_db.RegisterMessage(FetchCrashesResponse)
-
-CrashInfo = _reflection.GeneratedProtocolMessageType('CrashInfo', (_message.Message,), {
- 'DESCRIPTOR' : _CRASHINFO,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.CrashInfo)
- })
-_sym_db.RegisterMessage(CrashInfo)
-
-CrashMetadata = _reflection.GeneratedProtocolMessageType('CrashMetadata', (_message.Message,), {
- 'DESCRIPTOR' : _CRASHMETADATA,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.CrashMetadata)
- })
-_sym_db.RegisterMessage(CrashMetadata)
-
-CrashBlob = _reflection.GeneratedProtocolMessageType('CrashBlob', (_message.Message,), {
- 'DESCRIPTOR' : _CRASHBLOB,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.CrashBlob)
- })
-_sym_db.RegisterMessage(CrashBlob)
-
-ChromeOsImage = _reflection.GeneratedProtocolMessageType('ChromeOsImage', (_message.Message,), {
- 'DESCRIPTOR' : _CHROMEOSIMAGE,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.ChromeOsImage)
- })
-_sym_db.RegisterMessage(ChromeOsImage)
-
-FakeOmaha = _reflection.GeneratedProtocolMessageType('FakeOmaha', (_message.Message,), {
-
- 'Payload' : _reflection.GeneratedProtocolMessageType('Payload', (_message.Message,), {
- 'DESCRIPTOR' : _FAKEOMAHA_PAYLOAD,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.FakeOmaha.Payload)
- })
- ,
- 'DESCRIPTOR' : _FAKEOMAHA,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.FakeOmaha)
- })
-_sym_db.RegisterMessage(FakeOmaha)
-_sym_db.RegisterMessage(FakeOmaha.Payload)
-
-CreateFakeOmahaRequest = _reflection.GeneratedProtocolMessageType('CreateFakeOmahaRequest', (_message.Message,), {
- 'DESCRIPTOR' : _CREATEFAKEOMAHAREQUEST,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.CreateFakeOmahaRequest)
- })
-_sym_db.RegisterMessage(CreateFakeOmahaRequest)
-
-DeleteFakeOmahaRequest = _reflection.GeneratedProtocolMessageType('DeleteFakeOmahaRequest', (_message.Message,), {
- 'DESCRIPTOR' : _DELETEFAKEOMAHAREQUEST,
- '__module__' : 'autotest_common_pb2'
- # @@protoc_insertion_point(class_scope:chromiumos.config.api.test.tls.DeleteFakeOmahaRequest)
- })
-_sym_db.RegisterMessage(DeleteFakeOmahaRequest)
-
-
-DESCRIPTOR._options = None
-
-_COMMON = _descriptor.ServiceDescriptor(
- name='Common',
- full_name='chromiumos.config.api.test.tls.Common',
- file=DESCRIPTOR,
- index=0,
- serialized_options=None,
- serialized_start=2756,
- serialized_end=3550,
- methods=[
- _descriptor.MethodDescriptor(
- name='ExecDutCommand',
- full_name='chromiumos.config.api.test.tls.Common.ExecDutCommand',
- index=0,
- containing_service=None,
- input_type=_EXECDUTCOMMANDREQUEST,
- output_type=_EXECDUTCOMMANDRESPONSE,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='ProvisionDut',
- full_name='chromiumos.config.api.test.tls.Common.ProvisionDut',
- index=1,
- containing_service=None,
- input_type=_PROVISIONDUTREQUEST,
- output_type=dependencies_dot_longrunning_dot_operations__pb2._OPERATION,
- serialized_options=b'\312A,\n\024ProvisionDutResponse\022\024ProvisionDutMetadata',
- ),
- _descriptor.MethodDescriptor(
- name='ProvisionLacros',
- full_name='chromiumos.config.api.test.tls.Common.ProvisionLacros',
- index=2,
- containing_service=None,
- input_type=_PROVISIONLACROSREQUEST,
- output_type=dependencies_dot_longrunning_dot_operations__pb2._OPERATION,
- serialized_options=b'\312A2\n\027ProvisionLacrosResponse\022\027ProvisionLacrosMetadata',
- ),
- _descriptor.MethodDescriptor(
- name='FetchCrashes',
- full_name='chromiumos.config.api.test.tls.Common.FetchCrashes',
- index=3,
- containing_service=None,
- input_type=_FETCHCRASHESREQUEST,
- output_type=_FETCHCRASHESRESPONSE,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='CreateFakeOmaha',
- full_name='chromiumos.config.api.test.tls.Common.CreateFakeOmaha',
- index=4,
- containing_service=None,
- input_type=_CREATEFAKEOMAHAREQUEST,
- output_type=_FAKEOMAHA,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='DeleteFakeOmaha',
- full_name='chromiumos.config.api.test.tls.Common.DeleteFakeOmaha',
- index=5,
- containing_service=None,
- input_type=_DELETEFAKEOMAHAREQUEST,
- output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
- serialized_options=None,
- ),
-])
-_sym_db.RegisterServiceDescriptor(_COMMON)
-
-DESCRIPTOR.services_by_name['Common'] = _COMMON
-
-# @@protoc_insertion_point(module_scope)
diff --git a/server/hosts/tls_client/autotest_common_pb2_grpc.py b/server/hosts/tls_client/autotest_common_pb2_grpc.py
deleted file mode 100644
index 6804124..0000000
--- a/server/hosts/tls_client/autotest_common_pb2_grpc.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
-import grpc
-from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
-
-import common
-from autotest_lib.server.hosts.tls_client import autotest_common_pb2 as autotest__common__pb2
-from autotest_lib.server.hosts.tls_client.dependencies.longrunning import operations_pb2 as dependencies_dot_longrunning_dot_operations__pb2
-
-class CommonStub(object):
- """Common lab services implemented on top of the wiring APIs.
-
- The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
- NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and
- "OPTIONAL" in this document are to be interpreted as described in
- RFC 2119.
-
- All clients SHOULD pass the gRPC metadata key request_trace_id with one
- value. The value is a unique string that is associated with the method call
- in metrics. Clients that do not pass request_trace_id MAY be rejected so that
- they can be fixed.
- """
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.ExecDutCommand = channel.unary_stream(
- '/chromiumos.config.api.test.tls.Common/ExecDutCommand',
- request_serializer=autotest__common__pb2.ExecDutCommandRequest.SerializeToString,
- response_deserializer=autotest__common__pb2.ExecDutCommandResponse.FromString,
- )
- self.ProvisionDut = channel.unary_unary(
- '/chromiumos.config.api.test.tls.Common/ProvisionDut',
- request_serializer=autotest__common__pb2.ProvisionDutRequest.SerializeToString,
- response_deserializer=dependencies_dot_longrunning_dot_operations__pb2.Operation.FromString,
- )
- self.ProvisionLacros = channel.unary_unary(
- '/chromiumos.config.api.test.tls.Common/ProvisionLacros',
- request_serializer=autotest__common__pb2.ProvisionLacrosRequest.SerializeToString,
- response_deserializer=dependencies_dot_longrunning_dot_operations__pb2.Operation.FromString,
- )
- self.FetchCrashes = channel.unary_stream(
- '/chromiumos.config.api.test.tls.Common/FetchCrashes',
- request_serializer=autotest__common__pb2.FetchCrashesRequest.SerializeToString,
- response_deserializer=autotest__common__pb2.FetchCrashesResponse.FromString,
- )
- self.CreateFakeOmaha = channel.unary_unary(
- '/chromiumos.config.api.test.tls.Common/CreateFakeOmaha',
- request_serializer=autotest__common__pb2.CreateFakeOmahaRequest.SerializeToString,
- response_deserializer=autotest__common__pb2.FakeOmaha.FromString,
- )
- self.DeleteFakeOmaha = channel.unary_unary(
- '/chromiumos.config.api.test.tls.Common/DeleteFakeOmaha',
- request_serializer=autotest__common__pb2.DeleteFakeOmahaRequest.SerializeToString,
- response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
- )
-
-
-class CommonServicer(object):
- """Common lab services implemented on top of the wiring APIs.
-
- The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL
- NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and
- "OPTIONAL" in this document are to be interpreted as described in
- RFC 2119.
-
- All clients SHOULD pass the gRPC metadata key request_trace_id with one
- value. The value is a unique string that is associated with the method call
- in metrics. Clients that do not pass request_trace_id MAY be rejected so that
- they can be fixed.
- """
-
- def ExecDutCommand(self, request, context):
- """ExecDutCommand runs a command on a DUT.
-
- The working directory is /.
- A tty is not spawned for the command.
- The user and group is root.
- All signals have their default dispositions and are not masked.
- The umask is set to 0.
-
- The environment contains:
-
- TERM=dumb
- PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin
- LANG=en_US.UTF-8
- USER=root
- HOME=/root
-
- The environment MAY also contain SSH client variables.
- The environment SHALL NOT contain variables not mentioned above.
-
- If the stream is interrupted, the implementation MAY attempt to
- stop the command by sending SIGINT, SIGHUP, SIGTERM, or SIGKILL.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def ProvisionDut(self, request, context):
- """ProvisionDut installs a specified version of Chrome OS on the DUT, along
- with any specified DLCs.
-
- If the DUT is already on the specified version of Chrome OS, the OS will
- not be provisioned.
-
- If the DUT already has the specified list of DLCs, only the missing DLCs
- will be provisioned.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def ProvisionLacros(self, request, context):
- """ProvisionLacros installs a specified version of Lacros on the DUT.
-
- If the DUT already has the specified version of Lacros, Lacros will not be
- provisioned.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def FetchCrashes(self, request, context):
- """FetchCrashes gets a stream of all crash reports currently on the DUT.
-
- The stream returned may split up a crash over multiple
- `FetchCrashesResponse` protos. See the definition of that proto for
- details.
-
- This call is read-only: it doesn't delete the crashes that it reads.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def CreateFakeOmaha(self, request, context):
- """CreateFakeOmaha starts a fake Omaha service on TLS and exposes the
- listened port to the DUT.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def DeleteFakeOmaha(self, request, context):
- """DeleteFakeOmaha deletes the specified fake Omaha resource created by
- CreateFakeOmaha.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
-
-def add_CommonServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'ExecDutCommand': grpc.unary_stream_rpc_method_handler(
- servicer.ExecDutCommand,
- request_deserializer=autotest__common__pb2.ExecDutCommandRequest.FromString,
- response_serializer=autotest__common__pb2.ExecDutCommandResponse.SerializeToString,
- ),
- 'ProvisionDut': grpc.unary_unary_rpc_method_handler(
- servicer.ProvisionDut,
- request_deserializer=autotest__common__pb2.ProvisionDutRequest.FromString,
- response_serializer=dependencies_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
- ),
- 'ProvisionLacros': grpc.unary_unary_rpc_method_handler(
- servicer.ProvisionLacros,
- request_deserializer=autotest__common__pb2.ProvisionLacrosRequest.FromString,
- response_serializer=dependencies_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
- ),
- 'FetchCrashes': grpc.unary_stream_rpc_method_handler(
- servicer.FetchCrashes,
- request_deserializer=autotest__common__pb2.FetchCrashesRequest.FromString,
- response_serializer=autotest__common__pb2.FetchCrashesResponse.SerializeToString,
- ),
- 'CreateFakeOmaha': grpc.unary_unary_rpc_method_handler(
- servicer.CreateFakeOmaha,
- request_deserializer=autotest__common__pb2.CreateFakeOmahaRequest.FromString,
- response_serializer=autotest__common__pb2.FakeOmaha.SerializeToString,
- ),
- 'DeleteFakeOmaha': grpc.unary_unary_rpc_method_handler(
- servicer.DeleteFakeOmaha,
- request_deserializer=autotest__common__pb2.DeleteFakeOmahaRequest.FromString,
- response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'chromiumos.config.api.test.tls.Common', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
diff --git a/server/hosts/tls_client/buildprotos.py b/server/hosts/tls_client/buildprotos.py
deleted file mode 100644
index 3f8bd61..0000000
--- a/server/hosts/tls_client/buildprotos.py
+++ /dev/null
@@ -1,88 +0,0 @@
-"""Deletes the existing bindings, then rebuild using the source .proto file."""
-
-import os
-from shutil import copyfile
-
-UP = '../'
-PROTO_PATH = 'src/config/proto/chromiumos/config/api/test/tls/'
-PROTO_NAME = 'commontls.proto'
-DEST_PROTO_NAME = 'autotest_common.proto'
-DEP_PROTO_RELATIVE_PATH = 'dependencies/longrunning/'
-DEP_PROTO_NAME = 'operations.proto'
-
-BUILD_CMD = (
- 'python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. {} {}'
- .format(DEST_PROTO_NAME,
- os.path.join(DEP_PROTO_RELATIVE_PATH, DEP_PROTO_NAME)))
-
-
-def delete_old_protos():
- """Delete any existing protos or built proto bindings."""
- for file in os.listdir('.'):
- if 'autotest_common' in file:
- os.remove(file)
-
- for file in os.listdir(DEP_PROTO_RELATIVE_PATH):
- if 'operations' in file:
- os.remove(os.path.join(DEP_PROTO_RELATIVE_PATH, file))
-
-
-def copy_proto_from_src():
- """Copy the proto from the src dirs to the local dir."""
- copy_list = [(get_proto_path(), DEST_PROTO_NAME),
- (get_proto_deps_dir(),
- os.path.join(DEP_PROTO_RELATIVE_PATH, DEP_PROTO_NAME))]
-
- for src, dest in copy_list:
- if os.path.isfile(src):
- copyfile(src, dest)
- else:
- raise Exception('Proto missing at %s' % src)
-
-
-def get_proto_path():
- """Return the full path of the commontls.proto from TLS."""
- return os.path.join(UP * get_current_depth(), PROTO_PATH, PROTO_NAME)
-
-
-def get_proto_deps_dir():
- """Return the full path of the operations.proto from TLS."""
- return os.path.join(UP * get_current_depth(), PROTO_PATH,
- DEP_PROTO_RELATIVE_PATH, DEP_PROTO_NAME)
-
-
-def get_current_depth():
- """Return the current depth off /src/ within the file structure."""
- dirs = os.getcwd().split('/')
- src_level = dirs.index('src')
- return len(dirs) - src_level
-
-
-def modify_proto():
- """Change the full path for the dependencies for a local one."""
- # This is likely a dirty hack, but compiling with the full src in autotest
- # doesn't work. Open to suggestions for alternatives.
-
- #TODO (dbeckett@) b/183220746, work on a better thats not a hack...
- with open(DEST_PROTO_NAME, 'r+') as f:
- original = f.read()
- new = original.replace(
- 'import "chromiumos/config/api/test/tls/dependencies/longrunning/operations.proto";',
- 'import "dependencies/longrunning/operations.proto";')
- with open(DEST_PROTO_NAME, 'w') as wf:
- wf.write(new)
-
-
-def create_bindings():
- os.system(BUILD_CMD)
-
-
-def main():
- delete_old_protos()
- copy_proto_from_src()
- modify_proto()
- create_bindings()
-
-
-if __name__ == "__main__":
- main()
diff --git a/server/hosts/tls_client/common.py b/server/hosts/tls_client/common.py
deleted file mode 100644
index c505ee4..0000000
--- a/server/hosts/tls_client/common.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import os, sys
-dirname = os.path.dirname(sys.modules[__name__].__file__)
-autotest_dir = os.path.abspath(os.path.join(dirname, "..", "..", ".."))
-client_dir = os.path.join(autotest_dir, "client")
-sys.path.insert(0, client_dir)
-import setup_modules
-sys.path.pop(0)
-setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
diff --git a/server/hosts/tls_client/connection.py b/server/hosts/tls_client/connection.py
deleted file mode 100644
index a847f81..0000000
--- a/server/hosts/tls_client/connection.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Abstract Client for Autotest side communications to the TLS Server."""
-
-import grpc
-
-import common
-
-from autotest_lib.server.hosts.tls_client import autotest_common_pb2_grpc
-
-TLS_PORT = 7152
-TLS_IP = '10.254.254.254'
-
-
-class TLSConnection(object):
- """The client side connection to Common-TLS service running in a drone."""
-
- def __init__(self):
- """Configure the grpc channel."""
- self.channel = grpc.insecure_channel('{}:{}'.format(TLS_IP, TLS_PORT))
- self.stub = autotest_common_pb2_grpc.CommonStub(self.channel)
- self.alive = True
-
- def __enter__(self):
- return self
-
- def __exit__(self, *exc):
- self.close()
-
- def close(self):
- """Close the grpc channel."""
- self.channel.close()
- self.alive = False
diff --git a/server/hosts/tls_client/dependencies/__init__.py b/server/hosts/tls_client/dependencies/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/server/hosts/tls_client/dependencies/__init__.py
+++ /dev/null
diff --git a/server/hosts/tls_client/dependencies/common.py b/server/hosts/tls_client/dependencies/common.py
deleted file mode 100644
index 9c2b102..0000000
--- a/server/hosts/tls_client/dependencies/common.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import os, sys
-dirname = os.path.dirname(sys.modules[__name__].__file__)
-autotest_dir = os.path.abspath(os.path.join(dirname, "..", "..", "..", ".."))
-client_dir = os.path.join(autotest_dir, "client")
-sys.path.insert(0, client_dir)
-import setup_modules
-sys.path.pop(0)
-setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
diff --git a/server/hosts/tls_client/dependencies/longrunning/__init__.py b/server/hosts/tls_client/dependencies/longrunning/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/server/hosts/tls_client/dependencies/longrunning/__init__.py
+++ /dev/null
diff --git a/server/hosts/tls_client/dependencies/longrunning/common.py b/server/hosts/tls_client/dependencies/longrunning/common.py
deleted file mode 100644
index f4dce84..0000000
--- a/server/hosts/tls_client/dependencies/longrunning/common.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import os, sys
-dirname = os.path.dirname(sys.modules[__name__].__file__)
-autotest_dir = os.path.abspath(
- os.path.join(dirname, "..", "..", "..", "..", ".."))
-client_dir = os.path.join(autotest_dir, "client")
-sys.path.insert(0, client_dir)
-import setup_modules
-sys.path.pop(0)
-setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
diff --git a/server/hosts/tls_client/dependencies/longrunning/operations.proto b/server/hosts/tls_client/dependencies/longrunning/operations.proto
deleted file mode 100644
index 1e5ddf1..0000000
--- a/server/hosts/tls_client/dependencies/longrunning/operations.proto
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2016 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-
-package google.longrunning;
-
-import "google/protobuf/any.proto";
-import "google/protobuf/descriptor.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/empty.proto";
-
-option go_package = "go.chromium.org/chromiumos/config/go/api/test/tls/dependencies/longrunning";
-
-
-// Manages long-running operations with an API service.
-//
-// When an API method normally takes long time to complete, it can be designed
-// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
-// interface to receive the real response asynchronously by polling the
-// operation resource, or pass the operation resource to another API (such as
-// Google Cloud Pub/Sub API) to receive the response. Any API service that
-// returns long-running operations should implement the `Operations` interface
-// so developers can have a consistent client experience.
-service Operations {
- // Lists operations that match the specified filter in the request. If the
- // server doesn't support this method, it returns `UNIMPLEMENTED`.
- //
- // NOTE: the `name` binding below allows API services to override the binding
- // to use different resource name schemes, such as `users/*/operations`.
- rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
- }
-
- // Gets the latest state of a long-running operation. Clients can use this
- // method to poll the operation result at intervals as recommended by the API
- // service.
- rpc GetOperation(GetOperationRequest) returns (Operation) {
- }
-
- // Deletes a long-running operation. This method indicates that the client is
- // no longer interested in the operation result. It does not cancel the
- // operation. If the server doesn't support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`.
- rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
- }
-
- // Starts asynchronous cancellation on a long-running operation. The server
- // makes a best effort to cancel the operation, but success is not
- // guaranteed. If the server doesn't support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`. Clients can use
- // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- // other methods to check whether the cancellation succeeded or whether the
- // operation completed despite cancellation. On successful cancellation,
- // the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- // corresponding to `Code.CANCELLED`.
- rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
- }
- // Waits for the specified long-running operation until it is done or reaches
- // at most a specified timeout, returning the latest state. If the operation
- // is already done, the latest state is immediately returned. If the timeout
- // specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
- // timeout is used. If the server does not support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`.
- // Note that this method is on a best-effort basis. It may return the latest
- // state before the specified timeout (including immediately), meaning even an
- // immediate response is no guarantee that the operation is done.
- rpc WaitOperation(WaitOperationRequest) returns (Operation) {
- }
-}
-
-// This resource represents a long-running operation that is the result of a
-// network API call.
-message Operation {
- // The server-assigned name, which is only unique within the same service that
- // originally returns it. If you use the default HTTP mapping, the
- // `name` should have the format of `operations/some/unique/name`.
- string name = 1;
-
- // Service-specific metadata associated with the operation. It typically
- // contains progress information and common metadata such as create time.
- // Some services might not provide such metadata. Any method that returns a
- // long-running operation should document the metadata type, if any.
- google.protobuf.Any metadata = 2;
-
- // If the value is `false`, it means the operation is still in progress.
- // If true, the operation is completed, and either `error` or `response` is
- // available.
- bool done = 3;
-
- // The operation result, which can be either an `error` or a valid `response`.
- // If `done` == `false`, neither `error` nor `response` is set.
- // If `done` == `true`, exactly one of `error` or `response` is set.
- oneof result {
- // The error result of the operation in case of failure or cancellation.
- Status error = 4;
-
- // The normal response of the operation in case of success. If the original
- // method returns no data on success, such as `Delete`, the response is
- // `google.protobuf.Empty`. If the original method is standard
- // `Get`/`Create`/`Update`, the response should be the resource. For other
- // methods, the response should have the type `XxxResponse`, where `Xxx`
- // is the original method name. For example, if the original method name
- // is `TakeSnapshot()`, the inferred response type is
- // `TakeSnapshotResponse`.
- google.protobuf.Any response = 5;
- }
-}
-
-// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
-message GetOperationRequest {
- // The name of the operation resource.
- string name = 1;
-}
-
-// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
-message ListOperationsRequest {
- // The name of the operation collection.
- string name = 4;
-
- // The standard list filter.
- string filter = 1;
-
- // The standard list page size.
- int32 page_size = 2;
-
- // The standard list page token.
- string page_token = 3;
-}
-
-// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
-message ListOperationsResponse {
- // A list of operations that matches the specified filter in the request.
- repeated Operation operations = 1;
-
- // The standard List next-page token.
- string next_page_token = 2;
-}
-
-// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
-message CancelOperationRequest {
- // The name of the operation resource to be cancelled.
- string name = 1;
-}
-
-// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
-message DeleteOperationRequest {
- // The name of the operation resource to be deleted.
- string name = 1;
-}
-
-// The request message for [Operations.WaitOperation][google.longrunning.Operations.WaitOperation].
-message WaitOperationRequest {
- // The name of the operation resource to wait on.
- string name = 1;
-
- // The maximum duration to wait before timing out. If left blank, the wait
- // will be at most the time permitted by the underlying HTTP/RPC protocol.
- // If RPC context deadline is also specified, the shorter one will be used.
- google.protobuf.Duration timeout = 2;
-}
-
-// A message representing the message types used by a long-running operation.
-//
-// Example:
-//
-// rpc LongRunningRecognize(LongRunningRecognizeRequest)
-// returns (google.longrunning.Operation) {
-// option (google.longrunning.operation_info) = {
-// response_type: "LongRunningRecognizeResponse"
-// metadata_type: "LongRunningRecognizeMetadata"
-// };
-// }
-message OperationInfo {
- // Required. The message name of the primary return type for this
- // long-running operation.
- // This type will be used to deserialize the LRO's response.
- //
- // If the response is in a different package from the rpc, a fully-qualified
- // message name must be used (e.g. `google.protobuf.Struct`).
- //
- // Note: Altering this value constitutes a breaking change.
- string response_type = 1;
-
- // Required. The message name of the metadata type for this long-running
- // operation.
- //
- // If the response is in a different package from the rpc, a fully-qualified
- // message name must be used (e.g. `google.protobuf.Struct`).
- //
- // Note: Altering this value constitutes a breaking change.
- string metadata_type = 2;
-}
-
-extend google.protobuf.MethodOptions {
- // Additional information regarding long-running operations.
- // In particular, this specifies the types that are returned from
- // long-running operations.
- //
- // Required for methods that return `google.longrunning.Operation`; invalid
- // otherwise.
- OperationInfo operation_info = 1049;
-}
-
-// The `Status` type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs. It is used by
-// [gRPC](https://github.com/grpc). The error model is designed to be:
-//
-// - Simple to use and understand for most users
-// - Flexible enough to meet unexpected needs
-//
-// # Overview
-//
-// The `Status` message contains three pieces of data: error code, error message,
-// and error details. The error code should be an enum value of
-// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
-// error message should be a developer-facing English message that helps
-// developers *understand* and *resolve* the error. If a localized user-facing
-// error message is needed, put the localized message in the error details or
-// localize it in the client. The optional error details may contain arbitrary
-// information about the error. There is a predefined set of error detail types
-// in the package `google.rpc` that can be used for common error conditions.
-//
-// # Language mapping
-//
-// The `Status` message is the logical representation of the error model, but it
-// is not necessarily the actual wire format. When the `Status` message is
-// exposed in different client libraries and different wire protocols, it can be
-// mapped differently. For example, it will likely be mapped to some exceptions
-// in Java, but more likely mapped to some error codes in C.
-//
-// # Other uses
-//
-// The error model and the `Status` message can be used in a variety of
-// environments, either with or without APIs, to provide a
-// consistent developer experience across different environments.
-//
-// Example uses of this error model include:
-//
-// - Partial errors. If a service needs to return partial errors to the client,
-// it may embed the `Status` in the normal response to indicate the partial
-// errors.
-//
-// - Workflow errors. A typical workflow has multiple steps. Each step may
-// have a `Status` message for error reporting.
-//
-// - Batch operations. If a client uses batch request and batch response, the
-// `Status` message should be used directly inside batch response, one for
-// each error sub-response.
-//
-// - Asynchronous operations. If an API call embeds asynchronous operation
-// results in its response, the status of those operations should be
-// represented directly using the `Status` message.
-//
-// - Logging. If some API errors are stored in logs, the message `Status` could
-// be used directly after any stripping needed for security/privacy reasons.
-message Status {
- // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
- int32 code = 1;
-
- // A developer-facing error message, which should be in English. Any
- // user-facing error message should be localized and sent in the
- // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
- string message = 2;
-
- // A list of messages that carry the error details. There is a common set of
- // message types for APIs to use.
- repeated google.protobuf.Any details = 3;
-}
diff --git a/server/hosts/tls_client/dependencies/longrunning/operations_pb2.py b/server/hosts/tls_client/dependencies/longrunning/operations_pb2.py
deleted file mode 100644
index bb465f0..0000000
--- a/server/hosts/tls_client/dependencies/longrunning/operations_pb2.py
+++ /dev/null
@@ -1,556 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: dependencies/longrunning/operations.proto
-
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
-from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
-from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
-from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
- name='dependencies/longrunning/operations.proto',
- package='google.longrunning',
- syntax='proto3',
- serialized_options=b'ZJgo.chromium.org/chromiumos/config/go/api/test/tls/dependencies/longrunning',
- serialized_pb=b'\n)dependencies/longrunning/operations.proto\x12\x12google.longrunning\x1a\x19google/protobuf/any.proto\x1a google/protobuf/descriptor.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xb0\x01\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x08metadata\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\x12+\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x1a.google.longrunning.StatusH\x00\x12(\n\x08response\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x08\n\x06result\"#\n\x13GetOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\\\n\x15ListOperationsRequest\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x16ListOperationsResponse\x12\x31\n\noperations\x18\x01 \x03(\x0b\x32\x1d.google.longrunning.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"&\n\x16\x43\x61ncelOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"&\n\x16\x44\x65leteOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"P\n\x14WaitOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12*\n\x07timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\"=\n\rOperationInfo\x12\x15\n\rresponse_type\x18\x01 \x01(\t\x12\x15\n\rmetadata_type\x18\x02 \x01(\t\"N\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x07\x64\x65tails\x18\x03 
\x03(\x0b\x32\x14.google.protobuf.Any2\xdf\x03\n\nOperations\x12i\n\x0eListOperations\x12).google.longrunning.ListOperationsRequest\x1a*.google.longrunning.ListOperationsResponse\"\x00\x12X\n\x0cGetOperation\x12\'.google.longrunning.GetOperationRequest\x1a\x1d.google.longrunning.Operation\"\x00\x12W\n\x0f\x44\x65leteOperation\x12*.google.longrunning.DeleteOperationRequest\x1a\x16.google.protobuf.Empty\"\x00\x12W\n\x0f\x43\x61ncelOperation\x12*.google.longrunning.CancelOperationRequest\x1a\x16.google.protobuf.Empty\"\x00\x12Z\n\rWaitOperation\x12(.google.longrunning.WaitOperationRequest\x1a\x1d.google.longrunning.Operation\"\x00:Z\n\x0eoperation_info\x12\x1e.google.protobuf.MethodOptions\x18\x99\x08 \x01(\x0b\x32!.google.longrunning.OperationInfoBLZJgo.chromium.org/chromiumos/config/go/api/test/tls/dependencies/longrunningb\x06proto3'
- ,
- dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
-
-
-OPERATION_INFO_FIELD_NUMBER = 1049
-operation_info = _descriptor.FieldDescriptor(
- name='operation_info', full_name='google.longrunning.operation_info', index=0,
- number=1049, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=True, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR)
-
-
-_OPERATION = _descriptor.Descriptor(
- name='Operation',
- full_name='google.longrunning.Operation',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='google.longrunning.Operation.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='metadata', full_name='google.longrunning.Operation.metadata', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='done', full_name='google.longrunning.Operation.done', index=2,
- number=3, type=8, cpp_type=7, label=1,
- has_default_value=False, default_value=False,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='error', full_name='google.longrunning.Operation.error', index=3,
- number=4, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='response', full_name='google.longrunning.Operation.response', index=4,
- number=5, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- _descriptor.OneofDescriptor(
- name='result', full_name='google.longrunning.Operation.result',
- index=0, containing_type=None, fields=[]),
- ],
- serialized_start=188,
- serialized_end=364,
-)
-
-
-_GETOPERATIONREQUEST = _descriptor.Descriptor(
- name='GetOperationRequest',
- full_name='google.longrunning.GetOperationRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='google.longrunning.GetOperationRequest.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=366,
- serialized_end=401,
-)
-
-
-_LISTOPERATIONSREQUEST = _descriptor.Descriptor(
- name='ListOperationsRequest',
- full_name='google.longrunning.ListOperationsRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='google.longrunning.ListOperationsRequest.name', index=0,
- number=4, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='filter', full_name='google.longrunning.ListOperationsRequest.filter', index=1,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='page_size', full_name='google.longrunning.ListOperationsRequest.page_size', index=2,
- number=2, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='page_token', full_name='google.longrunning.ListOperationsRequest.page_token', index=3,
- number=3, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=403,
- serialized_end=495,
-)
-
-
-_LISTOPERATIONSRESPONSE = _descriptor.Descriptor(
- name='ListOperationsResponse',
- full_name='google.longrunning.ListOperationsResponse',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='operations', full_name='google.longrunning.ListOperationsResponse.operations', index=0,
- number=1, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='next_page_token', full_name='google.longrunning.ListOperationsResponse.next_page_token', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=497,
- serialized_end=597,
-)
-
-
-_CANCELOPERATIONREQUEST = _descriptor.Descriptor(
- name='CancelOperationRequest',
- full_name='google.longrunning.CancelOperationRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='google.longrunning.CancelOperationRequest.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=599,
- serialized_end=637,
-)
-
-
-_DELETEOPERATIONREQUEST = _descriptor.Descriptor(
- name='DeleteOperationRequest',
- full_name='google.longrunning.DeleteOperationRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='google.longrunning.DeleteOperationRequest.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=639,
- serialized_end=677,
-)
-
-
-_WAITOPERATIONREQUEST = _descriptor.Descriptor(
- name='WaitOperationRequest',
- full_name='google.longrunning.WaitOperationRequest',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='name', full_name='google.longrunning.WaitOperationRequest.name', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='timeout', full_name='google.longrunning.WaitOperationRequest.timeout', index=1,
- number=2, type=11, cpp_type=10, label=1,
- has_default_value=False, default_value=None,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=679,
- serialized_end=759,
-)
-
-
-_OPERATIONINFO = _descriptor.Descriptor(
- name='OperationInfo',
- full_name='google.longrunning.OperationInfo',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='response_type', full_name='google.longrunning.OperationInfo.response_type', index=0,
- number=1, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='metadata_type', full_name='google.longrunning.OperationInfo.metadata_type', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=761,
- serialized_end=822,
-)
-
-
-_STATUS = _descriptor.Descriptor(
- name='Status',
- full_name='google.longrunning.Status',
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- fields=[
- _descriptor.FieldDescriptor(
- name='code', full_name='google.longrunning.Status.code', index=0,
- number=1, type=5, cpp_type=1, label=1,
- has_default_value=False, default_value=0,
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='message', full_name='google.longrunning.Status.message', index=1,
- number=2, type=9, cpp_type=9, label=1,
- has_default_value=False, default_value=b"".decode('utf-8'),
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- _descriptor.FieldDescriptor(
- name='details', full_name='google.longrunning.Status.details', index=2,
- number=3, type=11, cpp_type=10, label=3,
- has_default_value=False, default_value=[],
- message_type=None, enum_type=None, containing_type=None,
- is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
- ],
- extensions=[
- ],
- nested_types=[],
- enum_types=[
- ],
- serialized_options=None,
- is_extendable=False,
- syntax='proto3',
- extension_ranges=[],
- oneofs=[
- ],
- serialized_start=824,
- serialized_end=902,
-)
-
-_OPERATION.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_any__pb2._ANY
-_OPERATION.fields_by_name['error'].message_type = _STATUS
-_OPERATION.fields_by_name['response'].message_type = google_dot_protobuf_dot_any__pb2._ANY
-_OPERATION.oneofs_by_name['result'].fields.append(
- _OPERATION.fields_by_name['error'])
-_OPERATION.fields_by_name['error'].containing_oneof = _OPERATION.oneofs_by_name['result']
-_OPERATION.oneofs_by_name['result'].fields.append(
- _OPERATION.fields_by_name['response'])
-_OPERATION.fields_by_name['response'].containing_oneof = _OPERATION.oneofs_by_name['result']
-_LISTOPERATIONSRESPONSE.fields_by_name['operations'].message_type = _OPERATION
-_WAITOPERATIONREQUEST.fields_by_name['timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
-_STATUS.fields_by_name['details'].message_type = google_dot_protobuf_dot_any__pb2._ANY
-DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION
-DESCRIPTOR.message_types_by_name['GetOperationRequest'] = _GETOPERATIONREQUEST
-DESCRIPTOR.message_types_by_name['ListOperationsRequest'] = _LISTOPERATIONSREQUEST
-DESCRIPTOR.message_types_by_name['ListOperationsResponse'] = _LISTOPERATIONSRESPONSE
-DESCRIPTOR.message_types_by_name['CancelOperationRequest'] = _CANCELOPERATIONREQUEST
-DESCRIPTOR.message_types_by_name['DeleteOperationRequest'] = _DELETEOPERATIONREQUEST
-DESCRIPTOR.message_types_by_name['WaitOperationRequest'] = _WAITOPERATIONREQUEST
-DESCRIPTOR.message_types_by_name['OperationInfo'] = _OPERATIONINFO
-DESCRIPTOR.message_types_by_name['Status'] = _STATUS
-DESCRIPTOR.extensions_by_name['operation_info'] = operation_info
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), {
- 'DESCRIPTOR' : _OPERATION,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.Operation)
- })
-_sym_db.RegisterMessage(Operation)
-
-GetOperationRequest = _reflection.GeneratedProtocolMessageType('GetOperationRequest', (_message.Message,), {
- 'DESCRIPTOR' : _GETOPERATIONREQUEST,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.GetOperationRequest)
- })
-_sym_db.RegisterMessage(GetOperationRequest)
-
-ListOperationsRequest = _reflection.GeneratedProtocolMessageType('ListOperationsRequest', (_message.Message,), {
- 'DESCRIPTOR' : _LISTOPERATIONSREQUEST,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsRequest)
- })
-_sym_db.RegisterMessage(ListOperationsRequest)
-
-ListOperationsResponse = _reflection.GeneratedProtocolMessageType('ListOperationsResponse', (_message.Message,), {
- 'DESCRIPTOR' : _LISTOPERATIONSRESPONSE,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsResponse)
- })
-_sym_db.RegisterMessage(ListOperationsResponse)
-
-CancelOperationRequest = _reflection.GeneratedProtocolMessageType('CancelOperationRequest', (_message.Message,), {
- 'DESCRIPTOR' : _CANCELOPERATIONREQUEST,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.CancelOperationRequest)
- })
-_sym_db.RegisterMessage(CancelOperationRequest)
-
-DeleteOperationRequest = _reflection.GeneratedProtocolMessageType('DeleteOperationRequest', (_message.Message,), {
- 'DESCRIPTOR' : _DELETEOPERATIONREQUEST,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.DeleteOperationRequest)
- })
-_sym_db.RegisterMessage(DeleteOperationRequest)
-
-WaitOperationRequest = _reflection.GeneratedProtocolMessageType('WaitOperationRequest', (_message.Message,), {
- 'DESCRIPTOR' : _WAITOPERATIONREQUEST,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.WaitOperationRequest)
- })
-_sym_db.RegisterMessage(WaitOperationRequest)
-
-OperationInfo = _reflection.GeneratedProtocolMessageType('OperationInfo', (_message.Message,), {
- 'DESCRIPTOR' : _OPERATIONINFO,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.OperationInfo)
- })
-_sym_db.RegisterMessage(OperationInfo)
-
-Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
- 'DESCRIPTOR' : _STATUS,
- '__module__' : 'dependencies.longrunning.operations_pb2'
- # @@protoc_insertion_point(class_scope:google.longrunning.Status)
- })
-_sym_db.RegisterMessage(Status)
-
-operation_info.message_type = _OPERATIONINFO
-google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(operation_info)
-
-DESCRIPTOR._options = None
-
-_OPERATIONS = _descriptor.ServiceDescriptor(
- name='Operations',
- full_name='google.longrunning.Operations',
- file=DESCRIPTOR,
- index=0,
- serialized_options=None,
- serialized_start=905,
- serialized_end=1384,
- methods=[
- _descriptor.MethodDescriptor(
- name='ListOperations',
- full_name='google.longrunning.Operations.ListOperations',
- index=0,
- containing_service=None,
- input_type=_LISTOPERATIONSREQUEST,
- output_type=_LISTOPERATIONSRESPONSE,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='GetOperation',
- full_name='google.longrunning.Operations.GetOperation',
- index=1,
- containing_service=None,
- input_type=_GETOPERATIONREQUEST,
- output_type=_OPERATION,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='DeleteOperation',
- full_name='google.longrunning.Operations.DeleteOperation',
- index=2,
- containing_service=None,
- input_type=_DELETEOPERATIONREQUEST,
- output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='CancelOperation',
- full_name='google.longrunning.Operations.CancelOperation',
- index=3,
- containing_service=None,
- input_type=_CANCELOPERATIONREQUEST,
- output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
- serialized_options=None,
- ),
- _descriptor.MethodDescriptor(
- name='WaitOperation',
- full_name='google.longrunning.Operations.WaitOperation',
- index=4,
- containing_service=None,
- input_type=_WAITOPERATIONREQUEST,
- output_type=_OPERATION,
- serialized_options=None,
- ),
-])
-_sym_db.RegisterServiceDescriptor(_OPERATIONS)
-
-DESCRIPTOR.services_by_name['Operations'] = _OPERATIONS
-
-# @@protoc_insertion_point(module_scope)
diff --git a/server/hosts/tls_client/dependencies/longrunning/operations_pb2_grpc.py b/server/hosts/tls_client/dependencies/longrunning/operations_pb2_grpc.py
deleted file mode 100644
index cd7858f..0000000
--- a/server/hosts/tls_client/dependencies/longrunning/operations_pb2_grpc.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
-import grpc
-
-from dependencies.longrunning import operations_pb2 as dependencies_dot_longrunning_dot_operations__pb2
-from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
-
-
-class OperationsStub(object):
- """Manages long-running operations with an API service.
-
- When an API method normally takes long time to complete, it can be designed
- to return [Operation][google.longrunning.Operation] to the client, and the client can use this
- interface to receive the real response asynchronously by polling the
- operation resource, or pass the operation resource to another API (such as
- Google Cloud Pub/Sub API) to receive the response. Any API service that
- returns long-running operations should implement the `Operations` interface
- so developers can have a consistent client experience.
- """
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.ListOperations = channel.unary_unary(
- '/google.longrunning.Operations/ListOperations',
- request_serializer=dependencies_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
- response_deserializer=dependencies_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
- )
- self.GetOperation = channel.unary_unary(
- '/google.longrunning.Operations/GetOperation',
- request_serializer=dependencies_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
- response_deserializer=dependencies_dot_longrunning_dot_operations__pb2.Operation.FromString,
- )
- self.DeleteOperation = channel.unary_unary(
- '/google.longrunning.Operations/DeleteOperation',
- request_serializer=dependencies_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
- response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
- )
- self.CancelOperation = channel.unary_unary(
- '/google.longrunning.Operations/CancelOperation',
- request_serializer=dependencies_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
- response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
- )
- self.WaitOperation = channel.unary_unary(
- '/google.longrunning.Operations/WaitOperation',
- request_serializer=dependencies_dot_longrunning_dot_operations__pb2.WaitOperationRequest.SerializeToString,
- response_deserializer=dependencies_dot_longrunning_dot_operations__pb2.Operation.FromString,
- )
-
-
-class OperationsServicer(object):
- """Manages long-running operations with an API service.
-
- When an API method normally takes long time to complete, it can be designed
- to return [Operation][google.longrunning.Operation] to the client, and the client can use this
- interface to receive the real response asynchronously by polling the
- operation resource, or pass the operation resource to another API (such as
- Google Cloud Pub/Sub API) to receive the response. Any API service that
- returns long-running operations should implement the `Operations` interface
- so developers can have a consistent client experience.
- """
-
- def ListOperations(self, request, context):
- """Lists operations that match the specified filter in the request. If the
- server doesn't support this method, it returns `UNIMPLEMENTED`.
-
- NOTE: the `name` binding below allows API services to override the binding
- to use different resource name schemes, such as `users/*/operations`.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def GetOperation(self, request, context):
- """Gets the latest state of a long-running operation. Clients can use this
- method to poll the operation result at intervals as recommended by the API
- service.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def DeleteOperation(self, request, context):
- """Deletes a long-running operation. This method indicates that the client is
- no longer interested in the operation result. It does not cancel the
- operation. If the server doesn't support this method, it returns
- `google.rpc.Code.UNIMPLEMENTED`.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def CancelOperation(self, request, context):
- """Starts asynchronous cancellation on a long-running operation. The server
- makes a best effort to cancel the operation, but success is not
- guaranteed. If the server doesn't support this method, it returns
- `google.rpc.Code.UNIMPLEMENTED`. Clients can use
- [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- other methods to check whether the cancellation succeeded or whether the
- operation completed despite cancellation. On successful cancellation,
- the operation is not deleted; instead, it becomes an operation with
- an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- corresponding to `Code.CANCELLED`.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
- def WaitOperation(self, request, context):
- """Waits for the specified long-running operation until it is done or reaches
- at most a specified timeout, returning the latest state. If the operation
- is already done, the latest state is immediately returned. If the timeout
- specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
- timeout is used. If the server does not support this method, it returns
- `google.rpc.Code.UNIMPLEMENTED`.
- Note that this method is on a best-effort basis. It may return the latest
- state before the specified timeout (including immediately), meaning even an
- immediate response is no guarantee that the operation is done.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details('Method not implemented!')
- raise NotImplementedError('Method not implemented!')
-
-
-def add_OperationsServicer_to_server(servicer, server):
- rpc_method_handlers = {
- 'ListOperations': grpc.unary_unary_rpc_method_handler(
- servicer.ListOperations,
- request_deserializer=dependencies_dot_longrunning_dot_operations__pb2.ListOperationsRequest.FromString,
- response_serializer=dependencies_dot_longrunning_dot_operations__pb2.ListOperationsResponse.SerializeToString,
- ),
- 'GetOperation': grpc.unary_unary_rpc_method_handler(
- servicer.GetOperation,
- request_deserializer=dependencies_dot_longrunning_dot_operations__pb2.GetOperationRequest.FromString,
- response_serializer=dependencies_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
- ),
- 'DeleteOperation': grpc.unary_unary_rpc_method_handler(
- servicer.DeleteOperation,
- request_deserializer=dependencies_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.FromString,
- response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
- ),
- 'CancelOperation': grpc.unary_unary_rpc_method_handler(
- servicer.CancelOperation,
- request_deserializer=dependencies_dot_longrunning_dot_operations__pb2.CancelOperationRequest.FromString,
- response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
- ),
- 'WaitOperation': grpc.unary_unary_rpc_method_handler(
- servicer.WaitOperation,
- request_deserializer=dependencies_dot_longrunning_dot_operations__pb2.WaitOperationRequest.FromString,
- response_serializer=dependencies_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- 'google.longrunning.Operations', rpc_method_handlers)
- server.add_generic_rpc_handlers((generic_handler,))
diff --git a/server/hosts/tls_client/exec_dut_command.py b/server/hosts/tls_client/exec_dut_command.py
deleted file mode 100644
index f82588b..0000000
--- a/server/hosts/tls_client/exec_dut_command.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Autotest communcations to the Hosts (DUTs) via TLS ExecDutCommand."""
-
-import common
-import grpc
-import logging
-import six
-import time
-
-from autotest_lib.server.hosts.tls_client import autotest_common_pb2
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import utils
-
-
-class TLSExecDutCommandClient():
- """Object for sending commands to a host, and getting the response."""
-
- def __init__(self, tlsconnection, hostname):
- """Configure the grpc channel."""
- if tlsconnection.alive:
- self.stub = tlsconnection.stub
- else:
- raise error.TLSConnectionError(
- "TLS connection is not alive when try to creating"
- " exec_dut_command client.")
-
- self.hostname = hostname
- self.tlsconnection = tlsconnection
-
- def run_cmd(self,
- cmd,
- timeout=120,
- stdout_tee=None,
- stderr_tee=None,
- ignore_timeout=False):
- """
- Run a command on the host configured during init.
-
- @param cmd: shell cmd to execute on the DUT
- @param: stdout_tee/stderr_tee: objects to write the data from the
- respective streams to
- @param timeout int(seconds): how long to allow the command to run
- before forcefully killing it.
- @param ignore_timeout: if True, do not raise err on timeouts.
- """
- if not self.tlsconnection.alive:
- error.TLSConnectionError(
- "TLS connection is not up when try to run exec_dut_command.")
- result = utils.CmdResult(command=cmd)
- try:
- self._run(cmd, stdout_tee, stderr_tee, result, timeout)
- except grpc.RpcError as e:
- if e.code().name == "DEADLINE_EXCEEDED":
- if ignore_timeout:
- return None
- raise error.CmdTimeoutError(
- cmd, result,
- "Command(s) did not complete within %d seconds" %
- timeout)
- raise e
- except Exception as e:
- raise e
- return result
-
- def _run(self, cmd, stdout_tee, stderr_tee, result, timeout):
- """Run the provided cmd, populate the result in place."""
- start_time = time.time()
- response = self._send_cmd(cmd, timeout)
-
- stdout_buf = six.StringIO()
- stderr_buf = six.StringIO()
- last_status = 0
-
- if response:
- for item in response:
- last_status = item.exit_info.status
- _log_item(item.stdout, stdout_buf, stdout_tee)
- _log_item(item.stderr, stderr_buf, stderr_tee)
-
- result.stdout = stdout_buf.getvalue()
- result.stderr = stderr_buf.getvalue()
- result.exit_status = last_status
- result.duration = time.time() - start_time
-
- def _send_cmd(self, cmd, timeout):
- """Serialize and send the cmd to the TLS service."""
- formatted_cmd = autotest_common_pb2.ExecDutCommandRequest(
- name=self.hostname, command=cmd)
- return self.stub.ExecDutCommand(formatted_cmd, timeout=timeout)
-
-
-def _log_item(item, buf, tee):
- """
- Parse the provided item.
-
- If the item exists, append the provided arr with the item & write to
- the provided tee if provided.
-
- """
- if not item:
- return
- # TODO dbeckett@ (crbug.com/990593), adjust this to be PY3 compatible.
- buf.write(item)
- if tee is not None and tee is not utils.TEE_TO_LOGS:
- tee.write(item)
diff --git a/server/hosts/tls_client/fake_omaha.py b/server/hosts/tls_client/fake_omaha.py
deleted file mode 100644
index c1767af..0000000
--- a/server/hosts/tls_client/fake_omaha.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""Communication with the TLS FakeOmaha Service."""
-
-import logging
-
-import common
-
-from autotest_lib.server.hosts.tls_client import autotest_common_pb2
-from autotest_lib.client.common_lib import error
-
-PAYLOAD_TYPE = {
- 'TYPE_UNSPECIFIED':
- autotest_common_pb2.FakeOmaha.Payload.TYPE_UNSPECIFIED,
- 'FULL': autotest_common_pb2.FakeOmaha.Payload.FULL,
- 'DELTA': autotest_common_pb2.FakeOmaha.Payload.DELTA
-}
-
-
-class TLSFakeOmaha():
- """Object for sending commands to a host, and getting the response."""
-
- def __init__(self, tlsconnection):
- """Configure the grpc channel."""
- if tlsconnection.alive:
- self.stub = tlsconnection.stub
- else:
- raise error.TLSConnectionError(
- "TLS connection is not alive when try to creating"
- " FakeOmaha client.")
-
- self.tlsconnection = tlsconnection
-
- def _make_payloads(self, payloads):
- """Serialize and return the list of payloads."""
- serialized_payloads = []
- for payload in payloads:
- serialized_payloads.append(
- autotest_common_pb2.FakeOmaha.Payload(
- id=payload['payload_id'],
- type=PAYLOAD_TYPE[payload['payload_type']]))
-
- return serialized_payloads
-
- def start_omaha(self,
- hostname,
- target_build,
- payloads,
- exposed_via_proxy=False,
- critical_update=False,
- return_noupdate_starting=0):
- """Serialize and send the cmd to the TLS service.
-
- @param hostname: hostname of dut. Normally 'hostname' or 'self.hostname'
- @param target_build: full target build for the update. Example:
-
- @param payloads: list of the payloads in the format:
- [{'payload_id': <id>, 'payload_type': <type>}]
- example:
- [{'payload_id': 'ROOTFS', 'payload_type': 'FULL'},]
- @param exposed_via_proxy: bool indicates that the fake Omaha service is
- exposed to a DUT via a proxy server, instead of exposing to the DUT
- directly.
- @param critical_update:bool instructs the fake Omaha created that the
- update is critical.
- @param return_noupdate_starting: int indicates from which update check
- to start returning noupdate.
-
- @returns: the omaha_url
- """
- payload = self._make_payloads(payloads)
-
- target_build = autotest_common_pb2.ChromeOsImage(
- gs_path_prefix=target_build)
- fake_omaha = autotest_common_pb2.FakeOmaha(
- dut=hostname,
- target_build=target_build,
- payloads=payload,
- exposed_via_proxy=exposed_via_proxy,
- critical_update=critical_update,
- return_noupdate_starting=return_noupdate_starting)
-
- req = autotest_common_pb2.CreateFakeOmahaRequest(fake_omaha=fake_omaha)
-
- try:
- result = self.stub.CreateFakeOmaha(req)
- return result.omaha_url
- except Exception as e:
- logging.error("TLS FakeOmaha Debug String: %s",
- e.debug_error_string())
- raise error.TestError(
- "Could not start FakeOmaha Server because %s", e.details())
diff --git a/server/server_job.py b/server/server_job.py
index b83060e..6e73403 100644
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -51,9 +51,7 @@
from autotest_lib.server import utils as server_utils
from autotest_lib.server import hosts
from autotest_lib.server.hosts import abstract_ssh
-from autotest_lib.server.hosts import afe_store
from autotest_lib.server.hosts import file_store
-from autotest_lib.server.hosts import shadowing_store
from autotest_lib.server.hosts import factory as host_factory
from autotest_lib.server.hosts import host_info
from autotest_lib.server.hosts import ssh_multiplex
@@ -1563,32 +1561,6 @@
return file_store.FileStore(backing_file_path)
-def _create_afe_backed_host_info_store(store_dir, hostname):
- """Create a CachingHostInfoStore backed by the AFE.
-
- @param store_dir: A directory to contain store backing files.
- @param hostname: Name of the host for which we want the store.
-
- @returns: An object of type CachingHostInfoStore.
- """
- primary_store = afe_store.AfeStore(hostname)
- try:
- primary_store.get(force_refresh=True)
- except host_info.StoreError:
- raise error.AutoservError(
- 'Could not obtain HostInfo for hostname %s' % hostname)
- # Since the store wasn't initialized external to autoserv, we must
- # ensure that the store we create is unique within store_dir.
- backing_file_path = os.path.join(
- _make_unique_subdir(store_dir),
- '%s.store' % hostname,
- )
- logging.info('Shadowing AFE store with a FileStore at %s',
- backing_file_path)
- shadow_store = file_store.FileStore(backing_file_path)
- return shadowing_store.ShadowingStore(primary_store, shadow_store)
-
-
def _make_unique_subdir(workdir):
"""Creates a new subdir within workdir and returns the path to it."""
store_dir = os.path.join(workdir, 'dir_%s' % uuid.uuid4())