tauto - removing more client libs
BUG=None
TEST=This
Change-Id: I22e8a18549d9838749853ee0d1a499ea91c7a9c3
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/tauto/+/3122539
Reviewed-by: Ruben Zakarian <rzakarian@chromium.org>
Tested-by: Derek Beckett <dbeckett@chromium.org>
diff --git a/client/common_lib/cros/assistant_util.py b/client/common_lib/cros/assistant_util.py
deleted file mode 100644
index 8fa0003..0000000
--- a/client/common_lib/cros/assistant_util.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# assistant_util.py is supposed to be called from chrome.py for Assistant
-# specific logic.
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import utils
-from telemetry.core import exceptions
-
-
-def enable_assistant(autotest_ext):
- """Enables Google Assistant.
-
- @param autotest_ext private autotest extension.
- @raise error.TestFail if failed to start Assistant service within time.
- """
- if autotest_ext is None:
- raise error.TestFail('Could not start Assistant service because '
- 'autotest extension is not available.')
-
- try:
- autotest_ext.ExecuteJavaScript('''
- window.__assistant_ready = 0;
- chrome.autotestPrivate.setAssistantEnabled(true,
- 10 * 1000 /* timeout_ms */,
- () => {
- if (chrome.runtime.lastError) {
- window.__assistant_ready = -1;
- window.__assistant_error_msg =
- chrome.runtime.lastError.message;
- } else {
- window.__assistant_ready = 1;
- }
- });
- ''')
- except exceptions.EvaluateException as e:
- raise error.TestFail('Could not start Assistant "%s".' % e)
-
- ready = utils.poll_for_condition(
- lambda: autotest_ext.EvaluateJavaScript(
- 'window.__assistant_ready'),
- desc='Wait for the assistant running state to return.')
-
- if ready == -1:
- raise error.TestFail(
- autotest_ext.EvaluateJavaScript(
- 'window.__assistant_error_msg'))
-
-
-def enable_hotword(autotest_ext):
- """Enables hotword in Google Assistant.
-
- @param autotest_ext private autotest extension.
- @raise error.TestFail if failed to enable hotword feature within time.
- """
- try:
- autotest_ext.ExecuteJavaScript('''
- window.__assistant_hotword_ready = 0;
- chrome.autotestPrivate.setWhitelistedPref(
- 'settings.voice_interaction.hotword.enabled', true,
- function(response) {
- if (chrome.runtime.lastError) {
- window.__assistant_hotword_ready = -1;
- window.__assistant_hotword_error_msg =
- chrome.runtime.lastError.message;
- } else {
- window.__assistant_hotword_ready = 1;
- }
- });
- ''')
- except exceptions.EvaluateException as e:
- raise error.TestFail('Could not enable Hotword "{}".'.format(e))
-
- ready = utils.poll_for_condition(
- lambda: autotest_ext.EvaluateJavaScript(
- 'window.__assistant_hotword_ready'),
- desc='Wait for the hotword pref change event to return".')
-
- if ready == -1:
- raise error.TestFail(
- autotest_ext.EvaluateJavaScript(
- 'window.__assistant_hotword_error_msg'))
-
-
-def send_text_query(autotest_ext, text_query):
- """Sends text query to Assistant and returns response.
-
- @param autotest_ext private autotest extension.
- @param text_query text query.
- @return dictionary containing the information of Assistant query
- response, mapping from response type to content.
- """
- try:
- autotest_ext.ExecuteJavaScript('''
- window.__assistant_response_ready = 0;
- chrome.autotestPrivate.sendAssistantTextQuery('%s', 10 * 1000,
- function(response) {
- if (chrome.runtime.lastError) {
- window.__assistant_response_ready = -1;
- window.__assistant_error_msg =
- chrome.runtime.lastError.message;
- } else {
- window.__assistant_response_ready = 1;
- window.__query_response = response;
- }
- });
- ''' % text_query)
- except exceptions.EvaluateException as e:
- raise error.TestFail('Could not get Assistant response "%s".' % e)
-
- is_ready = utils.poll_for_condition(
- lambda: autotest_ext.EvaluateJavaScript(
- 'window.__assistant_response_ready'),
- desc='Waiting for Assistant response.')
-
- if is_ready == -1:
- raise error.TestFail(
- autotest_ext.EvaluateJavaScript(
- 'window.__assistant_error_msg'))
-
- return autotest_ext.EvaluateJavaScript('window.__query_response')
diff --git a/client/common_lib/cros/authpolicy.py b/client/common_lib/cros/authpolicy.py
deleted file mode 100644
index 2d2e359..0000000
--- a/client/common_lib/cros/authpolicy.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""
-Wrapper for D-Bus calls ot the AuthPolicy daemon.
-"""
-
-import logging
-import os
-import sys
-
-import dbus
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import utils
-from autotest_lib.client.cros import upstart
-
-
-class AuthPolicy(object):
- """
- Wrapper for D-Bus calls ot the AuthPolicy daemon.
-
- The AuthPolicy daemon handles Active Directory domain join, user
- authentication and policy fetch. This class is a wrapper around the D-Bus
- interface to the daemon.
-
- """
-
- # Log file written by authpolicyd.
- _LOG_FILE = '/var/log/authpolicy.log'
-
- # Number of log lines to include in error logs.
- _LOG_LINE_LIMIT = 50
-
- # The usual system log file (minijail logs there!).
- _SYSLOG_FILE = '/var/log/messages'
-
- # Authpolicy daemon D-Bus parameters.
- _DBUS_SERVICE_NAME = 'org.chromium.AuthPolicy'
- _DBUS_SERVICE_PATH = '/org/chromium/AuthPolicy'
- _DBUS_INTERFACE_NAME = 'org.chromium.AuthPolicy'
- _DBUS_ERROR_SERVICE_UNKNOWN = 'org.freedesktop.DBus.Error.ServiceUnknown'
-
- # Default timeout in seconds for D-Bus calls.
- _DEFAULT_TIMEOUT = 120
-
- def __init__(self, bus_loop, proto_binding_location):
- """
- Constructor
-
- Creates and returns a D-Bus connection to authpolicyd. The daemon must
- be running.
-
- @param bus_loop: glib main loop object.
- @param proto_binding_location: the location of generated python bindings
- for authpolicy protobufs.
- """
-
- # Pull in protobuf bindings.
- sys.path.append(proto_binding_location)
-
- self._bus_loop = bus_loop
- self.restart()
-
- def restart(self):
- """
- Restarts authpolicyd and rebinds to D-Bus interface.
- """
- logging.info('restarting authpolicyd')
- upstart.restart_job('authpolicyd')
- bus = dbus.SystemBus(self._bus_loop)
- proxy = bus.get_object(self._DBUS_SERVICE_NAME,
- self._DBUS_SERVICE_PATH)
- self._authpolicyd = dbus.Interface(proxy, self._DBUS_INTERFACE_NAME)
-
- def stop(self):
- """
- Turns debug logs off.
-
- Stops authpolicyd.
- """
- logging.info('stopping authpolicyd')
-
- # Reset log level and stop. Ignore errors that occur when authpolicy is
- # already down.
- try:
- self.set_default_log_level(0)
- except dbus.exceptions.DBusException as ex:
- if ex.get_dbus_name() != self._DBUS_ERROR_SERVICE_UNKNOWN:
- raise
- try:
- upstart.stop_job('authpolicyd')
- except error.CmdError as ex:
- if (ex.result_obj.exit_status == 0):
- raise
-
- self._authpolicyd = None
-
- def join_ad_domain(self,
- user_principal_name,
- password,
- machine_name,
- machine_domain=None,
- machine_ou=None):
- """
- Joins a machine (=device) to an Active Directory domain.
-
- @param user_principal_name: Logon name of the user (with @realm) who
- joins the machine to the domain.
- @param password: Password corresponding to user_principal_name.
- @param machine_name: Netbios computer (aka machine) name for the joining
- device.
- @param machine_domain: Domain (realm) the machine should be joined to.
- If not specified, the machine is joined to the user's realm.
- @param machine_ou: Array of organizational units (OUs) from leaf to
- root. The machine is put into the leaf OU. If not specified, the
- machine account is created in the default 'Computers' OU.
-
- @return A tuple with the ErrorType and the joined domain returned by the
- D-Bus call.
-
- """
-
- from active_directory_info_pb2 import JoinDomainRequest
-
- request = JoinDomainRequest()
- request.user_principal_name = user_principal_name
- request.machine_name = machine_name
- if machine_ou:
- request.machine_ou.extend(machine_ou)
- if machine_domain:
- request.machine_domain = machine_domain
-
- with self.PasswordFd(password) as password_fd:
- return self._authpolicyd.JoinADDomain(
- dbus.ByteArray(request.SerializeToString()),
- dbus.types.UnixFd(password_fd),
- timeout=self._DEFAULT_TIMEOUT,
- byte_arrays=True)
-
- def authenticate_user(self, user_principal_name, account_id, password):
- """
- Authenticates a user with an Active Directory domain.
-
- @param user_principal_name: User logon name (user@example.com) for the
- Active Directory domain.
- #param account_id: User account id (aka objectGUID). May be empty.
- @param password: Password corresponding to user_principal_name.
-
- @return A tuple with the ErrorType and an ActiveDirectoryAccountInfo
- blob string returned by the D-Bus call.
-
- """
-
- from active_directory_info_pb2 import ActiveDirectoryAccountInfo
- from active_directory_info_pb2 import AuthenticateUserRequest
- from active_directory_info_pb2 import ERROR_NONE
-
- request = AuthenticateUserRequest()
- request.user_principal_name = user_principal_name
- if account_id:
- request.account_id = account_id
-
- with self.PasswordFd(password) as password_fd:
- error_value, account_info_blob = self._authpolicyd.AuthenticateUser(
- dbus.ByteArray(request.SerializeToString()),
- dbus.types.UnixFd(password_fd),
- timeout=self._DEFAULT_TIMEOUT,
- byte_arrays=True)
- account_info = ActiveDirectoryAccountInfo()
- if error_value == ERROR_NONE:
- account_info.ParseFromString(account_info_blob)
- return error_value, account_info
-
- def refresh_user_policy(self, account_id):
- """
- Fetches user policy and sends it to Session Manager.
-
- @param account_id: User account ID (aka objectGUID).
-
- @return ErrorType from the D-Bus call.
-
- """
-
- return self._authpolicyd.RefreshUserPolicy(
- dbus.String(account_id),
- timeout=self._DEFAULT_TIMEOUT,
- byte_arrays=True)
-
- def refresh_device_policy(self):
- """
- Fetches device policy and sends it to Session Manager.
-
- @return ErrorType from the D-Bus call.
-
- """
-
- return self._authpolicyd.RefreshDevicePolicy(
- timeout=self._DEFAULT_TIMEOUT, byte_arrays=True)
-
- def change_machine_password(self):
- """
- Changes machine password.
-
- @return ErrorType from the D-Bus call.
-
- """
- return self._authpolicyd.ChangeMachinePasswordForTesting(
- timeout=self._DEFAULT_TIMEOUT, byte_arrays=True)
-
- def set_default_log_level(self, level):
- """
- Fetches device policy and sends it to Session Manager.
-
- @param level: Log level, 0=quiet, 1=taciturn, 2=chatty, 3=verbose.
-
- @return error_message: Error message, empty if no error occurred.
-
- """
-
- return self._authpolicyd.SetDefaultLogLevel(level, byte_arrays=True)
-
- def print_log_tail(self):
- """
- Prints out authpolicyd log tail. Catches and prints out errors.
-
- """
-
- try:
- cmd = 'tail -n %s %s' % (self._LOG_LINE_LIMIT, self._LOG_FILE)
- log_tail = utils.run(cmd).stdout
- logging.info('Tail of %s:\n%s', self._LOG_FILE, log_tail)
- except error.CmdError as ex:
- logging.error('Failed to print authpolicyd log tail: %s', ex)
-
- def print_seccomp_failure_info(self):
- """
- Detects seccomp failures and prints out the failing syscall.
-
- """
-
- # Exit code 253 is minijail's marker for seccomp failures.
- cmd = 'grep -q "exit code 253" %s' % self._LOG_FILE
- if utils.run(cmd, ignore_status=True).exit_status == 0:
- logging.error('Seccomp failure detected!')
- cmd = 'grep -oE "blocked syscall: \\w+" %s | tail -1' % \
- self._SYSLOG_FILE
- try:
- logging.error(utils.run(cmd).stdout)
- logging.error(
- 'This can happen if you changed a dependency of '
- 'authpolicyd. Consider allowlisting this syscall in '
- 'the appropriate -seccomp.policy file in authpolicyd.'
- '\n')
- except error.CmdError as ex:
- logging.error(
- 'Failed to determine reason for seccomp issue: %s', ex)
-
- def clear_log(self):
- """
- Clears the authpolicy daemon's log file.
-
- """
-
- try:
- utils.run('echo "" > %s' % self._LOG_FILE)
- except error.CmdError as ex:
- logging.error('Failed to clear authpolicyd log file: %s', ex)
-
- class PasswordFd(object):
- """
- Writes password into a file descriptor.
-
- Use in a 'with' statement to automatically close the returned file
- descriptor.
-
- @param password: Plaintext password string.
-
- @return A file descriptor (pipe) containing the password.
-
- """
-
- def __init__(self, password):
- self._password = password
- self._read_fd = None
-
- def __enter__(self):
- """Creates the password file descriptor."""
- self._read_fd, write_fd = os.pipe()
- os.write(write_fd, self._password)
- os.close(write_fd)
- return self._read_fd
-
- def __exit__(self, mytype, value, traceback):
- """Closes the password file descriptor again."""
- if self._read_fd:
- os.close(self._read_fd)
diff --git a/client/common_lib/cros/autotest_private_ext/background.js b/client/common_lib/cros/autotest_private_ext/background.js
deleted file mode 100644
index a7c19b3..0000000
--- a/client/common_lib/cros/autotest_private_ext/background.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
diff --git a/client/common_lib/cros/autotest_private_ext/manifest.json b/client/common_lib/cros/autotest_private_ext/manifest.json
deleted file mode 100644
index 99f0622..0000000
--- a/client/common_lib/cros/autotest_private_ext/manifest.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "key": "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDuUZGKCDbff6IRaxa4Pue7PPkxwPaNhGT3JEqppEsNWFjM80imEdqMbf3lrWqEfaHgaNku7nlpwPO1mu3/4Hr+XdNa5MhfnOnuPee4hyTLwOs3Vzz81wpbdzUxZSi2OmqMyI5oTaBYICfNHLwcuc65N5dbt6WKGeKgTpp4v7j7zwIDAQAB",
- "description": "Telemetry ChromeOS Autotest component extension",
- "name": "Telemetry ChromeOS AutoTest Component Extension",
- "background": {
- "scripts": ["background.js"]
- },
- "manifest_version": 2,
- "version": "0.1",
- "permissions": [
- "autotestPrivate",
- "inputMethodPrivate",
- "languageSettingsPrivate",
- "power",
- "settingsPrivate",
- "system.display"
- ],
- "automation": {
- "interact": true,
- "desktop": true
- }
-}
diff --git a/client/common_lib/cros/avahi_utils.py b/client/common_lib/cros/avahi_utils.py
deleted file mode 100644
index af52e38..0000000
--- a/client/common_lib/cros/avahi_utils.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import six.moves.configparser
-import io
-import collections
-import logging
-import shlex
-import time
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.cros import dbus_send
-
-BUS_NAME = 'org.freedesktop.Avahi'
-INTERFACE_SERVER = 'org.freedesktop.Avahi.Server'
-
-ServiceRecord = collections.namedtuple(
- 'ServiceRecord',
- ['interface', 'protocol', 'name', 'record_type', 'domain',
- 'hostname', 'address', 'port', 'txt'])
-
-
-def avahi_config(options, src_file='/etc/avahi/avahi-daemon.conf', host=None):
- """Creates a temporary avahi-daemon.conf file with the specified changes.
-
- Avahi daemon uses a text configuration file with sections and values
- assigned to options on that section. This function creates a new config
- file based on the one provided and a set of changes. The changes are
- specified as triples of section, option and value that override the existing
- options on the config file. If a value of None is specified for any triplet,
- the corresponding option will be removed from the file.
-
- @param options: A list of triplets of the form (section, option, value).
- @param src_file: The default config file to use as a base for the changes.
- @param host: An optional host object if running against a remote host.
- @return: The filename of a temporary file with the new configuration file.
-
- """
- run = utils.run if host is None else host.run
- existing_config = run('cat %s 2> /dev/null' % src_file).stdout
- conf = six.moves.configparser.SafeConfigParser()
- conf.readfp(io.BytesIO(existing_config))
-
- for section, option, value in options:
- if not conf.has_section(section):
- conf.add_section(section)
- if value is None:
- conf.remove_option(section, option)
- else:
- conf.set(section, option, value)
-
- tmp_conf_file = run('mktemp -t avahi-conf.XXXX').stdout.strip()
- lines = []
- for section in conf.sections():
- lines.append('[%s]' % section)
- for option in conf.options(section):
- lines.append('%s=%s' % (option, conf.get(section, option)))
- run('cat <<EOF >%s\n%s\nEOF\n' % (tmp_conf_file, '\n'.join(lines)))
- return tmp_conf_file
-
-
-def avahi_ping(host=None):
- """Returns True when the avahi-deamon's DBus interface is ready.
-
- After your launch avahi-daemon, there is a short period of time where the
- daemon is running but the DBus interface isn't ready yet. This functions
- blocks for a few seconds waiting for a ping response from the DBus API
- and returns wether it got a response.
-
- @param host: An optional host object if running against a remote host.
- @return boolean: True if Avahi is up and in a stable state.
-
- """
- result = dbus_send.dbus_send(BUS_NAME, INTERFACE_SERVER, '/', 'GetState',
- host=host, timeout_seconds=2,
- tolerate_failures=True)
- # AVAHI_ENTRY_GROUP_ESTABLISHED == 2
- return result is not None and result.response == 2
-
-
-def avahi_start(config_file=None, host=None):
- """Start avahi-daemon with the provided config file.
-
- This function waits until the avahi-daemon is ready listening on the DBus
- interface. If avahi fails to be ready after 10 seconds, an error is raised.
-
- @param config_file: The filename of the avahi-daemon config file or None to
- use the default.
- @param host: An optional host object if running against a remote host.
-
- """
- run = utils.run if host is None else host.run
- env = ''
- if config_file is not None:
- env = ' AVAHI_DAEMON_CONF="%s"' % config_file
- run('start avahi %s' % env, ignore_status=False)
- # Wait until avahi is ready.
- deadline = time.time() + 10.
- while time.time() < deadline:
- if avahi_ping(host=host):
- return
- time.sleep(0.1)
- raise error.TestError('avahi-daemon is not ready after 10s running.')
-
-
-def avahi_stop(ignore_status=False, host=None):
- """Stop the avahi daemon.
-
- @param ignore_status: True to ignore failures while stopping avahi.
- @param host: An optional host object if running against a remote host.
-
- """
- run = utils.run if host is None else host.run
- run('stop avahi', ignore_status=ignore_status)
-
-
-def avahi_start_on_iface(iface, host=None):
- """Starts avahi daemon listening only on the provided interface.
-
- @param iface: A string with the interface name.
- @param host: An optional host object if running against a remote host.
-
- """
- run = utils.run if host is None else host.run
- opts = [('server', 'allow-interfaces', iface),
- ('server', 'deny-interfaces', None)]
- conf = avahi_config(opts, host=host)
- avahi_start(config_file=conf, host=host)
- run('rm %s' % conf)
-
-
-def avahi_get_hostname(host=None):
- """Get the lan-unique hostname of the the device.
-
- @param host: An optional host object if running against a remote host.
- @return string: the lan-unique hostname of the DUT.
-
- """
- result = dbus_send.dbus_send(
- BUS_NAME, INTERFACE_SERVER, '/', 'GetHostName',
- host=host, timeout_seconds=2, tolerate_failures=True)
- return None if result is None else result.response
-
-
-def avahi_get_domain_name(host=None):
- """Get the current domain name being used by Avahi.
-
- @param host: An optional host object if running against a remote host.
- @return string: the current domain name being used by Avahi.
-
- """
- result = dbus_send.dbus_send(
- BUS_NAME, INTERFACE_SERVER, '/', 'GetDomainName',
- host=host, timeout_seconds=2, tolerate_failures=True)
- return None if result is None else result.response
-
-
-def avahi_browse(host=None, ignore_local=True):
- """Browse mDNS service records with avahi-browse.
-
- Some example avahi-browse output (lines are wrapped for readability):
-
- localhost ~ # avahi-browse -tarlp
- +;eth1;IPv4;E58E8561-3BCA-4910-ABC7-BD8779D7D761;_serbus._tcp;local
- +;eth1;IPv4;E58E8561-3BCA-4910-ABC7-BD8779D7D761;_privet._tcp;local
- =;eth1;IPv4;E58E8561-3BCA-4910-ABC7-BD8779D7D761;_serbus._tcp;local;\
- 9bcd92bbc1f91f2ee9c9b2e754cfd22e.local;172.22.23.237;0;\
- "ver=1.0" "services=privet" "id=11FB0AD6-6C87-433E-8ACB-0C68EE78CDBD"
- =;eth1;IPv4;E58E8561-3BCA-4910-ABC7-BD8779D7D761;_privet._tcp;local;\
- 9bcd92bbc1f91f2ee9c9b2e754cfd22e.local;172.22.23.237;8080;\
- "ty=Unnamed Device" "txtvers=3" "services=_camera" "model_id=///" \
- "id=FEE9B312-1F2B-4B9B-813C-8482FA75E0DB" "flags=AB" "class=BB"
-
- @param host: An optional host object if running against a remote host.
- @param ignore_local: boolean True to ignore local service records.
- @return list of ServiceRecord objects parsed from output.
-
- """
- run = utils.run if host is None else host.run
- flags = ['--terminate', # Terminate after looking for a short time.
- '--all', # Show all services, regardless of type.
- '--resolve', # Resolve the services discovered.
- '--parsable', # Print service records in a parsable format.
- ]
- if ignore_local:
- flags.append('--ignore-local')
- result = run('avahi-browse %s' % ' '.join(flags))
- records = []
- for line in result.stdout.strip().splitlines():
- parts = line.split(';')
- if parts[0] == '+':
- # Skip it, just parse the resolved record.
- continue
- # Do minimal parsing of the TXT record.
- parts[-1] = shlex.split(parts[-1])
- records.append(ServiceRecord(*parts[1:]))
- logging.debug('Found %r', records[-1])
- return records
diff --git a/client/common_lib/cros/cfm_hangouts_api.py b/client/common_lib/cros/cfm_hangouts_api.py
deleted file mode 100644
index 966b58f..0000000
--- a/client/common_lib/cros/cfm_hangouts_api.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-
-DEFAULT_TIMEOUT = 30
-DIAGNOSTIC_RUN_TIMEOUT = 180
-
-
-class CfmHangoutsAPI(object):
- """Utility class for interacting with Hangouts in CFM."""
-
- def __init__(self, webview_context):
- self._webview_context = webview_context
-
-
- def wait_for_meetings_in_call_page(self):
- """Waits for the in-call page to launch."""
- raise NotImplementedError
-
-
- def wait_for_meetings_landing_page(self):
- """Waits for the landing page screen."""
- raise NotImplementedError
-
-
- # UI commands/functions
- def wait_for_oobe_start_page(self):
- """Wait for oobe start screen to launch."""
- self._webview_context.WaitForJavaScriptCondition(
- "window.hasOwnProperty('hrOobIsStartPageForTest') "
- "&& window.hrOobIsStartPageForTest() === true;",
- timeout=DEFAULT_TIMEOUT)
- logging.info('Reached oobe start page')
-
-
- def skip_oobe_screen(self):
- """Skip Chromebox for Meetings oobe screen."""
- self._webview_context.ExecuteJavaScript("window.hrOobSkipForTest()")
- utils.poll_for_condition(
- lambda: not self._webview_context.EvaluateJavaScript(
- "window.hrOobIsStartPageForTest()"),
- exception=error.TestFail('Not able to skip oobe screen.'),
- timeout=DEFAULT_TIMEOUT,
- sleep_interval=1)
- logging.info('Skipped oobe screen.')
-
-
- def is_oobe_start_page(self):
- """Check if device is on CFM oobe start screen."""
- if self._webview_context.EvaluateJavaScript(
- "window.hrOobIsStartPageForTest()"):
- logging.info('Is on oobe start page.')
- return True
- logging.info('Is not on oobe start page.')
- return False
-
-
- # Hangouts commands/functions
- def start_new_hangout_session(self, hangout_name):
- """Start a new hangout session.
-
- @param hangout_name: Name of the hangout session.
- """
- if not self.is_ready_to_start_hangout_session():
- if self.is_in_hangout_session():
- self.end_hangout_session()
- utils.poll_for_condition(
- lambda: self._webview_context.EvaluateJavaScript(
- "window.hrIsReadyToStartHangoutForTest()"),
- exception=error.TestFail(
- 'Not ready to start hangout session.'),
- timeout=DEFAULT_TIMEOUT,
- sleep_interval=1)
-
- self._webview_context.ExecuteJavaScript("window.hrStartCallForTest('" +
- hangout_name + "')")
- utils.poll_for_condition(
- lambda: self._webview_context.EvaluateJavaScript(
- "window.hrIsInHangoutForTest()"),
- exception=error.TestFail('Not able to start session.'),
- timeout=DEFAULT_TIMEOUT,
- sleep_interval=1)
- logging.info('Started hangout session: %s', hangout_name)
-
-
- def end_hangout_session(self):
- """End current hangout session."""
- self._webview_context.ExecuteJavaScript("window.hrHangupCallForTest()")
- utils.poll_for_condition(
- lambda: not self._webview_context.EvaluateJavaScript(
- "window.hrIsInHangoutForTest()"),
- exception=error.TestFail('Not able to end session.'),
- timeout=DEFAULT_TIMEOUT,
- sleep_interval=1)
-
- logging.info('Ended hangout session.')
-
-
- def is_in_hangout_session(self):
- """Check if device is in hangout session."""
- if self._webview_context.EvaluateJavaScript(
- "window.hrIsInHangoutForTest()"):
- logging.info('Is in hangout session.')
- return True
- logging.info('Is not in hangout session.')
- return False
-
-
- def is_ready_to_start_hangout_session(self):
- """Check if device is ready to start a new hangout session."""
- if (self._webview_context.EvaluateJavaScript(
- "window.hrIsReadyToStartHangoutForTest()")):
- logging.info('Is ready to start hangout session.')
- return True
- logging.info('Is not ready to start hangout session.')
- return False
-
-
- def join_meeting_session(self, meeting_name):
- """Joins a meeting.
-
- @param meeting_name: Name of the meeting session.
- """
- raise NotImplementedError
-
-
- def end_meeting_session(self):
- """End current meeting session."""
- raise NotImplementedError
-
-
- def is_in_meeting_session(self):
- """Check if device is in meeting session."""
- raise NotImplementedError
-
-
- def get_participant_count(self):
- """Returns the total number of participants in a hangout."""
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetParticipantsCountInCallForTest()")
-
-
- # Diagnostics commands/functions
- def is_diagnostic_run_in_progress(self):
- """Check if hotrod diagnostics is running."""
- if (self._webview_context.EvaluateJavaScript(
- "window.hrIsDiagnosticRunInProgressForTest()")):
- logging.info('Diagnostic run is in progress.')
- return True
- logging.info('Diagnostic run is not in progress.')
- return False
-
-
- def wait_for_diagnostic_run_to_complete(self):
- """Wait for hotrod diagnostics to complete."""
- utils.poll_for_condition(
- lambda: not self._webview_context.EvaluateJavaScript(
- "window.hrIsDiagnosticRunInProgressForTest()"),
- exception=error.TestError('Diagnostic run still in progress '
- 'after 3 minutes.'),
- timeout=DIAGNOSTIC_RUN_TIMEOUT,
- sleep_interval=1)
-
-
- def run_diagnostics(self):
- """Run hotrod diagnostics."""
- if self.is_diagnostic_run_in_progress():
- self.wait_for_diagnostic_run_to_complete()
- self._webview_context.ExecuteJavaScript(
- "window.hrRunDiagnosticsForTest()")
- logging.info('Started diagnostics run.')
-
-
- def get_last_diagnostics_results(self):
- """Get latest hotrod diagnostics results."""
- if self.is_diagnostic_run_in_progress():
- self.wait_for_diagnostic_run_to_complete()
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetLastDiagnosticsResultForTest()")
-
-
- # Mic audio commands/functions
- def is_mic_muted(self):
- """Check if mic is muted."""
- if self._webview_context.EvaluateJavaScript(
- "window.hrGetAudioInMutedForTest()"):
- logging.info('Mic is muted.')
- return True
- logging.info('Mic is not muted.')
- return False
-
-
- def mute_mic(self):
- """Local mic mute from toolbar."""
- self._webview_context.ExecuteJavaScript(
- "window.hrSetAudioInMutedForTest(true)")
- logging.info('Locally muted mic.')
-
-
- def unmute_mic(self):
- """Local mic unmute from toolbar."""
- self._webview_context.ExecuteJavaScript(
- "window.hrSetAudioInMutedForTest(false)")
- logging.info('Locally unmuted mic.')
-
-
- def remote_mute_mic(self):
- """Remote mic mute request from cPanel."""
- self._webview_context.ExecuteJavaScript("window.hrMuteAudioForTest()")
- logging.info('Remotely muted mic.')
-
-
- def remote_unmute_mic(self):
- """Remote mic unmute request from cPanel."""
- self._webview_context.ExecuteJavaScript(
- "window.hrUnmuteAudioForTest()")
- logging.info('Remotely unmuted mic.')
-
-
- def get_mic_devices(self):
- """Get all mic devices detected by hotrod."""
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetAudioInDevicesForTest()")
-
-
- def get_preferred_mic(self):
- """Get mic preferred for hotrod."""
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetAudioInPrefForTest()")
-
-
- def set_preferred_mic(self, mic):
- """Set preferred mic for hotrod.
-
- @param mic: String with mic name.
- """
- self._webview_context.ExecuteJavaScript(
- "window.hrSetAudioInPrefForTest('" + mic + "')")
- logging.info('Setting preferred mic to %s.', mic)
-
-
- # Speaker commands/functions
- def get_speaker_devices(self):
- """Get all speaker devices detected by hotrod."""
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetAudioOutDevicesForTest()")
-
-
- def get_preferred_speaker(self):
- """Get speaker preferred for hotrod."""
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetAudioOutPrefForTest()")
-
-
- def set_preferred_speaker(self, speaker):
- """Set preferred speaker for hotrod.
-
- @param mic: String with speaker name.
- """
- self._webview_context.ExecuteJavaScript(
- "window.hrSetAudioOutPrefForTest('" + speaker + "')")
- logging.info('Set preferred speaker to %s.', speaker)
-
-
- def set_speaker_volume(self, vol_level):
- """Set speaker volume.
-
- @param vol_level: String value ranging from 0-100 to set volume to.
- """
- self._webview_context.ExecuteJavaScript(
- "window.hrSetAudioOutVolumeLevelForTest('" + vol_level + "')")
- logging.info('Set speaker volume to %s', vol_level)
-
-
- def get_speaker_volume(self):
- """Get current speaker volume."""
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetAudioOutVolumeLevelForTest()")
-
-
- def play_test_sound(self):
- """Play test sound."""
- self._webview_context.ExecuteJavaScript(
- "window.hrPlayTestSoundForTest()")
- logging.info('Playing test sound.')
-
-
- # Camera commands/functions
- def get_camera_devices(self):
- """Get all camera devices detected by hotrod."""
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetVideoCaptureDevicesForTest()")
-
-
- def get_preferred_camera(self):
- """Get camera preferred for hotrod."""
- return self._webview_context.EvaluateJavaScript(
- "window.hrGetVideoCapturePrefForTest()")
-
-
- def set_preferred_camera(self, camera):
- """Set preferred camera for hotrod.
-
- @param mic: String with camera name.
- """
- self._webview_context.ExecuteJavaScript(
- "window.hrSetVideoCapturePrefForTest('" + camera + "')")
- logging.info('Set preferred camera to %s.', camera)
-
-
- def is_camera_muted(self):
- """Check if camera is muted (turned off)."""
- if self._webview_context.EvaluateJavaScript(
- "window.hrGetVideoCaptureMutedForTest()"):
- logging.info('Camera is muted.')
- return True
- logging.info('Camera is not muted.')
- return False
-
-
- def mute_camera(self):
- """Turned camera off."""
- self._webview_context.ExecuteJavaScript(
- "window.hrSetVideoCaptureMutedForTest(true)")
- logging.info('Camera muted.')
-
-
- def unmute_camera(self):
- """Turned camera on."""
- self._webview_context.ExecuteJavaScript(
- "window.hrSetVideoCaptureMutedForTest(false)")
- logging.info('Camera unmuted.')
diff --git a/client/common_lib/cros/cfm_meetings_api.py b/client/common_lib/cros/cfm_meetings_api.py
deleted file mode 100644
index c00915a..0000000
--- a/client/common_lib/cros/cfm_meetings_api.py
+++ /dev/null
@@ -1,295 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-
-from six.moves.urllib.parse import urlparse
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-
-
-DEFAULT_TIMEOUT = 30
-TELEMETRY_API = 'hrTelemetryApi'
-
-
-class CfmMeetingsAPI(object):
- """Utility class for interacting with CfMs."""
-
- def __init__(self, webview_context):
- self._webview_context = webview_context
-
- def _execute_telemetry_command(self, command):
- self._webview_context.ExecuteJavaScript(
- 'window.%s.%s' % (TELEMETRY_API, command))
-
- def _evaluate_telemetry_command(self, command):
- return self._webview_context.EvaluateJavaScript(
- 'window.%s.%s' % (TELEMETRY_API, command))
-
- # UI commands/functions
- def wait_for_meetings_landing_page(self):
- """Waits for the landing page screen."""
- self._webview_context.WaitForJavaScriptCondition(
- 'window.hasOwnProperty("%s") '
- '&& !window.%s.isInMeeting()' % (TELEMETRY_API, TELEMETRY_API),
- timeout=DEFAULT_TIMEOUT)
- logging.info('Reached meetings landing page.')
-
- def wait_for_meetings_in_call_page(self):
- """Waits for the in-call page to launch."""
- self._webview_context.WaitForJavaScriptCondition(
- 'window.hasOwnProperty("%s") '
- '&& window.%s.isInMeeting()' % (TELEMETRY_API, TELEMETRY_API),
- timeout=DEFAULT_TIMEOUT)
- logging.info('Reached meetings in-call page.')
-
- def wait_for_oobe_start_page(self):
- """Wait for oobe start screen to launch."""
- self._webview_context.WaitForJavaScriptCondition(
- 'window.hasOwnProperty("%s") '
- '&& typeof window.%s.skipOobe === "function"' % (
- TELEMETRY_API, TELEMETRY_API),
- timeout=DEFAULT_TIMEOUT)
- logging.info('Reached oobe page.')
-
- def skip_oobe_screen(self):
- """Skip Chromebox for Meetings oobe screen."""
- self._execute_telemetry_command('skipOobe()')
- utils.poll_for_condition(
- lambda: not self.is_oobe_start_page(),
- exception=error.TestFail('Not able to skip oobe screen.'),
- timeout=DEFAULT_TIMEOUT,
- sleep_interval=1)
- logging.info('Skipped oobe screen.')
-
- def is_oobe_start_page(self):
- """Check if device is on CFM oobe start screen."""
- if self._webview_context.EvaluateJavaScript(
- 'window.hasOwnProperty("%s") '
- '&& typeof window.%s.skipOobe === "function"' % (
- TELEMETRY_API, TELEMETRY_API)):
- logging.info('Is on oobe start page.')
- return True
- logging.info('Is not on oobe start page.')
- return False
-
- # Hangouts commands/functions
- def start_meeting_session(self):
- """Start a meeting.
-
- @return code for the started meeting
- """
- if self.is_in_meeting_session():
- self.end_meeting_session()
-
- self._execute_telemetry_command('startMeeting()')
- self.wait_for_meetings_in_call_page()
- meeting_code = self._get_meeting_code()
- logging.info('Started meeting session %s', meeting_code)
- return meeting_code
-
- def _get_meeting_code(self):
- path = urlparse(self._webview_context.GetUrl()).path
- # The meeting code is the last part of the path.
- return path.split('/')[-1]
-
- def join_meeting_session(self, meeting_name):
- """Joins a meeting.
-
- @param meeting_name: Name of the meeting session.
- """
- if self.is_in_meeting_session():
- self.end_meeting_session()
-
- self._execute_telemetry_command('joinMeeting("%s")' % meeting_name)
- self.wait_for_meetings_in_call_page()
- logging.info('Started meeting session: %s', meeting_name)
-
- def end_meeting_session(self):
- """End current meeting session."""
- self._execute_telemetry_command('endCall()')
- self.wait_for_meetings_landing_page()
- logging.info('Ended meeting session.')
-
- def is_in_meeting_session(self):
- """Check if device is in meeting session."""
- if self._evaluate_telemetry_command('isInMeeting()'):
- logging.info('Is in meeting session.')
- return True
- logging.info('Is not in meeting session.')
- return False
-
- def start_new_hangout_session(self, hangout_name):
- """Start a new hangout session.
-
- @param hangout_name: Name of the hangout session.
- """
- raise NotImplementedError
-
- def end_hangout_session(self):
- """End current hangout session."""
- raise NotImplementedError
-
- def is_in_hangout_session(self):
- """Check if device is in hangout session."""
- raise NotImplementedError
-
- def is_ready_to_start_hangout_session(self):
- """Check if device is ready to start a new hangout session."""
- raise NotImplementedError
-
- def get_participant_count(self):
- """Returns the total number of participants in a meeting."""
- return self._evaluate_telemetry_command('getParticipantCount()')
-
- # Diagnostics commands/functions
- def is_diagnostic_run_in_progress(self):
- """Check if hotrod diagnostics is running."""
- raise NotImplementedError
-
- def wait_for_diagnostic_run_to_complete(self):
- """Wait for hotrod diagnostics to complete."""
- raise NotImplementedError
-
- def run_diagnostics(self):
- """Run hotrod diagnostics."""
- raise NotImplementedError
-
- def get_last_diagnostics_results(self):
- """Get latest hotrod diagnostics results."""
- raise NotImplementedError
-
- # Mic audio commands/functions
- def is_mic_muted(self):
- """Check if mic is muted."""
- if self._evaluate_telemetry_command('isMicMuted()'):
- logging.info('Mic is muted.')
- return True
- logging.info('Mic is not muted.')
- return False
-
- def mute_mic(self):
- """Local mic mute from toolbar."""
- self._execute_telemetry_command('setMicMuted(true)')
- logging.info('Locally muted mic.')
-
- def unmute_mic(self):
- """Local mic unmute from toolbar."""
- self._execute_telemetry_command('setMicMuted(false)')
- logging.info('Locally unmuted mic.')
-
- def get_mic_devices(self):
- """Get all mic devices detected by hotrod."""
- return self._evaluate_telemetry_command('getAudioInDevices()')
-
- def get_preferred_mic(self):
- """Get preferred microphone for hotrod."""
- return self._evaluate_telemetry_command('getPreferredAudioInDevice()')
-
- def set_preferred_mic(self, mic_name):
- """Set preferred mic for hotrod.
-
- @param mic_name: String with mic name.
- """
- self._execute_telemetry_command('setPreferredAudioInDevice(%s)'
- % mic_name)
- logging.info('Setting preferred mic to %s.', mic_name)
-
- def remote_mute_mic(self):
- """Remote mic mute request from cPanel."""
- raise NotImplementedError
-
- def remote_unmute_mic(self):
- """Remote mic unmute request from cPanel."""
- raise NotImplementedError
-
- # Speaker commands/functions
- def get_speaker_devices(self):
- """Get all speaker devices detected by hotrod."""
- return self._evaluate_telemetry_command('getAudioOutDevices()')
-
- def get_preferred_speaker(self):
- """Get speaker preferred for hotrod."""
- return self._evaluate_telemetry_command('getPreferredAudioOutDevice()')
-
- def set_preferred_speaker(self, speaker_name):
- """Set preferred speaker for hotrod.
-
- @param speaker_name: String with speaker name.
- """
- self._execute_telemetry_command('setPreferredAudioOutDevice(%s)'
- % speaker_name)
- logging.info('Set preferred speaker to %s.', speaker_name)
-
- def set_speaker_volume(self, volume_level):
- """Set speaker volume.
-
- @param volume_level: Number value ranging from 0-100 to set volume to.
- """
- self._execute_telemetry_command('setAudioOutVolume(%d)' % volume_level)
- logging.info('Set speaker volume to %d', volume_level)
-
- def get_speaker_volume(self):
- """Get current speaker volume."""
- return self._evaluate_telemetry_command('getAudioOutVolume()')
-
- def play_test_sound(self):
- """Play test sound."""
- raise NotImplementedError
-
- # Camera commands/functions
- def get_camera_devices(self):
- """Get all camera devices detected by hotrod.
-
- @return List of camera devices.
- """
- return self._evaluate_telemetry_command('getVideoInDevices()')
-
- def get_preferred_camera(self):
- """Get camera preferred for hotrod."""
- return self._evaluate_telemetry_command('getPreferredVideoInDevice()')
-
- def set_preferred_camera(self, camera_name):
- """Set preferred camera for hotrod.
-
- @param camera_name: String with camera name.
- """
- self._execute_telemetry_command('setPreferredVideoInDevice(%s)'
- % camera_name)
- logging.info('Set preferred camera to %s.', camera_name)
-
- def is_camera_muted(self):
- """Check if camera is muted (turned off)."""
- if self._evaluate_telemetry_command('isCameraMuted()'):
- logging.info('Camera is muted.')
- return True
- logging.info('Camera is not muted.')
- return False
-
- def mute_camera(self):
- """Mute (turn off) camera."""
- self._execute_telemetry_command('setCameraMuted(true)')
- logging.info('Camera muted.')
-
- def unmute_camera(self):
- """Unmute (turn on) camera."""
- self._execute_telemetry_command('setCameraMuted(false)')
- logging.info('Camera unmuted.')
-
- def move_camera(self, camera_motion):
- """Move camera(PTZ functions).
-
- @param camera_motion: String of the desired camera motion.
- """
- ptz_motions = ['panLeft','panRight','panStop',
- 'tiltUp','tiltDown','tiltStop',
- 'zoomIn','zoomOut','resetPosition']
-
- if camera_motion in ptz_motions:
- self._execute_telemetry_command('ptz.%s()' % camera_motion)
- else:
- raise ValueError('Unsupported PTZ camera action: "%s"'
- % camera_motion)
diff --git a/client/common_lib/cros/chip_utils.py b/client/common_lib/cros/chip_utils.py
deleted file mode 100644
index 4d9c61f..0000000
--- a/client/common_lib/cros/chip_utils.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A collection of classes representing TCPC firmware blobs.
-"""
-
-import logging
-import os
-import subprocess
-
-
-class ChipUtilsError(Exception):
- """Error in the chip_utils module."""
-
-
-
-class generic_chip(object):
-
- """A chip we don't actually support."""
-
- chip_name = 'unknown'
- fw_name = None
-
- def __init__(self):
- self.fw_ver = None
- self.fw_file_name = None
-
- def set_fw_ver_from_string(self, version):
- """Sets version property from string."""
- self.fw_ver = int(version, 0)
-
- def set_from_file(self, file_name):
- """Sets chip params from file name.
-
- The typical firmware blob file name format is: <chip>_0x00.bin
-
- Args:
- file_name: Firmware blob file name.
-
- Raises:
- ValueError: Failed to decompose firmware file name.
- """
-
- basename = os.path.basename(file_name)
- if not basename.startswith(self.chip_name):
- raise ValueError('filename did not start with %s' % self.chip_name)
- fname = basename.split('.')[0]
- if '_' in fname:
- rev = fname.split('_')[-1]
- self.set_fw_ver_from_string(rev)
- else:
- logging.info('No fw ver found in filename %s', basename)
- self.fw_file_name = file_name
-
-
-class ps8751(generic_chip):
-
- """The PS8751 TCPC chip."""
-
- chip_name = 'ps8751'
- fw_name = 'ps8751_a3'
- extension = '.bin'
- hash_extension = '.hash'
- cbfs_bin_name = fw_name + extension
- cbfs_hash_name = fw_name + hash_extension
-
- def fw_ver_from_hash(self, blob):
- """Return the firmware version encoded in the firmware hash."""
-
- return blob[1]
-
- def compute_hash_bytes(self):
- """Generates the firmware blob hash."""
-
- if self.fw_ver is None:
- raise ChipUtilsError('fw_ver not initialized')
-
- h = bytearray(2)
- h[0] = 0xa3
- h[1] = self.fw_ver
- return h
-
-
-class anx3429(generic_chip):
-
- """The ANX3429 TCPC chip."""
-
- chip_name = 'anx3429'
- fw_name = 'anx3429_ocm'
- extension = '.bin'
- hash_extension = '.hash'
- cbfs_bin_name = fw_name + extension
- cbfs_hash_name = fw_name + hash_extension
-
- def fw_ver_from_hash(self, blob):
- """Return the firmware version encoded in the passed-in hash content."""
- return blob[0]
-
- def compute_hash_bytes(self):
- """Generates the firmware blob hash."""
-
- if self.fw_ver is None:
- raise ChipUtilsError('fw_ver not initialized')
-
- h = bytearray(1)
- h[0] = self.fw_ver
- return h
-
-
-class ecrw(generic_chip):
-
- """Chrome EC RW portion."""
-
- chip_name = 'ecrw'
- fw_name = 'ecrw'
- extension = ''
- hash_extension = '.hash'
- cbfs_bin_name = fw_name
- cbfs_hash_name = fw_name + hash_extension
-
- def compute_hash_bytes(self):
- """Generates the firmware blob hash."""
-
- if self.fw_file_name is None:
- raise ChipUtilsError('fw_file_name not initialized')
-
- if not os.path.exists(self.fw_file_name):
- raise ChipUtilsError('%s does not exist' % self.fw_file_name)
-
- # openssl outputs the result to stdout
- cmd = 'openssl dgst -sha256 -binary %s' % self.fw_file_name
- result = subprocess.check_output(cmd, shell=True)
- return bytearray(result)
-
-
-chip_id_map = {
- 'ecrw': ecrw,
- '0x8751': ps8751,
- '0x3429': anx3429,
-}
diff --git a/client/common_lib/cros/chrome.py b/client/common_lib/cros/chrome.py
deleted file mode 100644
index db31bf2..0000000
--- a/client/common_lib/cros/chrome.py
+++ /dev/null
@@ -1,454 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import logging
-import os
-import re
-
-from autotest_lib.client.common_lib.cros import arc_common
-from autotest_lib.client.common_lib.cros import arc_util
-from autotest_lib.client.common_lib.cros import assistant_util
-from autotest_lib.client.cros import constants
-from autotest_lib.client.bin import utils
-from six.moves import range
-from telemetry.core import cros_interface, exceptions
-from telemetry.internal.browser import browser_finder, browser_options
-from telemetry.internal.browser import extension_to_load
-
-import py_utils
-
-Error = exceptions.Error
-
-
-def NormalizeEmail(username):
- """Remove dots from username. Add @gmail.com if necessary.
-
- TODO(achuith): Get rid of this when crbug.com/358427 is fixed.
-
- @param username: username/email to be scrubbed.
- """
- parts = re.split('@', username)
- parts[0] = re.sub('\.', '', parts[0])
-
- if len(parts) == 1:
- parts.append('gmail.com')
- return '@'.join(parts)
-
-
-class Chrome(object):
- """Wrapper for creating a telemetry browser instance with extensions.
-
- The recommended way to use this class is to create the instance using the
- with statement:
-
- >>> with chrome.Chrome(...) as cr:
- >>> # Do whatever you need with cr.
- >>> pass
-
- This will make sure all the clean-up functions are called. If you really
- need to use this class without the with statement, make sure to call the
- close() method once you're done with the Chrome instance.
- """
-
- BROWSER_TYPE_LOGIN = 'system'
- BROWSER_TYPE_GUEST = 'system-guest'
- AUTOTEST_EXT_ID = 'behllobkkfkfnphdnhnkndlbkcpglgmj'
-
- def __init__(self, logged_in=True, extension_paths=None, autotest_ext=False,
- num_tries=3, extra_browser_args=None,
- clear_enterprise_policy=True, expect_policy_fetch=False,
- dont_override_profile=False, disable_gaia_services=True,
- disable_default_apps=True, auto_login=True, gaia_login=False,
- username=None, password=None, gaia_id=None,
- arc_mode=None, arc_timeout=None,
- enable_web_app_auto_install=False,
- disable_arc_opt_in=True,
- disable_arc_opt_in_verification=True,
- disable_arc_cpu_restriction=True,
- disable_app_sync=False,
- disable_play_auto_install=False,
- disable_locale_sync=True,
- disable_play_store_auto_update=True,
- enable_assistant=False,
- enterprise_arc_test=False,
- init_network_controller=False,
- mute_audio=False,
- proxy_server=None,
- login_delay=0):
- """
- Constructor of telemetry wrapper.
-
- @param logged_in: Regular user (True) or guest user (False).
- @param extension_paths: path of unpacked extension to install.
- @param autotest_ext: Load a component extension with privileges to
- invoke chrome.autotestPrivate.
- @param num_tries: Number of attempts to log in.
- @param extra_browser_args: Additional argument(s) to pass to the
- browser. It can be a string or a list.
- @param clear_enterprise_policy: Clear enterprise policy before
- logging in.
- @param expect_policy_fetch: Expect that chrome can reach the device
- management server and download policy.
- @param dont_override_profile: Don't delete cryptohome before login.
- Telemetry will output a warning with this
- option.
- @param disable_gaia_services: For enterprise autotests, this option may
- be used to enable policy fetch.
- @param disable_default_apps: For tests that exercise default apps.
- @param auto_login: Does not login automatically if this is False.
- Useful if you need to examine oobe.
- @param gaia_login: Logs in to real gaia.
- @param username: Log in using this username instead of the default.
- @param password: Log in using this password instead of the default.
- @param gaia_id: Log in using this gaia_id instead of the default.
- @param arc_mode: How ARC instance should be started. Default is to not
- start.
- @param arc_timeout: Timeout to wait for ARC to boot.
- @param enable_web_app_auto_install: For tests that require to auto download and install default web applications. By default it is disabled.
- @param disable_arc_opt_in: For opt in flow autotest. This option is used
- to disable the arc opt in flow.
- @param disable_arc_opt_in_verification:
- Adds --disable-arc-opt-in-verification to browser args. This should
- generally be enabled when disable_arc_opt_in is enabled. However,
- for data migration tests where user's home data is already set up
- with opted-in state before login, this option needs to be set to
- False with disable_arc_opt_in=True to make ARC container work.
- @param disable_arc_cpu_restriction:
- Adds --disable-arc-cpu-restriction to browser args. This is enabled
- by default and will make tests run faster and is generally
- desirable unless a test is actually trying to test performance
- where ARC is running in the background for some porition of the
- test.
- @param disable_app_sync:
- Adds --arc-disable-app-sync to browser args and this disables ARC
- app sync flow. By default it is enabled.
- @param disable_play_auto_install:
- Adds --arc-disable-play-auto-install to browser args and this
- disables ARC Play Auto Install flow. By default it is enabled.
- @param enable_assistant: For tests that require to enable Google
- Assistant service. Default is False.
- @param enterprise_arc_test: Skips opt_in causing enterprise tests to fail
- @param disable_locale_sync:
- Adds --arc-disable-locale-sync to browser args and this
- disables locale sync between Chrome and Android container. In case
- of disabling sync, Android container is started with language and
- preference language list as it was set on the moment of starting
- full instance. Used to prevent random app restarts caused by racy
- locale change, coming from profile sync. By default locale sync is
- disabled.
- @param disable_play_store_auto_update:
- Adds --arc-play-store-auto-update=off to browser args and this
- disables Play Store, GMS Core and third-party apps auto-update.
- By default auto-update is off to have stable autotest environment.
- @param mute_audio: Mute audio.
- @param proxy_server: To launch the chrome with --proxy-server
- Adds '--proxy-server="http://$HTTP_PROXY:PORT"' to browser args. By
- default proxy-server is disabled
- @param login_delay: Time for idle in login screen to simulate the time
- required for password typing.
- """
- self._autotest_ext_path = None
-
- # Force autotest extension if we need enable Play Store.
- if (utils.is_arc_available() and (arc_util.should_start_arc(arc_mode)
- or not disable_arc_opt_in)):
- autotest_ext = True
-
- if extension_paths is None:
- extension_paths = []
-
- finder_options = browser_options.BrowserFinderOptions()
- if proxy_server:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--proxy-server="%s"' % proxy_server])
- if utils.is_arc_available() and arc_util.should_start_arc(arc_mode):
- if disable_arc_opt_in and disable_arc_opt_in_verification:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--disable-arc-opt-in-verification'])
- if disable_arc_cpu_restriction:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--disable-arc-cpu-restriction'])
- if disable_app_sync:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--arc-disable-app-sync'])
- if disable_play_auto_install:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--arc-disable-play-auto-install'])
- if disable_locale_sync:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--arc-disable-locale-sync'])
- if disable_play_store_auto_update:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--arc-play-store-auto-update=off'])
- logged_in = True
-
- if autotest_ext:
- self._autotest_ext_path = os.path.join(os.path.dirname(__file__),
- 'autotest_private_ext')
- extension_paths.append(self._autotest_ext_path)
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--whitelisted-extension-id=%s' % self.AUTOTEST_EXT_ID])
-
- self._browser_type = (self.BROWSER_TYPE_LOGIN
- if logged_in else self.BROWSER_TYPE_GUEST)
- finder_options.browser_type = self.browser_type
-
- if not enable_web_app_auto_install:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--disable-features=DefaultWebAppInstallation'])
-
- if not auto_login:
- finder_options.browser_options.AppendExtraBrowserArgs(
- ['--enable-oobe-test-api'])
-
- if extra_browser_args:
- finder_options.browser_options.AppendExtraBrowserArgs(
- extra_browser_args)
-
- # finder options must be set before parse_args(), browser options must
- # be set before Create().
- # TODO(crbug.com/360890) Below MUST be '2' so that it doesn't inhibit
- # autotest debug logs
- finder_options.verbosity = 2
- finder_options.CreateParser().parse_args(args=[])
- b_options = finder_options.browser_options
- b_options.disable_component_extensions_with_background_pages = False
- b_options.create_browser_with_oobe = True
- b_options.clear_enterprise_policy = clear_enterprise_policy
- b_options.dont_override_profile = dont_override_profile
- b_options.disable_gaia_services = disable_gaia_services
- b_options.disable_default_apps = disable_default_apps
- b_options.disable_component_extensions_with_background_pages = disable_default_apps
- b_options.disable_background_networking = False
- b_options.expect_policy_fetch = expect_policy_fetch
- b_options.auto_login = auto_login
- b_options.gaia_login = gaia_login
- b_options.mute_audio = mute_audio
- b_options.login_delay = login_delay
-
- if utils.is_arc_available() and not disable_arc_opt_in:
- arc_util.set_browser_options_for_opt_in(b_options)
-
- self.username = b_options.username if username is None else username
- self.password = b_options.password if password is None else password
- self.username = NormalizeEmail(self.username)
- b_options.username = self.username
- b_options.password = self.password
- self.gaia_id = b_options.gaia_id if gaia_id is None else gaia_id
- b_options.gaia_id = self.gaia_id
-
- self.arc_mode = arc_mode
-
- if logged_in:
- extensions_to_load = b_options.extensions_to_load
- for path in extension_paths:
- extension = extension_to_load.ExtensionToLoad(
- path, self.browser_type)
- extensions_to_load.append(extension)
- self._extensions_to_load = extensions_to_load
-
- # Turn on collection of Chrome coredumps via creation of a magic file.
- # (Without this, Chrome coredumps are trashed.)
- open(constants.CHROME_CORE_MAGIC_FILE, 'w').close()
-
- self._browser_to_create = browser_finder.FindBrowser(
- finder_options)
- self._browser_to_create.SetUpEnvironment(b_options)
- for i in range(num_tries):
- try:
- self._browser = self._browser_to_create.Create()
- self._browser_pid = \
- cros_interface.CrOSInterface().GetChromePid()
- if utils.is_arc_available():
- if disable_arc_opt_in:
- if arc_util.should_start_arc(arc_mode):
- arc_util.enable_play_store(self.autotest_ext, True)
- else:
- if not enterprise_arc_test:
- wait_for_provisioning = \
- arc_mode != arc_common.ARC_MODE_ENABLED_ASYNC
- arc_util.opt_in(
- browser=self.browser,
- autotest_ext=self.autotest_ext,
- wait_for_provisioning=wait_for_provisioning)
- arc_util.post_processing_after_browser(self, arc_timeout)
- if enable_assistant:
- assistant_util.enable_assistant(self.autotest_ext)
- break
- except exceptions.LoginException as e:
- logging.error('Timed out logging in, tries=%d, error=%s',
- i, repr(e))
- if i == num_tries-1:
- raise
- if init_network_controller:
- self._browser.platform.network_controller.Open()
-
- def __enter__(self):
- return self
-
- def __exit__(self, *args):
- # Turn off collection of Chrome coredumps turned on in init.
- if os.path.exists(constants.CHROME_CORE_MAGIC_FILE):
- os.remove(constants.CHROME_CORE_MAGIC_FILE)
- self.close()
-
- @property
- def browser(self):
- """Returns a telemetry browser instance."""
- return self._browser
-
- def get_extension(self, extension_path, retry=5):
- """Fetches a telemetry extension instance given the extension path."""
- def _has_ext(ext):
- """
- Return True if the extension is fully loaded.
-
- Sometimes an extension will be in the _extensions_to_load, but not
- be fully loaded, and will error when trying to fetch from
- self.browser.extensions. Happens most common when ARC is enabled.
- This will add a wait/retry.
-
- @param ext: the extension to look for
- @returns True if found, False if not.
- """
- try:
- return bool(self.browser.extensions[ext])
- except KeyError:
- return False
-
- for ext in self._extensions_to_load:
- if extension_path == ext.path:
- utils.poll_for_condition(lambda: _has_ext(ext),
- timeout=retry)
- return self.browser.extensions[ext]
- return None
-
- @property
- def autotest_ext(self):
- """Returns the autotest extension."""
- return self.get_extension(self._autotest_ext_path)
-
- @property
- def login_status(self):
- """Returns login status."""
- ext = self.autotest_ext
- if not ext:
- return None
-
- ext.ExecuteJavaScript('''
- window.__login_status = null;
- chrome.autotestPrivate.loginStatus(function(s) {
- window.__login_status = s;
- });
- ''')
- return utils.poll_for_condition(
- lambda: ext.EvaluateJavaScript('window.__login_status'),
- timeout=10)
-
- def disable_dim_display(self):
- """Avoid dim display.
-
- @returns True if success otherwise False.
- """
- ext = self.autotest_ext
- if not ext:
- return False
- try:
- ext.ExecuteJavaScript(
- '''chrome.power.requestKeepAwake("display")''')
- except:
- logging.error("failed to disable dim display")
- return False
- return True
-
- def get_visible_notifications(self):
- """Returns an array of visible notifications of Chrome.
-
- For specific type of each notification, please refer to Chromium's
- chrome/common/extensions/api/autotest_private.idl.
- """
- ext = self.autotest_ext
- if not ext:
- return None
-
- ext.ExecuteJavaScript('''
- window.__items = null;
- chrome.autotestPrivate.getVisibleNotifications(function(items) {
- window.__items = items;
- });
- ''')
- if ext.EvaluateJavaScript('window.__items') is None:
- return None
- return ext.EvaluateJavaScript('window.__items')
-
- @property
- def browser_type(self):
- """Returns the browser_type."""
- return self._browser_type
-
- @staticmethod
- def did_browser_crash(func):
- """Runs func, returns True if the browser crashed, False otherwise.
-
- @param func: function to run.
-
- """
- try:
- func()
- except Error:
- return True
- return False
-
- @staticmethod
- def wait_for_browser_restart(func, browser):
- """Runs func, and waits for a browser restart.
-
- @param func: function to run.
-
- """
- _cri = cros_interface.CrOSInterface()
- pid = _cri.GetChromePid()
- Chrome.did_browser_crash(func)
- utils.poll_for_condition(
- lambda: pid != _cri.GetChromePid(), timeout=60)
- browser.WaitForBrowserToComeUp()
-
- def wait_for_browser_to_come_up(self):
- """Waits for the browser to come up. This should only be called after a
- browser crash.
- """
- def _BrowserReady(cr):
- tabs = [] # Wrapper for pass by reference.
- if self.did_browser_crash(
- lambda: tabs.append(cr.browser.tabs.New())):
- return False
- try:
- tabs[0].Close()
- except:
- # crbug.com/350941
- logging.error('Timed out closing tab')
- return True
- py_utils.WaitFor(lambda: _BrowserReady(self), timeout=10)
-
- def close(self):
- """Closes the browser.
- """
- try:
- if utils.is_arc_available():
- arc_util.pre_processing_before_close(self)
- finally:
- # Calling platform.StopAllLocalServers() to tear down the telemetry
- # server processes such as the one started by
- # platform.SetHTTPServerDirectories(). Not calling this function
- # will leak the process and may affect test results.
- # (crbug.com/663387)
- self._browser.platform.StopAllLocalServers()
- self._browser.Close()
- self._browser_to_create.CleanUpEnvironment()
- self._browser.platform.network_controller.Close()
diff --git a/client/common_lib/cros/chromedriver.py b/client/common_lib/cros/chromedriver.py
deleted file mode 100644
index 3493a17..0000000
--- a/client/common_lib/cros/chromedriver.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import atexit
-import logging
-import os
-from six.moves import urllib
-import six.moves.urllib.parse
-
-try:
- from selenium import webdriver
-except ImportError:
- # Ignore import error, as this can happen when builder tries to call the
- # setup method of test that imports chromedriver.
- logging.error('selenium module failed to be imported.')
- pass
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib.cros import chrome
-
-CHROMEDRIVER_EXE_PATH = '/usr/local/chromedriver/chromedriver'
-X_SERVER_DISPLAY = ':0'
-X_AUTHORITY = '/home/chronos/.Xauthority'
-
-
-class chromedriver(object):
- """Wrapper class, a context manager type, for tests to use Chrome Driver."""
-
- def __init__(self, extra_chrome_flags=[], subtract_extra_chrome_flags=[],
- extension_paths=[], username=None, password=None,
- server_port=None, skip_cleanup=False, url_base=None,
- extra_chromedriver_args=None, gaia_login=False,
- disable_default_apps=True, dont_override_profile=False, *args,
- **kwargs):
- """Initialize.
-
- @param extra_chrome_flags: Extra chrome flags to pass to chrome, if any.
- @param subtract_extra_chrome_flags: Remove default flags passed to
- chrome by chromedriver, if any.
- @param extension_paths: A list of paths to unzipped extensions. Note
- that paths to crx files won't work.
- @param username: Log in using this username instead of the default.
- @param password: Log in using this password instead of the default.
- @param server_port: Port number for the chromedriver server. If None,
- an available port is chosen at random.
- @param skip_cleanup: If True, leave the server and browser running
- so that remote tests can run after this script
- ends. Default is False.
- @param url_base: Optional base url for chromedriver.
- @param extra_chromedriver_args: List of extra arguments to forward to
- the chromedriver binary, if any.
- @param gaia_login: Logs in to real gaia.
- @param disable_default_apps: For tests that exercise default apps.
- @param dont_override_profile: Don't delete cryptohome before login.
- Telemetry will output a warning with this
- option.
- """
- self._cleanup = not skip_cleanup
- assert os.geteuid() == 0, 'Need superuser privileges'
-
- # When ChromeDriver starts Chrome on other platforms (Linux, Windows,
- # etc.), it accepts flag inputs of the form "--flag_name" or
- # "flag_name". Before starting Chrome with those flags, ChromeDriver
- # reformats them all to "--flag_name". This behavior is copied
- # to ChromeOS for consistency across platforms.
- fixed_extra_chrome_flags = [
- f if f.startswith('--') else '--%s' % f for f in extra_chrome_flags]
-
- # Log in with telemetry
- self._chrome = chrome.Chrome(extension_paths=extension_paths,
- username=username,
- password=password,
- extra_browser_args=fixed_extra_chrome_flags,
- gaia_login=gaia_login,
- disable_default_apps=disable_default_apps,
- dont_override_profile=dont_override_profile
- )
- self._browser = self._chrome.browser
- # Close all tabs owned and opened by Telemetry, as these cannot be
- # transferred to ChromeDriver.
- self._browser.tabs[0].Close()
-
- # Start ChromeDriver server
- self._server = chromedriver_server(CHROMEDRIVER_EXE_PATH,
- port=server_port,
- skip_cleanup=skip_cleanup,
- url_base=url_base,
- extra_args=extra_chromedriver_args)
-
- # Open a new tab using Chrome remote debugging. ChromeDriver expects
- # a tab opened for remote to work. Tabs opened using Telemetry will be
- # owned by Telemetry, and will be inaccessible to ChromeDriver.
- urllib.request.urlopen('http://localhost:%i/json/new' %
- utils.get_chrome_remote_debugging_port())
-
- chromeOptions = {'debuggerAddress':
- ('localhost:%d' %
- utils.get_chrome_remote_debugging_port())}
- capabilities = {'chromeOptions':chromeOptions}
- # Handle to chromedriver, for chrome automation.
- try:
- self.driver = webdriver.Remote(command_executor=self._server.url,
- desired_capabilities=capabilities)
- except NameError:
- logging.error('selenium module failed to be imported.')
- raise
-
-
- def __enter__(self):
- return self
-
-
- def __exit__(self, *args):
- """Clean up after running the test.
-
- """
- if hasattr(self, 'driver') and self.driver:
- self.driver.close()
- del self.driver
-
- if not hasattr(self, '_cleanup') or self._cleanup:
- if hasattr(self, '_server') and self._server:
- self._server.close()
- del self._server
-
- if hasattr(self, '_browser') and self._browser:
- self._browser.Close()
- del self._browser
-
- def get_extension(self, extension_path):
- """Gets an extension by proxying to the browser.
-
- @param extension_path: Path to the extension loaded in the browser.
-
- @return: A telemetry extension object representing the extension.
- """
- return self._chrome.get_extension(extension_path)
-
-
- @property
- def chrome_instance(self):
- """ The chrome instance used by this chrome driver instance. """
- return self._chrome
-
-
-class chromedriver_server(object):
- """A running ChromeDriver server.
-
- This code is migrated from chrome:
- src/chrome/test/chromedriver/server/server.py
- """
-
- def __init__(self, exe_path, port=None, skip_cleanup=False,
- url_base=None, extra_args=None):
- """Starts the ChromeDriver server and waits for it to be ready.
-
- Args:
- exe_path: path to the ChromeDriver executable
- port: server port. If None, an available port is chosen at random.
- skip_cleanup: If True, leave the server running so that remote
- tests can run after this script ends. Default is
- False.
- url_base: Optional base url for chromedriver.
- extra_args: List of extra arguments to forward to the chromedriver
- binary, if any.
- Raises:
- RuntimeError if ChromeDriver fails to start
- """
- if not os.path.exists(exe_path):
- raise RuntimeError('ChromeDriver exe not found at: ' + exe_path)
-
- chromedriver_args = [exe_path]
- if port:
- # Allow remote connections if a port was specified
- chromedriver_args.append('--whitelisted-ips')
- else:
- port = utils.get_unused_port()
- chromedriver_args.append('--port=%d' % port)
-
- self.url = 'http://localhost:%d' % port
- if url_base:
- chromedriver_args.append('--url-base=%s' % url_base)
- self.url = six.moves.urllib.parse.urljoin(self.url, url_base)
-
- if extra_args:
- chromedriver_args.extend(extra_args)
-
- # TODO(ihf): Remove references to X after M45.
- # Chromedriver will look for an X server running on the display
- # specified through the DISPLAY environment variable.
- os.environ['DISPLAY'] = X_SERVER_DISPLAY
- os.environ['XAUTHORITY'] = X_AUTHORITY
-
- self.bg_job = utils.BgJob(chromedriver_args, stderr_level=logging.DEBUG)
- if self.bg_job is None:
- raise RuntimeError('ChromeDriver server cannot be started')
-
- try:
- timeout_msg = 'Timeout on waiting for ChromeDriver to start.'
- utils.poll_for_condition(self.is_running,
- exception=utils.TimeoutError(timeout_msg),
- timeout=10,
- sleep_interval=.1)
- except utils.TimeoutError:
- self.close_bgjob()
- raise RuntimeError('ChromeDriver server did not start')
-
- logging.debug('Chrome Driver server is up and listening at port %d.',
- port)
- if not skip_cleanup:
- atexit.register(self.close)
-
-
- def is_running(self):
- """Returns whether the server is up and running."""
- try:
- urllib.request.urlopen(self.url + '/status')
- return True
- except urllib.error.URLError as e:
- return False
-
-
- def close_bgjob(self):
- """Close background job and log stdout and stderr."""
- utils.nuke_subprocess(self.bg_job.sp)
- utils.join_bg_jobs([self.bg_job], timeout=1)
- result = self.bg_job.result
- if result.stdout or result.stderr:
- logging.info('stdout of Chrome Driver:\n%s', result.stdout)
- logging.error('stderr of Chrome Driver:\n%s', result.stderr)
-
-
- def close(self):
- """Kills the ChromeDriver server, if it is running."""
- if self.bg_job is None:
- return
-
- try:
- urllib.request.urlopen(self.url + '/shutdown', timeout=10).close()
- except:
- pass
-
- self.close_bgjob()
diff --git a/client/common_lib/cros/cr50_utils.py b/client/common_lib/cros/cr50_utils.py
deleted file mode 100644
index f695aaa..0000000
--- a/client/common_lib/cros/cr50_utils.py
+++ /dev/null
@@ -1,683 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import argparse
-import logging
-import re
-import six
-from six.moves import range
-
-from autotest_lib.client.common_lib import error
-
-
-RO = 'ro'
-RW = 'rw'
-BID = 'bid'
-CR50_PROD = '/opt/google/cr50/firmware/cr50.bin.prod'
-CR50_PREPVT = '/opt/google/cr50/firmware/cr50.bin.prepvt'
-CR50_STATE = '/var/cache/cr50*'
-CR50_VERSION = '/var/cache/cr50-version'
-GET_CR50_VERSION = 'cat %s' % CR50_VERSION
-GET_CR50_MESSAGES ='grep "cr50-.*\[" /var/log/messages'
-UPDATE_FAILURE = 'unexpected cr50-update exit code'
-STUB_VER = '-1.-1.-1'
-# This dictionary is used to search the gsctool output for the version strings.
-# There are two gsctool commands that will return versions: 'fwver' and
-# 'binvers'.
-#
-# 'fwver' is used to get the running RO and RW versions from cr50
-# 'binvers' gets the version strings for each RO and RW region in the given
-# file
-#
-# The value in the dictionary is the regular expression that can be used to
-# find the version strings for each region.
-#
-# --fwver
-# example output:
-# open_device 18d1:5014
-# found interface 3 endpoint 4, chunk_len 64
-# READY
-# -------
-# start
-# target running protocol version 6
-# keyids: RO 0xaa66150f, RW 0xde88588d
-# offsets: backup RO at 0x40000, backup RW at 0x44000
-# Current versions:
-# RO 0.0.10
-# RW 0.0.21
-# match groupdict:
-# {
-# 'ro': '0.0.10',
-# 'rw': '0.0.21'
-# }
-#
-# --binvers
-# example output:
-# read 524288(0x80000) bytes from /tmp/cr50.bin
-# RO_A:0.0.10 RW_A:0.0.21[00000000:00000000:00000000]
-# RO_B:0.0.10 RW_B:0.0.21[00000000:00000000:00000000]
-# match groupdict:
-# {
-# 'rw_b': '0.0.21',
-# 'rw_a': '0.0.21',
-# 'ro_b': '0.0.10',
-# 'ro_a': '0.0.10',
-# 'bid_a': '00000000:00000000:00000000',
-# 'bid_b': '00000000:00000000:00000000'
-# }
-VERSION_RE = {
- '--fwver' : '\nRO (?P<ro>\S+).*\nRW (?P<rw>\S+)',
- '--binvers' : 'RO_A:(?P<ro_a>[\d\.]+).*' \
- 'RW_A:(?P<rw_a>[\d\.]+)(\[(?P<bid_a>[\d\:A-z]+)\])?.*' \
- 'RO_B:(?P<ro_b>\S+).*' \
- 'RW_B:(?P<rw_b>[\d\.]+)(\[(?P<bid_b>[\d\:A-z]+)\])?.*',
-}
-UPDATE_TIMEOUT = 60
-UPDATE_OK = 1
-
-MP_BID_FLAGS = 0x7f80
-ERASED_BID_INT = 0xffffffff
-ERASED_BID_STR = hex(ERASED_BID_INT)
-# With an erased bid, the flags and board id will both be erased
-ERASED_CHIP_BID = (ERASED_BID_INT, ERASED_BID_INT, ERASED_BID_INT)
-# Any image with this board id will run on any device
-EMPTY_IMAGE_BID = '00000000:00000000:00000000'
-EMPTY_IMAGE_BID_CHARACTERS = set(EMPTY_IMAGE_BID)
-SYMBOLIC_BID_LENGTH = 4
-
-gsctool = argparse.ArgumentParser()
-gsctool.add_argument('-a', '--any', dest='universal', action='store_true')
-# use /dev/tpm0 to send the command
-gsctool.add_argument('-s', '--systemdev', dest='systemdev', action='store_true')
-gsctool.add_argument('-o', '--ccd_open', dest='ccd_open', action='store_true')
-# Any command used for something other than updating. These commands should
-# never timeout because they do not force cr50 to reboot. They should all just
-# return information about cr50 and should only have a nonzero exit status if
-# something went wrong.
-gsctool.add_argument('-b', '--binvers', '-f', '--fwver', '-g', '--getbootmode',
- '-i', '--board_id', '-r', '--rma_auth', '-F', '--factory',
- '-m', '--tpm_mode', '-L', '--flog',
- dest='info_cmd', action='store_true')
-# upstart and post_reset will post resets instead of rebooting immediately
-gsctool.add_argument('-u', '--upstart', '-p', '--post_reset', dest='post_reset',
- action='store_true')
-gsctool.add_argument('extras', nargs=argparse.REMAINDER)
-
-
-def AssertVersionsAreEqual(name_a, ver_a, name_b, ver_b):
- """Raise an error ver_a isn't the same as ver_b
-
- Args:
- name_a: the name of section a
- ver_a: the version string for section a
- name_b: the name of section b
- ver_b: the version string for section b
-
- Raises:
- AssertionError if ver_a is not equal to ver_b
- """
- assert ver_a == ver_b, ('Versions do not match: %s %s %s %s' %
- (name_a, ver_a, name_b, ver_b))
-
-
-def GetNewestVersion(ver_a, ver_b):
- """Compare the versions. Return the newest one. If they are the same return
- None."""
- a = [int(x) for x in ver_a.split('.')]
- b = [int(x) for x in ver_b.split('.')]
-
- if a > b:
- return ver_a
- if b > a:
- return ver_b
- return None
-
-
-def GetVersion(versions, name):
- """Return the version string from the dictionary.
-
- Get the version for each key in the versions dictionary that contains the
- substring name. Make sure all of the versions match and return the version
- string. Raise an error if the versions don't match.
-
- Args:
- version: dictionary with the partition names as keys and the
- partition version strings as values.
- name: the string used to find the relevant items in versions.
-
- Returns:
- the version from versions or "-1.-1.-1" if an invalid RO was detected.
- """
- ver = None
- key = None
- for k, v in six.iteritems(versions):
- if name in k:
- if v == STUB_VER:
- logging.info('Detected invalid %s %s', name, v)
- return v
- elif ver:
- AssertVersionsAreEqual(key, ver, k, v)
- else:
- ver = v
- key = k
- return ver
-
-
-def FindVersion(output, arg):
- """Find the ro and rw versions.
-
- Args:
- output: The string to search
- arg: string representing the gsctool option, either '--binvers' or
- '--fwver'
-
- Returns:
- a tuple of the ro and rw versions
- """
- versions = re.search(VERSION_RE[arg], output)
- if not versions:
- raise Exception('Unable to determine version from: %s' % output)
-
- versions = versions.groupdict()
- ro = GetVersion(versions, RO)
- rw = GetVersion(versions, RW)
- # --binver is the only gsctool command that may have bid keys in its
- # versions dictionary. If no bid keys exist, bid will be None.
- bid = GetVersion(versions, BID)
- # Use GetBoardIdInfoString to standardize all board ids to the non
- # symbolic form.
- return ro, rw, GetBoardIdInfoString(bid, symbolic=False)
-
-
-def GetSavedVersion(client):
- """Return the saved version from /var/cache/cr50-version
-
- Some boards dont have cr50.bin.prepvt. They may still have prepvt flags.
- It is possible that cr50-update wont successfully run in this case.
- Return None if the file doesn't exist.
-
- Returns:
- the version saved in cr50-version or None if cr50-version doesn't exist
- """
- if not client.path_exists(CR50_VERSION):
- return None
-
- result = client.run(GET_CR50_VERSION).stdout.strip()
- return FindVersion(result, '--fwver')
-
-
-def StopTrunksd(client):
- """Stop trunksd on the client"""
- if 'running' in client.run('status trunksd').stdout:
- client.run('stop trunksd')
-
-
-def GSCTool(client, args, ignore_status=False):
- """Run gsctool with the given args.
-
- Args:
- client: the object to run commands on
- args: a list of strings that contiain the gsctool args
-
- Returns:
- the result of gsctool
- """
- options = gsctool.parse_args(args)
-
- if options.systemdev:
- StopTrunksd(client)
-
- # If we are updating the cr50 image, gsctool will return a non-zero exit
- # status so we should ignore it.
- ignore_status = not options.info_cmd or ignore_status
- # immediate reboots are only honored if the command is sent using /dev/tpm0
- expect_reboot = ((options.systemdev or options.universal) and
- not options.post_reset and not options.info_cmd)
-
- result = client.run('gsctool %s' % ' '.join(args),
- ignore_status=ignore_status,
- ignore_timeout=expect_reboot,
- timeout=UPDATE_TIMEOUT)
-
- # After a posted reboot, the gsctool exit code should equal 1.
- if (result and result.exit_status and result.exit_status != UPDATE_OK and
- not ignore_status):
- logging.debug(result)
- raise error.TestFail('Unexpected gsctool exit code after %s %d' %
- (' '.join(args), result.exit_status))
- return result
-
-
-def GetVersionFromUpdater(client, args):
- """Return the version from gsctool"""
- result = GSCTool(client, args).stdout.strip()
- return FindVersion(result, args[0])
-
-
-def GetFwVersion(client):
- """Get the running version using 'gsctool --fwver'"""
- return GetVersionFromUpdater(client, ['--fwver', '-a'])
-
-
-def GetBinVersion(client, image=CR50_PROD):
- """Get the image version using 'gsctool --binvers image'"""
- return GetVersionFromUpdater(client, ['--binvers', image])
-
-
-def GetVersionString(ver):
- """Combine the RO and RW tuple into a understandable string"""
- return 'RO %s RW %s%s' % (ver[0], ver[1],
- ' BID %s' % ver[2] if ver[2] else '')
-
-
-def GetRunningVersion(client):
- """Get the running Cr50 version.
-
- The version from gsctool and /var/cache/cr50-version should be the
- same. Get both versions and make sure they match.
-
- Args:
- client: the object to run commands on
-
- Returns:
- running_ver: a tuple with the ro and rw version strings
-
- Raises:
- TestFail
- - If the version in /var/cache/cr50-version is not the same as the
- version from 'gsctool --fwver'
- """
- running_ver = GetFwVersion(client)
- saved_ver = GetSavedVersion(client)
-
- if saved_ver:
- AssertVersionsAreEqual('Running', GetVersionString(running_ver),
- 'Saved', GetVersionString(saved_ver))
- return running_ver
-
-
-def GetActiveCr50ImagePath(client):
- """Get the path the device uses to update cr50
-
- Extract the active cr50 path from the cr50-update messages. This path is
- determined by cr50-get-name based on the board id flag value.
-
- Args:
- client: the object to run commands on
-
- Raises:
- TestFail
- - If cr50-update uses more than one path or if the path we find
- is not a known cr50 update path.
- """
- ClearUpdateStateAndReboot(client)
- messages = client.run(GET_CR50_MESSAGES).stdout.strip()
- paths = set(re.findall('/opt/google/cr50/firmware/cr50.bin[\S]+', messages))
- if not paths:
- raise error.TestFail('Could not determine cr50-update path')
- path = paths.pop()
- if len(paths) > 1 or (path != CR50_PROD and path != CR50_PREPVT):
- raise error.TestFail('cannot determine cr50 path')
- return path
-
-
-def CheckForFailures(client, last_message):
- """Check for any unexpected cr50-update exit codes.
-
- This only checks the cr50 update messages that have happened since
- last_message. If a unexpected exit code is detected it will raise an error>
-
- Args:
- client: the object to run commands on
- last_message: the last cr50 message from the last update run
-
- Returns:
- the last cr50 message in /var/log/messages
-
- Raises:
- TestFail
- - If there is a unexpected cr50-update exit code after last_message
- in /var/log/messages
- """
- messages = client.run(GET_CR50_MESSAGES).stdout.strip()
- if last_message:
- messages = messages.rsplit(last_message, 1)[-1].split('\n')
- failures = []
- for message in messages:
- if UPDATE_FAILURE in message:
- failures.append(message)
- if len(failures):
- logging.info(messages)
- raise error.TestFail('Detected unexpected exit code during update: '
- '%s' % failures)
- return messages[-1]
-
-
-def VerifyUpdate(client, ver='', last_message=''):
- """Verify that the saved update state is correct and there were no
- unexpected cr50-update exit codes since the last update.
-
- Args:
- client: the object to run commands on
- ver: the expected version tuple (ro ver, rw ver)
- last_message: the last cr50 message from the last update run
-
- Returns:
- new_ver: a tuple containing the running ro and rw versions
- last_message: The last cr50 update message in /var/log/messages
- """
- # Check that there were no unexpected reboots from cr50-result
- last_message = CheckForFailures(client, last_message)
- logging.debug('last cr50 message %s', last_message)
-
- new_ver = GetRunningVersion(client)
- if ver != '':
- if STUB_VER != ver[0]:
- AssertVersionsAreEqual('Old RO', ver[0], 'Updated RO', new_ver[0])
- AssertVersionsAreEqual('Old RW', ver[1], 'Updated RW', new_ver[1])
- return new_ver, last_message
-
-
-def GetDevicePath(ext):
- """Return the device path for the .prod or .prepvt image."""
- if ext == 'prod':
- return CR50_PROD
- elif ext == 'prepvt':
- return CR50_PREPVT
- raise error.TestError('Unsupported cr50 image type %r' % ext)
-
-
-def ClearUpdateStateAndReboot(client):
- """Removes the cr50 status files in /var/cache and reboots the AP"""
- # If any /var/cache/cr50* files exist, remove them.
- result = client.run('ls %s' % CR50_STATE, ignore_status=True)
- if not result.exit_status:
- client.run('rm %s' % ' '.join(result.stdout.split()))
- elif result.exit_status != 2:
- # Exit status 2 means the file didn't exist. If the command fails for
- # some other reason, raise an error.
- logging.debug(result)
- raise error.TestFail(result.stderr)
- client.reboot()
-
-
-def InstallImage(client, src, dest=CR50_PROD):
- """Copy the image at src to dest on the dut
-
- Args:
- client: the object to run commands on
- src: the image location of the server
- dest: the desired location on the dut
-
- Returns:
- The filename where the image was copied to on the dut, a tuple
- containing the RO and RW version of the file
- """
- # Send the file to the DUT
- client.send_file(src, dest)
-
- ver = GetBinVersion(client, dest)
- client.run('sync')
- return dest, ver
-
-
-def GetBoardIdInfoTuple(board_id_str):
- """Convert the string into board id args.
-
- Split the board id string board_id:(mask|board_id_inv):flags to a tuple of
- its parts. Each element will be converted to an integer.
-
- Returns:
- board id int, mask|board_id_inv, and flags or None if its a universal
- image.
- """
- # In tests None is used for universal board ids. Some old images don't
- # support getting board id, so we use None. Convert 0:0:0 to None.
- if not board_id_str or set(board_id_str) == EMPTY_IMAGE_BID_CHARACTERS:
- return None
-
- board_id, param2, flags = board_id_str.split(':')
- return GetIntBoardId(board_id), int(param2, 16), int(flags, 16)
-
-
-def GetBoardIdInfoString(board_id_info, symbolic=False):
- """Convert the board id list or str into a symbolic or non symbolic str.
-
- This can be used to convert the board id info list into a symbolic or non
- symbolic board id string. It can also be used to convert a the board id
- string into a board id string with a symbolic or non symbolic board id
-
- Args:
- board_id_info: A string of the form board_id:(mask|board_id_inv):flags
- or a list with the board_id, (mask|board_id_inv), flags
-
- Returns:
- (board_id|symbolic_board_id):(mask|board_id_inv):flags. Will return
- None if if the given board id info is empty or is not valid
- """
- # TODO(mruthven): remove unicode check after conversion to python3.
- # Convert board_id_info to a tuple if it's a string.
- if isinstance(board_id_info, str) or isinstance(board_id_info, unicode):
- board_id_info = GetBoardIdInfoTuple(board_id_info)
-
- if not board_id_info:
- return None
-
- board_id, param2, flags = board_id_info
- # Get the hex string for board id
- board_id = '%08x' % GetIntBoardId(board_id)
-
- # Convert the board id hex to a symbolic board id
- if symbolic:
- board_id = GetSymbolicBoardId(board_id)
-
- # Return the board_id_str:8_digit_hex_mask: 8_digit_hex_flags
- return '%s:%08x:%08x' % (board_id, param2, flags)
-
-
-def GetSymbolicBoardId(board_id):
- """Convert an integer board id to a symbolic string
-
- Args:
- board_id: the board id to convert to the symbolic board id
-
- Returns:
- the 4 character symbolic board id
- """
- symbolic_board_id = ''
- board_id = GetIntBoardId(board_id)
-
- # Convert the int to a symbolic board id
- for i in range(SYMBOLIC_BID_LENGTH):
- symbolic_board_id += chr((board_id >> (i * 8)) & 0xff)
- symbolic_board_id = symbolic_board_id[::-1]
-
- # Verify the created board id is 4 characters
- if len(symbolic_board_id) != SYMBOLIC_BID_LENGTH:
- raise error.TestFail('Created invalid symbolic board id %s' %
- symbolic_board_id)
- return symbolic_board_id
-
-
-def ConvertSymbolicBoardId(symbolic_board_id):
- """Convert the symbolic board id str to an int
-
- Args:
- symbolic_board_id: a ASCII string. It can be up to 4 characters
-
- Returns:
- the symbolic board id string converted to an int
- """
- board_id = 0
- for c in symbolic_board_id:
- board_id = ord(c) | (board_id << 8)
- return board_id
-
-
-def GetIntBoardId(board_id):
- """"Return the gsctool interpretation of board_id
-
- Args:
- board_id: a int or string value of the board id
-
- Returns:
- a int representation of the board id
- """
- if type(board_id) == int:
- return board_id
-
- if len(board_id) <= SYMBOLIC_BID_LENGTH:
- return ConvertSymbolicBoardId(board_id)
-
- return int(board_id, 16)
-
-
-def GetExpectedFlags(flags):
- """If flags are not specified, gsctool will set them to 0xff00
-
- Args:
- flags: The int value or None
-
- Returns:
- the original flags or 0xff00 if flags is None
- """
- return flags if flags != None else 0xff00
-
-
-def RMAOpen(client, cmd='', ignore_status=False):
- """Run gsctool RMA commands"""
- return GSCTool(client, ['-a', '-r', cmd], ignore_status)
-
-
-def GetChipBoardId(client):
- """Return the board id and flags
-
- Args:
- client: the object to run commands on
-
- Returns:
- a tuple with the int values of board id, board id inv, flags
-
- Raises:
- TestFail if the second board id response field is not ~board_id
- """
- result = GSCTool(client, ['-a', '-i']).stdout.strip()
- board_id_info = result.split('Board ID space: ')[-1].strip().split(':')
- board_id, board_id_inv, flags = [int(val, 16) for val in board_id_info]
- logging.info('BOARD_ID: %x:%x:%x', board_id, board_id_inv, flags)
-
- if board_id == board_id_inv == ERASED_BID_INT:
- if flags == ERASED_BID_INT:
- logging.info('board id is erased')
- else:
- logging.info('board id type is erased')
- elif board_id & board_id_inv:
- raise error.TestFail('board_id_inv should be ~board_id got %x %x' %
- (board_id, board_id_inv))
- return board_id, board_id_inv, flags
-
-
-def GetChipBIDFromImageBID(image_bid, brand):
- """Calculate a chip bid that will work with the image bid.
-
- Returns:
- A tuple of integers (bid type, ~bid type, bid flags)
- """
- image_bid_tuple = GetBoardIdInfoTuple(image_bid)
- # GetBoardIdInfoTuple returns None if the image isn't board id locked.
- # Generate a Tuple of all 0s the rest of the function can use.
- if not image_bid_tuple:
- image_bid_tuple = (0, 0, 0)
-
- image_bid, image_mask, image_flags = image_bid_tuple
- if image_mask:
- new_brand = GetSymbolicBoardId(image_bid)
- else:
- new_brand = brand
- new_flags = image_flags or MP_BID_FLAGS
- bid_type = GetIntBoardId(new_brand)
- # If the board id type is erased, type_inv should also be unset.
- if bid_type == ERASED_BID_INT:
- return (ERASED_BID_INT, ERASED_BID_INT, new_flags)
- return bid_type, 0xffffffff & ~bid_type, new_flags
-
-
-def CheckChipBoardId(client, board_id, flags, board_id_inv=None):
- """Compare the given board_id and flags to the running board_id and flags
-
- Interpret board_id and flags how gsctool would interpret them, then compare
- those interpreted values to the running board_id and flags.
-
- Args:
- client: the object to run commands on
- board_id: a hex str, symbolic str, or int value for board_id
- board_id_inv: a hex str or int value of board_id_inv. Ignore
- board_id_inv if None. board_id_inv is ~board_id unless
- the board id is erased. In case both should be 0xffffffff.
- flags: the int value of flags or None
-
- Raises:
- TestFail if the new board id info does not match
- """
- # Read back the board id and flags
- new_board_id, new_board_id_inv, new_flags = GetChipBoardId(client)
-
- expected_board_id = GetIntBoardId(board_id)
- expected_flags = GetExpectedFlags(flags)
-
- if board_id_inv == None:
- new_board_id_inv_str = ''
- expected_board_id_inv_str = ''
- else:
- new_board_id_inv_str = '%08x:' % new_board_id_inv
- expected_board_id_inv = GetIntBoardId(board_id_inv)
- expected_board_id_inv_str = '%08x:' % expected_board_id_inv
-
- expected_str = '%08x:%s%08x' % (expected_board_id,
- expected_board_id_inv_str,
- expected_flags)
- new_str = '%08x:%s%08x' % (new_board_id, new_board_id_inv_str, new_flags)
-
- if new_str != expected_str:
- raise error.TestFail('Failed to set board id: expected %r got %r' %
- (expected_str, new_str))
-
-
-def SetChipBoardId(client, board_id, flags=None, pad=True):
- """Sets the board id and flags
-
- Args:
- client: the object to run commands on
- board_id: a string of the symbolic board id or board id hex value. If
- the string is less than 4 characters long it will be
- considered a symbolic value
- flags: a int flag value. If board_id is a symbolic value, then this will
- be ignored.
- pad: pad any int board id, so the string is not 4 characters long.
-
- Raises:
- TestFail if we were unable to set the flags to the correct value
- """
- if isinstance(board_id, int):
- # gsctool will interpret any 4 character string as a RLZ code. If pad is
- # true, pad the board id with 0s to make sure the board id isn't 4
- # characters long.
- board_id_arg = ('0x%08x' % board_id) if pad else hex(board_id)
- else:
- board_id_arg = board_id
- if flags != None:
- board_id_arg += ':' + hex(flags)
- # Set the board id using the given board id and flags
- result = GSCTool(client, ['-a', '-i', board_id_arg]).stdout.strip()
-
- CheckChipBoardId(client, board_id, flags)
-
-def DumpFlog(client):
- """Retrieve contents of the flash log"""
- return GSCTool(client, ['-a', '-L']).stdout.strip()
diff --git a/client/common_lib/cros/enrollment.py b/client/common_lib/cros/enrollment.py
deleted file mode 100644
index 1f8f82d..0000000
--- a/client/common_lib/cros/enrollment.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib.cros import chrome
-
-
-def _ExecuteOobeCmd(browser, cmd):
- logging.info('Invoking ' + cmd)
- oobe = browser.oobe
- oobe.WaitForJavaScriptCondition('typeof Oobe !== \'undefined\'',
- timeout=10)
- oobe.ExecuteJavaScript(cmd)
-
-
-def RemoraEnrollment(browser, user_id, password):
- """Enterprise login for a Remora device.
-
- @param browser: telemetry browser object.
- @param user_id: login credentials user_id.
- @param password: login credentials password.
- """
- browser.oobe.NavigateGaiaLogin(
- user_id, password, enterprise_enroll=True,
- for_user_triggered_enrollment=False)
-
-
-def EnterpriseEnrollment(browser, user_id, password, auto_login=False):
- """Enterprise login for a kiosk device.
-
- @param browser: telemetry browser object.
- @param user_id: login credentials user_id.
- @param password: login credentials password.
- @param auto_login: also login after enrollment.
- """
- browser.oobe.NavigateGaiaLogin(user_id, password,
- enterprise_enroll=True,
- for_user_triggered_enrollment=True)
- if auto_login:
- browser.oobe.NavigateGaiaLogin(user_id, password)
- # TODO(achuith): Replace with WaitForLogin.
- utils.poll_for_condition(lambda: not browser.oobe_exists, timeout=30)
-
-
-def EnterpriseFakeEnrollment(browser, user_id, password, gaia_id,
- auto_login=False):
- """Enterprise fake login.
-
- @param browser: telemetry browser object.
- @param user_id: login credentials user_id.
- @param password: login credentials password.
- @param gaia_id: login credentials gaia_id.
- @param auto_login: also login after enrollment.
- """
- browser.oobe.NavigateFakeLogin(user_id, password, gaia_id,
- enterprise_enroll=True)
- # Oobe context recreated after after the enrollment.
- utils.poll_for_condition(
- lambda: browser.oobe_exists and
- browser.oobe.EnterpriseWebviewVisible(user_id), timeout=30)
-
- if auto_login:
- browser.oobe.NavigateFakeLogin(user_id, password, gaia_id)
- # TODO(achuith): Replace with WaitForLogin.
- utils.poll_for_condition(lambda: not browser.oobe_exists, timeout=45)
-
-
-def OnlineDemoMode(browser):
- """Switch to online demo mode.
-
- @param browser: telemetry browser object.
- """
- _ExecuteOobeCmd(browser, 'Oobe.setUpOnlineDemoModeForTesting();')
- utils.poll_for_condition(lambda: not browser.oobe_exists, timeout=90)
-
-
-def KioskEnrollment(browser, user_id, password, gaia_id):
- """Kiosk Enrollment.
-
- @param browser: telemetry browser object.
- @param user_id: login credentials user_id.
- @param password: login credentials password.
- @param gaia_id: login credentials gaia_id.
- """
-
- cmd = ('Oobe.loginForTesting("{user}", "{password}", "{gaia_id}", true)'
- .format(user=user_id,
- password=password,
- gaia_id=gaia_id))
- _ExecuteOobeCmd(browser, cmd)
-
- utils.poll_for_condition(lambda: not browser.oobe_exists, timeout=60)
diff --git a/client/common_lib/cros/g2f_utils.py b/client/common_lib/cros/g2f_utils.py
deleted file mode 100644
index 73fa8dd..0000000
--- a/client/common_lib/cros/g2f_utils.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import time
-
-from autotest_lib.client.common_lib import error
-
-# USB ID for the virtual U2F HID Device.
-U2F_VID = '18D1'
-U2F_PID = '502C'
-
-QUERY_U2F_DEVICE_ATTEMPTS=5
-QUERY_U2F_RETRY_DELAY_SEC=1
-
-def ChromeOSLogin(client):
- """Logs in to ChromeOS, so that u2fd can start up."""
- client.run('/usr/local/autotest/bin/autologin.py')
-
-def ChromeOSLogout(client):
- """Logs out of ChromeOS, to return the device to a known state."""
- client.run('restart ui')
-
-def StartU2fd(client):
- """Starts u2fd on the client.
-
- @param client: client object to run commands on.
- """
- client.run('touch /var/lib/u2f/force/u2f.force')
- client.run('restart u2fd')
-
- path = '/sys/bus/hid/devices/*:%s:%s.*/hidraw' % (U2F_VID, U2F_PID)
- attempts = 0
- while attempts < QUERY_U2F_DEVICE_ATTEMPTS:
- attempts += 1
- try:
- return '/dev/' + client.run('ls ' + path).stdout.strip()
- except error.AutoservRunError as e:
- logging.info('Could not find U2F device on attempt ' +
- str(attempts))
- time.sleep(QUERY_U2F_RETRY_DELAY_SEC)
-
-def G2fRegister(client, dev, challenge, application, p1=0):
- """Returns a dictionary with TPM status.
-
- @param client: client object to run commands on.
- """
- return client.run('g2ftool --reg --dev=' + dev +
- ' --challenge=' + challenge +
- ' --application=' + application +
- ' --p1=' + str(p1),
- ignore_status=True)
-
-def G2fAuth(client, dev, challenge, application, key_handle, p1=0):
- """Returns a dictionary with TPM status.
-
- @param client: client object to run commands on.
- """
- return client.run('g2ftool --auth --dev=' + dev +
- ' --challenge=' + challenge +
- ' --application=' + application +
- ' --key_handle=' + key_handle +
- ' --p1=' + str(p1),
- ignore_status=True)
diff --git a/client/common_lib/cros/kiosk_utils.py b/client/common_lib/cros/kiosk_utils.py
deleted file mode 100644
index 94d8090..0000000
--- a/client/common_lib/cros/kiosk_utils.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file
-
-import logging
-import time
-
-from telemetry.core import exceptions
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.cros import chrome
-
-DEFAULT_TIMEOUT = 30
-SHORT_TIMEOUT = 5
-
-
-def get_webview_contexts(browser, ext_id):
- """Get all webview contexts for an extension.
-
- @param browser: Telemetry browser object.
- @param ext_id: Extension id of the kiosk app.
- @return A list of webview contexts.
- """
- ext_contexts = wait_for_kiosk_ext(browser, ext_id)
-
- for context in ext_contexts:
- context.WaitForDocumentReadyStateToBeInteractiveOrBetter()
- tagName = context.EvaluateJavaScript(
- "document.querySelector('webview') ? 'WEBVIEW' : 'NOWEBVIEW'")
- if tagName == "WEBVIEW":
- def _webview_context():
- try:
- return context.GetWebviewContexts()
- except (chrome.Error):
- logging.exception(
- 'An error occured while getting the webview contexts.')
- return None
-
- return utils.poll_for_condition(
- _webview_context,
- exception=error.TestFail('Webview not available.'),
- timeout=DEFAULT_TIMEOUT,
- sleep_interval=1)
-
-
-# TODO(dtosic): deprecate this method in favor of 'get_webview_contexts()'
-def get_webview_context(browser, ext_id):
- """Get context for CFM webview.
-
- @param browser: Telemetry browser object.
- @param ext_id: Extension id of the kiosk app.
- @return webview context.
- """
- ext_contexts = wait_for_kiosk_ext(browser, ext_id)
-
- for context in ext_contexts:
- context.WaitForDocumentReadyStateToBeInteractiveOrBetter()
- tagName = context.EvaluateJavaScript(
- "document.querySelector('webview') ? 'WEBVIEW' : 'NOWEBVIEW'")
- if tagName == "WEBVIEW":
- def _webview_context():
- try:
- wb_contexts = context.GetWebviewContexts()
- if len(wb_contexts) == 1:
- return wb_contexts[0]
- if len(wb_contexts) == 2:
- return wb_contexts[1]
-
- except (KeyError, chrome.Error):
- pass
- return None
- return utils.poll_for_condition(
- _webview_context,
- exception=error.TestFail('Webview not available.'),
- timeout=DEFAULT_TIMEOUT,
- sleep_interval=1)
-
-
-def wait_for_kiosk_ext(browser, ext_id):
- """Wait for kiosk extension launch.
-
- @param browser: Telemetry browser object.
- @param ext_id: Extension id of the kiosk app.
- @return extension contexts.
- """
- def _kiosk_ext_contexts():
- try:
- ext_contexts = browser.extensions.GetByExtensionId(ext_id)
- if len(ext_contexts) > 1:
- return ext_contexts
- except (AttributeError, KeyError, chrome.Error):
- pass
- return []
- return utils.poll_for_condition(
- _kiosk_ext_contexts,
- exception=error.TestFail('Kiosk app failed to launch'),
- timeout=DEFAULT_TIMEOUT,
- sleep_interval=1)
-
-
-def config_riseplayer(browser, ext_id, app_config_id):
- """
- Configure Rise Player app with a specific display id.
-
- Step through the configuration screen of the Rise Player app
- which is launched within the browser and enter a display id
- within the configuration frame to initiate media display.
-
- @param browser: browser instance containing the Rise Player kiosk app.
- @param ext_id: extension id of the Rise Player Kiosk App.
- @param app_config_id: display id for the Rise Player app .
-
- """
- if not app_config_id:
- raise error.TestFail(
- 'Error in configuring Rise Player: app_config_id is None')
- config_js = """
- var frameId = 'btn btn-primary display-register-button'
- document.getElementsByClassName(frameId)[0].click();
- $( "input:text" ).val("%s");
- document.getElementsByClassName(frameId)[4].click();
- """ % app_config_id
-
- kiosk_webview_context = get_webview_context(
- browser, ext_id)
- # Wait for the configuration frame to load.
- time.sleep(SHORT_TIMEOUT)
- kiosk_webview_context.ExecuteJavaScript(config_js)
- # TODO (krishnargv): Find a way to verify that content is playing
- # within the RisePlayer app.
- verify_app_config_id = """
- /rvashow.*.display&id=%s.*/.test(location.href)
- """ % app_config_id
- #Verify that Risepplayer successfully validates the display id.
- try:
- kiosk_webview_context.WaitForJavaScriptCondition(
- verify_app_config_id,
- timeout=DEFAULT_TIMEOUT)
- except exceptions.TimeoutException:
- raise error.TestFail('Error in configuring Rise Player with id: %s'
- % app_config_id)
diff --git a/client/common_lib/cros/memory_eater.py b/client/common_lib/cros/memory_eater.py
deleted file mode 100644
index c87b925..0000000
--- a/client/common_lib/cros/memory_eater.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import subprocess
-import time
-import threading
-
-from autotest_lib.client.bin import utils
-
-class MemoryEater(object):
- """A util class which run programs to consume memory in the background.
-
- Sample usage:
- with MemoryEator() as memory_eater:
- # Allocate mlocked memory.
- memory_eater.consume_locked_memory(123)
-
- # Allocate memory and sequentially traverse them over and over.
- memory_eater.consume_active_memory(500)
-
- When it goes out of the "with" context or the object is destructed, all
- allocated memory are released.
- """
-
- memory_eater_locked = 'memory-eater-locked'
- memory_eater = 'memory-eater'
-
- _all_instances = []
-
- def __init__(self):
- self._locked_consumers = []
- self._active_consumers_lock = threading.Lock()
- self._active_consumers = []
- self._all_instances.append(self)
-
- def __enter__(self):
- return self
-
- @staticmethod
- def cleanup_consumers(consumers):
- """Kill all processes in |consumers|
-
- @param consumers: The list of consumers to clean.
- """
- while len(consumers):
- job = consumers.pop()
- logging.info('Killing %d', job.pid)
- job.kill()
-
- def cleanup(self):
- """Releases all allocated memory."""
- # Kill all hanging jobs.
- logging.info('Cleaning hanging memory consuming processes...')
- self.cleanup_consumers(self._locked_consumers)
- with self._active_consumers_lock:
- self.cleanup_consumers(self._active_consumers)
-
- def __exit__(self, type, value, traceback):
- self.cleanup()
-
- def __del__(self):
- self.cleanup()
- if self in self._all_instances:
- self._all_instances.remove(self)
-
- def consume_locked_memory(self, mb):
- """Consume non-swappable memory."""
- logging.info('Consuming locked memory %d MB', mb)
- cmd = [self.memory_eater_locked, str(mb)]
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- self._locked_consumers.append(p)
- # Wait until memory allocation is done.
- while True:
- line = p.stdout.readline()
- if line.find('Done') != -1:
- break
-
- def consume_active_memory(self, mb):
- """Consume active memory."""
- logging.info('Consuming active memory %d MB', mb)
- cmd = [self.memory_eater, '--size', str(mb), '--chunk', '128']
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- with self._active_consumers_lock:
- self._active_consumers.append(p)
-
- @classmethod
- def get_active_consumer_pids(cls):
- """Gets pid of active consumers by all instances of the class."""
- all_pids = []
- for instance in cls._all_instances:
- with instance._active_consumers_lock:
- all_pids.extend([p.pid for p in instance._active_consumers])
- return all_pids
-
-
-def consume_free_memory(memory_to_reserve_mb):
- """Consumes free memory until |memory_to_reserve_mb| is remained.
-
- Non-swappable memory is allocated to consume memory.
- memory_to_reserve_mb: Consume memory until this amount of free memory
- is remained.
- @return The MemoryEater() object on which memory is allocated. One can
- catch it in a context manager.
- """
- consumer = MemoryEater()
- while True:
- mem_free_mb = utils.read_from_meminfo('MemFree') / 1024
- logging.info('Current Free Memory %d', mem_free_mb)
- if mem_free_mb <= memory_to_reserve_mb:
- break
- memory_to_consume = min(
- 2047, mem_free_mb - memory_to_reserve_mb + 1)
- logging.info('Consuming %d MB locked memory', memory_to_consume)
- consumer.consume_locked_memory(memory_to_consume)
- return consumer
-
-
-class TimeoutException(Exception):
- """Exception to return if timeout happens."""
- def __init__(self, message):
- super(TimeoutException, self).__init__(message)
-
-
-class _Timer(object):
- """A simple timer class to check timeout."""
- def __init__(self, timeout, des):
- """Initializer.
-
- @param timeout: Timeout in seconds.
- @param des: A short description for this timer.
- """
- self.timeout = timeout
- self.des = des
- if self.timeout:
- self.start_time = time.time()
-
- def check_timeout(self):
- """Raise TimeoutException if timeout happens."""
- if not self.timeout:
- return
- time_delta = time.time() - self.start_time
- if time_delta > self.timeout:
- err_message = '%s timeout after %s seconds' % (self.des, time_delta)
- logging.warning(err_message)
- raise TimeoutException(err_message)
-
-
-def run_single_memory_pressure(
- starting_mb, step_mb, end_condition, duration, cool_down, timeout=None):
- """Runs a single memory consumer to produce memory pressure.
-
- Keep adding memory pressure. In each round, it runs a memory consumer
- and waits for a while before checking whether to end the process. If not,
- kill current memory consumer and allocate more memory pressure in the next
- round.
- @param starting_mb: The amount of memory to start with.
- @param step_mb: If |end_condition| is not met, allocate |step_mb| more
- memory in the next round.
- @param end_condition: A boolean function returns whether to end the process.
- @param duration: Time (in seconds) to wait between running a memory
- consumer and checking |end_condition|.
- @param cool_down: Time (in seconds) to wait between each round.
- @param timeout: Seconds to stop the function is |end_condition| is not met.
- @return The size of memory allocated in the last round.
- @raise TimeoutException if timeout.
- """
- current_mb = starting_mb
- timer = _Timer(timeout, 'run_single_memory_pressure')
- while True:
- timer.check_timeout()
- with MemoryEater() as consumer:
- consumer.consume_active_memory(current_mb)
- time.sleep(duration)
- if end_condition():
- return current_mb
- current_mb += step_mb
- time.sleep(cool_down)
-
-
-def run_multi_memory_pressure(size_mb, end_condition, duration, timeout=None):
- """Runs concurrent memory consumers to produce memory pressure.
-
- In each round, it runs a new memory consumer until a certain condition is
- met.
- @param size_mb: The amount of memory each memory consumer allocates.
- @param end_condition: A boolean function returns whether to end the process.
- @param duration: Time (in seconds) to wait between running a memory
- consumer and checking |end_condition|.
- @param timeout: Seconds to stop the function is |end_condition| is not met.
- @return Total allocated memory.
- @raise TimeoutException if timeout.
- """
- total_mb = 0
- timer = _Timer(timeout, 'run_multi_memory_pressure')
- with MemoryEater() as consumer:
- while True:
- timer.check_timeout()
- consumer.consume_active_memory(size_mb)
- time.sleep(duration)
- if end_condition():
- return total_mb
- total_mb += size_mb
diff --git a/client/common_lib/cros/network/ap_constants.py b/client/common_lib/cros/network/ap_constants.py
deleted file mode 100644
index 0a4945a..0000000
--- a/client/common_lib/cros/network/ap_constants.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# These constants are used by the chaos_runner to raise TestError based on
-# failure
-AP_CONFIG_FAIL = 'AP configuration failed'
-AP_PDU_DOWN = 'PDU is down'
-AP_SECURITY_MISMATCH = 'AP security mismatch'
-AP_SSID_NOTFOUND = 'SSID was not found'
-WORK_CLI_CONNECT_FAIL = 'Work client was not able to connect to the AP'
-
-# These constants are used by the AP configurator to indicate the type of
-# configuration failure or success.
-CONFIG_SUCCESS = 0
-PDU_FAIL = 1
-CONFIG_FAIL = 2
-
-# These constants are used by the AP configurator to determine if this is
-# a chaos vs clique test.
-AP_TEST_TYPE_CHAOS = 1
-AP_TEST_TYPE_CLIQUE = 2
-AP_TEST_TYPE_CASEY5 = 3
-AP_TEST_TYPE_CASEY7 = 4
-
-# This constant is used by the chaos_runner to determine maximum APs/SSIDs
-# that are up in the lab.
-MAX_SSID_COUNT = 10
-MAX_SCAN_TIMEOUT = 30
-
-# Isolation chamber lab prefixes used to determine where the test is running
-CASEY5 = 'chromeos5'
-CASEY7 = 'chromeos7'
diff --git a/client/common_lib/cros/network/apmanager_constants.py b/client/common_lib/cros/network/apmanager_constants.py
deleted file mode 100644
index f4a0b24..0000000
--- a/client/common_lib/cros/network/apmanager_constants.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# These constants are configuration keys for configuring AP service through
-# apmanager's DBus interface. These are names of DBus properties on
-# org.chromium.apmanager.Config DBus interface.
-CONFIG_BRIDGE_INTERFACE = 'BridgeInterface'
-CONFIG_CHANNEL = 'Channel'
-CONFIG_HIDDEN_NETWORK = 'HiddenNetwork'
-CONFIG_HW_MODE = 'HwMode'
-CONFIG_INTERFACE_NAME = 'InterfaceName'
-CONFIG_OPERATION_MODE = 'OperationMode'
-CONFIG_PASSPHRASE = 'Passphrase'
-CONFIG_SECURITY_MODE = 'SecurityMode'
-CONFIG_SERVER_ADDRESS_INDEX = 'ServerAddressIndex'
-CONFIG_SSID = 'Ssid'
-
-# Configuration value definitions
-OPERATION_MODE_BRIDGE = 'bridge'
-OPERATION_MODE_SERVER = 'server'
-
-# Default configuration values.
-DEFAULT_CHANNEL_NUMBER = 6
diff --git a/client/common_lib/cros/network/chrome_net_constants.py b/client/common_lib/cros/network/chrome_net_constants.py
deleted file mode 100644
index 0a58b69..0000000
--- a/client/common_lib/cros/network/chrome_net_constants.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2014 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# These constants are used by the Chrome end-to-end tests.
-OPEN_CONNECT = 'OpenNetworkConnect'
-OPEN_ROAM = 'OpenNetworkRoam'
-SHORT_TIMEOUT = 10
-LONG_TIMEOUT = 30
diff --git a/client/common_lib/cros/network/interface.py b/client/common_lib/cros/network/interface.py
deleted file mode 100644
index f6f6dfc..0000000
--- a/client/common_lib/cros/network/interface.py
+++ /dev/null
@@ -1,647 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import logging
-import os
-import re
-from six.moves import map
-from six.moves import range
-
-from autotest_lib.client.bin import local_host
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.cros.network import netblock
-
-# A tuple consisting of a readable part number (one of NAME_* below)
-# and a kernel module that provides the driver for this part (e.g. ath9k).
-DeviceDescription = collections.namedtuple('DeviceDescription',
- ['name', 'kernel_module'])
-
-
-# A tuple describing a default route, consisting of an interface name,
-# gateway IP address, and the metric value visible in the routing table.
-DefaultRoute = collections.namedtuple('DefaultRoute', ['interface_name',
- 'gateway',
- 'metric'])
-
-NAME_MARVELL_88W8797_SDIO = 'Marvell 88W8797 SDIO'
-NAME_MARVELL_88W8887_SDIO = 'Marvell 88W8887 SDIO'
-NAME_MARVELL_88W8897_SDIO = 'Marvell 88W8897 SDIO'
-NAME_MARVELL_88W8897_PCIE = 'Marvell 88W8897 PCIE'
-NAME_MARVELL_88W8997_PCIE = 'Marvell 88W8997 PCIE'
-NAME_ATHEROS_AR9280 = 'Atheros AR9280'
-NAME_ATHEROS_AR9382 = 'Atheros AR9382'
-NAME_ATHEROS_AR9462 = 'Atheros AR9462'
-NAME_QUALCOMM_ATHEROS_QCA6174 = 'Qualcomm Atheros QCA6174'
-NAME_QUALCOMM_ATHEROS_QCA6174_SDIO = 'Qualcomm Atheros QCA6174 SDIO'
-NAME_QUALCOMM_WCN3990 = 'Qualcomm WCN3990'
-NAME_INTEL_7260 = 'Intel 7260'
-NAME_INTEL_7265 = 'Intel 7265'
-NAME_INTEL_9000 = 'Intel 9000'
-NAME_INTEL_9260 = 'Intel 9260'
-NAME_INTEL_22260 = 'Intel 22260'
-NAME_INTEL_22560 = 'Intel 22560'
-NAME_BROADCOM_BCM4354_SDIO = 'Broadcom BCM4354 SDIO'
-NAME_BROADCOM_BCM4356_PCIE = 'Broadcom BCM4356 PCIE'
-NAME_BROADCOM_BCM4371_PCIE = 'Broadcom BCM4371 PCIE'
-NAME_REALTEK_8822C_PCIE = 'Realtek 8822C PCIE'
-NAME_UNKNOWN = 'Unknown WiFi Device'
-
-DEVICE_INFO_ROOT = '/sys/class/net'
-
-DeviceInfo = collections.namedtuple('DeviceInfo', ['vendor', 'device',
- 'subsystem',
- 'compatible'])
-# Provide default values for parameters.
-DeviceInfo.__new__.__defaults__ = (None, None, None, None)
-
-DEVICE_NAME_LOOKUP = {
- DeviceInfo('0x02df', '0x9129'): NAME_MARVELL_88W8797_SDIO,
- DeviceInfo('0x02df', '0x912d'): NAME_MARVELL_88W8897_SDIO,
- DeviceInfo('0x02df', '0x9135'): NAME_MARVELL_88W8887_SDIO,
- DeviceInfo('0x11ab', '0x2b38'): NAME_MARVELL_88W8897_PCIE,
- DeviceInfo('0x1b4b', '0x2b42'): NAME_MARVELL_88W8997_PCIE,
- DeviceInfo('0x168c', '0x002a'): NAME_ATHEROS_AR9280,
- DeviceInfo('0x168c', '0x0030'): NAME_ATHEROS_AR9382,
- DeviceInfo('0x168c', '0x0034'): NAME_ATHEROS_AR9462,
- DeviceInfo('0x168c', '0x003e'): NAME_QUALCOMM_ATHEROS_QCA6174,
- DeviceInfo('0x105b', '0xe09d'): NAME_QUALCOMM_ATHEROS_QCA6174,
- DeviceInfo('0x0271', '0x050a'): NAME_QUALCOMM_ATHEROS_QCA6174_SDIO,
- DeviceInfo('0x8086', '0x08b1'): NAME_INTEL_7260,
- DeviceInfo('0x8086', '0x08b2'): NAME_INTEL_7260,
- DeviceInfo('0x8086', '0x095a'): NAME_INTEL_7265,
- DeviceInfo('0x8086', '0x095b'): NAME_INTEL_7265,
- # Note that Intel 9000 is also Intel 9560 aka Jefferson Peak 2
- DeviceInfo('0x8086', '0x9df0'): NAME_INTEL_9000,
- DeviceInfo('0x8086', '0x31dc'): NAME_INTEL_9000,
- DeviceInfo('0x8086', '0x2526'): NAME_INTEL_9260,
- DeviceInfo('0x8086', '0x2723'): NAME_INTEL_22260,
- # For integrated wifi chips, use device_id and subsystem_id together
- # as an identifier.
- # 0x02f0 is for Quasar on CML, 0x4070 and 0x0074 is for HrP2
- DeviceInfo('0x8086', '0x02f0', subsystem='0x4070'): NAME_INTEL_22560,
- DeviceInfo('0x8086', '0x02f0', subsystem='0x0074'): NAME_INTEL_22560,
- DeviceInfo('0x8086', '0x4df0', subsystem='0x0074'): NAME_INTEL_22560,
- # With the same Quasar, subsystem_id 0x0034 is JfP2
- DeviceInfo('0x8086', '0x02f0', subsystem='0x0034'): NAME_INTEL_9000,
- DeviceInfo('0x02d0', '0x4354'): NAME_BROADCOM_BCM4354_SDIO,
- DeviceInfo('0x14e4', '0x43ec'): NAME_BROADCOM_BCM4356_PCIE,
- DeviceInfo('0x14e4', '0x440d'): NAME_BROADCOM_BCM4371_PCIE,
- DeviceInfo('0x10ec', '0xc822'): NAME_REALTEK_8822C_PCIE,
-
- DeviceInfo(compatible='qcom,wcn3990-wifi'): NAME_QUALCOMM_WCN3990,
-}
-
-class Interface:
- """Interace is a class that contains the queriable address properties
- of an network device.
- """
- ADDRESS_TYPE_MAC = 'link/ether'
- ADDRESS_TYPE_IPV4 = 'inet'
- ADDRESS_TYPE_IPV6 = 'inet6'
- ADDRESS_TYPES = [ ADDRESS_TYPE_MAC, ADDRESS_TYPE_IPV4, ADDRESS_TYPE_IPV6 ]
-
-
- @staticmethod
- def get_connected_ethernet_interface(ignore_failures=False):
- """Get an interface object representing a connected ethernet device.
-
- Raises an exception if no such interface exists.
-
- @param ignore_failures bool function will return None instead of raising
- an exception on failures.
- @return an Interface object except under the conditions described above.
-
- """
- # Assume that ethernet devices are called ethX until proven otherwise.
- for device_name in ['eth%d' % i for i in range(5)]:
- ethernet_if = Interface(device_name)
- if ethernet_if.exists and ethernet_if.ipv4_address:
- return ethernet_if
-
- else:
- if ignore_failures:
- return None
-
- raise error.TestFail('Failed to find ethernet interface.')
-
-
- def __init__(self, name, host=None):
- self._name = name
- if host is None:
- self.host = local_host.LocalHost()
- else:
- self.host = host
- self._run = self.host.run
-
-
- @property
- def name(self):
- """@return name of the interface (e.g. 'wlan0')."""
- return self._name
-
-
- @property
- def addresses(self):
- """@return the addresses (MAC, IP) associated with interface."""
- # "ip addr show %s 2> /dev/null" returns something that looks like:
- #
- # 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast
- # link/ether ac:16:2d:07:51:0f brd ff:ff:ff:ff:ff:ff
- # inet 172.22.73.124/22 brd 172.22.75.255 scope global eth0
- # inet6 2620:0:1000:1b02:ae16:2dff:fe07:510f/64 scope global dynamic
- # valid_lft 2591982sec preferred_lft 604782sec
- # inet6 fe80::ae16:2dff:fe07:510f/64 scope link
- # valid_lft forever preferred_lft forever
- #
- # We extract the second column from any entry for which the first
- # column is an address type we are interested in. For example,
- # for "inet 172.22.73.124/22 ...", we will capture "172.22.73.124/22".
- result = self._run('ip addr show %s 2> /dev/null' % self._name,
- ignore_status=True)
- address_info = result.stdout
- if result.exit_status != 0:
- # The "ip" command will return non-zero if the interface does
- # not exist.
- return {}
-
- addresses = {}
- for address_line in address_info.splitlines():
- address_parts = address_line.lstrip().split()
- if len(address_parts) < 2:
- continue
- address_type, address_value = address_parts[:2]
- if address_type in self.ADDRESS_TYPES:
- if address_type not in addresses:
- addresses[address_type] = []
- addresses[address_type].append(address_value)
- return addresses
-
-
- @property
- def device_path(self):
- """@return the sysfs path of the interface device"""
- # This assumes that our path separator is the same as the remote host.
- device_path = os.path.join(DEVICE_INFO_ROOT, self._name, 'device')
- if not self.host.path_exists(device_path):
- logging.error('No device information found at %s', device_path)
- return None
-
- return device_path
-
-
- @property
- def wiphy_name(self):
- """
- @return name of the wiphy (e.g., 'phy0'), if available.
- Otherwise None.
- """
- readlink_result = self._run('readlink "%s"' %
- os.path.join(DEVICE_INFO_ROOT, self._name, 'phy80211'),
- ignore_status=True)
- if readlink_result.exit_status != 0:
- return None
-
- return os.path.basename(readlink_result.stdout.strip())
-
-
- @property
- def module_name(self):
- """@return Name of kernel module in use by this interface."""
- module_readlink_result = self._run('readlink "%s"' %
- os.path.join(self.device_path, 'driver', 'module'),
- ignore_status=True)
- if module_readlink_result.exit_status != 0:
- return None
-
- return os.path.basename(module_readlink_result.stdout.strip())
-
- @property
- def parent_device_name(self):
- """
- @return Name of device at which wiphy device is present. For example,
- for a wifi NIC present on a PCI bus, this would be the same as
- PCI_SLOT_PATH. """
- path_readlink_result = self._run('readlink "%s"' % self.device_path)
- if path_readlink_result.exit_status != 0:
- return None
-
- return os.path.basename(path_readlink_result.stdout.strip())
-
- def _get_wifi_device_name(self):
- """Helper for device_description()."""
- device_path = self.device_path
- if not device_path:
- return None
-
- read_file = (lambda path: self._run('cat "%s"' % path).stdout.rstrip()
- if self.host.path_exists(path) else None)
-
- # Try to identify using either vendor/product ID, or using device tree
- # "OF_COMPATIBLE_x".
- vendor_id = read_file(os.path.join(device_path, 'vendor'))
- product_id = read_file(os.path.join(device_path, 'device'))
- subsystem_id = read_file(os.path.join(device_path, 'subsystem_device'))
- uevent = read_file(os.path.join(device_path, 'uevent'))
-
- # Device tree "compatible".
- for line in uevent.splitlines():
- key, _, value = line.partition('=')
- if re.match('^OF_COMPATIBLE_[0-9]+$', key):
- info = DeviceInfo(compatible=value)
- if info in DEVICE_NAME_LOOKUP:
- return DEVICE_NAME_LOOKUP[info]
-
- # {Vendor, Product, Subsystem} ID.
- if subsystem_id is not None:
- info = DeviceInfo(vendor_id, product_id, subsystem=subsystem_id)
- if info in DEVICE_NAME_LOOKUP:
- return DEVICE_NAME_LOOKUP[info]
-
-
- # {Vendor, Product} ID.
- info = DeviceInfo(vendor_id, product_id)
- if info in DEVICE_NAME_LOOKUP:
- return DEVICE_NAME_LOOKUP[info]
-
- return None
-
- @property
- def device_description(self):
- """@return DeviceDescription object for a WiFi interface, or None."""
- if not self.is_wifi_device():
- logging.error('Device description not supported on non-wifi '
- 'interface: %s.', self._name)
- return None
-
- device_name = self._get_wifi_device_name()
- if not device_name:
- device_name = NAME_UNKNOWN
- logging.error('Device is unknown.')
- else:
- logging.debug('Device is %s', device_name)
-
- module_name = self.module_name
- kernel_release = self._run('uname -r').stdout.strip()
- net_drivers_path = '/lib/modules/%s/kernel/drivers/net' % kernel_release
- if module_name is not None and self.host.path_exists(net_drivers_path):
- module_path = self._run('find %s -name %s.ko -printf %%P' % (
- net_drivers_path, module_name)).stdout
- else:
- module_path = 'Unknown (kernel might have modules disabled)'
- return DeviceDescription(device_name, module_path)
-
-
- @property
- def exists(self):
- """@return True if this interface exists, False otherwise."""
- # No valid interface has no addresses at all.
- return bool(self.addresses)
-
-
-
- def get_ip_flags(self):
- """@return List of flags from 'ip addr show'."""
- # "ip addr show %s 2> /dev/null" returns something that looks like:
- #
- # 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast
- # link/ether ac:16:2d:07:51:0f brd ff:ff:ff:ff:ff:ff
- # inet 172.22.73.124/22 brd 172.22.75.255 scope global eth0
- # inet6 2620:0:1000:1b02:ae16:2dff:fe07:510f/64 scope global dynamic
- # valid_lft 2591982sec preferred_lft 604782sec
- # inet6 fe80::ae16:2dff:fe07:510f/64 scope link
- # valid_lft forever preferred_lft forever
- #
- # We only cares about the flags in the first line.
- result = self._run('ip addr show %s 2> /dev/null' % self._name,
- ignore_status=True)
- address_info = result.stdout
- if result.exit_status != 0:
- # The "ip" command will return non-zero if the interface does
- # not exist.
- return []
- status_line = address_info.splitlines()[0]
- flags_str = status_line[status_line.find('<')+1:status_line.find('>')]
- return flags_str.split(',')
-
-
- @property
- def is_up(self):
- """@return True if this interface is UP, False otherwise."""
- return 'UP' in self.get_ip_flags()
-
-
- @property
- def is_lower_up(self):
- """
- Check if the interface is in LOWER_UP state. This usually means (e.g.,
- for ethernet) a link is detected.
-
- @return True if this interface is LOWER_UP, False otherwise."""
- return 'LOWER_UP' in self.get_ip_flags()
-
-
- def is_link_operational(self):
- """@return True if RFC 2683 IfOperStatus is UP (i.e., is able to pass
- packets).
- """
- command = 'ip link show %s' % self._name
- result = self._run(command, ignore_status=True)
- if result.exit_status:
- return False
- return result.stdout.find('state UP') >= 0
-
-
- @property
- def mac_address(self):
- """@return the (first) MAC address, e.g., "00:11:22:33:44:55"."""
- return self.addresses.get(self.ADDRESS_TYPE_MAC, [None])[0]
-
-
- @property
- def ipv4_address_and_prefix(self):
- """@return the IPv4 address/prefix, e.g., "192.186.0.1/24"."""
- return self.addresses.get(self.ADDRESS_TYPE_IPV4, [None])[0]
-
-
- @property
- def ipv4_address(self):
- """@return the (first) IPv4 address, e.g., "192.168.0.1"."""
- netblock_addr = self.netblock
- return netblock_addr.addr if netblock_addr else None
-
-
- @property
- def ipv4_prefix(self):
- """@return the IPv4 address prefix e.g., 24."""
- addr = self.netblock
- return addr.prefix_len if addr else None
-
-
- @property
- def ipv4_subnet(self):
- """@return string subnet of IPv4 address (e.g. '192.168.0.0')"""
- addr = self.netblock
- return addr.subnet if addr else None
-
-
- @property
- def ipv4_subnet_mask(self):
- """@return the IPv4 subnet mask e.g., "255.255.255.0"."""
- addr = self.netblock
- return addr.netmask if addr else None
-
-
- def is_wifi_device(self):
- """@return True if iw thinks this is a wifi device."""
- if self._run('iw dev %s info' % self._name,
- ignore_status=True).exit_status:
- logging.debug('%s does not seem to be a wireless device.',
- self._name)
- return False
- return True
-
-
- @property
- def netblock(self):
- """Return Netblock object for this interface's IPv4 address.
-
- @return Netblock object (or None if no IPv4 address found).
-
- """
- netblock_str = self.ipv4_address_and_prefix
- return netblock.from_addr(netblock_str) if netblock_str else None
-
-
- @property
- def signal_level(self):
- """Get the signal level for an interface.
-
- This is currently only defined for WiFi interfaces.
-
- localhost test # iw dev mlan0 link
- Connected to 04:f0:21:03:7d:b2 (on mlan0)
- SSID: Perf_slvf0_ch36
- freq: 5180
- RX: 699407596 bytes (8165441 packets)
- TX: 58632580 bytes (9923989 packets)
- signal: -54 dBm
- tx bitrate: 130.0 MBit/s MCS 15
-
- bss flags:
- dtim period: 2
- beacon int: 100
-
- @return signal level in dBm (a negative, integral number).
-
- """
- if not self.is_wifi_device():
- return None
-
- result_lines = self._run('iw dev %s link' %
- self._name).stdout.splitlines()
- signal_pattern = re.compile('signal:\s+([-0-9]+)\s+dbm')
- for line in result_lines:
- cleaned = line.strip().lower()
- match = re.search(signal_pattern, cleaned)
- if match is not None:
- return int(match.group(1))
-
- logging.error('Failed to find signal level for %s.', self._name)
- return None
-
-
- @property
- def signal_level_all_chains(self):
- """Get the signal level for each chain of an interface.
-
- This is only defined for WiFi interfaces.
-
- localhost test # iw wlan0 station dump
- Station 44:48:c1:af:d7:31 (on wlan0)
- inactive time: 13180 ms
- rx bytes: 46886
- rx packets: 459
- tx bytes: 103159
- tx packets: 745
- tx retries: 17
- tx failed: 0
- beacon loss: 0
- beacon rx: 128
- rx drop misc: 2
- signal: -52 [-52, -53] dBm
- signal avg: 56 dBm
- beacon signal avg: -49 dBm
- tx bitrate: 400.0 MBit/s VHT-MCS 9 40MHz short GI VHT-NSS 2
- rx bitrate: 400.0 MBit/s VHT-MCS 9 40MHz short GI VHT-NSS 2
- authorized: yes
- authenticated: yes
- associated: yes
- preamble: long
- WMM/WME: yes
- MFP: no
- TDLS peer: no
- DTIM period: 1
- beacon interval:100
- short slot time:yes
- connected time: 6874 seconds
-
- @return array of signal level information for each antenna in dBm
- (an array of negative, integral numbers e.g. [-67, -60]) or None if
- chain specific data is not provided by the device.
-
- """
- if not self.is_wifi_device():
- return None
-
- result_lines = self._run('iw %s station dump' %
- self._name).stdout.splitlines()
- signal_pattern = re.compile('signal:\s+([-0-9]+)\[')
- for line in result_lines:
- cleaned = line.strip().replace(' ', '').lower()
- match = re.search(signal_pattern, cleaned)
- if match is not None:
- signal_levels = cleaned[cleaned.find('[') + 1 :
- cleaned.find(']')].split(',')
- return list(map(int, signal_levels))
- return None
-
-
- @property
- def mtu(self):
- """@return the interface configured maximum transmission unit (MTU)."""
- # "ip addr show %s 2> /dev/null" returns something that looks like:
- #
- # 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast
- # link/ether ac:16:2d:07:51:0f brd ff:ff:ff:ff:ff:ff
- # inet 172.22.73.124/22 brd 172.22.75.255 scope global eth0
- # inet6 2620:0:1000:1b02:ae16:2dff:fe07:510f/64 scope global dynamic
- # valid_lft 2591982sec preferred_lft 604782sec
- # inet6 fe80::ae16:2dff:fe07:510f/64 scope link
- # valid_lft forever preferred_lft forever
- #
- # We extract the 'mtu' value (in this example "1500")
- try:
- result = self._run('ip addr show %s 2> /dev/null' % self._name)
- address_info = result.stdout
- except error.CmdError as e:
- # The "ip" command will return non-zero if the interface does
- # not exist.
- return None
-
- match = re.search('mtu\s+(\d+)', address_info)
- if not match:
- raise error.TestFail('MTU information is not available.')
- return int(match.group(1))
-
-
- def noise_level(self, frequency_mhz):
- """Get the noise level for an interface at a given frequency.
-
- This is currently only defined for WiFi interfaces.
-
- This only works on some devices because 'iw survey dump' (the method
- used to get the noise) only works on some devices. On other devices,
- this method returns None.
-
- @param frequency_mhz: frequency at which the noise level should be
- measured and reported.
- @return noise level in dBm (a negative, integral number) or None.
-
- """
- if not self.is_wifi_device():
- return None
-
- # This code has to find the frequency and then find the noise
- # associated with that frequency because 'iw survey dump' output looks
- # like this:
- #
- # localhost test # iw dev mlan0 survey dump
- # ...
- # Survey data from mlan0
- # frequency: 5805 MHz
- # noise: -91 dBm
- # channel active time: 124 ms
- # channel busy time: 1 ms
- # channel receive time: 1 ms
- # channel transmit time: 0 ms
- # Survey data from mlan0
- # frequency: 5825 MHz
- # ...
-
- result_lines = self._run('iw dev %s survey dump' %
- self._name).stdout.splitlines()
- my_frequency_pattern = re.compile('frequency:\s*%d mhz' %
- frequency_mhz)
- any_frequency_pattern = re.compile('frequency:\s*\d{4} mhz')
- inside_desired_frequency_block = False
- noise_pattern = re.compile('noise:\s*([-0-9]+)\s+dbm')
- for line in result_lines:
- cleaned = line.strip().lower()
- if my_frequency_pattern.match(cleaned):
- inside_desired_frequency_block = True
- elif inside_desired_frequency_block:
- match = noise_pattern.match(cleaned)
- if match is not None:
- return int(match.group(1))
- if any_frequency_pattern.match(cleaned):
- inside_desired_frequency_block = False
-
- logging.error('Failed to find noise level for %s at %d MHz.',
- self._name, frequency_mhz)
- return None
-
-
-def get_interfaces():
- """
- Retrieve the list of network interfaces found on the system.
-
- @return List of interfaces.
-
- """
- return [Interface(nic.strip()) for nic in os.listdir(DEVICE_INFO_ROOT)]
-
-
-def get_prioritized_default_route(host=None, interface_name_regex=None):
- """
- Query a local or remote host for its prioritized default interface
- and route.
-
- @param interface_name_regex string regex to filter routes by interface.
- @return DefaultRoute tuple, or None if no default routes are found.
-
- """
- # Build a list of default routes, filtered by interface if requested.
- # Example command output: 'default via 172.23.188.254 dev eth0 metric 2'
- run = host.run if host is not None else utils.run
- output = run('ip route show').stdout
- output_regex_str = 'default\s+via\s+(\S+)\s+dev\s+(\S+)\s+metric\s+(\d+)'
- output_regex = re.compile(output_regex_str)
- defaults = []
- for item in output.splitlines():
- if 'default' not in item:
- continue
- match = output_regex.match(item.strip())
- if match is None:
- raise error.TestFail('Unexpected route output: %s' % item)
- gateway = match.group(1)
- interface_name = match.group(2)
- metric = int(match.group(3))
- if interface_name_regex is not None:
- if re.match(interface_name_regex, interface_name) is None:
- continue
- defaults.append(DefaultRoute(interface_name=interface_name,
- gateway=gateway, metric=metric))
- if not defaults:
- return None
-
- # Sort and return the route with the lowest metric value.
- defaults.sort(key=lambda x: x.metric)
- return defaults[0]
diff --git a/client/common_lib/cros/network/iw_event_logger.py b/client/common_lib/cros/network/iw_event_logger.py
deleted file mode 100644
index eb3870d..0000000
--- a/client/common_lib/cros/network/iw_event_logger.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import collections
-import logging
-import os
-import re
-import six
-
-IW_REMOTE_EVENT_LOG_FILE_NAME = 'iw_event.log'
-
-LogEntry = collections.namedtuple('LogEntry', ['timestamp',
- 'interface',
- 'message'])
-
-class IwEventLogger(object):
- """Context enclosing the use of iw event logger."""
- def __init__(self, host, command_iw, local_file):
- self._host = host
- self._command_iw = command_iw
- self._iw_event_log_path = os.path.join(self._host.get_tmp_dir(),
- IW_REMOTE_EVENT_LOG_FILE_NAME)
- self._local_file = local_file
- self._pid = None
- self._start_time = 0
-
-
- def __enter__(self):
- return self
-
-
- def __exit__(self, exception, value, traceback):
- self.stop()
-
-
- def _check_message_for_disconnect(self, message):
- """Check log message for disconnect event.
-
- This function checks log messages for signs the connection was
- terminated.
-
- @param: message String message to check for disconnect event.
-
- @returns True if the log message is a disconnect event, false otherwise.
-
- """
- return (message.startswith('disconnected') or
- message.startswith('Deauthenticated') or
- message == 'Previous authentication no longer valid')
-
-
- @property
- def local_file(self):
- """@return string local host path for log file."""
- return self._local_file
-
-
- def start(self):
- """Start event logger.
-
- This function will start iw event process in remote host, and redirect
- output to a temporary file in remote host.
-
- """
- command = 'nohup %s event -t </dev/null >%s 2>&1 & echo $!' % (
- self._command_iw, self._iw_event_log_path)
- command += ';date +%s'
- out_lines = self._host.run(command).stdout.splitlines()
- self._pid = int(out_lines[0])
- self._start_time = float(out_lines[1])
-
-
- def stop(self):
- """Stop event logger.
-
- This function will kill iw event process, and copy the log file from
- remote to local.
-
- """
- if self._pid is None:
- return
- # Kill iw event process
- self._host.run('kill %d' % self._pid, ignore_status=True)
- self._pid = None
- # Copy iw event log file from remote host
- self._host.get_file(self._iw_event_log_path, self._local_file)
- logging.info('iw event log saved to %s', self._local_file)
-
-
- def get_log_entries(self):
- """Parse local log file and yield LogEntry named tuples.
-
- This function will parse the iw event log and return individual
- LogEntry tuples for each parsed line.
- Here are example of lines to be parsed:
- 1393961008.058711: wlan0 (phy #0): scan started
- 1393961019.758599: wlan0 (phy #0): connected to 04:f0:21:03:7d:bd
-
- @yields LogEntry tuples for each log entry.
-
- """
- iw_log = self._host.run('cat %s' % self._iw_event_log_path).stdout
- iw_log_file = six.StringIO(iw_log)
- for line in iw_log_file.readlines():
- parse_line = re.match('\s*(\d+).(\d+): (\w[^:]*): (\w.*)', line)
- if parse_line:
- time_integer = parse_line.group(1)
- time_decimal = parse_line.group(2)
- timestamp = float('%s.%s' % (time_integer, time_decimal))
- yield LogEntry(timestamp=timestamp,
- interface=parse_line.group(3),
- message=parse_line.group(4))
-
-
- def get_reassociation_time(self):
- """Return reassociation time.
-
- This function will search the iw event log to determine the time it
- takes from start of reassociation request to being connected. Start of
- reassociation request could be either an attempt to scan or to
- disconnect. Assume the one that appeared in the log first is the start
- of the reassociation request.
-
- @returns float number of seconds it take from start of reassociation
- request to being connected. Return None if unable to determine
- the time based on the log.
-
- """
- start_time = None
- end_time = None
- # Figure out the time when reassociation process started and the time
- # when client is connected.
- for entry in self.get_log_entries():
- if (entry.message.startswith('scan started') and
- start_time is None):
- start_time = entry.timestamp
- # Newer wpa_supplicant would attempt to disconnect then reconnect
- # without scanning. So if no scan event is detected before the
- # disconnect attempt, we'll assume the disconnect attempt is the
- # beginning of the reassociate attempt.
- if (self._check_message_for_disconnect(entry.message) and
- start_time is None):
- start_time = entry.timestamp
- if entry.message.startswith('connected'):
- if start_time is None:
- return None
- end_time = entry.timestamp
- break;
- else:
- return None
- return end_time - start_time
-
-
- def get_disconnect_count(self):
- """Return number of times the system disconnected during the log.
-
- This function will search the iw event log to determine how many
- times the "disconnect" and "Deauthenticated" messages appear.
-
- @returns int number of times the system disconnected in the logs.
-
- """
- count = 0
- for entry in self.get_log_entries():
- if self._check_message_for_disconnect(entry.message):
- count += 1
- return count
-
-
- def get_time_to_disconnected(self):
- """Return disconnect time.
-
- This function will search the iw event log to determine the number of
- seconds between the time iw event logger is started to the time the
- first "disconnected" or "Deauthenticated" event is received.
-
- @return float number of seconds between the time iw event logger is
- started to the time "disconnected" or "Deauthenticated" event
- is received. Return None if no "disconnected" or
- "Deauthenticated" event is detected in the iw event log.
- """
- for entry in self.get_log_entries():
- if self._check_message_for_disconnect(entry.message):
- return entry.timestamp - self._start_time
- return None
diff --git a/client/common_lib/cros/network/iw_runner.py b/client/common_lib/cros/network/iw_runner.py
deleted file mode 100644
index a83b77c..0000000
--- a/client/common_lib/cros/network/iw_runner.py
+++ /dev/null
@@ -1,1146 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import logging
-import operator
-import re
-from six.moves import map
-from six.moves import range
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import utils
-from autotest_lib.client.common_lib.cros.network import iw_event_logger
-
-# These must mirror the values in 'iw list' output.
-CHAN_FLAG_DISABLED = 'disabled'
-CHAN_FLAG_NO_IR = 'no IR'
-CHAN_FLAG_PASSIVE_SCAN = 'passive scan'
-CHAN_FLAG_RADAR_DETECT = 'radar detection'
-DEV_MODE_AP = 'AP'
-DEV_MODE_IBSS = 'IBSS'
-DEV_MODE_MONITOR = 'monitor'
-DEV_MODE_MESH_POINT = 'mesh point'
-DEV_MODE_STATION = 'managed'
-SUPPORTED_DEV_MODES = (DEV_MODE_AP, DEV_MODE_IBSS, DEV_MODE_MONITOR,
- DEV_MODE_MESH_POINT, DEV_MODE_STATION)
-
-class _PrintableWidth:
- """Printable width constant objects used by packet_capturer."""
- def __init__(self, name):
- self._name = name
-
- def __repr__(self):
- return '\'%s\'' % self._name
-
- def __str__(self):
- return self._name
-
-WIDTH_HT20 = _PrintableWidth('HT20')
-WIDTH_HT40_PLUS = _PrintableWidth('HT40+')
-WIDTH_HT40_MINUS = _PrintableWidth('HT40-')
-WIDTH_VHT80 = _PrintableWidth('VHT80')
-WIDTH_VHT160 = _PrintableWidth('VHT160')
-WIDTH_VHT80_80 = _PrintableWidth('VHT80+80')
-
-VHT160_CENTER_CHANNELS = ('50','114')
-
-SECURITY_OPEN = 'open'
-SECURITY_WEP = 'wep'
-SECURITY_WPA = 'wpa'
-SECURITY_WPA2 = 'wpa2'
-# Mixed mode security is WPA2/WPA
-SECURITY_MIXED = 'mixed'
-
-# Table of lookups between the output of item 'secondary channel offset:' from
-# iw <device> scan to constants.
-
-HT_TABLE = {'no secondary': WIDTH_HT20,
- 'above': WIDTH_HT40_PLUS,
- 'below': WIDTH_HT40_MINUS}
-
-IwBand = collections.namedtuple(
- 'Band', ['num', 'frequencies', 'frequency_flags', 'mcs_indices'])
-IwBss = collections.namedtuple('IwBss', ['bss', 'frequency', 'ssid', 'security',
- 'width', 'signal'])
-IwNetDev = collections.namedtuple('IwNetDev', ['phy', 'if_name', 'if_type'])
-IwTimedScan = collections.namedtuple('IwTimedScan', ['time', 'bss_list'])
-
-# The fields for IwPhy are as follows:
-# name: string name of the phy, such as "phy0"
-# bands: list of IwBand objects.
-# modes: List of strings containing interface modes supported, such as "AP".
-# commands: List of strings containing nl80211 commands supported, such as
-# "authenticate".
-# features: List of strings containing nl80211 features supported, such as
-# "T-DLS".
-# max_scan_ssids: Maximum number of SSIDs which can be scanned at once.
-IwPhy = collections.namedtuple(
- 'Phy', ['name', 'bands', 'modes', 'commands', 'features',
- 'max_scan_ssids', 'avail_tx_antennas', 'avail_rx_antennas',
- 'supports_setting_antenna_mask', 'support_vht'])
-
-DEFAULT_COMMAND_IW = 'iw'
-
-# Redirect stderr to stdout on Cros since adb commands cannot distinguish them
-# on Brillo.
-IW_TIME_COMMAND_FORMAT = '(time -p %s) 2>&1'
-IW_TIME_COMMAND_OUTPUT_START = 'real'
-
-IW_LINK_KEY_BEACON_INTERVAL = 'beacon int'
-IW_LINK_KEY_DTIM_PERIOD = 'dtim period'
-IW_LINK_KEY_FREQUENCY = 'freq'
-IW_LINK_KEY_SIGNAL = 'signal'
-IW_LINK_KEY_RX_BITRATE = 'rx bitrate'
-IW_LINK_KEY_RX_DROPS = 'rx drop misc'
-IW_LINK_KEY_RX_PACKETS = 'rx packets'
-IW_LINK_KEY_TX_BITRATE = 'tx bitrate'
-IW_LINK_KEY_TX_FAILURES = 'tx failed'
-IW_LINK_KEY_TX_PACKETS = 'tx packets'
-IW_LINK_KEY_TX_RETRIES = 'tx retries'
-IW_LOCAL_EVENT_LOG_FILE = './debug/iw_event_%d.log'
-
-# Strings from iw/util.c describing supported HE features
-HE_MAC_PLUS_HTC_HE = '+HTC HE Supported'
-HE_MAC_TWT_REQUESTER = 'TWT Requester'
-HE_MAC_TWT_RESPONDER = 'TWT Responder'
-HE_MAC_DYNAMIC_BA_FRAGMENTATION = 'Dynamic BA Fragementation Level'
-HE_MAC_MAX_MSDUS = 'Maximum number of MSDUS Fragments'
-HE_MAC_MIN_PAYLOAD_128 = 'Minimum Payload size of 128 bytes'
-HE_MAC_TRIGGER_FRAME_PADDING = 'Trigger Frame MAC Padding Duration'
-HE_MAC_MULTI_TID_AGGREGATION = 'Multi-TID Aggregation Support'
-HE_MAC_ALL_ACK = 'All Ack'
-HE_MAC_TRS = 'TRS'
-HE_MAC_BSR = 'BSR'
-HE_MAC_TWT_BROADCAST = 'Broadcast TWT'
-HE_MAC_32_BIT_BA_BITMAP = '32-bit BA Bitmap'
-HE_MAC_MU_CASCADING = 'MU Cascading'
-HE_MAC_ACK_AGGREGATION = 'Ack-Enabled Aggregation'
-HE_MAC_OM_CONTROL = 'OM Control'
-HE_MAC_OFDMA_RA = 'OFDMA RA'
-HE_MAC_MAX_AMPDU_LENGTH_EXPONENT = 'Maximum A-MPDU Length Exponent'
-HE_MAC_AMSDU_FRAGMENTATION = 'A-MSDU Fragmentation'
-HE_MAC_FLEXIBLE_TWT = 'Flexible TWT Scheduling'
-HE_MAC_RX_CONTROL_FRAME_TO_MULTIBSS = 'RX Control Frame to MultiBSS'
-HE_MAC_BSRP_BQRP_AMPDU_AGGREGATION = 'BSRP BQRP A-MPDU Aggregation'
-HE_MAC_QTP = 'QTP'
-HE_MAC_BQR = 'BQR'
-HE_MAC_SRP_RESPONDER_ROLE = 'SRP Responder Role'
-HE_MAC_NDP_FEEDBACK_REPORT = 'NDP Feedback Report'
-HE_MAC_OPS = 'OPS'
-HE_MAC_AMSDU_IN_AMPDU = 'A-MSDU in A-MPDU'
-HE_MAC_MULTI_TID_AGGREGATION_TX = 'Multi-TID Aggregation TX'
-HE_MAC_SUBCHANNEL_SELECTIVE = 'HE Subchannel Selective Transmission'
-HE_MAC_UL_2X966_TONE_RU = 'UL 2x996-Tone RU'
-HE_MAC_OM_CONTROL_DISABLE_RX = 'OM Control UL MU Data Disable RX'
-
-HE_PHY_24HE40 = 'HE40/2.4GHz'
-HE_PHY_5HE40_80 = 'HE40/HE80/5GHz'
-HE_PHY_5HE160 = 'HE160/5GHz'
-HE_PHY_5HE160_80_80 = 'HE160/HE80+80/5GHz'
-HE_PHY_242_TONE_RU_24 = '242 tone RUs/2.4GHz'
-HE_PHY_242_TONE_RU_5 = '242 tone RUs/5GHz'
-HE_PHY_PUNCTURED_PREAMBLE_RX = 'Punctured Preamble RX'
-HE_PHY_DEVICE_CLASS = 'Device Class'
-HE_PHY_LDPC_CODING_IN_PAYLOAD = 'LDPC Coding in Payload'
-HE_PHY_HE_SU_PPDU_1X_HE_LTF_08_GI = 'HE SU PPDU with 1x HE-LTF and 0.8us GI'
-HE_PHY_HE_MIDAMBLE_RX_MAX_NSTS = 'Midamble Rx Max NSTS'
-HE_PHY_NDP_4X_HE_LTF_32_GI = 'NDP with 4x HE-LTF and 3.2us GI'
-HE_PHY_STBC_TX_LEQ_80 = 'STBC Tx <= 80MHz'
-HE_PHY_STBC_RX_LEQ_80 = 'STBC Rx <= 80MHz'
-HE_PHY_DOPPLER_TX = 'Doppler Tx'
-HE_PHY_DOPPLER_RX = 'Doppler Rx'
-HE_PHY_FULL_BAND_UL_MU_MIMO = 'Full Bandwidth UL MU-MIMO'
-HE_PHY_PART_BAND_UL_MU_MIMO = 'Partial Bandwidth UL MU-MIMO'
-HE_PHY_DCM_MAX_CONSTELLATION = 'DCM Max Constellation'
-HE_PHY_DCM_MAX_NSS_TX = 'DCM Max NSS Tx'
-HE_PHY_DCM_MAX_CONSTELLATION_RX = 'DCM Max Constellation Rx'
-HE_PHY_DCM_MAX_NSS_RX = 'DCM Max NSS Rx'
-HE_PHY_RX_MU_PPDU_FROM_NON_AP = 'Rx HE MU PPDU from Non-AP STA'
-HE_PHY_SU_BEAMFORMER = 'SU Beamformer'
-HE_PHY_SU_BEAMFORMEE = 'SU Beamformee'
-HE_PHY_MU_BEAMFORMER = 'MU Beamformer'
-HE_PHY_BEAMFORMEE_STS_LEQ_80 = 'Beamformee STS <= 80Mhz'
-HE_PHY_BEAMFORMEE_STS_GT_80 = 'Beamformee STS > 80Mhz'
-HE_PHY_SOUNDING_DIMENSIONS_LEQ_80 = 'Sounding Dimensions <= 80Mhz'
-HE_PHY_SOUNDING_DIMENSIONS_GT_80 = 'Sounding Dimensions > 80Mhz'
-HE_PHY_NG_EQ_16_SU_FB = 'Ng = 16 SU Feedback'
-HE_PHY_NG_EQ_16_MU_FB = 'Ng = 16 MU Feedback'
-HE_PHY_CODEBOOK_SIZE_SU_FB = 'Codebook Size SU Feedback'
-HE_PHY_CODEBOOK_SIZE_MU_FB = 'Codebook Size MU Feedback'
-HE_PHY_TRIGGERED_SU_BEAMFORMING_FB = 'Triggered SU Beamforming Feedback'
-HE_PHY_TRIGGERED_MU_BEAMFORMING_FB = 'Triggered MU Beamforming Feedback'
-HE_PHY_TRIGGERED_CQI_FB = 'Triggered CQI Feedback'
-HE_PHY_PART_BAND_EXT_RANGE = 'Partial Bandwidth Extended Range'
-HE_PHY_PART_BAND_DL_MU_MIMO = 'Partial Bandwidth DL MU-MIMO'
-HE_PHY_PPE_THRESHOLD = 'PPE Threshold Present'
-HE_PHY_SRP_SR = 'SRP-based SR'
-HE_PHY_POWER_BOOST_FACTOR_AR = 'Power Boost Factor ar'
-HE_PHY_SU_PPDU_4X_HE_LTF_08_GI = 'HE SU PPDU & HE PPDU 4x HE-LTF 0.8us GI'
-HE_PHY_MAX_NC = 'Max NC'
-HE_PHY_STBC_TX_GT_80 = 'STBC Tx > 80MHz'
-HE_PHY_STBC_RX_GT_80 = 'STBC Rx > 80MHz'
-HE_PHY_ER_SU_PPDU_4X_HE_LTF_08_GI = 'HE ER SU PPDU 4x HE-LTF 0.8us GI'
-HE_PHY_20_IN_44_PPDU_24 = '20MHz in 40MHz HE PPDU 2.4GHz'
-HE_PHY_20_IN_160_80_80 = '20MHz in 160/80+80MHz HE PPDU'
-HE_PHY_80_IN_160_80_80 = '80MHz in 160/80+80MHz HE PPDU'
-HE_PHY_ER_SU_PPDU_1X_HE_LTF_08_GI = 'HE ER SU PPDU 1x HE-LTF 0.8us GI'
-HE_PHY_MIDAMBLE_RX_2X_AND_1X_HE_LTF = 'Midamble Rx 2x & 1x HE-LTF'
-HE_PHY_DCM_MAX_BW = 'DCM Max BW'
-HE_PHY_LONGER_THAN_16HE_OFDM_SYM = 'Longer Than 16HE SIG-B OFDM Symbols'
-HE_PHY_NON_TRIGGERED_CQI_FB = 'Non-Triggered CQI Feedback'
-HE_PHY_TX_1024_QAM = 'TX 1024-QAM'
-HE_PHY_RX_1024_QAM = 'RX 1024-QAM'
-HE_PHY_RX_FULL_BW_SU_USING_MU_COMPRESSION_SIGB = \
- 'RX Full BW SU Using HE MU PPDU with Compression SIGB'
-HE_PHY_RX_FULL_BW_SU_USING_MU_NON_COMPRESSION_SIGB = \
- 'RX Full BW SU Using HE MU PPDU with Non-Compression SIGB'
-
-
-def _get_all_link_keys(link_information):
- """Parses link or station dump output for link key value pairs.
-
- Link or station dump information is in the format below:
-
- Connected to 74:e5:43:10:4f:c0 (on wlan0)
- SSID: PMKSACaching_4m9p5_ch1
- freq: 5220
- RX: 5370 bytes (37 packets)
- TX: 3604 bytes (15 packets)
- signal: -59 dBm
- tx bitrate: 13.0 MBit/s MCS 1
-
- bss flags: short-slot-time
- dtim period: 5
- beacon int: 100
-
- @param link_information: string containing the raw link or station dump
- information as reported by iw. Note that this parsing assumes a single
- entry, in the case of multiple entries (e.g. listing stations from an
- AP, or listing mesh peers), the entries must be split on a per
- peer/client basis before this parsing operation.
- @return a dictionary containing all the link key/value pairs.
-
- """
- link_key_value_pairs = {}
- keyval_regex = re.compile(r'^\s+(.*):\s+(.*)$')
- for link_key in link_information.splitlines()[1:]:
- match = re.search(keyval_regex, link_key)
- if match:
- # Station dumps can contain blank lines.
- link_key_value_pairs[match.group(1)] = match.group(2)
- return link_key_value_pairs
-
-
-def _extract_bssid(link_information, interface_name, station_dump=False):
- """Get the BSSID that |interface_name| is associated with.
-
- See doc for _get_all_link_keys() for expected format of the station or link
- information entry.
-
- @param link_information: string containing the raw link or station dump
- information as reported by iw. Note that this parsing assumes a single
- entry, in the case of multiple entries (e.g. listing stations from an AP
- or listing mesh peers), the entries must be split on a per peer/client
- basis before this parsing operation.
- @param interface_name: string name of interface (e.g. 'wlan0').
- @param station_dump: boolean indicator of whether the link information is
- from a 'station dump' query. If False, it is assumed the string is from
- a 'link' query.
- @return string bssid of the current association, or None if no matching
- association information is found.
-
- """
- # We're looking for a line like this when parsing the output of a 'link'
- # query:
- # Connected to 04:f0:21:03:7d:bb (on wlan0)
- # We're looking for a line like this when parsing the output of a
- # 'station dump' query:
- # Station 04:f0:21:03:7d:bb (on mesh-5000mhz)
- identifier = 'Station' if station_dump else 'Connected to'
- search_re = r'%s ([0-9a-fA-F:]{17}) \(on %s\)' % (identifier,
- interface_name)
- match = re.match(search_re, link_information)
- if match is None:
- return None
- return match.group(1)
-
-
-class IwRunner(object):
- """Defines an interface to the 'iw' command."""
-
-
- def __init__(self, remote_host=None, command_iw=DEFAULT_COMMAND_IW):
- self._run = utils.run
- self._host = remote_host
- if remote_host:
- self._run = remote_host.run
- self._command_iw = command_iw
- self._log_id = 0
-
-
- def _parse_scan_results(self, output):
- """Parse the output of the 'scan' and 'scan dump' commands.
-
- Here is an example of what a single network would look like for
- the input parameter. Some fields have been removed in this example:
- BSS 00:11:22:33:44:55(on wlan0)
- freq: 2447
- beacon interval: 100 TUs
- signal: -46.00 dBm
- Information elements from Probe Response frame:
- SSID: my_open_network
- Extended supported rates: 24.0 36.0 48.0 54.0
- HT capabilities:
- Capabilities: 0x0c
- HT20
- HT operation:
- * primary channel: 8
- * secondary channel offset: no secondary
- * STA channel width: 20 MHz
- RSN: * Version: 1
- * Group cipher: CCMP
- * Pairwise ciphers: CCMP
- * Authentication suites: PSK
- * Capabilities: 1-PTKSA-RC 1-GTKSA-RC (0x0000)
-
- @param output: string command output.
-
- @returns a list of IwBss namedtuples; None if the scan fails
-
- """
- bss = None
- frequency = None
- ssid = None
- ht = None
- vht = None
- signal = None
- security = None
- supported_securities = []
- bss_list = []
- # TODO(crbug.com/1032892): The parsing logic here wasn't really designed
- # for the presence of multiple information elements like HT, VHT, and
- # (eventually) HE. We should eventually update it to check that we are
- # in the right section (e.g., verify the '* channel width' match is a
- # match in the VHT section and not a different section). Also, we should
- # probably add in VHT20, and VHT40 whenever we finish this bug.
- for line in output.splitlines():
- line = line.strip()
- bss_match = re.match('BSS ([0-9a-f:]+)', line)
- if bss_match:
- if bss != None:
- security = self.determine_security(supported_securities)
- iwbss = IwBss(bss, frequency, ssid, security,
- vht if vht else ht, signal)
- bss_list.append(iwbss)
- bss = frequency = ssid = security = ht = vht = None
- supported_securities = []
- bss = bss_match.group(1)
- if line.startswith('freq:'):
- frequency = int(line.split()[1])
- if line.startswith('signal:'):
- signal = float(line.split()[1])
- if line.startswith('SSID: '):
- _, ssid = line.split(': ', 1)
- if line.startswith('* secondary channel offset'):
- ht = HT_TABLE[line.split(':')[1].strip()]
- # Checking for the VHT channel width based on IEEE 802.11-2016
- # Table 9-252.
- if line.startswith('* channel width:'):
- chan_width_subfield = line.split(':')[1].strip()[0]
- if chan_width_subfield == '1':
- vht = WIDTH_VHT80
- # 2 and 3 are deprecated but are included here for older APs.
- if chan_width_subfield == '2':
- vht = WIDTH_VHT160
- if chan_width_subfield == '3':
- vht = WIDTH_VHT80_80
- if line.startswith('* center freq segment 2:'):
- center_chan_two = line.split(':')[1].strip()
- if vht == WIDTH_VHT80:
- if center_chan_two in VHT160_CENTER_CHANNELS:
- vht = WIDTH_VHT160
- elif center_chan_two != '0':
- vht = WIDTH_VHT80_80
- if line.startswith('WPA'):
- supported_securities.append(SECURITY_WPA)
- if line.startswith('RSN'):
- supported_securities.append(SECURITY_WPA2)
- security = self.determine_security(supported_securities)
- bss_list.append(IwBss(bss, frequency, ssid, security,
- vht if vht else ht, signal))
- return bss_list
-
-
- def _parse_scan_time(self, output):
- """
- Parse the scan time in seconds from the output of the 'time -p "scan"'
- command.
-
- 'time -p' Command output format is below:
- real 0.01
- user 0.01
- sys 0.00
-
- @param output: string command output.
-
- @returns float time in seconds.
-
- """
- output_lines = output.splitlines()
- for line_num, line in enumerate(output_lines):
- line = line.strip()
- if (line.startswith(IW_TIME_COMMAND_OUTPUT_START) and
- output_lines[line_num + 1].startswith('user') and
- output_lines[line_num + 2].startswith('sys')):
- return float(line.split()[1])
- raise error.TestFail('Could not parse scan time.')
-
-
- def add_interface(self, phy, interface, interface_type):
- """
- Add an interface to a WiFi PHY.
-
- @param phy: string name of PHY to add an interface to.
- @param interface: string name of interface to add.
- @param interface_type: string type of interface to add (e.g. 'monitor').
-
- """
- self._run('%s phy %s interface add %s type %s' %
- (self._command_iw, phy, interface, interface_type))
-
-
- def disconnect_station(self, interface):
- """
- Disconnect a STA from a network.
-
- @param interface: string name of interface to disconnect.
-
- """
- self._run('%s dev %s disconnect' % (self._command_iw, interface))
-
-
- def get_current_bssid(self, interface_name):
- """Get the BSSID that |interface_name| is associated with.
-
- @param interface_name: string name of interface (e.g. 'wlan0').
- @return string bssid of our current association, or None.
-
- """
- result = self._run('%s dev %s link' %
- (self._command_iw, interface_name),
- ignore_status=True)
- if result.exit_status:
- # See comment in get_link_value.
- return None
-
- return _extract_bssid(result.stdout, interface_name)
-
-
- def get_interface(self, interface_name):
- """Get full information about an interface given an interface name.
-
- @param interface_name: string name of interface (e.g. 'wlan0').
- @return IwNetDev tuple.
-
- """
- matching_interfaces = [iw_if for iw_if in self.list_interfaces()
- if iw_if.if_name == interface_name]
- if len(matching_interfaces) != 1:
- raise error.TestFail('Could not find interface named %s' %
- interface_name)
-
- return matching_interfaces[0]
-
-
- def get_link_value(self, interface, iw_link_key):
- """Get the value of a link property for |interface|.
-
- Checks the link using iw, and parses the result to return a link key.
-
- @param iw_link_key: string one of IW_LINK_KEY_* defined above.
- @param interface: string desired value of iw link property.
- @return string containing the corresponding link property value, None
- if there was a parsing error or the iw command failed.
-
- """
- result = self._run('%s dev %s link' % (self._command_iw, interface),
- ignore_status=True)
- if result.exit_status:
- # When roaming, there is a period of time for mac80211 based drivers
- # when the driver is 'associated' with an SSID but not a particular
- # BSS. This causes iw to return an error code (-2) when attempting
- # to retrieve information specific to the BSS. This does not happen
- # in mwifiex drivers.
- return None
- actual_value = _get_all_link_keys(result.stdout).get(iw_link_key)
- if actual_value is not None:
- logging.info('Found iw link key %s with value %s.',
- iw_link_key, actual_value)
- return actual_value
-
-
- def get_station_dump(self, interface):
- """Gets information about connected peers.
-
- Returns information about the currently connected peers. When the host
- is in station mode, it returns a single entry, with information about
- the link to the AP it is currently connected to. If the host is in mesh
- or AP mode, it can return multiple entries, one for each connected
- station, or mesh peer.
-
- @param interface: string name of interface to get peer information
- from.
- @return a list of dictionaries with link information about each
- connected peer (ordered by peer mac address).
-
- """
- result = self._run('%s dev %s station dump' %
- (self._command_iw, interface))
- parts = re.split(r'^Station ', result.stdout, flags=re.MULTILINE)[1:]
- peer_list_raw = ['Station ' + x for x in parts]
- parsed_peer_info = []
-
- for peer in peer_list_raw:
- peer_link_keys = _get_all_link_keys(peer)
- rssi_str = peer_link_keys.get(IW_LINK_KEY_SIGNAL, '0')
- rssi_int = int(rssi_str.split()[0])
-
- tx_bitrate = peer_link_keys.get(IW_LINK_KEY_TX_BITRATE, '0')
- tx_failures = int(peer_link_keys.get(IW_LINK_KEY_TX_FAILURES, 0))
- tx_packets = int(peer_link_keys.get(IW_LINK_KEY_TX_PACKETS, 0))
- tx_retries = int(peer_link_keys.get(IW_LINK_KEY_TX_RETRIES, 0))
-
- rx_bitrate = peer_link_keys.get(IW_LINK_KEY_RX_BITRATE, '0')
- rx_drops = int(peer_link_keys.get(IW_LINK_KEY_RX_DROPS, 0))
- rx_packets = int(peer_link_keys.get(IW_LINK_KEY_RX_PACKETS, 0))
-
- mac = _extract_bssid(link_information=peer,
- interface_name=interface,
- station_dump=True)
-
- # If any of these are missing, they will be None
- peer_info = {'rssi_int': rssi_int,
- 'rssi_str': rssi_str,
- 'tx_bitrate': tx_bitrate,
- 'tx_failures': tx_failures,
- 'tx_packets': tx_packets,
- 'tx_retries': tx_retries,
- 'rx_bitrate': rx_bitrate,
- 'rx_drops': rx_drops,
- 'rx_packets': rx_packets,
- 'mac': mac}
-
- # don't evaluate if tx_packets 0
- if tx_packets:
- peer_info['tx_retry_rate'] = tx_retries / float(tx_packets)
- peer_info['tx_failure_rate'] = tx_failures / float(tx_packets)
-
- # don't evaluate if rx_packets is 0
- if rx_packets:
- peer_info['rx_drop_rate'] = rx_drops / float(rx_packets)
-
- parsed_peer_info.append(peer_info)
- return sorted(parsed_peer_info, key=operator.itemgetter('mac'))
-
-
- def get_operating_mode(self, interface):
- """Gets the operating mode for |interface|.
-
- @param interface: string name of interface to get peer information
- about.
-
- @return string one of DEV_MODE_* defined above, or None if no mode is
- found, or if an unsupported mode is found.
-
- """
- ret = self._run('%s dev %s info' % (self._command_iw, interface))
- mode_regex = r'^\s*type (.*)$'
- match = re.search(mode_regex, ret.stdout, re.MULTILINE)
- if match:
- operating_mode = match.group(1)
- if operating_mode in SUPPORTED_DEV_MODES:
- return operating_mode
- logging.warning(
- 'Unsupported operating mode %s found for interface: %s. '
- 'Supported modes: %s', operating_mode, interface,
- SUPPORTED_DEV_MODES)
- return None
-
-
- def get_radio_config(self, interface):
- """Gets the channel information of a specfic interface using iw.
-
- @param interface: string name of interface to get radio information
- from.
-
- @return dictionary containing the channel information.
-
- """
- channel_config = {}
- ret = self._run('%s dev %s info' % (self._command_iw, interface))
- channel_config_regex = (r'^\s*channel ([0-9]+) \(([0-9]+) MHz\), '
- 'width: ([2,4,8]0) MHz, center1: ([0-9]+) MHz')
- match = re.search(channel_config_regex, ret.stdout, re.MULTILINE)
-
- if match:
- channel_config['number'] = int(match.group(1))
- channel_config['freq'] = int(match.group(2))
- channel_config['width'] = int(match.group(3))
- channel_config['center1_freq'] = int(match.group(4))
-
- return channel_config
-
-
- def ibss_join(self, interface, ssid, frequency):
- """
- Join a WiFi interface to an IBSS.
-
- @param interface: string name of interface to join to the IBSS.
- @param ssid: string SSID of IBSS to join.
- @param frequency: int frequency of IBSS in Mhz.
-
- """
- self._run('%s dev %s ibss join %s %d' %
- (self._command_iw, interface, ssid, frequency))
-
-
- def ibss_leave(self, interface):
- """
- Leave an IBSS.
-
- @param interface: string name of interface to remove from the IBSS.
-
- """
- self._run('%s dev %s ibss leave' % (self._command_iw, interface))
-
-
- def list_interfaces(self, desired_if_type=None):
- """List WiFi related interfaces on this system.
-
- @param desired_if_type: string type of interface to filter
- our returned list of interfaces for (e.g. 'managed').
-
- @return list of IwNetDev tuples.
-
- """
-
- # Parse output in the following format:
- #
- # $ adb shell iw dev
- # phy#0
- # Unnamed/non-netdev interface
- # wdev 0x2
- # addr aa:bb:cc:dd:ee:ff
- # type P2P-device
- # Interface wlan0
- # ifindex 4
- # wdev 0x1
- # addr aa:bb:cc:dd:ee:ff
- # ssid Whatever
- # type managed
-
- output = self._run('%s dev' % self._command_iw).stdout
- interfaces = []
- phy = None
- if_name = None
- if_type = None
- for line in output.splitlines():
- m = re.match('phy#([0-9]+)', line)
- if m:
- phy = 'phy%d' % int(m.group(1))
- if_name = None
- if_type = None
- continue
- if not phy:
- continue
- m = re.match('[\s]*Interface (.*)', line)
- if m:
- if_name = m.group(1)
- continue
- if not if_name:
- continue
- # Common values for type are 'managed', 'monitor', and 'IBSS'.
- m = re.match('[\s]*type ([a-zA-Z]+)', line)
- if m:
- if_type = m.group(1)
- interfaces.append(IwNetDev(phy=phy, if_name=if_name,
- if_type=if_type))
- # One phy may have many interfaces, so don't reset it.
- if_name = None
-
- if desired_if_type:
- interfaces = [interface for interface in interfaces
- if interface.if_type == desired_if_type]
- return interfaces
-
-
- def list_phys(self):
- """
- List WiFi PHYs on the given host.
-
- @return list of IwPhy tuples.
-
- """
- output = self._run('%s list' % self._command_iw).stdout
-
- pending_phy_name = None
- current_band = None
- current_section = None
- all_phys = []
-
- def add_pending_phy():
- """Add the pending phy into |all_phys|."""
- bands = tuple(IwBand(band.num,
- tuple(band.frequencies),
- dict(band.frequency_flags),
- tuple(band.mcs_indices))
- for band in pending_phy_bands)
- new_phy = IwPhy(pending_phy_name,
- bands,
- tuple(pending_phy_modes),
- tuple(pending_phy_commands),
- tuple(pending_phy_features),
- pending_phy_max_scan_ssids,
- pending_phy_tx_antennas,
- pending_phy_rx_antennas,
- pending_phy_tx_antennas and pending_phy_rx_antennas,
- pending_phy_support_vht)
- all_phys.append(new_phy)
-
- for line in output.splitlines():
- match_phy = re.search('Wiphy (.*)', line)
- if match_phy:
- if pending_phy_name:
- add_pending_phy()
- pending_phy_name = match_phy.group(1)
- pending_phy_bands = []
- pending_phy_modes = []
- pending_phy_commands = []
- pending_phy_features = []
- pending_phy_max_scan_ssids = None
- pending_phy_tx_antennas = 0
- pending_phy_rx_antennas = 0
- pending_phy_support_vht = False
- continue
-
- match_section = re.match('\s*(\w.*):\s*$', line)
- if match_section:
- current_section = match_section.group(1)
- match_band = re.match('Band (\d+)', current_section)
- if match_band:
- current_band = IwBand(num=int(match_band.group(1)),
- frequencies=[],
- frequency_flags={},
- mcs_indices=[])
- pending_phy_bands.append(current_band)
- continue
-
- # Check for max_scan_ssids. This isn't a section, but it
- # also isn't within a section.
- match_max_scan_ssids = re.match('\s*max # scan SSIDs: (\d+)',
- line)
- if match_max_scan_ssids and pending_phy_name:
- pending_phy_max_scan_ssids = int(
- match_max_scan_ssids.group(1))
- continue
-
- if (current_section == 'Supported interface modes' and
- pending_phy_name):
- mode_match = re.search('\* (\w+)', line)
- if mode_match:
- pending_phy_modes.append(mode_match.group(1))
- continue
-
- if current_section == 'Supported commands' and pending_phy_name:
- command_match = re.search('\* (\w+)', line)
- if command_match:
- pending_phy_commands.append(command_match.group(1))
- continue
-
- if (current_section is not None and
- current_section.startswith('VHT Capabilities') and
- pending_phy_name):
- pending_phy_support_vht = True
- continue
-
- match_avail_antennas = re.match('\s*Available Antennas: TX (\S+)'
- ' RX (\S+)', line)
- if match_avail_antennas and pending_phy_name:
- pending_phy_tx_antennas = int(
- match_avail_antennas.group(1), 16)
- pending_phy_rx_antennas = int(
- match_avail_antennas.group(2), 16)
- continue
-
- match_device_support = re.match('\s*Device supports (.*)\.', line)
- if match_device_support and pending_phy_name:
- pending_phy_features.append(match_device_support.group(1))
- continue
-
- if not all([current_band, pending_phy_name,
- line.startswith('\t')]):
- continue
-
- # E.g.
- # * 2412 MHz [1] (20.0 dBm)
- # * 2467 MHz [12] (20.0 dBm) (passive scan)
- # * 2472 MHz [13] (disabled)
- # * 5260 MHz [52] (19.0 dBm) (no IR, radar detection)
- match_chan_info = re.search(
- r'(?P<frequency>\d+) MHz'
- r' (?P<chan_num>\[\d+\])'
- r'(?: \((?P<tx_power_limit>[0-9.]+ dBm)\))?'
- r'(?: \((?P<flags>[a-zA-Z, ]+)\))?', line)
- if match_chan_info:
- frequency = int(match_chan_info.group('frequency'))
- current_band.frequencies.append(frequency)
- flags_string = match_chan_info.group('flags')
- if flags_string:
- current_band.frequency_flags[frequency] = frozenset(
- flags_string.split(','))
- else:
- # Populate the dict with an empty set, to make
- # things uniform for client code.
- current_band.frequency_flags[frequency] = frozenset()
- continue
-
- # re_mcs needs to match something like:
- # HT TX/RX MCS rate indexes supported: 0-15, 32
- if re.search('HT TX/RX MCS rate indexes supported: ', line):
- rate_string = line.split(':')[1].strip()
- for piece in rate_string.split(','):
- if piece.find('-') > 0:
- # Must be a range like ' 0-15'
- begin, end = piece.split('-')
- for index in range(int(begin), int(end) + 1):
- current_band.mcs_indices.append(index)
- else:
- # Must be a single rate like '32 '
- current_band.mcs_indices.append(int(piece))
- if pending_phy_name:
- add_pending_phy()
- return all_phys
-
-
- def remove_interface(self, interface, ignore_status=False):
- """
- Remove a WiFi interface from a PHY.
-
- @param interface: string name of interface (e.g. mon0)
- @param ignore_status: boolean True iff we should ignore failures
- to remove the interface.
-
- """
- self._run('%s dev %s del' % (self._command_iw, interface),
- ignore_status=ignore_status)
-
-
- def determine_security(self, supported_securities):
- """Determines security from the given list of supported securities.
-
- @param supported_securities: list of supported securities from scan
-
- """
- if not supported_securities:
- security = SECURITY_OPEN
- elif len(supported_securities) == 1:
- security = supported_securities[0]
- else:
- security = SECURITY_MIXED
- return security
-
-
- def scan(self, interface, frequencies=(), ssids=()):
- """Performs a scan.
-
- @param interface: the interface to run the iw command against
- @param frequencies: list of int frequencies in Mhz to scan.
- @param ssids: list of string SSIDs to send probe requests for.
-
- @returns a list of IwBss namedtuples; None if the scan fails
-
- """
- scan_result = self.timed_scan(interface, frequencies, ssids)
- if scan_result is None:
- return None
- return scan_result.bss_list
-
-
- def timed_scan(self, interface, frequencies=(), ssids=()):
- """Performs a timed scan.
-
- @param interface: the interface to run the iw command against
- @param frequencies: list of int frequencies in Mhz to scan.
- @param ssids: list of string SSIDs to send probe requests for.
-
- @returns a IwTimedScan namedtuple; None if the scan fails
-
- """
- freq_param = ''
- if frequencies:
- freq_param = ' freq %s' % ' '.join(map(str, frequencies))
- ssid_param = ''
- if ssids:
- ssid_param = ' ssid "%s"' % '" "'.join(ssids)
-
- iw_command = '%s dev %s scan%s%s' % (self._command_iw,
- interface, freq_param, ssid_param)
- command = IW_TIME_COMMAND_FORMAT % iw_command
- scan = self._run(command, ignore_status=True)
- if scan.exit_status != 0:
- # The device was busy
- logging.debug('scan exit_status: %d', scan.exit_status)
- return None
- if not scan.stdout:
- raise error.TestFail('Missing scan parse time')
-
- if scan.stdout.startswith(IW_TIME_COMMAND_OUTPUT_START):
- logging.debug('Empty scan result')
- bss_list = []
- else:
- bss_list = self._parse_scan_results(scan.stdout)
- scan_time = self._parse_scan_time(scan.stdout)
- return IwTimedScan(scan_time, bss_list)
-
-
- def scan_dump(self, interface):
- """Dump the contents of the scan cache.
-
- Note that this does not trigger a scan. Instead, it returns
- the kernel's idea of what BSS's are currently visible.
-
- @param interface: the interface to run the iw command against
-
- @returns a list of IwBss namedtuples; None if the scan fails
-
- """
- result = self._run('%s dev %s scan dump' % (self._command_iw,
- interface))
- return self._parse_scan_results(result.stdout)
-
-
- def set_tx_power(self, interface, power):
- """
- Set the transmission power for an interface.
-
- @param interface: string name of interface to set Tx power on.
- @param power: string power parameter. (e.g. 'auto').
-
- """
- self._run('%s dev %s set txpower %s' %
- (self._command_iw, interface, power))
-
-
- def set_freq(self, interface, freq):
- """
- Set the frequency for an interface.
-
- @param interface: string name of interface to set frequency on.
- @param freq: int frequency
-
- """
- self._run('%s dev %s set freq %d' %
- (self._command_iw, interface, freq))
-
-
- def set_regulatory_domain(self, domain_string):
- """
- Set the regulatory domain of the current machine. Note that
- the regulatory change happens asynchronously to the exit of
- this function.
-
- @param domain_string: string regulatory domain name (e.g. 'US').
-
- """
- self._run('%s reg set %s' % (self._command_iw, domain_string))
-
-
- def get_regulatory_domain(self, wiphy=None):
- """
- Get the regulatory domain of the current machine.
-
- @param wiphy: string; if provided, check for the phy-specific domain,
- rather than the global one.
-
- @returns a string containing the 2-letter regulatory domain name
- (e.g. 'US').
-
- """
- cmd = self._command_iw
- if wiphy:
- cmd += ' phy ' + wiphy
- cmd += ' reg get'
- output = self._run(cmd).stdout
- m = re.search('^country (..):', output, re.MULTILINE)
- if not m:
- return None
- return m.group(1)
-
-
- def is_regulatory_self_managed(self):
- """
- Determine if any WiFi device on the system manages its own regulatory
- info (NL80211_ATTR_WIPHY_SELF_MANAGED_REG).
-
- @returns True if self-managed, False otherwise.
- """
- output = self._run('%s reg get' % self._command_iw).stdout
- m = re.search('^phy#.*\(self-managed\)', output, re.MULTILINE)
- return not m is None
-
-
- def wait_for_scan_result(self, interface, bsses=(), ssids=(),
- timeout_seconds=30, wait_for_all=False):
- """Returns a list of IWBSS objects for given list of bsses or ssids.
-
- This method will scan for a given timeout and return all of the networks
- that have a matching ssid or bss. If wait_for_all is true and all
- networks are not found within the given timeout an empty list will
- be returned.
-
- @param interface: which interface to run iw against
- @param bsses: a list of BSS strings
- @param ssids: a list of ssid strings
- @param timeout_seconds: the amount of time to wait in seconds
- @param wait_for_all: True to wait for all listed bsses or ssids; False
- to return if any of the networks were found
-
- @returns a list of IwBss collections that contain the given bss or ssid;
- if the scan is empty or returns an error code None is returned.
-
- """
-
- logging.info('Performing a scan with a max timeout of %d seconds.',
- timeout_seconds)
-
- # If the in-progress scan takes more than 30 seconds to
- # complete it will most likely never complete; abort.
- # See crbug.com/309148
- scan_results = list()
- try:
- scan_results = utils.poll_for_condition(
- condition=lambda: self.scan(interface),
- timeout=timeout_seconds,
- sleep_interval=5, # to allow in-progress scans to complete
- desc='Timed out getting IWBSSes that match desired')
- except utils.TimeoutError as e:
- pass
-
- if not scan_results: # empty list or None
- return None
-
- # get all IWBSSes from the scan that match any of the desired
- # ssids or bsses passed in
- matching_iwbsses = [iwbss for iwbss in scan_results
- if iwbss.ssid in ssids or iwbss.bss in bsses]
- if wait_for_all:
- found_bsses = [iwbss.bss for iwbss in matching_iwbsses]
- found_ssids = [iwbss.ssid for iwbss in matching_iwbsses]
- # if an expected bss or ssid was not found, and it was required
- # by the caller that all expected be found, return empty list
- if any(bss not in found_bsses for bss in bsses) or any(
- ssid not in found_ssids for ssid in ssids):
- return list()
- return list(matching_iwbsses)
-
-
- def set_antenna_bitmap(self, phy, tx_bitmap, rx_bitmap):
- """Set antenna chain mask on given phy (radio).
-
- This function will set the antennas allowed to use for TX and
- RX on the |phy| based on the |tx_bitmap| and |rx_bitmap|.
- This command is only allowed when the interfaces on the phy are down.
-
- @param phy: phy name
- @param tx_bitmap: bitmap of allowed antennas to use for TX
- @param rx_bitmap: bitmap of allowed antennas to use for RX
-
- """
- command = '%s phy %s set antenna %d %d' % (self._command_iw, phy,
- tx_bitmap, rx_bitmap)
- self._run(command)
-
-
- def get_event_logger(self):
- """Create and return a IwEventLogger object.
-
- @returns a IwEventLogger object.
-
- """
- local_file = IW_LOCAL_EVENT_LOG_FILE % (self._log_id)
- self._log_id += 1
- return iw_event_logger.IwEventLogger(self._host, self._command_iw,
- local_file)
-
-
- def vht_supported(self):
- """Returns True if VHT is supported; False otherwise."""
- result = self._run('%s list' % self._command_iw).stdout
- if 'VHT Capabilities' in result:
- return True
- return False
-
-
- def he_supported(self):
- """Returns True if HE (802.11ax) is supported; False otherwise."""
- result = self._run('%s list' % self._command_iw).stdout
- if 'HE MAC Capabilities' in result:
- return True
- return False
-
-
- def frequency_supported(self, frequency):
- """Returns True if the given frequency is supported; False otherwise.
-
- @param frequency: int Wifi frequency to check if it is supported by
- DUT.
- """
- phys = self.list_phys()
- for phy in phys:
- for band in phy.bands:
- if frequency in band.frequencies:
- return True
- return False
-
-
- def get_fragmentation_threshold(self, phy):
- """Returns the fragmentation threshold for |phy|.
-
- @param phy: phy name
- """
- ret = self._run('%s phy %s info' % (self._command_iw, phy))
- frag_regex = r'^\s+Fragmentation threshold:\s+([0-9]+)$'
- match = re.search(frag_regex, ret.stdout, re.MULTILINE)
-
- if match:
- return int(match.group(1))
-
- return None
-
-
- def get_info(self, phy=None):
- """
- Returns the output of 'iw phy info' for |phy|, or 'iw list' if no phy
- specified.
-
- @param phy: optional string giving the name of the phy
- @return string stdout of the command run
- """
- if phy and phy not in [iw_phy.name for iw_phy in self.list_phys()]:
- logging.info('iw could not find phy %s', phy)
- return None
-
- if phy:
- out = self._run('%s phy %s info' % (self._command_iw, phy)).stdout
- else:
- out = self._run('%s list' % self._command_iw).stdout
- if 'Wiphy' in out:
- return out
- return None
diff --git a/client/common_lib/cros/network/iw_runner_unittest.py b/client/common_lib/cros/network/iw_runner_unittest.py
deleted file mode 100755
index ab6eed9..0000000
--- a/client/common_lib/cros/network/iw_runner_unittest.py
+++ /dev/null
@@ -1,789 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from six.moves import zip
-import unittest
-
-import common
-
-from autotest_lib.client.common_lib.cros.network import iw_runner
-
-class IwRunnerTest(unittest.TestCase):
- """Unit test for the IWRunner object."""
-
-
- class host_cmd(object):
- """Mock host command class."""
-
- def __init__(self, stdout, stderr, exit_status):
- self._stdout = stdout
- self._stderr = stderr
- self._exit_status = exit_status
-
-
- @property
- def stdout(self):
- """Returns stdout."""
- return self._stdout
-
-
- @property
- def stderr(self):
- """Returns stderr."""
- return self._stderr
-
-
- @property
- def exit_status(self):
- """Returns the exit status."""
- return self._exit_status
-
-
- class host(object):
- """Mock host class."""
-
- def __init__(self, host_cmd):
- self._host_cmd = IwRunnerTest.host_cmd(host_cmd, 1.0, 0)
-
-
- def run(self, cmd, ignore_status=False):
- """Returns the mocked output.
-
- @param cmd: a stub input ignore
- @param ignore_status: a stub input ignore
-
- """
- return self._host_cmd
-
-
- HT20 = str('BSS aa:aa:aa:aa:aa:aa (on wlan0)\n'
- ' freq: 2412\n'
- ' signal: -50.00 dBm\n'
- ' SSID: support_ht20\n'
- ' HT operation:\n'
- ' * secondary channel offset: no secondary\n')
-
- HT20_IW_BSS = iw_runner.IwBss('aa:aa:aa:aa:aa:aa', 2412,
- 'support_ht20', iw_runner.SECURITY_OPEN,
- iw_runner.WIDTH_HT20, -50.00)
-
- HT20_2 = str('BSS 11:11:11:11:11:11 (on wlan0)\n'
- ' freq: 2462\n'
- ' signal: -42.00 dBm\n'
- ' SSID: support_ht20\n'
- ' WPA: * Version: 1\n'
- ' HT operation:\n'
- ' * secondary channel offset: below\n')
-
- HT20_2_IW_BSS = iw_runner.IwBss('11:11:11:11:11:11', 2462,
- 'support_ht20', iw_runner.SECURITY_WPA,
- iw_runner.WIDTH_HT40_MINUS, -42.00)
-
- HT40_ABOVE = str('BSS bb:bb:bb:bb:bb:bb (on wlan0)\n'
- ' freq: 5180\n'
- ' signal: -55.00 dBm\n'
- ' SSID: support_ht40_above\n'
- ' RSN: * Version: 1\n'
- ' HT operation:\n'
- ' * secondary channel offset: above\n')
-
- HT40_ABOVE_IW_BSS = iw_runner.IwBss('bb:bb:bb:bb:bb:bb', 5180,
- 'support_ht40_above',
- iw_runner.SECURITY_WPA2,
- iw_runner.WIDTH_HT40_PLUS, -55.00)
-
- HT40_BELOW = str('BSS cc:cc:cc:cc:cc:cc (on wlan0)\n'
- ' freq: 2462\n'
- ' signal: -44.00 dBm\n'
- ' SSID: support_ht40_below\n'
- ' RSN: * Version: 1\n'
- ' WPA: * Version: 1\n'
- ' HT operation:\n'
- ' * secondary channel offset: below\n')
-
- HT40_BELOW_IW_BSS = iw_runner.IwBss('cc:cc:cc:cc:cc:cc', 2462,
- 'support_ht40_below',
- iw_runner.SECURITY_MIXED,
- iw_runner.WIDTH_HT40_MINUS, -44.00)
-
- NO_HT = str('BSS dd:dd:dd:dd:dd:dd (on wlan0)\n'
- ' freq: 2412\n'
- ' signal: -45.00 dBm\n'
- ' SSID: no_ht_support\n')
-
- NO_HT_IW_BSS = iw_runner.IwBss('dd:dd:dd:dd:dd:dd', 2412,
- 'no_ht_support', iw_runner.SECURITY_OPEN,
- None, -45.00)
-
- VHT_CAPA_20 = str('BSS ff:ff:ff:ff:ff:ff (on wlan0)\n'
- ' freq: 2462\n'
- ' signal: -44.00 dBm\n'
- ' SSID: vht_capable_20\n'
- ' HT operation:\n'
- ' * secondary channel offset: no secondary\n'
- ' VHT capabilities:\n'
- ' VHT Capabilities (0x0f8369b1):\n'
- ' Max MPDU length: 7991\n'
- ' Supported Channel Width: neither 160 nor 80+80\n'
- ' VHT operation:\n'
- ' * channel width: 0 (20 or 40 MHz)\n'
- ' * center freq segment 1: 11\n')
-
- VHT_CAPA_20_IW_BSS = iw_runner.IwBss('ff:ff:ff:ff:ff:ff', 2462,
- 'vht_capable_20',
- iw_runner.SECURITY_OPEN,
- iw_runner.WIDTH_HT20, -44.00)
-
- VHT80 = str('BSS ff:ff:ff:ff:ff:ff (on wlan0)\n'
- ' freq: 2462\n'
- ' signal: -44.00 dBm\n'
- ' SSID: support_vht80\n'
- ' HT operation:\n'
- ' * secondary channel offset: below\n'
- ' VHT capabilities:\n'
- ' VHT Capabilities (0x0f8369b1):\n'
- ' Max MPDU length: 7991\n'
- ' Supported Channel Width: neither 160 nor 80+80\n'
- ' VHT operation:\n'
- ' * channel width: 1 (80 MHz)\n'
- ' * center freq segment 1: 11\n'
- ' * center freq segment 2: 0\n')
-
- VHT80_IW_BSS = iw_runner.IwBss('ff:ff:ff:ff:ff:ff', 2462,
- 'support_vht80', iw_runner.SECURITY_OPEN,
- iw_runner.WIDTH_VHT80, -44.00)
-
- VHT160 = str('BSS 12:34:56:78:90:aa (on wlan0)\n'
- ' freq: 5180\n'
- ' signal: -44.00 dBm\n'
- ' SSID: support_vht160\n'
- ' HT operation:\n'
- ' * secondary channel offset: below\n'
- ' VHT capabilities:\n'
- ' VHT Capabilities (0x0f8369b1):\n'
- ' Max MPDU length: 7991\n'
- ' Supported Channel Width: 160 MHz\n'
- ' VHT operation:\n'
- ' * channel width: 1 (80 MHz)\n'
- ' * center freq segment 1: 42\n'
- ' * center freq segment 2: 50\n')
-
- VHT160_IW_BSS = iw_runner.IwBss('12:34:56:78:90:aa', 5180,
- 'support_vht160', iw_runner.SECURITY_OPEN,
- iw_runner.WIDTH_VHT160, -44.00)
-
- VHT80_80 = str('BSS ab:cd:ef:fe:dc:ba (on wlan0)\n'
- ' freq: 5180\n'
- ' signal: -44.00 dBm\n'
- ' SSID: support_vht80_80\n'
- ' HT operation:\n'
- ' * secondary channel offset: below\n'
- ' VHT operation:\n'
- ' * channel width: 1 (80 MHz)\n'
- ' * center freq segment 1: 42\n'
- ' * center freq segment 2: 106\n')
-
- VHT80_80_IW_BSS = iw_runner.IwBss('ab:cd:ef:fe:dc:ba', 5180,
- 'support_vht80_80', iw_runner.SECURITY_OPEN,
- iw_runner.WIDTH_VHT80_80, -44.00)
-
- HIDDEN_SSID = str('BSS ee:ee:ee:ee:ee:ee (on wlan0)\n'
- ' freq: 2462\n'
- ' signal: -70.00 dBm\n'
- ' SSID: \n'
- ' HT operation:\n'
- ' * secondary channel offset: no secondary\n')
-
- SCAN_TIME_OUTPUT = str('real 4.5\n'
- 'user 2.1\n'
- 'system 3.1\n')
-
- HIDDEN_SSID_IW_BSS = iw_runner.IwBss('ee:ee:ee:ee:ee:ee', 2462,
- None, iw_runner.SECURITY_OPEN,
- iw_runner.WIDTH_HT20, -70.00)
-
- STATION_LINK_INFORMATION = str(
- 'Connected to 12:34:56:ab:cd:ef (on wlan0)\n'
- ' SSID: PMKSACaching_4m9p5_ch1\n'
- ' freq: 5220\n'
- ' RX: 5370 bytes (37 packets)\n'
- ' TX: 3604 bytes (15 packets)\n'
- ' signal: -59 dBm\n'
- ' tx bitrate: 13.0 MBit/s MCS 1\n'
- '\n'
- ' bss flags: short-slot-time\n'
- ' dtim period: 5\n'
- ' beacon int: 100\n')
-
- STATION_LINK_BSSID = '12:34:56:ab:cd:ef'
-
- STATION_LINK_IFACE = 'wlan0'
-
- STATION_LINK_FREQ = '5220'
-
- STATION_LINK_PARSED = {
- 'SSID': 'PMKSACaching_4m9p5_ch1',
- 'freq': '5220',
- 'RX': '5370 bytes (37 packets)',
- 'TX': '3604 bytes (15 packets)',
- 'signal': '-59 dBm',
- 'tx bitrate': '13.0 MBit/s MCS 1',
- 'bss flags': 'short-slot-time',
- 'dtim period': '5',
- 'beacon int': '100'
- }
-
- STATION_DUMP_INFORMATION = str(
- 'Station dd:ee:ff:33:44:55 (on mesh-5000mhz)\n'
- ' inactive time: 140 ms\n'
- ' rx bytes: 2883498\n'
- ' rx packets: 31981\n'
- ' tx bytes: 1369934\n'
- ' tx packets: 6615\n'
- ' tx retries: 4\n'
- ' tx failed: 0\n'
- ' rx drop misc: 5\n'
- ' signal: -4 dBm\n'
- ' signal avg: -11 dBm\n'
- ' Toffset: 81715566854 us\n'
- ' tx bitrate: 866.7 MBit/s VHT-MCS 9 80MHz '
- 'short GI VHT-NSS 2\n'
- ' rx bitrate: 866.7 MBit/s VHT-MCS 9 80MHz '
- 'short GI VHT-NSS 2\n'
- ' mesh llid: 0\n'
- ' mesh plid: 0\n'
- ' mesh plink: ESTAB\n'
- ' mesh local PS mode: ACTIVE\n'
- ' mesh peer PS mode: ACTIVE\n'
- ' mesh non-peer PS mode: ACTIVE\n'
- ' authorized: yes\n'
- ' authenticated: yes\n'
- ' preamble: long\n'
- ' WMM/WME: yes\n'
- ' MFP: yes\n'
- ' TDLS peer: no\n'
- ' connected time: 8726 seconds\n'
- 'Station aa:bb:cc:00:11:22 (on mesh-5000mhz)\n'
- ' inactive time: 140 ms\n'
- ' rx bytes: 2845200\n'
- ' rx packets: 31938\n'
- ' tx bytes: 1309945\n'
- ' tx packets: 6672\n'
- ' tx retries: 0\n'
- ' tx failed: 1\n'
- ' signal: -21 dBm\n'
- ' signal avg: -21 dBm\n'
- ' tx bitrate: 866.7 MBit/s VHT-MCS 9 80MHz '
- 'short GI VHT-NSS 2\n'
- ' rx bitrate: 650.0 MBit/s VHT-MCS 7 80MHz '
- 'short GI VHT-NSS 2\n'
- ' mesh llid: 0\n'
- ' mesh plid: 0\n'
- ' mesh plink: ESTAB\n'
- ' mesh local PS mode: ACTIVE\n'
- ' mesh peer PS mode: ACTIVE\n'
- ' mesh non-peer PS mode: ACTIVE\n'
- ' authorized: yes\n'
- ' authenticated: yes\n'
- ' preamble: long\n'
- ' WMM/WME: yes\n'
- ' MFP: yes\n'
- ' TDLS peer: no\n'
- ' connected time: 8724 seconds\n'
- 'Station ff:aa:bb:aa:44:55 (on mesh-5000mhz)\n'
- ' inactive time: 304 ms\n'
- ' rx bytes: 18816\n'
- ' rx packets: 75\n'
- ' tx bytes: 5386\n'
- ' tx packets: 21\n'
- ' signal: -29 dBm\n'
- ' tx bitrate: 65.0 MBit/s VHT-MCS 0 80MHz short GI VHT-NSS 2\n'
- ' mesh llid: 0\n'
- ' mesh plid: 0\n'
- ' mesh plink: ESTAB\n'
- ' mesh local PS mode: ACTIVE\n'
- ' mesh peer PS mode: ACTIVE\n'
- ' mesh non-peer PS mode: ACTIVE\n'
- ' authorized: yes\n'
- ' authenticated: yes\n'
- ' preamble: long\n'
- ' WMM/WME: yes\n'
- ' MFP: yes\n'
- ' TDLS peer: no\n'
- ' connected time: 824 seconds\n')
-
- STATION_DUMP_INFORMATION_PARSED = [
- {'mac': 'aa:bb:cc:00:11:22',
- 'rssi_str': '-21 dBm',
- 'rssi_int': -21,
- 'rx_bitrate': '650.0 MBit/s VHT-MCS 7 80MHz short GI VHT-NSS 2',
- 'rx_drops': 0,
- 'rx_drop_rate': 0.0,
- 'rx_packets': 31938,
- 'tx_bitrate': '866.7 MBit/s VHT-MCS 9 80MHz short GI VHT-NSS 2',
- 'tx_failures': 1,
- 'tx_failure_rate': 0.00014988009592326138,
- 'tx_packets': 6672,
- 'tx_retries': 0,
- 'tx_retry_rate': 0.0},
- {'mac': 'dd:ee:ff:33:44:55',
- 'rssi_str': '-4 dBm',
- 'rssi_int': -4,
- 'rx_bitrate': '866.7 MBit/s VHT-MCS 9 80MHz short GI VHT-NSS 2',
- 'rx_drops': 5,
- 'rx_drop_rate': 0.0001563428285544542,
- 'rx_packets': 31981,
- 'tx_bitrate': '866.7 MBit/s VHT-MCS 9 80MHz short GI VHT-NSS 2',
- 'tx_failures': 0,
- 'tx_failure_rate': 0.0,
- 'tx_packets': 6615,
- 'tx_retries': 4,
- 'tx_retry_rate': 0.0006046863189720333},
- {'mac': 'ff:aa:bb:aa:44:55',
- 'rssi_str': '-29 dBm',
- 'rssi_int': -29,
- 'rx_bitrate': '0',
- 'rx_drops': 0,
- 'rx_drop_rate': 0.0,
- 'rx_packets': 75,
- 'tx_bitrate': '65.0 MBit/s VHT-MCS 0 80MHz short GI VHT-NSS 2',
- 'tx_failures': 0,
- 'tx_failure_rate': 0.0,
- 'tx_retries': 0,
- 'tx_retry_rate': 0.0,
- 'tx_packets': 21},
- ]
-
- STATION_DUMP_IFACE = 'mesh-5000mhz'
-
- INFO_MESH_MODE = str(
- 'Interface wlan-2400mhz\n'
- ' ifindex 10\n'
- ' wdev 0x100000002\n'
- ' addr aa:bb:cc:dd:ee:ff\n'
- ' type mesh point\n'
- ' wiphy 1\n'
- ' channel 149 (5745 MHz), width: 80 MHz, center1: 5775 MHz\n')
-
- INFO_AP_MODE = str(
- 'Interface wlan-2400mhz\n'
- ' ifindex 8\n'
- ' wdev 0x1\n'
- ' addr 00:11:22:33:44:55\n'
- ' ssid testap_170501151530_wsvx\n'
- ' type AP\n'
- ' wiphy 0\n'
- ' channel 11 (2462 MHz), width: 20 MHz, center1: 2462 MHz\n')
-
- RADIO_CONFIG_AP_MODE = {'number': 11, 'freq': 2462, 'width': 20,
- 'center1_freq': 2462}
-
- INFO_STA_MODE = str(
- 'Interface wlan-2400mhz\n'
- ' ifindex 9\n'
- ' wdev 0x1\n'
- ' addr 44:55:66:77:88:99\n'
- ' type managed\n'
- ' wiphy 0\n'
- ' channel 11 (2462 MHz), width: 20 MHz, center1: 2462 MHz\n')
-
- INFO_IFACE = 'wlan-2400mhz'
-
- PHY_INFO_FRAGMENTATION = str(
- 'Wiphy phy1\n'
- ' max # scan SSIDs: 20\n'
- ' max scan IEs length: 425 bytes\n'
- ' Fragmentation threshold: 256\n'
- ' Retry short limit: 7\n'
- ' Retry long limit: 4\n')
-
- INFO_PHY = 'phy1'
-
- PHY_FRAGMENTATION_THRESHOLD = 256
-
- VHT_IW_INFO = str(
- 'Wiphy phy0\n'
- ' max # scan SSIDs: 20\n'
- ' max scan IEs length: 425 bytes\n'
- ' max # sched scan SSIDs: 20\n'
- ' max # match sets: 11\n'
- ' Retry short limit: 7\n'
- ' Retry long limit: 4\n'
- ' Coverage class: 0 (up to 0m)\n'
- ' Device supports RSN-IBSS.\n'
- ' Device supports AP-side u-APSD.\n'
- ' Device supports T-DLS.\n'
- ' Supported Ciphers:\n'
- ' * WEP40 (00-0f-ac:1)\n'
- ' * WEP104 (00-0f-ac:5)\n'
- ' * TKIP (00-0f-ac:2)\n'
- ' * CCMP-128 (00-0f-ac:4)\n'
- ' * CMAC (00-0f-ac:6)\n'
- ' Available Antennas: TX 0 RX 0\n'
- ' Supported interface modes:\n'
- ' * IBSS\n'
- ' * managed\n'
- ' * AP\n'
- ' * AP/VLAN\n'
- ' * monitor\n'
- ' * P2P-client\n'
- ' * P2P-GO\n'
- ' * P2P-device\n'
- ' Band 1:\n'
- ' Capabilities: 0x11ef\n'
- ' RX LDPC\n'
- ' HT20/HT40\n'
- ' SM Power Save disabled\n'
- ' RX HT20 SGI\n'
- ' RX HT40 SGI\n'
- ' TX STBC\n'
- ' RX STBC 1-stream\n'
- ' Max AMSDU length: 3839 bytes\n'
- ' DSSS/CCK HT40\n'
- ' Maximum RX AMPDU length 65535 bytes (exponent: 0x003)\n'
- ' Minimum RX AMPDU time spacing: 4 usec (0x05)\n'
- ' HT Max RX data rate: 300 Mbps\n'
- ' HT TX/RX MCS rate indexes supported: 0-15\n'
- ' Band 2:\n'
- ' Capabilities: 0x11ef\n'
- ' RX LDPC\n'
- ' HT20/HT40\n'
- ' SM Power Save disabled\n'
- ' RX HT20 SGI\n'
- ' RX HT40 SGI\n'
- ' TX STBC\n'
- ' RX STBC 1-stream\n'
- ' Max AMSDU length: 3839 bytes\n'
- ' DSSS/CCK HT40\n'
- ' Maximum RX AMPDU length 65535 bytes (exponent: 0x003)\n'
- ' Minimum RX AMPDU time spacing: 4 usec (0x05)\n'
- ' HT Max RX data rate: 300 Mbps\n'
- ' HT TX/RX MCS rate indexes supported: 0-15\n'
- ' VHT Capabilities (0x038071b0):\n'
- ' Max MPDU length: 3895\n'
- ' Supported Channel Width: neither 160 nor 80+80\n'
- ' RX LDPC\n'
- ' short GI (80 MHz)\n'
- ' TX STBC\n'
- ' SU Beamformee\n')
-
- HE_IW_INFO = str(
- 'Wiphy phy0\n'
- ' max # scan SSIDs: 20\n'
- ' max scan IEs length: 365 bytes\n'
- ' max # sched scan SSIDs: 20\n'
- ' max # match sets: 11\n'
- ' max # scan plans: 2\n'
- ' max scan plan interval: 65535\n'
- ' max scan plan iterations: 254\n'
- ' Retry short limit: 7\n'
- ' Retry long limit: 4\n'
- ' Coverage class: 0 (up to 0m)\n'
- ' Device supports RSN-IBSS.\n'
- ' Device supports AP-side u-APSD.\n'
- ' Device supports T-DLS.\n'
- ' Supported Ciphers:\n'
- ' * WEP40 (00-0f-ac:1)\n'
- ' * WEP104 (00-0f-ac:5)\n'
- ' * TKIP (00-0f-ac:2)\n'
- ' * CCMP-128 (00-0f-ac:4)\n'
- ' * GCMP-128 (00-0f-ac:8)\n'
- ' * GCMP-256 (00-0f-ac:9)\n'
- ' * CMAC (00-0f-ac:6)\n'
- ' * GMAC-128 (00-0f-ac:11)\n'
- ' * GMAC-256 (00-0f-ac:12)\n'
- ' Available Antennas: TX 0 RX 0\n'
- ' Supported interface modes:\n'
- ' * IBSS\n'
- ' * managed\n'
- ' * AP\n'
- ' * AP/VLAN\n'
- ' * monitor\n'
- ' * P2P-client\n'
- ' * P2P-GO\n'
- ' * P2P-device\n'
- ' Band 1:\n'
- ' Capabilities: 0x19ef\n'
- ' RX LDPC\n'
- ' HT20/HT40\n'
- ' SM Power Save disabled\n'
- ' RX HT20 SGI\n'
- ' RX HT40 SGI\n'
- ' TX STBC\n'
- ' RX STBC 1-stream\n'
- ' Max AMSDU length: 7935 bytes\n'
- ' DSSS/CCK HT40\n'
- ' Maximum RX AMPDU length 65535 bytes (exponent: 0x003)\n'
- ' Minimum RX AMPDU time spacing: 4 usec (0x05)\n'
- ' HT Max RX data rate: 300 Mbps\n'
- ' HT TX/RX MCS rate indexes supported: 0-15\n'
- ' HE Iftypes: Station\n'
- ' HE MAC Capabilities (0x780112a0abc0):\n'
- ' +HTC HE Supported\n'
- ' HE PHY Capabilities: (0x0e3f0200fd09800ecff200):\n'
- ' HE40/2.4GHz\n'
- ' HE40/HE80/5GHz\n'
- ' HE160/5GHz\n'
- ' Band 2:\n'
- ' Capabilities: 0x19ef\n'
- ' RX LDPC\n'
- ' HT20/HT40\n'
- ' SM Power Save disabled\n'
- ' RX HT20 SGI\n'
- ' RX HT40 SGI\n'
- ' TX STBC\n'
- ' RX STBC 1-stream\n'
- ' Max AMSDU length: 7935 bytes\n'
- ' DSSS/CCK HT40\n'
- ' Maximum RX AMPDU length 65535 bytes (exponent: 0x003)\n'
- ' Minimum RX AMPDU time spacing: 4 usec (0x05)\n'
- ' HT Max RX data rate: 300 Mbps\n'
- ' HT TX/RX MCS rate indexes supported: 0-15\n'
- ' VHT Capabilities (0x039071f6):\n'
- ' Max MPDU length: 11454\n'
- ' Supported Channel Width: 160 MHz\n'
- ' RX LDPC\n'
- ' short GI (80 MHz)\n'
- ' short GI (160/80+80 MHz)\n'
- ' TX STBC\n'
- ' SU Beamformee\n'
- ' MU Beamformee\n'
- ' VHT RX MCS set:\n'
- ' 1 streams: MCS 0-9\n'
- ' 2 streams: MCS 0-9\n'
- ' 3 streams: not supported\n'
- ' 4 streams: not supported\n'
- ' 5 streams: not supported\n'
- ' 6 streams: not supported\n'
- ' 7 streams: not supported\n'
- ' 8 streams: not supported\n'
- ' VHT RX highest supported: 0 Mbps\n'
- ' VHT TX MCS set:\n'
- ' 1 streams: MCS 0-9\n'
- ' 2 streams: MCS 0-9\n'
- ' 3 streams: not supported\n'
- ' 4 streams: not supported\n'
- ' 5 streams: not supported\n'
- ' 6 streams: not supported\n'
- ' 7 streams: not supported\n'
- ' 8 streams: not supported\n'
- ' VHT TX highest supported: 0 Mbps\n'
- ' HE Iftypes: Station\n'
- ' HE MAC Capabilities (0x780112a0abc0):\n'
- ' +HTC HE Supported\n'
- ' HE PHY Capabilities: (0x0e3f0200fd09800ecff200):\n'
- ' HE40/2.4GHz\n'
- ' HE40/HE80/5GHz\n'
- ' HE160/5GHz\n')
-
-
- def verify_values(self, iw_bss_1, iw_bss_2):
- """Checks all of the IWBss values
-
- @param iw_bss_1: an IWBss object
- @param iw_bss_2: an IWBss object
-
- """
-
- self.assertEquals(iw_bss_1.bss, iw_bss_2.bss)
- self.assertEquals(iw_bss_1.ssid, iw_bss_2.ssid)
- self.assertEquals(iw_bss_1.frequency, iw_bss_2.frequency)
- self.assertEquals(iw_bss_1.security, iw_bss_2.security)
- self.assertEquals(iw_bss_1.width, iw_bss_2.width)
- self.assertEquals(iw_bss_1.signal, iw_bss_2.signal)
-
-
- def search_by_bss(self, scan_output, test_iw_bss):
- """
-
- @param scan_output: the output of the scan as a string
- @param test_iw_bss: an IWBss object
-
- Uses the runner to search for a network by bss.
- """
- host = self.host(scan_output + self.SCAN_TIME_OUTPUT)
- runner = iw_runner.IwRunner(remote_host=host)
- network = runner.wait_for_scan_result('wlan0', bsses=[test_iw_bss.bss])
- self.verify_values(test_iw_bss, network[0])
-
-
- def test_find_first(self):
- """Test with the first item in the list."""
- scan_output = self.HT20 + self.HT40_ABOVE
- self.search_by_bss(scan_output, self.HT20_IW_BSS)
-
-
- def test_find_last(self):
- """Test with the last item in the list."""
- scan_output = self.HT40_ABOVE + self.HT20
- self.search_by_bss(scan_output, self.HT20_IW_BSS)
-
-
- def test_find_middle(self):
- """Test with the middle item in the list."""
- scan_output = self.HT40_ABOVE + self.HT20 + self.NO_HT
- self.search_by_bss(scan_output, self.HT20_IW_BSS)
-
-
- def test_ht40_above(self):
- """Test with a HT40+ network."""
- scan_output = self.HT20 + self.HT40_ABOVE + self.NO_HT
- self.search_by_bss(scan_output, self.HT40_ABOVE_IW_BSS)
-
-
- def test_ht40_below(self):
- """Test with a HT40- network."""
- scan_output = self.HT20 + self.HT40_BELOW + self.NO_HT
- self.search_by_bss(scan_output, self.HT40_BELOW_IW_BSS)
-
-
- def test_no_ht(self):
- """Test with a network that doesn't have ht."""
- scan_output = self.HT20 + self.NO_HT + self.HT40_ABOVE
- self.search_by_bss(scan_output, self.NO_HT_IW_BSS)
-
-
- def test_vht_20(self):
- """Test with a network that supports vht but is 20 MHz wide."""
- scan_output = self.HT20 + self.NO_HT + self.VHT_CAPA_20
- self.search_by_bss(scan_output, self.VHT_CAPA_20_IW_BSS)
-
-
- def test_vht80(self):
- """Test with a VHT80 network."""
- scan_output = self.HT20 + self.VHT80 + self.HT40_ABOVE
- self.search_by_bss(scan_output, self.VHT80_IW_BSS)
-
-
- def test_vht160(self):
- """Test with a VHT160 network."""
- scan_output = self.VHT160 + self.VHT80 + self.HT40_ABOVE
- self.search_by_bss(scan_output, self.VHT160_IW_BSS)
-
- def test_vht80_80(self):
- """Test with a VHT80+80 network."""
- scan_output = self.VHT160 + self.VHT80_80
- self.search_by_bss(scan_output, self.VHT80_80_IW_BSS)
-
-
- def test_hidden_ssid(self):
- """Test with a network with a hidden ssid."""
- scan_output = self.HT20 + self.HIDDEN_SSID + self.NO_HT
- self.search_by_bss(scan_output, self.HIDDEN_SSID_IW_BSS)
-
-
- def test_multiple_ssids(self):
- """Test with multiple networks with the same ssids."""
- scan_output = self.HT40_ABOVE + self.HT20 + self.NO_HT + self.HT20_2
- host = self.host(scan_output + self.SCAN_TIME_OUTPUT)
- runner = iw_runner.IwRunner(remote_host=host)
- networks = runner.wait_for_scan_result('wlan 0',
- ssids=[self.HT20_2_IW_BSS.ssid])
- for iw_bss_1, iw_bss_2 in zip([self.HT20_IW_BSS, self.HT20_2_IW_BSS],
- networks):
- self.verify_values(iw_bss_1, iw_bss_2)
-
-
- def test_station_bssid(self):
- """Test parsing of the bssid of a station-mode link."""
- host = self.host(self.STATION_LINK_INFORMATION)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(
- runner.get_current_bssid(self.STATION_LINK_IFACE),
- self.STATION_LINK_BSSID)
-
-
- def test_station_link_parsing(self):
- """Test all link keys can be parsed from station link information."""
- self.assertEquals(
- iw_runner._get_all_link_keys(self.STATION_LINK_INFORMATION),
- self.STATION_LINK_PARSED)
-
-
- def test_station_link_key(self):
- """Test a link key is extracted from station link information."""
- host = self.host(self.STATION_LINK_INFORMATION)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(
- runner.get_link_value(self.STATION_LINK_INFORMATION,
- iw_runner.IW_LINK_KEY_FREQUENCY),
- self.STATION_LINK_FREQ)
-
-
- def test_station_dump(self):
- """Test parsing of a station dump."""
- host = self.host(self.STATION_DUMP_INFORMATION)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(
- runner.get_station_dump(self.STATION_DUMP_IFACE),
- self.STATION_DUMP_INFORMATION_PARSED)
-
-
- def test_operating_mode_mesh(self):
- """Test mesh operating mode parsing."""
- host = self.host(self.INFO_MESH_MODE)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(
- runner.get_operating_mode(self.INFO_IFACE),
- iw_runner.DEV_MODE_MESH_POINT)
-
-
- def test_operating_mode_ap(self):
- """Test AP operating mode parsing."""
- host = self.host(self.INFO_AP_MODE)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(
- runner.get_operating_mode(self.INFO_IFACE),
- iw_runner.DEV_MODE_AP)
-
-
- def test_operating_mode_station(self):
- """Test STA operating mode parsing."""
- host = self.host(self.INFO_STA_MODE)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(
- runner.get_operating_mode(self.INFO_IFACE),
- iw_runner.DEV_MODE_STATION)
-
-
- def test_radio_information(self):
- """Test radio information parsing."""
- host = self.host(self.INFO_AP_MODE)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(
- runner.get_radio_config(self.INFO_IFACE),
- self.RADIO_CONFIG_AP_MODE)
-
-
- def test_fragmentation_threshold(self):
- """Test fragmentation threshold parsing."""
- host = self.host(self.PHY_INFO_FRAGMENTATION)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(
- runner.get_fragmentation_threshold(self.INFO_PHY),
- self.PHY_FRAGMENTATION_THRESHOLD)
-
-
- def test_vht_supported(self):
- """Test VHT support parsing."""
- host = self.host(self.VHT_IW_INFO)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(runner.vht_supported(), True)
-
-
- def test_he_supported(self):
- """Test HE support parsing."""
- host = self.host(self.HE_IW_INFO)
- runner = iw_runner.IwRunner(remote_host=host)
- self.assertEquals(runner.he_supported(), True)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/client/common_lib/cros/network/netblock.py b/client/common_lib/cros/network/netblock.py
deleted file mode 100644
index e0961c5..0000000
--- a/client/common_lib/cros/network/netblock.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import logging
-from six.moves import map
-from six.moves import zip
-
-from autotest_lib.client.common_lib import error
-
-def from_addr(addr, prefix_len=None):
- """Build a Netblock object.
-
- @param addr: string IP address with optional prefix length
- (e.g. '192.168.1.1' or '192.168.1.1/24'). If |addr| has no
- prefix length, then use the |prefix_len| parameter.
- @param prefix_len: int number of bits forming the IP subnet prefix for
- |addr|. This value will be preferred to the parsed value if
- |addr| has a prefix length as well. If |addr|
- has no prefix length and |prefix_len| is None, then an error
- will be thrown.
-
- """
- if addr is None:
- raise error.TestError('netblock.from_addr() expects non-None addr '
- 'parameter.')
-
- prefix_sep_count = addr.count('/')
- if prefix_sep_count > 1:
- raise error.TestError('Invalid IP address found: "%s".' % addr)
-
- if prefix_sep_count == 1:
- addr_str, prefix_len_str = addr.split('/')
- else:
- # No prefix separator. Assume addr looks like '192.168.1.1'
- addr_str = addr
- # Rely on passed in |prefix_len|
- prefix_len_str = None
-
- if prefix_len is not None and prefix_len_str is not None:
- logging.warning('Ignoring parsed prefix length of %s in favor of '
- 'passed in value %d', prefix_len_str, prefix_len)
- elif prefix_len is not None and prefix_len_str is None:
- pass
- elif prefix_len is None and prefix_len_str is not None:
- prefix_len = int(prefix_len_str)
- else:
- raise error.TestError('Cannot construct netblock without knowing '
- 'prefix length for addr: "%s".' % addr)
-
- return Netblock(addr_str, prefix_len)
-
-
-class Netblock(object):
- """Utility class for transforming netblock address to related strings."""
-
- @staticmethod
- def _octets_to_addr(octets):
- """Transform a list of bytes into a string IP address.
-
- @param octets list of ints (e.g. [192.168.0.1]).
- @return string IP address (e.g. '192.168.0.1.').
-
- """
- return '.'.join(map(str, octets))
-
-
- @staticmethod
- def _int_to_octets(num):
- """Tranform a 32 bit number into a list of 4 octets.
-
- @param num: number to convert to octets.
- @return list of int values <= 8 bits long.
-
- """
- return [(num >> s) & 0xff for s in (24, 16, 8, 0)]
-
-
- @property
- def netblock(self):
- """@return the IPv4 address/prefix, e.g., '192.168.0.1/24'."""
- return '/'.join([self._octets_to_addr(self._octets),
- str(self.prefix_len)])
-
-
- @property
- def netmask(self):
- """@return the IPv4 netmask, e.g., '255.255.255.0'."""
- return self._octets_to_addr(self._mask_octets)
-
-
- @property
- def prefix_len(self):
- """@return the IPv4 prefix len, e.g., 24."""
- return self._prefix_len
-
-
- @property
- def subnet(self):
- """@return the IPv4 subnet, e.g., '192.168.0.0'."""
- octets = [a & m for a, m in zip(self._octets, self._mask_octets)]
- return self._octets_to_addr(octets)
-
-
- @property
- def broadcast(self):
- """@return the IPv4 broadcast address, e.g., '192.168.0.255'."""
- octets = [a | (m ^ 0xff)
- for a, m in zip(self._octets, self._mask_octets)]
- return self._octets_to_addr(octets)
-
-
- @property
- def addr(self):
- """@return the IPv4 address, e.g., '192.168.0.1'."""
- return self._octets_to_addr(self._octets)
-
-
- def __init__(self, addr_str, prefix_len):
- """Construct a Netblock.
-
- @param addr_str: string IP address (e.g. '192.168.1.1').
- @param prefix_len: int length of subnet prefix (e.g. 24).
-
- """
- self._octets = list(map(int, addr_str.split('.')))
- mask_bits = (-1 << (32 - prefix_len)) & 0xffffffff
- self._mask_octets = self._int_to_octets(mask_bits)
- self._prefix_len = prefix_len
-
-
- def get_addr_in_block(self, offset):
- """Get an address in a subnet.
-
- For instance if this netblock represents 192.168.0.1/24,
- then get_addr_in_block(5) would return 192.168.0.5.
-
- @param offset int offset in block, (e.g. 5).
- @return string address (e.g. '192.168.0.5').
-
- """
- offset = self._int_to_octets(offset)
- octets = [(a & m) + o
- for a, m, o in zip(self._octets, self._mask_octets, offset)]
- return self._octets_to_addr(octets)
diff --git a/client/common_lib/cros/network/tcpdump_analyzer.py b/client/common_lib/cros/network/tcpdump_analyzer.py
deleted file mode 100644
index 075f572..0000000
--- a/client/common_lib/cros/network/tcpdump_analyzer.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import pyshark
-from locale import *
-
-PYSHARK_LOAD_TIMEOUT = 2
-FRAME_FIELD_RADIOTAP_DATARATE = 'radiotap.datarate'
-FRAME_FIELD_RADIOTAP_MCS_INDEX = 'radiotap.mcs_index'
-FRAME_FIELD_WLAN_FRAME_TYPE = 'wlan.fc_type_subtype'
-FRAME_FIELD_WLAN_SOURCE_ADDR = 'wlan.sa'
-FRAME_FIELD_WLAN_MGMT_SSID = 'wlan_mgt.ssid'
-RADIOTAP_KNOWN_BAD_FCS_REJECTOR = (
- 'not radiotap.flags.badfcs or radiotap.flags.badfcs==0')
-RADIOTAP_LOW_SIGNAL_REJECTOR = ('radiotap.dbm_antsignal > -85')
-WLAN_BEACON_FRAME_TYPE = '0x08'
-WLAN_BEACON_ACCEPTOR = 'wlan.fc.type_subtype==0x08'
-WLAN_PROBE_REQ_FRAME_TYPE = '0x04'
-WLAN_PROBE_REQ_ACCEPTOR = 'wlan.fc.type_subtype==0x04'
-WLAN_QOS_NULL_TYPE = '0x2c'
-PYSHARK_BROADCAST_SSID = 'SSID: '
-BROADCAST_SSID = ''
-
-setlocale(LC_ALL, '')
-
-class Frame(object):
- """A frame from a packet capture."""
- TIME_FORMAT = "%H:%M:%S.%f"
-
-
- def __init__(self, frametime, bit_rate, mcs_index, ssid, source_addr,
- frame_type):
- self._datetime = frametime
- self._bit_rate = bit_rate
- self._mcs_index = mcs_index
- self._ssid = ssid
- self._source_addr = source_addr
- self._frame_type = frame_type
-
-
- @property
- def time_datetime(self):
- """The time of the frame, as a |datetime| object."""
- return self._datetime
-
-
- @property
- def bit_rate(self):
- """The bitrate used to transmit the frame, as an int."""
- return self._bit_rate
-
-
- @property
- def frame_type(self):
- """802.11 type/subtype field, as a hex string."""
- return self._frame_type
-
-
- @property
- def mcs_index(self):
- """
- The MCS index used to transmit the frame, as an int.
-
- The value may be None, if the frame was not transmitted
- using 802.11n modes.
- """
- return self._mcs_index
-
-
- @property
- def ssid(self):
- """
- The SSID of the frame, as a string.
-
- The value may be None, if the frame does not have an SSID.
- """
- return self._ssid
-
-
- @property
- def source_addr(self):
- """The source address of the frame, as a string."""
- return self._source_addr
-
-
- @property
- def time_string(self):
- """The time of the frame, in local time, as a string."""
- return self._datetime.strftime(self.TIME_FORMAT)
-
-
- def __str__(self):
- return '%s: rate %s, MCS %s, SSID %s, SA %s, Type %s' % (
- self.time_datetime, self.bit_rate, self.mcs_index, self.ssid,
- self.source_addr, self.frame_type)
-
-
-def _fetch_frame_field_value(frame, field):
- """
- Retrieve the value of |field| within the |frame|.
-
- @param frame: Pyshark packet object corresponding to a captured frame.
- @param field: Field for which the value needs to be extracted from |frame|.
-
- @return Value extracted from the frame if the field exists, else None.
-
- """
- layer_object = frame
- for layer in field.split('.'):
- try:
- layer_object = getattr(layer_object, layer)
- except AttributeError:
- return None
- return layer_object
-
-
-def _open_capture(pcap_path, display_filter):
- """
- Get pyshark packet object parsed contents of a pcap file.
-
- @param pcap_path: string path to pcap file.
- @param display_filter: string filter to apply to captured frames.
-
- @return list of Pyshark packet objects.
-
- """
- import pyshark
- capture = pyshark.FileCapture(
- input_file=pcap_path, display_filter=display_filter)
- capture.load_packets(timeout=PYSHARK_LOAD_TIMEOUT)
- return capture
-
-
-def get_frames(local_pcap_path, display_filter, reject_bad_fcs=True,
- reject_low_signal=False):
- """
- Get a parsed representation of the contents of a pcap file.
- If the RF shielding in the wificell or other chambers is imperfect,
- we'll see packets from the external environment in the packet capture
- and tests that assert if the packet capture has certain properties
- (i.e. only packets of a certain kind) will fail. A good way to reject
- these packets ("leakage from the outside world") is to look at signal
- strength. The DUT is usually either next to the AP or <5ft from the AP
- in these chambers. A signal strength of < -85 dBm in an incoming packet
- should imply it is leakage. The reject_low_signal option is turned off by
- default and external packets are part of the capture by default.
- Be careful to not turn on this option in an attenuated setup, where the
- DUT/AP packets will also have a low signal (i.e. network_WiFi_AttenPerf).
-
- @param local_pcap_path: string path to a local pcap file on the host.
- @param display_filter: string filter to apply to captured frames.
- @param reject_bad_fcs: bool, for frames with bad Frame Check Sequence.
- @param reject_low_signal: bool, for packets with signal < -85 dBm. These
- are likely from the external environment and show
- up due to poor shielding in the RF chamber.
-
- @return list of Frame structs.
-
- """
- if reject_bad_fcs is True:
- display_filter = '(%s) and (%s)' % (RADIOTAP_KNOWN_BAD_FCS_REJECTOR,
- display_filter)
-
- if reject_low_signal is True:
- display_filter = '(%s) and (%s)' % (RADIOTAP_LOW_SIGNAL_REJECTOR,
- display_filter)
-
- logging.debug('Capture: %s, Filter: %s', local_pcap_path, display_filter)
- capture_frames = _open_capture(local_pcap_path, display_filter)
- frames = []
- logging.info('Parsing frames')
-
- try:
- for frame in capture_frames:
- rate = _fetch_frame_field_value(frame, FRAME_FIELD_RADIOTAP_DATARATE)
- if rate:
- rate = atof(rate)
- else:
- logging.debug('Capture frame missing rate: %s', frame)
-
- frametime = frame.sniff_time
-
- mcs_index = _fetch_frame_field_value(
- frame, FRAME_FIELD_RADIOTAP_MCS_INDEX)
- if mcs_index:
- mcs_index = int(mcs_index)
-
- source_addr = _fetch_frame_field_value(
- frame, FRAME_FIELD_WLAN_SOURCE_ADDR)
-
- # Get the SSID for any probe requests
- frame_type = _fetch_frame_field_value(
- frame, FRAME_FIELD_WLAN_FRAME_TYPE)
- if (frame_type in [WLAN_BEACON_FRAME_TYPE, WLAN_PROBE_REQ_FRAME_TYPE]):
- ssid = _fetch_frame_field_value(frame, FRAME_FIELD_WLAN_MGMT_SSID)
- # Since the SSID name is a variable length field, there seems to be
- # a bug in the pyshark parsing, it returns 'SSID: ' instead of ''
- # for broadcast SSID's.
- if ssid == PYSHARK_BROADCAST_SSID:
- ssid = BROADCAST_SSID
- else:
- ssid = None
-
- frames.append(Frame(frametime, rate, mcs_index, ssid, source_addr,
- frame_type=frame_type))
- except pyshark.capture.capture.TSharkCrashException as e:
- # tcpdump sometimes produces captures with an incomplete packet when passed SIGINT.
- # tshark will crash when it reads this incomplete packet and return a non-zero exit code.
- # pyshark will throw a TSharkCrashException due to this exit code from tshark.
- # Instead of throwing away all packets, let's ignore the malformed packet and continue to
- # analyze packets and return the successfully analyzed ones.
- # This is a band aid fix for b/158311775 as we would ideally fix the tcpdump issue
- # in the first place.
- logging.info("Frame capture issue")
- return frames
-
-
-def get_probe_ssids(local_pcap_path, probe_sender=None):
- """
- Get the SSIDs that were named in 802.11 probe requests frames.
-
- Parse a pcap, returning all the SSIDs named in 802.11 probe
- request frames. If |probe_sender| is specified, only probes
- from that MAC address will be considered.
-
- @param pcap_path: string path to a local pcap file on the host.
- @param remote_host: Host object (if the file is remote).
- @param probe_sender: MAC address of the device sending probes.
-
- @return: A frozenset of the SSIDs that were probed.
-
- """
- if probe_sender:
- display_filter = '%s and wlan.addr==%s' % (
- WLAN_PROBE_REQ_ACCEPTOR, probe_sender)
- else:
- display_filter = WLAN_PROBE_REQ_ACCEPTOR
-
- frames = get_frames(local_pcap_path, display_filter, reject_bad_fcs=True)
-
- return frozenset(
- [frame.ssid for frame in frames if frame.ssid is not None])
diff --git a/client/common_lib/cros/network/wifi_rack_constants.py b/client/common_lib/cros/network/wifi_rack_constants.py
deleted file mode 100644
index 169aeba..0000000
--- a/client/common_lib/cros/network/wifi_rack_constants.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from collections import namedtuple
-
-# Used in network_RackWiFiConnect
-PASSWORD = 'chromeos'
-
-SCAN_RETRY_TIMEOUT = 180
-
-NetworkServices = namedtuple('NetworkServices',
- ['testname', 'user', 'ssid', 'url', 'pattern'])
-
-HIDDEN_WPA = NetworkServices('hiddenWPA',
- 'networktest01@croste.tv',
- 'CrOS_WPA_LinksysWRT54GL',
- 'www.openvpn.com',
- 'certs')
-
-PROXY_NON_AUTH = NetworkServices('proxyNonAuth',
- 'networktest01@croste.tv',
- 'CrOS_WPA2_Airport_Xtreme_5GHz',
- 'www.openvpn.com',
- 'certs')
-
-GOOGLE_GUEST = NetworkServices('googleGuest',
- 'networktest01@croste.tv',
- 'GoogleGuest',
- 'www.google.com',
- 'www.google.com')
-
-WEP = NetworkServices('WEP',
- 'networktest01@croste.tv',
- 'CrOS_WEP_DLink_Dir601',
- 'www.openvpn.com',
- 'certs')
-
-PEAP = NetworkServices('PEAP',
- 'networktest01@croste.tv',
- 'CrOS_WPA2_LinksysE3000_2.4GHz',
- 'www.openvpn.com',
- 'certs')
-
-HIDDEN_WEP = NetworkServices('hiddenWEP',
- 'networktest02@croste.tv',
- 'CrOS_WEP_ddwrt_54GL',
- 'www.openvpn.com',
- 'certs')
-
-WPA2 = NetworkServices('WPA2',
- 'networktest02@croste.tv',
- 'CrOS_WPA2_LinksysE3000N_5GHz',
- 'www.openvpn.com',
- 'certs')
-
-EAP_TTLS = NetworkServices('EAP_TTLS',
- 'networktest03@croste.tv',
- 'CrOS_WPA2_LinksysE3000_2.4GHz',
- 'www.openvpn.com',
- 'certs')
-
-NETWORK_SERVICES_TESTS = [HIDDEN_WPA, HIDDEN_WEP, PROXY_NON_AUTH, GOOGLE_GUEST,
- WEP, PEAP, WPA2, EAP_TTLS]
diff --git a/client/common_lib/cros/network/xmlrpc_datatypes.py b/client/common_lib/cros/network/xmlrpc_datatypes.py
deleted file mode 100644
index f29eff6..0000000
--- a/client/common_lib/cros/network/xmlrpc_datatypes.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import copy
-import logging
-import pprint
-import sys
-
-from autotest_lib.client.common_lib.cros import xmlrpc_types
-from autotest_lib.client.common_lib.cros.network import xmlrpc_security_types
-
-
-def deserialize(serialized):
- """Deserialize an argument to the XmlRpc proxy.
-
- @param serialized dict representing a serialized object.
- @return the corresponding deserialized object.
-
- """
- return xmlrpc_types.deserialize(serialized, module=sys.modules[__name__])
-
-
-class AssociationParameters(xmlrpc_types.XmlRpcStruct):
- """Describes parameters used in WiFi connection attempts."""
-
- DEFAULT_DISCOVERY_TIMEOUT = 15
- DEFAULT_ASSOCIATION_TIMEOUT = 15
- DEFAULT_CONFIGURATION_TIMEOUT = 15
- # Mode for most routers and access points.
- STATION_TYPE_MANAGED = 'managed'
- # Mode for certain kinds of p2p networks like old Android phone hotspots.
- STATION_TYPE_IBSS = 'ibss'
-
- @property
- def security(self):
- """@return string security type for this network."""
- return self.security_config.security
-
-
- @property
- def security_parameters(self):
- """@return dict of service property/value pairs related to security."""
- return self.security_config.get_shill_service_properties()
-
-
- def __init__(self, ssid=None, security_config=None,
- discovery_timeout=DEFAULT_DISCOVERY_TIMEOUT,
- association_timeout=DEFAULT_ASSOCIATION_TIMEOUT,
- configuration_timeout=DEFAULT_CONFIGURATION_TIMEOUT,
- is_hidden=False, save_credentials=False, station_type=None,
- expect_failure=False, guid=None, autoconnect=None,
- bgscan_config=None):
- """Construct an AssociationParameters.
-
- @param ssid string the network to connect to (e.g. 'GoogleGuest').
- @param security_config SecurityConfig object or serialized version.
- @param discovery_timeout int timeout for discovery in seconds.
- @param association_timeout int timeout for association in seconds.
- @param configuration_timeout int timeout for configuration in seconds.
- @param is_hidden bool True iff this is a hidden service.
- @param save_credentials True iff the credentials should be saved for
- this service.
- @param station_type string station type to connect with. Usually
- left unfilled unless we're attempting to connect to a
- non-managed BSS. One of STATION_TYPE_* above.
- @param expect_failure bool True if we expect this connection attempt to
- fail.
- @param guid string unique identifier of this service.
- @param autoconnect: bool or None. None indicates that this should not
- be set one way or the other, while a boolean indicates a desired
- value.
-
- """
- super(AssociationParameters, self).__init__()
- self.ssid = ssid
- # The security config is a little tricky. When we're being deserialized
- # this is passed to us in the form of a dictionary which also needs
- # to be deserialized into a real object.
- if isinstance(security_config, dict):
- self.security_config = xmlrpc_security_types.deserialize(
- security_config)
- elif security_config is not None:
- self.security_config = copy.copy(security_config)
- else:
- self.security_config = xmlrpc_security_types.SecurityConfig()
-
- # The bgscan configuration is similar to the security configuration.
- if isinstance(bgscan_config, dict):
- self.bgscan_config = deserialize(bgscan_config)
- elif bgscan_config is not None:
- self.bgscan_config = copy.copy(bgscan_config)
- else:
- self.bgscan_config = BgscanConfiguration()
- self.discovery_timeout = discovery_timeout
- self.association_timeout = association_timeout
- self.configuration_timeout = configuration_timeout
- self.is_hidden = is_hidden
- self.save_credentials = save_credentials
- self.station_type = station_type
- self.expect_failure = expect_failure
- self.guid = guid
- self.autoconnect = autoconnect
-
-
- def __str__(self):
- """Returns a formatted string of member parameters"""
- return pprint.pformat(self.__dict__)
-
-
-class AssociationResult(xmlrpc_types.XmlRpcStruct):
- """Describes the result of an association attempt."""
-
- def __init__(self, success=False, discovery_time=-1.0,
- association_time=-1.0, configuration_time=-1.0,
- failure_reason='unknown'):
- """Construct an AssociationResult.
-
- @param success bool True iff we were successful in connecting to
- this WiFi network.
- @param discovery_time int number of seconds it took to find and call
- connect on a network from the time the proxy is told to connect.
- This includes scanning time.
- @param association_time int number of seconds it takes from the moment
- that we call connect to the moment we're fully associated with
- the BSS. This includes wpa handshakes.
- @param configuration_time int number of seconds it takes from
- association till we have an IP address and mark the network as
- being either online or portalled.
- @param failure_reason int holds a descriptive reason for why the
- negotiation failed when |successs| is False. Undefined
- otherwise.
-
- """
- super(AssociationResult, self).__init__()
- self.success = success
- self.discovery_time = discovery_time
- self.association_time = association_time
- self.configuration_time = configuration_time
- self.failure_reason = failure_reason
-
-
- @staticmethod
- def from_dbus_proxy_output(raw):
- """Factory for AssociationResult.
-
- The object which knows how to talk over DBus to shill is not part of
- autotest and as a result can't return a AssociationResult. Instead,
- it returns a similar looing tuple, which we'll parse.
-
- @param raw tuple from ShillProxy.
- @return AssociationResult parsed output from ShillProxy.
-
- """
- return AssociationResult(success=raw[0],
- discovery_time=raw[1],
- association_time=raw[2],
- configuration_time=raw[3],
- failure_reason=raw[4])
-
-
-class BgscanConfiguration(xmlrpc_types.XmlRpcStruct):
- """Describes how to configure wpa_supplicant on a DUT."""
-
- # Clears shill's bgscan method property on the WiFi device.
- # This causes shill to choose between simple and no bgscan
- # depending on the number of visible BSS's for a network.
- SCAN_METHOD_DEFAULT = 'default'
- # Disables background scan entirely.
- SCAN_METHOD_NONE = 'none'
- # A periodic background scan based on signal strength.
- SCAN_METHOD_SIMPLE = 'simple'
-
- # These three parameters come out shill's wifi.cc.
- # and refer to inputs to the simple scanning method.
- DEFAULT_SHORT_INTERVAL_SECONDS = 64
- DEFAULT_LONG_INTERVAL_SECONDS = 60
- DEFAULT_SIGNAL_THRESHOLD = -72
-
- def __init__(self, interface=None, signal=DEFAULT_SIGNAL_THRESHOLD,
- short_interval=DEFAULT_SHORT_INTERVAL_SECONDS,
- long_interval=DEFAULT_LONG_INTERVAL_SECONDS,
- method=SCAN_METHOD_DEFAULT):
- """Construct a BgscanConfiguration.
-
- @param interface string interface to configure (e.g. wlan0).
- @param signal int signal threshold to scan below.
- @param short_interval int wpa_supplicant short scanning interval.
- @param long_interval int wpa_supplicant normal scanning interval.
- @param method string a valid wpa_supplicant scanning algorithm (e.g.
- any of SCAN_METHOD_* above).
-
- """
- super(BgscanConfiguration, self).__init__()
- self.interface = interface
- self.signal = signal
- self.short_interval = short_interval
- self.long_interval = long_interval
- self.method = method
-
-
- def set_auto_signal(self, signal_average, signal_offset=None,
- signal_noise=None):
- """Set the signal threshold automatically from observed parameters.
-
- @param signal_average int average signal level.
- @param signal_offset int amount to adjust the average by.
- @param signal_noise int amount of background noise observed.
-
- """
- signal = signal_average
- if signal_offset:
- signal += signal_offset
- if signal_noise:
- # Compensate for real noise vs standard estimate
- signal -= 95 + signal_noise
- logging.debug('Setting signal via auto configuration: '
- 'avg=%d, offset=%r, noise=%r => signal=%d.',
- signal_average, signal_offset, signal_noise, signal)
- self.signal = signal
-
-
-class ConfigureServiceParameters(xmlrpc_types.XmlRpcStruct):
- """Describes a group of optional settings for use with ConfigureService.
-
- The Manager in shill has a method ConfigureService which takes a dictionary
- of parameters, and uses some of them to look up a service, and sets the
- remainder of the properties on the service. This struct represents
- some of the optional parameters that can be set in this way. Current
- consumers of this interface look up the service by GUID.
-
- """
-
- def __init__(self, guid, passphrase=None, autoconnect=None):
- """Construct a ConfigureServiceParameters.
-
- @param guid string GUID of the service to configure.
- @param passphrase string optional psk passphrase.
- @param autoconnect: bool or None. None indicates that this should not
- be set one way or the other, while a boolean indicates a desired
- value.
-
- """
- super(ConfigureServiceParameters, self).__init__()
- self.guid = guid
- self.passphrase = passphrase
- self.autoconnect = autoconnect
diff --git a/client/common_lib/cros/network/xmlrpc_security_types.py b/client/common_lib/cros/network/xmlrpc_security_types.py
deleted file mode 100644
index e23bffa..0000000
--- a/client/common_lib/cros/network/xmlrpc_security_types.py
+++ /dev/null
@@ -1,692 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import logging
-import os
-import random
-import six
-from six.moves import map
-from six.moves import range
-import stat
-import string
-import sys
-import tempfile
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.cros import xmlrpc_types
-
-
-def deserialize(serialized):
- """Deserialize a SecurityConfig.
-
- @param serialized dict representing a serialized SecurityConfig.
- @return a SecurityConfig object built from |serialized|.
-
- """
- return xmlrpc_types.deserialize(serialized, module=sys.modules[__name__])
-
-
-class SecurityConfig(xmlrpc_types.XmlRpcStruct):
- """Abstracts the security configuration for a WiFi network.
-
- This bundle of credentials can be passed to both HostapConfig and
- AssociationParameters so that both shill and hostapd can set up and connect
- to an encrypted WiFi network. By default, we'll assume we're connecting
- to an open network.
-
- """
- SERVICE_PROPERTY_PASSPHRASE = 'Passphrase'
-
- def __init__(self, security='none'):
- super(SecurityConfig, self).__init__()
- self.security = security
-
-
- def get_hostapd_config(self):
- """@return dict fragment of hostapd configuration for security."""
- return {}
-
-
- def get_shill_service_properties(self):
- """@return dict of shill service properties."""
- return {}
-
-
- def get_wpa_cli_properties(self):
- """@return dict values to be set with wpa_cli set_network."""
- return {'key_mgmt': 'NONE'}
-
-
- def install_router_credentials(self, host, install_dir):
- """Install the necessary credentials on the router.
-
- @param host host object representing the router.
- @param install_dir the directory on host to install the files.
-
- """
- pass # Many authentication methods have no special router credentials.
-
-
- def install_client_credentials(self, tpm_store):
- """Install credentials on the local host (hopefully a DUT).
-
- Only call this if we're running on a DUT in a WiFi test. This
- method can do things like install credentials into the TPM.
-
- @param tpm_store TPMStore object representing the TPM on our DUT.
-
- """
- pass # Many authentication methods have no special client credentials.
-
-
- def __repr__(self):
- return '%s(%s)' % (self.__class__.__name__, ', '.join(
- ['%s=%r' % item for item in six.iteritems(vars(self))]))
-
-
-class WEPConfig(SecurityConfig):
- """Abstracts security configuration for a WiFi network using static WEP."""
- # Open system authentication means that we don't do a 4 way AUTH handshake,
- # and simply start using the WEP keys after association finishes.
- AUTH_ALGORITHM_OPEN = 1
- # This refers to a mode where the AP sends a plaintext challenge and the
- # client sends back the challenge encrypted with the WEP key as part of a 4
- # part auth handshake.
- AUTH_ALGORITHM_SHARED = 2
- AUTH_ALGORITHM_DEFAULT = AUTH_ALGORITHM_OPEN
-
- @staticmethod
- def _format_key(key, ascii_key_formatter):
- """Returns a key formatted to for its appropriate consumer.
-
- Both hostapd and wpa_cli want their ASCII encoded WEP keys formatted
- in a particular way. Hex string on the other hand can be given raw.
- Other key formats aren't even accepted, and this method will raise
- and exception if it sees such a key.
-
- @param key string a 40/104 bit WEP key.
- @param ascii_key_formatter converter function that escapes a WEP
- string-encoded passphrase. This conversion varies in format
- depending on the consumer.
- @return string corrected formatted WEP key.
-
- """
- if len(key) in (5, 13):
- # These are 'ASCII' strings, or at least N-byte strings
- # of the right size.
- return ascii_key_formatter(key)
-
- if len(key) in (10, 26):
- # These are hex encoded byte strings.
- return key
-
- raise error.TestFail('Invalid WEP key: %r' % key)
-
-
- def __init__(self, wep_keys, wep_default_key=0,
- auth_algorithm=AUTH_ALGORITHM_DEFAULT):
- """Construct a WEPConfig object.
-
- @param wep_keys list of string WEP keys.
- @param wep_default_key int 0 based index into |wep_keys| for the default
- key.
- @param auth_algorithm int bitfield of AUTH_ALGORITHM_* defined above.
-
- """
- super(WEPConfig, self).__init__(security='wep')
- self.wep_keys = wep_keys
- self.wep_default_key = wep_default_key
- self.auth_algorithm = auth_algorithm
- if self.auth_algorithm & ~(self.AUTH_ALGORITHM_OPEN |
- self.AUTH_ALGORITHM_SHARED):
- raise error.TestFail('Invalid authentication mode specified (%d).' %
- self.auth_algorithm)
-
- if self.wep_keys and len(self.wep_keys) > 4:
- raise error.TestFail('More than 4 WEP keys specified (%d).' %
- len(self.wep_keys))
-
-
- def get_hostapd_config(self):
- """@return dict fragment of hostapd configuration for security."""
- ret = {}
- quote = lambda x: '"%s"' % x
- for idx,key in enumerate(self.wep_keys):
- ret['wep_key%d' % idx] = self._format_key(key, quote)
- ret['wep_default_key'] = self.wep_default_key
- ret['auth_algs'] = self.auth_algorithm
- return ret
-
-
- def get_shill_service_properties(self):
- """@return dict of shill service properties."""
- return {self.SERVICE_PROPERTY_PASSPHRASE: '%d:%s' % (
- self.wep_default_key,
- self.wep_keys[self.wep_default_key])}
-
-
- def get_wpa_cli_properties(self):
- properties = super(WEPConfig, self).get_wpa_cli_properties()
- quote = lambda x: '\\"%s\\"' % x
- for idx, key in enumerate(self.wep_keys):
- properties['wep_key%d' % idx] = self._format_key(key, quote)
- properties['wep_tx_keyidx'] = self.wep_default_key
- if self.auth_algorithm == self.AUTH_ALGORITHM_SHARED:
- properties['auth_alg'] = 'SHARED'
- return properties
-
-
-class WPAConfig(SecurityConfig):
- """Abstracts security configuration for a WPA encrypted WiFi network."""
-
- # We have the option of turning on combinations of WPA, WPA2, or WPA3 via a
- # bitfield.
- MODE_PURE_WPA = 1
- MODE_PURE_WPA2 = 2
- MODE_PURE_WPA3 = 4
- MODE_MIXED_WPA = MODE_PURE_WPA | MODE_PURE_WPA2
- MODE_MIXED_WPA3 = MODE_PURE_WPA2 | MODE_PURE_WPA3
- MODE_DEFAULT = MODE_MIXED_WPA
-
- # WPA2 mandates the use of AES in CCMP mode.
- # WPA allows the use of 'ordinary' AES, but mandates support for TKIP.
- # The protocol however seems to indicate that you just list a bunch of
- # different ciphers that you support and we'll start speaking one.
- CIPHER_CCMP = 'CCMP'
- CIPHER_TKIP = 'TKIP'
-
- # Fast Transition (FT) mode for WPA network.
- FT_MODE_NONE = 1
- FT_MODE_PURE = 2
- FT_MODE_MIXED = FT_MODE_NONE | FT_MODE_PURE
- FT_MODE_DEFAULT = FT_MODE_NONE
-
- def __init__(self, psk='', wpa_mode=MODE_DEFAULT, wpa_ciphers=[],
- wpa2_ciphers=[], wpa_ptk_rekey_period=None,
- wpa_gtk_rekey_period=None, wpa_gmk_rekey_period=None,
- use_strict_rekey=None, ft_mode=FT_MODE_NONE):
- """Construct a WPAConfig.
-
- @param psk string a passphrase (64 hex characters or an ASCII phrase up
- to 63 characters long).
- @param wpa_mode int one of MODE_* above.
- @param wpa_ciphers list of ciphers to advertise in the WPA IE.
- @param wpa2_ciphers list of ciphers to advertise in the WPA2 IE.
- hostapd will fall back on WPA ciphers for WPA2 if this is
- left unpopulated.
- @param wpa_ptk_rekey_period int number of seconds between PTK rekeys.
- @param wpa_gtk_rekey_period int number of second between GTK rekeys.
- @param wpa_gmk_rekey_period int number of seconds between GMK rekeys.
- The GMK is a key internal to hostapd used to generate GTK.
- It is the 'main' key.
- @param use_strict_rekey bool True iff hostapd should refresh the GTK
- whenever any client leaves the group.
- @param ft_mode int one of the FT_MODE_* in SecurityConfig.
-
- """
- super(WPAConfig, self).__init__(security='psk')
- self.psk = psk
- self.wpa_mode = wpa_mode
- self.wpa_ciphers = wpa_ciphers
- self.wpa2_ciphers = wpa2_ciphers
- self.wpa_ptk_rekey_period = wpa_ptk_rekey_period
- self.wpa_gtk_rekey_period = wpa_gtk_rekey_period
- self.wpa_gmk_rekey_period = wpa_gmk_rekey_period
- self.use_strict_rekey = use_strict_rekey
- self.ft_mode = ft_mode
- if len(psk) > 64:
- raise error.TestFail('WPA passphrases can be no longer than 63 '
- 'characters (or 64 hex digits).')
-
- if len(psk) == 64:
- for c in psk:
- if c not in '0123456789abcdefABCDEF':
- raise error.TestFail('Invalid PMK: %r' % psk)
-
-
- def get_hostapd_config(self):
- """@return dict fragment of hostapd configuration for security."""
- mode = 0
- # WPA2 and WPA3 are both RSN, so hostapd lumps these together for wpa=.
- if self.wpa_mode & (self.MODE_PURE_WPA2 | self.MODE_PURE_WPA3):
- mode |= self.MODE_PURE_WPA2
- # WPA.
- if self.wpa_mode & self.MODE_PURE_WPA:
- mode |= self.MODE_PURE_WPA
- if not mode:
- raise error.TestFail('Cannot configure WPA unless we know which '
- 'mode to use.')
-
- if mode & self.MODE_PURE_WPA and not self.wpa_ciphers:
- raise error.TestFail('Cannot configure WPA unless we know which '
- 'ciphers to use.')
-
- if not self.wpa_ciphers and not self.wpa2_ciphers:
- raise error.TestFail('Cannot configure WPA2 unless we have some '
- 'ciphers.')
-
- key_mgmt = []
- if self.ft_mode & self.FT_MODE_NONE:
- if self.wpa_mode & self.MODE_MIXED_WPA:
- key_mgmt += ['WPA-PSK']
- if self.wpa_mode & self.MODE_PURE_WPA3:
- key_mgmt += ['SAE']
- if self.ft_mode & self.FT_MODE_PURE:
- if self.wpa_mode & self.MODE_MIXED_WPA:
- key_mgmt += ['FT-PSK']
- if self.wpa_mode & self.MODE_PURE_WPA3:
- key_mgmt += ['FT-SAE']
-
- ret = {'wpa': mode, 'wpa_key_mgmt': ' '.join(key_mgmt)}
-
- if len(self.psk) == 64:
- ret['wpa_psk'] = self.psk
- else:
- ret['wpa_passphrase'] = self.psk
-
- if self.wpa_ciphers:
- ret['wpa_pairwise'] = ' '.join(self.wpa_ciphers)
- if self.wpa2_ciphers:
- ret['rsn_pairwise'] = ' '.join(self.wpa2_ciphers)
- if self.wpa_ptk_rekey_period:
- ret['wpa_ptk_rekey'] = self.wpa_ptk_rekey_period
- if self.wpa_gtk_rekey_period:
- ret['wpa_group_rekey'] = self.wpa_gtk_rekey_period
- if self.wpa_gmk_rekey_period:
- ret['wpa_gmk_rekey'] = self.wpa_gmk_rekey_period
- if self.use_strict_rekey:
- ret['wpa_strict_rekey'] = 1
- return ret
-
-
- def get_shill_service_properties(self):
- """@return dict of shill service properties."""
- ret = {self.SERVICE_PROPERTY_PASSPHRASE: self.psk}
- return ret
-
-
- def get_wpa_cli_properties(self):
- properties = super(WPAConfig, self).get_wpa_cli_properties()
- # TODO(wiley) This probably doesn't work for raw PMK.
- protos = []
- if self.wpa_mode & self.MODE_PURE_WPA:
- protos.append('WPA')
- if self.wpa_mode & (self.MODE_PURE_WPA2 | self.MODE_PURE_WPA3):
- protos.append('RSN')
- key_mgmt = []
- if self.ft_mode & self.FT_MODE_NONE:
- if self.wpa_mode & self.MODE_MIXED_WPA:
- key_mgmt += ['WPA-PSK']
- if self.wpa_mode & self.MODE_PURE_WPA3:
- key_mgmt += ['SAE']
- if self.ft_mode & self.FT_MODE_PURE:
- if self.wpa_mode & self.MODE_MIXED_WPA:
- key_mgmt += ['FT-PSK']
- if self.wpa_mode & self.MODE_PURE_WPA3:
- key_mgmt += ['FT-SAE']
- properties.update({
- 'psk': '\\"%s\\"' % self.psk,
- 'key_mgmt': ' '.join(key_mgmt),
- 'proto': ' '.join(protos)
- })
- return properties
-
-
-class EAPConfig(SecurityConfig):
- """Abstract superclass that implements certificate/key installation."""
-
- DEFAULT_EAP_USERS = '* TLS'
- DEFAULT_EAP_IDENTITY = 'chromeos'
-
- SERVICE_PROPERTY_CA_CERT_PEM = 'EAP.CACertPEM'
- SERVICE_PROPERTY_CLIENT_CERT_ID = 'EAP.CertID'
- SERVICE_PROPERTY_EAP_IDENTITY = 'EAP.Identity'
- SERVICE_PROPERTY_EAP_KEY_MGMT = 'EAP.KeyMgmt'
- SERVICE_PROPERTY_EAP_PASSWORD = 'EAP.Password'
- SERVICE_PROPERTY_EAP_PIN = 'EAP.PIN'
- SERVICE_PROPERTY_INNER_EAP= 'EAP.InnerEAP'
- SERVICE_PROPERTY_PRIVATE_KEY_ID = 'EAP.KeyID'
- SERVICE_PROPERTY_USE_SYSTEM_CAS = 'EAP.UseSystemCAs'
- SERVICE_PROPERTY_ALTSUBJECT_MATCH = 'EAP.SubjectAlternativeNameMatch'
-
- last_tpm_id = 8800
-
- # Credential file prefixes.
- SERVER_CA_CERT_FILE_PREFIX = 'hostapd_ca_cert_file.'
- SERVER_CERT_FILE_PREFIX = 'hostapd_cert_file.'
- SERVER_KEY_FILE_PREFIX = 'hostapd_key_file.'
- SERVER_EAP_USER_FILE_PREFIX = 'hostapd_eap_user_file.'
-
- @staticmethod
- def reserve_TPM_id():
- """@return session unique TPM identifier."""
- ret = str(EAPConfig.last_tpm_id)
- EAPConfig.last_tpm_id += 1
- return ret
-
-
- def __init__(self, security='802_1x', file_suffix=None, use_system_cas=None,
- server_ca_cert=None, server_cert=None, server_key=None,
- server_eap_users=None,
- client_ca_cert=None, client_cert=None, client_key=None,
- client_cert_id=None, client_key_id=None,
- eap_identity=None, ft_mode=WPAConfig.FT_MODE_DEFAULT,
- altsubject_match=None):
- """Construct an EAPConfig.
-
- @param file_suffix string unique file suffix on DUT.
- @param use_system_cas False iff we should ignore server certificates.
- @param server_ca_cert string PEM encoded CA certificate for the server.
- @param server_cert string PEM encoded identity certificate for server.
- @param server_key string PEM encoded private key for server.
- @param server_eap_users string contents of EAP user file.
- @param client_ca_cert string PEM encoded CA certificate for client.
- @param client_cert string PEM encoded identity certificate for client.
- @param client_key string PEM encoded private key for client.
- @param client_cert_id string identifier for client certificate in TPM.
- @param client_key_id string identifier for client private key in TPM.
- @param eap_identity string user to authenticate as during EAP.
- @param ft_mode int one of the FT_MODE_* in SecurityConfig.
- @param altsubject_match list of strings in the format of shill
- EAP.SubjectAlternativeNameMatch property.
-
- """
- super(EAPConfig, self).__init__(security=security)
- self.use_system_cas = use_system_cas
- self.server_ca_cert = server_ca_cert
- self.server_cert = server_cert
- self.server_key = server_key
- self.server_eap_users = server_eap_users or self.DEFAULT_EAP_USERS
- self.client_ca_cert = client_ca_cert
- self.client_cert = client_cert
- self.client_key = client_key
- if file_suffix is None:
- suffix_letters = string.ascii_lowercase + string.digits
- file_suffix = ''.join(random.choice(suffix_letters)
- for x in range(10))
- logging.debug('Choosing unique file_suffix %s.', file_suffix)
- # The key paths will be determined in install_router_credentials.
- self.server_ca_cert_file = None
- self.server_cert_file = None
- self.server_key_file = None
- self.server_eap_user_file = None
- # While these paths won't make it across the network, the suffix will.
- self.file_suffix = file_suffix
- self.client_cert_id = client_cert_id or self.reserve_TPM_id()
- self.client_key_id = client_key_id or self.reserve_TPM_id()
- # This gets filled in at install time.
- self.pin = None
- # The slot where the certificate/key are installed in the TPM.
- self.client_cert_slot_id = None
- self.client_key_slot_id = None
- self.eap_identity = eap_identity or self.DEFAULT_EAP_IDENTITY
- self.ft_mode = ft_mode
- self.altsubject_match = altsubject_match
-
-
- def install_router_credentials(self, host, install_dir):
- """Install the necessary credentials on the router.
-
- @param host host object representing the router.
-
- """
- self.server_ca_cert_file = os.path.join(
- install_dir, self.SERVER_CA_CERT_FILE_PREFIX + self.file_suffix)
- self.server_cert_file = os.path.join(
- install_dir, self.SERVER_CERT_FILE_PREFIX + self.file_suffix)
- self.server_key_file = os.path.join(
- install_dir, self.SERVER_KEY_FILE_PREFIX + self.file_suffix)
- self.server_eap_user_file = os.path.join(
- install_dir, self.SERVER_EAP_USER_FILE_PREFIX + self.file_suffix)
-
- files = [(self.server_ca_cert, self.server_ca_cert_file),
- (self.server_cert, self.server_cert_file),
- (self.server_key, self.server_key_file),
- (self.server_eap_users, self.server_eap_user_file)]
- for content, path in files:
- # If we omit a parameter, just omit copying a file over.
- if content is None:
- continue
- # Write the contents to local disk first so we can use the easy
- # built in mechanism to do this.
- with tempfile.NamedTemporaryFile() as f:
- f.write(content)
- f.flush()
- os.chmod(f.name, stat.S_IRUSR | stat.S_IWUSR |
- stat.S_IRGRP | stat.S_IWGRP |
- stat.S_IROTH | stat.S_IWOTH)
- host.send_file(f.name, path, delete_dest=True)
-
-
- def install_client_credentials(self, tpm_store):
- """Install credentials on the local host (hopefully a DUT).
-
- Only call this if we're running on a DUT in a WiFi test. This
- method can do things like install credentials into the TPM.
-
- @param tpm_store TPMStore object representing the TPM on our DUT.
-
- """
- if self.client_cert:
- tpm_store.install_certificate(self.client_cert, self.client_cert_id)
- self.client_cert_slot_id = tpm_store.SLOT_ID
- self.pin = tpm_store.PIN
- if self.client_key:
- tpm_store.install_private_key(self.client_key, self.client_key_id)
- self.client_key_slot_id = tpm_store.SLOT_ID
- self.pin = tpm_store.PIN
-
-
- def get_shill_service_properties(self):
- """@return dict of shill service properties."""
- ret = {self.SERVICE_PROPERTY_EAP_IDENTITY: self.eap_identity}
- if self.pin:
- ret[self.SERVICE_PROPERTY_EAP_PIN] = self.pin
- if self.client_ca_cert:
- # Technically, we could accept a list of certificates here, but we
- # have no such tests.
- ret[self.SERVICE_PROPERTY_CA_CERT_PEM] = [self.client_ca_cert]
- if self.client_cert:
- ret[self.SERVICE_PROPERTY_CLIENT_CERT_ID] = (
- '%s:%s' % (self.client_cert_slot_id, self.client_cert_id))
- if self.client_key:
- ret[self.SERVICE_PROPERTY_PRIVATE_KEY_ID] = (
- '%s:%s' % (self.client_key_slot_id, self.client_key_id))
- if self.use_system_cas is not None:
- ret[self.SERVICE_PROPERTY_USE_SYSTEM_CAS] = self.use_system_cas
- if self.altsubject_match:
- ret[self.SERVICE_PROPERTY_ALTSUBJECT_MATCH] = self.altsubject_match
- return ret
-
-
- def get_hostapd_config(self):
- """@return dict fragment of hostapd configuration for security."""
- return {'ieee8021x': 1, # Enable 802.1x support.
- 'eap_server' : 1, # Do EAP inside hostapd to avoid RADIUS.
- 'ca_cert': self.server_ca_cert_file,
- 'server_cert': self.server_cert_file,
- 'private_key': self.server_key_file,
- 'eap_user_file': self.server_eap_user_file}
-
-
-class DynamicWEPConfig(EAPConfig):
- """Configuration settings bundle for dynamic WEP.
-
- This is a WEP encrypted connection where the keys are negotiated after the
- client authenticates via 802.1x.
-
- """
-
- DEFAULT_REKEY_PERIOD = 20
-
-
- def __init__(self, use_short_keys=False,
- wep_rekey_period=DEFAULT_REKEY_PERIOD,
- server_ca_cert=None, server_cert=None, server_key=None,
- client_ca_cert=None, client_cert=None, client_key=None,
- file_suffix=None, client_cert_id=None, client_key_id=None):
- """Construct a DynamicWEPConfig.
-
- @param use_short_keys bool force hostapd to use 40 bit WEP keys.
- @param wep_rekey_period int number of second between rekeys.
- @param server_ca_cert string PEM encoded CA certificate for the server.
- @param server_cert string PEM encoded identity certificate for server.
- @param server_key string PEM encoded private key for server.
- @param client_ca_cert string PEM encoded CA certificate for client.
- @param client_cert string PEM encoded identity certificate for client.
- @param client_key string PEM encoded private key for client.
- @param file_suffix string unique file suffix on DUT.
- @param client_cert_id string identifier for client certificate in TPM.
- @param client_key_id string identifier for client private key in TPM.
-
- """
- super(DynamicWEPConfig, self).__init__(
- security='wep', file_suffix=file_suffix,
- server_ca_cert=server_ca_cert, server_cert=server_cert,
- server_key=server_key, client_ca_cert=client_ca_cert,
- client_cert=client_cert, client_key=client_key,
- client_cert_id=client_cert_id, client_key_id=client_key_id)
- self.use_short_keys = use_short_keys
- self.wep_rekey_period = wep_rekey_period
-
-
- def get_hostapd_config(self):
- """@return dict fragment of hostapd configuration for security."""
- ret = super(DynamicWEPConfig, self).get_hostapd_config()
- key_len = 13 # 128 bit WEP, 104 secret bits.
- if self.use_short_keys:
- key_len = 5 # 64 bit WEP, 40 bits of secret.
- ret.update({'wep_key_len_broadcast': key_len,
- 'wep_key_len_unicast': key_len,
- 'wep_rekey_period': self.wep_rekey_period})
- return ret
-
-
- def get_shill_service_properties(self):
- """@return dict of shill service properties."""
- ret = super(DynamicWEPConfig, self).get_shill_service_properties()
- ret.update({self.SERVICE_PROPERTY_EAP_KEY_MGMT: 'IEEE8021X'})
- return ret
-
-
-class WPAEAPConfig(EAPConfig):
- """Security type to set up a WPA tunnel via EAP-TLS negotiation."""
-
- def __init__(self, file_suffix=None, use_system_cas=None,
- server_ca_cert=None, server_cert=None, server_key=None,
- client_ca_cert=None, client_cert=None, client_key=None,
- client_cert_id=None, client_key_id=None, eap_identity=None,
- server_eap_users=None, altsubject_match=None,
- wpa_mode=WPAConfig.MODE_PURE_WPA,
- ft_mode=WPAConfig.FT_MODE_DEFAULT):
- """Construct a DynamicWEPConfig.
-
- @param file_suffix string unique file suffix on DUT.
- @param use_system_cas False iff we should ignore server certificates.
- @param server_ca_cert string PEM encoded CA certificate for the server.
- @param server_cert string PEM encoded identity certificate for server.
- @param server_key string PEM encoded private key for server.
- @param client_ca_cert string PEM encoded CA certificate for client.
- @param client_cert string PEM encoded identity certificate for client.
- @param client_key string PEM encoded private key for client.
- @param client_cert_id string identifier for client certificate in TPM.
- @param client_key_id string identifier for client private key in TPM.
- @param eap_identity string user to authenticate as during EAP.
- @param server_eap_users string contents of server EAP users file.
- @param ft_mode int one of the FT_MODE_* in SecurityConfig
- @param altsubject_match list of strings in the format of shill
- EAP.SubjectAlternativeNameMatch property.
-
- """
- super(WPAEAPConfig, self).__init__(
- file_suffix=file_suffix, use_system_cas=use_system_cas,
- server_ca_cert=server_ca_cert, server_cert=server_cert,
- server_key=server_key, client_ca_cert=client_ca_cert,
- client_cert=client_cert, client_key=client_key,
- client_cert_id=client_cert_id, client_key_id=client_key_id,
- eap_identity=eap_identity, server_eap_users=server_eap_users,
- ft_mode=ft_mode, altsubject_match=altsubject_match)
- self.wpa_mode = wpa_mode
-
-
- def get_hostapd_config(self):
- """@return dict fragment of hostapd configuration for security."""
- ret = super(WPAEAPConfig, self).get_hostapd_config()
- # If we wanted to expand test coverage to WPA2/PEAP combinations
- # or particular ciphers, we'd have to let people set these
- # settings manually. But for now, do the simple thing.
- ret.update({'wpa': self.wpa_mode,
- 'wpa_pairwise': WPAConfig.CIPHER_CCMP,
- 'wpa_key_mgmt':'WPA-EAP'})
- if self.ft_mode == WPAConfig.FT_MODE_PURE:
- ret['wpa_key_mgmt'] = 'FT-EAP'
- elif self.ft_mode == WPAConfig.FT_MODE_MIXED:
- ret['wpa_key_mgmt'] = 'WPA-EAP FT-EAP'
- return ret
-
-
-class Tunneled1xConfig(WPAEAPConfig):
- """Security type to set up a TTLS/PEAP connection.
-
- Both PEAP and TTLS are tunneled protocols which use EAP inside of a TLS
- secured tunnel. The secured tunnel is a symmetric key encryption scheme
- negotiated under the protection of a public key in the server certificate.
- Thus, we'll see server credentials in the form of certificates, but client
- credentials in the form of passwords and a CA Cert to root the trust chain.
-
- """
-
- TTLS_PREFIX = 'TTLS-'
-
- LAYER1_TYPE_PEAP = 'PEAP'
- LAYER1_TYPE_TTLS = 'TTLS'
-
- LAYER2_TYPE_GTC = 'GTC'
- LAYER2_TYPE_MSCHAPV2 = 'MSCHAPV2'
- LAYER2_TYPE_MD5 = 'MD5'
- LAYER2_TYPE_TTLS_MSCHAPV2 = TTLS_PREFIX + 'MSCHAPV2'
- LAYER2_TYPE_TTLS_MSCHAP = TTLS_PREFIX + 'MSCHAP'
- LAYER2_TYPE_TTLS_PAP = TTLS_PREFIX + 'PAP'
-
- def __init__(self, server_ca_cert, server_cert, server_key,
- client_ca_cert, eap_identity, password,
- outer_protocol=LAYER1_TYPE_PEAP,
- inner_protocol=LAYER2_TYPE_MD5,
- client_password=None, file_suffix=None,
- altsubject_match=None):
- self.password = password
- if client_password is not None:
- # Override the password used on the client. This lets us set
- # bad passwords for testing. However, we use the real password
- # below for the server config.
- self.password = client_password
- self.inner_protocol = inner_protocol
- # hostapd wants these surrounded in double quotes.
- quote = lambda x: '"' + x + '"'
- eap_users = list(map(' '.join, [('*', outer_protocol),
- (quote(eap_identity), inner_protocol, quote(password), '[2]')]))
- super(Tunneled1xConfig, self).__init__(
- server_ca_cert=server_ca_cert,
- server_cert=server_cert,
- server_key=server_key,
- server_eap_users='\n'.join(eap_users),
- client_ca_cert=client_ca_cert,
- eap_identity=eap_identity,
- file_suffix=file_suffix,
- altsubject_match=altsubject_match)
-
-
- def get_shill_service_properties(self):
- """@return dict of shill service properties."""
- ret = super(Tunneled1xConfig, self).get_shill_service_properties()
- ret.update({self.SERVICE_PROPERTY_EAP_PASSWORD: self.password})
- if self.inner_protocol.startswith(self.TTLS_PREFIX):
- auth_str = 'auth=' + self.inner_protocol[len(self.TTLS_PREFIX):]
- ret.update({self.SERVICE_PROPERTY_INNER_EAP: auth_str})
- return ret
diff --git a/client/common_lib/cros/perf_stat_lib.py b/client/common_lib/cros/perf_stat_lib.py
deleted file mode 100644
index 7c4a71d..0000000
--- a/client/common_lib/cros/perf_stat_lib.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from math import ceil, floor, sqrt
-
-
-def get_kth_percentile(num_list, k):
- """
- Computes the k-th percentile of a list of numbers.
-
- @param num_list: list with numbers.
- @param k: the percentile to be computed.
-
- @returns the kth percentile value of the list.
-
- """
- if not num_list:
- return 0
- assert k >= 0 and k <= 1
- i = k * (len(num_list) - 1)
- c, f = int(ceil(i)), int(floor(i))
- if c == f:
- return num_list[c]
- return round((i - f) * num_list[c] + (c - i) * num_list[f], 2)
-
-
-def get_median(num_list):
- """
- Computes the median of a list of numbers.
-
- @param num_list: a list with numbers.
-
- @returns the median value of the numbers.
-
- """
- if not num_list:
- return 0
- num_list.sort()
- size = len(num_list)
- if size % 2 != 0:
- return num_list[size / 2]
- return round((num_list[size / 2] + num_list[size / 2 - 1]) / 2.0, 2)
-
-
-def get_average(num_list):
- """
- Computes mean of a list of numbers.
-
- @param num_list: a list with numbers.
-
- @returns the average value computed from the list of numbers.
-
- """
- if not num_list:
- return 0
- return round(float(sum(num_list)) / len(num_list) , 2)
-
-
-def get_std_dev(num_list):
- """
- Computes standard deviation of a list of numbers.
-
- @param num_list: a list with numbers.
-
- @returns Standard deviation computed from the list of numbers.
-
- """
- n = len(num_list)
- if not num_list or n == 1:
- return 0
- mean = float(sum(num_list)) / n
- variance = sum([(elem - mean) ** 2 for elem in num_list]) / (n -1)
- return round(sqrt(variance), 2)
\ No newline at end of file
diff --git a/client/common_lib/cros/pinweaver_client.py b/client/common_lib/cros/pinweaver_client.py
deleted file mode 100644
index 563289f..0000000
--- a/client/common_lib/cros/pinweaver_client.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-import logging
-
-from subprocess import CalledProcessError
-
-
-class PinWeaverNotAvailableError(CalledProcessError):
- """This exception is thrown when pinweaver_client reports that the PinWeaver
- feature is not available.
- """
-
- def __init__(self, *args, **kwargs):
- super(PinWeaverNotAvailableError, self).__init__(*args, **kwargs)
-
-
-def __check_pinweaver_client_present(client, message):
- cmd = 'which pinweaver_client'
- run = client.run('which pinweaver_client', ignore_status=True)
- if run.exit_status != 0: # pinweaver_client isn't present.
- raise PinWeaverNotAvailableError(run.exit_status, cmd, message);
-
-def __execute_for_dict(client, *args, **kwargs):
- """Executes a command with the specified args and parses stdout as JSON
- based on the expected output of pinweaver_client.
- """
- __check_pinweaver_client_present(client, args[0])
-
- result = {}
- stack = [result]
- if 'ignore_status' not in kwargs:
- kwargs['ignore_status'] = True
- run = client.run(*args, **kwargs)
- if run.exit_status == 2: # EXIT_PINWEAVER_NOT_SUPPORTED
- raise PinWeaverNotAvailableError(run.exit_status, args[0]);
- logging.debug(args)
- logging.info(run.stderr)
- logging.debug(run.stdout)
- return json.loads(run.stdout)
-
-
-def ResetTree(client, bits_per_level, height):
- """Returns a dictionary with keys result_code and root_hash.
-
- @param client: client object to run commands on.
- """
- return __execute_for_dict(client, 'pinweaver_client resettree %d %d' %
- (bits_per_level, height))
-
-
-def InsertLeaf(client, label, auxilary_hashes, low_entropy_secret,
- high_entropy_secret, reset_secret, delay_schedule):
- """Returns a dictionary with keys result_code, root_hash, cred_metadata,
- and mac.
-
- @param client: client object to run commands on.
- """
- return __execute_for_dict(
- client, 'pinweaver_client insert %d %s %s %s %s %s' %
- (label, auxilary_hashes, low_entropy_secret,
- high_entropy_secret, reset_secret, delay_schedule))
-
-
-def RemoveLeaf(client, label, auxilary_hashes, mac):
- """Returns a dictionary with keys result_code and root_hash.
-
- @param client: client object to run commands on.
- """
- return __execute_for_dict(
- client, 'pinweaver_client remove %d %s %s' %
- (label, auxilary_hashes, mac))
-
-
-def TryAuth(client, auxilary_hashes, low_entropy_secret, cred_metadata):
- """Returns a dictionary with keys result_code, root_hash, cred_metadata,
- mac, and he_secret.
-
- @param client: client object to run commands on.
- """
- return __execute_for_dict(
- client, 'pinweaver_client auth %s %s %s' %
- (auxilary_hashes, low_entropy_secret, cred_metadata))
-
-
-def ResetAuth(client, auxilary_hashes, reset_secret, cred_metadata):
- """Returns a dictionary with keys result_code, root_hash, cred_metadata,
- mac, and he_secret.
-
- @param client: client object to run commands on.
- """
- return __execute_for_dict(
- client, 'pinweaver_client resetleaf %s %s %s' %
- (auxilary_hashes, reset_secret, cred_metadata))
-
-
-def GetLog(client, root=None):
- """Returns a dictionary with keys result_code, root_hash, and a list of
- entry[#] sub dictionaries for each log entry.
-
- @param client: client object to run commands on.
- @param root: root hash of the log entry to search for.
- """
- if root is None:
- root = ('0' * 64)
-
- return __execute_for_dict(client, 'pinweaver_client getlog %s' % (root))
-
-
-def LogReplay(client, auxilary_hashes, log_root, cred_metadata):
- """Returns a dictionary with keys result_code, root_hash, cred_metadata,
- and mac.
-
- @param client: client object to run commands on.
- """
- return __execute_for_dict(
- client, 'pinweaver_client replay %d %s %s %s' %
- (auxilary_hashes, log_root, cred_metadata))
-
-
-def SelfTest(client):
- """Returns True if the test succeeded.
-
- @param client: client object to run commands on.
- """
- cmd = 'pinweaver_client selftest'
- __check_pinweaver_client_present(client, cmd)
-
- run = client.run(cmd)
- if run.exit_status == -2:
- raise PinWeaverNotAvailableError(run.exit_status, cmd);
- output = run.stdout
- return "Success!" in output
diff --git a/client/common_lib/cros/policy.py b/client/common_lib/cros/policy.py
deleted file mode 100644
index d22dc7f..0000000
--- a/client/common_lib/cros/policy.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import dbus, gobject, os, sys
-
-import common
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.cros import session_manager
-from autotest_lib.client.cros import ownership
-
-"""Utility class for tests that generate, push and fetch policies.
-
-As the python bindings for the protobufs used in policies are built as a part
-of tests that use them, callers must pass in their location at call time."""
-
-
-def install_protobufs(autodir, job):
- """Installs policy protobuf dependencies and set import path.
-
- After calling this, you can simply import any policy pb2.py file directly,
- e.g. import chrome_device_policy_pb2.
-
- @param autodir: Autotest directory (usually the caller's self.autodir).
- @param job: Job instance (usually the caller's self.job).
- """
- # TODO(crbug.com/807950): Change the installation process so that policy
- # proto imports can be moved to the top.
- dep = 'policy_protos'
- dep_dir = os.path.join(autodir, 'deps', dep)
- job.install_pkg(dep, 'dep', dep_dir)
- sys.path.append(dep_dir)
-
-
-def compare_policy_response(policy_response, owner=None, guests=None,
- new_users=None, roaming=None):
- """Check the contents of |policy_response| against given args.
-
- Deserializes |policy_response| into a PolicyFetchResponse protobuf,
- with an embedded (serialized) PolicyData protobuf that embeds a
- (serialized) ChromeDeviceSettingsProto, and checks to see if this
- protobuf turducken contains the information passed in.
-
- @param policy_response: string serialization of a PolicyData protobuf.
- @param owner: string representing the owner's name/account.
- @param guests: boolean indicating whether guests should be allowed.
- @param new_users: boolean indicating if user pods are on login screen.
- @param roaming: boolean indicating whether data roaming is enabled.
-
- @return True if |policy_response| has all the provided data, else False.
- """
- import chrome_device_policy_pb2
- import device_management_backend_pb2
-
- response_proto = device_management_backend_pb2.PolicyFetchResponse()
- response_proto.ParseFromString(policy_response)
- ownership.assert_has_policy_data(response_proto)
-
- data_proto = device_management_backend_pb2.PolicyData()
- data_proto.ParseFromString(response_proto.policy_data)
- ownership.assert_has_device_settings(data_proto)
- if owner: ownership.assert_username(data_proto, owner)
-
- settings = chrome_device_policy_pb2.ChromeDeviceSettingsProto()
- settings.ParseFromString(data_proto.policy_value)
- if guests: ownership.assert_guest_setting(settings, guests)
- if new_users: ownership.assert_show_users(settings, new_users)
- if roaming: ownership.assert_roaming(settings, roaming)
-
-
-def build_policy_data():
- """Generate and serialize a populated device policy protobuffer.
-
- Creates a PolicyData protobuf, with an embedded
- ChromeDeviceSettingsProto, containing the information passed in.
-
- @return serialization of the PolicyData proto that we build.
- """
- import chrome_device_policy_pb2
- import device_management_backend_pb2
-
- data_proto = device_management_backend_pb2.PolicyData()
- data_proto.policy_type = ownership.POLICY_TYPE
-
- settings = chrome_device_policy_pb2.ChromeDeviceSettingsProto()
-
- data_proto.policy_value = settings.SerializeToString()
- return data_proto.SerializeToString()
-
-
-def generate_policy(key, pubkey, policy, old_key=None):
- """Generate and serialize a populated, signed device policy protobuffer.
-
- Creates a protobuf containing the device policy |policy|, signed with
- |key|. Also includes the public key |pubkey|, signed with |old_key|
- if provided. If not, |pubkey| is signed with |key|. The protobuf
- is serialized to a string and returned.
-
- @param key: new policy signing key.
- @param pubkey: new public key to be signed and embedded in generated
- PolicyFetchResponse.
- @param policy: policy data to be embedded in generated PolicyFetchResponse.
- @param old_key: if provided, this implies the generated PolicyFetchRespone
- is intended to represent a key rotation. pubkey will be
- signed with this key before embedding.
-
- @return serialization of the PolicyFetchResponse proto that we build.
- """
- import device_management_backend_pb2
-
- if old_key == None:
- old_key = key
- policy_proto = device_management_backend_pb2.PolicyFetchResponse()
- policy_proto.policy_data = policy
- policy_proto.policy_data_signature = ownership.sign(key, policy)
- policy_proto.new_public_key = pubkey
- policy_proto.new_public_key_signature = ownership.sign(old_key, pubkey)
- return policy_proto.SerializeToString()
-
-
-def push_policy_and_verify(policy_string, sm):
- """Push a device policy to the session manager over DBus.
-
- The serialized device policy |policy_string| is sent to the session
- manager with the StorePolicyEx DBus call. Success of the store is
- validated by fetching the policy again and comparing.
-
- @param policy_string: serialized policy to push to the session manager.
- @param sm: a connected SessionManagerInterface.
-
- @raises error.TestFail if policy push failed.
- """
- listener = session_manager.OwnershipSignalListener(gobject.MainLoop())
- listener.listen_for_new_policy()
- descriptor = session_manager.make_device_policy_descriptor()
- sm.StorePolicyEx(descriptor,
- dbus.ByteArray(policy_string), byte_arrays=True)
- listener.wait_for_signals(desc='Policy push.')
-
- retrieved_policy = sm.RetrievePolicyEx(descriptor, byte_arrays=True)
- if retrieved_policy != policy_string:
- raise error.TestFail('Policy should not be %s' % retrieved_policy)
-
-
-def get_policy(sm):
- """Get a device policy from the session manager over DBus.
-
- Provided mainly for symmetry with push_policy_and_verify().
-
- @param sm: a connected SessionManagerInterface.
-
- @return Serialized PolicyFetchResponse.
- """
- return sm.RetrievePolicyEx(session_manager.make_device_policy_descriptor(),
- byte_arrays=True)
diff --git a/client/common_lib/cros/power_cycle_usb_util.py b/client/common_lib/cros/power_cycle_usb_util.py
deleted file mode 100755
index 10c36f7..0000000
--- a/client/common_lib/cros/power_cycle_usb_util.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Power cycle a usb port on DUT(device under test)."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import logging
-import os
-from six.moves import zip
-import time
-
-from autotest_lib.client.common_lib.cros.cfm.usb import usb_port_manager
-
-
-TOKEN_NEW_BUS = '/: '
-TOKEN_ROOT_DEVICE = '\n |__ '
-
-# On board guado, there are three gpios that control usb port power:
-# Front left usb port: 218, port number: 2
-# Front right usb port: 219, port number: 3
-# Rear dual usb ports: 209, port number: 5,6
-#
-# On board fizz, there are 5 usb ports and usb port power is controlled by EC
-# with user space command: ectool goioset USBx_ENABLE 0/1 (x from 1 to 5).
-PORT_NUM_DICT = {
- 'guado': {
- # USB 2.0.
- 'bus1': {
- 2: 'front_left',
- 3: 'front_right',
- 5: 'back_dual',
- 6: 'back_dual'
- },
- # USB 3.0.
- 'bus2': {
- 1: 'front_left',
- 2: 'front_right',
- 3: 'back_dual',
- 4: 'back_dual'
- }
- },
- 'fizz': {
- # USB 2.0.
- 'bus1': {
- 2: 'rear_right',
- 3: 'front_right',
- 4: 'front_left',
- 5: 'rear_left',
- 6: 'rear_middle'
- },
- # USB 3.0.
- 'bus2': {
- 2: 'rear_right',
- 3: 'front_right',
- 4: 'front_left',
- 5: 'rear_left',
- 6: 'rear_middle'
- }
- }
-}
-PORT_GPIO_DICT = {
- 'guado': {
- 'bus1': {
- 'front_left': 218,
- 'front_right': 219,
- 'back_dual': 209
- },
- 'bus2': {
- 'front_left': 218,
- 'front_right': 219,
- 'back_dual': 209
- }
- },
- 'fizz': {
- 'bus1': {
- 'rear_left': 1,
- 'rear_middle': 2,
- 'rear_right': 3,
- 'front_right': 4,
- 'front_left': 5
- },
- 'bus2': {
- 'rear_left': 1,
- 'rear_middle': 2,
- 'rear_right': 3,
- 'front_right': 4,
- 'front_left': 5
- }
- }
-}
-
-
-def power_cycle_usb_vidpid(dut, board, vid, pid, pause=1):
- """
- Power cycle a usb port on DUT via peripharel's VID and PID.
-
- When only the VID and PID of the peripharel is known, a search is needed
- to decide which port it connects to by its VID and PID and look up the gpio
- index according to the board and port number in the dictionary. Then the
- USB port is power cycled using the gpio number.
-
- @param dut: The handle of the device under test.
- @param board: Board name ('guado', etc.)
- @param vid: Vendor ID of the peripharel device.
- @param pid: Product ID of the peripharel device.
- @param pause: Time interval between power off and power on, unit is second.
-
- @raise KeyError if the target device wasn't found by given VID and PID.
-
- """
- bus_idx, port_idx = get_port_number_from_vidpid(dut, vid, pid)
- if port_idx is None:
- raise KeyError('Couldn\'t find target device, {}:{}.'.format(vid, pid))
- logging.info('found device bus {} port {}'.format(bus_idx, port_idx))
-
- usb_manager = usb_port_manager.UsbPortManager(dut)
- port_id = [usb_port_manager.PortId(bus=bus_idx, port_number=port_idx)]
- usb_manager.set_port_power(port_id, 0)
- time.sleep(pause)
- usb_manager.set_port_power(port_id, 1)
-
-
-def get_port_number_from_vidpid(dut, vid, pid):
- """
- Get bus number and port number a device is connected to on DUT.
-
- Get the bus number and port number of the usb port the target perpipharel
- device is connected to.
-
- @param dut: The handle of the device under test.
- @param vid: Vendor ID of the peripharel device.
- @param pid: Product ID of the peripharel device.
-
- @returns the target bus number and port number, if device not found, returns
- (None, None).
-
- """
- cmd = 'lsusb -d {}:{}'.format(vid, pid)
- lsusb_output = dut.run(cmd, ignore_status=True).stdout
- logging.info('lsusb output {}'.format(lsusb_output))
- target_bus_idx, target_dev_idx = get_bus_dev_id(lsusb_output, vid, pid)
- if target_bus_idx is None:
- return None, None
- cmd = 'lsusb -t'
- lsusb_output = dut.run(cmd, ignore_status=True).stdout
- target_port_number = get_port_number(
- lsusb_output, target_bus_idx, target_dev_idx)
- return target_bus_idx, target_port_number
-
-
-def get_bus_dev_id(lsusb_output, vid, pid):
- """
- Get bus number and device index a device is connected to on DUT.
-
- Get the bus number and port number of the usb port the target perpipharel
- device is connected to based on the output of command 'lsusb -d VID:PID'.
-
- @param lsusb_output: output of command 'lsusb -d VID:PID' running on DUT.
- @param vid: Vendor ID of the peripharel device.
- @param pid: Product ID of the peripharel device.
-
- @returns the target bus number and device index, if device not found,
- returns (None, None).
-
- """
- if lsusb_output == '':
- return None, None
- lsusb_device_info = lsusb_output.strip().split('\n')
- if len(lsusb_device_info) > 1:
- logging.info('find more than one device with VID:PID: %s:%s', vid, pid)
- return None, None
- # An example of the info line is 'Bus 001 Device 006: ID 266e:0110 ...'
- fields = lsusb_device_info[0].split(' ')
- assert len(fields) >= 6, 'Wrong info format: {}'.format(lsusb_device_info)
- target_bus_idx = int(fields[1])
- target_device_idx = int(fields[3][:-1])
- logging.info('found target device %s:%s, bus: %d, dev: %d',
- vid, pid, target_bus_idx, target_device_idx)
- return target_bus_idx, target_device_idx
-
-def get_port_number(lsusb_tree_output, bus, dev):
- """
- Get port number that certain device is connected to on DUT.
-
- Get the port number of the usb port that the target peripharel device is
- connected to based on the output of command 'lsusb -t', its bus number and
- device index.
- An example of lsusb_tree_output could be:
- /: Bus 02.Port 1: Dev 1, Class=root_hub, Driver=xhci_hcd/4p, 5000M
- |__ Port 2: Dev 2, If 0, Class=Hub, Driver=hub/4p, 5000M
- /: Bus 01.Port 1: Dev 1, Class=root_hub, Driver=xhci_hcd/11p, 480M
- |__ Port 2: Dev 52, If 0, Class=Hub, Driver=hub/4p, 480M
- |__ Port 1: Dev 55, If 0, Class=Human Interface Device,
- Driver=usbhid, 12M
- |__ Port 3: Dev 54, If 0, Class=Vendor Specific Class,
- Driver=udl, 480M
- |__ Port 3: Dev 3, If 0, Class=Hub, Driver=hub/4p, 480M
- |__ Port 4: Dev 4, If 0, Class=Wireless, Driver=btusb, 12M
- |__ Port 4: Dev 4, If 1, Class=Wireless, Driver=btusb, 12M
-
- @param lsusb_tree_output: The output of command 'lsusb -t' on DUT.
- @param bus: The bus number the peripharel device is connected to.
- @param dev: The device index of the peripharel device on DUT.
-
- @returns the target port number, if device not found, returns None.
-
- """
- lsusb_device_buses = lsusb_tree_output.strip().split(TOKEN_NEW_BUS)
- target_bus_token = 'Bus {:02d}.'.format(bus)
- for bus_info in lsusb_device_buses:
- if bus_info.find(target_bus_token) != 0:
- continue
- target_dev_token = 'Dev {}'.format(dev)
- device_info = bus_info.strip(target_bus_token).split(TOKEN_ROOT_DEVICE)
- for device in device_info:
- if target_dev_token not in device:
- continue
- target_port_number = int(device.split(':')[0].split(' ')[1])
- return target_port_number
- return None
-
-
-def get_all_port_number_from_vidpid(dut, vid, pid):
- """
- Get the list of bus number and port number devices are connected to DUT.
-
- Get the the list of bus number and port number of the usb ports the target
- perpipharel devices are connected to.
-
- @param dut: The handle of the device under test.
- @param vid: Vendor ID of the peripharel device.
- @param pid: Product ID of the peripharel device.
-
- @returns the list of target bus number and port number, if device not found,
- returns empty list.
-
- """
- port_number = []
- cmd = 'lsusb -d {}:{}'.format(vid, pid)
- lsusb_output = dut.run(cmd, ignore_status=True).stdout
- (target_bus_idx, target_dev_idx) = get_all_bus_dev_id(lsusb_output, vid, pid)
- if target_bus_idx is None:
- return None, None
- cmd = 'lsusb -t'
- lsusb_output = dut.run(cmd, ignore_status=True).stdout
- for bus, dev in zip(target_bus_idx, target_dev_idx):
- port_number.append(get_port_number(
- lsusb_output, bus, dev))
- return (target_bus_idx, port_number)
-
-
-def get_all_bus_dev_id(lsusb_output, vid, pid):
- """
- Get the list of bus number and device index devices are connected to DUT.
-
- Get the bus number and port number of the usb ports the target perpipharel
- devices are connected to based on the output of command 'lsusb -d VID:PID'.
-
- @param lsusb_output: output of command 'lsusb -d VID:PID' running on DUT.
- @param vid: Vendor ID of the peripharel device.
- @param pid: Product ID of the peripharel device.
-
- @returns the list of target bus number and device index, if device not found,
- returns empty list.
-
- """
- bus_idx = []
- device_idx =[]
- if lsusb_output == '':
- return None, None
- lsusb_device_info = lsusb_output.strip().split('\n')
- for lsusb_device in lsusb_device_info:
- fields = lsusb_device.split(' ')
- assert len(fields) >= 6, 'Wrong info format: {}'.format(lsusb_device_info)
- target_bus_idx = int(fields[1])
- target_device_idx = int(fields[3][:-1])
- bus_idx.append(target_bus_idx)
- device_idx.append( target_device_idx)
- return (bus_idx, device_idx)
-
-
-def get_target_all_gpio(dut, board, vid, pid):
- """
- Get GPIO for all devices with vid, pid connected to on DUT.
-
- Get gpio of usb port the target perpipharel devices are
- connected to based on the output of command 'lsusb -d VID:PID'.
-
- @param dut: The handle of the device under test.
- @param board: Board name ('guado', etc.)
- @param vid: Vendor ID of the peripharel device.
- @param pid: Product ID of the peripharel device.
-
- @returns the list of gpio, if no device found return []
-
- """
- gpio_list = []
- (bus_idx, port_idx) = get_all_port_number_from_vidpid(dut, vid, pid)
- if port_idx is None:
- raise KeyError('Couldn\'t find target device, {}:{}.'.format(vid, pid))
-
- for bus, port in zip(bus_idx, port_idx):
- logging.info('found device bus {} port {}'.format(bus, port))
- token_bus = 'bus{}'.format(bus)
- target_gpio_pos = (PORT_NUM_DICT.get(board, {})
- .get(token_bus, {}).get(port, ''))
- target_gpio = (PORT_GPIO_DICT.get(board, {})
- .get(token_bus, {}).get(target_gpio_pos, None))
- logging.info('Target gpio num {}'.format(target_gpio))
- gpio_list.append(target_gpio)
- return gpio_list
diff --git a/client/common_lib/cros/power_cycle_usb_util_unittest.py b/client/common_lib/cros/power_cycle_usb_util_unittest.py
deleted file mode 100755
index ec78135..0000000
--- a/client/common_lib/cros/power_cycle_usb_util_unittest.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import unittest
-
-from autotest_lib.client.common_lib.cros import power_cycle_usb_util
-
-
-class PowerCycleUsbUtilTest(unittest.TestCase):
- """Unittest for the parse functions within power_cycle_usb_util.py."""
-
- VID = '0001'
- PID = '0001'
- BUS = 1
- DEV = 2
-
- LSUSB_DEVICE_OUTPUT = 'Bus 001 Device 002: ID 0001:0001\n'
- LSUSB_DEVICE_OUTPUT_NONE = ''
- LSUSB_DEVICE_OUTPUT_MULTI = ('Bus 001 Device 002: ID 0001:0001\n'
- 'Bus 001 Device 002: ID 0001:0001\n')
-
- LSUSB_TREE_OUTPUT = \
- ('/: Bus 02.Port 1: Dev 1, Class=root_hub, Driver=xhci_hcd/4p, 5000M\n'
- ' |__ Port 3: Dev 2, If 0, Class=Hub, Driver=hub/4p, 5000M\n'
- '/: Bus 01.Port 1: Dev 1, Class=root_hub, Driver=xhci_hcd/11p, 480M\n'
- ' |__ Port 2: Dev 52, If 0, Class=Hub, Driver=hub/4p, 480M\n'
- ' |__ Port 1: Dev 2, If 0, Class=Human Interface Device,'
- 'Driver=usbhid, 12M\n'
- ' |__ Port 3: Dev 54, If 0, Class=Vendor Specific Class,'
- 'Driver=udl, 480M\n'
- ' |__ Port 3: Dev 3, If 0, Class=Hub, Driver=hub/4p, 480M\n'
- ' |__ Port 4: Dev 4, If 0, Class=Wireless, Driver=btusb, 12M\n'
- ' |__ Port 4: Dev 4, If 1, Class=Wireless, Driver=btusb, 12M\n')
-
- def test_get_bus_dev_id(self):
- want = (self.BUS, self.DEV)
- want_none = (None, None)
- want_multi = (None, None)
-
- bus, dev = power_cycle_usb_util.get_bus_dev_id(
- self.LSUSB_DEVICE_OUTPUT, self.VID, self.PID)
- self.assertEqual((bus, dev), want)
- bus, dev = power_cycle_usb_util.get_bus_dev_id(
- self.LSUSB_DEVICE_OUTPUT_NONE, self.VID, self.PID)
- self.assertEqual((bus, dev), want_none)
- bus, dev = power_cycle_usb_util.get_bus_dev_id(
- self.LSUSB_DEVICE_OUTPUT_MULTI, self.VID, self.PID)
- self.assertEqual((bus, dev), want_multi)
-
- def test_get_port_number(self):
- want = 2
-
- port = power_cycle_usb_util.get_port_number(
- self.LSUSB_TREE_OUTPUT, self.BUS, self.DEV)
- self.assertEqual(port, want)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/client/common_lib/cros/power_load_util.py b/client/common_lib/cros/power_load_util.py
deleted file mode 100644
index baaf38e..0000000
--- a/client/common_lib/cros/power_load_util.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import tempfile
-
-from autotest_lib.client.common_lib import file_utils
-
-_URL_BASE = ('https://sites.google.com/a/chromium.org/dev/chromium-os'
- '/testing/power-testing/pltp')
-_PLTG_URL = _URL_BASE + '/pltg'
-_PLTU_URL = _URL_BASE + '/pltu'
-_PLTP_URL = _URL_BASE + '/pltp'
-_MEETU_URL = _URL_BASE + '/meetu'
-_MEETP_URL = _URL_BASE + '/meetp'
-
-
-def _get_content(url):
- """Reads the content of the file at the given |URL|.
-
- Args:
- url: URL to be fetched.
-
- Return:
- The content of the fetched file.
- """
- with tempfile.NamedTemporaryFile() as named_file:
- file_utils.download_file(url, named_file.name)
- return named_file.read().rstrip()
-
-
-def use_gaia_login():
- """Returns whether Gaia login should be used by default for load testing."""
- res = _get_content(_PLTG_URL)
- return res == 'True' or res == 'true'
-
-
-def get_username():
- """Returns username for load testing."""
- return _get_content(_PLTU_URL)
-
-
-def get_password():
- """Returns password for load testing."""
- return _get_content(_PLTP_URL)
-
-
-def get_meet_username():
- """Returns username for meet testing."""
- return _get_content(_MEETU_URL)
-
-
-def get_meet_password():
- """Returns password for meet testing."""
- return _get_content(_MEETP_URL)
diff --git a/client/common_lib/cros/test_webrtc_peer_connection.py b/client/common_lib/cros/test_webrtc_peer_connection.py
deleted file mode 100644
index 0a28d69..0000000
--- a/client/common_lib/cros/test_webrtc_peer_connection.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Lint as: python2, python3
-import logging
-import os
-import time
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.cros import chrome
-from autotest_lib.client.common_lib.cros import system_metrics_collector
-from autotest_lib.client.common_lib.cros import webrtc_utils
-from autotest_lib.client.cros.graphics import graphics_utils
-from autotest_lib.client.cros.multimedia import system_facade_native
-from autotest_lib.client.cros.video import helper_logger
-from telemetry.util import image_util
-
-
-EXTRA_BROWSER_ARGS = ['--use-fake-ui-for-media-stream',
- '--use-fake-device-for-media-stream']
-
-
-class WebRtcPeerConnectionTest(object):
- """
- Runs a WebRTC peer connection test.
-
- This class runs a test that uses WebRTC peer connections to stress Chrome
- and WebRTC. It interacts with HTML and JS files that contain the actual test
- logic. It makes many assumptions about how these files behave. See one of
- the existing tests and the documentation for run_test() for reference.
- """
- def __init__(
- self,
- title,
- own_script,
- common_script,
- bindir,
- tmpdir,
- debugdir,
- timeout = 70,
- test_runtime_seconds = 60,
- num_peer_connections = 5,
- iteration_delay_millis = 500,
- before_start_hook = None):
- """
- Sets up a peer connection test.
-
- @param title: Title of the test, shown on the test HTML page.
- @param own_script: Name of the test's own JS file in bindir.
- @param tmpdir: Directory to store tmp files, should be in the autotest
- tree.
- @param bindir: The directory that contains the test files and
- own_script.
- @param debugdir: The directory to which debug data, e.g. screenshots,
- should be written.
- @param timeout: Timeout in seconds for the test.
- @param test_runtime_seconds: How long to run the test. If errors occur
- the test can exit earlier.
- @param num_peer_connections: Number of peer connections to use.
- @param iteration_delay_millis: delay in millis between each test
- iteration.
- @param before_start_hook: function accepting a Chrome browser tab as
- argument. Is executed before the startTest() JS method call is
- made.
- """
- self.title = title
- self.own_script = own_script
- self.common_script = common_script
- self.bindir = bindir
- self.tmpdir = tmpdir
- self.debugdir = debugdir
- self.timeout = timeout
- self.test_runtime_seconds = test_runtime_seconds
- self.num_peer_connections = num_peer_connections
- self.iteration_delay_millis = iteration_delay_millis
- self.before_start_hook = before_start_hook
- self.tab = None
-
- def start_test(self, cr, html_file):
- """
- Opens the test page.
-
- @param cr: Autotest Chrome instance.
- @param html_file: File object containing the HTML code to use in the
- test. The html file needs to have the following JS methods:
- startTest(runtimeSeconds, numPeerConnections, iterationDelay)
- Starts the test. Arguments are all numbers.
- getStatus()
- Gets the status of the test. Returns a string with the
- failure message. If the string starts with 'failure', it
- is interpreted as failure. The string 'ok-done' denotes
- that the test is complete. This method should not throw
- an exception.
- """
- self.tab = cr.browser.tabs[0]
- self.tab.Navigate(cr.browser.platform.http_server.UrlOf(
- os.path.join(self.bindir, html_file.name)))
- self.tab.WaitForDocumentReadyStateToBeComplete()
- if self.before_start_hook is not None:
- self.before_start_hook(self.tab)
- self.tab.EvaluateJavaScript(
- "startTest(%d, %d, %d)" % (
- self.test_runtime_seconds,
- self.num_peer_connections,
- self.iteration_delay_millis))
-
- def stop_test(self):
- """
- Hook that always get called after the test has run.
- """
- pass
-
- def _test_done(self):
- """
- Determines if the test is done or not.
-
- Does so by querying status of the JavaScript test runner.
- @return True if the test is done, false if it is still in progress.
- @raise TestFail if the status check returns a failure status.
- """
- status = self.tab.EvaluateJavaScript('getStatus()')
- if status.startswith('failure'):
- raise error.TestFail(
- 'Test status starts with failure, status is: ' + status)
- logging.debug(status)
- return status == 'ok-done'
-
- def wait_test_completed(self, timeout_secs):
- """
- Waits until the test is done.
-
- @param timeout_secs Max time to wait in seconds.
-
- @raises TestError on timeout, or javascript eval fails, or
- error status from the getStatus() JS method.
- """
- start_secs = time.time()
- while not self._test_done():
- spent_time = time.time() - start_secs
- if spent_time > timeout_secs:
- raise utils.TimeoutError(
- 'Test timed out after {} seconds'.format(spent_time))
- self.do_in_wait_loop()
-
- def do_in_wait_loop(self):
- """
- Called repeatedly in a loop while the test waits for completion.
-
- Subclasses can override and provide specific behavior.
- """
- time.sleep(1)
-
- @helper_logger.video_log_wrapper
- def run_test(self):
- """
- Starts the test and waits until it is completed.
- """
- with chrome.Chrome(extra_browser_args = EXTRA_BROWSER_ARGS + \
- [helper_logger.chrome_vmodule_flag()],
- init_network_controller = True) as cr:
- own_script_path = os.path.join(
- self.bindir, self.own_script)
- common_script_path = webrtc_utils.get_common_script_path(
- self.common_script)
-
- # Create the URLs to the JS scripts to include in the html file.
- # Normally we would use the http_server.UrlOf method. However,
- # that requires starting the server first. The server reads
- # all file contents on startup, meaning we must completely
- # create the html file first. Hence we create the url
- # paths relative to the common prefix, which will be used as the
- # base of the server.
- base_dir = os.path.commonprefix(
- [own_script_path, common_script_path])
- base_dir = base_dir.rstrip('/')
- own_script_url = own_script_path[len(base_dir):]
- common_script_url = common_script_path[len(base_dir):]
-
- html_file = webrtc_utils.create_temp_html_file(
- self.title,
- self.tmpdir,
- own_script_url,
- common_script_url)
- # Don't bother deleting the html file, the autotest tmp dir will be
- # cleaned up by the autotest framework.
- try:
- cr.browser.platform.SetHTTPServerDirectories(
- [own_script_path, html_file.name, common_script_path])
- self.start_test(cr, html_file)
- self.wait_test_completed(self.timeout)
- self.verify_status_ok()
- finally:
- # Ensure we always have a screenshot, both when succesful and
- # when failed - useful for debugging.
- self.take_screenshots()
- self.stop_test()
-
- def verify_status_ok(self):
- """
- Verifies that the status of the test is 'ok-done'.
-
- @raises TestError the status is different from 'ok-done'.
- """
- status = self.tab.EvaluateJavaScript('getStatus()')
- if status != 'ok-done':
- raise error.TestFail('Failed: %s' % status)
-
- def take_screenshots(self):
- """
- Takes screenshots using two different mechanisms.
-
- Takes one screenshot using graphics_utils which is a really low level
- api that works between the kernel and userspace. The advantage is that
- this captures the entire screen regardless of Chrome state. Disadvantage
- is that it does not always work.
-
- Takes one screenshot of the current tab using Telemetry.
-
- Saves the screenshot in the results directory.
- """
- # Replace spaces with _ and lowercase the screenshot name for easier
- # tab completion in terminals.
- screenshot_name = self.title.replace(' ', '-').lower() + '-screenshot'
- self.take_graphics_utils_screenshot(screenshot_name)
- self.take_browser_tab_screenshot(screenshot_name)
-
- def take_graphics_utils_screenshot(self, screenshot_name):
- """
- Takes a screenshot of what is currently displayed.
-
- Uses the low level graphics_utils API.
-
- @param screenshot_name: Name of the screenshot.
- """
- try:
- full_filename = screenshot_name + '_graphics_utils'
- graphics_utils.take_screenshot(self.debugdir, full_filename)
- except Exception as e:
- logging.warn('Screenshot using graphics_utils failed', exc_info = e)
-
- def take_browser_tab_screenshot(self, screenshot_name):
- """
- Takes a screenshot of the current browser tab.
-
- @param screenshot_name: Name of the screenshot.
- """
- if self.tab is not None and self.tab.screenshot_supported:
- try:
- screenshot = self.tab.Screenshot(timeout = 10)
- full_filename = os.path.join(
- self.debugdir, screenshot_name + '_browser_tab.png')
- image_util.WritePngFile(screenshot, full_filename)
- except Exception:
- # This can for example occur if Chrome crashes. It will
- # cause the Screenshot call to timeout.
- logging.warn(
- 'Screenshot using telemetry tab.Screenshot failed',
- exc_info=True)
- else:
- logging.warn(
- 'Screenshot using telemetry tab.Screenshot() not supported')
-
-
-
-class WebRtcPeerConnectionPerformanceTest(WebRtcPeerConnectionTest):
- """
- Runs a WebRTC performance test.
- """
- def __init__(
- self,
- title,
- own_script,
- common_script,
- bindir,
- tmpdir,
- debugdir,
- timeout = 70,
- test_runtime_seconds = 60,
- num_peer_connections = 5,
- iteration_delay_millis = 500,
- before_start_hook = None):
-
- def perf_before_start_hook(tab):
- """
- Before start hook to disable cpu overuse detection.
- """
- if before_start_hook:
- before_start_hook(tab)
- tab.EvaluateJavaScript('cpuOveruseDetection = false')
-
- super(WebRtcPeerConnectionPerformanceTest, self).__init__(
- title,
- own_script,
- common_script,
- bindir,
- tmpdir,
- debugdir,
- timeout,
- test_runtime_seconds,
- num_peer_connections,
- iteration_delay_millis,
- perf_before_start_hook)
- self.collector = system_metrics_collector.SystemMetricsCollector(
- system_facade_native.SystemFacadeNative())
- # TODO(crbug/784365): If this proves to work fine, move to a separate
- # module and make more generic.
- delay = 5
- iterations = self.test_runtime_seconds / delay + 1
- utils.BgJob('top -b -d %d -n %d -w 512 -c > %s/top_output.txt'
- % (delay, iterations, self.debugdir))
- utils.BgJob('iostat -x %d %d > %s/iostat_output.txt'
- % (delay, iterations, self.debugdir))
- utils.BgJob('for i in $(seq %d);'
- 'do netstat -s >> %s/netstat_output.txt'
- ';sleep %d;done'
- % (delay, self.debugdir, iterations))
-
- def start_test(self, cr, html_file):
- super(WebRtcPeerConnectionPerformanceTest, self).start_test(
- cr, html_file)
- self.collector.pre_collect()
-
- def stop_test(self):
- self.collector.post_collect()
- super(WebRtcPeerConnectionPerformanceTest, self).stop_test()
-
- def do_in_wait_loop(self):
- self.collector.collect_snapshot()
- time.sleep(1)
diff --git a/client/common_lib/cros/tpm_utils.py b/client/common_lib/cros/tpm_utils.py
deleted file mode 100644
index c955afb..0000000
--- a/client/common_lib/cros/tpm_utils.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging, os
-import time
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.cros import constants
-
-
-_RM_FILES = ['/home/chronos/.oobe_completed',
- '/home/chronos/Local\ State',
- '/var/cache/shill/default.profile']
-_RM_DIRS = ['/home/.shadow/*',
- os.path.join(constants.ALLOWLIST_DIR, '*'),
- '/var/cache/app_pack',
- '/var/lib/tpm']
-
-
-class NoTPMPasswordException(Exception):
- """No TPM Password could be found."""
- pass
-
-
-def TPMStatus(client):
- """Returns a dictionary with TPM status.
-
- @param client: client object to run commands on.
- """
- out = client.run('tpm_manager_client status --nonsensitive').stdout.strip()
- lines = out.split('\n')[1:-1]
- status = {}
- for item in lines:
- item = item.split(':')
- if not item[0]:
- continue
- if len(item) == 1:
- item.append('')
- item = [x.strip() for x in item]
- item[1] = True if item[1] == 'true' else item[1]
- item[1] = False if item[1] == 'false' else item[1]
- status[item[0]] = item[1]
- return status
-
-
-def IsTPMAvailable(client):
- """Returns True if the TPM is unowned and enabled.
-
- @param client: client object to run commands on.
- """
- status = TPMStatus(client)
- return status['is_enabled'] and not status['is_owned']
-
-
-def ClearTPMServer(client, out_dir):
- """Clears the TPM and reboots from a server-side autotest.
-
- @param client: client object to run commands on.
- @param out_dir: temporary directory.
- """
- if IsTPMAvailable(client):
- logging.debug('TPM is not owned')
- return
-
- client.run('stop ui')
- client.run('crossystem clear_tpm_owner_request=1')
- CleanupAndReboot(client)
-
-def ClearTPMOwnerRequest(client, wait_for_ready=False, timeout=60):
- """Clears the TPM using crossystem command.
-
- @param client: client object to run commands on.
- @param wait_for_ready: wait until the TPM status is ready
- @param timeout: number of seconds to wait for the TPM to become ready.
- """
- if not client.run('crossystem clear_tpm_owner_request=1',
- ignore_status=True).exit_status == 0:
- raise error.TestFail('Unable to clear TPM.')
-
- CleanupAndReboot(client)
-
- if wait_for_ready:
- status = ''
- end_time = time.time() + timeout
- # Wait for tpm_manager to send a successful reply.
- while 'STATUS_SUCCESS' not in status and time.time() < end_time:
- status = client.run('tpm_manager_client status --nonsensitive',
- ignore_status=True).stdout.strip()
- logging.debug(status)
- time.sleep(1)
- # Verify if the TPM is unowned.
- tpm_status = TPMStatus(client)
- logging.info('TPM status: %s', tpm_status)
- if tpm_status['is_owned']:
- raise error.TestFail('Failed to clear TPM.')
-
-
-def ClearTPMIfOwned(client):
- """Clear the TPM only if device is already owned.
-
- @param client: client object to run commands on."""
- tpm_status = TPMStatus(client)
- logging.info('TPM status: %s', tpm_status)
- if tpm_status['is_owned']:
- logging.info('Clearing TPM because this device is owned.')
- ClearTPMOwnerRequest(client)
-
-
-def CleanupAndReboot(client):
- """Cleanup and reboot the device.
-
- @param client: client object to run commands on.
- """
- full_rm = 'sudo rm -rf ' + ' '.join(_RM_FILES + _RM_DIRS)
- client.run(full_rm, ignore_status=True)
- client.run('sync', ignore_status=True)
- client.reboot()
diff --git a/client/common_lib/cros/virtual_ethernet_pair.py b/client/common_lib/cros/virtual_ethernet_pair.py
deleted file mode 100644
index 2ed0377..0000000
--- a/client/common_lib/cros/virtual_ethernet_pair.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""
-VirtualEthernetPair provides methods for setting up and tearing down a virtual
-ethernet interface for use in tests. You will probably need to be root on test
-devices to use this class. The constructor allows you to specify your IP's to
-assign to both ends of the pair, however, if you wish to leave the interface
-unconfigured, simply pass None. You may also specify the subnet of your ip
-addresses. Failing to do so leaves them with default in ifconfig.
-
-Example usage:
-vif = virtual_ethernet_pair.VirtualEthernetPair(interface_name="main",
- peer_interface_name="peer",
- interface_ip="10.9.8.1/24",
- peer_interface_ip=None)
-vif.setup()
-if not vif.is_healthy:
- # bad things happened while creating the interface
- # ... abort gracefully
-
-interface_name = vif.interface_name
-peer_interface_name = vif.peer_interface_name
-#... do things with your interface
-
-# You must call this if you want to leave the system in a good state.
-vif.teardown()
-
-Alternatively:
-
-with virtual_ethernet_pair.VirtualEthernetPair(...) as vif:
- if not vif.is_healthy:
- # bad things happened while creating the interface
- # ... abort gracefully
-
- interface_name = vif.interface_name
- peer_interface_name = vif.peer_interface_name
- #... do things with your interface
-
-"""
-
-import logging
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib.cros.network import interface
-
-class VirtualEthernetPair(object):
- """ Class for configuring virtual ethernet device pair. """
-
- def __init__(self,
- interface_name='veth_main',
- peer_interface_name='veth_secondary',
- interface_ip='10.9.8.1/24',
- peer_interface_ip='10.9.8.2/24',
- interface_ipv6=None,
- peer_interface_ipv6=None,
- ignore_shutdown_errors=False,
- host=None):
- """
- Construct a object managing a virtual ethernet pair. One end of the
- interface will be called |interface_name|, and the peer end
- |peer_interface_name|. You may get the interface names later with
- VirtualEthernetPair.get_[peer_]interface_name(). The ends of the
- interface are manually configured with the given IPv4 address strings
- (like "10.9.8.2/24"). You may skip the IP configuration by passing None
- as the address for either interface.
- """
- super(VirtualEthernetPair, self).__init__()
- self._is_healthy = True
- self._interface_name = interface_name
- self._peer_interface_name = peer_interface_name
- self._interface_ip = interface_ip
- self._peer_interface_ip = peer_interface_ip
- self._interface_ipv6 = interface_ipv6
- self._peer_interface_ipv6 = peer_interface_ipv6
- self._ignore_shutdown_errors = ignore_shutdown_errors
- self._run = utils.run
- self._host = host
- if host is not None:
- self._run = host.run
-
-
- def setup(self):
- """
- Installs a virtual ethernet interface and configures one side with an IP
- address. First does some confidence checking and tries to remove an
- existing interface by the same name, and logs messages on failures.
- """
- self._is_healthy = False
- if self._either_interface_exists():
- logging.warning('At least one test interface already existed.'
- ' Attempting to remove.')
- self._remove_test_interface()
- if self._either_interface_exists():
- logging.error('Failed to remove unexpected test '
- 'interface. Aborting.')
- return
-
- self._create_test_interface()
- if not self._interface_exists(self._interface_name):
- logging.error('Failed to create main test interface.')
- return
-
- if not self._interface_exists(self._peer_interface_name):
- logging.error('Failed to create peer test interface.')
- return
- # Unless you tell the firewall about the interface, you're not going to
- # get any IP traffic through. Since this is basically a loopback
- # device, just allow all traffic.
- for name in (self._interface_name, self._peer_interface_name):
- status = self._run('iptables -w -I INPUT -i %s -j ACCEPT' % name,
- ignore_status=True)
- if status.exit_status != 0:
- logging.error('iptables rule addition failed for interface %s: '
- '%s', name, status.stderr)
- self._is_healthy = True
-
-
- def teardown(self):
- """
- Removes the interface installed by VirtualEthernetPair.setup(), with
- some simple confidence checks that print warnings when either the
- interface isn't there or fails to be removed.
- """
- for name in (self._interface_name, self._peer_interface_name):
- self._run('iptables -w -D INPUT -i %s -j ACCEPT' % name,
- ignore_status=True)
- if not self._either_interface_exists():
- logging.warning('VirtualEthernetPair.teardown() called, '
- 'but no interface was found.')
- return
-
- self._remove_test_interface()
- if self._either_interface_exists():
- logging.error('Failed to destroy test interface.')
-
-
- @property
- def is_healthy(self):
- """@return True if virtual ethernet pair is configured."""
- return self._is_healthy
-
-
- @property
- def interface_name(self):
- """@return string name of the interface."""
- return self._interface_name
-
-
- @property
- def peer_interface_name(self):
- """@return string name of the peer interface."""
- return self._peer_interface_name
-
-
- @property
- def interface_ip(self):
- """@return string IPv4 address of the interface."""
- return interface.Interface(self.interface_name).ipv4_address
-
-
- @property
- def peer_interface_ip(self):
- """@return string IPv4 address of the peer interface."""
- return interface.Interface(self.peer_interface_name).ipv4_address
-
-
- @property
- def interface_subnet_mask(self):
- """@return string IPv4 subnet mask of the interface."""
- return interface.Interface(self.interface_name).ipv4_subnet_mask
-
-
- @property
- def interface_prefix(self):
- """@return int IPv4 prefix length."""
- return interface.Interface(self.interface_name).ipv4_prefix
-
-
- @property
- def peer_interface_subnet_mask(self):
- """@return string IPv4 subnet mask of the peer interface."""
- return interface.Interface(self.peer_interface_name).ipv4_subnet_mask
-
-
- @property
- def interface_mac(self):
- """@return string MAC address of the interface."""
- return interface.Interface(self.interface_name).mac_address
-
-
- @property
- def peer_interface_mac(self):
- """@return string MAC address of the peer interface."""
- return interface.Interface(self._peer_interface_name).mac_address
-
-
- def __enter__(self):
- self.setup()
- return self
-
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.teardown()
-
-
- def _interface_exists(self, interface_name):
- """
- Returns True iff we found an interface with name |interface_name|.
- """
- return interface.Interface(interface_name, host=self._host).exists
-
-
- def _either_interface_exists(self):
- return (self._interface_exists(self._interface_name) or
- self._interface_exists(self._peer_interface_name))
-
-
- def _remove_test_interface(self):
- """
- Remove the virtual ethernet device installed by
- _create_test_interface().
- """
- self._run('ip link set %s down' % self._interface_name,
- ignore_status=self._ignore_shutdown_errors)
- self._run('ip link set %s down' % self._peer_interface_name,
- ignore_status=self._ignore_shutdown_errors)
- self._run('ip link delete %s >/dev/null 2>&1' % self._interface_name,
- ignore_status=self._ignore_shutdown_errors)
-
- # Under most normal circumstances a successful deletion of
- # |_interface_name| should also remove |_peer_interface_name|,
- # but if we elected to ignore failures above, that may not be
- # the case.
- self._run('ip link delete %s >/dev/null 2>&1' %
- self._peer_interface_name, ignore_status=True)
-
-
- def _create_test_interface(self):
- """
- Set up a virtual ethernet device and configure the host side with a
- fake IP address.
- """
- self._run('ip link add name %s '
- 'type veth peer name %s >/dev/null 2>&1' %
- (self._interface_name, self._peer_interface_name))
- self._run('ip link set %s up' % self._interface_name)
- self._run('ip link set %s up' % self._peer_interface_name)
- if self._interface_ip is not None:
- self._run('ip addr add %s dev %s' % (self._interface_ip,
- self._interface_name))
- if self._peer_interface_ip is not None:
- self._run('ip addr add %s dev %s' % (self._peer_interface_ip,
- self._peer_interface_name))
- if self._interface_ipv6 is not None:
- self._run('ip -6 addr add %s dev %s' % (self._interface_ipv6,
- self._interface_name))
- if self._peer_interface_ipv6 is not None:
- self._run('ip -6 addr add %s dev %s' % (self._peer_interface_ipv6,
- self._peer_interface_name))
diff --git a/client/common_lib/cros/vpd_utils.py b/client/common_lib/cros/vpd_utils.py
deleted file mode 100644
index 7095260..0000000
--- a/client/common_lib/cros/vpd_utils.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.utils.frozen_chromite.lib import retry_util
-
-
-_VPD_BASE_CMD = 'vpd -i %s %s %s'
-_RW = 'RW_VPD'
-_RO = 'RO_VPD'
-
-
-def _check_partition(partition):
- """
- Used to validate input string in other functions.
-
- @param partition: If this is not 'RO_VPD' or 'RW_VPD', raise a ValueError.
-
- """
- if partition not in [_RW, _RO]:
- raise ValueError("partition should be 'RW_VPD' or 'RO_VPD'")
-
-
-def dump_vpd_log(host, force=True, retries=3):
- """
- Applies changes to the VPD settings by flushing them to the VPD cache and
- output files.
-
- @param host: Host to run the command on.
- @param force: True to pass in the --force parameter to forcefully dump
- the log. False to omit it.
- @param retries: Number of times to try rerunning the command in case of
- error.
-
- """
- vpd_dump_cmd = 'dump_vpd_log%s' % (' --force' if force else '')
- retry_util.RetryException(error.AutoservRunError, retries, host.run,
- vpd_dump_cmd)
-
-
-def vpd_get(host, key, partition='RW_VPD', retries=3):
- """
- Gets the VPD value associated with the input key.
-
- @param host: Host to run the command on.
- @param key: Key of the desired VPD value.
- @param partition: Which partition to access. 'RO_VPD' or 'RW_VPD'.
- @param retries: Number of times to try rerunning the command in case of
- error.
-
- """
- _check_partition(partition)
- get_cmd = _VPD_BASE_CMD % (partition, '-g', key)
- try:
- return retry_util.RetryException(error.AutoservRunError, retries,
- host.run, get_cmd).stdout
- except error.AutoservRunError as e:
- if 'was not found' in str(e.result_obj.stderr):
- return None
- else:
- raise e
-
-
-def vpd_set(host, vpd_dict, partition='RW_VPD', dump=False, force_dump=False,
- retries=3):
- """
- Sets the given key/value pairs in the specified VPD partition.
-
- @param host: Host to run the command on.
- @param vpd_dict: Dictionary containing the VPD key/value pairs to set.
- Dictionary keys should be the VPD key strings, and values
- should be the desired values to write.
- @param partition: Which partition to access. 'RO_VPD' or 'RW_VPD'.
- @param dump: If True, also run dump_vpd_log command after setting the
- vpd values.
- @param force_dump: Whether or not to forcefully dump the vpd log.
- @param retries: Number of times to try rerunning the command in case of
- error.
-
- """
- _check_partition(partition)
- for vpd_key in vpd_dict:
- set_cmd = _VPD_BASE_CMD % (partition, '-s',
- (vpd_key + '=' + str(vpd_dict[vpd_key])))
- retry_util.RetryException(error.AutoservRunError, retries,
- host.run, set_cmd).stdout
-
- if dump:
- dump_vpd_log(host, force=force_dump, retries=retries)
-
-
-def vpd_delete(host, key, partition='RW_VPD', dump=False, force_dump=False,
- retries=3):
- """
- Deletes the specified key from the specified VPD partition.
-
- @param host: Host to run the command on.
- @param key: The VPD value to delete.
- @param partition: Which partition to access. 'RO_VPD' or 'RW_VPD'.
- @param dump: If True, also run dump_vpd_log command after deleting the
- vpd value.
- @param force_dump: Whether or not to forcefully dump the vpd log.
- @param retries: Number of times to try rerunning the command in case of
- error.
-
- """
- _check_partition(partition)
- if not vpd_get(host, key, partition=partition, retries=retries):
- return
-
- del_cmd = _VPD_BASE_CMD % (partition, '-d', key)
- retry_util.RetryException(error.AutoservRunError, retries, host.run,
- del_cmd).stdout
-
- if dump:
- dump_vpd_log(host, force=force_dump, retries=retries)
diff --git a/client/common_lib/cros/webrtc_scripts/loopback-peerconnection.js b/client/common_lib/cros/webrtc_scripts/loopback-peerconnection.js
deleted file mode 100644
index 34fe2b4..0000000
--- a/client/common_lib/cros/webrtc_scripts/loopback-peerconnection.js
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright 2017 The Chromium Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-/*jshint esversion: 6 */
-
-/**
- * A loopback peer connection with one or more streams.
- */
-class PeerConnection {
- /**
- * Creates a loopback peer connection. One stream per supplied resolution is
- * created.
- * @param {!Element} videoElement the video element to render the feed on.
- * @param {!Array<!{x: number, y: number}>} resolutions. A width of -1 will
- * result in disabled video for that stream.
- * @param {?boolean=} cpuOveruseDetection Whether to enable
- * googCpuOveruseDetection (lower video quality if CPU usage is high).
- * Default is null which means that the constraint is not set at all.
- */
- constructor(videoElement, resolutions, cpuOveruseDetection=null) {
- this.localConnection = null;
- this.remoteConnection = null;
- this.remoteView = videoElement;
- this.streams = [];
- // Ensure sorted in descending order to conveniently request the highest
- // resolution first through GUM later.
- this.resolutions = resolutions.slice().sort((x, y) => y.w - x.w);
- this.activeStreamIndex = resolutions.length - 1;
- this.badResolutionsSeen = 0;
- if (cpuOveruseDetection !== null) {
- this.pcConstraints = {
- 'optional': [{'googCpuOveruseDetection': cpuOveruseDetection}]
- };
- }
- this.rtcConfig = {'sdpSemantics': 'plan-b'};
- }
-
- /**
- * Starts the connections. Triggers GetUserMedia and starts
- * to render the video on {@code this.videoElement}.
- * @return {!Promise} a Promise that resolves when everything is initalized.
- */
- start() {
- // getUserMedia fails if we first request a low resolution and
- // later a higher one. Hence, sort resolutions above and
- // start with the highest resolution here.
- const promises = this.resolutions.map((resolution) => {
- const constraints = createMediaConstraints(resolution);
- return navigator.mediaDevices
- .getUserMedia(constraints)
- .then((stream) => this.streams.push(stream));
- });
- return Promise.all(promises).then(() => {
- // Start with the smallest video to not overload the machine instantly.
- return this.onGetUserMediaSuccess_(this.streams[this.activeStreamIndex]);
- })
- };
-
- /**
- * Verifies that the state of the streams are good. The state is good if all
- * streams are active and their video elements report the resolution the
- * stream is in. Video elements are allowed to report bad resolutions
- * numSequentialBadResolutionsForFailure times before failure is reported
- * since video elements occasionally report bad resolutions during the tests
- * when we manipulate the streams frequently.
- * @param {number=} numSequentialBadResolutionsForFailure number of bad
- * resolution observations in a row before failure is reported.
- * @param {number=} allowedDelta allowed difference between expected and
- * actual resolution. We have seen videos assigned a resolution one pixel
- * off from the requested.
- * @throws {Error} in case the state is not-good.
- */
- verifyState(numSequentialBadResolutionsForFailure=10, allowedDelta=1) {
- this.verifyAllStreamsActive_();
- const expectedResolution = this.resolutions[this.activeStreamIndex];
- if (expectedResolution.w < 0 || expectedResolution.h < 0) {
- // Video is disabled.
- return;
- }
- if (!isWithin(
- this.remoteView.videoWidth, expectedResolution.w, allowedDelta) ||
- !isWithin(
- this.remoteView.videoHeight, expectedResolution.h, allowedDelta)) {
- this.badResolutionsSeen++;
- } else if (
- this.badResolutionsSeen < numSequentialBadResolutionsForFailure) {
- // Reset the count, but only if we have not yet reached the limit. If the
- // limit is reached, let keep the error state.
- this.badResolutionsSeen = 0;
- }
- if (this.badResolutionsSeen >= numSequentialBadResolutionsForFailure) {
- throw new Error(
- 'Expected video resolution ' +
- resStr(expectedResolution.w, expectedResolution.h) +
- ' but got another resolution ' + this.badResolutionsSeen +
- ' consecutive times. Last resolution was: ' +
- resStr(this.remoteView.videoWidth, this.remoteView.videoHeight));
- }
- }
-
- verifyAllStreamsActive_() {
- if (this.streams.some((x) => !x.active)) {
- throw new Error('At least one media stream is not active')
- }
- }
-
- /**
- * Switches to a random stream, i.e., use a random resolution of the
- * resolutions provided to the constructor.
- * @return {!Promise} A promise that resolved when everything is initialized.
- */
- switchToRandomStream() {
- const localStreams = this.localConnection.getLocalStreams();
- const track = localStreams[0];
- if (track != null) {
- this.localConnection.removeStream(track);
- const newStreamIndex = Math.floor(Math.random() * this.streams.length);
- return this.addStream_(this.streams[newStreamIndex])
- .then(() => this.activeStreamIndex = newStreamIndex);
- } else {
- return Promise.resolve();
- }
- }
-
- onGetUserMediaSuccess_(stream) {
- this.localConnection = new RTCPeerConnection(this.rtcConfig,
- this.pcConstraints);
- this.localConnection.onicecandidate = (event) => {
- this.onIceCandidate_(this.remoteConnection, event);
- };
- this.remoteConnection = new RTCPeerConnection(this.rtcConfig,
- this.pcConstraints);
- this.remoteConnection.onicecandidate = (event) => {
- this.onIceCandidate_(this.localConnection, event);
- };
- this.remoteConnection.onaddstream = (e) => {
- this.remoteView.srcObject = e.stream;
- };
- return this.addStream_(stream);
- }
-
- addStream_(stream) {
- this.localConnection.addStream(stream);
- return this.localConnection
- .createOffer({offerToReceiveAudio: 1, offerToReceiveVideo: 1})
- .then((desc) => this.onCreateOfferSuccess_(desc), logError);
- }
-
- onCreateOfferSuccess_(desc) {
- this.localConnection.setLocalDescription(desc);
- this.remoteConnection.setRemoteDescription(desc);
- return this.remoteConnection.createAnswer().then(
- (desc) => this.onCreateAnswerSuccess_(desc), logError);
- };
-
- onCreateAnswerSuccess_(desc) {
- this.remoteConnection.setLocalDescription(desc);
- this.localConnection.setRemoteDescription(desc);
- };
-
- onIceCandidate_(connection, event) {
- if (event.candidate) {
- connection.addIceCandidate(new RTCIceCandidate(event.candidate));
- }
- };
-}
-
-/**
- * Checks if a value is within an expected value plus/minus a delta.
- * @param {number} actual
- * @param {number} expected
- * @param {number} delta
- * @return {boolean}
- */
-function isWithin(actual, expected, delta) {
- return actual <= expected + delta && actual >= actual - delta;
-}
-
-/**
- * Creates constraints for use with GetUserMedia.
- * @param {!{x: number, y: number}} widthAndHeight Video resolution.
- */
-function createMediaConstraints(widthAndHeight) {
- let constraint;
- if (widthAndHeight.w < 0) {
- constraint = false;
- } else {
- constraint = {
- width: {exact: widthAndHeight.w},
- height: {exact: widthAndHeight.h}
- };
- }
- return {
- audio: true,
- video: constraint
- };
-}
-
-function resStr(width, height) {
- return `${width}x${height}`
-}
-
-function logError(err) {
- console.error(err);
-}
diff --git a/client/common_lib/cros/webrtc_utils.py b/client/common_lib/cros/webrtc_utils.py
deleted file mode 100644
index 531e8fa..0000000
--- a/client/common_lib/cros/webrtc_utils.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import tempfile
-
-
-HTML_TEMPLATE = """<!DOCTYPE html>
- <html>
- <body id="body">
- <h1>{title}</h1>
- <p>Status: <span id="status">not-started</span></p>
- {scripts}
- </body>
- </html>
- """
-
-
-def generate_test_html(title, *scripts):
- """
- Generates HTML contents for WebRTC tests.
-
- @param title: The title of the page.
- @param scripts: Paths to the javascript files to include.
- @returns HTML contents.
- """
- script_tag_list = [
- '<script src="{}"></script>'.format(script)
- for script in scripts
- ]
- script_tags = '\n'.join(script_tag_list)
- return HTML_TEMPLATE.format(title=title, scripts=script_tags)
-
-
-def get_common_script_path(script):
- """
- Gets the file path to a common script.
-
- @param script: The file name of the script, e.g. 'foo.js'
- @returns The absolute path to the script.
- """
- return os.path.join(
- os.path.dirname(__file__), 'webrtc_scripts', script)
-
-
-def create_temp_html_file(title, tmpdir, *scripts):
- """
- Creates a temporary file with HTML contents for WebRTC tests.
-
- @param title: The title of the page.
- @param tmpdir: Directory to put the temporary file.
- @param scripts: Paths to the javascript files to load.
- @returns The file object containing the HTML.
- """
- html = generate_test_html(
- title, *scripts)
- html_file = tempfile.NamedTemporaryFile(
- suffix = '.html', dir = tmpdir, delete = False)
- html_file.write(html)
- html_file.close()
- return html_file
-
diff --git a/client/common_lib/feedback/__init__.py b/client/common_lib/feedback/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/client/common_lib/feedback/__init__.py
+++ /dev/null
diff --git a/client/common_lib/feedback/client.py b/client/common_lib/feedback/client.py
deleted file mode 100644
index 9a0a4e3..0000000
--- a/client/common_lib/feedback/client.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Interactive feedback layer abstraction."""
-
-from autotest_lib.client.common_lib import error
-
-
-# All known queries.
-#
-# Audio playback and recording testing.
-QUERY_AUDIO_PLAYBACK_SILENT = 0
-QUERY_AUDIO_PLAYBACK_AUDIBLE = 1
-QUERY_AUDIO_RECORDING = 2
-# Motion sensor testing.
-QUERY_MOTION_RESTING = 10
-QUERY_MOTION_MOVING = 11
-# USB keyboard plugging and typing.
-QUERY_KEYBOARD_PLUG = 20
-QUERY_KEYBOARD_TYPE = 21
-# GPIO write/read testing.
-QUERY_GPIO_WRITE = 30
-QUERY_GPIO_READ = 31
-# On-board light testing.
-QUERY_LIGHT_ON = 40
-# TODO(garnold) Camera controls testing.
-#QUERY_CAMERA_???
-# Power management testing.
-QUERY_POWER_WAKEUP = 60
-
-INPUT_QUERIES = set((
- QUERY_AUDIO_RECORDING,
- QUERY_MOTION_RESTING,
- QUERY_MOTION_MOVING,
- QUERY_KEYBOARD_PLUG,
- QUERY_KEYBOARD_TYPE,
- QUERY_GPIO_READ,
- QUERY_POWER_WAKEUP,
-))
-
-OUTPUT_QUERIES = set((
- QUERY_AUDIO_PLAYBACK_SILENT,
- QUERY_AUDIO_PLAYBACK_AUDIBLE,
- QUERY_GPIO_WRITE,
- QUERY_LIGHT_ON,
-))
-
-ALL_QUERIES = INPUT_QUERIES.union(OUTPUT_QUERIES)
-
-
-# Feedback client definition.
-#
-class Client(object):
- """Interface for an interactive feedback layer."""
-
- def __init__(self):
- self._initialized = False
- self._finalized = False
-
-
- def _check_active(self):
- """Ensure that the client was initialized and not finalized."""
- if not self._initialized:
- raise error.TestError('Client was not initialized')
- if self._finalized:
- raise error.TestError('Client was already finalized')
-
-
- def __enter__(self):
- self._check_active()
- return self
-
-
- def __exit__(self, ex_type, ex_val, ex_tb):
- self.finalize()
-
-
- def initialize(self, test, host=None):
- """Initializes the feedback object.
-
- This method should be called once prior to any other call.
-
- @param test: An object representing the test case.
- @param host: An object representing the DUT; required for server-side
- tests.
-
- @raise TestError: There was an error during initialization.
- """
- if self._initialized:
- raise error.TestError('Client was already initialized')
- if self._finalized:
- raise error.TestError('Client was already finalized')
- self._initialize_impl(test, host)
- self._initialized = True
- return self
-
-
- def _initialize_impl(self, test, host):
- """Implementation of feedback client initialization.
-
- This should be implemented in concrete subclasses.
- """
- raise NotImplementedError
-
-
- def new_query(self, query_id):
- """Instantiates a new query.
-
- @param query_id: A query identifier (see QUERY_ constants above).
-
- @return A query object.
-
- @raise TestError: Query is invalid or not supported.
- """
- self._check_active()
- return self._new_query_impl(query_id)
-
-
- def _new_query_impl(self, query_id):
- """Implementation of new query instantiation.
-
- This should be implemented in concrete subclasses.
- """
- raise NotImplementedError
-
-
- def finalize(self):
- """Finalizes the feedback object.
-
- This method should be called once when done using the client.
-
- @raise TestError: There was an error while finalizing the client.
- """
- self._check_active()
- self._finalize_impl()
- self._finalized = True
-
-
- def _finalize_impl(self):
- """Implementation of feedback client finalization.
-
- This should be implemented in concrete subclasses.
- """
- raise NotImplementedError
-
-
-# Feedback query definitions.
-#
-class _Query(object):
- """Interactive feedback query base class.
-
- This class is further derived and should not be inherited directly.
- """
-
- def __init__(self):
- self._prepare_called = False
- self._validate_called = False
-
-
- def prepare(self, **kwargs):
- """Prepares the tester for providing or capturing feedback.
-
- @raise TestError: Query preparation failed.
- """
- if self._prepare_called:
- raise error.TestError('Prepare was already called')
- self._prepare_impl(**kwargs)
- self._prepare_called = True
-
-
- def _prepare_impl(self, **kwargs):
- """Implementation of query preparation logic.
-
- This should be implemented in concrete subclasses.
- """
- raise NotImplementedError
-
-
- def validate(self, **kwargs):
- """Validates the interactive input/output result.
-
- This enforces that the method is called at most once, then delegates
- to an underlying implementation method.
-
- @raise TestError: An error occurred during validation.
- @raise TestFail: Query validation failed.
- """
- if self._validate_called:
- raise error.TestError('Validate was already called')
- self._validate_impl(**kwargs)
- self._validate_called = True
-
-
- def _validate_impl(self, **kwargs):
- """Implementation of query validation logic.
-
- This should be implemented in concrete subclasses.
- """
- raise NotImplementedError
-
-
-class OutputQuery(_Query):
- """Interface for an output interactive feedback query.
-
- This class mandates that prepare() is called prior to validate().
- Subclasses should override implementations of _prepare_impl() and
- _validate_impl().
- """
-
- def __init__(self):
- super(OutputQuery, self).__init__()
-
-
- def validate(self, **kwargs):
- """Validates the interactive input/output result.
-
- This enforces the precondition and delegates to the base method.
-
- @raise TestError: An error occurred during validation.
- @raise TestFail: Query validation failed.
- """
- if not self._prepare_called:
- raise error.TestError('Prepare was not called')
- super(OutputQuery, self).validate(**kwargs)
-
-
-class InputQuery(_Query):
- """Interface for an input interactive feedback query.
-
- This class mandates that prepare() is called first, then emit(), and
- finally validate(). Subclasses should override implementations of
- _prepare_impl(), _emit_impl() and _validate_impl().
- """
-
- def __init__(self):
- super(InputQuery, self).__init__()
- self._emit_called = False
-
-
- def validate(self, **kwargs):
- """Validates the interactive input/output result.
-
- This enforces the precondition and delegates to the base method.
-
- @raise TestError: An error occurred during validation.
- @raise TestFail: Query validation failed.
- """
- if not self._emit_called:
- raise error.TestError('Emit was not called')
- super(InputQuery, self).validate(**kwargs)
-
-
- def emit(self):
- """Instructs the tester to emit a feedback to be captured by the test.
-
- This enforces the precondition and ensures the method is called at most
- once, then delegates to an underlying implementation method.
-
- @raise TestError: An error occurred during emission.
- """
- if not self._prepare_called:
- raise error.TestError('Prepare was not called')
- if self._emit_called:
- raise error.TestError('Emit was already called')
- self._emit_impl()
- self._emit_called = True
-
-
- def _emit_impl(self):
- """Implementation of query emission logic.
-
- This should be implemented in concrete subclasses.
- """
- raise NotImplementedError
diff --git a/client/common_lib/feedback/tester_feedback_client.py b/client/common_lib/feedback/tester_feedback_client.py
deleted file mode 100644
index 003e75c..0000000
--- a/client/common_lib/feedback/tester_feedback_client.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Lint as: python2, python3
-# Copyright 2016 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Feedback client implementation for interacting with a human tester."""
-
-import six.moves.xmlrpc_client
-
-import common
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.feedback import client
-
-
-# Query return codes.
-#
-QUERY_RET_SUCCESS = 0
-QUERY_RET_FAIL = 1
-QUERY_RET_ERROR = 2
-
-
-class Client(client.Client):
- """Human tester feedback implementation."""
-
- def __init__(self, test_name, dut_name, remote_addr):
- """Constructs the client object.
-
- @param test_name: The name of the test.
- @param dut_name: The name of the DUT.
- @param remote_addr: The 'name:port' of the remote feedback service host.
- """
- super(Client, self).__init__()
- self._client_id = '%s:%s' % (test_name, dut_name)
- self._remote_addr = remote_addr
- self._query_num = 0
- self._rpc_proxy = None
- self.tmp_dir = None
- self.dut_tmp_dir = None
-
-
- def _make_query_call(self, query_num, query_method, **kwargs):
- """Make an RPC query call (used by query objects).
-
- @param query_num: The unique query identifying number.
- @param query_method: The query method being called.
-
- @raise xmlrpclib.Error: An error during RPC call processing.
- """
- # XML-RPC does not support kwargs, so we just pass it as a dictionary.
- return self._rpc_proxy.query_call(self._client_id, query_num,
- query_method, kwargs)
-
-
- # Interface overrides.
- #
- def _initialize_impl(self, test, host):
- """Initializes the feedback object.
-
- Initializes an XML-RPC proxy and registers the client at the remote end.
-
- @param test: An object representing the test case.
- @param host: An object representing the DUT.
- """
- self._rpc_proxy = six.moves.xmlrpc_client.ServerProxy('http://%s' % self._remote_addr)
- try:
- self._rpc_proxy.new_client(self._client_id)
- except six.moves.xmlrpc_client.Error as e:
- raise error.TestError('Feedback client registration error: %s' % e)
- self.tmp_dir = test.tmpdir
- self.dut_tmp_dir = host.get_tmp_dir()
-
-
- def _new_query_impl(self, query_id):
- """Instantiates a new query.
-
- @param query_id: A query identifier.
-
- @return A query object.
- """
- if query_id in client.INPUT_QUERIES:
- query_cls = InputQuery
- elif query_id in client.OUTPUT_QUERIES:
- query_cls = OutputQuery
- else:
- raise error.TestError('Unknown query (%s)' % query_id)
-
- # Create, register and return a new query.
- self._query_num += 1
- try:
- self._rpc_proxy.new_query(self._client_id, query_id, self._query_num)
- except six.moves.xmlrpc_client.Error as e:
- raise error.TestError('Feedback query registration error: %s' % e)
- return query_cls(self, self._query_num)
-
-
- def _finalize_impl(self):
- """Finalizes the feedback object."""
- try:
- self._rpc_proxy.delete_client(self._client_id)
- except six.moves.xmlrpc_client.Error as e:
- raise error.TestError(
- 'Feedback client deregistration error: %s' % e)
-
-
-class _Query(object):
- """Human tester feedback query base class."""
-
- def __init__(self, client, query_num):
- super(_Query, self).__init__()
- self.client = client
- self.query_num = query_num
-
-
- def _make_query_call(self, query_method, **kwargs):
- try:
- ret, desc = self.client._make_query_call(self.query_num,
- query_method, **kwargs)
- except six.moves.xmlrpc_client.Error as e:
- ret, desc = QUERY_RET_ERROR, str(e)
-
- if ret == QUERY_RET_SUCCESS:
- return
- if ret == QUERY_RET_FAIL:
- raise error.TestFail('Tester feedback request failed: %s' % desc)
- if ret == QUERY_RET_ERROR:
- raise error.TestError('Tester feedback request error: %s' % desc)
- raise error.TestError('Unknown feedback call return code (%s)' % ret)
-
-
- # Interface overrides.
- #
- def _prepare_impl(self, **kwargs):
- self._make_query_call('prepare', **kwargs)
-
-
- def _validate_impl(self, **kwargs):
- self._make_query_call('validate', **kwargs)
-
-
-class OutputQuery(_Query, client.OutputQuery):
- """Human tester feedback output query."""
-
- def __init__(self, client, query_num):
- super(OutputQuery, self).__init__(client, query_num)
-
-
-class InputQuery(_Query, client.InputQuery):
- """Human tester feedback input query."""
-
- def __init__(self, client, query_num):
- super(InputQuery, self).__init__(client, query_num)
-
-
- # Interface override.
- #
- def _emit_impl(self):
- self._make_query_call('emit')
diff --git a/client/cros/certificate_util.py b/client/cros/certificate_util.py
deleted file mode 100644
index 3d9bf5f..0000000
--- a/client/cros/certificate_util.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import tempfile
-
-from autotest_lib.client.bin import utils
-
-class PEMCertificate(object):
- """Object enclosing a PEM certificate.
-
- Uses the "openssl" utility to report various properties of a certificate.
-
- """
- OPENSSL_COMMAND = 'openssl'
- ATTRIBUTE_SUBJECT = 'subject'
- ATTRIBUTE_FINGERPRINT = 'fingerprint'
-
- def __init__(self, pem_contents):
- self._pem_contents = pem_contents
- self._fingerprint = None
- self._subject = None
- self._subject_dict = None
-
-
- def get_attribute(self, attribute):
- """Returns the named attribute of the certificate.
-
- @param attribute string referring to the attribute to retrieve.
- @return string containing the retrieved attribute value.
-
- """
- with tempfile.NamedTemporaryFile() as temp:
- temp.write(self._pem_contents)
- temp.flush()
- output = utils.system_output(
- '%s x509 -noout -nameopt compat -%s -in %s' %
- (self.OPENSSL_COMMAND, attribute, temp.name))
- # Output is of the form "name=value..."
- return output.split('=', 1)[1]
-
-
- @property
- def fingerprint(self):
- """Returns the SHA-1 fingerprint of a certificate."""
- if self._fingerprint is None:
- self._fingerprint = self.get_attribute(self.ATTRIBUTE_FINGERPRINT)
- return self._fingerprint
-
-
- @property
- def subject(self):
- """Returns the subject DN of the certificate as a list of name=value"""
- if self._subject is None:
- subject = self.get_attribute(self.ATTRIBUTE_SUBJECT)
- # OpenSSL returns a form of:
- # " /C=US/ST=CA/L=Mountain View/CN=chromelab..."
- # but we want to return something like:
- # [ "C=US", "ST=CA", "L=Mountain View", "CN=chromelab..." ]
- self._subject = subject.lstrip(' /').split('/')
- return self._subject
-
-
- @property
- def subject_dict(self):
- """Returns the subject DN of the certificate as a dict of name:value"""
- if self._subject_dict is None:
- # Convert the list [ 'A=B', ... ] into a dict { 'A': 'B', ... }
- self._subject_dict = dict([x.split('=', 1) for x in self.subject])
- return self._subject_dict
diff --git a/client/cros/chrooted_avahi.py b/client/cros/chrooted_avahi.py
deleted file mode 100644
index 5a76f80..0000000
--- a/client/cros/chrooted_avahi.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import time
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib.cros import avahi_utils
-from autotest_lib.client.common_lib.cros import virtual_ethernet_pair
-from autotest_lib.client.common_lib.cros.network import netblock
-from autotest_lib.client.cros import network_chroot
-from autotest_lib.client.cros import service_stopper
-from autotest_lib.client.cros import tcpdump
-
-
-class ChrootedAvahi(object):
- """Helper object to start up avahi in a network chroot.
-
- Creates a virtual ethernet pair to enable communication with avahi.
- Does the necessary work to make avahi appear on DBus and allow it
- to claim its canonical service name.
-
- """
-
- SERVICES_TO_STOP = ['avahi']
- # This side has to be called something special to avoid shill touching it.
- MONITOR_IF_IP = netblock.from_addr('10.9.8.1/24')
- # We'll drop the Avahi side into our network namespace.
- AVAHI_IF_IP = netblock.from_addr('10.9.8.2/24')
- AVAHI_IF_NAME = 'pseudoethernet0'
- TCPDUMP_FILE_PATH = '/var/log/peerd_dump.pcap'
- AVAHI_CONFIG_FILE = 'etc/avahi/avahi-daemon.conf'
- AVAHI_CONFIGS = {
- AVAHI_CONFIG_FILE :
- '[server]\n'
- 'host-name-from-machine-id=yes\n'
- 'browse-domains=\n'
- 'use-ipv4=yes\n'
- 'use-ipv6=no\n'
- 'ratelimit-interval-usec=1000000\n'
- 'ratelimit-burst=1000\n'
- '[wide-area]\n'
- 'enable-wide-area=no\n'
- '[publish]\n'
- 'publish-hinfo=no\n'
- 'publish-workstation=no\n'
- 'publish-aaaa-on-ipv4=no\n'
- 'publish-a-on-ipv6=no\n'
- '[rlimits]\n'
- 'rlimit-core=0\n'
- 'rlimit-data=4194304\n'
- 'rlimit-fsize=1024\n'
- 'rlimit-nofile=768\n'
- 'rlimit-stack=4194304\n'
- 'rlimit-nproc=10\n',
-
- 'etc/passwd' :
- 'root:x:0:0:root:/root:/bin/bash\n'
- 'avahi:*:238:238::/dev/null:/bin/false\n',
-
- 'etc/group' :
- 'avahi:x:238:\n',
- }
- AVAHI_LOG_FILE = '/var/log/avahi.log'
- AVAHI_PID_FILE = 'run/avahi-daemon/pid'
- AVAHI_UP_TIMEOUT_SECONDS = 10
-
-
- def __init__(self, unchrooted_interface_name='pseudoethernet1'):
- """Construct a chrooted instance of Avahi.
-
- @param unchrooted_interface_name: string name of interface to leave
- outside the network chroot. This interface will be connected
- to the end Avahi is listening on.
-
- """
- self._unchrooted_interface_name = unchrooted_interface_name
- self._services = None
- self._vif = None
- self._tcpdump = None
- self._chroot = None
-
-
- @property
- def unchrooted_interface_name(self):
- """Get the name of the end of the VirtualEthernetPair not in the chroot.
-
- The network chroot works by isolating avahi inside with one end of a
- virtual ethernet pair. The outside world needs to interact with the
- other end in order to talk to avahi.
-
- @return name of interface not inside the chroot.
-
- """
- return self._unchrooted_interface_name
-
-
- @property
- def avahi_interface_addr(self):
- """@return string ip address of interface belonging to avahi."""
- return self.AVAHI_IF_IP.addr
-
-
- @property
- def hostname(self):
- """@return string hostname claimed by avahi on |self.dns_domain|."""
- return avahi_utils.avahi_get_hostname()
-
-
- @property
- def dns_domain(self):
- """@return string DNS domain in use by avahi (e.g. 'local')."""
- return avahi_utils.avahi_get_domain_name()
-
-
- def start(self):
- """Start up the chrooted Avahi instance."""
- # Prevent weird interactions between services which talk to Avahi.
- # TODO(wiley) Does Chrome need to die here as well?
- self._services = service_stopper.ServiceStopper(
- self.SERVICES_TO_STOP)
- self._services.stop_services()
- # We don't want Avahi talking to the real world, so give it a nice
- # fake interface to use. We'll watch the other half of the pair.
- self._vif = virtual_ethernet_pair.VirtualEthernetPair(
- interface_name=self.unchrooted_interface_name,
- peer_interface_name=self.AVAHI_IF_NAME,
- interface_ip=self.MONITOR_IF_IP.netblock,
- peer_interface_ip=self.AVAHI_IF_IP.netblock,
- # Moving one end into the chroot causes errors.
- ignore_shutdown_errors=True)
- self._vif.setup()
- if not self._vif.is_healthy:
- raise error.TestError('Failed to setup virtual ethernet pair.')
- # By default, take a packet capture of everything Avahi sends out.
- self._tcpdump = tcpdump.Tcpdump(self.unchrooted_interface_name,
- self.TCPDUMP_FILE_PATH)
- # We're going to run Avahi in a network namespace to avoid interactions
- # with the outside world.
- self._chroot = network_chroot.NetworkChroot(self.AVAHI_IF_NAME,
- self.AVAHI_IF_IP.addr,
- self.AVAHI_IF_IP.prefix_len)
- self._chroot.add_config_templates(self.AVAHI_CONFIGS)
- self._chroot.add_root_directories(['etc/avahi', 'etc/avahi/services'])
- self._chroot.add_copied_config_files(['etc/resolv.conf',
- 'etc/avahi/hosts'])
- self._chroot.add_startup_command(
- '/usr/sbin/avahi-daemon --file=/%s >%s 2>&1' %
- (self.AVAHI_CONFIG_FILE, self.AVAHI_LOG_FILE))
- self._chroot.bridge_dbus_namespaces()
- self._chroot.startup()
- # Wait for Avahi to come up, claim its DBus name, settle on a hostname.
- start_time = time.time()
- while time.time() - start_time < self.AVAHI_UP_TIMEOUT_SECONDS:
- if avahi_utils.avahi_ping():
- break
- time.sleep(0.2)
- else:
- raise error.TestFail('Avahi did not come up in time.')
-
-
- def close(self):
- """Clean up the chrooted Avahi instance."""
- if self._chroot:
- # TODO(wiley) This is sloppy. Add a helper to move the logs over.
- for line in self._chroot.get_log_contents().splitlines():
- logging.debug(line)
- self._chroot.kill_pid_file(self.AVAHI_PID_FILE)
- self._chroot.shutdown()
- if self._tcpdump:
- self._tcpdump.stop()
- if self._vif:
- self._vif.teardown()
- if self._services:
- self._services.restore_services()
diff --git a/client/cros/cros_disks.py b/client/cros/cros_disks.py
deleted file mode 100644
index 5d35e2a..0000000
--- a/client/cros/cros_disks.py
+++ /dev/null
@@ -1,903 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from __future__ import print_function
-from __future__ import division
-from __future__ import absolute_import
-
-import dbus, gobject, logging, os, stat
-from dbus.mainloop.glib import DBusGMainLoop
-import six
-from six.moves import zip
-
-import common
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import autotemp, error
-from autotest_lib.client.cros import dbus_util
-from autotest_lib.client.cros.mainloop import ExceptionForward
-from autotest_lib.client.cros.mainloop import GenericTesterMainLoop
-
-
-"""This module contains several helper classes for writing tests to verify the
-CrosDisks DBus interface. In particular, the CrosDisksTester class can be used
-to derive functional tests that interact with the CrosDisks server over DBus.
-"""
-
-
-class ExceptionSuppressor(object):
- """A context manager class for suppressing certain types of exception.
-
- An instance of this class is expected to be used with the with statement
- and takes a set of exception classes at instantiation, which are types of
- exception to be suppressed (and logged) in the code block under the with
- statement.
-
- Example:
-
- with ExceptionSuppressor(OSError, IOError):
- # An exception, which is a sub-class of OSError or IOError, is
- # suppressed in the block code under the with statement.
- """
- def __init__(self, *args):
- self.__suppressed_exc_types = (args)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- if exc_type and issubclass(exc_type, self.__suppressed_exc_types):
- try:
- logging.exception('Suppressed exception: %s(%s)',
- exc_type, exc_value)
- except Exception:
- pass
- return True
- return False
-
-
-class DBusClient(object):
- """ A base class of a DBus proxy client to test a DBus server.
-
- This class is expected to be used along with a GLib main loop and provides
- some convenient functions for testing the DBus API exposed by a DBus server.
- """
-
- def __init__(self, main_loop, bus, bus_name, object_path, timeout=None):
- """Initializes the instance.
-
- Args:
- main_loop: The GLib main loop.
- bus: The bus where the DBus server is connected to.
- bus_name: The bus name owned by the DBus server.
- object_path: The object path of the DBus server.
- timeout: Maximum time in seconds to wait for the DBus connection.
- """
- self.__signal_content = {}
- self.main_loop = main_loop
- self.signal_timeout_in_seconds = 10
- logging.debug('Getting D-Bus proxy object on bus "%s" and path "%s"',
- bus_name, object_path)
- self.proxy_object = dbus_util.get_dbus_object(bus, bus_name,
- object_path, timeout)
-
- def clear_signal_content(self, signal_name):
- """Clears the content of the signal.
-
- Args:
- signal_name: The name of the signal.
- """
- if signal_name in self.__signal_content:
- self.__signal_content[signal_name] = None
-
- def get_signal_content(self, signal_name):
- """Gets the content of a signal.
-
- Args:
- signal_name: The name of the signal.
-
- Returns:
- The content of a signal or None if the signal is not being handled.
- """
- return self.__signal_content.get(signal_name)
-
- def handle_signal(self, interface, signal_name, argument_names=()):
- """Registers a signal handler to handle a given signal.
-
- Args:
- interface: The DBus interface of the signal.
- signal_name: The name of the signal.
- argument_names: A list of argument names that the signal contains.
- """
- if signal_name in self.__signal_content:
- return
-
- self.__signal_content[signal_name] = None
-
- def signal_handler(*args):
- self.__signal_content[signal_name] = dict(zip(argument_names, args))
-
- logging.debug('Handling D-Bus signal "%s(%s)" on interface "%s"',
- signal_name, ', '.join(argument_names), interface)
- self.proxy_object.connect_to_signal(signal_name, signal_handler,
- interface)
-
- def wait_for_signal(self, signal_name):
- """Waits for the reception of a signal.
-
- Args:
- signal_name: The name of the signal to wait for.
-
- Returns:
- The content of the signal.
- """
- if signal_name not in self.__signal_content:
- return None
-
- def check_signal_content():
- context = self.main_loop.get_context()
- while context.iteration(False):
- pass
- return self.__signal_content[signal_name] is not None
-
- logging.debug('Waiting for D-Bus signal "%s"', signal_name)
- utils.poll_for_condition(condition=check_signal_content,
- desc='%s signal' % signal_name,
- timeout=self.signal_timeout_in_seconds)
- content = self.__signal_content[signal_name]
- logging.debug('Received D-Bus signal "%s(%s)"', signal_name, content)
- self.__signal_content[signal_name] = None
- return content
-
- def expect_signal(self, signal_name, expected_content):
- """Waits the the reception of a signal and verifies its content.
-
- Args:
- signal_name: The name of the signal to wait for.
- expected_content: The expected content of the signal, which can be
- partially specified. Only specified fields are
- compared between the actual and expected content.
-
- Returns:
- The actual content of the signal.
-
- Raises:
- error.TestFail: A test failure when there is a mismatch between the
- actual and expected content of the signal.
- """
- actual_content = self.wait_for_signal(signal_name)
- logging.debug("%s signal: expected=%s actual=%s",
- signal_name, expected_content, actual_content)
- for argument, expected_value in six.iteritems(expected_content):
- if argument not in actual_content:
- raise error.TestFail(
- ('%s signal missing "%s": expected=%s, actual=%s') %
- (signal_name, argument, expected_content, actual_content))
-
- if actual_content[argument] != expected_value:
- raise error.TestFail(
- ('%s signal not matched on "%s": expected=%s, actual=%s') %
- (signal_name, argument, expected_content, actual_content))
- return actual_content
-
-
-class CrosDisksClient(DBusClient):
- """A DBus proxy client for testing the CrosDisks DBus server.
- """
-
- CROS_DISKS_BUS_NAME = 'org.chromium.CrosDisks'
- CROS_DISKS_INTERFACE = 'org.chromium.CrosDisks'
- CROS_DISKS_OBJECT_PATH = '/org/chromium/CrosDisks'
- DBUS_PROPERTIES_INTERFACE = 'org.freedesktop.DBus.Properties'
- FORMAT_COMPLETED_SIGNAL = 'FormatCompleted'
- FORMAT_COMPLETED_SIGNAL_ARGUMENTS = (
- 'status', 'path'
- )
- MOUNT_COMPLETED_SIGNAL = 'MountCompleted'
- MOUNT_COMPLETED_SIGNAL_ARGUMENTS = (
- 'status', 'source_path', 'source_type', 'mount_path'
- )
- RENAME_COMPLETED_SIGNAL = 'RenameCompleted'
- RENAME_COMPLETED_SIGNAL_ARGUMENTS = (
- 'status', 'path'
- )
-
- def __init__(self, main_loop, bus, timeout_seconds=None):
- """Initializes the instance.
-
- Args:
- main_loop: The GLib main loop.
- bus: The bus where the DBus server is connected to.
- timeout_seconds: Maximum time in seconds to wait for the DBus
- connection.
- """
- super(CrosDisksClient, self).__init__(main_loop, bus,
- self.CROS_DISKS_BUS_NAME,
- self.CROS_DISKS_OBJECT_PATH,
- timeout_seconds)
- self.interface = dbus.Interface(self.proxy_object,
- self.CROS_DISKS_INTERFACE)
- self.properties = dbus.Interface(self.proxy_object,
- self.DBUS_PROPERTIES_INTERFACE)
- self.handle_signal(self.CROS_DISKS_INTERFACE,
- self.FORMAT_COMPLETED_SIGNAL,
- self.FORMAT_COMPLETED_SIGNAL_ARGUMENTS)
- self.handle_signal(self.CROS_DISKS_INTERFACE,
- self.MOUNT_COMPLETED_SIGNAL,
- self.MOUNT_COMPLETED_SIGNAL_ARGUMENTS)
- self.handle_signal(self.CROS_DISKS_INTERFACE,
- self.RENAME_COMPLETED_SIGNAL,
- self.RENAME_COMPLETED_SIGNAL_ARGUMENTS)
-
- def enumerate_devices(self):
- """Invokes the CrosDisks EnumerateMountableDevices method.
-
- Returns:
- A list of sysfs paths of devices that are recognized by
- CrosDisks.
- """
- return self.interface.EnumerateDevices()
-
- def get_device_properties(self, path):
- """Invokes the CrosDisks GetDeviceProperties method.
-
- Args:
- path: The device path.
-
- Returns:
- The properties of the device in a dictionary.
- """
- return self.interface.GetDeviceProperties(path)
-
- def format(self, path, filesystem_type=None, options=None):
- """Invokes the CrosDisks Format method.
-
- Args:
- path: The device path to format.
- filesystem_type: The filesystem type used for formatting the device.
- options: A list of options used for formatting the device.
- """
- if filesystem_type is None:
- filesystem_type = ''
- if options is None:
- options = []
- self.clear_signal_content(self.FORMAT_COMPLETED_SIGNAL)
- self.interface.Format(path, filesystem_type,
- dbus.Array(options, signature='s'))
-
- def wait_for_format_completion(self):
- """Waits for the CrosDisks FormatCompleted signal.
-
- Returns:
- The content of the FormatCompleted signal.
- """
- return self.wait_for_signal(self.FORMAT_COMPLETED_SIGNAL)
-
- def expect_format_completion(self, expected_content):
- """Waits and verifies for the CrosDisks FormatCompleted signal.
-
- Args:
- expected_content: The expected content of the FormatCompleted
- signal, which can be partially specified.
- Only specified fields are compared between the
- actual and expected content.
-
- Returns:
- The actual content of the FormatCompleted signal.
-
- Raises:
- error.TestFail: A test failure when there is a mismatch between the
- actual and expected content of the FormatCompleted
- signal.
- """
- return self.expect_signal(self.FORMAT_COMPLETED_SIGNAL,
- expected_content)
-
- def rename(self, path, volume_name=None):
- """Invokes the CrosDisks Rename method.
-
- Args:
- path: The device path to rename.
- volume_name: The new name used for renaming.
- """
- if volume_name is None:
- volume_name = ''
- self.clear_signal_content(self.RENAME_COMPLETED_SIGNAL)
- self.interface.Rename(path, volume_name)
-
- def wait_for_rename_completion(self):
- """Waits for the CrosDisks RenameCompleted signal.
-
- Returns:
- The content of the RenameCompleted signal.
- """
- return self.wait_for_signal(self.RENAME_COMPLETED_SIGNAL)
-
- def expect_rename_completion(self, expected_content):
- """Waits and verifies for the CrosDisks RenameCompleted signal.
-
- Args:
- expected_content: The expected content of the RenameCompleted
- signal, which can be partially specified.
- Only specified fields are compared between the
- actual and expected content.
-
- Returns:
- The actual content of the RenameCompleted signal.
-
- Raises:
- error.TestFail: A test failure when there is a mismatch between the
- actual and expected content of the RenameCompleted
- signal.
- """
- return self.expect_signal(self.RENAME_COMPLETED_SIGNAL,
- expected_content)
-
- def mount(self, path, filesystem_type=None, options=None):
- """Invokes the CrosDisks Mount method.
-
- Args:
- path: The device path to mount.
- filesystem_type: The filesystem type used for mounting the device.
- options: A list of options used for mounting the device.
- """
- if filesystem_type is None:
- filesystem_type = ''
- if options is None:
- options = []
- self.clear_signal_content(self.MOUNT_COMPLETED_SIGNAL)
- self.interface.Mount(path, filesystem_type,
- dbus.Array(options, signature='s'))
-
- def unmount(self, path, options=None):
- """Invokes the CrosDisks Unmount method.
-
- Args:
- path: The device or mount path to unmount.
- options: A list of options used for unmounting the path.
-
- Returns:
- The mount error code.
- """
- if options is None:
- options = []
- return self.interface.Unmount(path, dbus.Array(options, signature='s'))
-
- def wait_for_mount_completion(self):
- """Waits for the CrosDisks MountCompleted signal.
-
- Returns:
- The content of the MountCompleted signal.
- """
- return self.wait_for_signal(self.MOUNT_COMPLETED_SIGNAL)
-
- def expect_mount_completion(self, expected_content):
- """Waits and verifies for the CrosDisks MountCompleted signal.
-
- Args:
- expected_content: The expected content of the MountCompleted
- signal, which can be partially specified.
- Only specified fields are compared between the
- actual and expected content.
-
- Returns:
- The actual content of the MountCompleted signal.
-
- Raises:
- error.TestFail: A test failure when there is a mismatch between the
- actual and expected content of the MountCompleted
- signal.
- """
- return self.expect_signal(self.MOUNT_COMPLETED_SIGNAL,
- expected_content)
-
- def add_loopback_to_allowlist(self, path):
- """Adds a device by its path to the allowlist for testing.
-
- Args:
- path: path to the /dev/loopX device.
- """
- sys_path = '/sys/devices/virtual/block/' + os.path.basename(path)
- self.interface.AddDeviceToAllowlist(sys_path)
-
- def remove_loopback_from_allowlist(self, path):
- """Removes a device by its sys path from the allowlist for testing.
-
- Args:
- path: path to the /dev/loopX device.
- """
- sys_path = '/sys/devices/virtual/block/' + os.path.basename(path)
- self.interface.RemoveDeviceFromAllowlist(sys_path)
-
-
-class CrosDisksTester(GenericTesterMainLoop):
- """A base tester class for testing the CrosDisks server.
-
- A derived class should override the get_tests method to return a list of
- test methods. The perform_one_test method invokes each test method in the
- list to verify some functionalities of CrosDisks server.
- """
- def __init__(self, test):
- bus_loop = DBusGMainLoop(set_as_default=True)
- self.bus = dbus.SystemBus(mainloop=bus_loop)
- self.main_loop = gobject.MainLoop()
- super(CrosDisksTester, self).__init__(test, self.main_loop)
- self.cros_disks = CrosDisksClient(self.main_loop, self.bus)
-
- def get_tests(self):
- """Returns a list of test methods to be invoked by perform_one_test.
-
- A derived class should override this method.
-
- Returns:
- A list of test methods.
- """
- return []
-
- @ExceptionForward
- def perform_one_test(self):
- """Exercises each test method in the list returned by get_tests.
- """
- tests = self.get_tests()
- self.remaining_requirements = set([test.__name__ for test in tests])
- for test in tests:
- test()
- self.requirement_completed(test.__name__)
-
- def reconnect_client(self, timeout_seconds=None):
- """"Reconnect the CrosDisks DBus client.
-
- Args:
- timeout_seconds: Maximum time in seconds to wait for the DBus
- connection.
- """
- self.cros_disks = CrosDisksClient(self.main_loop, self.bus,
- timeout_seconds)
-
-
-class FilesystemTestObject(object):
- """A base class to represent a filesystem test object.
-
- A filesystem test object can be a file, directory or symbolic link.
- A derived class should override the _create and _verify method to implement
- how the test object should be created and verified, respectively, on a
- filesystem.
- """
- def __init__(self, path, content, mode):
- """Initializes the instance.
-
- Args:
- path: The relative path of the test object.
- content: The content of the test object.
- mode: The file permissions given to the test object.
- """
- self._path = path
- self._content = content
- self._mode = mode
-
- def create(self, base_dir):
- """Creates the test object in a base directory.
-
- Args:
- base_dir: The base directory where the test object is created.
-
- Returns:
- True if the test object is created successfully or False otherwise.
- """
- if not self._create(base_dir):
- logging.debug('Failed to create filesystem test object at "%s"',
- os.path.join(base_dir, self._path))
- return False
- return True
-
- def verify(self, base_dir):
- """Verifies the test object in a base directory.
-
- Args:
- base_dir: The base directory where the test object is expected to be
- found.
-
- Returns:
- True if the test object is found in the base directory and matches
- the expected content, or False otherwise.
- """
- if not self._verify(base_dir):
- logging.error('Mismatched filesystem object at "%s"',
- os.path.join(base_dir, self._path))
- return False
- return True
-
- def _create(self, base_dir):
- return False
-
- def _verify(self, base_dir):
- return False
-
-
-class FilesystemTestDirectory(FilesystemTestObject):
- """A filesystem test object that represents a directory."""
-
- def __init__(self, path, content, mode=stat.S_IRWXU|stat.S_IRGRP| \
- stat.S_IXGRP|stat.S_IROTH|stat.S_IXOTH, strict=False):
- """Initializes the directory.
-
- Args:
- path: The name of this directory.
- content: The list of items in this directory.
- mode: The file permissions given to this directory.
- strict: Whether verify() strictly compares directory contents for
- equality. This flag only applies to this directory, and not
- to any child directories.
- """
- super(FilesystemTestDirectory, self).__init__(path, content, mode)
- self._strict = strict
-
- def _create(self, base_dir):
- path = os.path.join(base_dir, self._path) if self._path else base_dir
-
- if self._path:
- with ExceptionSuppressor(OSError):
- os.makedirs(path)
- os.chmod(path, self._mode)
-
- if not os.path.isdir(path):
- return False
-
- for content in self._content:
- if not content.create(path):
- return False
-
- return True
-
- def _verify(self, base_dir):
- path = os.path.join(base_dir, self._path) if self._path else base_dir
- if not os.path.isdir(path):
- return False
-
- result = True
- seen = set()
-
- for content in self._content:
- if not content.verify(path):
- result = False
- seen.add(content._path)
-
- if self._strict:
- for child in os.listdir(path):
- if child not in seen:
- logging.error('Unexpected filesystem entry "%s"',
- os.path.join(path, child))
- result = False
-
- return result
-
-
-class FilesystemTestFile(FilesystemTestObject):
- """A filesystem test object that represents a file."""
-
- def __init__(self,
- path,
- content,
- mode=stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP \
- | stat.S_IROTH,
- mtime=None):
- """Initializes the file.
-
- Args:
- path: The name of this file.
- content: A byte string with the expected file contents.
- mode: The file permissions given to this file.
- mtime: If set, the expected file modification timestamp.
- """
- super(FilesystemTestFile, self).__init__(path, content, mode)
- self._mtime = mtime
-
- def _create(self, base_dir):
- path = os.path.join(base_dir, self._path)
- with ExceptionSuppressor(IOError):
- with open(path, 'wb+') as f:
- f.write(self._content)
- with ExceptionSuppressor(OSError):
- os.chmod(path, self._mode)
- return True
- return False
-
- def _verify(self, base_dir):
- path = os.path.join(base_dir, self._path)
- with ExceptionSuppressor(IOError):
- result = True
-
- if self._content is not None:
- with open(path, 'rb') as f:
- if f.read() != self._content:
- logging.error('Mismatched file contents for "%s"',
- path)
- result = False
-
- if self._mtime is not None:
- st = os.stat(path)
- if st.st_mtime != self._mtime:
- logging.error(
- 'Mismatched file modification time for "%s": ' +
- 'want %d, got %d', path, self._mtime, st.st_mtime)
- result = False
-
- return result
-
- return False
-
-
-class DefaultFilesystemTestContent(FilesystemTestDirectory):
- def __init__(self):
- super(DefaultFilesystemTestContent, self).__init__('', [
- FilesystemTestFile('file1', '0123456789'),
- FilesystemTestDirectory('dir1', [
- FilesystemTestFile('file1', ''),
- FilesystemTestFile('file2', 'abcdefg'),
- FilesystemTestDirectory('dir2', [
- FilesystemTestFile('file3', 'abcdefg'),
- FilesystemTestFile('file4', 'a' * 65536),
- ]),
- ]),
- ], stat.S_IRWXU|stat.S_IRGRP|stat.S_IXGRP|stat.S_IROTH|stat.S_IXOTH)
-
-
-class VirtualFilesystemImage(object):
- def __init__(self, block_size, block_count, filesystem_type,
- *args, **kwargs):
- """Initializes the instance.
-
- Args:
- block_size: The number of bytes of each block in the image.
- block_count: The number of blocks in the image.
- filesystem_type: The filesystem type to be given to the mkfs
- program for formatting the image.
-
- Keyword Args:
- mount_filesystem_type: The filesystem type to be given to the
- mount program for mounting the image.
- mkfs_options: A list of options to be given to the mkfs program.
- """
- self._block_size = block_size
- self._block_count = block_count
- self._filesystem_type = filesystem_type
- self._mount_filesystem_type = kwargs.get('mount_filesystem_type')
- if self._mount_filesystem_type is None:
- self._mount_filesystem_type = filesystem_type
- self._mkfs_options = kwargs.get('mkfs_options')
- if self._mkfs_options is None:
- self._mkfs_options = []
- self._image_file = None
- self._loop_device = None
- self._loop_device_stat = None
- self._mount_dir = None
-
- def __del__(self):
- with ExceptionSuppressor(Exception):
- self.clean()
-
- def __enter__(self):
- self.create()
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.clean()
- return False
-
- def _remove_temp_path(self, temp_path):
- """Removes a temporary file or directory created using autotemp."""
- if temp_path:
- with ExceptionSuppressor(Exception):
- path = temp_path.name
- temp_path.clean()
- logging.debug('Removed "%s"', path)
-
- def _remove_image_file(self):
- """Removes the image file if one has been created."""
- self._remove_temp_path(self._image_file)
- self._image_file = None
-
- def _remove_mount_dir(self):
- """Removes the mount directory if one has been created."""
- self._remove_temp_path(self._mount_dir)
- self._mount_dir = None
-
- @property
- def image_file(self):
- """Gets the path of the image file.
-
- Returns:
- The path of the image file or None if no image file has been
- created.
- """
- return self._image_file.name if self._image_file else None
-
- @property
- def loop_device(self):
- """Gets the loop device where the image file is attached to.
-
- Returns:
- The path of the loop device where the image file is attached to or
- None if no loop device is attaching the image file.
- """
- return self._loop_device
-
- @property
- def mount_dir(self):
- """Gets the directory where the image file is mounted to.
-
- Returns:
- The directory where the image file is mounted to or None if no
- mount directory has been created.
- """
- return self._mount_dir.name if self._mount_dir else None
-
- def create(self):
- """Creates a zero-filled image file with the specified size.
-
- The created image file is temporary and removed when clean()
- is called.
- """
- self.clean()
- self._image_file = autotemp.tempfile(unique_id='fsImage')
- try:
- logging.debug('Creating zero-filled image file at "%s"',
- self._image_file.name)
- utils.run('dd if=/dev/zero of=%s bs=%s count=%s' %
- (self._image_file.name, self._block_size,
- self._block_count))
- except error.CmdError as exc:
- self._remove_image_file()
- message = 'Failed to create filesystem image: %s' % exc
- raise RuntimeError(message)
-
- def clean(self):
- """Removes the image file if one has been created.
-
- Before removal, the image file is detached from the loop device that
- it is attached to.
- """
- self.detach_from_loop_device()
- self._remove_image_file()
-
- def attach_to_loop_device(self):
- """Attaches the created image file to a loop device.
-
- Creates the image file, if one has not been created, by calling
- create().
-
- Returns:
- The path of the loop device where the image file is attached to.
- """
- if self._loop_device:
- return self._loop_device
-
- if not self._image_file:
- self.create()
-
- logging.debug('Attaching image file "%s" to loop device',
- self._image_file.name)
- utils.run('losetup -f %s' % self._image_file.name)
- output = utils.system_output('losetup -j %s' % self._image_file.name)
- # output should look like: "/dev/loop0: [000d]:6329 (/tmp/test.img)"
- self._loop_device = output.split(':')[0]
- logging.debug('Attached image file "%s" to loop device "%s"',
- self._image_file.name, self._loop_device)
-
- self._loop_device_stat = os.stat(self._loop_device)
- logging.debug('Loop device "%s" (uid=%d, gid=%d, permissions=%04o)',
- self._loop_device,
- self._loop_device_stat.st_uid,
- self._loop_device_stat.st_gid,
- stat.S_IMODE(self._loop_device_stat.st_mode))
- return self._loop_device
-
- def detach_from_loop_device(self):
- """Detaches the image file from the loop device."""
- if not self._loop_device:
- return
-
- self.unmount()
-
- logging.debug('Cleaning up remaining mount points of loop device "%s"',
- self._loop_device)
- utils.run('umount -f %s' % self._loop_device, ignore_status=True)
-
- logging.debug('Restore ownership/permissions of loop device "%s"',
- self._loop_device)
- os.chmod(self._loop_device,
- stat.S_IMODE(self._loop_device_stat.st_mode))
- os.chown(self._loop_device,
- self._loop_device_stat.st_uid, self._loop_device_stat.st_gid)
-
- logging.debug('Detaching image file "%s" from loop device "%s"',
- self._image_file.name, self._loop_device)
- utils.run('losetup -d %s' % self._loop_device)
- self._loop_device = None
-
- def format(self):
- """Formats the image file as the specified filesystem."""
- self.attach_to_loop_device()
- try:
- logging.debug('Formatting image file at "%s" as "%s" filesystem',
- self._image_file.name, self._filesystem_type)
- utils.run('yes | mkfs -t %s %s %s' %
- (self._filesystem_type, ' '.join(self._mkfs_options),
- self._loop_device))
- logging.debug('blkid: %s', utils.system_output(
- 'blkid -c /dev/null %s' % self._loop_device,
- ignore_status=True))
- except error.CmdError as exc:
- message = 'Failed to format filesystem image: %s' % exc
- raise RuntimeError(message)
-
- def mount(self, options=None):
- """Mounts the image file to a directory.
-
- Args:
- options: An optional list of mount options.
- """
- if self._mount_dir:
- return self._mount_dir.name
-
- if options is None:
- options = []
-
- options_arg = ','.join(options)
- if options_arg:
- options_arg = '-o ' + options_arg
-
- self.attach_to_loop_device()
- self._mount_dir = autotemp.tempdir(unique_id='fsImage')
- try:
- logging.debug('Mounting image file "%s" (%s) to directory "%s"',
- self._image_file.name, self._loop_device,
- self._mount_dir.name)
- utils.run('mount -t %s %s %s %s' %
- (self._mount_filesystem_type, options_arg,
- self._loop_device, self._mount_dir.name))
- except error.CmdError as exc:
- self._remove_mount_dir()
- message = ('Failed to mount virtual filesystem image "%s": %s' %
- (self._image_file.name, exc))
- raise RuntimeError(message)
- return self._mount_dir.name
-
- def unmount(self):
- """Unmounts the image file from the mounted directory."""
- if not self._mount_dir:
- return
-
- try:
- logging.debug('Unmounting image file "%s" (%s) from directory "%s"',
- self._image_file.name, self._loop_device,
- self._mount_dir.name)
- utils.run('umount %s' % self._mount_dir.name)
- except error.CmdError as exc:
- message = ('Failed to unmount virtual filesystem image "%s": %s' %
- (self._image_file.name, exc))
- raise RuntimeError(message)
- finally:
- self._remove_mount_dir()
-
- def get_volume_label(self):
- """Gets volume name information of |self._loop_device|
-
- @return a string with volume name if it exists.
- """
- # This script is run as root in a normal autotest run,
- # so this works: It doesn't have access to the necessary info
- # when run as a non-privileged user
- cmd = "blkid -c /dev/null -o udev %s" % self._loop_device
- output = utils.system_output(cmd, ignore_status=True)
-
- for line in output.splitlines():
- udev_key, udev_val = line.split('=')
-
- if udev_key == 'ID_FS_LABEL':
- return udev_val
-
- return None
diff --git a/client/cros/cryptohome.py b/client/cros/cryptohome.py
deleted file mode 100644
index dce1f72..0000000
--- a/client/cros/cryptohome.py
+++ /dev/null
@@ -1,786 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import dbus, gobject, logging, os, random, re, shutil, string, sys, time
-from dbus.mainloop.glib import DBusGMainLoop
-from six.moves import map
-
-import common
-
-from autotest_lib.client.cros import constants
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.cros.tpm import *
-from autotest_lib.client.cros.cros_disks import DBusClient
-
-ATTESTATION_CMD = '/usr/bin/attestation_client'
-CRYPTOHOME_CMD = '/usr/sbin/cryptohome'
-TPM_MANAGER_CMD = '/usr/bin/tpm_manager_client'
-GUEST_USER_NAME = '$guest'
-UNAVAILABLE_ACTION = 'Unknown action or no action given.'
-MOUNT_RETRY_COUNT = 20
-TEMP_MOUNT_PATTERN = '/home/.shadow/%s/temporary_mount'
-VAULT_PATH_PATTERN = '/home/.shadow/%s/vault'
-
-DBUS_PROTOS_DEP = 'dbus_protos'
-
-
-def get_user_hash(user):
- """Get the user hash for the given user."""
- return utils.system_output(['cryptohome', '--action=obfuscate_user',
- '--user=%s' % user])
-
-
-def user_path(user):
- """Get the user mount point for the given user."""
- return utils.system_output(['cryptohome-path', 'user', user])
-
-
-def system_path(user):
- """Get the system mount point for the given user."""
- return utils.system_output(['cryptohome-path', 'system', user])
-
-
-def temporary_mount_path(user):
- """Get the vault mount path used during crypto-migration for the user.
-
- @param user: user the temporary mount should be for
- """
- return TEMP_MOUNT_PATTERN % (get_user_hash(user))
-
-
-def vault_path(user):
- """ Get the vault path for the given user.
-
- @param user: The user who's vault path should be returned.
- """
- return VAULT_PATH_PATTERN % (get_user_hash(user))
-
-
-def ensure_clean_cryptohome_for(user, password=None):
- """Ensure a fresh cryptohome exists for user.
-
- @param user: user who needs a shiny new cryptohome.
- @param password: if unset, a random password will be used.
- """
- if not password:
- password = ''.join(random.sample(string.ascii_lowercase, 6))
- unmount_vault(user)
- remove_vault(user)
- mount_vault(user, password, create=True)
-
-
-def get_tpm_password():
- """Get the TPM password.
-
- Returns:
- A TPM password
- """
- out = run_cmd(TPM_MANAGER_CMD + ' status')
- match = re.search('owner_password: (\w*)', out)
- password = ''
- if match:
- hex_pass = match.group(1)
- password = ''.join(
- chr(int(hex_pass[i:i + 2], 16))
- for i in range(0, len(hex_pass), 2))
- return password
-
-
-def get_fwmp(cleared_fwmp=False):
- """Get the firmware management parameters.
-
- Args:
- cleared_fwmp: True if the space should not exist.
-
- Returns:
- The dictionary with the FWMP contents, for example:
- { 'flags': 0xbb41,
- 'developer_key_hash':
- "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
- 000\000\000\000\000\000\000\000\000\000\000",
- }
- or a dictionary with the Error if the FWMP doesn't exist and
- cleared_fwmp is True
- { 'error': 'CRYPTOHOME_ERROR_FIRMWARE_MANAGEMENT_PARAMETERS_INVALID' }
-
- Raises:
- ChromiumOSError if any expected field is not found in the cryptohome
- output. This would typically happen when FWMP state does not match
- 'clreared_fwmp'
- """
- out = run_cmd(CRYPTOHOME_CMD +
- ' --action=get_firmware_management_parameters')
-
- if cleared_fwmp:
- fields = ['error']
- else:
- fields = ['flags', 'developer_key_hash']
-
- status = {}
- for field in fields:
- match = re.search('%s: (\S+)\n' % field, out)
- if not match:
- raise ChromiumOSError('Invalid FWMP field %s: "%s".' %
- (field, out))
- status[field] = match.group(1)
- return status
-
-
-def set_fwmp(flags, developer_key_hash=None):
- """Set the firmware management parameter contents.
-
- Args:
- developer_key_hash: a string with the developer key hash
-
- Raises:
- ChromiumOSError cryptohome cannot set the FWMP contents
- """
- cmd = (CRYPTOHOME_CMD +
- ' --action=set_firmware_management_parameters '
- '--flags=' + flags)
- if developer_key_hash:
- cmd += ' --developer_key_hash=' + developer_key_hash
-
- out = run_cmd(cmd)
- if 'SetFirmwareManagementParameters success' not in out:
- raise ChromiumOSError('failed to set FWMP: %s' % out)
-
-
-def is_tpm_lockout_in_effect():
- """Returns true if the TPM lockout is in effect; false otherwise."""
- status = get_tpm_da_info()
- return status.get('dictionary_attack_lockout_in_effect', None)
-
-
-def get_login_status():
- """Query the login status
-
- Returns:
- A login status dictionary containing:
- { 'owner_user_exists': True|False }
- """
- out = run_cmd(CRYPTOHOME_CMD + ' --action=get_login_status')
- status = {}
- for field in ['owner_user_exists']:
- match = re.search('%s: (true|false)' % field, out)
- if not match:
- raise ChromiumOSError('Invalid login status: "%s".' % out)
- status[field] = match.group(1) == 'true'
- return status
-
-
-def get_install_attribute_status():
- """Query the install attribute status
-
- Returns:
- A status string, which could be:
- "UNKNOWN"
- "TPM_NOT_OWNED"
- "FIRST_INSTALL"
- "VALID"
- "INVALID"
- """
- out = run_cmd(CRYPTOHOME_CMD + ' --action=install_attributes_get_status')
- return out.strip()
-
-
-def get_tpm_attestation_status():
- """Get the TPM attestation status. Works similar to get_tpm_status().
- """
- out = run_cmd(ATTESTATION_CMD + ' status')
- status = {}
- for field in ['prepared_for_enrollment', 'enrolled']:
- match = re.search('%s: (true|false)' % field, out)
- if not match:
- raise ChromiumOSError('Invalid attestation status: "%s".' % out)
- status[field] = match.group(1) == 'true'
- return status
-
-
-def take_tpm_ownership(wait_for_ownership=True):
- """Take TPM owernship.
-
- Args:
- wait_for_ownership: block until TPM is owned if true
- """
- run_cmd(CRYPTOHOME_CMD + ' --action=tpm_take_ownership')
- if wait_for_ownership:
- # Note that waiting for the 'Ready' flag is more correct than waiting
- # for the 'Owned' flag, as the latter is set by cryptohomed before some
- # of the ownership tasks are completed.
- utils.poll_for_condition(
- lambda: get_tpm_status()['Ready'],
- timeout=300,
- exception=error.TestError('Timeout waiting for TPM ownership'))
-
-
-def verify_ek():
- """Verify the TPM endorsement key.
-
- Returns true if EK is valid.
- """
- cmd = CRYPTOHOME_CMD + ' --action=tpm_verify_ek'
- return (utils.system(cmd, ignore_status=True) == 0)
-
-
-def remove_vault(user):
- """Remove the given user's vault from the shadow directory."""
- logging.debug('user is %s', user)
- user_hash = get_user_hash(user)
- logging.debug('Removing vault for user %s with hash %s', user, user_hash)
- cmd = CRYPTOHOME_CMD + ' --action=remove --force --user=%s' % user
- run_cmd(cmd)
- # Ensure that the vault does not exist.
- if os.path.exists(os.path.join(constants.SHADOW_ROOT, user_hash)):
- raise ChromiumOSError('Cryptohome could not remove the user\'s vault.')
-
-
-def remove_all_vaults():
- """Remove any existing vaults from the shadow directory.
-
- This function must be run with root privileges.
- """
- for item in os.listdir(constants.SHADOW_ROOT):
- abs_item = os.path.join(constants.SHADOW_ROOT, item)
- if os.path.isdir(os.path.join(abs_item, 'vault')):
- logging.debug('Removing vault for user with hash %s', item)
- shutil.rmtree(abs_item)
-
-
-def mount_vault(user, password, create=False, key_label=None):
- """Mount the given user's vault. Mounts should be created by calling this
- function with create=True, and can be used afterwards with create=False.
- Only try to mount existing vaults created with this function.
-
- """
- args = [CRYPTOHOME_CMD, '--action=mount_ex', '--user=%s' % user,
- '--password=%s' % password, '--async']
- if create:
- args += ['--create']
- if key_label is None:
- key_label = 'bar'
- if key_label is not None:
- args += ['--key_label=%s' % key_label]
- logging.info(run_cmd(' '.join(args)))
- # Ensure that the vault exists in the shadow directory.
- user_hash = get_user_hash(user)
- if not os.path.exists(os.path.join(constants.SHADOW_ROOT, user_hash)):
- retry = 0
- mounted = False
- while retry < MOUNT_RETRY_COUNT and not mounted:
- time.sleep(1)
- logging.info("Retry %s", str(retry + 1))
- run_cmd(' '.join(args))
- # TODO: Remove this additional call to get_user_hash(user) when
- # crbug.com/690994 is fixed
- user_hash = get_user_hash(user)
- if os.path.exists(os.path.join(constants.SHADOW_ROOT, user_hash)):
- mounted = True
- retry += 1
- if not mounted:
- raise ChromiumOSError('Cryptohome vault not found after mount.')
- # Ensure that the vault is mounted.
- if not is_permanent_vault_mounted(user=user, allow_fail=True):
- raise ChromiumOSError('Cryptohome created a vault but did not mount.')
-
-
-def mount_guest():
- """Mount the guest vault."""
- args = [CRYPTOHOME_CMD, '--action=mount_guest_ex']
- logging.info(run_cmd(' '.join(args)))
- # Ensure that the guest vault is mounted.
- if not is_guest_vault_mounted(allow_fail=True):
- raise ChromiumOSError('Cryptohome did not mount guest vault.')
-
-
-def test_auth(user, password):
- """Test key auth."""
- cmd = [CRYPTOHOME_CMD, '--action=check_key_ex', '--user=%s' % user,
- '--password=%s' % password, '--async']
- out = run_cmd(' '.join(cmd))
- logging.info(out)
- return 'Key authenticated.' in out
-
-
-def add_le_key(user, password, new_password, new_key_label):
- """Add low entropy key."""
- args = [CRYPTOHOME_CMD, '--action=add_key_ex', '--key_policy=le',
- '--user=%s' % user, '--password=%s' % password,
- '--new_key_label=%s' % new_key_label,
- '--new_password=%s' % new_password]
- logging.info(run_cmd(' '.join(args)))
-
-
-def remove_key(user, password, remove_key_label):
- """Remove a key."""
- args = [CRYPTOHOME_CMD, '--action=remove_key_ex', '--user=%s' % user,
- '--password=%s' % password,
- '--remove_key_label=%s' % remove_key_label]
- logging.info(run_cmd(' '.join(args)))
-
-
-def get_supported_key_policies():
- """Get supported key policies."""
- args = [CRYPTOHOME_CMD, '--action=get_supported_key_policies']
- out = run_cmd(' '.join(args))
- logging.info(out)
- policies = {}
- for line in out.splitlines():
- match = re.search(' ([^:]+): (true|false)', line)
- if match:
- policies[match.group(1)] = match.group(2) == 'true'
- return policies
-
-
-def unmount_vault(user=None):
- """Unmount the given user's vault.
-
- Once unmounting for a specific user is supported, the user parameter will
- name the target user. See crosbug.com/20778.
- """
- run_cmd(CRYPTOHOME_CMD + ' --action=unmount')
- # Ensure that the vault is not mounted.
- if user is not None and is_vault_mounted(user, allow_fail=True):
- raise ChromiumOSError('Cryptohome did not unmount the user.')
-
-
-def __get_mount_info(mount_point, allow_fail=False):
- """Get information about the active mount at a given mount point."""
- cryptohomed_path = '/proc/$(pgrep cryptohomed)/mounts'
- # 'cryptohome-namespace-mounter' is currently only used for Guest sessions.
- mounter_exe = 'cryptohome-namespace-mounter'
- mounter_pid = 'pgrep -o -f %s' % mounter_exe
- mounter_path = '/proc/$(%s)/mounts' % mounter_pid
-
- status = utils.system(mounter_pid, ignore_status=True)
- # Only check for these mounts if the mounter executable is running.
- if status == 0:
- try:
- logging.debug('Active %s mounts:\n' % mounter_exe +
- utils.system_output('cat %s' % mounter_path))
- ns_mount_line = utils.system_output(
- 'grep %s %s' % (mount_point, mounter_path),
- ignore_status=allow_fail)
- except Exception as e:
- logging.error(e)
- raise ChromiumOSError('Could not get info about cryptohome vault '
- 'through %s. See logs for complete '
- 'mount-point.'
- % os.path.dirname(str(mount_point)))
- return ns_mount_line.split()
-
- try:
- logging.debug('Active cryptohome mounts:\n%s',
- utils.system_output('cat %s' % cryptohomed_path))
- mount_line = utils.system_output(
- 'grep %s %s' % (mount_point, cryptohomed_path),
- ignore_status=allow_fail)
- except Exception as e:
- logging.error(e)
- raise ChromiumOSError('Could not get info about cryptohome vault '
- 'through %s. See logs for complete mount-point.'
- % os.path.dirname(str(mount_point)))
- return mount_line.split()
-
-
-def __get_user_mount_info(user, allow_fail=False):
- """Get information about the active mounts for a given user.
-
- Returns the active mounts at the user's user and system mount points. If no
- user is given, the active mount at the shared mount point is returned
- (regular users have a bind-mount at this mount point for backwards
- compatibility; the guest user has a mount at this mount point only).
- """
- return [__get_mount_info(mount_point=user_path(user),
- allow_fail=allow_fail),
- __get_mount_info(mount_point=system_path(user),
- allow_fail=allow_fail)]
-
-def is_vault_mounted(user, regexes=None, allow_fail=False):
- """Check whether a vault is mounted for the given user.
-
- user: If no user is given, the shared mount point is checked, determining
- whether a vault is mounted for any user.
- regexes: dictionary of regexes to matches against the mount information.
- The mount filesystem for the user's user and system mounts point must
- match one of the keys.
- The mount source point must match the selected device regex.
-
- In addition, if mounted over ext4, we check the directory is encrypted.
- """
- if regexes is None:
- regexes = {
- constants.CRYPTOHOME_FS_REGEX_ANY :
- constants.CRYPTOHOME_DEV_REGEX_ANY
- }
- user_mount_info = __get_user_mount_info(user=user, allow_fail=allow_fail)
- for mount_info in user_mount_info:
- # Look at each /proc/../mount lines that match mount point for a given
- # user user/system mount (/home/user/.... /home/root/...)
-
- # We should have at least 3 arguments (source, mount, type of mount)
- if len(mount_info) < 3:
- return False
-
- device_regex = None
- for fs_regex in regexes.keys():
- if re.match(fs_regex, mount_info[2]):
- device_regex = regexes[fs_regex]
- break
-
- if not device_regex:
- # The third argument in not the expected mount point type.
- return False
-
- # Check if the mount source match the device regex: it can be loose,
- # (anything) or stricter if we expect guest filesystem.
- if not re.match(device_regex, mount_info[0]):
- return False
-
- return True
-
-
-def is_guest_vault_mounted(allow_fail=False):
- """Check whether a vault is mounted for the guest user.
- It should be a mount of an ext4 partition on a loop device
- or be backed by tmpfs.
- """
- return is_vault_mounted(
- user=GUEST_USER_NAME,
- regexes={
- # Remove tmpfs support when it becomes unnecessary as all guest
- # modes will use ext4 on a loop device.
- constants.CRYPTOHOME_FS_REGEX_EXT4 :
- constants.CRYPTOHOME_DEV_REGEX_LOOP_DEVICE,
- constants.CRYPTOHOME_FS_REGEX_TMPFS :
- constants.CRYPTOHOME_DEV_REGEX_GUEST,
- },
- allow_fail=allow_fail)
-
-def is_permanent_vault_mounted(user, allow_fail=False):
- """Check if user is mounted over ecryptfs or ext4 crypto. """
- return is_vault_mounted(
- user=user,
- regexes={
- constants.CRYPTOHOME_FS_REGEX_ECRYPTFS :
- constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_SHADOW,
- constants.CRYPTOHOME_FS_REGEX_EXT4 :
- constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_DEVICE,
- },
- allow_fail=allow_fail)
-
-def get_mounted_vault_path(user, allow_fail=False):
- """Get the path where the decrypted data for the user is located."""
- return os.path.join(constants.SHADOW_ROOT, get_user_hash(user), 'mount')
-
-
-def canonicalize(credential):
- """Perform basic canonicalization of |email_address|.
-
- Perform basic canonicalization of |email_address|, taking into account that
- gmail does not consider '.' or caps inside a username to matter. It also
- ignores everything after a '+'. For example,
- c.masone+abc@gmail.com == cMaSone@gmail.com, per
- http://mail.google.com/support/bin/answer.py?hl=en&ctx=mail&answer=10313
- """
- if not credential:
- return None
-
- parts = credential.split('@')
- if len(parts) != 2:
- raise error.TestError('Malformed email: ' + credential)
-
- (name, domain) = parts
- name = name.partition('+')[0]
- if (domain == constants.SPECIAL_CASE_DOMAIN):
- name = name.replace('.', '')
- return '@'.join([name, domain]).lower()
-
-
-def crash_cryptohomed():
- """Let cryptohome crash."""
- # Try to kill cryptohomed so we get something to work with.
- pid = run_cmd('pgrep cryptohomed')
- try:
- pid = int(pid)
- except ValueError as e: # empty or invalid string
- raise error.TestError('Cryptohomed was not running')
- utils.system('kill -ABRT %d' % pid)
- # CONT just in case cryptohomed had a spurious STOP.
- utils.system('kill -CONT %d' % pid)
- utils.poll_for_condition(
- lambda: utils.system('ps -p %d' % pid,
- ignore_status=True) != 0,
- timeout=180,
- exception=error.TestError(
- 'Timeout waiting for cryptohomed to coredump'))
-
-
-def create_ecryptfs_homedir(user, password):
- """Creates a new home directory as ecryptfs.
-
- If a home directory for the user exists already, it will be removed.
- The resulting home directory will be mounted.
-
- @param user: Username to create the home directory for.
- @param password: Password to use when creating the home directory.
- """
- unmount_vault(user)
- remove_vault(user)
- args = [
- CRYPTOHOME_CMD,
- '--action=mount_ex',
- '--user=%s' % user,
- '--password=%s' % password,
- '--key_label=foo',
- '--ecryptfs',
- '--create']
- logging.info(run_cmd(' '.join(args)))
- if not is_vault_mounted(user, regexes={
- constants.CRYPTOHOME_FS_REGEX_ECRYPTFS :
- constants.CRYPTOHOME_DEV_REGEX_REGULAR_USER_SHADOW
- }, allow_fail=True):
- raise ChromiumOSError('Ecryptfs home could not be created')
-
-
-def do_dircrypto_migration(user, password, timeout=600):
- """Start dircrypto migration for the user.
-
- @param user: The user to migrate.
- @param password: The password used to mount the users vault
- @param timeout: How long in seconds to wait for the migration to finish
- before failing.
- """
- unmount_vault(user)
- args = [
- CRYPTOHOME_CMD,
- '--action=mount_ex',
- '--to_migrate_from_ecryptfs',
- '--user=%s' % user,
- '--password=%s' % password]
- logging.info(run_cmd(' '.join(args)))
- if not __get_mount_info(temporary_mount_path(user), allow_fail=True):
- raise ChromiumOSError('Failed to mount home for migration')
- args = [CRYPTOHOME_CMD, '--action=migrate_to_dircrypto', '--user=%s' % user]
- logging.info(run_cmd(' '.join(args)))
- utils.poll_for_condition(
- lambda: not __get_mount_info(
- temporary_mount_path(user), allow_fail=True),
- timeout=timeout,
- exception=error.TestError(
- 'Timeout waiting for dircrypto migration to finish'))
-
-
-def change_password(user, password, new_password):
- """Change user password."""
- args = [
- CRYPTOHOME_CMD,
- '--action=migrate_key_ex',
- '--user=%s' % user,
- '--old_password=%s' % password,
- '--password=%s' % new_password]
- out = run_cmd(' '.join(args))
- logging.info(out)
- if 'Key migration succeeded.' not in out:
- raise ChromiumOSError('Key migration failed.')
-
-
-class CryptohomeProxy(DBusClient):
- """A DBus proxy client for testing the Cryptohome DBus server.
- """
- CRYPTOHOME_BUS_NAME = 'org.chromium.Cryptohome'
- CRYPTOHOME_OBJECT_PATH = '/org/chromium/Cryptohome'
- CRYPTOHOME_INTERFACE = 'org.chromium.CryptohomeInterface'
- ASYNC_CALL_STATUS_SIGNAL = 'AsyncCallStatus'
- ASYNC_CALL_STATUS_SIGNAL_ARGUMENTS = (
- 'async_id', 'return_status', 'return_code'
- )
- DBUS_PROPERTIES_INTERFACE = 'org.freedesktop.DBus.Properties'
-
- # Default timeout in seconds for the D-Bus connection.
- DEFAULT_DBUS_TIMEOUT = 30
-
- def __init__(self, bus_loop=None, autodir=None, job=None,
- timeout=DEFAULT_DBUS_TIMEOUT):
- if autodir and job:
- # Install D-Bus protos necessary for some methods.
- dep_dir = os.path.join(autodir, 'deps', DBUS_PROTOS_DEP)
- job.install_pkg(DBUS_PROTOS_DEP, 'dep', dep_dir)
- sys.path.append(dep_dir)
-
- # Set up D-Bus main loop and interface.
- self.main_loop = gobject.MainLoop()
- if bus_loop is None:
- bus_loop = DBusGMainLoop(set_as_default=True)
- self.bus = dbus.SystemBus(mainloop=bus_loop)
- super(CryptohomeProxy, self).__init__(self.main_loop, self.bus,
- self.CRYPTOHOME_BUS_NAME,
- self.CRYPTOHOME_OBJECT_PATH,
- timeout)
- self.iface = dbus.Interface(self.proxy_object,
- self.CRYPTOHOME_INTERFACE)
- self.properties = dbus.Interface(self.proxy_object,
- self.DBUS_PROPERTIES_INTERFACE)
- self.handle_signal(self.CRYPTOHOME_INTERFACE,
- self.ASYNC_CALL_STATUS_SIGNAL,
- self.ASYNC_CALL_STATUS_SIGNAL_ARGUMENTS)
-
-
- # Wrap all proxied calls to catch cryptohomed failures.
- def __call(self, method, *args):
- try:
- return method(*args, timeout=180)
- except dbus.exceptions.DBusException as e:
- if e.get_dbus_name() == 'org.freedesktop.DBus.Error.NoReply':
- logging.error('Cryptohome is not responding. Sending ABRT')
- crash_cryptohomed()
- raise ChromiumOSError('cryptohomed aborted. Check crashes!')
- raise e
-
-
- def __wait_for_specific_signal(self, signal, data):
- """Wait for the |signal| with matching |data|
- Returns the resulting dict on success or {} on error.
- """
- # Do not bubble up the timeout here, just return {}.
- result = {}
- try:
- result = self.wait_for_signal(signal)
- except utils.TimeoutError:
- return {}
- for k in data.keys():
- if k not in result or result[k] != data[k]:
- return {}
- return result
-
-
- # Perform a data-less async call.
- # TODO(wad) Add __async_data_call.
- def __async_call(self, method, *args):
- # Clear out any superfluous async call signals.
- self.clear_signal_content(self.ASYNC_CALL_STATUS_SIGNAL)
- out = self.__call(method, *args)
- logging.debug('Issued call ' + str(method) +
- ' with async_id ' + str(out))
- result = {}
- try:
- # __wait_for_specific_signal has a 10s timeout
- result = utils.poll_for_condition(
- lambda: self.__wait_for_specific_signal(
- self.ASYNC_CALL_STATUS_SIGNAL, {'async_id' : out}),
- timeout=180,
- desc='matching %s signal' % self.ASYNC_CALL_STATUS_SIGNAL)
- except utils.TimeoutError as e:
- logging.error('Cryptohome timed out. Sending ABRT.')
- crash_cryptohomed()
- raise ChromiumOSError('cryptohomed aborted. Check crashes!')
- return result
-
-
- def mount(self, user, password, create=False, key_label='bar'):
- """Mounts a cryptohome.
-
- Returns True if the mount succeeds or False otherwise.
- """
- import rpc_pb2
-
- acc = rpc_pb2.AccountIdentifier()
- acc.account_id = user
-
- auth = rpc_pb2.AuthorizationRequest()
- auth.key.secret = password
- auth.key.data.label = key_label
-
- mount_req = rpc_pb2.MountRequest()
- if create:
- mount_req.create.copy_authorization_key = True
-
- out = self.__call(self.iface.MountEx, acc.SerializeToString(),
- auth.SerializeToString(), mount_req.SerializeToString())
- parsed_out = rpc_pb2.BaseReply()
- parsed_out.ParseFromString(''.join(map(chr, out)))
- return parsed_out.error == rpc_pb2.CRYPTOHOME_ERROR_NOT_SET
-
-
- def unmount(self, user):
- """Unmounts a cryptohome.
-
- Returns True if the unmount suceeds or false otherwise.
- """
- import rpc_pb2
-
- req = rpc_pb2.UnmountRequest()
-
- out = self.__call(self.iface.UnmountEx, req.SerializeToString())
- parsed_out = rpc_pb2.BaseReply()
- parsed_out.ParseFromString(''.join(map(chr, out)))
- return parsed_out.error == rpc_pb2.CRYPTOHOME_ERROR_NOT_SET
-
-
- def is_mounted(self, user):
- """Tests whether a user's cryptohome is mounted."""
- return (utils.is_mountpoint(user_path(user))
- and utils.is_mountpoint(system_path(user)))
-
-
- def require_mounted(self, user):
- """Raises a test failure if a user's cryptohome is not mounted."""
- utils.require_mountpoint(user_path(user))
- utils.require_mountpoint(system_path(user))
-
-
- def remove(self, user):
- """Removes a users cryptohome.
-
- Returns True if the operation succeeds or False otherwise.
- """
- import rpc_pb2
-
- acc = rpc_pb2.AccountIdentifier()
- acc.account_id = user
-
- out = self.__call(self.iface.RemoveEx, acc.SerializeToString())
- parsed_out = rpc_pb2.BaseReply()
- parsed_out.ParseFromString(''.join(map(chr, out)))
- return parsed_out.error == rpc_pb2.CRYPTOHOME_ERROR_NOT_SET
-
-
- def ensure_clean_cryptohome_for(self, user, password=None):
- """Ensure a fresh cryptohome exists for user.
-
- @param user: user who needs a shiny new cryptohome.
- @param password: if unset, a random password will be used.
- """
- if not password:
- password = ''.join(random.sample(string.ascii_lowercase, 6))
- self.remove(user)
- self.mount(user, password, create=True)
-
- def lock_install_attributes(self, attrs):
- """Set and lock install attributes for the device.
-
- @param attrs: dict of install attributes.
- """
- take_tpm_ownership()
- self.wait_for_install_attributes_ready()
- for key, value in attrs.items():
- if not self.__call(self.iface.InstallAttributesSet, key,
- dbus.ByteArray(value + '\0')):
- return False
- return self.__call(self.iface.InstallAttributesFinalize)
-
- def wait_for_install_attributes_ready(self):
- """Wait until install attributes are ready.
- """
- utils.poll_for_condition(
- lambda: self.__call(self.iface.InstallAttributesIsReady),
- timeout=300,
- exception=error.TestError(
- 'Timeout waiting for install attributes are ready'))
diff --git a/client/cros/cups.py b/client/cros/cups.py
deleted file mode 100644
index d7cf0c9..0000000
--- a/client/cros/cups.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import utils as sys_utils
-from autotest_lib.client.cros import upstart
-"""Provides utility methods for CUPS."""
-
-
def has_cups_upstart():
    """Checks whether cups is installed under upstart.

    @return True if the 'cupsd' upstart service exists.
    """
    service_name = 'cupsd'
    return upstart.has_service(service_name)
-
-
def has_cups_systemd():
    """Returns True if cups is running under systemd.

    Attempts to start cups if it is not already running.
    """
    # Guard clauses instead of one compound boolean expression.
    if not sys_utils.has_systemd():
        return False
    if sys_utils.get_service_pid('cups') != 0:
        return True
    # Not running yet: a successful start (exit status 0) counts.
    return sys_utils.start_service('cups', ignore_status=True) == 0
-
-
def has_cups_or_die():
    """Checks if the cups daemon is installed. Raises TestNAError if it is not.

    TestNA skips the test.
    """
    if has_cups_upstart() or has_cups_systemd():
        return
    raise error.TestNAError('No cupsd service found')
diff --git a/client/cros/flimflam_test_path.py b/client/cros/flimflam_test_path.py
deleted file mode 100644
index a81b9aa..0000000
--- a/client/cros/flimflam_test_path.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
import os, sys

import constants

# Extend the import path so tests can import the flimflam test scripts.
# They live under $SYSROOT (or /usr/local when run on-device) at the
# sub-path named by constants.FLIMFLAM_TEST_PATH.
sys.path.append(os.environ.get("SYSROOT", "/usr/local/") +
                constants.FLIMFLAM_TEST_PATH)
diff --git a/client/cros/gpio.py b/client/cros/gpio.py
deleted file mode 100644
index b838516..0000000
--- a/client/cros/gpio.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python2
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Chrome OS device GPIO library
-
-This module provides a convenient way to detect, setup, and access to GPIO
-values on a Chrome OS compatible device.
-
-See help(Gpio) for more information.
-'''
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import os, shutil, sys, tempfile
-
-
class Gpio(object):
    '''
    Utility to access GPIO values.

    Usage:
        gpio = Gpio()
        try:
            gpio.setup()
            print(gpio.read(gpio.DEVELOPER_SWITCH_CURRENT))
        except IOError:
            print("gpio failed")
    '''

    # GPIO property names (by "crossystem"):
    DEVELOPER_SWITCH_CURRENT = 'devsw_cur'
    RECOVERY_BUTTON_CURRENT = 'recoverysw_cur'
    WRITE_PROTECT_CURRENT = 'wpsw_cur'

    DEVELOPER_SWITCH_BOOT = 'devsw_boot'
    RECOVERY_BUTTON_BOOT = 'recoverysw_boot'

    def __init__(self, exception_type=IOError):
        """@param exception_type: exception class raised when a read fails."""
        self._exception_type = exception_type

        # list of property conversions, usually str2int.
        self._override_map = {
                self.DEVELOPER_SWITCH_CURRENT: int,
                self.DEVELOPER_SWITCH_BOOT: int,
                self.RECOVERY_BUTTON_CURRENT: int,
                self.RECOVERY_BUTTON_BOOT: int,
                self.WRITE_PROTECT_CURRENT: int,
        }

        # list of legacy (chromeos_acpi) property names.
        self._legacy_map = {
                'developer_switch': self.DEVELOPER_SWITCH_CURRENT,
                'recovery_button': self.RECOVERY_BUTTON_CURRENT,
                'write_protect': self.WRITE_PROTECT_CURRENT,
        }

    def setup(self):
        '''Configures system for processing GPIO.

        Returns:
            Raises an exception if gpio_setup execution failed.
        '''
        # This is the place to do any configuration / system detection.
        # Currently "crossystem" handles everything so we don't need to do
        # anything now.
        pass

    def read(self, name):
        '''Reads a GPIO property value.
        Check "crossystem" command for the list of available property names.

        Parameters:
            name: the name of property to read.

        Returns: current value, or raise exceptions.
        '''
        debug_title = "Gpio.read('%s'): " % name

        # convert legacy names
        if name in self._legacy_map:
            name = self._legacy_map[name]

        # stderr is redirected into a temp file so it can be reported when
        # crossystem fails.
        temp_fd, temp_file = tempfile.mkstemp()
        os.close(temp_fd)
        try:
            command = "crossystem %s 2>%s" % (name, temp_file)
            pipe = os.popen(command, 'r')
            value = pipe.read()
            exit_status = pipe.close()
            if exit_status:
                with open(temp_file, 'r') as temp_handle:
                    debug_info = temp_handle.read()
                value = value.strip()
                debug_info = debug_info.strip()
                if value:
                    debug_info = value + '\n' + debug_info
                if debug_info:
                    debug_info = '\nInformation: ' + debug_info
                raise self._exception_type(
                        debug_title + "Command failed (%d): %s%s" %
                        (exit_status, command, debug_info))
        finally:
            # Bug fix: the mkstemp() file used to be leaked on every read.
            os.remove(temp_file)

        # convert values
        if name in self._override_map:
            try:
                value = self._override_map[name](value)
            except (TypeError, ValueError):
                # Narrowed from a bare "except": only conversion failures are
                # translated; KeyboardInterrupt et al. now propagate.
                raise self._exception_type(debug_title +
                                           'Conversion failed: %s' % value)
        return value
-
-
def main():
    """Command-line entry point: print the current developer-switch value."""
    gpio = Gpio()
    try:
        gpio.setup()
        status = gpio.read(gpio.DEVELOPER_SWITCH_CURRENT)
        print("developer switch current status: %s" % status)
    except Exception as e:
        print("GPIO failed. %s" % e)
        sys.exit(1)


if __name__ == '__main__':
    main()
diff --git a/client/cros/http_speed.py b/client/cros/http_speed.py
deleted file mode 100644
index a283d6b..0000000
--- a/client/cros/http_speed.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A tool to measure single-stream link bandwidth using HTTP connections."""
-
-import logging, random, time
-from six.moves import urllib
-
-import numpy.random
-
-TIMEOUT = 90
-
-
class Error(Exception):
    """Generic failure while measuring HTTP transfer speed."""
    pass
-
-
def TimeTransfer(url, data):
    """Transfers data to/from url. Returns (time, url contents).

    @param url: URL to fetch (GET when |data| is None, POST otherwise).
    @param data: POST payload, or None for a GET request.
    @return tuple of (elapsed seconds, response body).
    @raises Error if the transfer completes in zero or negative time.
    """
    start_time = time.time()
    result = urllib.request.urlopen(url, data=data, timeout=TIMEOUT)
    got = result.read()
    transfer_time = time.time() - start_time
    if transfer_time <= 0:
        # Bug fix: the message used to read "Transfer of %s bytes" while being
        # handed the URL, not a byte count.
        raise Error("Transfer of %s took nonsensical time %s"
                    % (url, transfer_time))
    return (transfer_time, got)
-
-
def TimeTransferDown(url_pattern, size):
    """Downloads |size| bytes via |url_pattern| and returns the elapsed time.

    @raises Error if the payload length differs from |size|.
    """
    url = url_pattern % {'size': size}
    transfer_time, payload = TimeTransfer(url, data=None)
    if len(payload) != size:
        raise Error('Got %d bytes, expected %d' % (len(payload), size))
    return transfer_time
-
-
def TimeTransferUp(url, size):
    """If size > 0, POST size bytes to URL, else GET url. Return time taken."""
    payload = numpy.random.bytes(size)
    elapsed, _ = TimeTransfer(url, payload)
    return elapsed
-
-
def BenchmarkOneDirection(latency, label, url, benchmark_function):
    """Transfer a reasonable amount of data and record the speed.

    Repeats transfers with a growing payload until a single transfer takes at
    least the target time (or the maximum size is reached), so that startup
    latency does not dominate the measurement.

    Args:
        latency: Time for a 1-byte transfer
        label: Label to add to perf keyvals
        url: URL (or pattern) to transfer at
        benchmark_function: Function to perform actual transfer; invoked as
            benchmark_function(url, size) and returns elapsed seconds.
    Returns:
        Key-value dictionary, suitable for reporting to write_perf_keyval.
    """

    size = 1 << 15  # Start with a small download
    maximum_size = 1 << 24  # Go large, if necessary
    multiple = 1

    remaining = 2
    transfer_time = 0

    # Long enough that startup latency shouldn't dominate.
    target = max(20 * latency, 10)
    # Idiom fix: use lazy %-style logging arguments instead of eagerly
    # formatting the message with '%'.
    logging.info('Target time: %s', target)

    while remaining > 0:
        size = min(int(size * multiple), maximum_size)
        transfer_time = benchmark_function(url, size)
        logging.info('Transfer of %s took %s (%s b/s)',
                     size, transfer_time, 8 * size / transfer_time)
        if transfer_time >= target:
            break
        remaining -= 1

        # Take the latency into account when guessing a size for a
        # larger transfer.  This is a pretty simple model, but it
        # appears to work.
        adjusted_transfer_time = max(transfer_time - latency, 0.01)
        multiple = target / adjusted_transfer_time

    if remaining == 0:
        logging.warning(
                'Max size transfer still took less than minimum desired '
                'time %s', target)

    return {'seconds_%s_fetch_time' % label: transfer_time,
            'bytes_%s_bytes_transferred' % label: size,
            'bits_second_%s_speed' % label: 8 * size / transfer_time,
            }
- }
-
-
def HttpSpeed(download_url_format_string,
              upload_url):
    """Measures upload and download performance to the supplied URLs.

    Args:
        download_url_format_string: URL pattern with %(size) for payload bytes
        upload_url: URL that accepts large POSTs
    Returns:
        A dict of perf_keyval
    """
    # We want the download to be substantially longer than the one-byte fetch
    # time so that we can isolate bandwidth instead of latency.
    latency = TimeTransferDown(download_url_format_string, 1)
    logging.info('Latency is %s', latency)

    # Measure downlink first, then uplink (same order as before), and merge
    # both keyval dicts into a single result.
    down = BenchmarkOneDirection(latency,
                                 'downlink',
                                 download_url_format_string,
                                 TimeTransferDown)
    results = BenchmarkOneDirection(latency,
                                    'uplink',
                                    upload_url,
                                    TimeTransferUp)
    results.update(down)
    return results
diff --git a/client/cros/kernel_trace.py b/client/cros/kernel_trace.py
deleted file mode 100644
index 264cffc..0000000
--- a/client/cros/kernel_trace.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging, os, re
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-
class KernelTrace(object):
    """Allows access and control to Kernel tracing facilities.

    Example code snippet:
        trace = KernelTrace(events=['mali_dvfs:mali_dvfs_set_clock'])
        results = trace.read(regexp=r'frequency=(\d+)')

    Public methods:
        on          : Enables tracing
        off         : Disables tracing
        is_tracing  : Returns Boolean of tracing status.
        event_on    : Turns event on.  Returns boolean of success
        event_off   : Turns event off.  Returns boolean of success
        flush       : Flushes trace buffer
        read        : Reads trace buffer returns list of
                      - tuples if regexp provided
                      - else matching string
        uptime_secs : Returns float of current uptime.

    Private functions:
        _onoff       : Disable/enable tracing
        _onoff_event : Disable/enable events

    Private attributes:
        _buffer     : list to hold parsed results from trace buffer
        _buffer_ptr : integer pointing to last byte read

    TODO(tbroch): List of potential enhancements
        - currently only supports trace events.  Add other tracers.
    """
    _TRACE_ROOT = '/sys/kernel/debug/tracing'
    _TRACE_ON_PATH = os.path.join(_TRACE_ROOT, 'tracing_on')

    def __init__(self, flush=True, events=None, on=True):
        """Constructor for KernelTrace class.

        @param flush: flush the trace buffer before enabling events.
        @param events: iterable of trace event names to enable, or None.
        @param on: enable tracing once events are configured.
        """
        self._buffer = []
        self._buffer_ptr = 0
        self._events = []
        self._on = on

        if flush:
            self.flush()
        # Bug fix: the default events=None used to be iterated directly,
        # raising TypeError; treat None as "no events".
        for event in events or []:
            if self.event_on(event):
                self._events.append(event)
        if on:
            self.on()


    def __del__(self):
        """Destructor: disable the events we enabled and stop tracing."""
        # Bug fix: __del__ previously declared unused flush/events/on
        # parameters, which the interpreter can never supply.
        for event in self._events:
            self.event_off(event)
        if self._on:
            self.off()


    def _onoff(self, val):
        """Turn tracing on or off.

        Arguments:
            val: integer, 1 for on, 0 for off

        Raises:
            error.TestFail: If unable to turn tracing on or off.
        """
        utils.write_one_line(self._TRACE_ON_PATH, val)
        result = int(utils.read_one_line(self._TRACE_ON_PATH).strip())
        if result != val:
            # Bug fix: the conditional expression must be parenthesized.
            # Without parens the %-format bound tighter than the ternary, so a
            # failure to *disable* raised TestFail with the message 'dis'.
            raise error.TestFail("Unable to %sable tracing" %
                                 ('en' if val == 1 else 'dis'))


    def on(self):
        """Enable tracing."""
        return self._onoff(1)


    def off(self):
        """Disable tracing."""
        self._onoff(0)


    def is_tracing(self):
        """Is tracing on?

        Returns:
            True if tracing enabled and at least one event is enabled.
        """
        result = int(utils.read_one_line(self._TRACE_ON_PATH).strip())
        return result == 1 and len(self._events) > 0


    def _event_onoff(self, event, val):
        """Enable/Disable tracing event.

        TODO(tbroch) Consider allowing wild card enabling of trace events via
        /sys/kernel/debug/tracing/set_event although it makes filling buffer
        really easy

        Arguments:
            event: trace event name.
                See kernel(Documentation/trace/events.txt) for formatting.
            val: integer, 1 for on, 0 for off

        Returns:
            True if success, false otherwise
        """
        logging.debug("event_onoff: event:%s val:%d", event, val)
        event_path = event.replace(':', '/')
        fname = os.path.join(self._TRACE_ROOT, 'events', event_path, 'enable')

        if not os.path.exists(fname):
            logging.warning("Unable to locate tracing event %s", fname)
            return False
        utils.write_one_line(fname, val)

        # Verify the kernel registered the change by scanning set_event.
        fname = os.path.join(self._TRACE_ROOT, "set_event")
        found = False
        with open(fname) as fd:
            for ln in fd.readlines():
                logging.debug("set_event ln:%s", ln)
                if re.findall(event, ln):
                    found = True
                    break

        if val == 1 and not found:
            logging.warning("Event %s not enabled", event)
            return False

        if val == 0 and found:
            logging.warning("Event %s not disabled", event)
            return False

        return True


    def event_on(self, event):
        """Enable one tracing event.  Returns True on success."""
        return self._event_onoff(event, 1)


    def event_off(self, event):
        """Disable one tracing event.  Returns True on success."""
        return self._event_onoff(event, 0)


    def flush(self):
        """Flush trace buffer.

        Returns:
            True if the buffer reports a size of 0kb after freeing.

        Raises:
            error.TestFail: If unable to flush
        """
        self.off()
        fname = os.path.join(self._TRACE_ROOT, 'free_buffer')
        utils.write_one_line(fname, 1)
        self._buffer_ptr = 0

        fname = os.path.join(self._TRACE_ROOT, 'buffer_size_kb')
        result = utils.read_one_line(fname).strip()
        return result == '0'


    def read(self, regexp=None):
        """Read the trace buffer since the last read.

        @param regexp: optional regular expression; when given, only the first
                match of matching lines is accumulated, otherwise raw lines
                are kept.
        @return list of accumulated lines or regexp matches.
        """
        fname = os.path.join(self._TRACE_ROOT, 'trace')
        # Idiom fix: use a context manager so the file is closed even if a
        # read fails part-way.
        with open(fname) as fd:
            fd.seek(self._buffer_ptr)
            for ln in fd.readlines():
                if regexp is None:
                    self._buffer.append(ln)
                    continue
                results = re.findall(regexp, ln)
                if results:
                    logging.debug(ln)
                    self._buffer.append(results[0])
            self._buffer_ptr = fd.tell()
        return self._buffer


    @staticmethod
    def uptime_secs():
        """Return the current system uptime in seconds as a float."""
        results = utils.read_one_line("/proc/uptime")
        return float(results.split()[0])
diff --git a/client/cros/network.py b/client/cros/network.py
deleted file mode 100644
index 3c780d4..0000000
--- a/client/cros/network.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-from six.moves import urllib
-import socket
-import time
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-
-
def CheckThatInterfaceCanAccessDestination(host,
                                           interface,
                                           families=None):
    """
    Checks that we can access a host using a specific interface.

    @param host: Destination host
    @param interface: Name of the network interface to be used
    @param families: Address families to resolve |host| with; defaults to
            [socket.AF_UNSPEC].
    @raises: error.TestFail if the interface cannot access the specified host.

    """
    # Bug fix: the default used to be a shared mutable list argument; use
    # None and substitute a fresh default each call.
    if families is None:
        families = [socket.AF_UNSPEC]
    logging.debug('Check connection to %s', host)
    # addrinfo records: (family, type, proto, canonname, (addr, port))
    server_addresses = []
    for family in families:
        try:
            records = socket.getaddrinfo(host, 80, family)
        except socket.gaierror:
            # Narrowed from a bare "except": a resolution failure for an
            # unsupported family is expected and ignored; anything else
            # should surface.
            continue
        server_addresses.extend(record[4][0] for record in records)

    found_route = False
    failing_addresses = []
    for address in set(server_addresses):
        # Routes may not always be up by this point. Note that routes for v4 or
        # v6 may come up before the other, so we simply do this poll for all
        # addresses.
        try:
            utils.poll_for_condition(condition=lambda: utils.ping(
                    address, interface=interface, tries=2, timeout=2) == 0,
                                     exception=Exception('No route to %s' %
                                                         address),
                                     timeout=2)
        except Exception as e:
            logging.info(e)
            failing_addresses.append(address)
        else:
            found_route = True

    if not found_route:
        raise error.TestFail('Interface %s cannot connect to %s' % (interface,
                             failing_addresses))
-
-
# Default URL pattern for FetchUrl(); '%d' is replaced with the number of
# payload bytes to download from the test chargen AppEngine instance.
FETCH_URL_PATTERN_FOR_TEST = \
    'http://testing-chargen.appspot.com/download?size=%d'
-
def FetchUrl(url_pattern, bytes_to_fetch=10, fetch_timeout=10):
    """
    Fetches a specified number of bytes from a URL.

    @param url_pattern: URL pattern for fetching a specified number of bytes.
            %d in the pattern is to be filled in with the number of bytes to
            fetch.
    @param bytes_to_fetch: Number of bytes to fetch.
    @param fetch_timeout: Number of seconds to wait for the fetch to complete
            before it times out.
    @return: The time in seconds spent for fetching the specified number of
            bytes.
    @raises: error.TestError if one of the following happens:
            - The fetch takes no time.
            - The number of bytes fetched differs from the specified
              number.

    """
    # Limit the amount of bytes to read at a time.
    _MAX_FETCH_READ_BYTES = 1024 * 1024

    url = url_pattern % bytes_to_fetch
    logging.info('FetchUrl %s', url)
    start_time = time.time()
    result = urllib.request.urlopen(url, timeout=fetch_timeout)
    bytes_fetched = 0
    while bytes_fetched < bytes_to_fetch:
        # Read the remainder in bounded chunks; a short read means the server
        # delivered fewer bytes than requested.
        bytes_to_read = min(bytes_to_fetch - bytes_fetched,
                            _MAX_FETCH_READ_BYTES)
        chunk = result.read(bytes_to_read)
        if len(chunk) != bytes_to_read:
            raise error.TestError('FetchUrl tried to read %d bytes, but got '
                                  '%d bytes instead.' %
                                  (bytes_to_read, len(chunk)))
        bytes_fetched += len(chunk)
    fetch_time = time.time() - start_time
    if fetch_time > fetch_timeout:
        raise error.TestError('FetchUrl exceeded timeout.')

    return fetch_time
diff --git a/client/cros/network_chroot.py b/client/cros/network_chroot.py
deleted file mode 100644
index 045e2e4..0000000
--- a/client/cros/network_chroot.py
+++ /dev/null
@@ -1,301 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import errno
-import os
-import shutil
-import six
-import time
-
-from autotest_lib.client.bin import utils
-
class NetworkChroot(object):
    """Implements a chroot environment that runs in a separate network
    namespace from the caller.  This is useful for network tests that
    involve creating a server on the other end of a virtual ethernet
    pair.  This object is initialized with an interface name to pass
    to the chroot, as well as the IP address to assign to this
    interface, since in passing the interface into the chroot, any
    pre-configured address is removed.

    The startup of the chroot is an orchestrated process where a
    small startup script is run to perform the following tasks:
      - Write out pid file which will be a handle to the
        network namespace that that |interface| should be passed to.
      - Wait for the network namespace to be passed in, by performing
        a "sleep" and writing the pid of this process as well.  Our
        parent will kill this process to resume the startup process.
      - We can now configure the network interface with an address.
      - At this point, we can now start any user-requested server
        processes.
    """
    BIND_ROOT_DIRECTORIES = ('bin', 'dev', 'dev/pts', 'lib', 'lib32', 'lib64',
                             'proc', 'sbin', 'sys', 'usr', 'usr/local')
    # Subset of BIND_ROOT_DIRECTORIES that should be mounted writable.
    BIND_ROOT_WRITABLE_DIRECTORIES = frozenset(('dev/pts',))
    # Directories we'll bind mount when we want to bridge DBus namespaces.
    # Includes directories containing the system bus socket and machine ID.
    DBUS_BRIDGE_DIRECTORIES = ('run/dbus/', 'var/lib/dbus/')

    # Directories created fresh (not bind-mounted) inside the chroot.
    ROOT_DIRECTORIES = ('etc', 'etc/ssl', 'tmp', 'var', 'var/log', 'run',
                        'run/lock')
    ROOT_SYMLINKS = (
        ('var/run', '/run'),
        ('var/lock', '/run/lock'),
    )
    STARTUP = 'etc/chroot_startup.sh'
    STARTUP_DELAY_SECONDS = 5
    STARTUP_PID_FILE = 'run/vpn_startup.pid'
    STARTUP_SLEEPER_PID_FILE = 'run/vpn_sleeper.pid'
    COPIED_CONFIG_FILES = [
        'etc/ld.so.cache',
        'etc/ssl/openssl.cnf.compat'
    ]
    # %(key)s placeholders are filled in from CONFIG_FILE_VALUES (plus the
    # per-instance values set in __init__) by write_configs().
    CONFIG_FILE_TEMPLATES = {
        STARTUP:
            '#!/bin/sh\n'
            'exec > /var/log/startup.log 2>&1\n'
            'set -x\n'
            'echo $$ > /%(startup-pidfile)s\n'
            'sleep %(startup-delay-seconds)d &\n'
            'echo $! > /%(sleeper-pidfile)s &\n'
            'wait\n'
            'ip addr add %(local-ip-and-prefix)s dev %(local-interface-name)s\n'
            'ip link set %(local-interface-name)s up\n'
            # For running strongSwan VPN with flag --with-piddir=/run/ipsec. We
            # want to use /run/ipsec for strongSwan runtime data dir instead of
            # /run, and the cmdline flag applies to both client and server.
            'mkdir -p /run/ipsec\n'
    }
    CONFIG_FILE_VALUES = {
        'sleeper-pidfile': STARTUP_SLEEPER_PID_FILE,
        'startup-delay-seconds': STARTUP_DELAY_SECONDS,
        'startup-pidfile': STARTUP_PID_FILE
    }

    def __init__(self, interface, address, prefix):
        """Constructor.

        @param interface: name of the network interface handed to the chroot.
        @param address: IP address to assign to |interface| inside the chroot.
        @param prefix: integer network prefix length for |address|.
        """
        self._interface = interface

        # Copy these values from the class-static since specific instances
        # of this class are allowed to modify their contents.
        self._bind_root_directories = list(self.BIND_ROOT_DIRECTORIES)
        self._root_directories = list(self.ROOT_DIRECTORIES)
        self._copied_config_files = list(self.COPIED_CONFIG_FILES)
        self._config_file_templates = self.CONFIG_FILE_TEMPLATES.copy()
        self._config_file_values = self.CONFIG_FILE_VALUES.copy()
        self._env = dict(os.environ)

        self._config_file_values.update({
            'local-interface-name': interface,
            'local-ip': address,
            'local-ip-and-prefix': '%s/%d' % (address, prefix)
        })


    def startup(self):
        """Create the chroot and start user processes."""
        self.make_chroot()
        self.write_configs()
        # NOTE(review): the trailing '&' is joined into the minijail0 shell
        # command line by run(), backgrounding the startup script -- confirm
        # this matches the pid-file handshake described in the class docstring.
        self.run(['/bin/bash', os.path.join('/', self.STARTUP), '&'])
        self.move_interface_to_chroot_namespace()
        # Killing the sleeper resumes the startup script past its 'wait'.
        self.kill_pid_file(self.STARTUP_SLEEPER_PID_FILE)


    def shutdown(self):
        """Remove the chroot filesystem in which the VPN server was running"""
        # TODO(pstew): Some processes take a while to exit, which will cause
        # the cleanup below to fail to complete successfully...
        time.sleep(10)
        utils.system_output('rm -rf --one-file-system %s' % self._temp_dir,
                            ignore_status=True)


    def add_config_templates(self, template_dict):
        """Add a filename-content dict to the set of templates for the chroot

        @param template_dict dict containing filename-content pairs for
            templates to be applied to the chroot.  The keys to this dict
            should not contain a leading '/'.

        """
        self._config_file_templates.update(template_dict)


    def add_config_values(self, value_dict):
        """Add a name-value dict to the set of values for the config template

        @param value_dict dict containing key-value pairs of values that will
            be applied to the config file templates.

        """
        self._config_file_values.update(value_dict)


    def add_copied_config_files(self, files):
        """Add |files| to the set to be copied to the chroot.

        @param files iterable object containing a list of files to
            be copied into the chroot.  These elements should not contain a
            leading '/'.

        """
        self._copied_config_files += files


    def add_root_directories(self, directories):
        """Add |directories| to the set created within the chroot.

        @param directories list/tuple containing a list of directories to
            be created in the chroot.  These elements should not contain a
            leading '/'.

        """
        self._root_directories += directories


    def add_startup_command(self, command):
        """Add a command to the script run when the chroot starts up.

        @param command string containing the command line to run.

        """
        self._config_file_templates[self.STARTUP] += '%s\n' % command


    def add_environment(self, env_dict):
        """Add variables to the chroot environment.

        @param env_dict dict dictionary containing environment variables
        """
        self._env.update(env_dict)


    def get_log_contents(self):
        """Return the logfiles from the chroot."""
        return utils.system_output("head -10000 %s" %
                                   self.chroot_path("var/log/*"))


    def bridge_dbus_namespaces(self):
        """Make the system DBus daemon visible inside the chroot."""
        # Need the system socket and the machine-id.
        self._bind_root_directories += self.DBUS_BRIDGE_DIRECTORIES


    def chroot_path(self, path):
        """Returns the the path within the chroot for |path|.

        @param path string filename within the choot.  This should not
            contain a leading '/'.

        """
        return os.path.join(self._temp_dir, path.lstrip('/'))


    def get_pid_file(self, pid_file, missing_ok=False):
        """Returns the integer contents of |pid_file| in the chroot.

        @param pid_file string containing the filename within the choot
            to read and convert to an integer.  This should not contain a
            leading '/'.
        @param missing_ok bool indicating whether exceptions due to failure
            to open the pid file should be caught.  If true a missing pid
            file will cause this method to return 0.  If false, a missing
            pid file will cause an exception.

        """
        chroot_pid_file = self.chroot_path(pid_file)
        try:
            with open(chroot_pid_file) as f:
                return int(f.read())
        except IOError as e:
            # Only swallow "file not found", and only when the caller asked.
            if not missing_ok or e.errno != errno.ENOENT:
                raise e

        return 0


    def kill_pid_file(self, pid_file, missing_ok=False):
        """Kills the process belonging to |pid_file| in the chroot.

        @param pid_file string filename within the chroot to gain the process ID
            which this method will kill.
        @param missing_ok bool indicating whether a missing pid file is okay,
            and should be ignored.

        """
        pid = self.get_pid_file(pid_file, missing_ok=missing_ok)
        if missing_ok and pid == 0:
            return
        utils.system('kill %d' % pid, ignore_status=True)


    def make_chroot(self):
        """Make a chroot filesystem."""
        self._temp_dir = utils.system_output(
            'mktemp -d /usr/local/tmp/chroot.XXXXXXXXX')
        utils.system('chmod go+rX %s' % self._temp_dir)
        for rootdir in self._root_directories:
            os.mkdir(self.chroot_path(rootdir))

        # Build the minijail0 '-b' bind-mount arguments for every existing
        # root directory; symlinks are recreated rather than bind-mounted.
        self._jail_args = []
        for rootdir in self._bind_root_directories:
            src_path = os.path.join('/', rootdir)
            dst_path = self.chroot_path(rootdir)
            if not os.path.exists(src_path):
                continue
            elif os.path.islink(src_path):
                link_path = os.readlink(src_path)
                os.symlink(link_path, dst_path)
            else:
                os.makedirs(dst_path)  # Recursively create directories.
                mount_arg = '%s,%s' % (src_path, src_path)
                if rootdir in self.BIND_ROOT_WRITABLE_DIRECTORIES:
                    # Trailing ',1' marks the bind mount writable.
                    mount_arg += ',1'
                self._jail_args += [ '-b', mount_arg ]

        for config_file in self._copied_config_files:
            src_path = os.path.join('/', config_file)
            dst_path = self.chroot_path(config_file)
            if os.path.exists(src_path):
                shutil.copyfile(src_path, dst_path)

        for src_path, target_path in self.ROOT_SYMLINKS:
            link_path = self.chroot_path(src_path)
            os.symlink(target_path, link_path)


    def move_interface_to_chroot_namespace(self):
        """Move network interface to the network namespace of the server."""
        utils.system('ip link set %s netns %d' %
                     (self._interface,
                      self.get_pid_file(self.STARTUP_PID_FILE)))


    def run(self, args, ignore_status=False):
        """Run a command in a chroot, within a separate network namespace.

        @param args list containing the command line arguments to run.
        @param ignore_status bool set to true if a failure should be ignored.

        """
        # '-e' gives the jail its own network namespace; '-C' chroots to our
        # temp directory.
        utils.run('minijail0 -e -C %s %s' %
                  (self._temp_dir, ' '.join(self._jail_args + args)),
                  timeout=None,
                  ignore_status=ignore_status,
                  stdout_tee=utils.TEE_TO_LOGS,
                  stderr_tee=utils.TEE_TO_LOGS,
                  env=self._env)


    def write_configs(self):
        """Write out config files"""
        for config_file, template in six.iteritems(self._config_file_templates):
            with open(self.chroot_path(config_file), 'w') as f:
                f.write(template % self._config_file_values)
diff --git a/client/cros/p2p_utils.py b/client/cros/p2p_utils.py
deleted file mode 100644
index dee00c9..0000000
--- a/client/cros/p2p_utils.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import os
-import shutil
-
-from autotest_lib.client.common_lib import error, utils
-from autotest_lib.client.common_lib.cros import avahi_utils
-from autotest_lib.client.cros import service_stopper, tcpdump
-
-
-P2P_SHARE_PATH = '/var/cache/p2p'
-
-# A path used to store the existing p2p files during the test and restore them
-# once the test finishes.
-P2P_SHARE_BACKUP_PATH = '/var/cache/p2p-backup'
-
-
def p2p_backup_files(backup_path=P2P_SHARE_BACKUP_PATH):
    """Backup the P2P shared files and create an empty shared directory.

    p2p-server shall not be running during backup or restore.

    @param backup_path: The path where the files will be moved to.
    @raise error.TestError
    """
    # NOTE(review): despite the docstring, no empty share directory is
    # recreated here -- confirm p2p-server recreates it on start.
    try:
        # Drop any stale backup first, then move the live share aside.
        if os.path.exists(backup_path):
            shutil.rmtree(backup_path)
        if os.path.exists(P2P_SHARE_PATH):
            os.rename(P2P_SHARE_PATH, backup_path)
    except OSError as e:
        message = "Error on P2P files backup: %s" % (str(e))
        raise error.TestError(message)
-
-
def p2p_restore_files(backup_path=P2P_SHARE_BACKUP_PATH):
    """Restore the P2P shared files from a backup and *delete* the backup.

    p2p-server shall not be running during backup or restore.

    @param backup_path: The path where the files will be moved from.
    """
    # Clear whatever is currently shared, then move the backup into place.
    if os.path.exists(P2P_SHARE_PATH):
        shutil.rmtree(P2P_SHARE_PATH, ignore_errors=True)
    if not os.path.exists(backup_path):
        return
    os.rename(backup_path, P2P_SHARE_PATH)
-
-
class P2PServerOverTap(object):
    """Manage a p2p-server instance running over a TAP interface.

    This class manages a p2p-server instance configured to run over a TAP
    interface, useful for any test that needs to interact with the p2p-server
    (and its p2p-http-server instance) on a controlled network environment.
    """
    def __init__(self, tap_ip='169.254.10.1', tap_mask=24, tap_name='faketap'):
        """Initialize the configuration.

        @param tap_ip: IPv4 address for the TAP interface on the DUT's end.
        @param tap_mask: Network mask for the tap_ip address.
        @param tap_name: The name prefix for the TAP interface.
        """
        # The network 169.254/16 shouldn't clash with other real services and we
        # use a /24 subnet of it as the default safe value here.
        self._tap_ip = tap_ip
        self._tap_mask = tap_mask
        self._tap_name = tap_name
        # These are populated by setup() and torn down by cleanup().
        self._services = None
        self.tap = None
        self._tcpdump = None


    def setup(self, dumpdir=None):
        """Initializes avahi daemon on a new tap interface.

        @param dumpdir: Directory where the traffic on the new tap interface
                        is recorded. A value of None disables traffic dumping.
        """
        try:
            from lansim import tuntap
        except ImportError:
            logging.exception('Failed to import lansim.')
            raise error.TestError('Error importing lansim. Did you setup_dep '
                                  'and install_pkg lansim on your test?')

        # Ensure p2p and avahi aren't running.
        self._services = service_stopper.ServiceStopper(['p2p', 'avahi'])
        self._services.stop_services()

        # Backup p2p files.
        p2p_backup_files()

        # Initialize the TAP interface.
        self.tap = tuntap.TunTap(tuntap.IFF_TAP, name=self._tap_name)
        self.tap.set_addr(self._tap_ip, self._tap_mask)
        self.tap.up()

        # Enable traffic dump.
        if not dumpdir is None:
            dumpfile = os.path.join(dumpdir, 'dump-%s.pcap' % self.tap.name)
            self._tcpdump = tcpdump.Tcpdump(self.tap.name, dumpfile)

        # Re-launch avahi-daemon on the TAP interface only.
        avahi_utils.avahi_start_on_iface(self.tap.name)
        utils.system("start p2p")


    def cleanup(self):
        """Restore the original environment as before the call to setup().

        This method makes a best-effort attempt to restore the environment and
        logs all the errors encountered but doesn't fail.
        """
        try:
            utils.system('stop p2p')
            avahi_utils.avahi_stop()
        # NOTE(review): deliberate best-effort catch-all; failures are logged
        # and cleanup continues with the remaining teardown steps.
        except:
            logging.exception('Failed to stop tested services.')

        if self._tcpdump:
            self._tcpdump.stop()

        if self.tap:
            self.tap.down()

        # Restore p2p files.
        try:
            p2p_restore_files()
        except OSError:
            logging.exception('Failed to restore the P2P backup.')

        if self._services:
            self._services.restore_services()
diff --git a/client/cros/tcpdump.py b/client/cros/tcpdump.py
deleted file mode 100644
index 9aa503b..0000000
--- a/client/cros/tcpdump.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import logging
-import os
-import subprocess
-
-from autotest_lib.client.bin import utils
-
-class Tcpdump(object):
- """tcpdump capture process wrapper."""
-
- def __init__(self, iface, dumpfilename):
- """Launches a tcpdump process on the background.
-
- @param iface: The name of the interface to listen on.
- @param dumpfilename: The filename of the destination dump file.
- @raise utils.TimeoutError if tcpdump fails to start after 10 seconds.
- """
- logging.debug('Recording %s traffic to %s.', iface, dumpfilename)
- # Force to run tcpdump as root, since the dump file is created *after*
- # the process drops to a unprivileged user, meaning that it can't create
- # the passed dumpfilename file.
- self._tcpdump_proc = subprocess.Popen(
- ['tcpdump', '-i', iface, '-w', dumpfilename, '-Z', 'root'],
- stdout=open('/dev/null', 'w'),
- stderr=subprocess.STDOUT)
- # Wait for tcpdump to initialize and create the dump file.
- utils.poll_for_condition(
- lambda: os.path.exists(dumpfilename),
- desc='tcpdump creates the dump file.',
- sleep_interval=1,
- timeout=10.)
-
-
- def stop(self, timeout=10.):
- """Stop the dump process and wait for it to return.
-
- This method stops the tcpdump process running in background and waits
- for it to finish for a given timeout.
- @param timeout: The time to wait for the tcpdump to finish in seconds.
- None means no timeout.
- @return whether the tcpdump is not running.
- """
- if not self._tcpdump_proc:
- return True
-
- # Send SIGTERM to tcpdump.
- try:
- self._tcpdump_proc.terminate()
- except OSError as e:
- # If the process exits before we can send it a SIGTERM, an
- # OSError exception is raised here which we can ignore since the
- # process already finished.
- logging.error('Trying to kill tcpdump (%d): %s',
- self._tcpdump_proc.pid, e.strerror)
-
- logging.debug('Waiting for pid %d to finish.', self._tcpdump_proc.pid)
- if timeout is None:
- self._tcpdump_proc.wait()
- else:
- try:
- utils.poll_for_condition(
- lambda: not self._tcpdump_proc.poll() is None,
- sleep_interval=1,
- timeout=timeout)
- except utils.TimeoutError:
- logging.error('tcpdump failed to finish after %f seconds. Dump '
- 'file can be truncated.', timeout)
- return False
-
- self._tcpdump_proc = None
- return True
-
-
- def __del__(self):
- self.stop()
diff --git a/client/cros/tpm_store.py b/client/cros/tpm_store.py
deleted file mode 100644
index a749903..0000000
--- a/client/cros/tpm_store.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import tempfile
-
-from autotest_lib.client.bin import utils
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.cros import cryptohome
-
-class TPMStore(object):
- """Context enclosing the use of the TPM."""
-
- CHAPS_CLIENT_COMMAND = 'chaps_client'
- CONVERT_TYPE_RSA = 'rsa'
- CONVERT_TYPE_X509 = 'x509'
- OPENSSL_COMMAND = 'openssl'
- OUTPUT_TYPE_CERTIFICATE = 'cert'
- OUTPUT_TYPE_PRIVATE_KEY = 'privkey'
- PIN = '11111'
- # TPM maintain two slots for certificates, slot 0 for system specific
- # certificates, slot 1 for user specific certificates. Currently, all
- # certificates are programmed in slot 1. So hardcode this slot ID for now.
- SLOT_ID = '1'
- PKCS11_REPLAY_COMMAND = 'p11_replay --slot=%s' % SLOT_ID
- TPM_GROUP = 'chronos-access'
- TPM_USER = 'chaps'
-
-
- def __enter__(self):
- self.setup()
- return self
-
- def __exit__(self, exception, value, traceback):
- self.reset()
-
-
- def _install_object(self, pem, identifier, conversion_type, output_type):
- """Convert a PEM object to DER and store it in the TPM.
-
- @param pem string PEM encoded object to be stored.
- @param identifier string associated with the new object.
- @param conversion_type the object type to use in PEM to DER conversion.
- @param output_type the object type to use in inserting into the TPM.
-
- """
- if cryptohome.is_tpm_lockout_in_effect():
- raise error.TestError('The TPM is in dictonary defend mode. '
- 'The TPMStore may behave in unexpected '
- 'ways, exiting.')
- pem_file = tempfile.NamedTemporaryFile()
- pem_file.file.write(pem)
- pem_file.file.flush()
- der_file = tempfile.NamedTemporaryFile()
- utils.system('%s %s -in %s -out %s -inform PEM -outform DER' %
- (self.OPENSSL_COMMAND, conversion_type, pem_file.name,
- der_file.name))
- utils.system('%s --import --type=%s --path=%s --id="%s"' %
- (self.PKCS11_REPLAY_COMMAND, output_type, der_file.name,
- identifier))
-
-
- def setup(self):
- """Set the TPM up for operation in tests."""
- self.reset()
- self._directory = tempfile.mkdtemp()
- utils.system('chown %s:%s %s' %
- (self.TPM_USER, self.TPM_GROUP, self._directory))
- utils.system('%s --load --path=%s --auth="%s"' %
- (self.CHAPS_CLIENT_COMMAND, self._directory, self.PIN))
-
-
- def reset(self):
- """Reset the crypto store and take ownership of the device."""
- utils.system('initctl restart chapsd')
- cryptohome.take_tpm_ownership(wait_for_ownership=True)
-
-
- def install_certificate(self, certificate, identifier):
- """Install a certificate into the TPM, returning the certificate ID.
-
- @param certificate string PEM x509 contents of the certificate.
- @param identifier string associated with this certificate in the TPM.
-
- """
- return self._install_object(certificate,
- identifier,
- self.CONVERT_TYPE_X509,
- self.OUTPUT_TYPE_CERTIFICATE)
-
-
- def install_private_key(self, key, identifier):
- """Install a private key into the TPM, returning the certificate ID.
-
- @param key string PEM RSA private key contents.
- @param identifier string associated with this private key in the TPM.
-
- """
- return self._install_object(key,
- identifier,
- self.CONVERT_TYPE_RSA,
- self.OUTPUT_TYPE_PRIVATE_KEY)
diff --git a/client/cros/udev_helpers.py b/client/cros/udev_helpers.py
deleted file mode 100644
index 88e1486..0000000
--- a/client/cros/udev_helpers.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Lint as: python2, python3
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-# Lint as: python3
-""" Udevadm helper classes and functions.
-"""
-
-import subprocess
-
-class UdevadmInfo():
- """ Use udevadm info on a specific path.
- """
-
- @classmethod
- def GetProperties(cls, syspath):
- """ Get all properties of given syspath as a dict.
-
- Args:
- syspath: System path to get properties for.
-
- Returns:
- Dict with attribute/property as key and it's value. All keys are
- converted to lowercase. Example: {'subsystem': 'input'}
- """
- props = {}
- rawprops = subprocess.check_output(' '.join(
- ['udevadm', 'info', '-q', 'property', '-p', syspath]),
- shell=True)
-
- for line in rawprops.splitlines():
- upper_key, value = line.split(b'=', 1)
- props[upper_key.lower()] = value.strip(b'"')
-
- return props
-
-class UdevadmTrigger():
- """ Use udevadm trigger with specific rules.
- """
-
- def __init__(self,
- verbose=True,
- event_type=None,
- attr_match=[],
- attr_nomatch=[],
- subsystem_match=[],
- subsystem_nomatch=[]):
- """ Constructor
-
- Args:
- verbose: Whether to output triggered syspaths
- event_type: What type of events to trigger (device or subsystem)
- attr_match: What attributes to match
- attr_nomatch: What attributes not to match
- subsystem_match: What subsystems to match
- subsystem_nomatch: What subsystems not to match
- """
- cmd = ['udevadm', 'trigger']
-
- if verbose:
- cmd.append('-v')
-
- if event_type:
- cmd.append('-t')
- cmd.append('"{}"'.format(event_type))
-
- for attr in attr_match:
- cmd.append('-a')
- cmd.append('"{}"'.format(attr))
-
- for attr in attr_nomatch:
- cmd.append('-A')
- cmd.append('"{}"'.format(attr))
-
- for subsystem in subsystem_match:
- cmd.append('-s')
- cmd.append('"{}"'.format(subsystem))
-
- for subsystem in subsystem_nomatch:
- cmd.append('-S')
- cmd.append('"{}"'.format(subsystem))
-
- self.cmd = cmd
-
- def DryRun(self):
- """ Do a dry run using initialized trigger rules.
-
- Returns:
- List of syspaths that would be triggered.
- """
- cmd = self.cmd + ['-n']
- lines = subprocess.check_output(' '.join(cmd), shell=True)
- return lines.splitlines() if lines else []
diff --git a/client/cros/upstart.py b/client/cros/upstart.py
deleted file mode 100644
index e0b8559..0000000
--- a/client/cros/upstart.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Provides utility methods for interacting with upstart"""
-
-import os
-import re
-
-from autotest_lib.client.common_lib import utils
-
-
-def emit_event(event_name):
- """Fails if the emit command fails.
-
- @param service_name: name of the service.
- """
- utils.system('initctl emit %s' % event_name)
-
-
-def ensure_running(service_name):
- """Fails if |service_name| is not running.
-
- @param service_name: name of the service.
- """
- cmd = 'initctl status %s | grep start/running' % service_name
- utils.system(cmd)
-
-
-def has_service(service_name):
- """Returns true if |service_name| is installed on the system.
-
- @param service_name: name of the service.
- """
- return os.path.exists('/etc/init/' + service_name + '.conf')
-
-
-def is_running(service_name):
- """
- Returns true if |service_name| is running.
-
- @param service_name: name of service
- """
- cmd = 'status %s' % service_name
- return utils.system_output(cmd).find('start/running') != -1
-
-
-def get_pid(service_name):
- """
- Returns integer of PID of |service_name| or None if not running.
-
- @param service_name: name of service
- """
- res_str = utils.system_output('status %s' % service_name)
- match = re.search('process ([0-9]+)', res_str)
- if not match:
- return None
- return int(match.group(1))
-
-
-def restart_job(service_name):
- """
- Restarts an upstart job if it's running.
- If it's not running, start it.
-
- @param service_name: name of service
- """
-
- if is_running(service_name):
- utils.system_output('restart %s' % service_name)
- else:
- utils.system_output('start %s' % service_name)
-
-
-def stop_job(service_name):
- """
- Stops an upstart job.
- Fails if the stop command fails.
-
- @param service_name: name of service
- """
-
- utils.system('stop %s' % service_name)