release-request-bd6aa7dd-7b02-4794-942c-14599bf61208-for-git_oc-mr1-release-4193791 snap-temp-L98700000083613807
Change-Id: I5faeb2596281202f2f0db5298518095b6e821301
diff --git a/client/common_lib/error.py b/client/common_lib/error.py
index b98897d..4268870 100644
--- a/client/common_lib/error.py
+++ b/client/common_lib/error.py
@@ -143,6 +143,14 @@
msg += '\n' + repr(self.result_obj)
return msg
+ def __eq__(self, other):
+ if type(self) == type(other):
+ return (self.command == other.command
+ and self.result_obj == other.result_obj
+ and self.additional_text == other.additional_text)
+ else:
+ return NotImplemented
+
class CmdTimeoutError(CmdError):
"""Indicates that a command timed out."""
diff --git a/client/common_lib/utils.py b/client/common_lib/utils.py
index 23870f6..0ce9334 100644
--- a/client/common_lib/utils.py
+++ b/client/common_lib/utils.py
@@ -1217,6 +1217,17 @@
self.duration = duration
+ def __eq__(self, other):
+ if type(self) == type(other):
+ return (self.command == other.command
+ and self.exit_status == other.exit_status
+ and self.stdout == other.stdout
+ and self.stderr == other.stderr
+ and self.duration == other.duration)
+ else:
+ return NotImplemented
+
+
def __repr__(self):
wrapper = textwrap.TextWrapper(width = 78,
initial_indent="\n ",
diff --git a/client/cros/input_playback/keyboard_ctrl+shift+f3 b/client/cros/input_playback/keyboard_ctrl+shift+f3
new file mode 100644
index 0000000..0ce1c2f
--- /dev/null
+++ b/client/cros/input_playback/keyboard_ctrl+shift+f3
@@ -0,0 +1,18 @@
+E: 1499236459.447959 0004 0004 458976
+E: 1499236459.447959 0001 001d 1
+E: 1499236459.447959 0000 0000 0
+E: 1499236459.574771 0004 0004 458977
+E: 1499236459.574771 0001 002a 1
+E: 1499236459.574771 0000 0000 0
+E: 1499236459.647955 0004 0004 458812
+E: 1499236459.647955 0001 003d 1
+E: 1499236459.647955 0000 0000 0
+E: 1499236459.839729 0004 0004 458812
+E: 1499236459.839729 0001 003d 0
+E: 1499236459.839729 0000 0000 0
+E: 1499236459.854771 0004 0004 458977
+E: 1499236459.854771 0001 002a 0
+E: 1499236459.854771 0000 0000 0
+E: 1499236459.887727 0004 0004 458976
+E: 1499236459.887727 0001 001d 0
+E: 1499236459.887727 0000 0000 0
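Each line of this evemu-style recording is 'E: <sec.usec> <type> <code> <value>' with type and code in hex: type 0001 is EV_KEY (codes 0x1d Ctrl, 0x2a Shift, 0x3d F3; value 1 press, 0 release), 0004/0004 is the MSC_SCAN scancode, and 0000/0000 is a SYN_REPORT separator. A minimal decoding sketch (the key-name map is inferred from this file alone):

EV_KEY = 0x01
KEY_NAMES = {0x1d: 'ctrl', 0x2a: 'shift', 0x3d: 'f3'}

SAMPLE = """E: 1499236459.447959 0004 0004 458976
E: 1499236459.447959 0001 001d 1
E: 1499236459.447959 0000 0000 0
"""

def decode(text):
    events = []
    for line in text.splitlines():
        _, ts, ev_type, code, value = line.split()
        if int(ev_type, 16) == EV_KEY:
            name = KEY_NAMES.get(int(code, 16), '?')
            events.append((float(ts), name,
                           'down' if value == '1' else 'up'))
    return events

assert decode(SAMPLE) == [(1499236459.447959, 'ctrl', 'down')]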
diff --git a/client/site_tests/graphics_Idle/graphics_Idle.py b/client/site_tests/graphics_Idle/graphics_Idle.py
index 74e7404..514469f 100755
--- a/client/site_tests/graphics_Idle/graphics_Idle.py
+++ b/client/site_tests/graphics_Idle/graphics_Idle.py
@@ -21,6 +21,7 @@
'/sys/kernel/debug/dri/0/i915_fbc',
'/sys/kernel/debug/dri/0/i915_fbc_status'
]
+GEM_OBJECTS_PATHS = ['/sys/kernel/debug/dri/0/i915_gem_objects']
GEM_PATHS = ['/sys/kernel/debug/dri/0/i915_gem_active']
PSR_PATHS = ['/sys/kernel/debug/dri/0/i915_edp_psr_status']
RC6_PATHS = ['/sys/kernel/debug/dri/0/i915_drpc_info']
@@ -53,9 +54,7 @@
errors += self.verify_graphics_dvfs()
errors += self.verify_graphics_fbc()
errors += self.verify_graphics_psr()
- # TODO(ihf): enable once crbug.com/727983 is fixed.
- if not utils.system_output('uname -r').startswith('4.4.'):
- errors += self.verify_graphics_gem_idle()
+ errors += self.verify_graphics_gem_idle()
errors += self.verify_graphics_i915_min_clock()
errors += self.verify_graphics_rc6()
errors += self.verify_lvds_downclock()
@@ -353,25 +352,53 @@
def verify_graphics_gem_idle(self):
""" On systems which have i915, check that we can get all gem objects
- to become idle (i.e. the i915_gem_active list need to go to 0);
+ to become idle (i.e. the i915_gem_active list or the per-client/process
+ gem object counts in i915_gem_objects need to go to 0);
idle before doing so, and retry every second for 20 seconds."""
logging.info('Running verify_graphics_gem_idle')
- if (utils.get_cpu_soc_family() == 'x86_64' and
- self._gpu_type != 'pinetrail'):
+ if utils.get_cpu_soc_family() == 'x86_64':
tries = 0
found = False
+ per_process_check = False
+
gem_path = self.get_valid_path(GEM_PATHS)
if not gem_path:
- return 'GEM_PATHS not found.'
- while not found and tries < 240:
- time.sleep(0.25)
- with open(gem_path, 'r') as gem_file:
- for line in gem_file:
- if re.search('Total 0 objects', line):
- found = True
- break
+ gem_path = self.get_valid_path(GEM_OBJECTS_PATHS)
+ if gem_path:
+ per_process_check = True
+ else:
+ return 'GEM_PATHS not found.'
- tries += 1
+ # Checks 4.4 and later kernels
+ if per_process_check:
+ while not found and tries < 240:
+ time.sleep(0.25)
+ gem_objects_idle = False
+ gem_active_search = False
+ with open(gem_path, 'r') as gem_file:
+ for line in gem_file:
+ if gem_active_search:
+ if re.search(r'\(0 active,', line):
+ gem_objects_idle = True
+ else:
+ gem_objects_idle = False
+ break
+ elif line == '\n':
+ gem_active_search = True
+ if gem_objects_idle:
+ found = True
+ tries += 1
+
+ # Checks pre-4.4 kernels
+ else:
+ while not found and tries < 240:
+ time.sleep(0.25)
+ with open(gem_path, 'r') as gem_file:
+ for line in gem_file:
+ if re.search('Total 0 objects', line):
+ found = True
+ break
+ tries += 1
if not found:
return self.handle_error('Did not reach 0 gem actives. ',
gem_path)
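For context, on 4.4+ kernels the i915_gem_objects debugfs file ends with a blank line followed by per-client lines whose parenthesized counts include 'N active'; the new loop declares the GPU idle only when every client line reports '(0 active,'. A standalone sketch of that scan (the sample text is invented, not captured from a device):

import re

SAMPLE = """413 objects, 64421888 bytes

chrome: 215 objects, 33554432 bytes (0 active, 1048576 inactive)
X: 198 objects, 30867456 bytes (0 active, 524288 inactive)
"""

def gem_idle(text):
    in_clients = False
    idle = False
    for line in text.splitlines(True):
        if in_clients:
            if re.search(r'\(0 active,', line):
                idle = True
            else:
                idle = False  # any busy client means not idle
                break
        elif line == '\n':
            in_clients = True  # client lines follow the blank line
    return idle

assert gem_idle(SAMPLE)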
diff --git a/contrib/crbug_crawler.py b/contrib/crbug_crawler.py
deleted file mode 100755
index 12b697f..0000000
--- a/contrib/crbug_crawler.py
+++ /dev/null
@@ -1,415 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-This script crawls crbug. Sort-of.
-Invocation:
- Get all bugs with labels, strings (in summary and/or comments):
- crbug_crawler.py --labels 'one two three'
- --queries '"first query" "second query"'
-
- Get baddest open bugs of all time:
- crbug_crawler.py --reap
-
-Tips:
- - Label based queries will return faster than text queries.
- - contrib/crbug_shell.py is a wrapper that allows you to incrementally
- filter search results using this script.
-"""
-
-import argparse
-import cmd
-import logging
-import sys
-import shlex
-
-import common
-from autotest_lib.client.common_lib import global_config
-from autotest_lib.server.cros.dynamic_suite import reporting
-
-
-def _parse_args(args):
- if not args:
- import crbug_crawler
- logging.error('Improper usage of crbug_crawler: %s',
- crbug_crawler.__doc__)
- sys.exit(1)
-
- description = ('Usage: crbug_crawler.py --reap')
- parser = argparse.ArgumentParser(description=description)
- parser.add_argument('--quiet', help=('Turn off logging noise.'),
- action='store_true', default=False)
- parser.add_argument('--num', help='Number of issues to output.', default=10,
- type=int)
- parser.add_argument('--queries',
- help=('Search query. Eg: --queries "%s %s"' %
- ('build_Root', 'login')),
- default='')
- parser.add_argument('--labels',
- help=('Search labels. Eg: --labels "%s %s"' %
- ('autofiled', 'Pri-1')), default=None)
- parser.add_argument('--reap', help=('Top autofiled bugs ordered by count.'),
- action='store_true', default=False)
- return parser.parse_args(args)
-
-
-class Update(object):
- """Class encapsulating fields of an update to a bug.
- """
- open_statuses = ['Unconfirmed', 'Untriaged', 'Available', 'Assigned',
- 'Started', 'ExternalDependency']
- closed_statuses = ['Fixed', 'Verified', 'Duplicate', 'WontFix', 'Archived']
-
- def __init__(self, comment='', labels='', status=''):
- self.comment = comment
- self.labels = labels if labels else []
- self.status = status
-
-
- def __str__(self):
- msg = 'status: %s' % self.status
- if self.labels:
- msg = '%s labels: %s' % (msg, self.labels)
- if self.comment:
- msg = '%s comment: %s' % (msg, self.comment)
- return msg
-
-
-class UpdateManager(object):
- """Update manager that allows you to revert status updates.
-
- This class keeps track of the last update applied and is capable
- of reverting it.
- """
-
- def __init__(self, autocommit=False):
- """Initialize update manager.
-
- @param autocommit: If False just print out the update instead
- of committing it.
- """
- self.history = {}
- self.present = {}
- self.reporter = reporting.Reporter()
- self.phapi_lib = self.reporter.get_bug_tracker_client()
- self.autocommit = autocommit
-
-
- def revert(self):
- """Only manages status reverts as of now.
- """
- for issue_id, update in self.history.iteritems():
- logging.warning('You will have to manually update %s and %s on %s',
- self.present[issue_id].labels,
- self.present[issue_id].comment, issue_id)
- # Create a new update with just the status.
- self.update(issue_id, Update(status=update.status))
-
-
- def update(self, old_issue, update):
- """Record the state of an issue before updating it.
-
- @param old_issue: The issue to update. If an id is specified an
- issue is constructed. If an issue object (as defined in phapi_lib
- Issue)is passed in, it is used directly.
- @param update: The Update object to apply to the issue.
- """
- if type(old_issue) == int:
- old_issue = self.phapi_lib.get_tracker_issue_by_id(old_issue)
- old_update = Update(
- labels=old_issue.labels, status=old_issue.status)
-
- if not update.status:
- update.status = old_update.status
- elif (update.status not in Update.open_statuses and
- update.status not in Update.closed_statuses):
- raise ValueError('Unknown status %s' % update.status)
-
- if not self.autocommit:
- logging.warning('Would have applied the following update: '
- '%s -> %s', old_update, update)
- return
-
- self.history[old_issue.id] = old_update
- self.reporter.modify_bug_report(
- issue_id=old_issue.id, comment=update.comment,
- label_update=update.labels,
- status=update.status)
- self.present[old_issue.id] = update
-
-
-class Crawler(object):
- """Class capable of crawling crbug.
-
- This class applies filters to issues it crawls and caches them locally.
- """
-
- # The limit at which we ask for confirmation to proceed with the crawl.
- PROMPT_LIMIT = 2000
-
- def __init__(self):
- self.reporter = reporting.Reporter()
- self.phapi_client = self.reporter.get_bug_tracker_client()
- self.issues = None
- self.all_autofiled_query = 'ANCHOR TestFailure'
- self.all_autofiled_label = 'autofiled'
- self.prompted = False
-
-
- def fuzzy_search(self, query='', label='', fast=True):
- """Returns all issues using one query and/or one label.
-
- @param query: A string representing the query.
- @param label: A string representing the label.
- @param fast: If true, don't bother fetching comments.
-
- @return: A list of issues matching the query. If fast is
- specified the issues won't have comments.
- """
- if not query and not label:
- raise ValueError('Require query or labels to make a tracker query, '
- 'try query = "%s" or one of the predefined labels %s' %
- (self.fuzzy_search_anchor(),
- self.reporter._PREDEFINED_LABELS))
- if type(label) != str:
- raise ValueError('The crawler only supports one label per query, '
- 'and it must be a string. you supplied %s' % label)
- return self.phapi_client.get_tracker_issues_by_text(
- query, label=label, full_text=not fast)
-
-
- @staticmethod
- def _get_autofiled_count(issue):
- """Return the autofiled count.
-
- @param issue: An issue object that has labels.
-
- @return: An integer representing the autofiled count.
- """
- for label in issue.labels:
- if 'autofiled-count-' in label:
- return int(label.replace('autofiled-count-', ''))
-
- # Force bugs without autofiled-count to sink
- return 0
-
-
- def _prompt_crawl(self, new_issues, start_index):
- """Warn the user that a crawl is getting large.
-
- This method prompts for a y/n answer in case the user wants to abort the
- crawl and specify another set of labels/queries.
-
- @param new_issues: A list of issues used with the start_index to
- determine the number of issues already processed.
- @param start_index: The start index of the next crawl iteration.
- """
- logging.warning('Found %s issues, Crawling issues starting from %s',
- len(new_issues), start_index)
- if start_index > self.PROMPT_LIMIT and not self.prompted:
- logging.warning('Already crawled %s issues, it is possible that'
- 'you\'ve specified a very general label. If this is the '
- 'case consider re-rodering the labels so they start with '
- 'the rarest. Continue crawling [y/n]?',
- start_index + len(new_issues))
- self.prompted = raw_input() == 'y'
- if not self.prompted:
- sys.exit(0)
-
-
- def exhaustive_crawl(self, query='', label='', fast=True):
- """Perform an exhaustive crawl using one label and query string.
-
- @param query: A string representing one query.
- @param lable: A string representing one label.
-
- @return A list of issues sorted by descending autofiled count.
- """
- start_index = 0
- self.phapi_client.set_max_results(200)
- logging.warning('Performing an exhaustive crawl with label %s query %s',
- label, query)
- vague_issues = []
- new_issues = self.fuzzy_search(query=query, label=label, fast=fast)
- while new_issues:
- vague_issues += new_issues
- start_index += len(new_issues) + 1
- self.phapi_client.set_start_index(start_index)
- new_issues = self.fuzzy_search(query=query, label=label,
- fast=fast)
- self._prompt_crawl(new_issues, start_index)
-
- # Subsequent calls will clear the issues cache with new results.
- self.phapi_client.set_start_index(1)
- return sorted(vague_issues, reverse=True,
- key=lambda issue: self._get_autofiled_count(issue))
-
-
- @staticmethod
- def filter_labels(issues, labels):
- """Takes a list of labels and returns matching issues.
-
- @param issues: A list of issues to parse for labels.
- @param labels: A list of labels to match.
-
- @return: A list of matching issues. The issues must contain
- all the labels specified.
- """
- if not labels:
- return issues
- matching_issues = set([])
- labels = set(labels)
- for issue in issues:
- issue_labels = set(issue.labels)
- if issue_labels.issuperset(labels):
- matching_issues.add(issue)
- return matching_issues
-
-
- @classmethod
- def does_query_match(cls, issue, query):
- """Check if a query matches the given issue.
-
- @param issue: The issue to check.
- @param query: The query to check against.
-
- @return: True if the query matches, false otherwise.
- """
- if query in issue.title or query in issue.summary:
- return True
- # We can only search comments if the issue is a complete issue
- # i.e as defined in phapi_lib.Issue.
- try:
- if any(query in comment for comment in issue.comments):
- return True
- except (AttributeError, TypeError):
- pass
- return False
-
-
- @classmethod
- def filter_queries(cls, issues, queries):
- """Take a list of queries and returns matching issues.
-
- @param issues: A list of issues to parse. If the issues contain
- comments and a query is not in the issues title or summmary,
- the comments are parsed for a substring match.
- @param queries: A list of queries to parse the issues for.
- This method looks for an exact substring match within each issue.
-
- @return: A list of matching issues.
- """
- if not queries:
- return issues
- matching_issues = set([])
- for issue in issues:
- # For each query, check if it's in the title, description or
- # comments. If a query isn't in any of these, discard the issue.
- for query in queries:
- if cls.does_query_match(issue, query):
- matching_issues.add(issue)
- else:
- if issue in matching_issues:
- logging.warning('%s: %s\n \tPassed a subset of the '
- 'queries but failed query %s',
- issue.id, issue.title, query)
- matching_issues.remove(issue)
- break
- return matching_issues
-
-
- def filter_issues(self, queries='', labels=None, fast=True):
- """Run the queries, labels filters by crawling crbug.
-
- @param queries: A space seperated string of queries, usually passed
- through the command line.
- @param labels: A space seperated string of labels, usually passed
- through the command line.
- @param fast: If specified, skip creating comments for issues since this
- can be a slow process. This value is only a suggestion, since it is
- ignored if multiple queries are specified.
- """
- queries = shlex.split(queries)
- labels = shlex.split(labels) if labels else None
-
- # We'll need comments to filter multiple queries.
- if len(queries) > 1:
- fast = False
- matching_issues = self.exhaustive_crawl(
- query=queries.pop(0) if queries else '',
- label=labels.pop(0) if labels else '', fast=fast)
- matching_issues = self.filter_labels(matching_issues, labels)
- matching_issues = self.filter_queries(matching_issues, queries)
- self.issues = list(matching_issues)
-
-
- def dump_issues(self, limit=None):
- """Print issues.
- """
- if limit and limit < len(self.issues):
- issues = self.issues[:limit]
- else:
- issues = self.issues
- #TODO: Modify formatting, include some paging etc.
- for issue in issues:
- try:
- print ('[%s] %s crbug.com/%s %s' %
- (self._get_autofiled_count(issue),
- issue.status, issue.id, issue.title))
- except UnicodeEncodeError as e:
- print "Unicdoe error decoding issue id %s" % issue.id
- continue
-
-
-def _update_test(args):
- """A simple update test, to record usage.
- """
- updater = UpdateManager(autocommit=True)
- for issue in issues:
- updater.update(issue,
- Update(comment='this is bogus', labels=['bogus'],
- status='Assigned'))
- updater.revert()
-
-
-def configure_logging(quiet=False):
- """Configure logging.
-
- @param quiet: True to turn off warning messages.
- """
- logging.basicConfig()
- logger = logging.getLogger()
- level = logging.WARNING
- if quiet:
- level = logging.ERROR
- logger.setLevel(level)
-
-
-def main(args):
- crawler = Crawler()
- if args.reap:
- if args.queries or args.labels:
- logging.error('Query based ranking of bugs not supported yet.')
- return
- queries = ''
- labels = crawler.all_autofiled_label
- else:
- queries = args.queries
- labels = args.labels
- crawler.filter_issues(queries=queries, labels=labels,
- fast=False if queries else True)
- crawler.dump_issues(int(args.num))
- logging.warning('\nThis is a truncated list of %s results, use --num %s '
- 'to get them all. If you want more informative results/better '
- 'querying capabilities try crbug_shell.py.',
- args.num, len(crawler.issues))
-
-
-if __name__ == '__main__':
- args = _parse_args(sys.argv[1:])
- configure_logging(args.quiet)
- main(args)
-
diff --git a/contrib/crbug_shell.py b/contrib/crbug_shell.py
deleted file mode 100755
index c2f38c3..0000000
--- a/contrib/crbug_shell.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""A shell for crbug_crawler.
-"""
-
-import crbug_crawler
-import cmd
-import logging
-import os
-import sys
-
-import common
-
-from autotest_lib.client.common_lib import global_config
-from autotest_lib.server.cros.dynamic_suite import reporting
-
-try:
- from oauth2client import file as oauth_file
- from oauth2client import client
- from oauth2client import tools
-except ImportError:
- logging.error('You do not have the appropriate oauth2client libraries'
- 'required for authorization. Run ./<autotest_checkout>/utils/\ '
- 'build_externals.py or pip install the oauth2client.')
- sys.exit(1)
-
-
-def check_auth():
- """Checks if valid oath credentials exist on the system.
-
- If valid credentials aren't found on the client they're generated,
- if possible, using the cliend_id and client_secret from the shadow_config.
- """
- shadow_config = os.path.join(common.autotest_dir, 'shadow_config.ini')
- if not os.path.exists(shadow_config):
- logging.error('Cannot autorize without a shadow_config that contains'
- 'the appropriate client id for oauth. Contact '
- 'chromeos-lab-infrastructure if you think this is a mistake.')
- sys.exit(1)
-
- auth_store = oauth_file.Storage(reporting.Reporter.get_creds_abspath())
- creds = auth_store.get()
- if creds is None or creds.invalid:
- client_id = global_config.global_config.get_config_value(
- reporting.BUG_CONFIG_SECTION, 'client_id', default='')
- client_secret = global_config.global_config.get_config_value(
- reporting.BUG_CONFIG_SECTION, 'client_secret', default='')
- scope = global_config.global_config.get_config_value(
- reporting.BUG_CONFIG_SECTION, 'scope', default='')
- if not client_secret and not client_id:
- logging.error('Unable to generate oauth credentials, client_id '
- 'is %s and client_secret %s. If you do not require oauth '
- 'run this script with --noauth. This may or may not be '
- 'implemented ATM ;).', client_id, client_secret)
-
- input_flow = client.OAuth2WebServerFlow(client_id=client_id,
- client_secret=client_secret, scope=scope)
- logging.warning('Running oauth flow, make sure you use your chromium '
- 'account during autorization.')
- creds = tools.run(input_flow, auth_store)
-
-
-class CrBugShell(cmd.Cmd):
- def __init__(self, *args, **kwargs):
- cmd.Cmd.__init__(self, *args, **kwargs)
- self.queries = []
- self.labels = []
- if not kwargs.get('noauth'):
- check_auth()
- self.crawler = crbug_crawler.Crawler()
-
-
- def do_reap(self, line):
- self.crawler.filter_issues(queries='',
- labels=self.crawler.all_autofiled_label)
- if line:
- try:
- limit = int(line)
- except ValueError:
- logging.warning('Reap can only take an integer argument.')
- return
- else:
- limit = None
- self.crawler.dump_issues(limit=limit)
-
-
- def do_query_filter(self, query):
- print 'Adding query %s' % query
- self.queries.append(query)
-
-
- def do_label_filter(self, label):
- print 'Adding label %s' % label
- self.labels.append(label)
-
-
- def do_show_filters(self, line=''):
- print ('queries: %s, labels %s' %
- (self.queries, self.labels))
-
-
- def do_reset(self, line):
- self.crawler.issues = None
- self.queries = []
- self.labels = []
-
-
- def do_run_filter(self, line):
- print 'running the following filter: %s' % self.do_show_filters()
-
- # Populate cached issues if this is a first time query. If we have
- # cached issues from an incremental search, filter those instead.
- if self.crawler.issues:
- self.crawler.issues = self.crawler.filter_labels(
- self.crawler.issues, self.labels)
- self.crawler.issues = self.crawler.filter_queries(
- self.crawler.issues, self.queries)
- else:
- self.crawler.filter_issues(queries=' '.join(self.queries),
- labels=' '.join(self.labels))
- self.crawler.dump_issues()
-
-
-if __name__ == '__main__':
- CrBugShell().cmdloop()
diff --git a/server/autotest.py b/server/autotest.py
index e548935..c38d5d5 100644
--- a/server/autotest.py
+++ b/server/autotest.py
@@ -353,8 +353,8 @@
def run(self, control_file, results_dir='.', host=None, timeout=None,
- tag=None, parallel_flag=False, background=False,
- client_disconnect_timeout=None, use_packaging=True):
+ tag=None, parallel_flag=False, client_disconnect_timeout=None,
+ use_packaging=True):
"""
Run an autotest job on the remote machine.
@@ -367,9 +367,6 @@
@param tag: Tag name for the client side instance of autotest.
@param parallel_flag: Flag set when multiple jobs are run at the
same time.
- @param background: Indicates that the client should be launched as
- a background job; the code calling run will be responsible
- for monitoring the client and collecting the results.
@param client_disconnect_timeout: Seconds to wait for the remote host
to come back after a reboot. Defaults to the host setting for
DEFAULT_REBOOT_TIMEOUT.
@@ -388,7 +385,7 @@
if tag:
results_dir = os.path.join(results_dir, tag)
- atrun = _Run(host, results_dir, tag, parallel_flag, background)
+ atrun = _Run(host, results_dir, tag, parallel_flag)
self._do_run(control_file, results_dir, host, atrun, timeout,
client_disconnect_timeout, use_packaging=use_packaging)
@@ -448,14 +445,13 @@
logging.error(e)
# on full-size installs, turn on any profilers the server is using
- if not atrun.background:
- running_profilers = host.job.profilers.add_log.iteritems()
- for profiler, (args, dargs) in running_profilers:
- call_args = [repr(profiler)]
- call_args += [repr(arg) for arg in args]
- call_args += ["%s=%r" % item for item in dargs.iteritems()]
- prologue_lines.append("job.profilers.add(%s)\n"
- % ", ".join(call_args))
+ running_profilers = host.job.profilers.add_log.iteritems()
+ for profiler, (args, dargs) in running_profilers:
+ call_args = [repr(profiler)]
+ call_args += [repr(arg) for arg in args]
+ call_args += ["%s=%r" % item for item in dargs.iteritems()]
+ prologue_lines.append("job.profilers.add(%s)\n"
+ % ", ".join(call_args))
cfile = "".join(prologue_lines)
cfile += open(tmppath).read()
@@ -494,7 +490,7 @@
def run_timed_test(self, test_name, results_dir='.', host=None,
- timeout=None, parallel_flag=False, background=False,
+ timeout=None, parallel_flag=False,
client_disconnect_timeout=None, *args, **dargs):
"""
Assemble a tiny little control file to just run one test,
@@ -508,7 +504,7 @@
cmd = ", ".join([repr(test_name)] + map(repr, args) + opts)
control = "job.run_test(%s)\n" % cmd
self.run(control, results_dir, host, timeout=timeout,
- parallel_flag=parallel_flag, background=background,
+ parallel_flag=parallel_flag,
client_disconnect_timeout=client_disconnect_timeout)
if dargs.get('check_client_result', False):
@@ -516,10 +512,10 @@
def run_test(self, test_name, results_dir='.', host=None,
- parallel_flag=False, background=False,
+ parallel_flag=False,
client_disconnect_timeout=None, *args, **dargs):
self.run_timed_test(test_name, results_dir, host, timeout=None,
- parallel_flag=parallel_flag, background=background,
+ parallel_flag=parallel_flag,
client_disconnect_timeout=client_disconnect_timeout,
*args, **dargs)
@@ -532,13 +528,12 @@
It is not intended to be used directly, rather control files
should be run using the run method in Autotest.
"""
- def __init__(self, host, results_dir, tag, parallel_flag, background):
+ def __init__(self, host, results_dir, tag, parallel_flag):
self.host = host
self.results_dir = results_dir
self.env = host.env
self.tag = tag
self.parallel_flag = parallel_flag
- self.background = background
self.autodir = Autotest.get_installed_autodir(self.host)
control = os.path.join(self.autodir, 'control')
if tag:
@@ -563,10 +558,8 @@
self.host.run('umount %s' % download, ignore_status=True)
- def get_base_cmd_args(self, section):
+ def get_base_cmd_args(self):
args = ['--verbose']
- if section > 0:
- args.append('-c')
if self.tag:
args.append('-t %s' % self.tag)
if self.host.job.use_external_logging():
@@ -579,17 +572,10 @@
return args
- def get_background_cmd(self, section):
- cmd = ['nohup', os.path.join(self.autodir, 'bin/autotest_client')]
- cmd += self.get_base_cmd_args(section)
- cmd += ['>/dev/null', '2>/dev/null', '&']
- return ' '.join(cmd)
-
-
- def get_daemon_cmd(self, section, monitor_dir):
+ def get_daemon_cmd(self, monitor_dir):
cmd = ['nohup', os.path.join(self.autodir, 'bin/autotestd'),
monitor_dir, '-H autoserv']
- cmd += self.get_base_cmd_args(section)
+ cmd += self.get_base_cmd_args()
cmd += ['>/dev/null', '2>/dev/null', '&']
return ' '.join(cmd)
@@ -738,24 +724,6 @@
self.host.job.record('END ABORT', None, None, str(e))
- def _execute_in_background(self, section, timeout):
- full_cmd = self.get_background_cmd(section)
- devnull = open(os.devnull, "w")
-
- self.copy_client_config_file(self.get_client_log())
-
- self.host.job.push_execution_context(self.results_dir)
- try:
- result = self.host.run(full_cmd, ignore_status=True,
- timeout=timeout,
- stdout_tee=devnull,
- stderr_tee=devnull)
- finally:
- self.host.job.pop_execution_context()
-
- return result
-
-
@staticmethod
def _strip_stderr_prologue(stderr):
"""Strips the 'standard' prologue that get pre-pended to every
@@ -769,10 +737,10 @@
return "\n".join(stderr_lines)
- def _execute_daemon(self, section, timeout, stderr_redirector,
+ def _execute_daemon(self, timeout, stderr_redirector,
client_disconnect_timeout):
monitor_dir = self.host.get_tmp_dir()
- daemon_cmd = self.get_daemon_cmd(section, monitor_dir)
+ daemon_cmd = self.get_daemon_cmd(monitor_dir)
# grab the location for the server-side client log file
client_log_prefix = self.get_client_log()
@@ -824,31 +792,23 @@
self.host.job.pop_execution_context()
- def execute_section(self, section, timeout, stderr_redirector,
- client_disconnect_timeout):
- # TODO(crbug.com/684311) The claim is that section is never more than 0
- # in pratice. After validating for a week or so, delete all support of
- # multiple sections.
- metrics.Counter('chromeos/autotest/autotest/sections').increment(
- fields={'is_first_section': (section == 0)})
- logging.info("Executing %s/bin/autotest %s/control phase %d",
- self.autodir, self.autodir, section)
+ def _really_execute_control(self, timeout, stderr_redirector,
+ client_disconnect_timeout):
+ logging.info("Executing %s/bin/autotest %s/controt",
+ self.autodir, self.autodir)
- if self.background:
- result = self._execute_in_background(section, timeout)
- else:
- result = self._execute_daemon(section, timeout, stderr_redirector,
- client_disconnect_timeout)
+ result = self._execute_daemon(timeout, stderr_redirector,
+ client_disconnect_timeout)
last_line = stderr_redirector.last_line
# check if we failed hard enough to warrant an exception
if result.exit_status == 1:
err = error.AutotestRunError("client job was aborted")
- elif not self.background and not result.stderr:
+ elif not result.stderr:
err = error.AutotestRunError(
- "execute_section %s failed to return anything\n"
- "stdout:%s\n" % (section, result.stdout))
+ "_really_execute_control failed to return anything\n"
+ "stdout:%s\n" % result.stdout)
else:
err = None
@@ -862,123 +822,84 @@
return stderr_redirector.last_line
- def _wait_for_reboot(self, old_boot_id):
- logging.info("Client is rebooting")
- logging.info("Waiting for client to halt")
- if not self.host.wait_down(self.host.WAIT_DOWN_REBOOT_TIMEOUT,
- old_boot_id=old_boot_id):
- err = "%s failed to shutdown after %d"
- err %= (self.host.hostname, self.host.WAIT_DOWN_REBOOT_TIMEOUT)
- raise error.AutotestRunError(err)
- logging.info("Client down, waiting for restart")
- if not self.host.wait_up(self.host.DEFAULT_REBOOT_TIMEOUT):
- # since reboot failed
- # hardreset the machine once if possible
- # before failing this control file
- warning = "%s did not come back up, hard resetting"
- warning %= self.host.hostname
- logging.warning(warning)
- try:
- self.host.hardreset(wait=False)
- except (AttributeError, error.AutoservUnsupportedError):
- warning = "Hard reset unsupported on %s"
- warning %= self.host.hostname
- logging.warning(warning)
- raise error.AutotestRunError("%s failed to boot after %ds" %
- (self.host.hostname,
- self.host.DEFAULT_REBOOT_TIMEOUT))
- self.host.reboot_followup()
-
-
def execute_control(self, timeout=None, client_disconnect_timeout=None):
- if not self.background:
- collector = log_collector(self.host, self.tag, self.results_dir)
- hostname = self.host.hostname
- remote_results = collector.client_results_dir
- local_results = collector.server_results_dir
- self.host.job.add_client_log(hostname, remote_results,
- local_results)
- job_record_context = self.host.job.get_record_context()
-
- section = 0
- start_time = time.time()
-
+ collector = log_collector(self.host, self.tag, self.results_dir)
+ hostname = self.host.hostname
+ remote_results = collector.client_results_dir
+ local_results = collector.server_results_dir
+ self.host.job.add_client_log(hostname, remote_results,
+ local_results)
+ job_record_context = self.host.job.get_record_context()
logger = client_logger(self.host, self.tag, self.results_dir)
+
try:
- while not timeout or time.time() < start_time + timeout:
- if timeout:
- section_timeout = start_time + timeout - time.time()
- else:
- section_timeout = None
- boot_id = self.host.get_boot_id()
- last = self.execute_section(section, section_timeout,
- logger, client_disconnect_timeout)
- if self.background:
- return
- section += 1
- if self.is_client_job_finished(last):
- logging.info("Client complete")
- return
- elif self.is_client_job_rebooting(last):
- try:
- self._wait_for_reboot(boot_id)
- except error.AutotestRunError, e:
- self.host.job.record("ABORT", None, "reboot", str(e))
- self.host.job.record("END ABORT", None, None, str(e))
- raise
- continue
+ boot_id = self.host.get_boot_id()
+ last = self._really_execute_control(timeout, logger,
+ client_disconnect_timeout)
+ if self.is_client_job_finished(last):
+ logging.info("Client complete")
+ return
+ elif self.is_client_job_rebooting(last):
+ # TODO(crbug.com/684311) This feature is never used. Validate
+ # and drop this case.
+ m = 'chromeos/autotest/errors/client_test_triggered_reboot'
+ metrics.Counter(m).increment()
+ self.host.job.record("ABORT", None, "reboot",
+ 'client triggered reboot is unsupported')
+ self.host.job.record("END ABORT", None, None,
+ 'client triggered reboot is unsupported')
+ return
- # If a test fails without probable cause we try to bucket it's
- # failure into one of 2 categories. If we can determine the
- # current state of the device and it is suspicious, we close the
- # status lines indicating a failure. If we either cannot
- # determine the state of the device, or it appears totally
- # healthy, we give up and abort.
- try:
- self._diagnose_dut(boot_id)
- except AutotestDeviceError as e:
- # The status lines of the test are pretty much tailed to
- # our log, with indentation, from the client job on the DUT.
- # So if the DUT goes down unexpectedly we'll end up with a
- # malformed status log unless we manually unwind the status
- # stack. Ideally we would want to write a nice wrapper like
- # server_job methods run_reboot, run_group but they expect
- # reboots and we don't.
- self.host.job.record('FAIL', None, None, str(e))
- self.host.job.record('END FAIL', None, None)
- self.host.job.record('END GOOD', None, None)
- self.host.job.failed_with_device_error = True
- return
- except AutotestAbort as e:
- self.host.job.record('ABORT', None, None, str(e))
- self.host.job.record('END ABORT', None, None)
+ # If a test fails without probable cause we try to bucket its
+ # failure into one of two categories. If we can determine the
+ # current state of the device and it is suspicious, we close the
+ # status lines indicating a failure. If we either cannot
+ # determine the state of the device, or it appears totally
+ # healthy, we give up and abort.
+ try:
+ self._diagnose_dut(boot_id)
+ except AutotestDeviceError as e:
+ # The status lines of the test are pretty much tailed to
+ # our log, with indentation, from the client job on the DUT.
+ # So if the DUT goes down unexpectedly we'll end up with a
+ # malformed status log unless we manually unwind the status
+ # stack. Ideally we would want to write a nice wrapper like
+ # server_job methods run_reboot, run_group but they expect
+ # reboots and we don't.
+ self.host.job.record('FAIL', None, None, str(e))
+ self.host.job.record('END FAIL', None, None)
+ self.host.job.record('END GOOD', None, None)
+ self.host.job.failed_with_device_error = True
+ return
+ except AutotestAbort as e:
+ self.host.job.record('ABORT', None, None, str(e))
+ self.host.job.record('END ABORT', None, None)
- # give the client machine a chance to recover from a crash
- self.host.wait_up(
- self.host.HOURS_TO_WAIT_FOR_RECOVERY * 3600)
- logging.debug('Unexpected final status message from '
- 'client %s: %s', self.host.hostname, last)
- # The line 'last' may have sensitive phrases, like
- # 'END GOOD', which breaks the tko parser. So the error
- # message will exclude it, since it will be recorded to
- # status.log.
- msg = ("Aborting - unexpected final status message from "
- "client on %s\n") % self.host.hostname
- raise error.AutotestRunError(msg)
+ # give the client machine a chance to recover from a crash
+ self.host.wait_up(
+ self.host.HOURS_TO_WAIT_FOR_RECOVERY * 3600)
+ logging.debug('Unexpected final status message from '
+ 'client %s: %s', self.host.hostname, last)
+ # The line 'last' may have sensitive phrases, like
+ # 'END GOOD', which breaks the tko parser. So the error
+ # message will exclude it, since it will be recorded to
+ # status.log.
+ msg = ("Aborting - unexpected final status message from "
+ "client on %s\n") % self.host.hostname
+ raise error.AutotestRunError(msg)
finally:
logging.debug('Autotest job finishes running. Below is the '
'post-processing operations.')
logger.close()
- if not self.background:
- collector.collect_client_job_results()
- collector.remove_redundant_client_logs()
- state_file = os.path.basename(self.remote_control_file
- + '.state')
- state_path = os.path.join(self.results_dir, state_file)
- self.host.job.postprocess_client_state(state_path)
- self.host.job.remove_client_log(hostname, remote_results,
- local_results)
- job_record_context.restore()
+ collector.collect_client_job_results()
+ collector.remove_redundant_client_logs()
+ state_file = os.path.basename(self.remote_control_file
+ + '.state')
+ state_path = os.path.join(self.results_dir, state_file)
+ self.host.job.postprocess_client_state(state_path)
+ self.host.job.remove_client_log(hostname, remote_results,
+ local_results)
+ job_record_context.restore()
logging.debug('Autotest job finishes.')
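A hedged sketch of the caller-facing effect of this refactor: run() and its wrappers no longer accept background=, so any call site still passing it raises TypeError (the stub below uses the new signature from this diff, with a dummy body):

def run(control_file, results_dir='.', host=None, timeout=None,
        tag=None, parallel_flag=False, client_disconnect_timeout=None,
        use_packaging=True):
    """Dummy stand-in with the new signature of Autotest.run."""
    return 'client driven via autotestd'

run('control', '.', None)                       # still fine
try:
    run('control', '.', None, background=True)  # callers must drop this
except TypeError as e:
    print('update this call site: %s' % e)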
diff --git a/server/autotest_unittest.py b/server/autotest_unittest.py
index b62a027..6fb5582 100755
--- a/server/autotest_unittest.py
+++ b/server/autotest_unittest.py
@@ -180,7 +180,7 @@
self.base_autotest.install.expect_call(self.host, use_packaging=True)
self.host.wait_up.expect_call(timeout=30)
os.path.abspath.expect_call('.').and_return('.')
- run_obj = autotest._Run.expect_new(self.host, '.', None, False, False)
+ run_obj = autotest._Run.expect_new(self.host, '.', None, False)
tag = None
run_obj.manual_control_file = os.path.join('autodir',
'control.%s' % tag)
@@ -189,7 +189,6 @@
run_obj.tag = tag
run_obj.autodir = 'autodir'
run_obj.verify_machine.expect_call()
- run_obj.background = False
debug = os.path.join('.', 'debug')
os.makedirs.expect_call(debug)
delete_file_list = [run_obj.remote_control_file,
diff --git a/server/frontend.py b/server/frontend.py
index c0679d5..452d3bf 100644
--- a/server/frontend.py
+++ b/server/frontend.py
@@ -117,17 +117,6 @@
print message
-class Planner(RpcClient):
- def __init__(self, user=None, server=None, print_log=True, debug=False,
- reply_debug=False):
- super(Planner, self).__init__(path='/planner/server/rpc/',
- user=user,
- server=server,
- print_log=print_log,
- debug=debug,
- reply_debug=reply_debug)
-
-
class TKO(RpcClient):
def __init__(self, user=None, server=None, print_log=True, debug=False,
reply_debug=False):
diff --git a/server/hosts/cros_repair.py b/server/hosts/cros_repair.py
index 1847455..a3b2991 100644
--- a/server/hosts/cros_repair.py
+++ b/server/hosts/cros_repair.py
@@ -384,6 +384,21 @@
return 'Reset the DUT via servo'
+class CrosRebootRepair(repair.RebootRepair):
+ """Repair a CrOS target by clearing dev mode and rebooting it."""
+
+ def repair(self, host):
+ # N.B. We need to reboot regardless of whether set_gbb_flags
+ # succeeds or fails.
+ host.run('/usr/share/vboot/bin/set_gbb_flags.sh 0',
+ ignore_status=True)
+ super(CrosRebootRepair, self).repair(host)
+
+ @property
+ def description(self):
+ return 'Reset GBB flags and Reboot the host'
+
+
class AutoUpdateRepair(hosts.RepairAction):
"""
Repair by re-installing a test image using autoupdate.
@@ -487,7 +502,7 @@
# firmware.
(FirmwareRepair, 'firmware', (), ('ssh', 'fwstatus', 'good_au',)),
- (repair.RebootRepair, 'reboot', ('ssh',), ('devmode', 'writable',)),
+ (CrosRebootRepair, 'reboot', ('ssh',), ('devmode', 'writable',)),
)
return repair_actions
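To illustrate the behavior change with stand-in classes (the real hosts.RepairAction plumbing is omitted): the CrOS-specific action clears the GBB flags best-effort, then defers to the generic reboot:

class FakeHost(object):
    def run(self, cmd, ignore_status=False):
        print('run: %s (ignore_status=%s)' % (cmd, ignore_status))
    def reboot(self):
        print('reboot')

class RebootRepair(object):  # stand-in for repair.RebootRepair
    def repair(self, host):
        host.reboot()

class CrosRebootRepair(RebootRepair):
    def repair(self, host):
        # Reboot regardless of whether set_gbb_flags.sh succeeds.
        host.run('/usr/share/vboot/bin/set_gbb_flags.sh 0',
                 ignore_status=True)
        super(CrosRebootRepair, self).repair(host)

CrosRebootRepair().repair(FakeHost())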
diff --git a/server/hosts/cros_repair_unittest.py b/server/hosts/cros_repair_unittest.py
index 3a5bcd9..a0c6280 100755
--- a/server/hosts/cros_repair_unittest.py
+++ b/server/hosts/cros_repair_unittest.py
@@ -31,7 +31,8 @@
(cros_repair.ServoResetRepair, 'servoreset', (), ('ssh',)),
(cros_firmware.FirmwareRepair,
'firmware', (), ('ssh', 'fwstatus', 'good_au')),
- (repair.RebootRepair, 'reboot', ('ssh',), ('devmode', 'writable',)),
+ (cros_repair.CrosRebootRepair,
+ 'reboot', ('ssh',), ('devmode', 'writable',)),
(cros_repair.AutoUpdateRepair,
'au',
('ssh', 'writable', 'tpm', 'good_au', 'ext4'),
@@ -82,7 +83,8 @@
(cros_repair.ServoResetRepair, 'servoreset', (), ('ssh',)),
(cros_firmware.FirmwareRepair,
'firmware', (), ('ssh', 'fwstatus', 'good_au')),
- (repair.RebootRepair, 'reboot', ('ssh',), ('devmode', 'writable',)),
+ (cros_repair.CrosRebootRepair,
+ 'reboot', ('ssh',), ('devmode', 'writable',)),
(cros_repair.JetstreamRepair,
'jetstream_repair',
('ssh', 'writable', 'tpm', 'good_au', 'ext4'),
diff --git a/server/hosts/moblab_host.py b/server/hosts/moblab_host.py
index 6fb2a04..aa3468f 100644
--- a/server/hosts/moblab_host.py
+++ b/server/hosts/moblab_host.py
@@ -224,6 +224,10 @@
dut_hostname = match.group('ip')
if dut_hostname in existing_hosts:
break
+ # SSP package IPs start at 150 on the moblab, so this address
+ # is not a DUT.
+ if int(dut_hostname.split('.')[-1]) > 150:
+ break
self.add_dut(dut_hostname)
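A tiny sketch of the new lease filter (the addresses are invented): only the last octet is inspected, and anything above 150 is treated as an SSP container rather than a DUT:

for dut_hostname in ('192.168.231.100', '192.168.231.151'):
    is_dut = int(dut_hostname.split('.')[-1]) <= 150
    print('%s -> %s' % (dut_hostname, 'DUT' if is_dut else 'SSP container'))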
diff --git a/server/hosts/repair.py b/server/hosts/repair.py
index 667380e..64a16cd 100644
--- a/server/hosts/repair.py
+++ b/server/hosts/repair.py
@@ -65,13 +65,12 @@
"""Repair a target device by rebooting it."""
def repair(self, host):
- host.run('/usr/share/vboot/bin/set_gbb_flags.sh 0')
host.reboot()
@property
def description(self):
- return 'Reset GBB flags and Reboot the host'
+ return 'Reboot the host'
class RPMCycleRepair(hosts.RepairAction):
diff --git a/server/site_tests/autoupdate_EndToEndTest/autoupdate_EndToEndTest.py b/server/site_tests/autoupdate_EndToEndTest/autoupdate_EndToEndTest.py
index 2d6299e..6c06637 100755
--- a/server/site_tests/autoupdate_EndToEndTest/autoupdate_EndToEndTest.py
+++ b/server/site_tests/autoupdate_EndToEndTest/autoupdate_EndToEndTest.py
@@ -29,6 +29,9 @@
return ('%s%s\n%s\n%s%s' %
(start, snip[len(start):], text, end, snip[len(end):]))
+UPDATE_ENGINE_PERF_PATH = '/mnt/stateful_partition/unencrypted/preserve'
+UPDATE_ENGINE_PERF_SCRIPT = 'update_engine_performance_monitor.py'
+UPDATE_ENGINE_PERF_RESULTS_FILE = 'perf_data_results.json'
# Update event types.
EVENT_TYPE_DOWNLOAD_COMPLETE = '1'
@@ -449,9 +452,10 @@
raise NotImplementedError
- def stop_update_perf(self):
+ def stop_update_perf(self, resultdir):
"""Stops performance monitoring and returns data (if available).
+ @param resultdir: Directory containing test result files.
@return Dictionary containing performance attributes.
"""
raise NotImplementedError
@@ -712,45 +716,6 @@
client_at.run_test('login_LoginSuccess', tag=tag)
- def _start_perf_mon(self, bindir):
- """Starts monitoring performance and resource usage on a DUT.
-
- Call _stop_perf_mon() with the returned PID to stop monitoring
- and collect the results.
-
- @param bindir: Directoy containing monitoring script.
-
- @return The PID of the newly created DUT monitoring process.
- """
- # We can't assume much about the source image so we copy the
- # performance monitoring script to the DUT directly.
- path = os.path.join(bindir, 'update_engine_performance_monitor.py')
- self._host.send_file(path, '/tmp')
- cmd = 'python /tmp/update_engine_performance_monitor.py --start-bg'
- return int(self._host.run(cmd).stdout)
-
-
- def _stop_perf_mon(self, perf_mon_pid):
- """Stops monitoring performance and resource usage on a DUT.
-
- @param perf_mon_pid: the PID returned from _start_perf_mon().
-
- @return Dictionary containing performance attributes, or None if
- unavailable.
- """
- # Gracefully handle problems with performance monitoring by
- # just returning None.
- try:
- cmd = ('python /tmp/update_engine_performance_monitor.py '
- '--stop-bg=%d') % perf_mon_pid
- perf_json_txt = self._host.run(cmd).stdout
- return json.loads(perf_json_txt)
- except Exception as e:
- logging.warning('Failed to parse output from '
- 'update_engine_performance_monitor.py: %s', e)
- return None
-
-
# Interface overrides.
#
def initialize(self, autotest_devserver, results_dir):
@@ -781,17 +746,27 @@
def start_update_perf(self, bindir):
- if self._perf_mon_pid is None:
- self._perf_mon_pid = self._start_perf_mon(bindir)
+ """Copy performance monitoring script to DUT.
+
+ The updater will kick off the script during the update.
+ """
+ path = os.path.join(bindir, UPDATE_ENGINE_PERF_SCRIPT)
+ self._host.send_file(path, UPDATE_ENGINE_PERF_PATH)
- def stop_update_perf(self):
- perf_data = None
- if self._perf_mon_pid is not None:
- perf_data = self._stop_perf_mon(self._perf_mon_pid)
- self._perf_mon_pid = None
-
- return perf_data
+ def stop_update_perf(self, resultdir):
+ """ Copy the performance metrics back from the DUT."""
+ try:
+ path = os.path.join('/var/log', UPDATE_ENGINE_PERF_RESULTS_FILE)
+ self._host.get_file(path, resultdir)
+ self._host.run('rm %s' % path)
+ script = os.path.join(UPDATE_ENGINE_PERF_PATH,
+ UPDATE_ENGINE_PERF_SCRIPT)
+ self._host.run('rm %s' % script)
+ return os.path.join(resultdir, UPDATE_ENGINE_PERF_RESULTS_FILE)
+ except Exception as e:
+ logging.debug('Failed to copy performance metrics from DUT: %s', e)
+ return None
def trigger_update(self, target_payload_uri):
@@ -849,12 +824,6 @@
e.g. 'localhost:8080/static/my_file.gz'. These are usually
given after staging an artifact using a autotest_devserver
though they can be re-created given enough assumptions.
- *update_url's: Urls refering to the update RPC on a given omaha devserver.
- Since we always use an instantiated omaha devserver to run
- updates, these will always reference an existing instance
- of an omaha devserver that we just created for the purposes
- of updating.
-
"""
version = 1
@@ -918,14 +887,22 @@
pass
- def _report_perf_data(self, perf_data):
+ def _report_perf_data(self, perf_file):
"""Reports performance and resource data.
Currently, performance attributes are expected to include 'rss_peak'
(peak memory usage in bytes).
- @param perf_data: A dictionary containing performance attributes.
+ @param perf_file: A file with performance metrics.
"""
+ logging.debug('Reading perf results from %s.', perf_file)
+ try:
+ with open(perf_file, 'r') as perf_file_handle:
+ perf_data = json.loads(perf_file_handle.read())
+ except Exception as e:
+ logging.warning('Error while reading the perf data file: %s', e)
+ return
+
rss_peak = perf_data.get('rss_peak')
if rss_peak:
rss_peak_kib = rss_peak / 1024
@@ -935,8 +912,8 @@
units='KiB',
higher_is_better=False)
else:
- logging.warning('No rss_peak key in JSON returned by '
- 'update_engine_performance_monitor.py')
+ logging.warning('No rss_peak key in JSON returned by %s',
+ UPDATE_ENGINE_PERF_SCRIPT)
def _error_initial_check(self, expected, actual, mismatched_attrs):
@@ -1063,10 +1040,7 @@
source_release = test_conf['source_release']
target_release = test_conf['target_release']
- # TODO(dhaddock): Reuse update_engine_performance_monitor
- # script with chromite autoupdater. Can't use it here anymore because
- # the DUT is restarted a bunch of times during the update and the
- # process is killed before we can get the results back.
+ test_platform.start_update_perf(self.bindir)
try:
# Update the DUT to the target image.
pid = test_platform.trigger_update(test_conf['target_payload_uri'])
@@ -1128,6 +1102,10 @@
logging.fatal('ERROR: Failure occurred during the target update.')
raise
+ perf_file = test_platform.stop_update_perf(self.job.resultdir)
+ if perf_file is not None:
+ self._report_perf_data(perf_file)
+
if test_platform.oobe_triggers_update():
# If DUT automatically checks for update during OOBE,
# checking the post-update CrOS version and slot is sufficient.
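For reference, a hedged sketch of the server-side half of the new perf flow: after the update, the test pulls perf_data_results.json off the DUT and reports only the rss_peak value, converted to KiB (the sample JSON is invented):

import json

sample = '{"rss_peak": 73400320}'  # hypothetical file contents
perf_data = json.loads(sample)
rss_peak = perf_data.get('rss_peak')
if rss_peak:
    print('peak_mem_usage: %d KiB' % (rss_peak / 1024))
else:
    print('No rss_peak key in JSON')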
diff --git a/server/site_tests/enterprise_CFM_USBPeripheralRebootStress/enterprise_CFM_USBPeripheralRebootStress.py b/server/site_tests/enterprise_CFM_USBPeripheralRebootStress/enterprise_CFM_USBPeripheralRebootStress.py
index 37390b8..f889fb3 100644
--- a/server/site_tests/enterprise_CFM_USBPeripheralRebootStress/enterprise_CFM_USBPeripheralRebootStress.py
+++ b/server/site_tests/enterprise_CFM_USBPeripheralRebootStress/enterprise_CFM_USBPeripheralRebootStress.py
@@ -96,16 +96,16 @@
if self.client.servo:
self.client.servo.switch_usbkey('dut')
self.client.servo.set('usb_mux_sel3', 'dut_sees_usbkey')
- time.sleep(_SHORT_TIMEOUT)
+ time.sleep(SHORT_TIMEOUT)
self.client.servo.set('dut_hub1_rst1', 'off')
- time.sleep(_SHORT_TIMEOUT)
+ time.sleep(SHORT_TIMEOUT)
try:
self.cfm_facade.enroll_device()
self.cfm_facade.restart_chrome_for_cfm()
self.cfm_facade.wait_for_telemetry_commands()
if not self.cfm_facade.is_oobe_start_page():
- self.cfm_facade.wait_for_oobe_start_page()
+ self.cfm_facade.wait_for_oobe_start_page()
self.cfm_facade.skip_oobe_screen()
except Exception as e:
raise error.TestFail(str(e))
@@ -135,4 +135,4 @@
if repeat == 0:
break
- tpm_utils.ClearTPMOwnerRequest(self.client)
\ No newline at end of file
+ tpm_utils.ClearTPMOwnerRequest(self.client)
diff --git a/site_utils/merge_checksum.sh b/site_utils/merge_checksum.sh
deleted file mode 100755
index e27ca66..0000000
--- a/site_utils/merge_checksum.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# This script takes a checksum file and merges it into the packages
-# checksum file in ../packages/packages.checksum.
-
-# This script is thread-safe.
-
-set -e
-
-function main () {
- local merge_file="$1"
- local packages_dir="$(dirname $0)/../packages"
- local checksum_file="${packages_dir}/packages.checksum"
-
- # Preparatory work.
- mkdir -p "${packages_dir}"
- touch ${checksum_file}
-
- if [ ! -f "${merge_file}" ]; then
- return
- fi
-
- # This operation is performed using an flock on the packages dir
- # to allow it to run concurrently.
- flock "${packages_dir}" \
- -c "sort -k2,2 -u ${merge_file} ${checksum_file} -o ${checksum_file}"
-}
-
-if [ $# != 1 ]; then
- echo "Not enough arguments."
- exit 1
-fi
-
-main $1
diff --git a/site_utils/run_suite.py b/site_utils/run_suite.py
index 11130d5..1f6c24f 100755
--- a/site_utils/run_suite.py
+++ b/site_utils/run_suite.py
@@ -444,16 +444,6 @@
_LOG_LINK_PREFIX = 'Test-Logs'
- @classmethod
- def get_bug_link(cls, bug_id):
- """Generate a bug link for the given bug_id.
-
- @param bug_id: The id of the bug.
- @return: A link, eg: https://crbug.com/<bug_id>.
- """
- return reporting_utils.link_crbug(bug_id)
-
-
def __init__(self, anchor, server, job_string, bug_info=None, reason=None,
retry_count=0, testname=None):
"""Initialize the LogLink by generating the log URL.
@@ -503,50 +493,58 @@
"""Generate a link formatted to meet buildbot expectations.
If there is a bug associated with this link, report a link to the bug
- and a link to the job logs;
- otherwise report a link to the job logs.
+ and a link to the job logs; otherwise report a link to the job logs.
- @return A list of links formatted for the buildbot log annotator.
+ @return A generator of links formatted for the buildbot log annotator.
"""
- bug_info_strings = []
- info_strings = []
+ if self.bug_url:
+ yield self._get_link_to_bug()
+ yield self._get_link_to_job_logs()
+
+ def _get_link_to_bug(self):
+ """Return buildbot link to bug.
+
+ @return A link formatted for the buildbot log annotator.
+ """
+ info_strings = self._get_info_strings()
+ info_strings.append(self._bug_count_text)
+ anchor_text = self._format_anchor_text(self._BUG_LINK_PREFIX,
+ info_strings)
+ return annotations.StepLink(anchor_text, self.bug_url)
+
+
+ def _get_link_to_job_logs(self):
+ """Return buildbot link to job logs.
+
+ @return A link formatted for the buildbot log annotator.
+ """
+ anchor_text = self._format_anchor_text(self._LOG_LINK_PREFIX,
+ self._get_info_strings())
+ return annotations.StepLink(anchor_text, self.url)
+
+
+ def _get_info_strings(self):
+ """Return a list of info strings for _format_anchor_text()."""
+ info_strings = []
if self.retry_count > 0:
info_strings.append('retry_count: %d' % self.retry_count)
- bug_info_strings.append('retry_count: %d' % self.retry_count)
-
if self.reason:
- bug_info_strings.append(self.reason)
info_strings.append(self.reason)
-
- # Add the bug link to buildbot_links
- if self.bug_url:
- bug_info_strings.append(self._bug_count_text)
-
- bug_anchor_text = self._format_anchor_text(self._BUG_LINK_PREFIX,
- bug_info_strings)
-
- yield annotations.StepLink(bug_anchor_text, self.bug_url)
-
- anchor_text = self._format_anchor_text(self._LOG_LINK_PREFIX,
- info_strings)
- yield annotations.StepLink(anchor_text, self.url)
+ return info_strings
def _format_anchor_text(self, prefix, info_strings):
"""Format anchor text given a prefix and info strings.
@param prefix The prefix of the anchor text.
- @param info_strings The infos presented in the anchor text.
+ @param info_strings Iterable of strings.
@return A anchor_text with the right prefix and info strings.
"""
- anchor_text = '[{prefix}]: {anchor}'.format(
+ return '[{prefix}]: {anchor}: {info}'.format(
prefix=prefix,
- anchor=self.anchor.strip())
- if info_strings:
- info_text = ', '.join(info_strings)
- anchor_text += ': ' + info_text
- return anchor_text
+ anchor=self.anchor.strip(),
+ info=', '.join(info_strings))
@property
def text_link(self):
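Finally, a hedged sketch of the reworked LogLink contract (stand-in class): GenerateBuildbotLinks is now a generator that yields a bug link only when bug_url is set, followed by the job-log link, so callers should iterate rather than assume a list:

class FakeLogLink(object):
    def __init__(self, url, bug_url=None):
        self.url = url
        self.bug_url = bug_url

    def GenerateBuildbotLinks(self):
        if self.bug_url:
            yield '[Bug-Link]: %s' % self.bug_url
        yield '[Test-Logs]: %s' % self.url

link = FakeLogLink('http://server/results', bug_url='https://crbug.com/1')
for step_link in link.GenerateBuildbotLinks():
    print(step_link)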