Autotest: Change logging.warn() to logging.warning().
logging.warn() is a deprecated alias for logging.warning(). See
http://bugs.python.org/issue13235
The substitution was performed via:
~/cros/src/third_party/autotest/files$ find ./ -type f | xargs sed -i 's/logging.warn(/logging.warning(/'
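
For context, logging.warn() is only an alias for logging.warning(): both produce
identical WARNING records, and on Python 3.3+ the alias additionally emits a
DeprecationWarning when such warnings are enabled. A minimal, illustrative sketch
(not part of this change):

  import logging
  import warnings

  # DeprecationWarning is silenced by default; surface it for the demo.
  warnings.simplefilter('default', DeprecationWarning)
  logging.basicConfig(level=logging.WARNING)

  logging.warn('old spelling')           # deprecated alias; warns on Python 3.3+
  logging.warning('preferred spelling')  # canonical call, identical log output
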
BUG=None.
TEST=There should be one-- and preferably only one --obvious way to do it.
Change-Id: Ie5665743121a49f7fbd5d1f47896a7c65e87e489
Reviewed-on: https://chromium-review.googlesource.com/198793
Commit-Queue: Ilja Friedel <[email protected]>
Tested-by: Ilja Friedel <[email protected]>
Reviewed-by: Alex Miller <[email protected]>
diff --git a/server/bin/unit_test_server.py b/server/bin/unit_test_server.py
index a3ab1e4..6da893c 100644
--- a/server/bin/unit_test_server.py
+++ b/server/bin/unit_test_server.py
@@ -54,7 +54,7 @@
if os.path.exists(src):
shutil.copy(src, dst)
else:
- logging.warn('Unable to locate %s' % src)
+ logging.warning('Unable to locate %s' % src)
# Append the coverage report
self.write_perf_keyval(filtered)
diff --git a/server/cros/chameleon/chameleon_test.py b/server/cros/chameleon/chameleon_test.py
index 9467751..d57ca65 100644
--- a/server/cros/chameleon/chameleon_test.py
+++ b/server/cros/chameleon/chameleon_test.py
@@ -102,7 +102,7 @@
if self.chameleon.is_healthy():
logging.info('Chameleon is healthy.')
else:
- logging.warn('Chameleon is not recovered after repair.')
+ logging.warning('Chameleon is not recovered after repair.')
# Unplug the Chameleon port, not to affect other test cases.
if hasattr(self, 'chameleon_port') and self.chameleon_port:
@@ -191,7 +191,7 @@
else:
message += (', within the acceptable range %d' %
total_wrong_pixels_margin)
- logging.warn(message)
+ logging.warning(message)
else:
logging.info('Result of %s: all pixels match', tag)
for file_path in (chameleon_path, dut_path):
diff --git a/server/cros/dynamic_suite/job_status.py b/server/cros/dynamic_suite/job_status.py
index 3fb52fa..51ff415 100644
--- a/server/cros/dynamic_suite/job_status.py
+++ b/server/cros/dynamic_suite/job_status.py
@@ -76,7 +76,7 @@
HqeIntStatus.get_value(HqeIntStatus.RUNNING)):
hosts.append(None)
elif not e['host']:
- logging.warn('Job %s (%s) has an entry with no host!',
+ logging.warning('Job %s (%s) has an entry with no host!',
job.name, job.id)
hosts.append(None)
else:
@@ -503,7 +503,7 @@
success = group.enough_hosts_succeeded()
if success:
for failure in failures:
- logging.warn("%s failed to reimage.", failure.test_name)
+ logging.warning("%s failed to reimage.", failure.test_name)
failure.override_status('WARN')
failure.record_all(record_entry)
else:
diff --git a/server/cros/dynamic_suite/suite.py b/server/cros/dynamic_suite/suite.py
index f0f0b27..aef45c0 100644
--- a/server/cros/dynamic_suite/suite.py
+++ b/server/cros/dynamic_suite/suite.py
@@ -759,7 +759,7 @@
results_generator = job_status.wait_for_child_results(
self._afe, self._tko, self._suite_job_id)
else:
- logging.warn('Unknown suite_job_id, falling back to less '
+ logging.warning('Unknown suite_job_id, falling back to less '
'efficient results_generator.')
results_generator = job_status.wait_for_results(self._afe,
self._tko,
@@ -909,7 +909,7 @@
if not forgiving_parser:
msg = "Failed parsing %s\n%s" % (file, e)
raise control_data.ControlVariableException(msg)
- logging.warn("Skipping %s\n%s", file, e)
+ logging.warning("Skipping %s\n%s", file, e)
except Exception, e:
logging.error("Bad %s\n%s", file, e)
logging.debug('Parsed %s control files.', parsed_count)
diff --git a/server/cros/faft/faft_classes.py b/server/cros/faft/faft_classes.py
index 5369ed6..cc2b00d 100644
--- a/server/cros/faft/faft_classes.py
+++ b/server/cros/faft/faft_classes.py
@@ -76,7 +76,7 @@
# commands at the fw screens.
if not self._client.ping_wait_down(timeout):
if orig_boot_id and self._client.get_boot_id() != orig_boot_id:
- logging.warn('Reboot done very quickly.')
+ logging.warning('Reboot done very quickly.')
return
raise ConnectionError()
@@ -333,7 +333,7 @@
self.wait_for_client()
return
except ConnectionError:
- logging.warn('Cold reboot doesn\'t help, still connection error.')
+ logging.warning('Cold reboot doesn\'t help, still connection error.')
# DUT may be broken by a corrupted firmware. Restore firmware.
# We assume the recovery boot still works fine. Since the recovery
@@ -347,7 +347,7 @@
self.restore_firmware()
return
except ConnectionError:
- logging.warn('Restoring firmware doesn\'t help, still '
+ logging.warning('Restoring firmware doesn\'t help, still '
'connection error.')
# Perhaps it's kernel that's broken. Let's try restoring it.
@@ -359,7 +359,7 @@
self.restore_kernel()
return
except ConnectionError:
- logging.warn('Restoring kernel doesn\'t help, still '
+ logging.warning('Restoring kernel doesn\'t help, still '
'connection error.')
# DUT may be broken by a corrupted OS image. Restore OS image.
@@ -374,7 +374,7 @@
logging.info('Successfully restore OS image.')
return
except ConnectionError:
- logging.warn('Restoring OS image doesn\'t help, still connection '
+ logging.warning('Restoring OS image doesn\'t help, still connection '
'error.')
def _ensure_client_in_recovery(self):
@@ -725,7 +725,7 @@
"""
if not self.faft_config.chrome_ec:
if not suppress_warning:
- logging.warn('Requires Chrome EC to run this test.')
+ logging.warning('Requires Chrome EC to run this test.')
return False
if not required_cap:
@@ -734,7 +734,7 @@
for cap in required_cap:
if cap not in self.faft_config.ec_capability:
if not suppress_warning:
- logging.warn('Requires EC capability "%s" to run this '
+ logging.warning('Requires EC capability "%s" to run this '
'test.', cap)
return False
@@ -1002,7 +1002,7 @@
self.ec_uart_file = os.path.join(self.resultsdir, 'ec_uart.txt')
except error.TestFail as e:
if 'No control named' in str(e):
- logging.warn('The servod is too old that ec_uart_capture '
+ logging.warning('The servod is too old that ec_uart_capture '
'not supported.')
else:
logging.info('Not a Google EC, cannot capture ec console output.')
@@ -1041,7 +1041,7 @@
if servo_log:
self.servo_log_original_len = len(servo_log)
else:
- logging.warn('Servo log file not found.')
+ logging.warning('Servo log file not found.')
def record_servo_log(self):
"""Record the servo log to the results directory."""
diff --git a/server/cros/servo/firmware_programmer.py b/server/cros/servo/firmware_programmer.py
index 542aa88..f9c5c32 100644
--- a/server/cros/servo/firmware_programmer.py
+++ b/server/cros/servo/firmware_programmer.py
@@ -214,7 +214,7 @@
'zgb', 'mario', 'squawks'):
_bios_prog = FlashromProgrammer(servo)
else:
- logging.warn('No BIOS programmer found for board: %s', _board)
+ logging.warning('No BIOS programmer found for board: %s', _board)
return _bios_prog
@@ -237,7 +237,7 @@
'squawks'):
_ec_prog = FlashECProgrammer(servo)
else:
- logging.warn('No EC programmer found for board: %s', _board)
+ logging.warning('No EC programmer found for board: %s', _board)
return _ec_prog
diff --git a/server/cros/servo/servo.py b/server/cros/servo/servo.py
index 22d8d7b..09c2c5d 100644
--- a/server/cros/servo/servo.py
+++ b/server/cros/servo/servo.py
@@ -90,7 +90,7 @@
if self.get_version() == "servo_v2":
self._programmer = firmware_programmer.ProgrammerV2(self)
else:
- logging.warn("No firmware programmer for servo version: %s",
+ logging.warning("No firmware programmer for servo version: %s",
self.get_version())
@@ -353,7 +353,7 @@
self.set_nocheck(gpio_name, gpio_value)
retry_count = Servo.GET_RETRY_MAX
while gpio_value != self.get(gpio_name) and retry_count:
- logging.warn("%s != %s, retry %d", gpio_name, gpio_value,
+ logging.warning("%s != %s, retry %d", gpio_name, gpio_value,
retry_count)
retry_count -= 1
time.sleep(Servo.SHORT_DELAY)
diff --git a/server/hosts/abstract_ssh.py b/server/hosts/abstract_ssh.py
index a598ee5..0201082 100644
--- a/server/hosts/abstract_ssh.py
+++ b/server/hosts/abstract_ssh.py
@@ -61,7 +61,7 @@
# don't try to use it for any future file transfers.
self._use_rsync = self._check_rsync()
if not self._use_rsync:
- logging.warn("rsync not available on remote host %s -- disabled",
+ logging.warning("rsync not available on remote host %s -- disabled",
self.hostname)
return self._use_rsync
@@ -280,7 +280,7 @@
utils.run(rsync)
try_scp = False
except error.CmdError, e:
- logging.warn("trying scp, rsync failed: %s", e)
+ logging.warning("trying scp, rsync failed: %s", e)
if try_scp:
logging.debug('Trying scp.')
@@ -360,7 +360,7 @@
utils.run(rsync)
try_scp = False
except error.CmdError, e:
- logging.warn("trying scp, rsync failed: %s", e)
+ logging.warning("trying scp, rsync failed: %s", e)
if try_scp:
logging.debug('Trying scp.')
@@ -683,7 +683,7 @@
break
time.sleep(.2)
else:
- logging.warn('Timed out waiting for master-ssh connection '
+ logging.warning('Timed out waiting for master-ssh connection '
'to be established.')
diff --git a/server/hosts/cros_host.py b/server/hosts/cros_host.py
index 892be7e..5ee91da 100644
--- a/server/hosts/cros_host.py
+++ b/server/hosts/cros_host.py
@@ -668,7 +668,7 @@
logging.info('DUT is updated with stateful update.')
except Exception as e:
logging.exception(e)
- logging.warn('Failed to stateful update DUT, force to update.')
+ logging.warning('Failed to stateful update DUT, force to update.')
inactive_kernel = None
# Do a full update if stateful update is not applicable or failed.
@@ -1008,7 +1008,7 @@
stats.Counter(
'%s.%s.RepairNA' % (repair_func.__name__,
board)).increment()
- logging.warn('Repair function NA: %s', e)
+ logging.warning('Repair function NA: %s', e)
errors.append(str(e))
except Exception as e:
stats.Counter(
@@ -1017,7 +1017,7 @@
stats.Counter(
'%s.%s.FAILED' % (repair_func.__name__,
board)).increment()
- logging.warn('Failed to repair device: %s', e)
+ logging.warning('Failed to repair device: %s', e)
errors.append(str(e))
stats.Counter('Full_Repair_Failed').increment()
@@ -1091,7 +1091,7 @@
self._restart_ui()
except (error.AutotestRunError, error.AutoservRunError,
FactoryImageCheckerException):
- logging.warn('Unable to restart ui, rebooting device.')
+ logging.warning('Unable to restart ui, rebooting device.')
# Since restarting the UI fails fall back to normal Autotest
# cleanup routines, i.e. reboot the machine.
super(CrosHost, self).cleanup()
diff --git a/server/hosts/logfile_monitor.py b/server/hosts/logfile_monitor.py
index e5990f3..a295f57 100644
--- a/server/hosts/logfile_monitor.py
+++ b/server/hosts/logfile_monitor.py
@@ -210,7 +210,7 @@
missing = follow_paths_set.difference(existing)
if missing:
# Log warning that we are missing expected remote paths.
- logging.warn('Target %s is missing expected remote paths: %s',
+ logging.warning('Target %s is missing expected remote paths: %s',
self.hostname, ', '.join(missing))
# If none of them exist just return (for now).
@@ -242,7 +242,7 @@
try:
patterns_path = resolve_patterns_path(patterns_path)
except InvalidPatternsPathError, e:
- logging.warn('Specified patterns_path is invalid: %s, %s',
+ logging.warning('Specified patterns_path is invalid: %s, %s',
patterns_path, str(e))
else:
sane_pattern_paths.append(patterns_path)
diff --git a/server/hosts/paramiko_host.py b/server/hosts/paramiko_host.py
index 52f7d71..3a34cc3 100644
--- a/server/hosts/paramiko_host.py
+++ b/server/hosts/paramiko_host.py
@@ -130,7 +130,7 @@
transport.close()
raise paramiko.AuthenticationException()
return transport
- logging.warn("SSH negotiation (%s:%d) timed out, retrying",
+ logging.warning("SSH negotiation (%s:%d) timed out, retrying",
self.hostname, self.port)
# HACK: we can't count on transport.join not hanging now, either
transport.join = lambda: None
@@ -175,7 +175,7 @@
try:
channel = self.transport.open_session()
except (socket.error, paramiko.SSHException, EOFError), e:
- logging.warn("Exception occured while opening session: %s", e)
+ logging.warning("Exception occured while opening session: %s", e)
if time.time() - start_time >= timeout:
raise error.AutoservSSHTimeout("ssh failed: %s" % e)
@@ -221,7 +221,7 @@
sent = channel.send(stdin[:cls.BUFFSIZE])
if not sent:
- logging.warn('Could not send a single stdin byte.')
+ logging.warning('Could not send a single stdin byte.')
else:
stdin = stdin[sent:]
if not stdin:
@@ -306,7 +306,7 @@
msg = "ssh connection unexpectedly terminated"
raise error.AutoservRunError(msg, result)
if timed_out:
- logging.warn('Paramiko command timed out after %s sec: %s', timeout,
+ logging.warning('Paramiko command timed out after %s sec: %s', timeout,
command)
if not ignore_timeout:
raise error.AutoservRunError("command timed out", result)
diff --git a/server/hosts/servo_host.py b/server/hosts/servo_host.py
index 3a5fc3f..938e2db 100644
--- a/server/hosts/servo_host.py
+++ b/server/hosts/servo_host.py
@@ -532,7 +532,7 @@
"""
if not self.is_in_lab():
- logging.warn('Skip repairing servo host %s: Not a lab device.',
+ logging.warning('Skip repairing servo host %s: Not a lab device.',
self.hostname)
return
logging.info('Attempting to repair servo host %s.', self.hostname)
@@ -550,11 +550,11 @@
stats.Counter(counter_prefix + 'SUCCEEDED').increment()
return
except ServoHostRepairMethodNA as e:
- logging.warn('Repair method NA: %s', e)
+ logging.warning('Repair method NA: %s', e)
stats.Counter(counter_prefix + 'RepairNA').increment()
errors.append(str(e))
except Exception as e:
- logging.warn('Failed to repair servo: %s', e)
+ logging.warning('Failed to repair servo: %s', e)
stats.Counter(counter_prefix + 'FAILED').increment()
errors.append(str(e))
stats.Counter('servo_host_repair.Full_Repair_Failed').increment()
diff --git a/server/hosts/sonic_host.py b/server/hosts/sonic_host.py
index c2d0c58..701799e 100644
--- a/server/hosts/sonic_host.py
+++ b/server/hosts/sonic_host.py
@@ -207,7 +207,7 @@
self.run('rm -r /data/*')
self.run('rm -f /cache/*')
except (error.AutotestRunError, error.AutoservRunError) as e:
- logging.warn('Unable to remove /data and /cache %s', e)
+ logging.warning('Unable to remove /data and /cache %s', e)
super(SonicHost, self).cleanup()
diff --git a/server/server_job.py b/server/server_job.py
index 1936b01..2d5a93d 100644
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -635,7 +635,7 @@
try:
shutil.rmtree(temp_control_file_dir)
except Exception, e:
- logging.warn('Could not remove temp directory %s: %s',
+ logging.warning('Could not remove temp directory %s: %s',
temp_control_file_dir, e)
if machines and (collect_crashdumps or collect_crashinfo):
diff --git a/server/site_crashcollect.py b/server/site_crashcollect.py
index 6cf5187..b2e0178 100644
--- a/server/site_crashcollect.py
+++ b/server/site_crashcollect.py
@@ -81,7 +81,7 @@
minidumps.append(minidump)
continue
except client_utils.error.CmdError as err:
- logging.warn('Failed to generate stack trace locally for '
+ logging.warning('Failed to generate stack trace locally for '
'dump %s (rc=%d):\n%r',
minidump, err.result_obj.exit_status, err)
@@ -92,7 +92,7 @@
logging.info('Generated stack trace for dump %s', minidump)
continue
except dev_server.DevServerException as e:
- logging.warn('Failed to generate stack trace on devserver for '
+ logging.warning('Failed to generate stack trace on devserver for '
'dump %s:\n%r', minidump, e)
return minidumps
diff --git a/server/site_tests/platform_BootPerfServer/platform_BootPerfServer.py b/server/site_tests/platform_BootPerfServer/platform_BootPerfServer.py
index 7cb1f6a..f394ece 100644
--- a/server/site_tests/platform_BootPerfServer/platform_BootPerfServer.py
+++ b/server/site_tests/platform_BootPerfServer/platform_BootPerfServer.py
@@ -84,7 +84,7 @@
server_results.close()
client_results.close()
else:
- logging.warn('Unable to locate %s', src)
+ logging.warning('Unable to locate %s', src)
# Upload perf keyvals in the client keyval file to perf dashboard.
if upload_perf:
diff --git a/server/site_utils.py b/server/site_utils.py
index 7a6ad28..f34c923 100644
--- a/server/site_utils.py
+++ b/server/site_utils.py
@@ -233,7 +233,7 @@
json_status = _get_lab_status(status_url)
if json_status is None:
# We go ahead and say the lab is open if we can't get the status.
- logging.warn('Could not get a status from %s', status_url)
+ logging.warning('Could not get a status from %s', status_url)
return
_decode_lab_status(json_status, build)
diff --git a/server/subcommand.py b/server/subcommand.py
index 8aa2d96..e9ba9c3 100644
--- a/server/subcommand.py
+++ b/server/subcommand.py
@@ -77,7 +77,7 @@
@returns None or a list of results/exceptions.
"""
if not arglist:
- logging.warn('parallel_simple was called with an empty arglist, '
+ logging.warning('parallel_simple was called with an empty arglist, '
'did you forget to pass in a list of machines?')
# Bypass the multithreading if only one machine.
if len(arglist) == 1: