This is the result of a batch reindent.py run across our tree.
As Martin pointed out, we ought to be more careful and
create a pre-commit script for svn to avoid inserting trash
into the tree; meanwhile, this is a good start to cleaning
things up.
Signed-off-by: Lucas Meneghel Rodrigues <[email protected]>
git-svn-id: http://test.kernel.org/svn/autotest/trunk@3487 592f7852-d20e-0410-864c-8624ca9c26a4
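
For reference, a minimal sketch of the kind of pre-commit hook Martin
suggested, in the spirit of reindent.py (which rewrites .py files to
4-space indents and strips hard tabs and trailing whitespace). This is
hypothetical and untested, written in Python 2 like the rest of the
tree: it assumes svn invokes the hook as "pre-commit REPOS TXN" and
that svnlook is on PATH, and it rejects any changed Python file that
still contains hard tabs or trailing whitespace.

    #!/usr/bin/env python
    # Hypothetical svn pre-commit hook sketch (not part of this commit).
    # svn calls pre-commit hooks with two arguments: REPOS TXN
    import subprocess, sys

    def svnlook(subcmd, txn, repos, *args):
        # Run "svnlook <subcmd> -t <txn> <repos> [path]", return stdout.
        cmd = ['svnlook', subcmd, '-t', txn, repos] + list(args)
        return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]

    def main(repos, txn):
        bad = []
        for line in svnlook('changed', txn, repos).splitlines():
            # "svnlook changed" lines look like "U   path/to/file.py"
            status, path = line[:4].strip(), line[4:].strip()
            if status.startswith('D') or not path.endswith('.py'):
                continue
            text = svnlook('cat', txn, repos, path)
            has_trailing = [l for l in text.splitlines() if l != l.rstrip()]
            if '\t' in text or has_trailing:
                bad.append(path)
        if bad:
            sys.stderr.write('Whitespace trash in: %s\n' % ', '.join(bad))
            return 1
        return 0

    if __name__ == '__main__':
        sys.exit(main(sys.argv[1], sys.argv[2]))

Installed as hooks/pre-commit in the repository, svn would then refuse
any commit that reintroduces the kind of whitespace this patch removes.
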
diff --git a/cli/change_protection_level.py b/cli/change_protection_level.py
index 0807878..0e6598d 100755
--- a/cli/change_protection_level.py
+++ b/cli/change_protection_level.py
@@ -21,13 +21,13 @@
hosts = afe_proxy.run('get_hosts', hostname__in=leftover_args[1:])
for host in hosts:
- try:
- afe_proxy.run('modify_host', host['id'], protection=protection_level)
- except Exception, exc:
- print 'For host %s:', host['hostname']
- traceback.print_exc()
- else:
- print 'Host %s succeeded' % host['hostname']
+ try:
+ afe_proxy.run('modify_host', host['id'], protection=protection_level)
+ except Exception, exc:
+ print 'For host %s:', host['hostname']
+ traceback.print_exc()
+ else:
+ print 'Host %s succeeded' % host['hostname']
print 'Invalid hosts:'
print ','.join(set(leftover_args[1:]) - set(host['hostname'] for host in hosts))
diff --git a/cli/label_unittest.py b/cli/label_unittest.py
index 3b25777..817a340 100755
--- a/cli/label_unittest.py
+++ b/cli/label_unittest.py
@@ -109,7 +109,7 @@
{'name': 'label1', 'platform': False,
'only_if_needed': False},
False,
- '''ValidationError: {'name':
+ '''ValidationError: {'name':
'This value must be unique (label0)'}''')],
out_words_ok=['Created', 'label0'],
out_words_no=['label1'],
diff --git a/cli/threads.py b/cli/threads.py
index 999adb4..7738b0a 100755
--- a/cli/threads.py
+++ b/cli/threads.py
@@ -1,5 +1,5 @@
#
-# Copyright 2008 Google Inc.
+# Copyright 2008 Google Inc.
# Released under the GPLv2
import threading, Queue
diff --git a/cli/topic_common.py b/cli/topic_common.py
index 1e1e08c..2fbacc4 100755
--- a/cli/topic_common.py
+++ b/cli/topic_common.py
@@ -285,7 +285,7 @@
# Build a dictionary with the 'what_failed' as keys. The
# values are dictionaries with the errmsg as keys and a set
# of items as values.
- # self.failed =
+ # self.failed =
# {'Operation delete_host_failed': {'AclAccessViolation:
# set('host0', 'host1')}}
# Try to gather all the same error messages together,
diff --git a/cli/topic_common_unittest.py b/cli/topic_common_unittest.py
index 1021f80..5f4284e 100755
--- a/cli/topic_common_unittest.py
+++ b/cli/topic_common_unittest.py
@@ -252,7 +252,7 @@
class opt(object):
flist_obj = cli_mock.create_file('a b c\nd,e\nf\ng, \n, ,,')
flist = flist_obj.name
- inline = 'a b,c,d h, , ,, '
+ inline = 'a b,c,d h, , ,, '
self.__test_parsing_all_good(opt(), ['i','j,d'],
['a', 'b', 'c', 'd', 'e',
'f', 'g', 'h', 'i', 'j'])
diff --git a/client/bin/kernel.py b/client/bin/kernel.py
index 9ec59a9..b8b3a65 100755
--- a/client/bin/kernel.py
+++ b/client/bin/kernel.py
@@ -783,11 +783,11 @@
def rpm_kernel_vendor(job, rpm_package, subdir):
- vendor = utils.get_os_vendor()
- if vendor == "SUSE":
- return rpm_kernel_suse(job, rpm_package, subdir)
- else:
- return rpm_kernel(job, rpm_package, subdir)
+ vendor = utils.get_os_vendor()
+ if vendor == "SUSE":
+ return rpm_kernel_suse(job, rpm_package, subdir)
+ else:
+ return rpm_kernel(job, rpm_package, subdir)
# just make the preprocessor a nop
@@ -808,7 +808,7 @@
"""
kernel_paths = [preprocess_path(path)]
if kernel_paths[0].endswith('.list'):
- # Fetch the list of packages to install
+ # Fetch the list of packages to install
kernel_list = os.path.join(tmp_dir, 'kernel.list')
utils.get_file(kernel_paths[0], kernel_list)
kernel_paths = [p.strip() for p in open(kernel_list).readlines()]
diff --git a/client/bin/kernel_config.py b/client/bin/kernel_config.py
index 58ab3a3..4b6473b 100755
--- a/client/bin/kernel_config.py
+++ b/client/bin/kernel_config.py
@@ -129,12 +129,12 @@
utils.system('yes "" | make oldconfig > /dev/null')
if new_config:
shutil.copyfile(self.build_config, new_config)
-
+
def config_record(self, name):
#Copy the current .config file to the config.<name>[.<n>]
- i = 1
+ i = 1
to = self.config_dir + '/config.%s' % name
while os.path.exists(to):
i += 1
to = self.config_dir + '/config.%s.%d' % (name,i)
- shutil.copyfile(self.build_dir + '/.config', to)
+ shutil.copyfile(self.build_dir + '/.config', to)
diff --git a/client/bin/net/net_utils.py b/client/bin/net/net_utils.py
index c9343cf..a2b8d00 100755
--- a/client/bin/net/net_utils.py
+++ b/client/bin/net/net_utils.py
@@ -709,7 +709,7 @@
"""
packet_len = len(raw_frame)
if packet_len < ethernet.HDR_LEN:
- return None
+ return None
payload_len = packet_len - ethernet.HDR_LEN
frame = {}
diff --git a/client/bin/net/net_utils_unittest.py b/client/bin/net/net_utils_unittest.py
index 32a8cef..1c7e1ef 100755
--- a/client/bin/net/net_utils_unittest.py
+++ b/client/bin/net/net_utils_unittest.py
@@ -580,7 +580,7 @@
except error.TestError:
pass
else:
- self.assertEquals(0,1)
+ self.assertEquals(0,1)
self.god.check_playback()
# catch exception on bond enabled
@@ -590,7 +590,7 @@
except error.TestError:
pass
else:
- self.assertEquals(0,1)
+ self.assertEquals(0,1)
self.god.check_playback()
# check that setting tg3 and bnx2x driver have a sleep call
@@ -648,7 +648,7 @@
except error.TestError:
pass
else:
- self.assertEquals(0,1)
+ self.assertEquals(0,1)
self.god.check_playback()
# catch exception on phyint and mac failures
@@ -664,7 +664,7 @@
except error.TestError:
pass
else:
- self.assertEquals(0,1)
+ self.assertEquals(0,1)
self.god.check_playback()
@@ -677,7 +677,7 @@
except error.TestError:
pass
else:
- self.assertEquals(0,1)
+ self.assertEquals(0,1)
self.god.check_playback()
self.god.stub_function(net_utils.bonding, 'is_enabled')
diff --git a/client/bin/utils.py b/client/bin/utils.py
index 2261266..8b19d24 100755
--- a/client/bin/utils.py
+++ b/client/bin/utils.py
@@ -13,4 +13,3 @@
from autotest_lib.client.bin.base_utils import *
if os.path.exists(os.path.join(os.path.dirname(__file__), 'site_utils.py')):
from autotest_lib.client.bin.site_utils import *
-
diff --git a/client/common_lib/logging_config.py b/client/common_lib/logging_config.py
index 657a0d4..afe754a 100644
--- a/client/common_lib/logging_config.py
+++ b/client/common_lib/logging_config.py
@@ -6,16 +6,16 @@
logging.basicConfig(level=logging.DEBUG)
class AllowBelowSeverity(logging.Filter):
- """
- Allows only records less severe than a given level (the opposite of what
- the normal logging level filtering does.
- """
- def __init__(self, level):
- self.level = level
+ """
+ Allows only records less severe than a given level (the opposite of what
+ the normal logging level filtering does.
+ """
+ def __init__(self, level):
+ self.level = level
- def filter(self, record):
- return record.levelno < self.level
+ def filter(self, record):
+ return record.levelno < self.level
class LoggingConfig(object):
@@ -33,8 +33,8 @@
datefmt='%H:%M:%S')
def __init__(self):
- self.logger = logging.getLogger()
- self.global_level = logging.DEBUG
+ self.logger = logging.getLogger()
+ self.global_level = logging.DEBUG
@classmethod
diff --git a/client/common_lib/utils.py b/client/common_lib/utils.py
index a285732..a1161d1 100644
--- a/client/common_lib/utils.py
+++ b/client/common_lib/utils.py
@@ -978,8 +978,8 @@
pid_path = os.path.abspath(os.path.join(my_path, "../.."))
pidf = open(os.path.join(pid_path, "%s.pid" % program_name), "w")
if pidf:
- pidf.write("%s\n" % os.getpid())
- pidf.close()
+ pidf.write("%s\n" % os.getpid())
+ pidf.close()
def get_relative_path(path, reference):
diff --git a/client/profilers/lttng/lttng.py b/client/profilers/lttng/lttng.py
index bf1a2e7..8905b25 100755
--- a/client/profilers/lttng/lttng.py
+++ b/client/profilers/lttng/lttng.py
@@ -92,7 +92,7 @@
def start(self, test):
self.output = os.path.join(test.profdir, 'lttng')
- utils.system('%s -n test -d -l %s/ltt -t %s' %
+ utils.system('%s -n test -d -l %s/ltt -t %s' %
(self.lttctl, self.mountpoint, self.output))
diff --git a/client/profilers/sar/sar.py b/client/profilers/sar/sar.py
index 6b55a00..fbe0639 100644
--- a/client/profilers/sar/sar.py
+++ b/client/profilers/sar/sar.py
@@ -2,7 +2,7 @@
Sets up a subprocess to run sar from the sysstat suite
Default options:
-sar -A -f
+sar -A -f
"""
diff --git a/client/tests/cerberus/cerberus.py b/client/tests/cerberus/cerberus.py
index b1db5d2..828bcea 100644
--- a/client/tests/cerberus/cerberus.py
+++ b/client/tests/cerberus/cerberus.py
@@ -5,9 +5,9 @@
class cerberus(test.test):
"""
- This autotest module runs CTCS (Cerberus Test Control System). This test
- suite was developed for the now extinct VA Linux's manufacturing system
- it has several hardware and software stress tests that can be run in
+ This autotest module runs CTCS (Cerberus Test Control System). This test
+ suite was developed for the now extinct VA Linux's manufacturing system
+ it has several hardware and software stress tests that can be run in
parallel. It does have a control file system that allows testers to specify
the sorts of tests that they want to see executed. It's an excelent stress
test for hardware and kernel.
@@ -24,7 +24,7 @@
self.nfail = 0
- def setup(self, tarball='ctcs-1.3.1pre1.tar.bz2', length = '4h',
+ def setup(self, tarball='ctcs-1.3.1pre1.tar.bz2', length = '4h',
tcf_contents=None):
"""
Builds the test suite, and sets up the control file that is going to
@@ -39,7 +39,7 @@
utils.extract_tarball_to_dir(cerberus_tarball, self.srcdir)
os.chdir(self.srcdir)
- # Apply patch to fix build problems on newer distros (absence of
+ # Apply patch to fix build problems on newer distros (absence of
# asm/page.h include.
utils.system('patch -p1 < ../fix-ctcs-build.patch')
utils.system('make')
@@ -80,7 +80,7 @@
# After we are done with this iterations, we move the log files to
# the results dir
log_base_path = os.path.join(self.srcdir, 'log')
- log_dir = glob.glob(os.path.join(log_base_path,
+ log_dir = glob.glob(os.path.join(log_base_path,
'autotest.tcf.log.*'))[0]
logging.debug('Copying %s log directory to results dir', log_dir)
dst = os.path.join(self.resultsdir, os.path.basename(log_dir))
diff --git a/client/tests/dma_memtest/dma_memtest.py b/client/tests/dma_memtest/dma_memtest.py
index c35b545..189bde4 100644
--- a/client/tests/dma_memtest/dma_memtest.py
+++ b/client/tests/dma_memtest/dma_memtest.py
@@ -5,7 +5,7 @@
class dma_memtest(test.test):
"""
- A test for the memory subsystem against heavy IO and DMA operations,
+ A test for the memory subsystem against heavy IO and DMA operations,
implemented based on the work of Doug Leford
(http://people.redhat.com/dledford/memtest.shtml)
@@ -23,8 +23,8 @@
Downloads a copy of the linux kernel, calculate an estimated size of
the uncompressed tarball, use this value to calculate the number of
copies of the linux kernel that will be uncompressed.
-
- @param tarball_base: Name of the kernel tarball location that will
+
+ @param tarball_base: Name of the kernel tarball location that will
be looked up on the kernel.org mirrors.
@param parallel: If we are going to uncompress the copies of the
kernel in parallel or not
@@ -37,7 +37,7 @@
tarball_url = os.path.join(kernel_repo, tarball_base)
tarball_md5 = '296a6d150d260144639c3664d127d174'
logging.info('Downloading linux kernel tarball')
- self.tarball = utils.unmap_url_cache(self.cachedir, tarball_url,
+ self.tarball = utils.unmap_url_cache(self.cachedir, tarball_url,
tarball_md5)
size_tarball = os.path.getsize(self.tarball) / 1024 / 1024
# Estimation of the tarball size after uncompression
@@ -119,7 +119,7 @@
logging.info('Comparing test copies with base copy')
for j in range(self.sim_cps):
- tmp_dir = 'linux.%s/%s' % (j,
+ tmp_dir = 'linux.%s/%s' % (j,
os.path.basename(self.tarball).strip('.tar.bz2'))
if self.parallel:
diff_cmd = ['diff', '-U3', '-rN', 'linux.orig', tmp_dir]
diff --git a/client/tests/interbench/interbench.py b/client/tests/interbench/interbench.py
index c36d303..e988882 100644
--- a/client/tests/interbench/interbench.py
+++ b/client/tests/interbench/interbench.py
@@ -20,5 +20,5 @@
def run_once(self, args = ''):
os.chdir(self.tmpdir)
args += " -c"
- utils.system("%s/interbench -m 'run #%s' %s" % (self.srcdir,
+ utils.system("%s/interbench -m 'run #%s' %s" % (self.srcdir,
self.iteration, args))
diff --git a/client/tests/iozone/iozone.py b/client/tests/iozone/iozone.py
index ea0a9a6..02f3413 100644
--- a/client/tests/iozone/iozone.py
+++ b/client/tests/iozone/iozone.py
@@ -120,4 +120,3 @@
keylist[key_name] = result
self.write_perf_keyval(keylist)
-
diff --git a/client/tests/ipv6connect/ipv6connect.py b/client/tests/ipv6connect/ipv6connect.py
index 1d9a63a..5260ba2 100644
--- a/client/tests/ipv6connect/ipv6connect.py
+++ b/client/tests/ipv6connect/ipv6connect.py
@@ -32,6 +32,6 @@
def postprocess(self):
- pattern = re.compile(r'\nTotal time = ([0-9.]+)s\n')
- for duration in pattern.findall('\n'.join(self.results)):
- self.write_perf_keyval({'time': duration})
+ pattern = re.compile(r'\nTotal time = ([0-9.]+)s\n')
+ for duration in pattern.findall('\n'.join(self.results)):
+ self.write_perf_keyval({'time': duration})
diff --git a/client/tests/kvm/kvm_install.py b/client/tests/kvm/kvm_install.py
index 7118357..154b83c 100755
--- a/client/tests/kvm/kvm_install.py
+++ b/client/tests/kvm/kvm_install.py
@@ -10,7 +10,7 @@
sub directory of module_dir. Function will walk through module_dir until
it finds the modules.
- @param module_dir: Directory where the KVM modules are located.
+ @param module_dir: Directory where the KVM modules are located.
"""
vendor = "intel"
if os.system("grep vmx /proc/cpuinfo 1>/dev/null") != 0:
@@ -112,7 +112,7 @@
elif load_modules == 'no':
self.load_modules = False
- if install_mode == 'localsrc':
+ if install_mode == 'localsrc':
if not srcdir:
raise error.TestError("Install from source directory specified"
"but no source directory provided on the"
@@ -144,7 +144,7 @@
snapshot_date = params.get("snapshot_date")
if not snapshot_date:
# Take yesterday's snapshot
- d = (datetime.date.today() -
+ d = (datetime.date.today() -
datetime.timedelta(1)).strftime("%Y%m%d")
else:
d = snapshot_date
diff --git a/client/tests/kvm/kvm_vm.py b/client/tests/kvm/kvm_vm.py
index eba9b84..2ea6681 100644
--- a/client/tests/kvm/kvm_vm.py
+++ b/client/tests/kvm/kvm_vm.py
@@ -751,7 +751,7 @@
else:
self.send_key(char)
-
+
def get_uuid(self):
"""
Catch UUID of the VM.
diff --git a/client/tests/lmbench/lmbench.py b/client/tests/lmbench/lmbench.py
index c375757..6bafc84 100755
--- a/client/tests/lmbench/lmbench.py
+++ b/client/tests/lmbench/lmbench.py
@@ -21,7 +21,7 @@
utils.system('make')
- def run_once(self, mem='', fastmem='NO', slowfs='NO', disks='',
+ def run_once(self, mem='', fastmem='NO', slowfs='NO', disks='',
disks_desc='', mhz='', remote='', enough='5000',
sync_max='1', fsdir=None, file=None):
if not fsdir:
diff --git a/client/tests/lsb_dtk/lsb_dtk.py b/client/tests/lsb_dtk/lsb_dtk.py
index 19f2bde..7d3abed 100644
--- a/client/tests/lsb_dtk/lsb_dtk.py
+++ b/client/tests/lsb_dtk/lsb_dtk.py
@@ -39,12 +39,12 @@
# First, we download the LSB DTK manager package, worry about
# installing it later
dtk_manager_arch = self.config.get('dtk-manager', 'arch-%s' % self.arch)
- dtk_manager_url = self.config.get('dtk-manager',
+ dtk_manager_url = self.config.get('dtk-manager',
'tarball_url') % dtk_manager_arch
if not dtk_manager_url:
raise error.TestError('Could not get DTK manager URL from'
' configuration file')
-
+
dtk_md5 = self.config.get('dtk-manager', 'md5-%s' % self.arch)
if dtk_md5:
logging.info('Caching LSB DTK manager RPM')
@@ -54,7 +54,7 @@
else:
raise error.TestError('Could not find DTK manager package md5,'
' cannot cache DTK manager tarball')
-
+
# Get LSB tarball, cache it and uncompress under autotest srcdir
if self.config.get('lsb', 'override_default_url') == 'no':
lsb_url = self.config.get('lsb', 'tarball_url') % self.arch
@@ -71,9 +71,9 @@
else:
raise error.TestError('Could not find LSB package md5, cannot'
' cache LSB tarball')
-
+
utils.extract_tarball_to_dir(lsb_pkg, self.srcdir)
-
+
# Lets load a file that contains the list of RPMs
os.chdir(self.srcdir)
if not os.path.isfile('inst-config'):
@@ -92,7 +92,7 @@
except:
# If we don't get a match, no problem
pass
-
+
# Lets figure out the host distro
distro_pkg_support = package.os_support()
if os.path.isfile('/etc/debian_version') and \
@@ -111,7 +111,7 @@
else:
logging.error('OS does not seem to be red hat or debian based')
raise EnvironmentError('Cannot handle LSB package installation')
-
+
# According to the host distro detection, we can install the packages
# using the list previously assembled
if distro_type == 'redhat-based':
@@ -130,7 +130,7 @@
for lsb_rpm in lsb_pkg_list:
lsb_dpkg = package.convert(lsb_rpm, 'dpkg')
package.install(lsb_dpkg, nodeps=True)
-
+
self.packages_installed = True
@@ -179,4 +179,3 @@
logging.info('Executing LSB main test script')
utils.system(cmd)
-
diff --git a/client/tests/memory_api/memory_api.py b/client/tests/memory_api/memory_api.py
index b1b5d24..71ad542 100755
--- a/client/tests/memory_api/memory_api.py
+++ b/client/tests/memory_api/memory_api.py
@@ -3,70 +3,70 @@
from autotest_lib.client.common_lib import error
class memory_api(test.test):
- version = 1
+ version = 1
- def setup(self):
- utils.system("gcc %s -o %s" %
- (os.path.join(self.bindir, "memory_api.c"),
- os.path.join(self.tmpdir, "memory_api")))
- utils.system("gcc %s -o %s" %
- (os.path.join(self.bindir, "mremaps.c"),
- os.path.join(self.tmpdir, "mremaps")))
+ def setup(self):
+ utils.system("gcc %s -o %s" %
+ (os.path.join(self.bindir, "memory_api.c"),
+ os.path.join(self.tmpdir, "memory_api")))
+ utils.system("gcc %s -o %s" %
+ (os.path.join(self.bindir, "mremaps.c"),
+ os.path.join(self.tmpdir, "mremaps")))
- def initialize(self):
- self.job.require_gcc()
+ def initialize(self):
+ self.job.require_gcc()
- def run_once(self, memsize = "1000000000", args=''):
+ def run_once(self, memsize = "1000000000", args=''):
- vma_re = re.compile("([0-9,a-f]+)-([0-9,a-f]+)")
- memory_re = re.compile("(\d+) bytes @(0x[0-9,a-f]+)")
+ vma_re = re.compile("([0-9,a-f]+)-([0-9,a-f]+)")
+ memory_re = re.compile("(\d+) bytes @(0x[0-9,a-f]+)")
- vma_max_shift = 0
- if os.access("/proc/sys/vm/vma_max_shift", os.R_OK):
- vma_max_shift = int(
- open("/proc/sys/vm/vma_max_shift").read().rstrip())
- p1 = subprocess.Popen('%s/memory_api ' % self.tmpdir + memsize,
- shell=True, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE)
- while p1.poll() is None:
- output = p1.stdout.readline().rstrip()
- m = memory_re.search(output)
- mem_start = 0
- mem_len = 0
- if m:
- mem_start = int(m.group(2), 16)
- mem_len = int(m.group(1))
- else:
- continue
- map_output = open("/proc/%s/maps_backing" % p1.pid).readlines()
- vma_count = 0
- vma_start = 0
- vma_len = 0
- expected_vma_count = 1
- for line in map_output:
- m = vma_re.search(line)
+ vma_max_shift = 0
+ if os.access("/proc/sys/vm/vma_max_shift", os.R_OK):
+ vma_max_shift = int(
+ open("/proc/sys/vm/vma_max_shift").read().rstrip())
+ p1 = subprocess.Popen('%s/memory_api ' % self.tmpdir + memsize,
+ shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ while p1.poll() is None:
+ output = p1.stdout.readline().rstrip()
+ m = memory_re.search(output)
+ mem_start = 0
+ mem_len = 0
if m:
- vma_start = int("0x%s" % m.group(1),16)
- vma_end = int("0x%s" % m.group(2),16)
- if ((vma_start >= mem_start) and
- (vma_start < (mem_start + mem_len))):
- vma_count+=1
+ mem_start = int(m.group(2), 16)
+ mem_len = int(m.group(1))
+ else:
+ continue
+ map_output = open("/proc/%s/maps_backing" % p1.pid).readlines()
+ vma_count = 0
+ vma_start = 0
+ vma_len = 0
+ expected_vma_count = 1
+ for line in map_output:
+ m = vma_re.search(line)
+ if m:
+ vma_start = int("0x%s" % m.group(1),16)
+ vma_end = int("0x%s" % m.group(2),16)
+ if ((vma_start >= mem_start) and
+ (vma_start < (mem_start + mem_len))):
+ vma_count+=1
- if (('file' not in output) and (vma_max_shift != 0)):
- expected_vma_count = mem_len >> vma_max_shift
- if (mem_len % (1 << vma_max_shift)):
- expected_vma_count += 1
- if expected_vma_count != vma_count:
- raise error.TestFail("VmaCountMismatch")
- logging.info("%s %s %d %d", hex(mem_start), hex(mem_len), vma_count,
- expected_vma_count)
- if p1.poll() is None:
- p1.stdin.write("\n")
- p1.stdin.flush()
+ if (('file' not in output) and (vma_max_shift != 0)):
+ expected_vma_count = mem_len >> vma_max_shift
+ if (mem_len % (1 << vma_max_shift)):
+ expected_vma_count += 1
+ if expected_vma_count != vma_count:
+ raise error.TestFail("VmaCountMismatch")
+ logging.info("%s %s %d %d", hex(mem_start), hex(mem_len), vma_count,
+ expected_vma_count)
+ if p1.poll() is None:
+ p1.stdin.write("\n")
+ p1.stdin.flush()
- if p1.poll() != 0:
- raise error.TestFail("Unexpected application abort")
+ if p1.poll() != 0:
+ raise error.TestFail("Unexpected application abort")
- utils.system('%s/mremaps ' % self.tmpdir + '100000000')
+ utils.system('%s/mremaps ' % self.tmpdir + '100000000')
diff --git a/client/tests/parallel_dd/parallel_dd.py b/client/tests/parallel_dd/parallel_dd.py
index ecdb752..02774f7 100755
--- a/client/tests/parallel_dd/parallel_dd.py
+++ b/client/tests/parallel_dd/parallel_dd.py
@@ -123,7 +123,7 @@
start = time.time()
self.fs_read()
self.fs_read_rate = self.megabytes / (time.time() - start)
-
+
self.write_perf_keyval({
'raw_write' : self.raw_write_rate,
'raw_read' : self.raw_read_rate,
diff --git a/client/tests/perfmon/perfmon.py b/client/tests/perfmon/perfmon.py
index 7ccd343..ec1145f 100755
--- a/client/tests/perfmon/perfmon.py
+++ b/client/tests/perfmon/perfmon.py
@@ -22,4 +22,4 @@
cmd = self.srcdir + '/tests/pfm_tests' + args
# self.results.append(utils.system_output(cmd, retain_output=True))
if 'FAIL' in utils.system_output(cmd, retain_output=True):
- raise error.TestError('some perfmon tests failed')
+ raise error.TestError('some perfmon tests failed')
diff --git a/client/tests/tsc/tsc.py b/client/tests/tsc/tsc.py
index 7077784..5d69edd 100755
--- a/client/tests/tsc/tsc.py
+++ b/client/tests/tsc/tsc.py
@@ -26,7 +26,7 @@
(result.exit_status, result.command))
## Analyze result.stdout to see if it is possible to form qualified
## reason of failure and to raise an appropriate exception.
- ## For this test we qualify the reason of failure if the
+ ## For this test we qualify the reason of failure if the
## following conditions are met:
## (i) result.exit_status = 1
## (ii) result.stdout ends with 'FAIL'
@@ -57,5 +57,3 @@
## If we are here, we failed to qualify the reason of test failre
## Consider it as a test error
raise error.TestError(default_reason)
-
-
diff --git a/contrib/coverage.py b/contrib/coverage.py
index 341b383..9b2bf67 100644
--- a/contrib/coverage.py
+++ b/contrib/coverage.py
@@ -107,20 +107,20 @@
self.excluded = excluded
self.suite_spots = suite_spots
self.excluding_suite = 0
-
+
def doRecursive(self, node):
for n in node.getChildNodes():
self.dispatch(n)
visitStmt = visitModule = doRecursive
-
+
def doCode(self, node):
if hasattr(node, 'decorators') and node.decorators:
self.dispatch(node.decorators)
self.recordAndDispatch(node.code)
else:
self.doSuite(node, node.code)
-
+
visitFunction = visitClass = doCode
def getFirstLine(self, node):
@@ -140,14 +140,14 @@
for n in node.getChildNodes():
lineno = max(lineno, self.getLastLine(n))
return lineno
-
+
def doStatement(self, node):
self.recordLine(self.getFirstLine(node))
visitAssert = visitAssign = visitAssTuple = visitPrint = \
visitPrintnl = visitRaise = visitSubscript = visitDecorators = \
doStatement
-
+
def visitPass(self, node):
# Pass statements have weird interactions with docstrings. If this
# pass statement is part of one of those pairs, claim that the statement
@@ -156,10 +156,10 @@
if l:
lines = self.suite_spots.get(l, [l,l])
self.statements[lines[1]] = 1
-
+
def visitDiscard(self, node):
# Discard nodes are statements that execute an expression, but then
- # discard the results. This includes function calls, so we can't
+ # discard the results. This includes function calls, so we can't
# ignore them all. But if the expression is a constant, the statement
# won't be "executed", so don't count it now.
if node.expr.__class__.__name__ != 'Const':
@@ -173,7 +173,7 @@
return self.recordLine(self.getFirstLine(node))
else:
return 0
-
+
def recordLine(self, lineno):
# Returns a bool, whether the line is included or excluded.
if lineno:
@@ -197,9 +197,9 @@
self.statements[lineno] = 1
return 1
return 0
-
+
default = recordNodeLine
-
+
def recordAndDispatch(self, node):
self.recordNodeLine(node)
self.dispatch(node)
@@ -210,7 +210,7 @@
self.excluding_suite = 1
self.recordAndDispatch(body)
self.excluding_suite = exsuite
-
+
def doPlainWordSuite(self, prevsuite, suite):
# Finding the exclude lines for else's is tricky, because they aren't
# present in the compiler parse tree. Look at the previous suite,
@@ -224,11 +224,11 @@
break
else:
self.doSuite(None, suite)
-
+
def doElse(self, prevsuite, node):
if node.else_:
self.doPlainWordSuite(prevsuite, node.else_)
-
+
def visitFor(self, node):
self.doSuite(node, node.body)
self.doElse(node.body, node)
@@ -258,14 +258,14 @@
else:
self.doSuite(a, h)
self.doElse(node.handlers[-1][2], node)
-
+
def visitTryFinally(self, node):
self.doSuite(node, node.body)
self.doPlainWordSuite(node.body, node.final)
-
+
def visitWith(self, node):
self.doSuite(node, node.body)
-
+
def visitGlobal(self, node):
# "global" statements don't execute like others (they don't call the
# trace function), so don't record their line numbers.
@@ -285,7 +285,7 @@
# A dictionary with an entry for (Python source file name, line number
# in that file) if that line has been executed.
c = {}
-
+
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed.
@@ -313,12 +313,12 @@
self.relative_dir = os.path.normcase(os.path.abspath(os.curdir)+os.sep)
self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')
- # t(f, x, y). This method is passed to sys.settrace as a trace function.
- # See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and
+ # t(f, x, y). This method is passed to sys.settrace as a trace function.
+ # See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and
# the arguments and return value of the trace function.
# See [van Rossum 2001-07-20a, 3.2] for a description of frame and code
# objects.
-
+
def t(self, f, w, unused): #pragma: no cover
if w == 'line':
#print "Executing %s @ %d" % (f.f_code.co_filename, f.f_lineno)
@@ -326,7 +326,7 @@
for c in self.cstack:
c[(f.f_code.co_filename, f.f_lineno)] = 1
return self.t
-
+
def help(self, error=None): #pragma: no cover
if error:
print error
@@ -379,14 +379,14 @@
args_needed = (settings.get('execute')
or settings.get('annotate')
or settings.get('report'))
- action = (settings.get('erase')
+ action = (settings.get('erase')
or settings.get('collect')
or args_needed)
if not action:
help_fn("You must specify at least one of -e, -x, -c, -r, or -a.")
if not args_needed and args:
help_fn("Unexpected arguments: %s" % " ".join(args))
-
+
self.parallel_mode = settings.get('parallel-mode')
self.get_ready()
@@ -406,7 +406,7 @@
self.collect()
if not args:
args = self.cexecuted.keys()
-
+
ignore_errors = settings.get('ignore-errors')
show_missing = settings.get('show-missing')
directory = settings.get('directory=')
@@ -426,7 +426,7 @@
self.usecache = usecache
if cache_file and not self.cache:
self.cache_default = cache_file
-
+
def get_ready(self, parallel_mode=False):
if self.usecache and not self.cache:
self.cache = os.environ.get(self.cache_env, self.cache_default)
@@ -434,7 +434,7 @@
self.cache += "." + gethostname() + "." + str(os.getpid())
self.restore()
self.analysis_cache = {}
-
+
def start(self, parallel_mode=False):
self.get_ready()
if self.nesting == 0: #pragma: no cover
@@ -442,7 +442,7 @@
if hasattr(threading, 'settrace'):
threading.settrace(self.t)
self.nesting += 1
-
+
def stop(self):
self.nesting -= 1
if self.nesting == 0: #pragma: no cover
@@ -466,7 +466,7 @@
def begin_recursive(self):
self.cstack.append(self.c)
self.xstack.append(self.exclude_re)
-
+
def end_recursive(self):
self.c = self.cstack.pop()
self.exclude_re = self.xstack.pop()
@@ -546,7 +546,7 @@
self.canonical_filename_cache[filename] = cf
return self.canonical_filename_cache[filename]
- # canonicalize_filenames(). Copy results from "c" to "cexecuted",
+ # canonicalize_filenames(). Copy results from "c" to "cexecuted",
# canonicalizing filenames on the way. Clear the "c" map.
def canonicalize_filenames(self):
@@ -578,7 +578,7 @@
# in the source code, (3) a list of lines of excluded statements,
# and (4), a map of line numbers to multi-line line number ranges, for
# statements that cross lines.
-
+
def analyze_morf(self, morf):
if self.analysis_cache.has_key(morf):
return self.analysis_cache[morf]
@@ -599,7 +599,7 @@
raise CoverageException(
"Couldn't parse '%s' as Python source: '%s' at line %d" %
(filename, synerr.msg, synerr.lineno)
- )
+ )
source.close()
result = filename, lines, excluded_lines, line_map
self.analysis_cache[morf] = result
@@ -610,26 +610,26 @@
if len(tree) == 3 and type(tree[2]) == type(1):
return tree[2]
tree = tree[1]
-
+
def last_line_of_tree(self, tree):
while True:
if len(tree) == 3 and type(tree[2]) == type(1):
return tree[2]
tree = tree[-1]
-
+
def find_docstring_pass_pair(self, tree, spots):
for i in range(1, len(tree)):
if self.is_string_constant(tree[i]) and self.is_pass_stmt(tree[i+1]):
first_line = self.first_line_of_tree(tree[i])
last_line = self.last_line_of_tree(tree[i+1])
self.record_multiline(spots, first_line, last_line)
-
+
def is_string_constant(self, tree):
try:
return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.expr_stmt
except:
return False
-
+
def is_pass_stmt(self, tree):
try:
return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.pass_stmt
@@ -639,7 +639,7 @@
def record_multiline(self, spots, i, j):
for l in range(i, j+1):
spots[l] = (i, j)
-
+
def get_suite_spots(self, tree, spots):
""" Analyze a parse tree to find suite introducers which span a number
of lines.
@@ -681,7 +681,7 @@
# treat them differently, especially in the common case of a
# function with a doc string and a single pass statement.
self.find_docstring_pass_pair(tree[i], spots)
-
+
elif tree[i][0] == symbol.simple_stmt:
first_line = self.first_line_of_tree(tree[i])
last_line = self.last_line_of_tree(tree[i])
@@ -706,7 +706,7 @@
tree = parser.suite(text+'\n\n').totuple(1)
self.get_suite_spots(tree, suite_spots)
#print "Suite spots:", suite_spots
-
+
# Use the compiler module to parse the text and find the executable
# statements. We add newlines to be impervious to final partial lines.
statements = {}
@@ -816,7 +816,7 @@
else:
globbed.append(morf)
morfs = globbed
-
+
morfs = self.filter_by_prefix(morfs, omit_prefixes)
morfs.sort(self.morf_name_compare)
@@ -883,7 +883,7 @@
except:
if not ignore_errors:
raise
-
+
def annotate_file(self, filename, statements, excluded, missing, directory=None):
source = open(filename, 'r')
if directory:
@@ -911,7 +911,7 @@
if self.blank_re.match(line):
dest.write(' ')
elif self.else_re.match(line):
- # Special logic for lines containing only 'else:'.
+ # Special logic for lines containing only 'else:'.
# See [GDR 2001-12-04b, 3.2].
if i >= len(statements) and j >= len(missing):
dest.write('! ')
@@ -935,40 +935,40 @@
the_coverage = coverage()
# Module functions call methods in the singleton object.
-def use_cache(*args, **kw):
+def use_cache(*args, **kw):
return the_coverage.use_cache(*args, **kw)
-def start(*args, **kw):
+def start(*args, **kw):
return the_coverage.start(*args, **kw)
-def stop(*args, **kw):
+def stop(*args, **kw):
return the_coverage.stop(*args, **kw)
-def erase(*args, **kw):
+def erase(*args, **kw):
return the_coverage.erase(*args, **kw)
-def begin_recursive(*args, **kw):
+def begin_recursive(*args, **kw):
return the_coverage.begin_recursive(*args, **kw)
-def end_recursive(*args, **kw):
+def end_recursive(*args, **kw):
return the_coverage.end_recursive(*args, **kw)
-def exclude(*args, **kw):
+def exclude(*args, **kw):
return the_coverage.exclude(*args, **kw)
-def analysis(*args, **kw):
+def analysis(*args, **kw):
return the_coverage.analysis(*args, **kw)
-def analysis2(*args, **kw):
+def analysis2(*args, **kw):
return the_coverage.analysis2(*args, **kw)
-def report(*args, **kw):
+def report(*args, **kw):
return the_coverage.report(*args, **kw)
-def annotate(*args, **kw):
+def annotate(*args, **kw):
return the_coverage.annotate(*args, **kw)
-def annotate_file(*args, **kw):
+def annotate_file(*args, **kw):
return the_coverage.annotate_file(*args, **kw)
# Save coverage data when Python exits. (The atexit module wasn't
@@ -1043,7 +1043,7 @@
# Thanks, Allen.
#
# 2005-12-02 NMB Call threading.settrace so that all threads are measured.
-# Thanks Martin Fuzzey. Add a file argument to report so that reports can be
+# Thanks Martin Fuzzey. Add a file argument to report so that reports can be
# captured to a different destination.
#
# 2005-12-03 NMB coverage.py can now measure itself.
diff --git a/frontend/afe/frontend_test_utils.py b/frontend/afe/frontend_test_utils.py
index 74a2497..dc6ebf8 100644
--- a/frontend/afe/frontend_test_utils.py
+++ b/frontend/afe/frontend_test_utils.py
@@ -51,7 +51,7 @@
platform = models.Label.objects.create(name='myplatform', platform=True)
for host in self.hosts:
- host.labels.add(platform)
+ host.labels.add(platform)
atomic_group1 = models.AtomicGroup.objects.create(
name='atomic1', max_number_of_machines=2)
diff --git a/frontend/afe/model_logic.py b/frontend/afe/model_logic.py
index f8dc06d..3e83abb 100644
--- a/frontend/afe/model_logic.py
+++ b/frontend/afe/model_logic.py
@@ -18,14 +18,14 @@
def _wrap_with_readonly(method):
- def wrapper_method(*args, **kwargs):
- readonly_connection.connection().set_django_connection()
- try:
- return method(*args, **kwargs)
- finally:
- readonly_connection.connection().unset_django_connection()
- wrapper_method.__name__ = method.__name__
- return wrapper_method
+ def wrapper_method(*args, **kwargs):
+ readonly_connection.connection().set_django_connection()
+ try:
+ return method(*args, **kwargs)
+ finally:
+ readonly_connection.connection().unset_django_connection()
+ wrapper_method.__name__ = method.__name__
+ return wrapper_method
def _quote_name(name):
diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
index 548556b..7c58ec5 100644
--- a/frontend/afe/rpc_utils.py
+++ b/frontend/afe/rpc_utils.py
@@ -227,11 +227,11 @@
continue
execution_count = count_per_execution[_execution_key_for(queue_entry)]
if execution_count < queue_entry.job.synch_count:
- raise model_logic.ValidationError(
- {'' : 'You cannot abort part of a synchronous job execution '
- '(%d/%s), %d included, %d expected'
- % (queue_entry.job.id, queue_entry.execution_subdir,
- execution_count, queue_entry.job.synch_count)})
+ raise model_logic.ValidationError(
+ {'' : 'You cannot abort part of a synchronous job execution '
+ '(%d/%s), %d included, %d expected'
+ % (queue_entry.job.id, queue_entry.execution_subdir,
+ execution_count, queue_entry.job.synch_count)})
def check_atomic_group_create_job(synch_count, host_objects, metahost_objects,
diff --git a/frontend/urls.py b/frontend/urls.py
index 3aeb5d7..74be84d 100644
--- a/frontend/urls.py
+++ b/frontend/urls.py
@@ -22,6 +22,6 @@
)
if settings.DEBUG:
- pattern_list += debug_pattern_list
+ pattern_list += debug_pattern_list
urlpatterns = patterns('', *pattern_list)
diff --git a/new_tko/tko/graphing_utils.py b/new_tko/tko/graphing_utils.py
index 4266894..49afdc1 100644
--- a/new_tko/tko/graphing_utils.py
+++ b/new_tko/tko/graphing_utils.py
@@ -398,7 +398,7 @@
# Base is 0.0 so just simplify:
# If value < base: append -100.0;
# If value == base: append 0.0 (obvious); and
- # If value > base: append 100.0.
+ # If value > base: append 100.0.
values.append(100 * float(cmp(value, base)))
# Based on error for f(x,y) = 100 * (x - y) / y
@@ -476,8 +476,8 @@
def _find_plot_by_label(plots, label):
for index, plot in enumerate(plots):
- if plot['label'] == label:
- return index
+ if plot['label'] == label:
+ return index
raise ValueError('no plot labeled "%s" found' % label)
diff --git a/new_tko/urls.py b/new_tko/urls.py
index e949466..2b2e8df 100644
--- a/new_tko/urls.py
+++ b/new_tko/urls.py
@@ -24,6 +24,6 @@
)
if settings.DEBUG:
- pattern_list += debug_pattern_list
+ pattern_list += debug_pattern_list
urlpatterns = patterns('', *pattern_list)
diff --git a/scheduler/drone_manager.py b/scheduler/drone_manager.py
index dbaa75d..83e3639 100644
--- a/scheduler/drone_manager.py
+++ b/scheduler/drone_manager.py
@@ -305,7 +305,7 @@
drone.execute_queued_calls()
try:
- self._results_drone.execute_queued_calls()
+ self._results_drone.execute_queued_calls()
except error.AutoservError:
warning = ('Results repository failed to execute calls:\n' +
traceback.format_exc())
diff --git a/scheduler/monitor_db_cleanup.py b/scheduler/monitor_db_cleanup.py
index 5e8afab..4ffadb4 100644
--- a/scheduler/monitor_db_cleanup.py
+++ b/scheduler/monitor_db_cleanup.py
@@ -49,12 +49,12 @@
def _cleanup(self):
- logging.info('Running periodic cleanup')
- self._abort_timed_out_jobs()
- self._abort_jobs_past_synch_start_timeout()
- self._abort_jobs_past_max_runtime()
- self._clear_inactive_blocks()
- self._check_for_db_inconsistencies()
+ logging.info('Running periodic cleanup')
+ self._abort_timed_out_jobs()
+ self._abort_jobs_past_synch_start_timeout()
+ self._abort_jobs_past_max_runtime()
+ self._clear_inactive_blocks()
+ self._check_for_db_inconsistencies()
def _abort_timed_out_jobs(self):
diff --git a/scheduler/monitor_db_unittest.py b/scheduler/monitor_db_unittest.py
index 39a815f..7f97a2c 100644
--- a/scheduler/monitor_db_unittest.py
+++ b/scheduler/monitor_db_unittest.py
@@ -55,17 +55,17 @@
class IsAgentWithTask(mock.argument_comparator):
- def __init__(self, task):
- self._task = task
+ def __init__(self, task):
+ self._task = task
- def is_satisfied_by(self, parameter):
- if not isinstance(parameter, monitor_db.Agent):
- return False
- tasks = list(parameter.queue.queue)
- if len(tasks) != 1:
- return False
- return tasks[0] == self._task
+ def is_satisfied_by(self, parameter):
+ if not isinstance(parameter, monitor_db.Agent):
+ return False
+ tasks = list(parameter.queue.queue)
+ if len(tasks) != 1:
+ return False
+ return tasks[0] == self._task
def _set_host_and_qe_ids(agent_or_task, id_list=None):
@@ -1231,7 +1231,7 @@
def test_delayed_call(self):
- test_time = self.god.create_mock_function('time')
+ test_time = self.god.create_mock_function('time')
test_time.expect_call().and_return(33)
test_time.expect_call().and_return(34.01)
test_time.expect_call().and_return(34.99)
diff --git a/server/autotest_unittest.py b/server/autotest_unittest.py
index 4d5554e..fec02be 100644
--- a/server/autotest_unittest.py
+++ b/server/autotest_unittest.py
@@ -53,7 +53,7 @@
def tearDown(self):
- self.god.unstub_all()
+ self.god.unstub_all()
def construct(self):
diff --git a/server/frontend.py b/server/frontend.py
index 18ec037..4f37149 100644
--- a/server/frontend.py
+++ b/server/frontend.py
@@ -46,7 +46,7 @@
Abstract RPC class for communicating with the autotest frontend
Inherited for both TKO and AFE uses.
- All the constructors go in the afe / tko class.
+ All the constructors go in the afe / tko class.
Manipulating methods go in the object classes themselves
"""
def __init__(self, path, user, server, print_log, debug, reply_debug):
@@ -114,7 +114,7 @@
def get_status_counts(self, job, **data):
entries = self.run('get_status_counts',
- group_by=['hostname', 'test_name', 'reason'],
+ group_by=['hostname', 'test_name', 'reason'],
job_tag__startswith='%s-' % job, **data)
return [TestStatus(self, e) for e in entries['groups']]
@@ -130,7 +130,7 @@
debug=debug,
reply_debug=reply_debug)
-
+
def host_statuses(self, live=None):
dead_statuses = ['Dead', 'Repair Failed', 'Repairing']
statuses = self.run('get_static_data')['host_statuses']
@@ -257,9 +257,9 @@
email_to=None, timeout=168):
"""
Run a list of test suites on a particular kernel.
-
+
Poll for them to complete, and return whether they worked or not.
-
+
pairings: list of MachineTestPairing objects to invoke
kernel: name of the kernel to run
kernel_label: label of the kernel to run
@@ -404,7 +404,7 @@
"""
Given a pairing of a control file to a machine label, find all machines
with that label, and submit that control file to them.
-
+
Returns a list of job objects
"""
job_name = '%s : %s' % (pairing.machine_label, kernel_label)
@@ -536,7 +536,7 @@
def poll_job_results(self, tko, job, debug=False):
"""
Analyse all job results by platform, return:
-
+
False: if any platform has more than one failure
None: if any platform has more than one machine not yet Good.
True: if all platforms have at least all-but-one machines Good.
@@ -560,7 +560,7 @@
failed = len(platform_map[platform].get('Failed', []))
aborted = len(platform_map[platform].get('Aborted', []))
- # We set up what we want to record here, but don't actually do
+ # We set up what we want to record here, but don't actually do
# it yet, until we have a decisive answer for this platform
if aborted or failed:
bad = aborted + failed
@@ -587,10 +587,10 @@
continue
detail.append('%s=%s' % (status,platform_map[platform][status]))
if debug:
- print '%20s %d/%d %s' % (platform, completed, total,
+ print '%20s %d/%d %s' % (platform, completed, total,
' '.join(detail))
print
-
+
if len(aborted_platforms) > 0:
if debug:
print 'Result aborted - platforms: ',
diff --git a/server/hosts/monitors/monitors_util.py b/server/hosts/monitors/monitors_util.py
index ea18f99..3dd465b 100644
--- a/server/hosts/monitors/monitors_util.py
+++ b/server/hosts/monitors/monitors_util.py
@@ -29,7 +29,7 @@
elif callable(format):
timestamp = str(format())
else:
- raise InvalidTimestampFormat
+ raise InvalidTimestampFormat
return '%s\t%s' % (timestamp, msg)
diff --git a/tko/display.py b/tko/display.py
index 0ade57a..cef477b 100755
--- a/tko/display.py
+++ b/tko/display.py
@@ -355,4 +355,3 @@
print '<input type="submit" name="clear" value="Clear all tests" />'
print '<input type="submit" name="reset" value="Reset" />'
print '</form>'
-
diff --git a/tko/nightly.py b/tko/nightly.py
index 1b0cf6f..c431829 100644
--- a/tko/nightly.py
+++ b/tko/nightly.py
@@ -58,7 +58,7 @@
for kernel in trimmed_kernels:
runs = collect_testruns(job_table[kernel], regressed_platforms, test)
if runs:
- test_runs[kernel] = runs
+ test_runs[kernel] = runs
def collect_raw_scores(runs, metric):
@@ -66,15 +66,15 @@
# arrange them by platform type
platform_scores = {} # platform --> list of perf scores
for platform in runs:
- vals = perf.get_metric_at_point(runs[platform], metric)
- if vals:
- platform_scores[platform] = vals
+ vals = perf.get_metric_at_point(runs[platform], metric)
+ if vals:
+ platform_scores[platform] = vals
return platform_scores
def collect_scaled_scores(metric):
# get scores of test runs for 1 test on some kernels and platforms
- # optionally make relative to first kernel on that platform
+ # optionally make relative to first kernel on that platform
# arrange by plotline (ie platform) for gnuplot
plot_data = {} # platform --> (kernel --> list of perf scores)
baseline = {}
@@ -134,7 +134,7 @@
max_dev_kernels,
job_table, kernel_dates)
kernels = sort_kernels(kernels)
- return kernels # sorted subset of kernels in job_table
+ return kernels # sorted subset of kernels in job_table
def graph_1_test(test, metric, size):
@@ -144,10 +144,10 @@
title = test.capitalize() + suite_notes
if regress:
title += ', Regressions Only'
- if relative:
+ if relative:
ylegend = 'Relative '
ymin = 0.9
- else:
+ else:
ylegend = ''
ymin = None
ylegend += metric.capitalize()
@@ -161,7 +161,7 @@
def table_for_1_test(test, metric):
# generate detailed html page with graph plus numeric table for 1 benchmark
print "Content-Type: text/html\n\n<html><body>"
- heading = "%s %s:  %s%s" % (test_group, kernel_legend,
+ heading = "%s %s:  %s%s" % (test_group, kernel_legend,
test.capitalize(), suite_notes)
if regress:
heading += ", Regressions Only"
@@ -192,7 +192,7 @@
if avg+std_dev < ref_thresholds[platform]:
print "bgcolor=pink",
print ( "> <a href='%s?test=%s&metric=%s"
- "&platforms=%s&runs&kernel=%s'>"
+ "&platforms=%s&runs&kernel=%s'>"
% (myself, test, metric, platform, kernel) )
print "<b>%.4g</b>" % avg, "</a><br>",
print " <small> %dr </small>" % len(vals),
@@ -243,20 +243,20 @@
print "%.4g" % v,
print "</td>"
row = get_testrun_context(testrunx)
- print ( "<td> <a href='//%s/results/%s/%s/results'> %s </a></td>"
+ print ( "<td> <a href='//%s/results/%s/%s/results'> %s </a></td>"
% (results_server, row[0], row[1], row[0]) )
for v in row[1:]:
- print "<td> %s </td>" % v
+ print "<td> %s </td>" % v
if show_attrs:
attrs = perf.get_test_attributes(testrunx)
print "<td>",
for attr in attrs:
- # if attr == "sysinfo-cmdline": continue
- # if attr[:4] == "svs-": continue
- val = attrs[attr]
- if len(val) > 40:
- val = val[:40-3] + "..."
- print "%s=%s" % (attr, val)
+ # if attr == "sysinfo-cmdline": continue
+ # if attr[:4] == "svs-": continue
+ val = attrs[attr]
+ if len(val) > 40:
+ val = val[:40-3] + "..."
+ print "%s=%s" % (attr, val)
print "</td>"
print "</tr>\n"
print "</table>"
@@ -319,7 +319,7 @@
metric = perf.benchmark_main_metric(test)
assert metric, "no default metric for test %s" % test
# perf.init()
- perf.db_cur = db.db(host=tko_mysql_server,
+ perf.db_cur = db.db(host=tko_mysql_server,
user='nobody', password='').cur
kernels = select_dev_kernels()
regressed_platforms = find_regressions(kernels, test, metric)
@@ -327,7 +327,7 @@
plot_data = collect_scaled_scores(metric)
platforms = sorted(plot_data.keys())
if 'runs' in form:
- testrun_details_for_1_test_kernel_platform(test, metric,
+ testrun_details_for_1_test_kernel_platform(test, metric,
platforms[0])
elif 'table' in form:
table_for_1_test(test, metric)
@@ -374,4 +374,3 @@
kernels.add(kernel)
kernels = sort_kernels(kernels)[-maxkernels:]
return kernels
-
diff --git a/tko/parsers/version_0.py b/tko/parsers/version_0.py
index 1018b08..881b8d9 100644
--- a/tko/parsers/version_0.py
+++ b/tko/parsers/version_0.py
@@ -79,18 +79,18 @@
@staticmethod
def load_from_dir(dir, verify_ident=None):
# try and load the booted kernel version
- attributes = False
- i = 1
- build_dir = os.path.join(dir, "build")
- while True:
- if not os.path.exists(build_dir):
- break
- build_log = os.path.join(build_dir, "debug", "build_log")
+ attributes = False
+ i = 1
+ build_dir = os.path.join(dir, "build")
+ while True:
+ if not os.path.exists(build_dir):
+ break
+ build_log = os.path.join(build_dir, "debug", "build_log")
attributes = kernel.load_from_build_log(build_log)
- if attributes:
- break
- i += 1
- build_dir = os.path.join(dir, "build.%d" % (i))
+ if attributes:
+ break
+ i += 1
+ build_dir = os.path.join(dir, "build.%d" % (i))
if not attributes:
if verify_ident:
diff --git a/tko/parsers/version_1.py b/tko/parsers/version_1.py
index 5c99d85..77a2acb 100644
--- a/tko/parsers/version_1.py
+++ b/tko/parsers/version_1.py
@@ -58,7 +58,7 @@
def parse_line_into_dicts(line, attr_dict, perf_dict):
key, val_type, value = "", "", ""
- # figure out what the key, value and keyval type are
+ # figure out what the key, value and keyval type are
typed_match = re.search("^([^=]*)\{(\w*)\}=(.*)$", line)
if typed_match:
key, val_type, value = typed_match.groups()
diff --git a/utils/test_importer.py b/utils/test_importer.py
index 5035940..fe43001 100755
--- a/utils/test_importer.py
+++ b/utils/test_importer.py
@@ -146,7 +146,7 @@
add_noncompliant=add_noncompliant,
autotest_dir=autotest_dir,
verbose=verbose)
-
+
profilers_path = os.path.join(autotest_dir, "client/profilers")
if os.path.exists(profilers_path):
if verbose:
@@ -157,7 +157,7 @@
description='NA')
# Clean bad db entries
db_clean_broken(autotest_dir, verbose)
-
+
def db_clean_broken(autotest_dir, verbose):
"""Remove tests from autotest_web that do not have valid control files
@@ -258,7 +258,7 @@
else:
control_name = "%s:%s"
control_name %= (test_new_test[-2],
- test_new_test[-1])
+ test_new_test[-1])
new_test['name'] = control_name.replace('control.', '')
# Experimental Check
if not add_experimental: