Convert all Python code to use four-space indents instead of eight-space tabs.
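
The change is mechanical: each leading tab becomes one four-space indent
level. A minimal sketch of the rewrite is below (retab.py is a hypothetical
helper, not part of this change); it assumes tabs occur only in leading
indentation, so continuation lines that need re-alignment (several hunks
below adjust those by hand) still take a manual pass. CPython's
Tools/scripts/reindent.py does a more thorough job, if it is available.

    # retab.py: replace each leading tab with four spaces, in place.
    import fileinput
    import re
    import sys

    def retab(line, spaces_per_tab=4):
        # Substitute four spaces for every tab in the leading run of tabs.
        m = re.match(r'\t+', line)
        if not m:
            return line
        return ' ' * (spaces_per_tab * m.end()) + line[m.end():]

    for line in fileinput.input(inplace=True):
        # With inplace=True, stdout is redirected into the file being read.
        sys.stdout.write(retab(line))

Run as, e.g., "python retab.py client/bin/*.py".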

Signed-off-by: John Admanski <[email protected]>



git-svn-id: http://test.kernel.org/svn/autotest/trunk@1658 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/client/bin/autotest b/client/bin/autotest
index 9d3a968..34c1e26 100755
--- a/client/bin/autotest
+++ b/client/bin/autotest
@@ -28,27 +28,27 @@
 parser = OptionParser()
 
 parser.add_option("-c", "--continue", dest="cont", action="store_true",
-			default=False, help="continue previously started job")
+                        default=False, help="continue previously started job")
 
 parser.add_option("-t", "--tag", dest="tag", type="string", default="default",
-			help="set the job tag")
+                        help="set the job tag")
 
 parser.add_option("-H", "--harness", dest="harness", type="string", default='',
-			help="set the harness type")
+                        help="set the harness type")
 
 parser.add_option("-l", "--external_logging", dest="log", action="store_true",
-			default=False, help="enable external logging")
+                        default=False, help="enable external logging")
 
 def usage():
-	parser.print_help()
-	sys.exit(1)
+    parser.print_help()
+    sys.exit(1)
 
 options, args = parser.parse_args()
 
 # Check for a control file.
 if len(args) != 1:
-	usage()
+    usage()
 
 # JOB: run the specified job control file.
 job.runjob(os.path.abspath(args[0]), options.cont, options.tag, options.harness,
-	   options.log)
+           options.log)
diff --git a/client/bin/autotest.py b/client/bin/autotest.py
index 9d4faa7..7695892 100755
--- a/client/bin/autotest.py
+++ b/client/bin/autotest.py
@@ -1,17 +1,17 @@
 import os, sys
 
 class system:
-	def __init__(self):
-		self.autodir = os.environ['AUTODIR']
-		self.resultdir = self.autodir + '/results'
-		self.tmpdir = self.autodir + '/tmp'
+    def __init__(self):
+        self.autodir = os.environ['AUTODIR']
+        self.resultdir = self.autodir + '/results'
+        self.tmpdir = self.autodir + '/tmp'
 
-		if not os.path.isdir(self.resultdir):
-			os.mkdir(self.resultdir)
-		if not os.path.isdir(self.tmpdir):
-			os.mkdir(self.tmpdir)
-		return None
+        if not os.path.isdir(self.resultdir):
+            os.mkdir(self.resultdir)
+        if not os.path.isdir(self.tmpdir):
+            os.mkdir(self.tmpdir)
+        return None
 
 
-	def boot(self, tag=None):
-		print "I OUGHT TO REBOOT NOW!"
+    def boot(self, tag=None):
+        print "I OUGHT TO REBOOT NOW!"
diff --git a/client/bin/autotest_client b/client/bin/autotest_client
index 762d0cb..1e275a4 100755
--- a/client/bin/autotest_client
+++ b/client/bin/autotest_client
@@ -17,11 +17,11 @@
 
 # If we're using cpusets, run inside the root one by default
 if os.path.exists("/dev/cpuset/tasks") and getpass.getuser() == "root":
-	utils.write_one_line("/dev/cpuset/tasks", str(os.getpid()))
+    utils.write_one_line("/dev/cpuset/tasks", str(os.getpid()))
 
 autodir = os.path.dirname(sys.argv[0])
 autotest = os.path.join(autodir, 'autotest')
 cmd = ' '.join([autotest, '-H simple'] + sys.argv[1:])
 exit_code = subprocess.call(cmd, shell=True, stderr=subprocess.STDOUT,
-			    close_fds=False)
+                            close_fds=False)
 sys.exit(exit_code) # pass on the exit status from autotest
diff --git a/client/bin/autotest_utils.py b/client/bin/autotest_utils.py
index 1e6c725..f6eb86e 100755
--- a/client/bin/autotest_utils.py
+++ b/client/bin/autotest_utils.py
@@ -8,593 +8,593 @@
 
 
 def grep(pattern, file):
-	"""
-	This is mainly to fix the return code inversion from grep
-	Also handles compressed files. 
+    """
+    This is mainly to fix the return code inversion from grep
+    Also handles compressed files.
 
-	returns 1 if the pattern is present in the file, 0 if not.
-	"""
-	command = 'grep "%s" > /dev/null' % pattern
-	ret = cat_file_to_cmd(file, command, ignore_status=True)
-	return not ret
+    returns 1 if the pattern is present in the file, 0 if not.
+    """
+    command = 'grep "%s" > /dev/null' % pattern
+    ret = cat_file_to_cmd(file, command, ignore_status=True)
+    return not ret
 
 
 def difflist(list1, list2):
-	"""returns items in list2 that are not in list1"""
-	diff = [];
-	for x in list2:
-		if x not in list1:
-			diff.append(x)
-	return diff
+    """returns items in list2 that are not in list1"""
+    diff = [];
+    for x in list2:
+        if x not in list1:
+            diff.append(x)
+    return diff
 
 
 def cat_file_to_cmd(file, command, ignore_status=0, return_output=False):
-	"""
-	equivalent to 'cat file | command' but knows to use 
-	zcat or bzcat if appropriate
-	"""
-	if return_output:
-		run_cmd = utils.system_output
-	else:
-		run_cmd = utils.system
+    """
+    equivalent to 'cat file | command' but knows to use
+    zcat or bzcat if appropriate
+    """
+    if return_output:
+        run_cmd = utils.system_output
+    else:
+        run_cmd = utils.system
 
-	if not os.path.isfile(file):
-		raise NameError('invalid file %s to cat to command %s'
-			% (file, command))
-	if file.endswith('.bz2'):
-		return run_cmd('bzcat ' + file + ' | ' + command, ignore_status)
-	elif (file.endswith('.gz') or file.endswith('.tgz')):
-		return run_cmd('zcat ' + file + ' | ' + command, ignore_status)
-	else:
-		return run_cmd('cat ' + file + ' | ' + command, ignore_status)
+    if not os.path.isfile(file):
+        raise NameError('invalid file %s to cat to command %s'
+                % (file, command))
+    if file.endswith('.bz2'):
+        return run_cmd('bzcat ' + file + ' | ' + command, ignore_status)
+    elif (file.endswith('.gz') or file.endswith('.tgz')):
+        return run_cmd('zcat ' + file + ' | ' + command, ignore_status)
+    else:
+        return run_cmd('cat ' + file + ' | ' + command, ignore_status)
 
 
 def extract_tarball_to_dir(tarball, dir):
-	"""
-	Extract a tarball to a specified directory name instead of whatever 
-	the top level of a tarball is - useful for versioned directory names, etc
-	"""
-	if os.path.exists(dir):
-		raise NameError, 'target %s already exists' % dir
-	pwd = os.getcwd()
-	os.chdir(os.path.dirname(os.path.abspath(dir)))
-	newdir = extract_tarball(tarball)
-	os.rename(newdir, dir)
-	os.chdir(pwd)
+    """
+    Extract a tarball to a specified directory name instead of whatever
+    the top level of a tarball is - useful for versioned directory names, etc
+    """
+    if os.path.exists(dir):
+        raise NameError, 'target %s already exists' % dir
+    pwd = os.getcwd()
+    os.chdir(os.path.dirname(os.path.abspath(dir)))
+    newdir = extract_tarball(tarball)
+    os.rename(newdir, dir)
+    os.chdir(pwd)
 
 
 def extract_tarball(tarball):
-	"""Returns the directory extracted by the tarball."""
-	extracted = cat_file_to_cmd(tarball, 'tar xvf - 2>/dev/null',
-					return_output=True).splitlines()
+    """Returns the directory extracted by the tarball."""
+    extracted = cat_file_to_cmd(tarball, 'tar xvf - 2>/dev/null',
+                                    return_output=True).splitlines()
 
-	dir = None
+    dir = None
 
-	for line in extracted:
-		line = re.sub(r'^./', '', line)
-		if not line or line == '.':
-			continue
-		topdir = line.split('/')[0]
-		if os.path.isdir(topdir):
-			if dir:
-				assert(dir == topdir)
-			else:
-				dir = topdir 
-	if dir:
-		return dir
-	else:
-		raise NameError('extracting tarball produced no dir')
+    for line in extracted:
+        line = re.sub(r'^./', '', line)
+        if not line or line == '.':
+            continue
+        topdir = line.split('/')[0]
+        if os.path.isdir(topdir):
+            if dir:
+                assert(dir == topdir)
+            else:
+                dir = topdir
+    if dir:
+        return dir
+    else:
+        raise NameError('extracting tarball produced no dir')
 
 
 def get_md5sum(file_path):
-	"""Gets the md5sum of a file. You must provide a valid path to the file"""
-	if not os.path.isfile(file_path):
-		raise ValueError, 'invalid file %s to verify' % file_path
-	return utils.system_output("md5sum " + file_path + " | awk '{print $1}'")
+    """Gets the md5sum of a file. You must provide a valid path to the file"""
+    if not os.path.isfile(file_path):
+        raise ValueError, 'invalid file %s to verify' % file_path
+    return utils.system_output("md5sum " + file_path + " | awk '{print $1}'")
 
 
 def unmap_url_cache(cachedir, url, expected_md5):
-	"""\
-	Downloads a file from a URL to a cache directory. If the file is already
-	at the expected position and has the expected md5 number, let's not
-	download it again.
-	"""
-	# Let's convert cachedir to a canonical path, if it's not already
-	cachedir = os.path.realpath(cachedir)
-	if not os.path.isdir(cachedir):
-		try:
-			system('mkdir -p ' + cachedir)
-		except:
-			raise ValueError('Could not create cache directory %s' % cachedir)
-	file_from_url = os.path.basename(url)
-	file_local_path = os.path.join(cachedir, file_from_url)
-	if os.path.isfile(file_local_path):
-		file_md5 = get_md5sum(file_local_path)
-		if file_md5 == expected_md5:
-			# File is already at the expected position and ready to go
-			src = file_from_url
-		else:
-			# Let's download the package again, it's corrupted...
-			src = url
-	else:
-		# File is not there, let's download it
-		src = url
-	return utils.unmap_url(cachedir, src, cachedir)
+    """\
+    Downloads a file from a URL to a cache directory. If the file is already
+    at the expected position and has the expected md5 number, let's not
+    download it again.
+    """
+    # Let's convert cachedir to a canonical path, if it's not already
+    cachedir = os.path.realpath(cachedir)
+    if not os.path.isdir(cachedir):
+        try:
+            system('mkdir -p ' + cachedir)
+        except:
+            raise ValueError('Could not create cache directory %s' % cachedir)
+    file_from_url = os.path.basename(url)
+    file_local_path = os.path.join(cachedir, file_from_url)
+    if os.path.isfile(file_local_path):
+        file_md5 = get_md5sum(file_local_path)
+        if file_md5 == expected_md5:
+            # File is already at the expected position and ready to go
+            src = file_from_url
+        else:
+            # Let's download the package again, it's corrupted...
+            src = url
+    else:
+        # File is not there, let's download it
+        src = url
+    return utils.unmap_url(cachedir, src, cachedir)
 
 
 def basename(path):
-	i = path.rfind('/');
-	return path[i+1:]
+    i = path.rfind('/');
+    return path[i+1:]
 
 
 def force_copy(src, dest):
-	"""Replace dest with a new copy of src, even if it exists"""
-	if os.path.isfile(dest):
-		os.remove(dest)
-	if os.path.isdir(dest):
-		dest = os.path.join(dest, os.path.basename(src))
-	shutil.copyfile(src, dest)
-	return dest
+    """Replace dest with a new copy of src, even if it exists"""
+    if os.path.isfile(dest):
+        os.remove(dest)
+    if os.path.isdir(dest):
+        dest = os.path.join(dest, os.path.basename(src))
+    shutil.copyfile(src, dest)
+    return dest
 
 
 def force_link(src, dest):
-	"""Link src to dest, overwriting it if it exists"""
-	return utils.system("ln -sf %s %s" % (src, dest))
+    """Link src to dest, overwriting it if it exists"""
+    return utils.system("ln -sf %s %s" % (src, dest))
 
 
 def file_contains_pattern(file, pattern):
-	"""Return true if file contains the specified egrep pattern"""
-	if not os.path.isfile(file):
-		raise NameError('file %s does not exist' % file)
-	return not utils.system('egrep -q "' + pattern + '" ' + file, ignore_status=True)
+    """Return true if file contains the specified egrep pattern"""
+    if not os.path.isfile(file):
+        raise NameError('file %s does not exist' % file)
+    return not utils.system('egrep -q "' + pattern + '" ' + file, ignore_status=True)
 
 
 def list_grep(list, pattern):
-	"""True if any item in list matches the specified pattern."""
-	compiled = re.compile(pattern)
-	for line in list:
-		match = compiled.search(line)
-		if (match):
-			return 1
-	return 0
+    """True if any item in list matches the specified pattern."""
+    compiled = re.compile(pattern)
+    for line in list:
+        match = compiled.search(line)
+        if (match):
+            return 1
+    return 0
 
 def get_os_vendor():
-	"""Try to guess what's the os vendor
-	"""
-	issue = '/etc/issue'
+    """Try to guess what's the os vendor
+    """
+    issue = '/etc/issue'
 
-	if not os.path.isfile(issue):
-		return 'Unknown'
+    if not os.path.isfile(issue):
+        return 'Unknown'
 
-	if file_contains_pattern(issue, 'Red Hat'):
-		return 'Red Hat'
-	elif file_contains_pattern(issue, 'Fedora Core'):
-		return 'Fedora Core'
-	elif file_contains_pattern(issue, 'SUSE'):
-		return 'SUSE'
-	elif file_contains_pattern(issue, 'Ubuntu'):
-		return 'Ubuntu'
-	elif file_contains_pattern(issue, 'Debian'):
-		return 'Debian'
-	else:
-		return 'Unknown'
+    if file_contains_pattern(issue, 'Red Hat'):
+        return 'Red Hat'
+    elif file_contains_pattern(issue, 'Fedora Core'):
+        return 'Fedora Core'
+    elif file_contains_pattern(issue, 'SUSE'):
+        return 'SUSE'
+    elif file_contains_pattern(issue, 'Ubuntu'):
+        return 'Ubuntu'
+    elif file_contains_pattern(issue, 'Debian'):
+        return 'Debian'
+    else:
+        return 'Unknown'
 
 
 def get_vmlinux():
-	"""Return the full path to vmlinux
+    """Return the full path to vmlinux
 
-	Ahem. This is crap. Pray harder. Bad Martin.
-	"""
-	vmlinux = '/boot/vmlinux-%s' % utils.system_output('uname -r')
-	if os.path.isfile(vmlinux):
-		return vmlinux
-	vmlinux = '/lib/modules/%s/build/vmlinux' % utils.system_output('uname -r')
-	if os.path.isfile(vmlinux):
-		return vmlinux
-	return None
+    Ahem. This is crap. Pray harder. Bad Martin.
+    """
+    vmlinux = '/boot/vmlinux-%s' % utils.system_output('uname -r')
+    if os.path.isfile(vmlinux):
+        return vmlinux
+    vmlinux = '/lib/modules/%s/build/vmlinux' % utils.system_output('uname -r')
+    if os.path.isfile(vmlinux):
+        return vmlinux
+    return None
 
 
 def get_systemmap():
-	"""Return the full path to System.map
+    """Return the full path to System.map
 
-	Ahem. This is crap. Pray harder. Bad Martin.
-	"""
-	map = '/boot/System.map-%s' % utils.system_output('uname -r')
-	if os.path.isfile(map):
-		return map
-	map = '/lib/modules/%s/build/System.map' % utils.system_output('uname -r')
-	if os.path.isfile(map):
-		return map
-	return None
+    Ahem. This is crap. Pray harder. Bad Martin.
+    """
+    map = '/boot/System.map-%s' % utils.system_output('uname -r')
+    if os.path.isfile(map):
+        return map
+    map = '/lib/modules/%s/build/System.map' % utils.system_output('uname -r')
+    if os.path.isfile(map):
+        return map
+    return None
 
 
 def get_modules_dir():
-	"""Return the modules dir for the running kernel version"""
-	kernel_version = utils.system_output('uname -r')
-	return '/lib/modules/%s/kernel' % kernel_version
+    """Return the modules dir for the running kernel version"""
+    kernel_version = utils.system_output('uname -r')
+    return '/lib/modules/%s/kernel' % kernel_version
 
 
 def get_cpu_arch():
-	"""Work out which CPU architecture we're running on"""
-	f = open('/proc/cpuinfo', 'r')
-	cpuinfo = f.readlines()
-	f.close()
-	if list_grep(cpuinfo, '^cpu.*(RS64|POWER3|Broadband Engine)'):
-		return 'power'
-	elif list_grep(cpuinfo, '^cpu.*POWER4'):
-		return 'power4'
-	elif list_grep(cpuinfo, '^cpu.*POWER5'):
-		return 'power5'
-	elif list_grep(cpuinfo, '^cpu.*POWER6'):
-		return 'power6'
-	elif list_grep(cpuinfo, '^cpu.*PPC970'):
-		return 'power970'
-	elif list_grep(cpuinfo, 'Opteron'):
-		return 'x86_64'
-	elif list_grep(cpuinfo, 'GenuineIntel') and list_grep(cpuinfo, '48 bits virtual'):
-		return 'x86_64'
-	else:
-		return 'i386'
+    """Work out which CPU architecture we're running on"""
+    f = open('/proc/cpuinfo', 'r')
+    cpuinfo = f.readlines()
+    f.close()
+    if list_grep(cpuinfo, '^cpu.*(RS64|POWER3|Broadband Engine)'):
+        return 'power'
+    elif list_grep(cpuinfo, '^cpu.*POWER4'):
+        return 'power4'
+    elif list_grep(cpuinfo, '^cpu.*POWER5'):
+        return 'power5'
+    elif list_grep(cpuinfo, '^cpu.*POWER6'):
+        return 'power6'
+    elif list_grep(cpuinfo, '^cpu.*PPC970'):
+        return 'power970'
+    elif list_grep(cpuinfo, 'Opteron'):
+        return 'x86_64'
+    elif list_grep(cpuinfo, 'GenuineIntel') and list_grep(cpuinfo, '48 bits virtual'):
+        return 'x86_64'
+    else:
+        return 'i386'
 
 
 def get_current_kernel_arch():
-	"""Get the machine architecture, now just a wrap of 'uname -m'."""
-	return os.popen('uname -m').read().rstrip()
+    """Get the machine architecture, now just a wrap of 'uname -m'."""
+    return os.popen('uname -m').read().rstrip()
 
 
 def get_file_arch(filename):
-	# -L means follow symlinks
-	file_data = utils.system_output('file -L ' + filename)
-	if file_data.count('80386'):
-		return 'i386'
-	return None
+    # -L means follow symlinks
+    file_data = utils.system_output('file -L ' + filename)
+    if file_data.count('80386'):
+        return 'i386'
+    return None
 
 
 def count_cpus():
-	"""number of CPUs in the local machine according to /proc/cpuinfo"""
-	f = file('/proc/cpuinfo', 'r')
-	cpus = 0
-	for line in f.readlines():
-		if line.startswith('processor'):
-			cpus += 1
-	return cpus
+    """number of CPUs in the local machine according to /proc/cpuinfo"""
+    f = file('/proc/cpuinfo', 'r')
+    cpus = 0
+    for line in f.readlines():
+        if line.startswith('processor'):
+            cpus += 1
+    return cpus
 
 
 # Returns total memory in kb
 def read_from_meminfo(key):
-	meminfo = utils.system_output('grep %s /proc/meminfo' % key)
-	return int(re.search(r'\d+', meminfo).group(0))
+    meminfo = utils.system_output('grep %s /proc/meminfo' % key)
+    return int(re.search(r'\d+', meminfo).group(0))
 
 
 def memtotal():
-	return read_from_meminfo('MemTotal')
+    return read_from_meminfo('MemTotal')
 
 
 def freememtotal():
-	return read_from_meminfo('MemFree')
+    return read_from_meminfo('MemFree')
 
 
 def sysctl_kernel(key, value=None):
-	"""(Very) partial implementation of sysctl, for kernel params"""
-	if value:
-		# write
-		utils.write_one_line('/proc/sys/kernel/%s' % key, str(value))
-	else:
-		# read
-		out = utils.read_one_line('/proc/sys/kernel/%s' % key)
-		return int(re.search(r'\d+', out).group(0))
+    """(Very) partial implementation of sysctl, for kernel params"""
+    if value:
+        # write
+        utils.write_one_line('/proc/sys/kernel/%s' % key, str(value))
+    else:
+        # read
+        out = utils.read_one_line('/proc/sys/kernel/%s' % key)
+        return int(re.search(r'\d+', out).group(0))
 
 
 def _convert_exit_status(sts):
-	if os.WIFSIGNALED(sts):
-		return -os.WTERMSIG(sts)
-	elif os.WIFEXITED(sts):
-		return os.WEXITSTATUS(sts)
-	else:
-		# impossible?
-		raise RuntimeError("Unknown exit status %d!" % sts)
+    if os.WIFSIGNALED(sts):
+        return -os.WTERMSIG(sts)
+    elif os.WIFEXITED(sts):
+        return os.WEXITSTATUS(sts)
+    else:
+        # impossible?
+        raise RuntimeError("Unknown exit status %d!" % sts)
 
 
 def where_art_thy_filehandles():
-	"""Dump the current list of filehandles"""
-	os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
+    """Dump the current list of filehandles"""
+    os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
 
 
 def print_to_tty(string):
-	"""Output string straight to the tty"""
-	open('/dev/tty', 'w').write(string + '\n')
+    """Output string straight to the tty"""
+    open('/dev/tty', 'w').write(string + '\n')
 
 
 def dump_object(object):
-	"""Dump an object's attributes and methods
+    """Dump an object's attributes and methods
 
-	kind of like dir()
-	"""
-	for item in object.__dict__.iteritems():
-		print item
-		try:
-			(key,value) = item
-			dump_object(value)
-		except:
-			continue
+    kind of like dir()
+    """
+    for item in object.__dict__.iteritems():
+        print item
+        try:
+            (key,value) = item
+            dump_object(value)
+        except:
+            continue
 
 
 def environ(env_key):
-	"""return the requested environment variable, or '' if unset"""
-	if (os.environ.has_key(env_key)):
-		return os.environ[env_key]
-	else:
-		return ''
+    """return the requested environment variable, or '' if unset"""
+    if (os.environ.has_key(env_key)):
+        return os.environ[env_key]
+    else:
+        return ''
 
 
 def prepend_path(newpath, oldpath):
-	"""prepend newpath to oldpath"""
-	if (oldpath):
-		return newpath + ':' + oldpath
-	else:
-		return newpath
+    """prepend newpath to oldpath"""
+    if (oldpath):
+        return newpath + ':' + oldpath
+    else:
+        return newpath
 
 
 def append_path(oldpath, newpath):
-	"""append newpath to oldpath"""
-	if (oldpath):
-		return oldpath + ':' + newpath
-	else:
-		return newpath
+    """append newpath to oldpath"""
+    if (oldpath):
+        return oldpath + ':' + newpath
+    else:
+        return newpath
 
 
 def avgtime_print(dir):
-	""" Calculate some benchmarking statistics.
-	    Input is a directory containing a file called 'time'.
-	    File contains one-per-line results of /usr/bin/time.
-	    Output is average Elapsed, User, and System time in seconds,
-	      and average CPU percentage.
-	"""
-	f = open(dir + "/time")
-	user = system = elapsed = cpu = count = 0
-	r = re.compile('([\d\.]*)user ([\d\.]*)system (\d*):([\d\.]*)elapsed (\d*)%CPU')
-	for line in f.readlines():
-		try:
-			s = r.match(line);
-			user += float(s.group(1))
-			system += float(s.group(2))
-			elapsed += (float(s.group(3)) * 60) + float(s.group(4))
-			cpu += float(s.group(5))
-			count += 1
-		except:
-			raise ValueError("badly formatted times")
+    """ Calculate some benchmarking statistics.
+        Input is a directory containing a file called 'time'.
+        File contains one-per-line results of /usr/bin/time.
+        Output is average Elapsed, User, and System time in seconds,
+          and average CPU percentage.
+    """
+    f = open(dir + "/time")
+    user = system = elapsed = cpu = count = 0
+    r = re.compile('([\d\.]*)user ([\d\.]*)system (\d*):([\d\.]*)elapsed (\d*)%CPU')
+    for line in f.readlines():
+        try:
+            s = r.match(line);
+            user += float(s.group(1))
+            system += float(s.group(2))
+            elapsed += (float(s.group(3)) * 60) + float(s.group(4))
+            cpu += float(s.group(5))
+            count += 1
+        except:
+            raise ValueError("badly formatted times")
 
-	f.close()
-	return "Elapsed: %0.2fs User: %0.2fs System: %0.2fs CPU: %0.0f%%" % \
-	      (elapsed/count, user/count, system/count, cpu/count)
+    f.close()
+    return "Elapsed: %0.2fs User: %0.2fs System: %0.2fs CPU: %0.0f%%" % \
+          (elapsed/count, user/count, system/count, cpu/count)
 
 
 def running_config():
-	"""
-	Return path of config file of the currently running kernel
-	"""
-	version = utils.system_output('uname -r')
-	for config in ('/proc/config.gz', \
-		       '/boot/config-%s' % version,
-		       '/lib/modules/%s/build/.config' % version):
-		if os.path.isfile(config):
-			return config
-	return None
+    """
+    Return path of config file of the currently running kernel
+    """
+    version = utils.system_output('uname -r')
+    for config in ('/proc/config.gz', \
+                   '/boot/config-%s' % version,
+                   '/lib/modules/%s/build/.config' % version):
+        if os.path.isfile(config):
+            return config
+    return None
 
 
 def check_for_kernel_feature(feature):
-	config = running_config()
+    config = running_config()
 
-	if not config:
-		raise TypeError("Can't find kernel config file")
+    if not config:
+        raise TypeError("Can't find kernel config file")
 
-	if config.endswith('.gz'):
-		grep = 'zgrep'
-	else:
-		grep = 'grep'
-	grep += ' ^CONFIG_%s= %s' % (feature, config)
+    if config.endswith('.gz'):
+        grep = 'zgrep'
+    else:
+        grep = 'grep'
+    grep += ' ^CONFIG_%s= %s' % (feature, config)
 
-	if not utils.system_output(grep, ignore_status=True):
-		raise ValueError("Kernel doesn't have a %s feature" % (feature))
+    if not utils.system_output(grep, ignore_status=True):
+        raise ValueError("Kernel doesn't have a %s feature" % (feature))
 
 
 def cpu_online_map():
-	"""
-	Check out the available cpu online map
-	"""
-	cpus = []
-	for line in open('/proc/cpuinfo', 'r').readlines():
-		if line.startswith('processor'):
-			cpus.append(line.split()[2]) # grab cpu number
-	return cpus
+    """
+    Check out the available cpu online map
+    """
+    cpus = []
+    for line in open('/proc/cpuinfo', 'r').readlines():
+        if line.startswith('processor'):
+            cpus.append(line.split()[2]) # grab cpu number
+    return cpus
 
 
 def check_glibc_ver(ver):
-	glibc_ver = commands.getoutput('ldd --version').splitlines()[0]
-	glibc_ver = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver).group()
-	if glibc_ver.split('.') < ver.split('.'):
-		raise error.TestError("Glibc is too old (%s). Glibc >= %s is needed." % \
-							(glibc_ver, ver))
+    glibc_ver = commands.getoutput('ldd --version').splitlines()[0]
+    glibc_ver = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver).group()
+    if glibc_ver.split('.') < ver.split('.'):
+        raise error.TestError("Glibc is too old (%s). Glibc >= %s is needed." % \
+                                                (glibc_ver, ver))
 
 def check_kernel_ver(ver):
-	kernel_ver = utils.system_output('uname -r')
-	kv_tmp = re.split(r'[-]', kernel_ver)[0:3]
-	if kv_tmp[0].split('.') < ver.split('.'):
-		raise error.TestError("Kernel is too old (%s). Kernel > %s is needed." % \
-							(kernel_ver, ver))
+    kernel_ver = utils.system_output('uname -r')
+    kv_tmp = re.split(r'[-]', kernel_ver)[0:3]
+    if kv_tmp[0].split('.') < ver.split('.'):
+        raise error.TestError("Kernel is too old (%s). Kernel > %s is needed." % \
+                                                (kernel_ver, ver))
 
 
 def human_format(number):
-	# Convert number to kilo / mega / giga format.
-	if number < 1024:
-		return "%d" % number
-	kilo = float(number) / 1024.0
-	if kilo < 1024:
-		return "%.2fk" % kilo
-	meg = kilo / 1024.0
-	if meg < 1024:
-		return "%.2fM" % meg
-	gig = meg / 1024.0
-	return "%.2fG" % gig
+    # Convert number to kilo / mega / giga format.
+    if number < 1024:
+        return "%d" % number
+    kilo = float(number) / 1024.0
+    if kilo < 1024:
+        return "%.2fk" % kilo
+    meg = kilo / 1024.0
+    if meg < 1024:
+        return "%.2fM" % meg
+    gig = meg / 1024.0
+    return "%.2fG" % gig
 
 
 def numa_nodes():
-	node_paths = glob.glob('/sys/devices/system/node/node*')
-	nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths]
-	return (sorted(nodes))
+    node_paths = glob.glob('/sys/devices/system/node/node*')
+    nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths]
+    return (sorted(nodes))
 
 
 def node_size():
-	nodes = max(len(numa_nodes()), 1)
-	return ((memtotal() * 1024) / nodes)
+    nodes = max(len(numa_nodes()), 1)
+    return ((memtotal() * 1024) / nodes)
 
 
 def to_seconds(time_string):
-	"""Converts a string in M+:SS.SS format to S+.SS"""
-	elts = time_string.split(':')
-	if len(elts) == 1:
-		return time_string
-	return str(int(elts[0]) * 60 + float(elts[1]))
+    """Converts a string in M+:SS.SS format to S+.SS"""
+    elts = time_string.split(':')
+    if len(elts) == 1:
+        return time_string
+    return str(int(elts[0]) * 60 + float(elts[1]))
 
 
 def extract_all_time_results(results_string):
-	"""Extract user, system, and elapsed times into a list of tuples"""
-	pattern = re.compile(r"(.*?)user (.*?)system (.*?)elapsed")
-	results = []
-	for result in pattern.findall(results_string):
-		results.append(tuple([to_seconds(elt) for elt in result]))
-	return results
+    """Extract user, system, and elapsed times into a list of tuples"""
+    pattern = re.compile(r"(.*?)user (.*?)system (.*?)elapsed")
+    results = []
+    for result in pattern.findall(results_string):
+        results.append(tuple([to_seconds(elt) for elt in result]))
+    return results
 
 
 def pickle_load(filename):
-	return pickle.load(open(filename, 'r'))
+    return pickle.load(open(filename, 'r'))
 
 
 # Return the kernel version and build timestamp.
 def running_os_release():
-	return os.uname()[2:4]
+    return os.uname()[2:4]
 
 
 def running_os_ident():
-	(version, timestamp) = running_os_release()
-	return version + '::' + timestamp
+    (version, timestamp) = running_os_release()
+    return version + '::' + timestamp
 
 
 # much like find . -name 'pattern'
 def locate(pattern, root=os.getcwd()):
-	for path, dirs, files in os.walk(root):
-		for f in [os.path.abspath(os.path.join(path, f))
-			for f in files if fnmatch.fnmatch(f, pattern)]:
-				yield f
+    for path, dirs, files in os.walk(root):
+        for f in [os.path.abspath(os.path.join(path, f))
+                for f in files if fnmatch.fnmatch(f, pattern)]:
+            yield f
 
 
 def freespace(path):
-	"""Return the disk free space, in bytes"""
-	s = os.statvfs(path)
-	return s.f_bavail * s.f_bsize
+    """Return the disk free space, in bytes"""
+    s = os.statvfs(path)
+    return s.f_bavail * s.f_bsize
 
 
 def disk_block_size(path):
-	"""Return the disk block size, in bytes"""
-	return os.statvfs(path).f_bsize
+    """Return the disk block size, in bytes"""
+    return os.statvfs(path).f_bsize
 
 
 def get_cpu_family():
-	procinfo = utils.system_output('cat /proc/cpuinfo')
-	CPU_FAMILY_RE = re.compile(r'^cpu family\s+:\s+(\S+)', re.M)
-	matches = CPU_FAMILY_RE.findall(procinfo)
-	if matches:
-		return int(matches[0])
-	else:
-		raise error.TestError('Could not get valid cpu family data')
+    procinfo = utils.system_output('cat /proc/cpuinfo')
+    CPU_FAMILY_RE = re.compile(r'^cpu family\s+:\s+(\S+)', re.M)
+    matches = CPU_FAMILY_RE.findall(procinfo)
+    if matches:
+        return int(matches[0])
+    else:
+        raise error.TestError('Could not get valid cpu family data')
 
 
 def get_disks():
-	df_output = utils.system_output('df')
-	disk_re = re.compile(r'^(/dev/hd[a-z]+)3', re.M)
-	return disk_re.findall(df_output)
+    df_output = utils.system_output('df')
+    disk_re = re.compile(r'^(/dev/hd[a-z]+)3', re.M)
+    return disk_re.findall(df_output)
 
 
 def load_module(module_name):
-	# Checks if a module has already been loaded
-	if module_is_loaded(module_name):
-		return False
+    # Checks if a module has already been loaded
+    if module_is_loaded(module_name):
+        return False
 
-	utils.system('/sbin/modprobe ' + module_name)
-	return True
+    utils.system('/sbin/modprobe ' + module_name)
+    return True
 
 
 def unload_module(module_name):
-	utils.system('/sbin/rmmod ' + module_name)
+    utils.system('/sbin/rmmod ' + module_name)
 
 
 def module_is_loaded(module_name):
-	module_name = module_name.replace('-', '_')
-	modules = utils.system_output('/sbin/lsmod').splitlines()
-	for module in modules:
-		if module.startswith(module_name) and module[len(module_name)] == ' ':
-			return True
-	return False
+    module_name = module_name.replace('-', '_')
+    modules = utils.system_output('/sbin/lsmod').splitlines()
+    for module in modules:
+        if module.startswith(module_name) and module[len(module_name)] == ' ':
+            return True
+    return False
 
 
 def get_loaded_modules():
-	lsmod_output = utils.system_output('/sbin/lsmod').splitlines()[1:]
-	return [line.split(None, 1)[0] for line in lsmod_output]
+    lsmod_output = utils.system_output('/sbin/lsmod').splitlines()[1:]
+    return [line.split(None, 1)[0] for line in lsmod_output]
 
 
 def get_huge_page_size():
-	output = utils.system_output('grep Hugepagesize /proc/meminfo')
-	return int(output.split()[1]) # Assumes units always in kB. :(
+    output = utils.system_output('grep Hugepagesize /proc/meminfo')
+    return int(output.split()[1]) # Assumes units always in kB. :(
 
 
 def get_num_huge_pages():
-	raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
-	return int(raw_hugepages.split()[2])
+    raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
+    return int(raw_hugepages.split()[2])
 
 
 def set_num_huge_pages(num):
-	utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
+    utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
 
 
 def get_system_nodes():
-	nodes =	os.listdir('/sys/devices/system/node')
-	nodes.sort()
-	return nodes
+    nodes = os.listdir('/sys/devices/system/node')
+    nodes.sort()
+    return nodes
 
 
 def get_cpu_vendor():
-	cpuinfo = open('/proc/cpuinfo').read()
-	vendors = re.findall(r'(?m)^vendor_id\s*:\s*(\S+)\s*$', cpuinfo)
-	for i in xrange(1, len(vendors)):
-		if vendors[i] != vendors[0]:
-			raise error.TestError('multiple cpu vendors found: ' + str(vendors))
-	return vendors[0]
+    cpuinfo = open('/proc/cpuinfo').read()
+    vendors = re.findall(r'(?m)^vendor_id\s*:\s*(\S+)\s*$', cpuinfo)
+    for i in xrange(1, len(vendors)):
+        if vendors[i] != vendors[0]:
+            raise error.TestError('multiple cpu vendors found: ' + str(vendors))
+    return vendors[0]
 
 
 def probe_cpus():
-	"""
-	    This routine returns a list of cpu devices found under /sys/devices/system/cpu.
-	"""
-	output = utils.system_output(
-	           'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*')
-	return output.splitlines()
+    """
+        This routine returns a list of cpu devices found under /sys/devices/system/cpu.
+    """
+    output = utils.system_output(
+               'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*')
+    return output.splitlines()
 
 
 def ping_default_gateway():
-	"""Ping the default gateway."""
-	
-	network = open('/etc/sysconfig/network')
-	m = re.search('GATEWAY=(\S+)', network.read())
+    """Ping the default gateway."""
 
-	if m:
-		gw = m.group(1)
-		cmd = 'ping %s -c 5 > /dev/null' % gw
-		return utils.system(cmd, ignore_status=True)
-	
-	raise error.TestError('Unable to find default gateway')
+    network = open('/etc/sysconfig/network')
+    m = re.search('GATEWAY=(\S+)', network.read())
+
+    if m:
+        gw = m.group(1)
+        cmd = 'ping %s -c 5 > /dev/null' % gw
+        return utils.system(cmd, ignore_status=True)
+
+    raise error.TestError('Unable to find default gateway')
 
 
 try:
-	from site_utils import *
+    from site_utils import *
 except ImportError:
-	pass
+    pass
diff --git a/client/bin/boottool.py b/client/bin/boottool.py
index 43f7f39..7c10618 100644
--- a/client/bin/boottool.py
+++ b/client/bin/boottool.py
@@ -2,125 +2,124 @@
 from autotest_lib.client.common_lib import utils, error
 
 class boottool:
-	def __init__(self, boottool_exec=None):
-		#variable to indicate if in mode to write entries for Xen
-		self.xen_mode = False
+    def __init__(self, boottool_exec=None):
+        #variable to indicate if in mode to write entries for Xen
+        self.xen_mode = False
 
-		if boottool_exec:
-			self.boottool_exec = boottool_exec
-		else:
-			autodir = os.environ['AUTODIR']
-			self.boottool_exec = autodir + '/tools/boottool'
+        if boottool_exec:
+            self.boottool_exec = boottool_exec
+        else:
+            autodir = os.environ['AUTODIR']
+            self.boottool_exec = autodir + '/tools/boottool'
 
-		if not self.boottool_exec:
-			raise error.AutotestError('Failed to set boottool_exec')
+        if not self.boottool_exec:
+            raise error.AutotestError('Failed to set boottool_exec')
 
 
-	def run_boottool(self, params):
-		return utils.system_output('%s %s' % (self.boottool_exec, params))
+    def run_boottool(self, params):
+        return utils.system_output('%s %s' % (self.boottool_exec, params))
 
 
-	def bootloader(self):
-		return self.run_boottool('--bootloader-probe')
+    def bootloader(self):
+        return self.run_boottool('--bootloader-probe')
 
 
-	def architecture(self):
-		return self.run_boottool('--arch-probe')
+    def architecture(self):
+        return self.run_boottool('--arch-probe')
 
 
-	def list_titles(self):
-		print self.run_boottool('--info all | grep title')
+    def list_titles(self):
+        print self.run_boottool('--info all | grep title')
 
 
-	def print_entry(self, index):
-		print self.run_boottool('--info=%s' % index)
+    def print_entry(self, index):
+        print self.run_boottool('--info=%s' % index)
 
 
-	def get_default(self):
-		self.run_boottool('--default')
+    def get_default(self):
+        self.run_boottool('--default')
 
 
-	def set_default(self, index):
-		print self.run_boottool('--set-default=%s' % index)
+    def set_default(self, index):
+        print self.run_boottool('--set-default=%s' % index)
 
 
-	def enable_xen_mode(self):
-		self.xen_mode = True
+    def enable_xen_mode(self):
+        self.xen_mode = True
 
 
-	def disable_xen_mode(self):
-		self.xen_mode = False
+    def disable_xen_mode(self):
+        self.xen_mode = False
 
 
-	def get_xen_mode(self):
-		return self.xen_mode
+    def get_xen_mode(self):
+        return self.xen_mode
 
 
-	# 'kernel' can be an position number or a title
-	def add_args(self, kernel, args):
-		parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
+    # 'kernel' can be a position number or a title
+    def add_args(self, kernel, args):
+        parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
 
-		#add parameter if this is a Xen entry
-		if self.xen_mode:
-			parameters += ' --xen'
+        #add parameter if this is a Xen entry
+        if self.xen_mode:
+            parameters += ' --xen'
 
-		print self.run_boottool(parameters)
+        print self.run_boottool(parameters)
 
 
-	def add_xen_hypervisor_args(self, kernel, args):
-		self.run_boottool('--xen --update-xenhyper=%s --xha="%s"') %(kernel, args)
- 
-
-	def remove_args(self, kernel, args):
-		parameters = '--update-kernel=%s --remove-args=%s' % (kernel, args)
-
-		#add parameter if this is a Xen entry
-		if self.xen_mode:
-			parameters += ' --xen'
-
-		print self.run_boottool(parameters)
+    def add_xen_hypervisor_args(self, kernel, args):
+        self.run_boottool('--xen --update-xenhyper=%s --xha="%s"' % (kernel, args))
 
 
-	def remove_xen_hypervisor_args(self, kernel, args):
-		self.run_boottool('--xen --update-xenhyper=%s --remove-args="%s"') \
-			% (kernel, args)
+    def remove_args(self, kernel, args):
+        parameters = '--update-kernel=%s --remove-args=%s' % (kernel, args)
+
+        #add parameter if this is a Xen entry
+        if self.xen_mode:
+            parameters += ' --xen'
+
+        print self.run_boottool(parameters)
 
 
-	def add_kernel(self, path, title='autotest', initrd='', xen_hypervisor='', args=None, root=None, position='end'):
-		parameters = '--add-kernel=%s --title=%s' % (path, title)
-
-		# add an initrd now or forever hold your peace
-		if initrd:
-			parameters += ' --initrd=%s' % initrd
-
-		# add parameter if this is a Xen entry
-		if self.xen_mode:
-			parameters += ' --xen'
-			if xen_hypervisor:
-				parameters += ' --xenhyper=%s' % xen_hypervisor
-
-		if args:
-			parameters += ' --args="%s"' % args
-		if root:
-			parameters += ' --root="%s"' % root
-		if position:
-			parameters += ' --position="%s"' % position
-
-		print self.run_boottool(parameters)
+    def remove_xen_hypervisor_args(self, kernel, args):
+        self.run_boottool('--xen --update-xenhyper=%s --remove-args="%s"'
+                          % (kernel, args))
 
 
-	def remove_kernel(self, kernel):
-		print self.run_boottool('--remove-kernel=%s' % kernel)
+    def add_kernel(self, path, title='autotest', initrd='', xen_hypervisor='', args=None, root=None, position='end'):
+        parameters = '--add-kernel=%s --title=%s' % (path, title)
+
+        # add an initrd now or forever hold your peace
+        if initrd:
+            parameters += ' --initrd=%s' % initrd
+
+        # add parameter if this is a Xen entry
+        if self.xen_mode:
+            parameters += ' --xen'
+            if xen_hypervisor:
+                parameters += ' --xenhyper=%s' % xen_hypervisor
+
+        if args:
+            parameters += ' --args="%s"' % args
+        if root:
+            parameters += ' --root="%s"' % root
+        if position:
+            parameters += ' --position="%s"' % position
+
+        print self.run_boottool(parameters)
 
 
-	def boot_once(self, title):
-		print self.run_boottool('--boot-once --title=%s' % title)
+    def remove_kernel(self, kernel):
+        print self.run_boottool('--remove-kernel=%s' % kernel)
 
 
-	def info(self, index):
-		return self.run_boottool('--info=%s' % index)
+    def boot_once(self, title):
+        print self.run_boottool('--boot-once --title=%s' % title)
+
+
+    def info(self, index):
+        return self.run_boottool('--info=%s' % index)
 
 
 # TODO:  backup()
 # TODO:  set_timeout()
-
diff --git a/client/bin/common.py b/client/bin/common.py
index 74ed759..6881386 100644
--- a/client/bin/common.py
+++ b/client/bin/common.py
@@ -5,4 +5,4 @@
 import setup_modules
 sys.path.pop(0)
 setup_modules.setup(base_path=client_dir,
-		    root_module_name="autotest_lib.client")
+                    root_module_name="autotest_lib.client")
diff --git a/client/bin/config.py b/client/bin/config.py
index 59b8279..477bece 100644
--- a/client/bin/config.py
+++ b/client/bin/config.py
@@ -8,7 +8,7 @@
 Please no StudlyCaps.
 
 For example:
-	boot.default_args
+        boot.default_args
 """
 
 __author__ = """Copyright Andy Whitcroft 2006"""
@@ -16,33 +16,33 @@
 import os
 
 class config:
-	"""The BASIC job configuration
+    """The BASIC job configuration
 
-	Properties:
-		job
-			The job object for this job
-		config
-			The job configuration dictionary
-	"""
+    Properties:
+            job
+                    The job object for this job
+            config
+                    The job configuration dictionary
+    """
 
-	def __init__(self, job):
-		"""
-			job
-				The job object for this job
-		"""
-		self.job = job
-		self.config = {}
+    def __init__(self, job):
+        """
+                job
+                        The job object for this job
+        """
+        self.job = job
+        self.config = {}
 
 
-        def set(self, name, value):
-		if name == "proxy":
-			os.environ['http_proxy'] = value
-			os.environ['ftp_proxy'] = value
+    def set(self, name, value):
+        if name == "proxy":
+            os.environ['http_proxy'] = value
+            os.environ['ftp_proxy'] = value
 
-		self.config[name] = value
+        self.config[name] = value
 
-	def get(self, name):
-		if name in self.config:
-			return self.config[name]
-		else:
-			return None
+    def get(self, name):
+        if name in self.config:
+            return self.config[name]
+        else:
+            return None
diff --git a/client/bin/cpuset.py b/client/bin/cpuset.py
index a870e76..fe583ac 100644
--- a/client/bin/cpuset.py
+++ b/client/bin/cpuset.py
@@ -9,265 +9,265 @@
 
 # Convert '1-3,7,9-12' to [1,2,3,7,9,10,11,12]
 def rangelist_to_list(rangelist):
-	result = []
-	if not rangelist:
-		return result
-	for x in rangelist.split(','):
-		if re.match(r'^(\d+)$', x):
-			result.append(int(x))
-			continue
-		m = re.match(r'^(\d+)-(\d+)$', x)
-		if m:
-			start = int(m.group(1))
-			end = int(m.group(2))
-			result += range(start, end+1)
-			continue
-		msg = 'Cannot understand data input: %s %s' % (x, rangelist)
-		raise ValueError(msg)
-	return result
+    result = []
+    if not rangelist:
+        return result
+    for x in rangelist.split(','):
+        if re.match(r'^(\d+)$', x):
+            result.append(int(x))
+            continue
+        m = re.match(r'^(\d+)-(\d+)$', x)
+        if m:
+            start = int(m.group(1))
+            end = int(m.group(2))
+            result += range(start, end+1)
+            continue
+        msg = 'Cannot understand data input: %s %s' % (x, rangelist)
+        raise ValueError(msg)
+    return result
 
 
 def rounded_memtotal():
-	# Get total of all physical mem, in Kbytes
-	usable_Kbytes = autotest_utils.memtotal()
-	# usable_Kbytes is system's usable DRAM in Kbytes,
-	#   as reported by memtotal() from device /proc/meminfo memtotal
-	#   after Linux deducts 1.5% to 5.1% for system table overhead
-	# Undo the unknown actual deduction by rounding up
-	#   to next small multiple of a big power-of-two
-	#   eg  12GB - 5.1% gets rounded back up to 12GB
-	mindeduct = 0.015  # 1.5 percent
-	maxdeduct = 0.055  # 5.5 percent
-	# deduction range 1.5% .. 5.5% supports physical mem sizes
-	#    6GB .. 12GB in steps of .5GB
-	#   12GB .. 24GB in steps of 1 GB
-	#   24GB .. 48GB in steps of 2 GB ...
-	# Finer granularity in physical mem sizes would require
-	#   tighter spread between min and max possible deductions
+    # Get total of all physical mem, in Kbytes
+    usable_Kbytes = autotest_utils.memtotal()
+    # usable_Kbytes is system's usable DRAM in Kbytes,
+    #   as reported by memtotal() from device /proc/meminfo memtotal
+    #   after Linux deducts 1.5% to 5.1% for system table overhead
+    # Undo the unknown actual deduction by rounding up
+    #   to next small multiple of a big power-of-two
+    #   eg  12GB - 5.1% gets rounded back up to 12GB
+    mindeduct = 0.015  # 1.5 percent
+    maxdeduct = 0.055  # 5.5 percent
+    # deduction range 1.5% .. 5.5% supports physical mem sizes
+    #    6GB .. 12GB in steps of .5GB
+    #   12GB .. 24GB in steps of 1 GB
+    #   24GB .. 48GB in steps of 2 GB ...
+    # Finer granularity in physical mem sizes would require
+    #   tighter spread between min and max possible deductions
 
-	# increase mem size by at least min deduction, without rounding
-	min_Kbytes   = int(usable_Kbytes / (1.0 - mindeduct))
-	# increase mem size further by 2**n rounding, by 0..roundKb or more
-	round_Kbytes = int(usable_Kbytes / (1.0 - maxdeduct)) - min_Kbytes
-	# find least binary roundup 2**n that covers worst-cast roundKb
-	mod2n = 1 << int(math.ceil(math.log(round_Kbytes, 2)))
-	# have round_Kbytes <= mod2n < round_Kbytes*2
-	# round min_Kbytes up to next multiple of mod2n
-	phys_Kbytes = min_Kbytes + mod2n - 1
-	phys_Kbytes = phys_Kbytes - (phys_Kbytes % mod2n)  # clear low bits
-	return phys_Kbytes
+    # increase mem size by at least min deduction, without rounding
+    min_Kbytes   = int(usable_Kbytes / (1.0 - mindeduct))
+    # increase mem size further by 2**n rounding, by 0..roundKb or more
+    round_Kbytes = int(usable_Kbytes / (1.0 - maxdeduct)) - min_Kbytes
+    # find least binary roundup 2**n that covers worst-case roundKb
+    mod2n = 1 << int(math.ceil(math.log(round_Kbytes, 2)))
+    # have round_Kbytes <= mod2n < round_Kbytes*2
+    # round min_Kbytes up to next multiple of mod2n
+    phys_Kbytes = min_Kbytes + mod2n - 1
+    phys_Kbytes = phys_Kbytes - (phys_Kbytes % mod2n)  # clear low bits
+    return phys_Kbytes
 
 
 def my_container_name():
-	# Get current process's inherited or self-built container name
-	#   within /dev/cpuset.  Is '/' for root container, '/sys', etc. 
-	return utils.read_one_line('/proc/%i/cpuset' % os.getpid())
+    # Get current process's inherited or self-built container name
+    #   within /dev/cpuset.  Is '/' for root container, '/sys', etc.
+    return utils.read_one_line('/proc/%i/cpuset' % os.getpid())
 
 
 def get_mem_nodes(container_full_name):
-	file_name = os.path.join(container_full_name, "mems")
-	if os.path.exists(file_name):
-		return rangelist_to_list(utils.read_one_line(file_name))
-	else:
-		return []
+    file_name = os.path.join(container_full_name, "mems")
+    if os.path.exists(file_name):
+        return rangelist_to_list(utils.read_one_line(file_name))
+    else:
+        return []
 
 
 def available_exclusive_mem_nodes(parent_container):
-	# Get list of numa memory nodes of parent container which could 
-	#  be allocated exclusively to new child containers.
-	# This excludes any nodes now allocated (exclusively or not) 
-	#  to existing children.
-	available = set(get_mem_nodes(parent_container))
-	for child_container in glob.glob('%s/*/mems' % parent_container):
-		child_container = os.path.dirname(child_container)
-		busy = set(get_mem_nodes(child_container))
-		available -= busy
-	return list(available)
+    # Get list of numa memory nodes of parent container which could
+    #  be allocated exclusively to new child containers.
+    # This excludes any nodes now allocated (exclusively or not)
+    #  to existing children.
+    available = set(get_mem_nodes(parent_container))
+    for child_container in glob.glob('%s/*/mems' % parent_container):
+        child_container = os.path.dirname(child_container)
+        busy = set(get_mem_nodes(child_container))
+        available -= busy
+    return list(available)
 
 
 def my_mem_nodes():
-	# Get list of numa memory nodes owned by current process's container.
-	return get_mem_nodes('/dev/cpuset%s' % my_container_name())
+    # Get list of numa memory nodes owned by current process's container.
+    return get_mem_nodes('/dev/cpuset%s' % my_container_name())
 
 
 def my_available_exclusive_mem_nodes():
-	# Get list of numa memory nodes owned by current process's
-	# container, which could be allocated exclusively to new child
-	# containers.  This excludes any nodes now allocated
-	# (exclusively or not) to existing children.
-	return available_exclusive_mem_nodes('/dev/cpuset%s' % my_container_name())
+    # Get list of numa memory nodes owned by current process's
+    # container, which could be allocated exclusively to new child
+    # containers.  This excludes any nodes now allocated
+    # (exclusively or not) to existing children.
+    return available_exclusive_mem_nodes('/dev/cpuset%s' % my_container_name())
 
 
 def mbytes_per_mem_node():
-	# Get mbyte size of each numa mem node, as float
-	# Replaces autotest_utils.node_size().
-	# Based on guessed total physical mem size, not on kernel's 
-	#   lesser 'available memory' after various system tables.
-	# Can be non-integer when kernel sets up 15 nodes instead of 16.
-	return rounded_memtotal() / (len(autotest_utils.numa_nodes()) * 1024.0)
+    # Get mbyte size of each numa mem node, as float
+    # Replaces autotest_utils.node_size().
+    # Based on guessed total physical mem size, not on kernel's
+    #   lesser 'available memory' after various system tables.
+    # Can be non-integer when kernel sets up 15 nodes instead of 16.
+    return rounded_memtotal() / (len(autotest_utils.numa_nodes()) * 1024.0)
 
 
 def get_cpus(container_full_name):
-	file_name = os.path.join(container_full_name, "cpus")
-	if os.path.exists(file_name):
-		return rangelist_to_list(utils.read_one_line(file_name))
-	else:
-		return []
+    file_name = os.path.join(container_full_name, "cpus")
+    if os.path.exists(file_name):
+        return rangelist_to_list(utils.read_one_line(file_name))
+    else:
+        return []
 
 
 def my_cpus():
-	# Get list of cpu cores owned by current process's container.
-	return get_cpus('/dev/cpuset%s' % my_container_name())
+    # Get list of cpu cores owned by current process's container.
+    return get_cpus('/dev/cpuset%s' % my_container_name())
 
 
 def get_tasks(setname):
-	return [x.rstrip() for x in open(setname+'/tasks').readlines()]
+    return [x.rstrip() for x in open(setname+'/tasks').readlines()]
 
 
 def print_one_cpuset(name):
-	dir = os.path.join('/dev/cpuset', name)
-	cpus = utils.read_one_line(dir + '/cpus')
-	mems = utils.read_one_line(dir + '/mems')
-	node_size_ = int(mbytes_per_mem_node()) << 20
-	memtotal = node_size_ * len(rangelist_to_list(mems))
-	tasks = ','.join(get_tasks(dir))
-	print "cpuset %s: size %s; tasks %s; cpus %s; mems %s" % \
-	      (name, autotest_utils.human_format(memtotal), tasks, cpus, mems)
+    dir = os.path.join('/dev/cpuset', name)
+    cpus = utils.read_one_line(dir + '/cpus')
+    mems = utils.read_one_line(dir + '/mems')
+    node_size_ = int(mbytes_per_mem_node()) << 20
+    memtotal = node_size_ * len(rangelist_to_list(mems))
+    tasks = ','.join(get_tasks(dir))
+    print "cpuset %s: size %s; tasks %s; cpus %s; mems %s" % \
+          (name, autotest_utils.human_format(memtotal), tasks, cpus, mems)
 
 
 def print_all_cpusets():
-	for cpuset in glob.glob('/dev/cpuset/*'):
-		print_one_cpuset(re.sub(r'.*/', '', cpuset))
+    for cpuset in glob.glob('/dev/cpuset/*'):
+        print_one_cpuset(re.sub(r'.*/', '', cpuset))
 
 
 def release_dead_containers(parent=super_root):
-	# Delete temp subcontainers nested within parent container
-	#   that are now dead (having no tasks and no sub-containers) 
-	#   and recover their cpu and mem resources.
-	# Must not call when a parallel task may be allocating containers!
-	# Limit to test* names to preserve permanent containers.
-	for child in glob.glob('%s/test*' % parent):
-		print 'releasing dead container', child
-		release_dead_containers(child)  # bottom-up tree walk
-		# rmdir has no effect when container still 
-		#   has tasks or sub-containers
-		os.rmdir(child)
+    # Delete temp subcontainers nested within parent container
+    #   that are now dead (having no tasks and no sub-containers)
+    #   and recover their cpu and mem resources.
+    # Must not call when a parallel task may be allocating containers!
+    # Limit to test* names to preserve permanent containers.
+    for child in glob.glob('%s/test*' % parent):
+        print 'releasing dead container', child
+        release_dead_containers(child)  # bottom-up tree walk
+        # rmdir has no effect when container still
+        #   has tasks or sub-containers
+        os.rmdir(child)
 
 
 class cpuset:
 
-	def display(self):
-		print_one_cpuset(os.path.join(self.root, self.name))
+    def display(self):
+        print_one_cpuset(os.path.join(self.root, self.name))
 
 
-	def release(self):
-		print "releasing ", self.cpudir
-		parent_t = os.path.join(self.root, 'tasks')
-		# Transfer survivors (and self) to parent
-		for task in get_tasks(self.cpudir):
-			utils.write_one_line(parent_t, task)
-		os.rmdir(self.cpudir)
-		if os.path.exists(self.cpudir):
-			raise error.AutotestError('Could not delete container '
-						+ self.cpudir)
+    def release(self):
+        print "releasing ", self.cpudir
+        parent_t = os.path.join(self.root, 'tasks')
+        # Transfer survivors (and self) to parent
+        for task in get_tasks(self.cpudir):
+            utils.write_one_line(parent_t, task)
+        os.rmdir(self.cpudir)
+        if os.path.exists(self.cpudir):
+            raise error.AutotestError('Could not delete container '
+                                    + self.cpudir)
 
 
-	def __init__(self, name, job_size=None, job_pid=None, cpus=None,
-		     root=None):
-		"""\
-		Create a cpuset container and move job_pid into it
-		Allocate the list "cpus" of cpus to that container
+    def __init__(self, name, job_size=None, job_pid=None, cpus=None,
+                 root=None):
+        """\
+        Create a cpuset container and move job_pid into it
+        Allocate the list "cpus" of cpus to that container
 
-			name = arbitrary string tag
-			job_size = reqested memory for job in megabytes
-			job_pid = pid of job we're putting into the container
-			cpu = list of cpu indicies to associate with the cpuset
-			root = the cpuset to create this new set in
-		"""
-		if not os.path.exists(os.path.join(super_root, "cpus")):
-			raise error.AutotestError('Root container /dev/cpuset '
-						'is empty; please reboot')
+                name = arbitrary string tag
+                job_size = requested memory for job in megabytes
+                job_pid = pid of job we're putting into the container
+                cpus = list of cpu indices to associate with the cpuset
+                root = the cpuset to create this new set in
+        """
+        if not os.path.exists(os.path.join(super_root, "cpus")):
+            raise error.AutotestError('Root container /dev/cpuset '
+                                    'is empty; please reboot')
 
-		self.name = name
+        self.name = name
 
-		if root == None:
-			# default to nested in process's current container
-			root = my_container_name()[1:]
-		self.root = os.path.join(super_root, root)
-		if not os.path.exists(self.root):
-			raise error.AutotestError(('Parent container %s'
-						   '  does not exist')
-						   % self.root)
+        if root == None:
+            # default to nested in process's current container
+            root = my_container_name()[1:]
+        self.root = os.path.join(super_root, root)
+        if not os.path.exists(self.root):
+            raise error.AutotestError('Parent container %s does not exist'
+                                      % self.root)
 
-		if job_size == None:
-			# default to biggest container we can make under root
-			job_size = int( mbytes_per_mem_node() *
-			    len(available_exclusive_mem_nodes(self.root)) )
-		if not job_size:
-			raise error.AutotestError('Creating container '
-						  'with no mem')
-		self.memory = job_size
+        if job_size == None:
+            # default to biggest container we can make under root
+            job_size = int( mbytes_per_mem_node() *
+                len(available_exclusive_mem_nodes(self.root)) )
+        if not job_size:
+            raise error.AutotestError('Creating container '
+                                      'with no mem')
+        self.memory = job_size
 
-		if cpus == None:
-			# default to biggest container we can make under root
-			cpus = get_cpus(self.root)
-		if not cpus:
-			raise error.AutotestError('Creating container '
-						  'with no cpus')
-		self.cpus = cpus
+        if cpus == None:
+            # default to biggest container we can make under root
+            cpus = get_cpus(self.root)
+        if not cpus:
+            raise error.AutotestError('Creating container '
+                                      'with no cpus')
+        self.cpus = cpus
 
-		# default to the current pid
-		if not job_pid:
-			job_pid = os.getpid()
+        # default to the current pid
+        if not job_pid:
+            job_pid = os.getpid()
 
-		print "cpuset(name=%s, root=%s, job_size=%d, pid=%d)" % \
-		      (name, root, job_size, job_pid)
+        print "cpuset(name=%s, root=%s, job_size=%d, pid=%d)" % \
+              (name, root, job_size, job_pid)
 
-		self.cpudir = os.path.join(self.root, name)
-		if os.path.exists(self.cpudir):
-			self.release()   # destructively replace old
+        self.cpudir = os.path.join(self.root, name)
+        if os.path.exists(self.cpudir):
+            self.release()   # destructively replace old
 
-		nodes_needed = int(math.ceil( float(job_size) /
-					math.ceil(mbytes_per_mem_node()) ))
+        nodes_needed = int(math.ceil( float(job_size) /
+                                math.ceil(mbytes_per_mem_node()) ))
 
-		if nodes_needed > len(get_mem_nodes(self.root)):
-			raise error.AutotestError("Container's memory "
-						  "is bigger than parent's")
+        if nodes_needed > len(get_mem_nodes(self.root)):
+            raise error.AutotestError("Container's memory "
+                                      "is bigger than parent's")
 
-		while True:
-			# Pick specific free mem nodes for this cpuset
-			mems = available_exclusive_mem_nodes(self.root)
-			if len(mems) < nodes_needed:
-				raise error.AutotestError(('Existing container'
-							   ' hold %d mem nodes'
-							   ' needed by new'
-							   'container')
-							  % (nodes_needed
-							     - len(mems)))
-			mems = mems[-nodes_needed:]
-			mems_spec = ','.join(['%d' % x for x in mems])
-			os.mkdir(self.cpudir)
-			utils.write_one_line(os.path.join(self.cpudir,
-					'mem_exclusive'), '1')
-			utils.write_one_line(os.path.join(self.cpudir,
-								   'mems'),
-						      mems_spec)
-			# Above sends err msg to client.log.0, but no exception,
-			#   if mems_spec contained any now-taken nodes
-			# Confirm that siblings didn't grab our chosen mems:
-			nodes_gotten = len(get_mem_nodes(self.cpudir))
-			if nodes_gotten >= nodes_needed:
-				break   # success
-			print "cpuset %s lost race for nodes" % name, mems_spec
-			# Return any mem we did get, and try again
-			os.rmdir(self.cpudir)
+        while True:
+            # Pick specific free mem nodes for this cpuset
+            mems = available_exclusive_mem_nodes(self.root)
+            if len(mems) < nodes_needed:
+                raise error.AutotestError('Existing containers hold %d mem '
+                                          'nodes needed by new container'
+                                          % (nodes_needed - len(mems)))
+            mems = mems[-nodes_needed:]
+            mems_spec = ','.join(['%d' % x for x in mems])
+            os.mkdir(self.cpudir)
+            utils.write_one_line(os.path.join(self.cpudir,
+                                 'mem_exclusive'), '1')
+            utils.write_one_line(os.path.join(self.cpudir, 'mems'),
+                                 mems_spec)
+            # Above sends err msg to client.log.0, but no exception,
+            #   if mems_spec contained any now-taken nodes
+            # Confirm that siblings didn't grab our chosen mems:
+            nodes_gotten = len(get_mem_nodes(self.cpudir))
+            if nodes_gotten >= nodes_needed:
+                break   # success
+            print "cpuset %s lost race for nodes" % name, mems_spec
+            # Return any mem we did get, and try again
+            os.rmdir(self.cpudir)
 
-		# add specified cpu cores and own task pid to container:
-		cpu_spec = ','.join(['%d' % x for x in cpus])
-		utils.write_one_line(os.path.join(self.cpudir,
-							   'cpus'),
-					      cpu_spec)
-		utils.write_one_line(os.path.join(self.cpudir,
-							   'tasks'),
-					      "%d" % job_pid)
-		self.display()
+        # add specified cpu cores and own task pid to container:
+        cpu_spec = ','.join(['%d' % x for x in cpus])
+        utils.write_one_line(os.path.join(self.cpudir, 'cpus'), cpu_spec)
+        utils.write_one_line(os.path.join(self.cpudir, 'tasks'),
+                             "%d" % job_pid)
+        self.display()
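
A hedged usage sketch of the cpuset class above, assuming /dev/cpuset is
mounted and populated (the tag, size and cpu list below are made up; the
test* prefix keeps the container eligible for release_dead_containers):

    # carve out a 512 MB, two-cpu container for the current process
    c = cpuset('test_mytag', job_size=512, cpus=[0, 1])
    try:
        pass            # run the contained workload here
    finally:
        c.release()     # move survivors back to the parent, then rmdir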
diff --git a/client/bin/fd_stack.py b/client/bin/fd_stack.py
index c377cf0..e0f3f4c 100755
--- a/client/bin/fd_stack.py
+++ b/client/bin/fd_stack.py
@@ -3,137 +3,137 @@
 import sys, os
 
 class fd_stack:
-	"""a stack of fd redirects
+    """a stack of fd redirects
 
-	Redirects cause existing fd's to be pushed on the stack; restore()
-	causes the current set of redirects to be popped, restoring the previous
-	filehandle destinations.
+    Redirects cause existing fd's to be pushed on the stack; restore()
+    causes the current set of redirects to be popped, restoring the previous
+    filehandle destinations.
 
-	Note that we need to redirect both the sys.stdout type descriptor
-	(which print, etc use) and the low level OS numbered descriptor
-	which os.system() etc use.
-	"""
+    Note that we need to redirect both the sys.stdout type descriptor
+    (which print, etc use) and the low level OS numbered descriptor
+    which os.system() etc use.
+    """
 
-	def __init__(self, fd, filehandle):
-		self.fd = fd				# eg 1
-		self.filehandle = filehandle		# eg sys.stdout
-		self.stack = [(fd, filehandle)]
+    def __init__(self, fd, filehandle):
+        self.fd = fd                            # eg 1
+        self.filehandle = filehandle            # eg sys.stdout
+        self.stack = [(fd, filehandle)]
 
 
-	def update_handle(self, new):
-		if (self.filehandle == sys.stdout):
-			sys.stdout = new
-		if (self.filehandle == sys.stderr):
-			sys.stderr = new
-		self.filehandle = new
+    def update_handle(self, new):
+        if (self.filehandle == sys.stdout):
+            sys.stdout = new
+        if (self.filehandle == sys.stderr):
+            sys.stderr = new
+        self.filehandle = new
 
-	def redirect(self, filename):
-		"""Redirect output to the specified file
+    def redirect(self, filename):
+        """Redirect output to the specified file
 
-		Overwrites the previous contents, if any.	
-		"""
-		self.filehandle.flush()
-		fdcopy = os.dup(self.fd)
-		self.stack.append( (fdcopy, self.filehandle, 0) )
-		# self.filehandle = file(filename, 'w')
-		if (os.path.isfile(filename)):
-			newfd = os.open(filename, os.O_WRONLY)
-		else:
-			newfd = os.open(filename, os.O_WRONLY | os.O_CREAT)
-		os.dup2(newfd, self.fd)
-		os.close(newfd)
-		self.update_handle(os.fdopen(self.fd, 'w'))
+        Overwrites the previous contents, if any.
+        """
+        self.filehandle.flush()
+        fdcopy = os.dup(self.fd)
+        self.stack.append( (fdcopy, self.filehandle, 0) )
+        # self.filehandle = file(filename, 'w')
+        if (os.path.isfile(filename)):
+            newfd = os.open(filename, os.O_WRONLY)
+        else:
+            newfd = os.open(filename, os.O_WRONLY | os.O_CREAT)
+        os.dup2(newfd, self.fd)
+        os.close(newfd)
+        self.update_handle(os.fdopen(self.fd, 'w'))
 
 
-	def tee_redirect(self, filename):
-		"""Tee output to the specified file
+    def tee_redirect(self, filename):
+        """Tee output to the specified file
 
-		Overwrites the previous contents, if any.	
-		"""
-		self.filehandle.flush()
-		#print_to_tty("tee_redirect to " + filename)
-		#where_art_thy_filehandles()
-		fdcopy = os.dup(self.fd)
-		r, w = os.pipe()
-		pid = os.fork()
-		if pid:			# parent
-			os.close(r)
-			os.dup2(w, self.fd)
-			os.close(w)
-			self.stack.append( (fdcopy, self.filehandle, pid) )
-			self.update_handle(os.fdopen(self.fd, 'w', 0))
-			#where_art_thy_filehandles()
-			#print_to_tty("done tee_redirect to " + filename)
-		else:			# child
-			os.close(w)
-			os.dup2(r, 0)
-			os.dup2(fdcopy, 1)
-			os.close(r)
-			os.close(fdcopy)
-			os.execlp('tee', 'tee', '-a', filename)
+        Overwrites the previous contents, if any.
+        """
+        self.filehandle.flush()
+        #print_to_tty("tee_redirect to " + filename)
+        #where_art_thy_filehandles()
+        fdcopy = os.dup(self.fd)
+        r, w = os.pipe()
+        pid = os.fork()
+        if pid:                 # parent
+            os.close(r)
+            os.dup2(w, self.fd)
+            os.close(w)
+            self.stack.append( (fdcopy, self.filehandle, pid) )
+            self.update_handle(os.fdopen(self.fd, 'w', 0))
+            #where_art_thy_filehandles()
+            #print_to_tty("done tee_redirect to " + filename)
+        else:                   # child
+            os.close(w)
+            os.dup2(r, 0)
+            os.dup2(fdcopy, 1)
+            os.close(r)
+            os.close(fdcopy)
+            os.execlp('tee', 'tee', '-a', filename)
 
-	
-	def restore(self):
-		"""unredirect one level"""
-		self.filehandle.flush()
-		# print_to_tty("ENTERING RESTORE %d" % self.fd)
-		# where_art_thy_filehandles()
-		(old_fd, old_filehandle, pid) = self.stack.pop()
-		# print_to_tty("old_fd %d" % old_fd)
-		# print_to_tty("self.fd %d" % self.fd)
-		self.filehandle.close()   # seems to close old_fd as well.
-		if pid:
-			os.waitpid(pid, 0)
-		# where_art_thy_filehandles()
-		os.dup2(old_fd, self.fd)
-		# print_to_tty("CLOSING FD %d" % old_fd)
-		os.close(old_fd)
-		# where_art_thy_filehandles()
-		self.update_handle(old_filehandle)
-		# where_art_thy_filehandles()
-		# print_to_tty("EXIT RESTORE %d" % self.fd)
+
+    def restore(self):
+        """unredirect one level"""
+        self.filehandle.flush()
+        # print_to_tty("ENTERING RESTORE %d" % self.fd)
+        # where_art_thy_filehandles()
+        (old_fd, old_filehandle, pid) = self.stack.pop()
+        # print_to_tty("old_fd %d" % old_fd)
+        # print_to_tty("self.fd %d" % self.fd)
+        self.filehandle.close()   # seems to close old_fd as well.
+        if pid:
+            os.waitpid(pid, 0)
+        # where_art_thy_filehandles()
+        os.dup2(old_fd, self.fd)
+        # print_to_tty("CLOSING FD %d" % old_fd)
+        os.close(old_fd)
+        # where_art_thy_filehandles()
+        self.update_handle(old_filehandle)
+        # where_art_thy_filehandles()
+        # print_to_tty("EXIT RESTORE %d" % self.fd)
 
 
 def tee_output_logdir(fn):
-	"""\
-	Method decorator for a class to tee the output to the objects log_dir.
-	"""
-	def tee_logdir_wrapper(self, *args, **dargs):
-		self.job.stdout.tee_redirect(os.path.join(self.log_dir, 'stdout'))
-		self.job.stderr.tee_redirect(os.path.join(self.log_dir, 'stderr'))
-		try:
-			result = fn(self, *args, **dargs)
-		finally:
-			self.job.stderr.restore()
-			self.job.stdout.restore()
-		return result
-	return tee_logdir_wrapper
+    """\
+    Method decorator for a class to tee the output to the object's log_dir.
+    """
+    def tee_logdir_wrapper(self, *args, **dargs):
+        self.job.stdout.tee_redirect(os.path.join(self.log_dir, 'stdout'))
+        self.job.stderr.tee_redirect(os.path.join(self.log_dir, 'stderr'))
+        try:
+            result = fn(self, *args, **dargs)
+        finally:
+            self.job.stderr.restore()
+            self.job.stdout.restore()
+        return result
+    return tee_logdir_wrapper
 
 
 def __mark(filename, msg):
-	file = open(filename, 'a')
-	file.write(msg)
-	file.close()
+    f = open(filename, 'a')
+    f.write(msg)
+    f.close()
 
 
 def tee_output_logdir_mark(fn):
-	def tee_logdir_mark_wrapper(self, *args, **dargs):
-		mark = self.__class__.__name__ + "." + fn.__name__
-		outfile = os.path.join(self.log_dir, 'stdout')
-		errfile = os.path.join(self.log_dir, 'stderr')
-		__mark(outfile, "--- START " + mark + " ---\n")
-		__mark(errfile, "--- START " + mark + " ---\n")
-		self.job.stdout.tee_redirect(outfile)
-		self.job.stderr.tee_redirect(errfile)
-		try:
-			result = fn(self, *args, **dargs)
-		finally:
-			self.job.stderr.restore()
-			self.job.stdout.restore()
-			__mark(outfile, "--- END " + mark + " ---\n")
-			__mark(errfile, "--- END " + mark + " ---\n")
+    def tee_logdir_mark_wrapper(self, *args, **dargs):
+        mark = self.__class__.__name__ + "." + fn.__name__
+        outfile = os.path.join(self.log_dir, 'stdout')
+        errfile = os.path.join(self.log_dir, 'stderr')
+        __mark(outfile, "--- START " + mark + " ---\n")
+        __mark(errfile, "--- START " + mark + " ---\n")
+        self.job.stdout.tee_redirect(outfile)
+        self.job.stderr.tee_redirect(errfile)
+        try:
+            result = fn(self, *args, **dargs)
+        finally:
+            self.job.stderr.restore()
+            self.job.stdout.restore()
+            __mark(outfile, "--- END " + mark + " ---\n")
+            __mark(errfile, "--- END " + mark + " ---\n")
 
-		return result
+        return result
 
-	tee_logdir_mark_wrapper.__name__ = fn.__name__
-	return tee_logdir_mark_wrapper
+    tee_logdir_mark_wrapper.__name__ = fn.__name__
+    return tee_logdir_mark_wrapper
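
For reference, fd_stack is driven as a push/pop pair around a block of
output, exactly as the two decorators above do; a minimal sketch (the log
path is made up, and the import assumes fd_stack.py is on sys.path):

    import sys
    from fd_stack import fd_stack

    stdout_stack = fd_stack(1, sys.stdout)      # wrap fd 1 / sys.stdout
    stdout_stack.redirect('/tmp/phase1.log')    # push: output now -> file
    print "captured in phase1.log"
    stdout_stack.restore()                      # pop: previous destination
    print "back on the original stdout"

redirect() replaces the destination outright, while tee_redirect() forks a
tee child so the same bytes reach both the file and the previous
destination; restore() undoes either, one level at a time.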
diff --git a/client/bin/filesystem.py b/client/bin/filesystem.py
index 2c24141..47c8c44 100755
--- a/client/bin/filesystem.py
+++ b/client/bin/filesystem.py
@@ -5,170 +5,170 @@
 from autotest_lib.client.common_lib import error, utils
 
 def list_mount_devices():
-	devices = []
-	# list mounted filesystems
-	for line in utils.system_output('mount').splitlines():
-		devices.append(line.split()[0])
-	# list mounted swap devices
-	for line in utils.system_output('swapon -s').splitlines():
-		if line.startswith('/'):	# skip header line
-			devices.append(line.split()[0])
-	return devices
+    devices = []
+    # list mounted filesystems
+    for line in utils.system_output('mount').splitlines():
+        devices.append(line.split()[0])
+    # list mounted swap devices
+    for line in utils.system_output('swapon -s').splitlines():
+        if line.startswith('/'):        # skip header line
+            devices.append(line.split()[0])
+    return devices
 
 
 def list_mount_points():
-	mountpoints = []
-	for line in utils.system_output('mount').splitlines():
-		mountpoints.append(line.split()[2])
-	return mountpoints 
+    mountpoints = []
+    for line in utils.system_output('mount').splitlines():
+        mountpoints.append(line.split()[2])
+    return mountpoints
 
 
 class filesystem:
-	"""
-	Class for handling filesystems
-	"""
+    """
+    Class for handling filesystems
+    """
 
-	def __init__(self, job, device, mountpoint, loop_size = 0):
-		"""
-		device should be able to be a file as well
-		which we mount as loopback
+    def __init__(self, job, device, mountpoint, loop_size = 0):
+        """
+        device may also be a regular file, which we then
+        mount as loopback
 
-		device
-			The device in question (eg "/dev/hda2")
-		mountpoint
-			Default mountpoint for the device.
-		loop_size
-			size of loopback device (in MB)
-		"""
+        device
+                The device in question (eg "/dev/hda2")
+        mountpoint
+                Default mountpoint for the device.
+        loop_size
+                size of loopback device (in MB)
+        """
 
-		part = re.compile(r'^part(\d+)$')
-		m = part.match(device)
-		if m:
-			number = int(m.groups()[0])
-			partitions = job.config_get('filesystem.partitions')
-			try:
-				device = partitions[number]
-			except:
-				raise NameError("Partition '" + device + "' not available")
+        part = re.compile(r'^part(\d+)$')
+        m = part.match(device)
+        if m:
+            number = int(m.groups()[0])
+            partitions = job.config_get('filesystem.partitions')
+            try:
+                device = partitions[number]
+            except:
+                raise NameError("Partition '" + device + "' not available")
 
-		self.device = device
-		self.mountpoint = mountpoint
-		self.job = job
-		self.fstype = None
-		self.loop = loop_size
-		if self.loop:
-			utils.system('dd if=/dev/zero of=%s bs=1M count=%d' % \
-							(device, loop_size))
+        self.device = device
+        self.mountpoint = mountpoint
+        self.job = job
+        self.fstype = None
+        self.loop = loop_size
+        if self.loop:
+            utils.system('dd if=/dev/zero of=%s bs=1M count=%d' % \
+                                            (device, loop_size))
 
 
-	def mkfs(self, fstype = 'ext2', args = ''):
-		"""
-		Format a partition to fstype
-		"""
-		if list_mount_devices().count(self.device):
-			raise NameError('Attempted to format mounted device')
-		if fstype == 'xfs':
-			args += ' -f'
-		if self.loop:
-			# BAH. Inconsistent mkfs syntax SUCKS.
-			if fstype == 'ext2' or fstype == 'ext3':
-				args += ' -F'
-			if fstype == 'reiserfs':
-				args += ' -f'
-		args = args.lstrip()
-		mkfs_cmd = "mkfs -t %s %s %s" % (fstype, args, self.device)
-		print mkfs_cmd
-		sys.stdout.flush()
-		try:
-			utils.system("yes | " + mkfs_cmd)
-		except:
-			self.job.record('FAIL', None, mkfs_cmd, error.format_error())
-			raise
-		else:
-			self.job.record('GOOD', None, mkfs_cmd)
-			self.fstype = fstype
+    def mkfs(self, fstype = 'ext2', args = ''):
+        """
+        Format a partition to fstype
+        """
+        if list_mount_devices().count(self.device):
+            raise NameError('Attempted to format mounted device')
+        if fstype == 'xfs':
+            args += ' -f'
+        if self.loop:
+            # BAH. Inconsistent mkfs syntax SUCKS.
+            if fstype == 'ext2' or fstype == 'ext3':
+                args += ' -F'
+            if fstype == 'reiserfs':
+                args += ' -f'
+        args = args.lstrip()
+        mkfs_cmd = "mkfs -t %s %s %s" % (fstype, args, self.device)
+        print mkfs_cmd
+        sys.stdout.flush()
+        try:
+            utils.system("yes | " + mkfs_cmd)
+        except:
+            self.job.record('FAIL', None, mkfs_cmd, error.format_error())
+            raise
+        else:
+            self.job.record('GOOD', None, mkfs_cmd)
+            self.fstype = fstype
 
 
-	def fsck(self, args = '-n'):
-		# I hate reiserfstools.
-		# Requires an explit Yes for some inane reason
-		fsck_cmd = 'fsck %s %s' % (self.device, args)
-		if self.fstype == 'reiserfs':
-			fsck_cmd = 'yes "Yes" | ' + fsck_cmd
-		print fsck_cmd
-		sys.stdout.flush()
-		try:
-			utils.system("yes | " + fsck_cmd)
-		except:
-			self.job.record('FAIL', None, fsck_cmd, error.format_error())
-			raise
-		else:
-			self.job.record('GOOD', None, fsck_cmd)
-
-	
-	def mount(self, mountpoint = None, args = ''):
-		if self.fstype:
-			args += ' -t ' + self.fstype
-		if self.loop:
-			args += ' -o loop'
-		args = args.lstrip()
-
-		if not mountpoint:
-			mountpoint = self.mountpoint
-		mount_cmd = "mount %s %s %s" % (args, self.device, mountpoint)
-
-		if list_mount_devices().count(self.device):
-			err = 'Attempted to mount mounted device'
-			self.job.record('FAIL', None, mount_cmd, err)
-			raise NameError(err)
-		if list_mount_points().count(mountpoint):
-			err = 'Attempted to mount busy mountpoint'
-			self.job.record('FAIL', None, mount_cmd, err)
-			raise NameError(err)
-
-		print mount_cmd
-		sys.stdout.flush()
-		try:
-			utils.system(mount_cmd)
-		except:
-			self.job.record('FAIL', None, mount_cmd, error.format_error())
-			raise
-		else:
-			self.job.record('GOOD', None, mount_cmd)
+    def fsck(self, args = '-n'):
+        # I hate reiserfstools.
+        # Requires an explicit Yes for some inane reason
+        fsck_cmd = 'fsck %s %s' % (self.device, args)
+        if self.fstype == 'reiserfs':
+            fsck_cmd = 'yes "Yes" | ' + fsck_cmd
+        print fsck_cmd
+        sys.stdout.flush()
+        try:
+            utils.system("yes | " + fsck_cmd)
+        except:
+            self.job.record('FAIL', None, fsck_cmd, error.format_error())
+            raise
+        else:
+            self.job.record('GOOD', None, fsck_cmd)
 
 
-	def unmount(self, handle=None):
-		if not handle:
-			handle = self.device
-		umount_cmd = "umount " + handle
-		print umount_cmd
-		sys.stdout.flush()
-		try:
-			utils.system(umount_cmd)
-		except:
-			self.job.record('FAIL', None, umount_cmd, error.format_error())
-			raise
-		else:
-			self.job.record('GOOD', None, umount_cmd)
+    def mount(self, mountpoint = None, args = ''):
+        if self.fstype:
+            args += ' -t ' + self.fstype
+        if self.loop:
+            args += ' -o loop'
+        args = args.lstrip()
+
+        if not mountpoint:
+            mountpoint = self.mountpoint
+        mount_cmd = "mount %s %s %s" % (args, self.device, mountpoint)
+
+        if list_mount_devices().count(self.device):
+            err = 'Attempted to mount mounted device'
+            self.job.record('FAIL', None, mount_cmd, err)
+            raise NameError(err)
+        if list_mount_points().count(mountpoint):
+            err = 'Attempted to mount busy mountpoint'
+            self.job.record('FAIL', None, mount_cmd, err)
+            raise NameError(err)
+
+        print mount_cmd
+        sys.stdout.flush()
+        try:
+            utils.system(mount_cmd)
+        except:
+            self.job.record('FAIL', None, mount_cmd, error.format_error())
+            raise
+        else:
+            self.job.record('GOOD', None, mount_cmd)
 
 
-	def get_io_scheduler_list(self, device_name):
-		names = open(self.__sched_path(device_name)).read()
-		return names.translate(string.maketrans('[]', '  ')).split()
+    def unmount(self, handle=None):
+        if not handle:
+            handle = self.device
+        umount_cmd = "umount " + handle
+        print umount_cmd
+        sys.stdout.flush()
+        try:
+            utils.system(umount_cmd)
+        except:
+            self.job.record('FAIL', None, umount_cmd, error.format_error())
+            raise
+        else:
+            self.job.record('GOOD', None, umount_cmd)
 
 
-	def get_io_scheduler(self, device_name):
-		return re.split('[\[\]]',
-				open(self.__sched_path(device_name)).read())[1]
+    def get_io_scheduler_list(self, device_name):
+        names = open(self.__sched_path(device_name)).read()
+        return names.translate(string.maketrans('[]', '  ')).split()
 
 
-	def set_io_scheduler(self, device_name, name):
-		if name not in self.get_io_scheduler_list(device_name):
-			raise NameError('No such IO scheduler: %s' % name)
-		f = open(self.__sched_path(device_name), 'w')
-		print >> f, name
-		f.close()
+    def get_io_scheduler(self, device_name):
+        return re.split('[\[\]]',
+                        open(self.__sched_path(device_name)).read())[1]
 
 
-	def __sched_path(self, device_name):
-		return '/sys/block/%s/queue/scheduler' % device_name
+    def set_io_scheduler(self, device_name, name):
+        if name not in self.get_io_scheduler_list(device_name):
+            raise NameError('No such IO scheduler: %s' % name)
+        f = open(self.__sched_path(device_name), 'w')
+        print >> f, name
+        f.close()
+
+
+    def __sched_path(self, device_name):
+        return '/sys/block/%s/queue/scheduler' % device_name
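
The two scheduler methods above lean on the sysfs format, where the kernel
brackets the active elevator. A small self-contained sketch of the same
parsing on a sample line (the line itself is illustrative):

    import re, string

    line = 'noop anticipatory deadline [cfq]'   # typical sysfs contents
    # all schedulers: blank out the brackets, then split
    print line.translate(string.maketrans('[]', '  ')).split()
    # -> ['noop', 'anticipatory', 'deadline', 'cfq']
    # the active scheduler is the token between '[' and ']'
    print re.split('[\[\]]', line)[1]           # -> 'cfq'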
diff --git a/client/bin/grub.py b/client/bin/grub.py
index 68b9bc5..81ea3de 100755
--- a/client/bin/grub.py
+++ b/client/bin/grub.py
@@ -4,139 +4,139 @@
 
 import shutil
 import re
-import os 
+import os
 import os.path
 import string
 
 class grub:
-	config_locations = ['/boot/grub/grub.conf', '/boot/grub/menu.lst',
-				'/etc/grub.conf']
+    config_locations = ['/boot/grub/grub.conf', '/boot/grub/menu.lst',
+                        '/etc/grub.conf']
 
-	def __init__(self, config_file=None):
-		if config_file:
-			self.config = config_file
-		else:
-			self.config = self.detect()
-		self.read()
+    def __init__(self, config_file=None):
+        if config_file:
+            self.config = config_file
+        else:
+            self.config = self.detect()
+        self.read()
 
 
-	def read(self):
-		conf_file = file(self.config, 'r')
-		self.lines = conf_file.readlines()
-		conf_file.close()
+    def read(self):
+        conf_file = file(self.config, 'r')
+        self.lines = conf_file.readlines()
+        conf_file.close()
 
-		self.entries = []			# list of stanzas
-		self.titles = {}			# dictionary of titles
-		entry = grub_entry(-1)
-		count = 0
-		for line in self.lines:
-			if re.match(r'\s*title', line):
-				self.entries.append(entry)
-				entry = grub_entry(count)
-				count = count + 1
-				title = line.replace('title ', '')
-				title = title.rstrip('\n')
-				entry.set('title', title)
-				self.titles[title] = entry
-			# if line.startswith('initrd'):
-			if re.match(r'\s*initrd', line):
-				entry.set('initrd',
-					re.sub(r'\s*initrd\s+', '', line))
-			if re.match(r'\s*kernel', line):
-				entry.set('kernel',
-					re.sub(r'\s*kernel\s+', '', line))
-			entry.lines.append(line)
-		self.entries.append(entry)
-		self.preamble = self.entries.pop(0)	# separate preamble
+        self.entries = []                       # list of stanzas
+        self.titles = {}                        # dictionary of titles
+        entry = grub_entry(-1)
+        count = 0
+        for line in self.lines:
+            if re.match(r'\s*title', line):
+                self.entries.append(entry)
+                entry = grub_entry(count)
+                count = count + 1
+                title = line.replace('title ', '')
+                title = title.rstrip('\n')
+                entry.set('title', title)
+                self.titles[title] = entry
+            # if line.startswith('initrd'):
+            if re.match(r'\s*initrd', line):
+                entry.set('initrd',
+                        re.sub(r'\s*initrd\s+', '', line))
+            if re.match(r'\s*kernel', line):
+                entry.set('kernel',
+                        re.sub(r'\s*kernel\s+', '', line))
+            entry.lines.append(line)
+        self.entries.append(entry)
+        self.preamble = self.entries.pop(0)     # separate preamble
 
 
-	def write(self):
-		conf_file = file(self.config, 'w')
-		conf_file.write(self.preamble)
-		for entry in self.entries:
-			conf_file.write(entry.lines)
-		conf_file.close()
+    def write(self):
+        conf_file = file(self.config, 'w')
+        conf_file.writelines(self.preamble.lines)
+        for entry in self.entries:
+            conf_file.writelines(entry.lines)
+        conf_file.close()
 
 
-	def dump(self):
-		for line in self.preamble.lines:
-			print line,
-		for entry in self.entries:
-			for line in entry.lines:
-				print line,
+    def dump(self):
+        for line in self.preamble.lines:
+            print line,
+        for entry in self.entries:
+            for line in entry.lines:
+                print line,
 
-	def backup(self):
-		shutil.copyfile(self.config, self.config+'.bak')
-		restore = file(autodir + '/var/autotest.boot.restore', 'w')
-		restore.write('cp ' + self.config+'.bak ' + self.config + '\n')
-		restore.close()
+    def backup(self):
+        shutil.copyfile(self.config, self.config+'.bak')
+        restore = file(autodir + '/var/autotest.boot.restore', 'w')
+        restore.write('cp ' + self.config+'.bak ' + self.config + '\n')
+        restore.close()
 
 
-	def bootloader(self):
-		return 'grub'
+    def bootloader(self):
+        return 'grub'
 
 
-	def detect(self):
-		for config in grub.config_locations:
-			if os.path.isfile(config) and not os.path.islink(config):
-				return config
+    def detect(self):
+        for config in grub.config_locations:
+            if os.path.isfile(config) and not os.path.islink(config):
+                return config
 
 
-	def list_titles(self):
-		list = []
-		for entry in self.entries:
-			 list.append(entry.get('title'))
-		return list
+    def list_titles(self):
+        titles = []
+        for entry in self.entries:
+            titles.append(entry.get('title'))
+        return titles
 
 
-	def print_entry(self, index):
-		entry = self.entries[index]
-		entry.print_entry()
+    def print_entry(self, index):
+        entry = self.entries[index]
+        entry.print_entry()
 
 
-	def renamed_entry(self, index, newname, args=False):
-		"print a specified entry, renaming it as specified"
-		entry = self.entries[index]
-		entry.set('title', newname)
-		if args:
-			entry.set_autotest_kernel()
-		entry.print_entry()
+    def renamed_entry(self, index, newname, args=False):
+        "print a specified entry, renaming it as specified"
+        entry = self.entries[index]
+        entry.set('title', newname)
+        if args:
+            entry.set_autotest_kernel()
+        entry.print_entry()
 
 
-	def omit_markers(self, marker):
-		# print, ommitting entries between specified markers
-		print_state = True
-		for line in lines:
-			if line.count(marker):
-				print_state = not print_state
-			else:
-				if print_state:
-					print line
+    def omit_markers(self, marker):
+        # print, omitting entries between specified markers
+        print_state = True
+        for line in self.lines:
+            if line.count(marker):
+                print_state = not print_state
+            else:
+                if print_state:
+                    print line
 
 
-	def select(self, title, boot_options=None):
-		entry = self.titles[title]
-		print "grub: will boot entry %d (0-based)" % entry.index
-		self.set_default(entry.index)
-		self.set_timeout()
+    def select(self, title, boot_options=None):
+        entry = self.titles[title]
+        print "grub: will boot entry %d (0-based)" % entry.index
+        self.set_default(entry.index)
+        self.set_timeout()
 
 
-	def set_default(self, index):
-		lines = (self.preamble).lines
-		for i in range(len(lines)):
-			default = 'default %d' % index
-			lines[i] = re.sub(r'^\s*default.*', 
-						default, lines[i])
+    def set_default(self, index):
+        lines = (self.preamble).lines
+        for i in range(len(lines)):
+            default = 'default %d' % index
+            lines[i] = re.sub(r'^\s*default.*', default, lines[i])
 
 
-	def set_timeout(self):
-		lines = (self.preamble).lines
-		for i in range(len(lines)):
-			lines[i] = re.sub(r'^timeout.*/', 
-						'timeout 60', lines[i])
-			lines[i] = re.sub(r'^(\s*terminal .*--timeout)=\d+',
-						r'\1=30', lines[i])
-		
+    def set_timeout(self):
+        lines = (self.preamble).lines
+        for i in range(len(lines)):
+            lines[i] = re.sub(r'^timeout.*', 'timeout 60', lines[i])
+            lines[i] = re.sub(r'^(\s*terminal .*--timeout)=\d+',
+                              r'\1=30', lines[i])
+
 
 # ----------------------------------------------------------------------
 
@@ -145,49 +145,49 @@
 # and bits we don't understand.
 
 class grub_entry:
-	def __init__(self, count):
-		self.lines = []
-		self.fields = {}    # title, initrd, kernel, etc
-		self.index = count
+    def __init__(self, count):
+        self.lines = []
+        self.fields = {}    # title, initrd, kernel, etc
+        self.index = count
 
 
-	def set(self, field, value):
-		print "setting '%s' to '%s'" % (field, value)
-		self.fields[field] = value
-		for i in range(len(self.lines)):
-			m = re.match(r'\s*' + field + r'\s+', self.lines[i])
-			if m:
-				self.lines[i] = m.group() + value + '\n'
+    def set(self, field, value):
+        print "setting '%s' to '%s'" % (field, value)
+        self.fields[field] = value
+        for i in range(len(self.lines)):
+            m = re.match(r'\s*' + field + r'\s+', self.lines[i])
+            if m:
+                self.lines[i] = m.group() + value + '\n'
 
 
-	def get(self, field):
-		return self.fields[field]
+    def get(self, field):
+        return self.fields[field]
 
 
-	def print_entry(self):
-		print self.lines
+    def print_entry(self):
+        print self.lines
 
 
-	def set_kernel_options(self, options):
-		kernel = self.get('kernel')
-		re.sub(r'(autotest_args:).*', r'\1'+options, kernel)
-		self.set('kernel', kernel)
+    def set_kernel_options(self, options):
+        kernel = self.get('kernel')
+        kernel = re.sub(r'(autotest_args:).*', r'\1' + options, kernel)
+        self.set('kernel', kernel)
 
-	def set_autotest_kernel(self):
-		kernel_words = []
-		found_path = False
-		# Want to copy most of the entry, replacing the 'path' 
-		# part of the entry with vmlinux-autotest in the same 
-		# dir, and make sure autotest_args: is (uniquely) added
-		for word in (self.get('kernel')).split():
-			if word.startswith('--'):
-				kernel_words.append(word)
-				continue
-			if not found_path:
-				word = os.path.dirname(word)+'vmlinuz-autotest'
-				found_path = True
-			if re.match(r'auto(bench|test)_args:', word):
-				break
-			kernel_words.append(word)
-		kernel_words.append('autotest_args: ')
-		self.set('kernel', string.join(kernel_words))
+    def set_autotest_kernel(self):
+        kernel_words = []
+        found_path = False
+        # Want to copy most of the entry, replacing the 'path'
+        # part of the entry with vmlinuz-autotest in the same
+        # dir, and make sure autotest_args: is (uniquely) added
+        for word in (self.get('kernel')).split():
+            if word.startswith('--'):
+                kernel_words.append(word)
+                continue
+            if not found_path:
+                word = os.path.join(os.path.dirname(word),
+                                    'vmlinuz-autotest')
+                found_path = True
+            if re.match(r'auto(bench|test)_args:', word):
+                break
+            kernel_words.append(word)
+        kernel_words.append('autotest_args: ')
+        self.set('kernel', string.join(kernel_words))
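
A short, hedged sketch of how the two classes above fit together (it
assumes a grub config is present and that an 'autotest' entry exists):

    g = grub()                  # autodetects the config file
    print g.list_titles()       # e.g. ['linux', 'autotest']
    g.print_entry(0)            # dump the first stanza
    g.select('autotest')        # set 'default N' in the preamble
    g.write()                   # rewrite the config file in place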
diff --git a/client/bin/harness.py b/client/bin/harness.py
index 2a6378f..86441b8 100755
--- a/client/bin/harness.py
+++ b/client/bin/harness.py
@@ -8,77 +8,77 @@
 import os, sys
 
 class harness:
-	"""The NULL server harness
+    """The NULL server harness
 
-	Properties:
-		job
-			The job object for this job
-	"""
+    Properties:
+            job
+                    The job object for this job
+    """
 
-	def __init__(self, job):
-		"""
-			job
-				The job object for this job
-		"""
-		self.setup(job)
+    def __init__(self, job):
+        """
+                job
+                        The job object for this job
+        """
+        self.setup(job)
 
 
-	def setup(self, job):
-		"""
-			job
-				The job object for this job
-		"""
-		self.job = job
+    def setup(self, job):
+        """
+                job
+                        The job object for this job
+        """
+        self.job = job
 
-		configd = os.path.join(os.environ['AUTODIR'], 'configs')
-		if os.path.isdir(configd):
-			(name, dirs, files) = os.walk(configd).next()
-			job.config_set('kernel.default_config_set',
-						[ configd + '/' ] + files)
+        configd = os.path.join(os.environ['AUTODIR'], 'configs')
+        if os.path.isdir(configd):
+            (name, dirs, files) = os.walk(configd).next()
+            job.config_set('kernel.default_config_set',
+                                    [ configd + '/' ] + files)
 
 
-	def run_start(self):
-		"""A run within this job is starting"""
-		pass
+    def run_start(self):
+        """A run within this job is starting"""
+        pass
 
 
-	def run_pause(self):
-		"""A run within this job is completing (expect continue)"""
-		pass
+    def run_pause(self):
+        """A run within this job is completing (expect continue)"""
+        pass
 
 
-	def run_reboot(self):
-		"""A run within this job is performing a reboot
-		   (expect continue following reboot)
-		"""
-		pass
+    def run_reboot(self):
+        """A run within this job is performing a reboot
+           (expect continue following reboot)
+        """
+        pass
 
 
-	def run_abort(self):
-		"""A run within this job is aborting. It all went wrong"""
-		pass
+    def run_abort(self):
+        """A run within this job is aborting. It all went wrong"""
+        pass
 
 
-	def run_complete(self):
-		"""A run within this job is completing (all done)"""
-		pass
+    def run_complete(self):
+        """A run within this job is completing (all done)"""
+        pass
 
 
-	def test_status(self, status, tag):
-		"""A test within this job is completing"""
-		pass
+    def test_status(self, status, tag):
+        """A test within this job is completing"""
+        pass
 
 
-	def test_status_detail(self, code, subdir, operation, status, tag):
-		"""A test within this job is completing (detail)"""
-		pass
+    def test_status_detail(self, code, subdir, operation, status, tag):
+        """A test within this job is completing (detail)"""
+        pass
 
 
 def select(which, job):
-	if not which:
-		which = 'standalone'
-	
-	exec "import harness_%s" % (which)
-	exec "myharness = harness_%s.harness_%s(job)" % (which, which)
+    if not which:
+        which = 'standalone'
 
-	return myharness
+    exec "import harness_%s" % (which)
+    exec "myharness = harness_%s.harness_%s(job)" % (which, which)
+
+    return myharness
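
select() above derives both the module name and the class name from the
harness type string and binds them with exec. The same effect without exec
is __import__ plus getattr; a sketch of the equivalent (not what the code
does, shown only for clarity):

    def select_alt(which, job):
        if not which:
            which = 'standalone'
        module = __import__('harness_%s' % which)
        cls = getattr(module, 'harness_%s' % which)
        return cls(job)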
diff --git a/client/bin/harness_ABAT.py b/client/bin/harness_ABAT.py
index e5f2f63..8fadb2a 100755
--- a/client/bin/harness_ABAT.py
+++ b/client/bin/harness_ABAT.py
@@ -10,145 +10,145 @@
 import os, harness, time, re
 
 def autobench_load(fn):
-	disks = re.compile(r'^\s*DATS_FREE_DISKS\s*=(.*\S)\s*$')
-	parts = re.compile(r'^\s*DATS_FREE_PARTITIONS\s*=(.*\S)\s*$')
-	modules = re.compile(r'^\s*INITRD_MODULES\s*=(.*\S)\s*$')
+    disks = re.compile(r'^\s*DATS_FREE_DISKS\s*=(.*\S)\s*$')
+    parts = re.compile(r'^\s*DATS_FREE_PARTITIONS\s*=(.*\S)\s*$')
+    modules = re.compile(r'^\s*INITRD_MODULES\s*=(.*\S)\s*$')
 
-	conf = {}
+    conf = {}
 
-	try:
-		fd = file(fn, "r")
-	except:
-		return conf
-	for ln in fd.readlines():
-		m = disks.match(ln)
-		if m:
-			val = m.groups()[0]
-			conf['disks'] = val.strip('"').split()
-		m = parts.match(ln)
-		if m:
-			val = m.groups()[0]
-			conf['partitions'] = val.strip('"').split()
-		m = modules.match(ln)
-		if m:
-			val = m.groups()[0]
-			conf['modules'] = val.strip('"').split()
-	fd.close()
+    try:
+        fd = file(fn, "r")
+    except:
+        return conf
+    for ln in fd.readlines():
+        m = disks.match(ln)
+        if m:
+            val = m.groups()[0]
+            conf['disks'] = val.strip('"').split()
+        m = parts.match(ln)
+        if m:
+            val = m.groups()[0]
+            conf['partitions'] = val.strip('"').split()
+        m = modules.match(ln)
+        if m:
+            val = m.groups()[0]
+            conf['modules'] = val.strip('"').split()
+    fd.close()
 
-	return conf
+    return conf
 
 
 class harness_ABAT(harness.harness):
-	"""The ABAT server harness
+    """The ABAT server harness
 
-	Properties:
-		job
-			The job object for this job
-	"""
+    Properties:
+            job
+                    The job object for this job
+    """
 
-	def __init__(self, job):
-		"""
-			job
-				The job object for this job
-		"""
-		self.setup(job)
+    def __init__(self, job):
+        """
+                job
+                        The job object for this job
+        """
+        self.setup(job)
 
-		if 'ABAT_STATUS' in os.environ:
-			self.status = file(os.environ['ABAT_STATUS'], "w")
-		else:
-			self.status = None
+        if 'ABAT_STATUS' in os.environ:
+            self.status = file(os.environ['ABAT_STATUS'], "w")
+        else:
+            self.status = None
 
 
-	def __send(self, msg):
-		if self.status:
-			msg = msg.rstrip()
-			self.status.write(msg + "\n")
-			self.status.flush()
+    def __send(self, msg):
+        if self.status:
+            msg = msg.rstrip()
+            self.status.write(msg + "\n")
+            self.status.flush()
 
 
-	def __send_status(self, code, subdir, operation, msg):
-		self.__send("STATUS %s %s %s %s" % \
-					(code, subdir, operation, msg))
+    def __send_status(self, code, subdir, operation, msg):
+        self.__send("STATUS %s %s %s %s" % \
+                                (code, subdir, operation, msg))
 
 
-	def __root_device(self):
-		device = None
-		root = re.compile(r'^\S*(/dev/\S+).*\s/\s*$')
-		
-		df = utils.system_output('df -lP')
-		for line in df.split("\n"):
-			m = root.match(line)
-			if m:
-				device = m.groups()[0]
+    def __root_device(self):
+        device = None
+        root = re.compile(r'^\S*(/dev/\S+).*\s/\s*$')
 
-		return device
+        df = utils.system_output('df -lP')
+        for line in df.split("\n"):
+            m = root.match(line)
+            if m:
+                device = m.groups()[0]
+
+        return device
 
 
-	def run_start(self):
-		"""A run within this job is starting"""
-		self.__send_status('GOOD', '----', '----', 'run starting')
+    def run_start(self):
+        """A run within this job is starting"""
+        self.__send_status('GOOD', '----', '----', 'run starting')
 
-		# Load up the autobench.conf if it exists.
-		conf = autobench_load("/etc/autobench.conf")
-		if 'partitions' in conf:
-			self.job.config_set('filesystem.partitions',
-				conf['partitions'])
+        # Load up the autobench.conf if it exists.
+        conf = autobench_load("/etc/autobench.conf")
+        if 'partitions' in conf:
+            self.job.config_set('filesystem.partitions',
+                    conf['partitions'])
 
-		# Search the boot loader configuration for the autobench entry,
-		# and extract its args.
-		entry_args = None
-		args = None
-		for line in self.job.bootloader.info('all').split('\n'):
-			if line.startswith('args'):
-				entry_args = line.split(None, 2)[2]
-			if line.startswith('title'):
-				title = line.split()[2]
-				if title == 'autobench':
-					args = entry_args
+        # Search the boot loader configuration for the autobench entry,
+        # and extract its args.
+        entry_args = None
+        args = None
+        for line in self.job.bootloader.info('all').split('\n'):
+            if line.startswith('args'):
+                entry_args = line.split(None, 2)[2]
+            if line.startswith('title'):
+                title = line.split()[2]
+                if title == 'autobench':
+                    args = entry_args
 
-		if args:
-			args = re.sub(r'autobench_args:.*', '', args)
-			args = re.sub(r'root=\S*', '', args)
-			args += " root=" + self.__root_device()
+        if args:
+            args = re.sub(r'autobench_args:.*', '', args)
+            args = re.sub(r'root=\S*', '', args)
+            args += " root=" + self.__root_device()
 
-			self.job.config_set('boot.default_args', args)
+            self.job.config_set('boot.default_args', args)
 
-		# Turn off boot_once semantics.
-		self.job.config_set('boot.set_default', True)
+        # Turn off boot_once semantics.
+        self.job.config_set('boot.set_default', True)
 
-		# For RedHat installs we do not load up the module.conf
-		# as they cannot be builtin.  Pass them as arguments.
-		vendor = autotest_utils.get_os_vendor()
-		if vendor in ['Red Hat', 'Fedora Core'] and 'modules' in conf:
-			args = '--allow-missing'
-			for mod in conf['modules']:
-				args += " --with " + mod
-			self.job.config_set('kernel.mkinitrd_extra_args', args)
+        # For Red Hat installs we do not load up the module.conf,
+        # as those modules cannot be built in.  Pass them as arguments.
+        vendor = autotest_utils.get_os_vendor()
+        if vendor in ['Red Hat', 'Fedora Core'] and 'modules' in conf:
+            args = '--allow-missing'
+            for mod in conf['modules']:
+                args += " --with " + mod
+            self.job.config_set('kernel.mkinitrd_extra_args', args)
 
 
-	def run_reboot(self):
-		"""A run within this job is performing a reboot
-		   (expect continue following reboot)
-		"""
-		self.__send("REBOOT")
+    def run_reboot(self):
+        """A run within this job is performing a reboot
+           (expect continue following reboot)
+        """
+        self.__send("REBOOT")
 
 
-	def run_complete(self):
-		"""A run within this job is completing (all done)"""
-		self.__send("DONE")
+    def run_complete(self):
+        """A run within this job is completing (all done)"""
+        self.__send("DONE")
 
 
-	def test_status_detail(self, code, subdir, operation, msg, tag):
-		"""A test within this job is completing (detail)"""
+    def test_status_detail(self, code, subdir, operation, msg, tag):
+        """A test within this job is completing (detail)"""
 
-		# Send the first line with the status code as a STATUS message.
-		lines = msg.split("\n")
-		self.__send_status(code, subdir, operation, lines[0])
+        # Send the first line with the status code as a STATUS message.
+        lines = msg.split("\n")
+        self.__send_status(code, subdir, operation, lines[0])
 
 
-	def test_status(self, msg, tag):
-		lines = msg.split("\n")
+    def test_status(self, msg, tag):
+        lines = msg.split("\n")
 
-		# Send each line as a SUMMARY message.
-		for line in lines:
-			self.__send("SUMMARY :" + line)
+        # Send each line as a SUMMARY message.
+        for line in lines:
+            self.__send("SUMMARY :" + line)
diff --git a/client/bin/harness_simple.py b/client/bin/harness_simple.py
index 7f104da..5ff90d4 100755
--- a/client/bin/harness_simple.py
+++ b/client/bin/harness_simple.py
@@ -7,31 +7,31 @@
 import os, harness, time
 
 class harness_simple(harness.harness):
-	"""
-	The simple server harness
+    """
+    The simple server harness
 
-	Properties:
-		job
-			The job object for this job
-	"""
+    Properties:
+            job
+                    The job object for this job
+    """
 
-	def __init__(self, job):
-		"""
-			job
-				The job object for this job
-		"""
-		self.setup(job)
+    def __init__(self, job):
+        """
+                job
+                        The job object for this job
+        """
+        self.setup(job)
 
-		self.status = os.fdopen(3, 'w')
+        self.status = os.fdopen(3, 'w')
 
 
-	def test_status(self, status, tag):
-		"""A test within this job is completing"""
-		if self.status:
-			for line in status.split('\n'):
-				# prepend status messages with
-				# AUTOTEST_STATUS:tag: so that we can tell
-				# which lines were sent by the autotest client
-				pre = 'AUTOTEST_STATUS:%s:' % (tag,)
-				self.status.write(pre + line + '\n')
-				self.status.flush()
+    def test_status(self, status, tag):
+        """A test within this job is completing"""
+        if self.status:
+            for line in status.split('\n'):
+                # prepend status messages with
+                # AUTOTEST_STATUS:tag: so that we can tell
+                # which lines were sent by the autotest client
+                pre = 'AUTOTEST_STATUS:%s:' % (tag,)
+                self.status.write(pre + line + '\n')
+                self.status.flush()
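
On the receiving side, the AUTOTEST_STATUS:tag: prefix is what lets a
server separate genuine status lines from ordinary client output; a
minimal filter sketch (the function name is made up, the prefix format is
taken from the code above):

    def strip_status_prefix(line, tag):
        pre = 'AUTOTEST_STATUS:%s:' % tag
        if line.startswith(pre):
            return line[len(pre):]    # a status line for this tag
        return None                   # ordinary client output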
diff --git a/client/bin/harness_standalone.py b/client/bin/harness_standalone.py
index dccdcab..d6a4687 100644
--- a/client/bin/harness_standalone.py
+++ b/client/bin/harness_standalone.py
@@ -9,40 +9,40 @@
 import os, harness, shutil
 
 class harness_standalone(harness.harness):
-	"""The standalone server harness
+    """The standalone server harness
 
-	Properties:
-		job
-			The job object for this job
-	"""
+    Properties:
+            job
+                    The job object for this job
+    """
 
-	def __init__(self, job):
-		"""
-			job
-				The job object for this job
-		"""
-		self.autodir = os.path.abspath(os.environ['AUTODIR'])
-		self.setup(job)
+    def __init__(self, job):
+        """
+                job
+                        The job object for this job
+        """
+        self.autodir = os.path.abspath(os.environ['AUTODIR'])
+        self.setup(job)
 
-		src = job.control_get()
-		dest = os.path.join(self.autodir, 'control')
-		if os.path.abspath(src) != os.path.abspath(dest):
-			shutil.copyfile(src, dest)
-			job.control_set(dest)
+        src = job.control_get()
+        dest = os.path.join(self.autodir, 'control')
+        if os.path.abspath(src) != os.path.abspath(dest):
+            shutil.copyfile(src, dest)
+            job.control_set(dest)
 
-		print 'Symlinking init scripts'
-		rc = os.path.join(self.autodir, 'tools/autotest')
-		# see if system supports event.d versus inittab
-		if os.path.exists('/etc/event.d'):
-			# NB: assuming current runlevel is default
-			initdefault = utils.system_output('runlevel').split()[1]
-		else:
-			initdefault = utils.system_output('grep :initdefault: /etc/inittab')
-			initdefault = initdefault.split(':')[1]
+        print 'Symlinking init scripts'
+        rc = os.path.join(self.autodir, 'tools/autotest')
+        # see if system supports event.d versus inittab
+        if os.path.exists('/etc/event.d'):
+            # NB: assuming current runlevel is default
+            initdefault = utils.system_output('runlevel').split()[1]
+        else:
+            initdefault = utils.system_output('grep :initdefault: /etc/inittab')
+            initdefault = initdefault.split(':')[1]
 
-		try:
-			utils.system('ln -sf %s /etc/init.d/autotest' % rc)
-			utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % \
-							(rc, initdefault))
-		except:
-			print "WARNING: linking init scripts failed"
+        try:
+            utils.system('ln -sf %s /etc/init.d/autotest' % rc)
+            utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % \
+                                            (rc, initdefault))
+        except:
+            print "WARNING: linking init scripts failed"
diff --git a/client/bin/job.py b/client/bin/job.py
index ac2745b..870b63e 100755
--- a/client/bin/job.py
+++ b/client/bin/job.py
@@ -20,995 +20,995 @@
 """
 
 class StepError(error.AutotestError):
-	pass
+    pass
 
 
 class base_job:
-	"""The actual job against which we do everything.
+    """The actual job against which we do everything.
 
-	Properties:
-		autodir
-			The top level autotest directory (/usr/local/autotest).
-			Comes from os.environ['AUTODIR'].
-		bindir
-			<autodir>/bin/
-		libdir
-			<autodir>/lib/
-		testdir
-			<autodir>/tests/
-		site_testdir
-			<autodir>/site_tests/
-		profdir
-			<autodir>/profilers/
-		tmpdir
-			<autodir>/tmp/
-		resultdir
-			<autodir>/results/<jobtag>
-		stdout
-			fd_stack object for stdout
-		stderr
-			fd_stack object for stderr
-		profilers
-			the profilers object for this job
-		harness
-			the server harness object for this job
-		config
-			the job configuration for this job
-	"""
+    Properties:
+            autodir
+                    The top level autotest directory (/usr/local/autotest).
+                    Comes from os.environ['AUTODIR'].
+            bindir
+                    <autodir>/bin/
+            libdir
+                    <autodir>/lib/
+            testdir
+                    <autodir>/tests/
+            site_testdir
+                    <autodir>/site_tests/
+            profdir
+                    <autodir>/profilers/
+            tmpdir
+                    <autodir>/tmp/
+            resultdir
+                    <autodir>/results/<jobtag>
+            stdout
+                    fd_stack object for stdout
+            stderr
+                    fd_stack object for stderr
+            profilers
+                    the profilers object for this job
+            harness
+                    the server harness object for this job
+            config
+                    the job configuration for this job
+    """
 
-	DEFAULT_LOG_FILENAME = "status"
+    DEFAULT_LOG_FILENAME = "status"
 
-	def __init__(self, control, jobtag, cont, harness_type=None,
-			use_external_logging = False):
-		"""
-			control
-				The control file (pathname of)
-			jobtag
-				The job tag string (eg "default")
-			cont
-				If this is the continuation of this job
-			harness_type
-				An alternative server harness
-		"""
-		self.autodir = os.environ['AUTODIR']
-		self.bindir = os.path.join(self.autodir, 'bin')
-		self.libdir = os.path.join(self.autodir, 'lib')
-		self.testdir = os.path.join(self.autodir, 'tests')
-		self.site_testdir = os.path.join(self.autodir, 'site_tests')
-		self.profdir = os.path.join(self.autodir, 'profilers')
-		self.tmpdir = os.path.join(self.autodir, 'tmp')
-		self.resultdir = os.path.join(self.autodir, 'results', jobtag)
-		self.sysinfodir = os.path.join(self.resultdir, 'sysinfo')
-		self.control = os.path.abspath(control)
-		self.state_file = self.control + '.state'
-		self.current_step_ancestry = []
-		self.next_step_index = 0 
-		self.__load_state()
+    def __init__(self, control, jobtag, cont, harness_type=None,
+                    use_external_logging = False):
+        """
+                control
+                        The control file (pathname of)
+                jobtag
+                        The job tag string (eg "default")
+                cont
+                        If this is the continuation of this job
+                harness_type
+                        An alternative server harness
+        """
+        self.autodir = os.environ['AUTODIR']
+        self.bindir = os.path.join(self.autodir, 'bin')
+        self.libdir = os.path.join(self.autodir, 'lib')
+        self.testdir = os.path.join(self.autodir, 'tests')
+        self.site_testdir = os.path.join(self.autodir, 'site_tests')
+        self.profdir = os.path.join(self.autodir, 'profilers')
+        self.tmpdir = os.path.join(self.autodir, 'tmp')
+        self.resultdir = os.path.join(self.autodir, 'results', jobtag)
+        self.sysinfodir = os.path.join(self.resultdir, 'sysinfo')
+        self.control = os.path.abspath(control)
+        self.state_file = self.control + '.state'
+        self.current_step_ancestry = []
+        self.next_step_index = 0
+        self.__load_state()
 
-		if not cont:
-			"""
-			Don't cleanup the tmp dir (which contains the lockfile)
-			in the constructor, this would be a problem for multiple
-			jobs starting at the same time on the same client. Instead
-			do the delete at the server side. We simply create the tmp
-			directory here if it does not already exist.
-			"""
-			if not os.path.exists(self.tmpdir):
-				os.mkdir(self.tmpdir)
+        if not cont:
+            """
+            Don't clean up the tmp dir (which contains the lockfile)
+            in the constructor; this would be a problem for multiple
+            jobs starting at the same time on the same client. Instead,
+            do the delete on the server side. We simply create the tmp
+            directory here if it does not already exist.
+            """
+            if not os.path.exists(self.tmpdir):
+                os.mkdir(self.tmpdir)
 
-			results = os.path.join(self.autodir, 'results')
-			if not os.path.exists(results):
-				os.mkdir(results)
-				
-			download = os.path.join(self.testdir, 'download')
-			if not os.path.exists(download):
-				os.mkdir(download)
+            results = os.path.join(self.autodir, 'results')
+            if not os.path.exists(results):
+                os.mkdir(results)
 
-			if os.path.exists(self.resultdir):
-				utils.system('rm -rf ' 
-							+ self.resultdir)
-			os.mkdir(self.resultdir)
-			os.mkdir(self.sysinfodir)
+            download = os.path.join(self.testdir, 'download')
+            if not os.path.exists(download):
+                os.mkdir(download)
 
-			os.mkdir(os.path.join(self.resultdir, 'debug'))
-			os.mkdir(os.path.join(self.resultdir, 'analysis'))
+            if os.path.exists(self.resultdir):
+                utils.system('rm -rf ' + self.resultdir)
+            os.mkdir(self.resultdir)
+            os.mkdir(self.sysinfodir)
 
-			shutil.copyfile(self.control,
-					os.path.join(self.resultdir, 'control'))
+            os.mkdir(os.path.join(self.resultdir, 'debug'))
+            os.mkdir(os.path.join(self.resultdir, 'analysis'))
 
+            shutil.copyfile(self.control,
+                            os.path.join(self.resultdir, 'control'))
 
-		self.control = control
-		self.jobtag = jobtag
-		self.log_filename = self.DEFAULT_LOG_FILENAME
-		self.container = None
 
-		self.stdout = fd_stack.fd_stack(1, sys.stdout)
-		self.stderr = fd_stack.fd_stack(2, sys.stderr)
+        self.control = control
+        self.jobtag = jobtag
+        self.log_filename = self.DEFAULT_LOG_FILENAME
+        self.container = None
 
-		self._init_group_level()
+        self.stdout = fd_stack.fd_stack(1, sys.stdout)
+        self.stderr = fd_stack.fd_stack(2, sys.stderr)
 
-		self.config = config.config(self)
+        self._init_group_level()
 
-		self.harness = harness.select(harness_type, self)
+        self.config = config.config(self)
 
-		self.profilers = profilers.profilers(self)
+        self.harness = harness.select(harness_type, self)
 
-		try:
-			tool = self.config_get('boottool.executable')
-			self.bootloader = boottool.boottool(tool)
-		except:
-			pass
+        self.profilers = profilers.profilers(self)
 
-		sysinfo.log_per_reboot_data(self.sysinfodir)
+        try:
+            tool = self.config_get('boottool.executable')
+            self.bootloader = boottool.boottool(tool)
+        except:
+            pass
 
-		if not cont:
-			self.record('START', None, None)
-			self._increment_group_level()
+        sysinfo.log_per_reboot_data(self.sysinfodir)
 
-		self.harness.run_start()
-		
-		if use_external_logging:
-			self.enable_external_logging()
+        if not cont:
+            self.record('START', None, None)
+            self._increment_group_level()
 
-		# load the max disk usage rate - default to no monitoring
-		self.max_disk_usage_rate = self.get_state('__monitor_disk',
-							  default=0.0)
+        self.harness.run_start()
 
+        if use_external_logging:
+            self.enable_external_logging()
 
-	def monitor_disk_usage(self, max_rate):
-		"""\
-		Signal that the job should monitor disk space usage on /
-		and generate a warning if a test uses up disk space at a
-		rate exceeding 'max_rate'.
+        # load the max disk usage rate - default to no monitoring
+        self.max_disk_usage_rate = self.get_state('__monitor_disk',
+                                                  default=0.0)
 
-		Parameters:
-		     max_rate - the maximium allowed rate of disk consumption
-		                during a test, in MB/hour, or 0 to indicate
-				no limit.
-		"""
-		self.set_state('__monitor_disk', max_rate)
-		self.max_disk_usage_rate = max_rate
 
+    def monitor_disk_usage(self, max_rate):
+        """\
+        Signal that the job should monitor disk space usage on /
+        and generate a warning if a test uses up disk space at a
+        rate exceeding 'max_rate'.
 
-	def relative_path(self, path):
-		"""\
-		Return a patch relative to the job results directory
-		"""
-		head = len(self.resultdir) + 1     # remove the / inbetween
-		return path[head:]
+        Parameters:
+             max_rate - the maximium allowed rate of disk consumption
+                        during a test, in MB/hour, or 0 to indicate
+                        no limit.
+        """
+        self.set_state('__monitor_disk', max_rate)
+        self.max_disk_usage_rate = max_rate
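+
+        # Illustrative usage (a sketch, not part of this change):
+        #     job.monitor_disk_usage(100)  # warn above 100 MB/hour on /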
 
 
-	def control_get(self):
-		return self.control
+    def relative_path(self, path):
+        """\
+        Return a path relative to the job results directory
+        """
+        head = len(self.resultdir) + 1     # remove the / in between
+        return path[head:]
 
 
-	def control_set(self, control):
-		self.control = os.path.abspath(control)
+    def control_get(self):
+        return self.control
 
 
-	def harness_select(self, which):
-		self.harness = harness.select(which, self)
+    def control_set(self, control):
+        self.control = os.path.abspath(control)
 
 
-	def config_set(self, name, value):
-		self.config.set(name, value)
+    def harness_select(self, which):
+        self.harness = harness.select(which, self)
 
 
-	def config_get(self, name):
-		return self.config.get(name)
+    def config_set(self, name, value):
+        self.config.set(name, value)
 
-	def setup_dirs(self, results_dir, tmp_dir):
-		if not tmp_dir:
-			tmp_dir = os.path.join(self.tmpdir, 'build')
-		if not os.path.exists(tmp_dir):
-			os.mkdir(tmp_dir)
-		if not os.path.isdir(tmp_dir):
-			e_msg = "Temp dir (%s) is not a dir - args backwards?" % self.tmpdir
-			raise ValueError(e_msg)
 
-		# We label the first build "build" and then subsequent ones 
-		# as "build.2", "build.3", etc. Whilst this is a little bit 
-		# inconsistent, 99.9% of jobs will only have one build 
-		# (that's not done as kernbench, sparse, or buildtest),
-		# so it works out much cleaner. One of life's comprimises.
-		if not results_dir:
-			results_dir = os.path.join(self.resultdir, 'build')
-			i = 2
-			while os.path.exists(results_dir):
-				results_dir = os.path.join(self.resultdir, 'build.%d' % i)
-				i += 1
-		if not os.path.exists(results_dir):
-			os.mkdir(results_dir)
+    def config_get(self, name):
+        return self.config.get(name)
 
-		return (results_dir, tmp_dir)
+    def setup_dirs(self, results_dir, tmp_dir):
+        if not tmp_dir:
+            tmp_dir = os.path.join(self.tmpdir, 'build')
+        if not os.path.exists(tmp_dir):
+            os.mkdir(tmp_dir)
+        if not os.path.isdir(tmp_dir):
+            e_msg = "Temp dir (%s) is not a dir - args backwards?" % tmp_dir
+            raise ValueError(e_msg)
 
+        # We label the first build "build" and then subsequent ones
+        # as "build.2", "build.3", etc. Whilst this is a little bit
+        # inconsistent, 99.9% of jobs will only have one build
+        # (that's not done as kernbench, sparse, or buildtest),
+        # so it works out much cleaner. One of life's compromises.
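+        # e.g. (illustrative) three builds in one job end up in
+        #     <resultdir>/build, <resultdir>/build.2, <resultdir>/build.3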
+        if not results_dir:
+            results_dir = os.path.join(self.resultdir, 'build')
+            i = 2
+            while os.path.exists(results_dir):
+                results_dir = os.path.join(self.resultdir, 'build.%d' % i)
+                i += 1
+        if not os.path.exists(results_dir):
+            os.mkdir(results_dir)
 
-	def xen(self, base_tree, results_dir = '', tmp_dir = '', leave = False, \
-				kjob = None ):
-		"""Summon a xen object"""
-		(results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
-		build_dir = 'xen'
-		return xen.xen(self, base_tree, results_dir, tmp_dir, build_dir, leave, kjob)
+        return (results_dir, tmp_dir)
 
 
-	def kernel(self, base_tree, results_dir = '', tmp_dir = '', leave = False):
-		"""Summon a kernel object"""
-		(results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
-		build_dir = 'linux'
-		return kernel.auto_kernel(self, base_tree, results_dir,
-					  tmp_dir, build_dir, leave)
+    def xen(self, base_tree, results_dir = '', tmp_dir = '', leave = False,
+            kjob = None):
+        """Summon a xen object"""
+        (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
+        build_dir = 'xen'
+        return xen.xen(self, base_tree, results_dir, tmp_dir, build_dir, leave, kjob)
 
 
-	def barrier(self, *args, **kwds):
-		"""Create a barrier object"""
-		return barrier.barrier(*args, **kwds)
+    def kernel(self, base_tree, results_dir = '', tmp_dir = '', leave = False):
+        """Summon a kernel object"""
+        (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
+        build_dir = 'linux'
+        return kernel.auto_kernel(self, base_tree, results_dir,
+                                  tmp_dir, build_dir, leave)
 
 
-	def setup_dep(self, deps): 
-		"""Set up the dependencies for this test.
-		
-		deps is a list of libraries required for this test.
-		"""
-		for dep in deps: 
-			try: 
-				os.chdir(os.path.join(self.autodir, 'deps', dep))
-				utils.system('./' + dep + '.py')
-			except: 
-				err = "setting up dependency " + dep + "\n"
-				raise error.UnhandledError(err)
+    def barrier(self, *args, **kwds):
+        """Create a barrier object"""
+        return barrier.barrier(*args, **kwds)
 
 
-	def __runtest(self, url, tag, args, dargs):
-		try:
-			l = lambda : test.runtest(self, url, tag, args, dargs)
-			pid = parallel.fork_start(self.resultdir, l)
-			parallel.fork_waitfor(self.resultdir, pid)
-		except error.AutotestError:
-			raise
-		except Exception, e:
-			msg = "Unhandled %s error occured during test\n"
-			msg %= str(e.__class__.__name__)
-			raise error.UnhandledError(msg)
+    def setup_dep(self, deps):
+        """Set up the dependencies for this test.
 
+        deps is a list of libraries required for this test.
+        """
+        for dep in deps:
+            try:
+                os.chdir(os.path.join(self.autodir, 'deps', dep))
+                utils.system('./' + dep + '.py')
+            except:
+                err = "setting up dependency " + dep + "\n"
+                raise error.UnhandledError(err)
 
-	def run_test(self, url, *args, **dargs):
-		"""Summon a test object and run it.
-		
-		tag
-			tag to add to testname
-		url
-			url of the test to run
-		"""
 
-		if not url:
-			raise TypeError("Test name is invalid. "
-			                "Switched arguments?")
-		(group, testname) = test.testname(url)
-		namelen = len(testname)
-		dargs = dargs.copy()
-		tntag = dargs.pop('tag', None)
-		if tntag:  # testname tag is included in reported test name
-			testname += '.' + tntag
-		subdir = testname
-		sdtag = dargs.pop('subdir_tag', None)
-		if sdtag:  # subdir-only tag is not included in reports
-			subdir = subdir + '.' + sdtag
-		tag = subdir[namelen+1:]    # '' if none
+    def __runtest(self, url, tag, args, dargs):
+        try:
+            l = lambda : test.runtest(self, url, tag, args, dargs)
+            pid = parallel.fork_start(self.resultdir, l)
+            parallel.fork_waitfor(self.resultdir, pid)
+        except error.AutotestError:
+            raise
+        except Exception, e:
+            msg = "Unhandled %s error occurred during test\n"
+            msg %= str(e.__class__.__name__)
+            raise error.UnhandledError(msg)
 
-		outputdir = os.path.join(self.resultdir, subdir)
-		if os.path.exists(outputdir):
-			msg = ("%s already exists, test <%s> may have"
-				" already run with tag <%s>" 
-				% (outputdir, testname, tag) )
-			raise error.TestError(msg)
-		os.mkdir(outputdir)
-	
-		container = dargs.pop('container', None)
-		if container:
-			cname = container.get('name', None)
-			if not cname:   # get old name
-				cname = container.get('container_name', None)
-			mbytes = container.get('mbytes', None)
-			if not mbytes:  # get old name
-				mbytes = container.get('mem', None) 
-			cpus  = container.get('cpus', None)
-			if not cpus:    # get old name
-				cpus  = container.get('cpu', None)
-			root  = container.get('root', None)
-			self.new_container(mbytes=mbytes, cpus=cpus, 
-					root=root, name=cname)
-			# We are running in a container now...
 
-		def log_warning(reason):
-			self.record("WARN", subdir, testname, reason)
-		@disk_usage_monitor.watch(log_warning, "/",
-					  self.max_disk_usage_rate)
-		def group_func():
-			try:
-				self.__runtest(url, tag, args, dargs)
-			except error.TestNAError, detail:
-				self.record('TEST_NA', subdir, testname,
-					    str(detail))
-				raise
-			except Exception, detail:
-				self.record('FAIL', subdir, testname,
-					    str(detail))
-				raise
-			else:
-				self.record('GOOD', subdir, testname,
-					    'completed successfully')
+    def run_test(self, url, *args, **dargs):
+        """Summon a test object and run it.
 
-		result, exc_info = self.__rungroup(subdir, testname, group_func)
-		if container:
-			self.release_container()
-		if exc_info and isinstance(exc_info[1], error.TestError):
-			return False
-		elif exc_info:
-			raise exc_info[0], exc_info[1], exc_info[2]
-		else:
-			return True
+        tag
+                tag to add to testname
+        url
+                url of the test to run
+        """
 
+        if not url:
+            raise TypeError("Test name is invalid. "
+                            "Switched arguments?")
+        (group, testname) = test.testname(url)
+        namelen = len(testname)
+        dargs = dargs.copy()
+        tntag = dargs.pop('tag', None)
+        if tntag:  # testname tag is included in reported test name
+            testname += '.' + tntag
+        subdir = testname
+        sdtag = dargs.pop('subdir_tag', None)
+        if sdtag:  # subdir-only tag is not included in reports
+            subdir = subdir + '.' + sdtag
+        tag = subdir[namelen+1:]    # '' if none
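+        # e.g. (illustrative) url='dbench', tag='a', subdir_tag='b'
+        # gives testname 'dbench.a', subdir 'dbench.a.b' and tag 'a.b'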
 
-	def __rungroup(self, subdir, testname, function, *args, **dargs):
-		"""\
-		subdir:
-		        name of the group
-		testname: 
-			name of the test to run, or support step
-		function:
-			subroutine to run
-		*args:
-			arguments for the function
+        outputdir = os.path.join(self.resultdir, subdir)
+        if os.path.exists(outputdir):
+            msg = ("%s already exists, test <%s> may have"
+                    " already run with tag <%s>"
+                    % (outputdir, testname, tag))
+            raise error.TestError(msg)
+        os.mkdir(outputdir)
 
-		Returns a 2-tuple (result, exc_info) where result
-		is the return value of function, and exc_info is
-		the sys.exc_info() of the exception thrown by the
-		function (which may be None).
-		"""
+        container = dargs.pop('container', None)
+        if container:
+            cname = container.get('name', None)
+            if not cname:   # get old name
+                cname = container.get('container_name', None)
+            mbytes = container.get('mbytes', None)
+            if not mbytes:  # get old name
+                mbytes = container.get('mem', None)
+            cpus  = container.get('cpus', None)
+            if not cpus:    # get old name
+                cpus  = container.get('cpu', None)
+            root  = container.get('root', None)
+            self.new_container(mbytes=mbytes, cpus=cpus,
+                            root=root, name=cname)
+            # We are running in a container now...
 
-		result, exc_info = None, None
-		try:
-			self.record('START', subdir, testname)
-			self._increment_group_level()
-			result = function(*args, **dargs)
-			self._decrement_group_level()
-			self.record('END GOOD', subdir, testname)
-		except error.TestNAError, e:
-			self._decrement_group_level()
-			self.record('END TEST_NA', subdir, testname, str(e))
-		except Exception, e:
-			exc_info = sys.exc_info()
-			self._decrement_group_level()
-			err_msg = str(e) + '\n' + traceback.format_exc()
-			self.record('END FAIL', subdir, testname, err_msg)
+        def log_warning(reason):
+            self.record("WARN", subdir, testname, reason)
+        @disk_usage_monitor.watch(log_warning, "/",
+                                  self.max_disk_usage_rate)
+        def group_func():
+            try:
+                self.__runtest(url, tag, args, dargs)
+            except error.TestNAError, detail:
+                self.record('TEST_NA', subdir, testname,
+                            str(detail))
+                raise
+            except Exception, detail:
+                self.record('FAIL', subdir, testname,
+                            str(detail))
+                raise
+            else:
+                self.record('GOOD', subdir, testname,
+                            'completed successfully')
 
-		return result, exc_info
+        result, exc_info = self.__rungroup(subdir, testname, group_func)
+        if container:
+            self.release_container()
+        if exc_info and isinstance(exc_info[1], error.TestError):
+            return False
+        elif exc_info:
+            raise exc_info[0], exc_info[1], exc_info[2]
+        else:
+            return True
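+
+        # Illustrative usage (a sketch, not part of this change):
+        #     job.run_test('dbench', tag='quick')
+        # runs the dbench test and reports it as 'dbench.quick'.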
 
 
-	def run_group(self, function, *args, **dargs):
-		"""\
-		function:
-			subroutine to run
-		*args:
-			arguments for the function
-		"""
+    def __rungroup(self, subdir, testname, function, *args, **dargs):
+        """\
+        subdir:
+                name of the group
+        testname:
+                name of the test to run, or support step
+        function:
+                subroutine to run
+        *args:
+                arguments for the function
 
-		# Allow the tag for the group to be specified
-		name = function.__name__
-		tag = dargs.pop('tag', None)
-		if tag:
-			name = tag
+        Returns a 2-tuple (result, exc_info) where result
+        is the return value of function, and exc_info is
+        the sys.exc_info() of the exception thrown by the
+        function (which may be None).
+        """
 
-		outputdir = os.path.join(self.resultdir, name)
-		if os.path.exists(outputdir):
-			msg = ("%s already exists, test <%s> may have"
-				" already run with tag <%s>"
-				% (outputdir, name, name) )
-			raise error.TestError(msg)
-		os.mkdir(outputdir)
+        result, exc_info = None, None
+        try:
+            self.record('START', subdir, testname)
+            self._increment_group_level()
+            result = function(*args, **dargs)
+            self._decrement_group_level()
+            self.record('END GOOD', subdir, testname)
+        except error.TestNAError, e:
+            self._decrement_group_level()
+            self.record('END TEST_NA', subdir, testname, str(e))
+        except Exception, e:
+            exc_info = sys.exc_info()
+            self._decrement_group_level()
+            err_msg = str(e) + '\n' + traceback.format_exc()
+            self.record('END FAIL', subdir, testname, err_msg)
 
-		result, exc_info = self.__rungroup(name, name, function,
-						   *args, **dargs)
+        return result, exc_info
 
-		# if there was a non-TestError exception, raise it
-		if exc_info and not isinstance(exc_info[1], error.TestError):
-			err = ''.join(traceback.format_exception(*exc_info))
-			raise error.TestError(name + ' failed\n' + err)
 
-		# pass back the actual return value from the function
-		return result
+    def run_group(self, function, *args, **dargs):
+        """\
+        function:
+                subroutine to run
+        *args:
+                arguments for the function
+        """
 
+        # Allow the tag for the group to be specified
+        name = function.__name__
+        tag = dargs.pop('tag', None)
+        if tag:
+            name = tag
 
-	def new_container(self, mbytes=None, cpus=None, root=None, name=None):
-		if not autotest_utils.grep('cpuset', '/proc/filesystems'):
-			print "Containers not enabled by latest reboot"
-			return  # containers weren't enabled in this kernel boot
-		pid = os.getpid()
-		if not name:
-			name = 'test%d' % pid  # make arbitrary unique name
-		self.container = cpuset.cpuset(name, job_size=mbytes, 
-			job_pid=pid, cpus=cpus, root=root)
-		# This job's python shell is now running in the new container
-		# and all forked test processes will inherit that container
+        outputdir = os.path.join(self.resultdir, name)
+        if os.path.exists(outputdir):
+            msg = ("%s already exists, test <%s> may have"
+                    " already run with tag <%s>"
+                    % (outputdir, name, name))
+            raise error.TestError(msg)
+        os.mkdir(outputdir)
+
+        result, exc_info = self.__rungroup(name, name, function,
+                                           *args, **dargs)
+
+        # if there was a non-TestError exception, raise it
+        if exc_info and not isinstance(exc_info[1], error.TestError):
+            err = ''.join(traceback.format_exception(*exc_info))
+            raise error.TestError(name + ' failed\n' + err)
+
+        # pass back the actual return value from the function
+        return result
+
+
+    def new_container(self, mbytes=None, cpus=None, root=None, name=None):
+        if not autotest_utils.grep('cpuset', '/proc/filesystems'):
+            print "Containers not enabled by latest reboot"
+            return  # containers weren't enabled in this kernel boot
+        pid = os.getpid()
+        if not name:
+            name = 'test%d' % pid  # make arbitrary unique name
+        self.container = cpuset.cpuset(name, job_size=mbytes,
+                job_pid=pid, cpus=cpus, root=root)
+        # This job's python shell is now running in the new container
+        # and all forked test processes will inherit that container
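+        #
+        # Illustrative usage (values are assumptions, not part of this
+        # change):
+        #     job.new_container(mbytes=512, cpus=[0, 1], name='mytest')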
+
+
+    def release_container(self):
+        if self.container:
+            self.container.release()
+            self.container = None
+
 
+    def cpu_count(self):
+        if self.container:
+            return len(self.container.cpus)
+        return autotest_utils.count_cpus()  # use total system count
 
-	def release_container(self):
-		if self.container:
-			self.container.release()
-			self.container = None
 
+    # Check the passed kernel identifier against the command line
+    # and the running kernel, abort the job on mismatch.
+    def kernel_check_ident(self, expected_when, expected_id, subdir,
+                           type = 'src', patches=[]):
+        print (("POST BOOT: checking booted kernel " +
+                "mark=%d identity='%s' type='%s'") %
+               (expected_when, expected_id, type))
 
-	def cpu_count(self):
-		if self.container:
-			return len(self.container.cpus)
-		return autotest_utils.count_cpus()  # use total system count
+        running_id = autotest_utils.running_os_ident()
 
+        cmdline = utils.read_one_line("/proc/cmdline")
 
-	# Check the passed kernel identifier against the command line
-	# and the running kernel, abort the job on missmatch.
-	def kernel_check_ident(self, expected_when, expected_id, subdir,
-			       type = 'src', patches=[]):
-		print (("POST BOOT: checking booted kernel " +
-			"mark=%d identity='%s' type='%s'") %
-		       (expected_when, expected_id, type))
+        find_sum = re.compile(r'.*IDENT=(\d+)')
+        m = find_sum.match(cmdline)
+        cmdline_when = -1
+        if m:
+            cmdline_when = int(m.groups()[0])
 
-		running_id = autotest_utils.running_os_ident()
+        # We have all the facts, see if they indicate we
+        # booted the requested kernel or not.
+        bad = False
+        if (type == 'src' and expected_id != running_id or
+            type == 'rpm' and
+            not running_id.startswith(expected_id + '::')):
+            print "check_kernel_ident: kernel identifier mismatch"
+            bad = True
+        if expected_when != cmdline_when:
+            print "check_kernel_ident: kernel command line mismatch"
+            bad = True
 
-		cmdline = utils.read_one_line("/proc/cmdline")
+        if bad:
+            print "   Expected Ident: " + expected_id
+            print "    Running Ident: " + running_id
+            print "    Expected Mark: %d" % (expected_when)
+            print "Command Line Mark: %d" % (cmdline_when)
+            print "     Command Line: " + cmdline
 
-		find_sum = re.compile(r'.*IDENT=(\d+)')
-		m = find_sum.match(cmdline)
-		cmdline_when = -1
-		if m:
-			cmdline_when = int(m.groups()[0])
+            raise error.JobError("boot failure", "reboot.verify")
 
-		# We have all the facts, see if they indicate we
-		# booted the requested kernel or not.
-		bad = False
-		if (type == 'src' and expected_id != running_id or
-		    type == 'rpm' and
-		    not running_id.startswith(expected_id + '::')):
-			print "check_kernel_ident: kernel identifier mismatch"
-			bad = True
-		if expected_when != cmdline_when:
-			print "check_kernel_ident: kernel command line mismatch"
-			bad = True
+        kernel_info = {'kernel': expected_id}
+        for i, patch in enumerate(patches):
+            kernel_info["patch%d" % i] = patch
+        self.record('GOOD', subdir, 'reboot.verify', expected_id)
+        self._decrement_group_level()
+        self.record('END GOOD', subdir, 'reboot',
+                    optional_fields=kernel_info)
 
-		if bad:
-			print "   Expected Ident: " + expected_id
-			print "    Running Ident: " + running_id
-			print "    Expected Mark: %d" % (expected_when)
-			print "Command Line Mark: %d" % (cmdline_when)
-			print "     Command Line: " + cmdline
 
-			raise error.JobError("boot failure", "reboot.verify")
+    def filesystem(self, device, mountpoint = None, loop_size = 0):
+        if not mountpoint:
+            mountpoint = self.tmpdir
+        return filesystem.filesystem(self, device, mountpoint, loop_size)
 
-		kernel_info = {'kernel': expected_id}
-		for i, patch in enumerate(patches):
-			kernel_info["patch%d" % i] = patch
-		self.record('GOOD', subdir, 'reboot.verify', expected_id)
-		self._decrement_group_level()
-		self.record('END GOOD', subdir, 'reboot',
-			    optional_fields=kernel_info)
 
+    def enable_external_logging(self):
+        pass
 
-	def filesystem(self, device, mountpoint = None, loop_size = 0):
-		if not mountpoint:
-			mountpoint = self.tmpdir
-		return filesystem.filesystem(self, device, mountpoint,loop_size)
 
-	
-	def enable_external_logging(self):
-		pass
+    def disable_external_logging(self):
+        pass
 
 
-	def disable_external_logging(self):
-		pass
-	
+    def reboot_setup(self):
+        pass
 
-	def reboot_setup(self):
-		pass
 
+    def reboot(self, tag='autotest'):
+        self.reboot_setup()
+        self.record('START', None, 'reboot')
+        self._increment_group_level()
+        self.record('GOOD', None, 'reboot.start')
+        self.harness.run_reboot()
+        default = self.config_get('boot.set_default')
+        if default:
+            self.bootloader.set_default(tag)
+        else:
+            self.bootloader.boot_once(tag)
+        cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
+        utils.system(cmd)
+        self.quit()
 
-	def reboot(self, tag='autotest'):
-		self.reboot_setup()
-		self.record('START', None, 'reboot')
-		self._increment_group_level()
-		self.record('GOOD', None, 'reboot.start')
-		self.harness.run_reboot()
-		default = self.config_get('boot.set_default')
-		if default:
-			self.bootloader.set_default(tag)
-		else:
-			self.bootloader.boot_once(tag)
-		cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
-		utils.system(cmd)
-		self.quit()
 
+    def noop(self, text):
+        print "job: noop: " + text
 
-	def noop(self, text):
-		print "job: noop: " + text
 
+    def parallel(self, *tasklist):
+        """Run tasks in parallel"""
 
-	def parallel(self, *tasklist):
-		"""Run tasks in parallel"""
+        pids = []
+        old_log_filename = self.log_filename
+        for i, task in enumerate(tasklist):
+            self.log_filename = old_log_filename + (".%d" % i)
+            task_func = lambda: task[0](*task[1:])
+            pids.append(parallel.fork_start(self.resultdir,
+                                            task_func))
 
-		pids = []
-		old_log_filename = self.log_filename
-		for i, task in enumerate(tasklist):
-			self.log_filename = old_log_filename + (".%d" % i)
-			task_func = lambda: task[0](*task[1:])
-			pids.append(parallel.fork_start(self.resultdir, 
-							task_func))
+        old_log_path = os.path.join(self.resultdir, old_log_filename)
+        old_log = open(old_log_path, "a")
+        exceptions = []
+        for i, pid in enumerate(pids):
+            # wait for the task to finish
+            try:
+                parallel.fork_waitfor(self.resultdir, pid)
+            except Exception, e:
+                exceptions.append(e)
+            # copy the logs from the subtask into the main log
+            new_log_path = old_log_path + (".%d" % i)
+            if os.path.exists(new_log_path):
+                new_log = open(new_log_path)
+                old_log.write(new_log.read())
+                new_log.close()
+                old_log.flush()
+                os.remove(new_log_path)
+        old_log.close()
 
-		old_log_path = os.path.join(self.resultdir, old_log_filename)
-		old_log = open(old_log_path, "a")
-		exceptions = []
-		for i, pid in enumerate(pids):
-			# wait for the task to finish
-			try:
-				parallel.fork_waitfor(self.resultdir, pid)
-			except Exception, e:
-				exceptions.append(e)
-			# copy the logs from the subtask into the main log
-			new_log_path = old_log_path + (".%d" % i)
-			if os.path.exists(new_log_path):
-				new_log = open(new_log_path)
-				old_log.write(new_log.read())
-				new_log.close()
-				old_log.flush()
-				os.remove(new_log_path)
-		old_log.close()
+        self.log_filename = old_log_filename
 
-		self.log_filename = old_log_filename
+        # handle any exceptions raised by the parallel tasks
+        if exceptions:
+            msg = "%d task(s) failed" % len(exceptions)
+            raise error.JobError(msg, str(exceptions), exceptions)
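+
+        # Illustrative usage (a sketch, not part of this change):
+        #     job.parallel([job.run_test, 'dbench'],
+        #                  [job.run_test, 'bonnie'])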
 
-		# handle any exceptions raised by the parallel tasks
-		if exceptions:
-			msg = "%d task(s) failed" % len(exceptions)
-			raise error.JobError(msg, str(exceptions), exceptions)
 
+    def quit(self):
+        # XXX: should have a better name.
+        self.harness.run_pause()
+        raise error.JobContinue("more to come")
 
-	def quit(self):
-		# XXX: should have a better name.
-		self.harness.run_pause()
-		raise error.JobContinue("more to come")
 
+    def complete(self, status):
+        """Clean up and exit"""
+        # We are about to exit 'complete' so clean up the control file.
+        try:
+            os.unlink(self.state_file)
+        except:
+            pass
 
-	def complete(self, status):
-		"""Clean up and exit"""
-		# We are about to exit 'complete' so clean up the control file.
-		try:
-			os.unlink(self.state_file)
-		except:
-			pass
+        self.harness.run_complete()
+        self.disable_external_logging()
+        sys.exit(status)
 
-		self.harness.run_complete()
-		self.disable_external_logging()
-		sys.exit(status)
 
+    def set_state(self, var, val):
+        # Deep copies make sure that the state can't be altered
+        # without it being re-written.  Perf wise, deep copies
+        # are overshadowed by pickling/loading.
+        self.state[var] = copy.deepcopy(val)
+        pickle.dump(self.state, open(self.state_file, 'w'))
 
-	def set_state(self, var, val):
-		# Deep copies make sure that the state can't be altered
-		# without it being re-written.  Perf wise, deep copies
-		# are overshadowed by pickling/loading.
-		self.state[var] = copy.deepcopy(val)
-		pickle.dump(self.state, open(self.state_file, 'w'))
 
+    def __load_state(self):
+        assert not hasattr(self, "state")
+        try:
+            self.state = pickle.load(open(self.state_file, 'r'))
+            self.state_existed = True
+        except Exception:
+            print "Initializing the state engine."
+            self.state = {}
+            self.set_state('__steps', []) # writes pickle file
+            self.state_existed = False
 
-	def __load_state(self):
-		assert not hasattr(self, "state")
-		try:
-			self.state = pickle.load(open(self.state_file, 'r'))
-			self.state_existed = True
-		except Exception:
-			print "Initializing the state engine."
-			self.state = {}
-			self.set_state('__steps', []) # writes pickle file
-			self.state_existed = False
 
+    def get_state(self, var, default=None):
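+        # Returns the stored value when present; falls back to 'default'
+        # only when one was supplied, otherwise the KeyError surfaces.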
+        if var in self.state or default is None:
+            val = self.state[var]
+        else:
+            val = default
+        return copy.deepcopy(val)
 
-	def get_state(self, var, default=None):
-		if var in self.state or default == None:
-			val = self.state[var]
-		else:
-			val = default
-		return copy.deepcopy(val)
 
+    def __create_step_tuple(self, fn, args, dargs):
+        # Legacy code passes in an array where the first arg is
+        # the function or its name.
+        if isinstance(fn, list):
+            assert(len(args) == 0)
+            assert(len(dargs) == 0)
+            args = fn[1:]
+            fn = fn[0]
+        # Pickling actual functions is hairy, thus we have to call
+        # them by name.  Unfortunately, this means only functions
+        # defined globally can be used as a next step.
+        if callable(fn):
+            fn = fn.__name__
+        if not isinstance(fn, types.StringTypes):
+            raise StepError("Next steps must be functions or "
+                            "strings containing the function name")
+        ancestry = copy.copy(self.current_step_ancestry)
+        return (ancestry, fn, args, dargs)
 
-	def __create_step_tuple(self, fn, args, dargs):
-		# Legacy code passes in an array where the first arg is
-		# the function or its name.
-		if isinstance(fn, list):
-			assert(len(args) == 0)
-			assert(len(dargs) == 0)
-			args = fn[1:]
-			fn = fn[0]
-		# Pickling actual functions is harry, thus we have to call
-		# them by name.  Unfortunately, this means only functions
-		# defined globally can be used as a next step.
-		if callable(fn):
-			fn = fn.__name__
-		if not isinstance(fn, types.StringTypes):
-			raise StepError("Next steps must be functions or "
-			                "strings containing the function name")
-		ancestry = copy.copy(self.current_step_ancestry)
-		return (ancestry, fn, args, dargs)
 
+    def next_step_append(self, fn, *args, **dargs):
+        """Define the next step and place it at the end"""
+        steps = self.get_state('__steps')
+        steps.append(self.__create_step_tuple(fn, args, dargs))
+        self.set_state('__steps', steps)
 
-	def next_step_append(self, fn, *args, **dargs):
-		"""Define the next step and place it at the end"""
-		steps = self.get_state('__steps')
-		steps.append(self.__create_step_tuple(fn, args, dargs))
-		self.set_state('__steps', steps)
 
+    def next_step(self, fn, *args, **dargs):
+        """Create a new step and place it after any steps added
+        while running the current step but before any steps added in
+        previous steps"""
+        steps = self.get_state('__steps')
+        steps.insert(self.next_step_index,
+                     self.__create_step_tuple(fn, args, dargs))
+        self.next_step_index += 1
+        self.set_state('__steps', steps)
 
-	def next_step(self, fn, *args, **dargs):
-		"""Create a new step and place it after any steps added
-		while running the current step but before any steps added in
-		previous steps"""
-		steps = self.get_state('__steps')
-		steps.insert(self.next_step_index,
-		             self.__create_step_tuple(fn, args, dargs))
-		self.next_step_index += 1
-		self.set_state('__steps', steps)
 
+    def next_step_prepend(self, fn, *args, **dargs):
+        """Insert a new step, executing first"""
+        steps = self.get_state('__steps')
+        steps.insert(0, self.__create_step_tuple(fn, args, dargs))
+        self.next_step_index += 1
+        self.set_state('__steps', steps)
 
-	def next_step_prepend(self, fn, *args, **dargs):
-		"""Insert a new step, executing first"""
-		steps = self.get_state('__steps')
-		steps.insert(0, self.__create_step_tuple(fn, args, dargs))
-		self.next_step_index += 1
-		self.set_state('__steps', steps)
 
+    def _run_step_fn(self, local_vars, fn, args, dargs):
+        """Run a (step) function within the given context"""
 
-	def _run_step_fn(self, local_vars, fn, args, dargs):
-		"""Run a (step) function within the given context"""
+        local_vars['__args'] = args
+        local_vars['__dargs'] = dargs
+        exec('__ret = %s(*__args, **__dargs)' % fn,
+             local_vars, local_vars)
+        return local_vars['__ret']
 
-		local_vars['__args'] = args
-		local_vars['__dargs'] = dargs
-		exec('__ret = %s(*__args, **__dargs)' % fn,
-		     local_vars, local_vars)
-		return local_vars['__ret']
 
+    def _create_frame(self, global_vars, ancestry, fn_name):
+        """Set up the environment like it would have been when this
+        function was first defined.
 
-	def _create_frame(self, global_vars, ancestry, fn_name):
-		"""Set up the environment like it would have been when this
-		function was first defined.
+        Child step engine 'implementations' must have 'return locals()'
+        at the end of their steps.  Because of this, we can call the
+        parent function and get back all child functions (i.e. those
+        defined within it).
 
-		Child step engine 'implementations' must have 'return locals()'
-		at end end of their steps.  Because of this, we can call the
-		parent function and get back all child functions (i.e. those
-		defined within it).
+        Unfortunately, the call stack of the function calling
+        job.next_step might have been deeper than the function it
+        added.  In order to make sure that the environment is what it
+        should be, we need to then pop off the frames we built until
+        we find the frame where the function was first defined."""
 
-		Unfortunately, the call stack of the function calling 
-		job.next_step might have been deeper than the function it
-		added.  In order to make sure that the environment is what it
-		should be, we need to then pop off the frames we built until
-		we find the frame where the function was first defined."""
+        # The copies ensure that the parent frames are not modified
+        # while building child frames.  This matters if we then
+        # pop some frames in the next part of this function.
+        current_frame = copy.copy(global_vars)
+        frames = [current_frame]
+        for steps_fn_name in ancestry:
+            ret = self._run_step_fn(current_frame,
+                                    steps_fn_name, [], {})
+            current_frame = copy.copy(ret)
+            frames.append(current_frame)
 
-		# The copies ensure that the parent frames are not modified
-		# while building child frames.  This matters if we then
-		# pop some frames in the next part of this function.
-		current_frame = copy.copy(global_vars)
-		frames = [current_frame] 
-		for steps_fn_name in ancestry:
-			ret = self._run_step_fn(current_frame,
-			                        steps_fn_name, [], {})
-			current_frame = copy.copy(ret)
-			frames.append(current_frame)
+        while len(frames) > 2:
+            if fn_name not in frames[-2]:
+                break
+            if frames[-2][fn_name] != frames[-1][fn_name]:
+                break
+            frames.pop()
+            ancestry.pop()
 
-		while len(frames) > 2:
-			if fn_name not in frames[-2]:
-				break
-			if frames[-2][fn_name] != frames[-1][fn_name]:
-				break
-			frames.pop() 
-			ancestry.pop()
+        return (frames[-1], ancestry)
 
-		return (frames[-1], ancestry)
 
+    def _add_step_init(self, local_vars, current_function):
+        """If the function returned a dictionary that includes a
+        function named 'step_init', prepend it to our list of steps.
+        This will only get run the first time a function with a nested
+        use of the step engine is run."""
 
-	def _add_step_init(self, local_vars, current_function):
-		"""If the function returned a dictionary that includes a
-		function named 'step_init', prepend it to our list of steps.
-		This will only get run the first time a function with a nested
-		use of the step engine is run."""
+        if (isinstance(local_vars, dict) and
+            'step_init' in local_vars and
+            callable(local_vars['step_init'])):
+            # The init step is a child of the function
+            # we were just running.
+            self.current_step_ancestry.append(current_function)
+            self.next_step_prepend('step_init')
 
-		if (isinstance(local_vars, dict) and
-		    'step_init' in local_vars and
-		    callable(local_vars['step_init'])):
-			# The init step is a child of the function
-			# we were just running.
-			self.current_step_ancestry.append(current_function)
-			self.next_step_prepend('step_init')
 
+    def step_engine(self):
+        """The stepping engine -- if the control file defines
+        step_init we will be using this engine to drive multiple runs;
+        each call executes the next pending step.
+        """
 
-	def step_engine(self):
-		"""the stepping engine -- if the control file defines
-		step_init we will be using this engine to drive multiple runs.
-		"""
-		"""Do the next step"""
+        # Set up the environment and then interpret the control file.
+        # Some control files will have code outside of functions,
+        # which means we need to have our state engine initialized
+        # before reading in the file.
+        global_control_vars = {'job': self}
+        exec(JOB_PREAMBLE, global_control_vars, global_control_vars)
+        execfile(self.control, global_control_vars, global_control_vars)
 
-		# Set up the environment and then interpret the control file.
-		# Some control files will have code outside of functions,
-		# which means we need to have our state engine initialized
-		# before reading in the file.
-		global_control_vars = {'job': self}
-		exec(JOB_PREAMBLE, global_control_vars, global_control_vars)
-		execfile(self.control, global_control_vars, global_control_vars)
+        # If we loaded in a mid-job state file, then we presumably
+        # know what steps we have yet to run.
+        if not self.state_existed:
+            if global_control_vars.has_key('step_init'):
+                self.next_step(global_control_vars['step_init'])
 
-		# If we loaded in a mid-job state file, then we presumably
-		# know what steps we have yet to run.
-		if not self.state_existed:
-			if global_control_vars.has_key('step_init'):
-				self.next_step(global_control_vars['step_init'])
+        # Iterate through the steps.  If we reboot, we'll simply
+        # continue iterating on the next step.
+        while len(self.get_state('__steps')) > 0:
+            steps = self.get_state('__steps')
+            (ancestry, fn_name, args, dargs) = steps.pop(0)
+            self.set_state('__steps', steps)
 
-		# Iterate through the steps.  If we reboot, we'll simply
-		# continue iterating on the next step.
-		while len(self.get_state('__steps')) > 0:
-			steps = self.get_state('__steps')
-			(ancestry, fn_name, args, dargs) = steps.pop(0)
-			self.set_state('__steps', steps)
+            self.next_step_index = 0
+            ret = self._create_frame(global_control_vars, ancestry,
+                                     fn_name)
+            local_vars, self.current_step_ancestry = ret
+            local_vars = self._run_step_fn(local_vars, fn_name,
+                                           args, dargs)
+            self._add_step_init(local_vars, fn_name)
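+
+        # Illustrative control file driving this engine (a sketch, not
+        # part of this change):
+        #     def step_init():
+        #         job.next_step('step_two')
+        #         job.reboot()
+        #     def step_two():
+        #         job.run_test('dbench')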
 
-			self.next_step_index = 0
-			ret = self._create_frame(global_control_vars, ancestry,
-			                         fn_name)
-			local_vars, self.current_step_ancestry = ret
-			local_vars = self._run_step_fn(local_vars, fn_name,
-			                               args, dargs)
-			self._add_step_init(local_vars, fn_name)
 
+    def _init_group_level(self):
+        self.group_level = self.get_state("__group_level", default=0)
 
-	def _init_group_level(self):
-		self.group_level = self.get_state("__group_level", default=0)
 
+    def _increment_group_level(self):
+        self.group_level += 1
+        self.set_state("__group_level", self.group_level)
 
-	def _increment_group_level(self):
-		self.group_level += 1
-		self.set_state("__group_level", self.group_level)
 
+    def _decrement_group_level(self):
+        self.group_level -= 1
+        self.set_state("__group_level", self.group_level)
 
-	def _decrement_group_level(self):
-		self.group_level -= 1
-		self.set_state("__group_level", self.group_level)
 
+    def record(self, status_code, subdir, operation, status = '',
+               optional_fields=None):
+        """
+        Record job-level status
 
-	def record(self, status_code, subdir, operation, status = '',
-		   optional_fields=None):
-		"""
-		Record job-level status
+        The intent is to make this file both machine parseable and
+        human readable. That involves a little more complexity, but
+        really isn't all that bad ;-)
 
-		The intent is to make this file both machine parseable and
-		human readable. That involves a little more complexity, but
-		really isn't all that bad ;-)
+        Format is <status code>\t<subdir>\t<operation>\t<status>
 
-		Format is <status code>\t<subdir>\t<operation>\t<status>
+        status code: (GOOD|WARN|FAIL|ABORT)
+                or   START
+                or   END (GOOD|WARN|FAIL|ABORT)
 
-		status code: (GOOD|WARN|FAIL|ABORT)
-			or   START
-			or   END (GOOD|WARN|FAIL|ABORT)
+        subdir: MUST be a relevant subdirectory in the results,
+        or None, which will be represented as '----'
 
-		subdir: MUST be a relevant subdirectory in the results,
-		or None, which will be represented as '----'
+        operation: description of what you ran (e.g. "dbench", or
+                                        "mkfs -t foobar /dev/sda9")
 
-		operation: description of what you ran (e.g. "dbench", or
-						"mkfs -t foobar /dev/sda9")
+        status: error message or "completed successfully"
 
-		status: error message or "completed sucessfully"
+        ------------------------------------------------------------
 
-		------------------------------------------------------------
+        Initial tabs indicate indent levels for grouping, and are
+        governed by self.group_level
 
-		Initial tabs indicate indent levels for grouping, and is
-		governed by self.group_level
+        multiline messages have secondary lines prefaced by a double
+        space ('  ')
+        """
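+
+        # For example (illustrative), a passing dbench test produces a
+        # line like (tab-separated, timestamp fields elided):
+        #     GOOD    dbench    dbench    completed successfully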
 
-		multiline messages have secondary lines prefaced by a double
-		space ('  ')
-		"""
+        if subdir:
+            if re.search(r'[\n\t]', subdir):
+                raise ValueError("Invalid character in "
+                                 "subdir string")
+            substr = subdir
+        else:
+            substr = '----'
 
-		if subdir:
-			if re.match(r'[\n\t]', subdir):
-				raise ValueError("Invalid character in "
-						 "subdir string")
-			substr = subdir
-		else:
-			substr = '----'
-		
-		if not logging.is_valid_status(status_code):
-			raise ValueError("Invalid status code supplied: %s" %
-					 status_code)
-		if not operation:
-			operation = '----'
+        if not logging.is_valid_status(status_code):
+            raise ValueError("Invalid status code supplied: %s" %
+                             status_code)
+        if not operation:
+            operation = '----'
 
-		if re.match(r'[\n\t]', operation):
-			raise ValueError("Invalid character in "
-					 "operation string")
-		operation = operation.rstrip()
+        if re.search(r'[\n\t]', operation):
+            raise ValueError("Invalid character in "
+                             "operation string")
+        operation = operation.rstrip()
 
-		if not optional_fields:
-			optional_fields = {}
+        if not optional_fields:
+            optional_fields = {}
 
-		status = status.rstrip()
-		status = re.sub(r"\t", "  ", status)
-		# Ensure any continuation lines are marked so we can
-		# detect them in the status file to ensure it is parsable.
-		status = re.sub(r"\n", "\n" + "\t" * self.group_level + "  ",
-				status)
+        status = status.rstrip()
+        status = re.sub(r"\t", "  ", status)
+        # Ensure any continuation lines are marked so we can
+        # detect them in the status file to ensure it is parsable.
+        status = re.sub(r"\n", "\n" + "\t" * self.group_level + "  ",
+                        status)
 
-		# Generate timestamps for inclusion in the logs
-		epoch_time = int(time.time())  # seconds since epoch, in UTC
-		local_time = time.localtime(epoch_time)
-		optional_fields["timestamp"] = str(epoch_time)
-		optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
-							     local_time)
+        # Generate timestamps for inclusion in the logs
+        epoch_time = int(time.time())  # seconds since epoch, in UTC
+        local_time = time.localtime(epoch_time)
+        optional_fields["timestamp"] = str(epoch_time)
+        optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
+                                                     local_time)
 
-		fields = [status_code, substr, operation]
-		fields += ["%s=%s" % x for x in optional_fields.iteritems()]
-		fields.append(status)
+        fields = [status_code, substr, operation]
+        fields += ["%s=%s" % x for x in optional_fields.iteritems()]
+        fields.append(status)
 
-		msg = '\t'.join(str(x) for x in fields)
-		msg = '\t' * self.group_level + msg
+        msg = '\t'.join(str(x) for x in fields)
+        msg = '\t' * self.group_level + msg
 
-		msg_tag = ""
-		if "." in self.log_filename:
-			msg_tag = self.log_filename.split(".", 1)[1]
+        msg_tag = ""
+        if "." in self.log_filename:
+            msg_tag = self.log_filename.split(".", 1)[1]
 
-		self.harness.test_status_detail(status_code, substr,
-						operation, status, msg_tag)
-		self.harness.test_status(msg, msg_tag)
+        self.harness.test_status_detail(status_code, substr,
+                                        operation, status, msg_tag)
+        self.harness.test_status(msg, msg_tag)
 
-		# log to stdout (if enabled)
-		#if self.log_filename == self.DEFAULT_LOG_FILENAME:
-		print msg
+        # log to stdout (if enabled)
+        #if self.log_filename == self.DEFAULT_LOG_FILENAME:
+        print msg
 
-		# log to the "root" status log
-		status_file = os.path.join(self.resultdir, self.log_filename)
-		open(status_file, "a").write(msg + "\n")
+        # log to the "root" status log
+        status_file = os.path.join(self.resultdir, self.log_filename)
+        open(status_file, "a").write(msg + "\n")
 
-		# log to the subdir status log (if subdir is set)
-		if subdir:
-			dir = os.path.join(self.resultdir, subdir)
-			status_file = os.path.join(dir,
-						   self.DEFAULT_LOG_FILENAME)
-			open(status_file, "a").write(msg + "\n")
+        # log to the subdir status log (if subdir is set)
+        if subdir:
+            dir = os.path.join(self.resultdir, subdir)
+            status_file = os.path.join(dir,
+                                       self.DEFAULT_LOG_FILENAME)
+            open(status_file, "a").write(msg + "\n")
 
 
 class disk_usage_monitor:
-	def __init__(self, logging_func, device, max_mb_per_hour):
-		self.func = logging_func
-		self.device = device
-		self.max_mb_per_hour = max_mb_per_hour
+    def __init__(self, logging_func, device, max_mb_per_hour):
+        self.func = logging_func
+        self.device = device
+        self.max_mb_per_hour = max_mb_per_hour
 
 
-	def start(self):
-		self.initial_space = autotest_utils.freespace(self.device)
-		self.start_time = time.time()
+    def start(self):
+        self.initial_space = autotest_utils.freespace(self.device)
+        self.start_time = time.time()
 
 
-	def stop(self):
-		# if no maximum usage rate was set, we don't need to
-		# generate any warnings
-		if not self.max_mb_per_hour:
-			return
+    def stop(self):
+        # if no maximum usage rate was set, we don't need to
+        # generate any warnings
+        if not self.max_mb_per_hour:
+            return
 
-		final_space = autotest_utils.freespace(self.device)
-		used_space = self.initial_space - final_space
-		stop_time = time.time()
-		total_time = stop_time - self.start_time
-		# round up the time to one minute, to keep extremely short
-		# tests from generating false positives due to short, badly
-		# timed bursts of activity
-		total_time = max(total_time, 60.0)
+        final_space = autotest_utils.freespace(self.device)
+        used_space = self.initial_space - final_space
+        stop_time = time.time()
+        total_time = stop_time - self.start_time
+        # round up the time to one minute, to keep extremely short
+        # tests from generating false positives due to short, badly
+        # timed bursts of activity
+        total_time = max(total_time, 60.0)
 
-		# determine the usage rate
-		bytes_per_sec = used_space / total_time
-		mb_per_sec = bytes_per_sec / 1024**2
-		mb_per_hour = mb_per_sec * 60 * 60
+        # determine the usage rate
+        bytes_per_sec = used_space / total_time
+        mb_per_sec = bytes_per_sec / 1024**2
+        mb_per_hour = mb_per_sec * 60 * 60
 
-		if mb_per_hour > self.max_mb_per_hour:
-			msg = ("disk space on %s was consumed at a rate of "
-			       "%.2f MB/hour")
-			msg %= (self.device, mb_per_hour)
-			self.func(msg)
+        if mb_per_hour > self.max_mb_per_hour:
+            msg = ("disk space on %s was consumed at a rate of "
+                   "%.2f MB/hour")
+            msg %= (self.device, mb_per_hour)
+            self.func(msg)
 
 
-	@classmethod
-	def watch(cls, *monitor_args, **monitor_dargs):
-		""" Generic decorator to wrap a function call with the
-		standard create-monitor -> start -> call -> stop idiom."""
-		def decorator(func):
-			def watched_func(*args, **dargs):
-				monitor = cls(*monitor_args, **monitor_dargs)
-				monitor.start()
-				try:
-					func(*args, **dargs)
-				finally:
-					monitor.stop()
-			return watched_func
-		return decorator
+    @classmethod
+    def watch(cls, *monitor_args, **monitor_dargs):
+        """ Generic decorator to wrap a function call with the
+        standard create-monitor -> start -> call -> stop idiom."""
+        def decorator(func):
+            def watched_func(*args, **dargs):
+                monitor = cls(*monitor_args, **monitor_dargs)
+                monitor.start()
+                try:
+                    func(*args, **dargs)
+                finally:
+                    monitor.stop()
+            return watched_func
+        return decorator
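+
+        # Illustrative usage (a sketch; 'logger' and 'max_rate' are
+        # placeholders, not part of this change):
+        #     @disk_usage_monitor.watch(logger, "/", max_rate)
+        #     def group_func():
+        #         ...
+        # group_func() is then bracketed by monitor start()/stop().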
 
 
 def runjob(control, cont = False, tag = "default", harness_type = '',
-	   use_external_logging = False):
-	"""The main interface to this module
+           use_external_logging = False):
+    """The main interface to this module
 
-	control
-		The control file to use for this job.
-	cont
-		Whether this is the continuation of a previously started job
-	"""
-	control = os.path.abspath(control)
-	state = control + '.state'
+    control
+            The control file to use for this job.
+    cont
+            Whether this is the continuation of a previously started job
+    """
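+    # Illustrative invocation (the path is a placeholder, not part of
+    # this change):
+    #     runjob('/usr/local/autotest/control', cont=False)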
+    control = os.path.abspath(control)
+    state = control + '.state'
 
-	# instantiate the job object ready for the control file.
-	myjob = None
-	try:
-		# Check that the control file is valid
-		if not os.path.exists(control):
-			raise error.JobError(control + 
-						": control file not found")
+    # instantiate the job object ready for the control file.
+    myjob = None
+    try:
+        # Check that the control file is valid
+        if not os.path.exists(control):
+            raise error.JobError(control + ": control file not found")
 
-		# When continuing, the job is complete when there is no
-		# state file, ensure we don't try and continue.
-		if cont and not os.path.exists(state):
-			raise error.JobComplete("all done")
-		if cont == False and os.path.exists(state):
-			os.unlink(state)
+        # When continuing, the job is complete when there is no
+        # state file; ensure we don't try to continue.
+        if cont and not os.path.exists(state):
+            raise error.JobComplete("all done")
+        if cont == False and os.path.exists(state):
+            os.unlink(state)
 
-		myjob = job(control, tag, cont, harness_type,
-			    use_external_logging)
+        myjob = job(control, tag, cont, harness_type,
+                    use_external_logging)
 
-		# Load in the users control file, may do any one of:
-		#  1) execute in toto
-		#  2) define steps, and select the first via next_step()
-		myjob.step_engine()
+        # Load in the user's control file; it may do any one of:
+        #  1) execute in toto
+        #  2) define steps, and select the first via next_step()
+        myjob.step_engine()
 
-	except error.JobContinue:
-		sys.exit(5)
+    except error.JobContinue:
+        sys.exit(5)
 
-	except error.JobComplete:
-		sys.exit(1)
+    except error.JobComplete:
+        sys.exit(1)
 
-	except error.JobError, instance:
-		print "JOB ERROR: " + instance.args[0]
-		if myjob:
-			command = None
-			if len(instance.args) > 1:
-				command = instance.args[1]
-			myjob.record('ABORT', None, command, instance.args[0])
-			myjob._decrement_group_level()
-			myjob.record('END ABORT', None, None)
-			assert(myjob.group_level == 0)
-			myjob.complete(1)
-		else:
-			sys.exit(1)
+    except error.JobError, instance:
+        print "JOB ERROR: " + instance.args[0]
+        if myjob:
+            command = None
+            if len(instance.args) > 1:
+                command = instance.args[1]
+            myjob.record('ABORT', None, command, instance.args[0])
+            myjob._decrement_group_level()
+            myjob.record('END ABORT', None, None)
+            assert(myjob.group_level == 0)
+            myjob.complete(1)
+        else:
+            sys.exit(1)
 
-	except Exception, e:
-		msg = str(e) + '\n' + traceback.format_exc()
-		print "JOB ERROR: " + msg
-		if myjob:
-			myjob.record('ABORT', None, None, msg)
-			myjob._decrement_group_level()
-			myjob.record('END ABORT', None, None)
-			assert(myjob.group_level == 0)
-			myjob.complete(1)
-		else:
-			sys.exit(1)
+    except Exception, e:
+        msg = str(e) + '\n' + traceback.format_exc()
+        print "JOB ERROR: " + msg
+        if myjob:
+            myjob.record('ABORT', None, None, msg)
+            myjob._decrement_group_level()
+            myjob.record('END ABORT', None, None)
+            assert(myjob.group_level == 0)
+            myjob.complete(1)
+        else:
+            sys.exit(1)
 
-	# If we get here, then we assume the job is complete and good.
-	myjob._decrement_group_level()
-	myjob.record('END GOOD', None, None)
-	assert(myjob.group_level == 0)
+    # If we get here, then we assume the job is complete and good.
+    myjob._decrement_group_level()
+    myjob.record('END GOOD', None, None)
+    assert(myjob.group_level == 0)
 
-	myjob.complete(0)
+    myjob.complete(0)
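As a rough sketch of the exit-status contract above (5 for error.JobContinue, meaning the job intends to resume later; other statuses mean it is finished), a hypothetical wrapper could drive the client like this, ignoring the reboot case for simplicity:

    import subprocess

    def drive(start_cmd, resume_cmd):
        # re-enter the client while runjob keeps signalling 'continue'
        status = subprocess.call(start_cmd)
        while status == 5:
            status = subprocess.call(resume_cmd)
        return status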
 
 
 # site_job.py may be non-existent or empty; make sure that an appropriate
 # site_job class is created nevertheless
 try:
-	from site_job import site_job
+    from site_job import site_job
 except ImportError:
-	class site_job(base_job):
-		pass
+    class site_job(base_job):
+        pass
 
 class job(site_job):
-	pass
+    pass
diff --git a/client/bin/kernel.py b/client/bin/kernel.py
index fca8f7e..ad65ae9 100755
--- a/client/bin/kernel.py
+++ b/client/bin/kernel.py
@@ -8,727 +8,727 @@
 
 
 class kernel:
-	""" Class for compiling kernels. 
+    """ Class for compiling kernels.
 
-	Data for the object includes the src files
-	used to create the kernel, patches applied, config (base + changes),
-	the build directory itself, and logged output
+    Data for the object includes the src files
+    used to create the kernel, patches applied, config (base + changes),
+    the build directory itself, and logged output
 
-	Properties:
-		job
-			Backpointer to the job object we're part of
-		autodir
-			Path to the top level autotest dir (/usr/local/autotest)
-		src_dir
-			<tmp_dir>/src/
-		build_dir
-			<tmp_dir>/linux/
-		config_dir
-			<results_dir>/config/
-		log_dir
-			<results_dir>/debug/
-		results_dir
-			<results_dir>/results/
-	"""
+    Properties:
+            job
+                    Backpointer to the job object we're part of
+            autodir
+                    Path to the top level autotest dir (/usr/local/autotest)
+            src_dir
+                    <tmp_dir>/src/
+            build_dir
+                    <tmp_dir>/linux/
+            config_dir
+                    <results_dir>/config/
+            log_dir
+                    <results_dir>/debug/
+            results_dir
+                    <results_dir>/results/
+    """
 
-	autodir = ''
+    autodir = ''
 
-	def __init__(self, job, base_tree, subdir, tmp_dir, build_dir, leave = False):
-		"""Initialize the kernel build environment
+    def __init__(self, job, base_tree, subdir, tmp_dir, build_dir, leave = False):
+        """Initialize the kernel build environment
 
-		job
-			which job this build is part of
-		base_tree
-			base kernel tree. Can be one of the following:
-				1. A local tarball
-				2. A URL to a tarball
-				3. A local directory (will symlink it)
-				4. A shorthand expandable (eg '2.6.11-git3')
-		subdir
-			subdir in the results directory (eg "build")
-			(holds config/, debug/, results/)
-		tmp_dir
+        job
+                which job this build is part of
+        base_tree
+                base kernel tree. Can be one of the following:
+                        1. A local tarball
+                        2. A URL to a tarball
+                        3. A local directory (will symlink it)
+                        4. A shorthand expandable (eg '2.6.11-git3')
+        subdir
+                subdir in the results directory (eg "build")
+                (holds config/, debug/, results/)
+        tmp_dir
+                temporary directory (holds the src/ and build trees)
 
-		leave
-			Boolean, whether to leave existing tmpdir or not
-		"""
-		self.job = job
-		self.autodir = job.autodir
+        leave
+                Boolean, whether to leave existing tmpdir or not
+        """
+        self.job = job
+        self.autodir = job.autodir
 
-		self.src_dir    = os.path.join(tmp_dir, 'src')
-		self.build_dir  = os.path.join(tmp_dir, build_dir)
-			# created by get_kernel_tree
-		self.config_dir = os.path.join(subdir, 'config')
-		self.log_dir    = os.path.join(subdir, 'debug')
-		self.results_dir = os.path.join(subdir, 'results')
-		self.subdir	= os.path.basename(subdir)
+        self.src_dir    = os.path.join(tmp_dir, 'src')
+        self.build_dir  = os.path.join(tmp_dir, build_dir)
+                # created by get_kernel_tree
+        self.config_dir = os.path.join(subdir, 'config')
+        self.log_dir    = os.path.join(subdir, 'debug')
+        self.results_dir = os.path.join(subdir, 'results')
+        self.subdir     = os.path.basename(subdir)
 
-		self.installed_as = None
+        self.installed_as = None
 
-		if not leave:
-			if os.path.isdir(self.src_dir):
-				utils.system('rm -rf ' + self.src_dir)
-			if os.path.isdir(self.build_dir):
-				utils.system('rm -rf ' + self.build_dir)
+        if not leave:
+            if os.path.isdir(self.src_dir):
+                utils.system('rm -rf ' + self.src_dir)
+            if os.path.isdir(self.build_dir):
+                utils.system('rm -rf ' + self.build_dir)
 
-		if not os.path.exists(self.src_dir):
-			os.mkdir(self.src_dir)
-		for path in [self.config_dir, self.log_dir, self.results_dir]:
-			if os.path.exists(path):
-				utils.system('rm -rf ' + path)
-			os.mkdir(path)
+        if not os.path.exists(self.src_dir):
+            os.mkdir(self.src_dir)
+        for path in [self.config_dir, self.log_dir, self.results_dir]:
+            if os.path.exists(path):
+                utils.system('rm -rf ' + path)
+            os.mkdir(path)
 
-		logpath = os.path.join(self.log_dir, 'build_log')
-		self.logfile = open(logpath, 'w+')
-		self.applied_patches = []
+        logpath = os.path.join(self.log_dir, 'build_log')
+        self.logfile = open(logpath, 'w+')
+        self.applied_patches = []
 
-		self.target_arch = None
-		self.build_target = 'bzImage'
-		self.build_image = None
+        self.target_arch = None
+        self.build_target = 'bzImage'
+        self.build_image = None
 
-		arch = autotest_utils.get_current_kernel_arch()
-		if arch == 's390' or arch == 's390x':
-			self.build_target = 'image'
-		elif arch == 'ia64':
-			self.build_target = 'all'
-			self.build_image = 'vmlinux.gz'
+        arch = autotest_utils.get_current_kernel_arch()
+        if arch == 's390' or arch == 's390x':
+            self.build_target = 'image'
+        elif arch == 'ia64':
+            self.build_target = 'all'
+            self.build_image = 'vmlinux.gz'
 
-		if leave:
-			return
+        if leave:
+            return
 
-		self.logfile.write('BASE: %s\n' % base_tree)
+        self.logfile.write('BASE: %s\n' % base_tree)
 
-		# Where we have direct version hint record that
-		# for later configuration selection.
-		shorthand = re.compile(r'^\d+\.\d+\.\d+')
-		if shorthand.match(base_tree):
-			self.base_tree_version = base_tree
-		else:
-			self.base_tree_version = None
-			
-		# Actually extract the tree.  Make sure we know it occured
-		self.extract(base_tree)
+        # Where we have a direct version hint, record it
+        # for later configuration selection.
+        shorthand = re.compile(r'^\d+\.\d+\.\d+')
+        if shorthand.match(base_tree):
+            self.base_tree_version = base_tree
+        else:
+            self.base_tree_version = None
 
+        # Actually extract the tree.  Make sure we know it occurred.
+        self.extract(base_tree)
 
-	def kernelexpand(self, kernel):
-		# If we have something like a path, just use it as it is
-		if '/' in kernel:
-			return [kernel]
 
-		# Find the configured mirror list.
-		mirrors = self.job.config_get('mirror.mirrors')
-		if not mirrors:
-			# LEGACY: convert the kernel.org mirror
-			mirror = self.job.config_get('mirror.ftp_kernel_org')
-			if mirror:
-				korg = 'http://www.kernel.org/pub/linux/kernel'
-				mirrors = [
-				  [ korg + '/v2.6', mirror + '/v2.6' ],
-				  [ korg + '/people/akpm/patches/2.6',
-				    mirror + '/akpm' ],
-				  [ korg + '/people/mbligh',
-				    mirror + '/mbligh' ],
-				]
+    def kernelexpand(self, kernel):
+        # If we have something like a path, just use it as it is
+        if '/' in kernel:
+            return [kernel]
 
-		patches = kernelexpand.expand_classic(kernel, mirrors)
-		print patches
+        # Find the configured mirror list.
+        mirrors = self.job.config_get('mirror.mirrors')
+        if not mirrors:
+            # LEGACY: convert the kernel.org mirror
+            mirror = self.job.config_get('mirror.ftp_kernel_org')
+            if mirror:
+                korg = 'http://www.kernel.org/pub/linux/kernel'
+                mirrors = [
+                  [ korg + '/v2.6', mirror + '/v2.6' ],
+                  [ korg + '/people/akpm/patches/2.6',
+                    mirror + '/akpm' ],
+                  [ korg + '/people/mbligh',
+                    mirror + '/mbligh' ],
+                ]
 
-		return patches
+        patches = kernelexpand.expand_classic(kernel, mirrors)
+        print patches
 
+        return patches
 
-	@logging.record
-	@tee_output_logdir_mark
-	def extract(self, base_tree):
-		if os.path.exists(base_tree):
-			self.get_kernel_tree(base_tree)
-		else:
-			base_components = self.kernelexpand(base_tree)
-			print 'kernelexpand: '
-			print base_components
-			self.get_kernel_tree(base_components.pop(0))
-			if base_components:      # apply remaining patches
-				self.patch(*base_components)
 
+    @logging.record
+    @tee_output_logdir_mark
+    def extract(self, base_tree):
+        if os.path.exists(base_tree):
+            self.get_kernel_tree(base_tree)
+        else:
+            base_components = self.kernelexpand(base_tree)
+            print 'kernelexpand: '
+            print base_components
+            self.get_kernel_tree(base_components.pop(0))
+            if base_components:      # apply remaining patches
+                self.patch(*base_components)
 
-	@logging.record
-	@tee_output_logdir_mark
-	def patch(self, *patches):
-		"""Apply a list of patches (in order)"""
-		if not patches:
-			return
-		print 'Applying patches: ', patches
-		self.apply_patches(self.get_patches(patches))
 
+    @logging.record
+    @tee_output_logdir_mark
+    def patch(self, *patches):
+        """Apply a list of patches (in order)"""
+        if not patches:
+            return
+        print 'Applying patches: ', patches
+        self.apply_patches(self.get_patches(patches))
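A hypothetical control-file call, where k is a kernel object such as auto_kernel() below returns, and the patch locations are made up:

    k.patch('http://www.kernel.org/pub/linux/kernel/v2.6/patch-2.6.18.1.bz2',
            '/usr/local/patches/local-fix.patch')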
 
-	@logging.record
-	@tee_output_logdir_mark
-	def config(self, config_file = '', config_list = None, defconfig = False):
-		self.set_cross_cc()
-		config = kernel_config.kernel_config(self.job, self.build_dir,
-			 self.config_dir, config_file, config_list,
-			 defconfig, self.base_tree_version)
 
+    @logging.record
+    @tee_output_logdir_mark
+    def config(self, config_file = '', config_list = None, defconfig = False):
+        self.set_cross_cc()
+        config = kernel_config.kernel_config(self.job, self.build_dir,
+                 self.config_dir, config_file, config_list,
+                 defconfig, self.base_tree_version)
 
-	def get_patches(self, patches):
-		"""fetch the patches to the local src_dir"""
-		local_patches = []
-		for patch in patches:
-			dest = os.path.join(self.src_dir, basename(patch))
-			# FIXME: this isn't unique. Append something to it
-			# like wget does if it's not there?
-			print "get_file %s %s %s %s" % (patch, dest, self.src_dir, basename(patch))
-			utils.get_file(patch, dest)
-			# probably safer to use the command, not python library
-			md5sum = utils.system_output('md5sum ' + dest).split()[0]
-			local_patches.append((patch, dest, md5sum))
-		return local_patches
 
+    def get_patches(self, patches):
+        """fetch the patches to the local src_dir"""
+        local_patches = []
+        for patch in patches:
+            dest = os.path.join(self.src_dir, basename(patch))
+            # FIXME: this isn't unique. Append something to it
+            # like wget does if it's not there?
+            print "get_file %s %s %s %s" % (patch, dest, self.src_dir, basename(patch))
+            utils.get_file(patch, dest)
+            # probably safer to use the command, not python library
+            md5sum = utils.system_output('md5sum ' + dest).split()[0]
+            local_patches.append((patch, dest, md5sum))
+        return local_patches
 
-	def apply_patches(self, local_patches):
-		"""apply the list of patches, in order"""
-		builddir = self.build_dir
-		os.chdir(builddir)
 
-		if not local_patches:
-			return None
-		for (spec, local, md5sum) in local_patches:
-			if local.endswith('.bz2') or local.endswith('.gz'):
-				ref = spec
-			else:
-				ref = force_copy(local, self.results_dir)
-				ref = self.job.relative_path(ref)
-			patch_id = "%s %s %s" % (spec, ref, md5sum)
-			log = "PATCH: " + patch_id + "\n"
-			print log
-			cat_file_to_cmd(local, 'patch -p1 > /dev/null')
-			self.logfile.write(log)
-			self.applied_patches.append(patch_id)
+    def apply_patches(self, local_patches):
+        """apply the list of patches, in order"""
+        builddir = self.build_dir
+        os.chdir(builddir)
 
+        if not local_patches:
+            return None
+        for (spec, local, md5sum) in local_patches:
+            if local.endswith('.bz2') or local.endswith('.gz'):
+                ref = spec
+            else:
+                ref = force_copy(local, self.results_dir)
+                ref = self.job.relative_path(ref)
+            patch_id = "%s %s %s" % (spec, ref, md5sum)
+            log = "PATCH: " + patch_id + "\n"
+            print log
+            cat_file_to_cmd(local, 'patch -p1 > /dev/null')
+            self.logfile.write(log)
+            self.applied_patches.append(patch_id)
 
-	def get_kernel_tree(self, base_tree):
-		"""Extract/link base_tree to self.build_dir"""
-  
-		# if base_tree is a dir, assume uncompressed kernel
-		if os.path.isdir(base_tree):
-			print 'Symlinking existing kernel source'
-			os.symlink(base_tree, self.build_dir)
 
-		# otherwise, extract tarball
-		else:
-			os.chdir(os.path.dirname(self.src_dir))
-			# Figure out local destination for tarball
-			tarball = os.path.join(self.src_dir, os.path.basename(base_tree))
-			utils.get_file(base_tree, tarball)
-			print 'Extracting kernel tarball:', tarball, '...'
-			autotest_utils.extract_tarball_to_dir(tarball,
-			                                      self.build_dir)
+    def get_kernel_tree(self, base_tree):
+        """Extract/link base_tree to self.build_dir"""
 
+        # if base_tree is a dir, assume uncompressed kernel
+        if os.path.isdir(base_tree):
+            print 'Symlinking existing kernel source'
+            os.symlink(base_tree, self.build_dir)
 
-	def extraversion(self, tag, append=1):
-		os.chdir(self.build_dir)
-		extraversion_sub = r's/^EXTRAVERSION =\s*\(.*\)/EXTRAVERSION = '
-		if append:
-			p = extraversion_sub + '\\1-%s/' % tag
-		else:
-			p = extraversion_sub + '-%s/' % tag
-		utils.system('mv Makefile Makefile.old')
-		utils.system('sed "%s" < Makefile.old > Makefile' % p)
+        # otherwise, extract tarball
+        else:
+            os.chdir(os.path.dirname(self.src_dir))
+            # Figure out local destination for tarball
+            tarball = os.path.join(self.src_dir, os.path.basename(base_tree))
+            utils.get_file(base_tree, tarball)
+            print 'Extracting kernel tarball:', tarball, '...'
+            autotest_utils.extract_tarball_to_dir(tarball,
+                                                  self.build_dir)
 
 
-	@logging.record
-	@tee_output_logdir_mark
-	def build(self, make_opts = '', logfile = '', extraversion='autotest'):
-		"""build the kernel
+    def extraversion(self, tag, append=1):
+        os.chdir(self.build_dir)
+        extraversion_sub = r's/^EXTRAVERSION =\s*\(.*\)/EXTRAVERSION = '
+        if append:
+            p = extraversion_sub + '\\1-%s/' % tag
+        else:
+            p = extraversion_sub + '-%s/' % tag
+        utils.system('mv Makefile Makefile.old')
+        utils.system('sed "%s" < Makefile.old > Makefile' % p)
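The substitution behaves like this pure-Python equivalent (the sample Makefile line is made up):

    import re
    line = 'EXTRAVERSION = -rc2'
    # append=1 keeps the existing suffix and appends the tag:
    print re.sub(r'^EXTRAVERSION =\s*(.*)', r'EXTRAVERSION = \1-autotest', line)
    # EXTRAVERSION = -rc2-autotest
    # append=0 replaces the suffix outright:
    print re.sub(r'^EXTRAVERSION =\s*(.*)', r'EXTRAVERSION = -autotest', line)
    # EXTRAVERSION = -autotest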
 
-		make_opts
-			additional options to make, if any
-		"""
-		os_dep.commands('gcc', 'make')
-		if logfile == '':
-			logfile = os.path.join(self.log_dir, 'kernel_build')
-		os.chdir(self.build_dir)
-		if extraversion:
-			self.extraversion(extraversion)
-		self.set_cross_cc()
-		# setup_config_file(config_file, config_overrides)
 
-		# Not needed on 2.6, but hard to tell -- handle failure
-		utils.system('make dep', ignore_status=True)
-		threads = 2 * autotest_utils.count_cpus()
-		build_string = 'make -j %d %s %s' % (threads, make_opts,
-					     self.build_target)
-					# eg make bzImage, or make zImage
-		print build_string
-		system(build_string)
-		if kernel_config.modules_needed('.config'):
-			utils.system('make -j %d modules' % (threads))
+    @logging.record
+    @tee_output_logdir_mark
+    def build(self, make_opts = '', logfile = '', extraversion='autotest'):
+        """build the kernel
 
-		kernel_version = self.get_kernel_build_ver()
-		kernel_version = re.sub('-autotest', '', kernel_version)
-		self.logfile.write('BUILD VERSION: %s\n' % kernel_version)
+        make_opts
+                additional options to make, if any
+        """
+        os_dep.commands('gcc', 'make')
+        if logfile == '':
+            logfile = os.path.join(self.log_dir, 'kernel_build')
+        os.chdir(self.build_dir)
+        if extraversion:
+            self.extraversion(extraversion)
+        self.set_cross_cc()
+        # setup_config_file(config_file, config_overrides)
 
-		force_copy(self.build_dir+'/System.map', self.results_dir)
+        # Not needed on 2.6, but hard to tell -- handle failure
+        utils.system('make dep', ignore_status=True)
+        threads = 2 * autotest_utils.count_cpus()
+        build_string = 'make -j %d %s %s' % (threads, make_opts,
+                                     self.build_target)
+                                # eg make bzImage, or make zImage
+        print build_string
+        system(build_string)
+        if kernel_config.modules_needed('.config'):
+            utils.system('make -j %d modules' % (threads))
 
+        kernel_version = self.get_kernel_build_ver()
+        kernel_version = re.sub('-autotest', '', kernel_version)
+        self.logfile.write('BUILD VERSION: %s\n' % kernel_version)
 
-	def build_timed(self, threads, timefile = '/dev/null', make_opts = '',
-							output = '/dev/null'):
-		"""time the bulding of the kernel"""
-		os.chdir(self.build_dir)
-		self.set_cross_cc()
+        force_copy(self.build_dir+'/System.map', self.results_dir)
 
-		self.clean(logged=False)
-		build_string = "/usr/bin/time -o %s make %s -j %s vmlinux" \
-			 			% (timefile, make_opts, threads)
-		build_string += ' > %s 2>&1' % output
-		print build_string
-		utils.system(build_string)
 
-		if (not os.path.isfile('vmlinux')):
-			errmsg = "no vmlinux found, kernel build failed"
-			raise error.TestError(errmsg)
+    def build_timed(self, threads, timefile = '/dev/null', make_opts = '',
+                                                    output = '/dev/null'):
+        """time the bulding of the kernel"""
+        os.chdir(self.build_dir)
+        self.set_cross_cc()
+
+        self.clean(logged=False)
+        build_string = "/usr/bin/time -o %s make %s -j %s vmlinux" \
+                                        % (timefile, make_opts, threads)
+        build_string += ' > %s 2>&1' % output
+        print build_string
+        utils.system(build_string)
+
+        if (not os.path.isfile('vmlinux')):
+            errmsg = "no vmlinux found, kernel build failed"
+            raise error.TestError(errmsg)
+
+
+    @logging.record
+    @tee_output_logdir_mark
+    def clean(self):
+        """make clean in the kernel tree"""
+        os.chdir(self.build_dir)
+        print "make clean"
+        utils.system('make clean > /dev/null 2> /dev/null')
+
+
+    @logging.record
+    @tee_output_logdir_mark
+    def mkinitrd(self, version, image, system_map, initrd):
+        """Build kernel initrd image.
+        Try to use distro specific way to build initrd image.
+        Parameters:
+                version
+                        new kernel version
+                image
+                        new kernel image file
+                system_map
+                        System.map file
+                initrd
+                        initrd image file to build
+        """
+        vendor = autotest_utils.get_os_vendor()
 
+        if os.path.isfile(initrd):
+            print "Existing %s file, will remove it." % initrd
+            os.remove(initrd)
 
-	@logging.record
-	@tee_output_logdir_mark
-	def clean(self):
-		"""make clean in the kernel tree"""
-		os.chdir(self.build_dir) 
-		print "make clean"
-		utils.system('make clean > /dev/null 2> /dev/null')
+        args = self.job.config_get('kernel.mkinitrd_extra_args')
 
+        # don't leak 'None' into mkinitrd command
+        if not args:
+            args = ''
 
-	@logging.record
-	@tee_output_logdir_mark
-	def mkinitrd(self, version, image, system_map, initrd):
-		"""Build kernel initrd image.
-		Try to use distro specific way to build initrd image.
-		Parameters:
-			version
-				new kernel version
-			image
-				new kernel image file
-			system_map
-				System.map file
-			initrd
-				initrd image file to build
-		"""
-		vendor = autotest_utils.get_os_vendor()
-		
-		if os.path.isfile(initrd):
-			print "Existing %s file, will remove it." % initrd
-			os.remove(initrd)
+        if vendor in ['Red Hat', 'Fedora Core']:
+            utils.system('mkinitrd %s %s %s' % (args, initrd, version))
+        elif vendor in ['SUSE']:
+            utils.system('mkinitrd %s -k %s -i %s -M %s' % (args, image, initrd, system_map))
+        elif vendor in ['Debian', 'Ubuntu']:
+            if os.path.isfile('/usr/sbin/mkinitrd'):
+                cmd = '/usr/sbin/mkinitrd'
+            elif os.path.isfile('/usr/sbin/mkinitramfs'):
+                cmd = '/usr/sbin/mkinitramfs'
+            else:
+                raise error.TestError('No Debian initrd builder')
+            utils.system('%s %s -o %s %s' % (cmd, args, initrd, version))
+        else:
+            raise error.TestError('Unsupported vendor %s' % vendor)
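For illustration, with hypothetical arguments version='2.6.18.3', image='vmlinuz-autotest', system_map='System.map-autotest' and initrd='initrd-autotest', the branches above reduce to roughly:

    Red Hat/Fedora: mkinitrd <extra args> initrd-autotest 2.6.18.3
    SUSE:           mkinitrd <extra args> -k vmlinuz-autotest -i initrd-autotest -M System.map-autotest
    Debian/Ubuntu:  mkinitrd <extra args> -o initrd-autotest 2.6.18.3  (or mkinitramfs)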
 
-		args = self.job.config_get('kernel.mkinitrd_extra_args')
 
-		# don't leak 'None' into mkinitrd command
-		if not args:
-			args = ''
+    def set_build_image(self, image):
+        self.build_image = image
 
-		if vendor in ['Red Hat', 'Fedora Core']:
-			utils.system('mkinitrd %s %s %s' % (args, initrd, version))
-		elif vendor in ['SUSE']:
-			utils.system('mkinitrd %s -k %s -i %s -M %s' % (args, image, initrd, system_map))
-		elif vendor in ['Debian', 'Ubuntu']:
-			if os.path.isfile('/usr/sbin/mkinitrd'):
-				cmd = '/usr/sbin/mkinitrd'
-			elif os.path.isfile('/usr/sbin/mkinitramfs'):
-				cmd = '/usr/sbin/mkinitramfs'
-			else:
-				raise error.TestError('No Debian initrd builder')
-			utils.system('%s %s -o %s %s' % (cmd, args, initrd, version))
-		else:
-			raise error.TestError('Unsupported vendor %s' % vendor)
 
+    @logging.record
+    @tee_output_logdir_mark
+    def install(self, tag='autotest', prefix = '/'):
+        """make install in the kernel tree"""
 
-	def set_build_image(self, image):
-		self.build_image = image
+        # Record that we have installed the kernel, and
+        # the tag under which we installed it.
+        self.installed_as = tag
 
+        os.chdir(self.build_dir)
 
-	@logging.record
-	@tee_output_logdir_mark
-	def install(self, tag='autotest', prefix = '/'):
-		"""make install in the kernel tree"""
+        if not os.path.isdir(prefix):
+            os.mkdir(prefix)
+        self.boot_dir = os.path.join(prefix, 'boot')
+        if not os.path.isdir(self.boot_dir):
+            os.mkdir(self.boot_dir)
 
-		# Record that we have installed the kernel, and
-		# the tag under which we installed it.
-		self.installed_as = tag
+        if not self.build_image:
+            images = glob.glob('arch/*/boot/' + self.build_target)
+            if len(images):
+                self.build_image = images[0]
+            else:
+                self.build_image = self.build_target
 
-		os.chdir(self.build_dir)
+        # remember installed files
+        self.vmlinux = self.boot_dir + '/vmlinux-' + tag
+        if (self.build_image != 'vmlinux'):
+            self.image = self.boot_dir + '/vmlinuz-' + tag
+        else:
+            self.image = self.vmlinux
+        self.system_map = self.boot_dir + '/System.map-' + tag
+        self.config = self.boot_dir + '/config-' + tag
+        self.initrd = ''
 
-		if not os.path.isdir(prefix):
-			os.mkdir(prefix)
-		self.boot_dir = os.path.join(prefix, 'boot')
-		if not os.path.isdir(self.boot_dir):
-			os.mkdir(self.boot_dir)
+        # copy to boot dir
+        autotest_utils.force_copy('vmlinux', self.vmlinux)
+        if (self.build_image != 'vmlinux'):
+            force_copy(self.build_image, self.image)
+        autotest_utils.force_copy('System.map', self.system_map)
+        autotest_utils.force_copy('.config', self.config)
 
-		if not self.build_image:
-			images = glob.glob('arch/*/boot/' + self.build_target)
-			if len(images):
-				self.build_image = images[0]
-			else:
-				self.build_image = self.build_target
+        if not kernel_config.modules_needed('.config'):
+            return
 
-		# remember installed files
-		self.vmlinux = self.boot_dir + '/vmlinux-' + tag
-		if (self.build_image != 'vmlinux'):
-			self.image = self.boot_dir + '/vmlinuz-' + tag
-		else:
-			self.image = self.vmlinux
-		self.system_map = self.boot_dir + '/System.map-' + tag
-		self.config = self.boot_dir + '/config-' + tag
-		self.initrd = ''
+        utils.system('make modules_install INSTALL_MOD_PATH=%s' % prefix)
+        if prefix == '/':
+            self.initrd = self.boot_dir + '/initrd-' + tag
+            self.mkinitrd(self.get_kernel_build_ver(), self.image,
+                          self.system_map, self.initrd)
 
-		# copy to boot dir
-		autotest_utils.force_copy('vmlinux', self.vmlinux)
-		if (self.build_image != 'vmlinux'):
-			force_copy(self.build_image, self.image)
-		autotest_utils.force_copy('System.map', self.system_map)
-		autotest_utils.force_copy('.config', self.config)
 
-		if not kernel_config.modules_needed('.config'):
-			return
+    def add_to_bootloader(self, tag='autotest', args=''):
+        """ add this kernel to bootloader, taking an
+            optional parameter of space separated parameters
+            e.g.:  kernel.add_to_bootloader('mykernel', 'ro acpi=off')
+        """
 
-		utils.system('make modules_install INSTALL_MOD_PATH=%s' % prefix)
-		if prefix == '/':
-			self.initrd = self.boot_dir + '/initrd-' + tag
-			self.mkinitrd(self.get_kernel_build_ver(), self.image,
-				      self.system_map, self.initrd)
+        # remove existing entry if present
+        self.job.bootloader.remove_kernel(tag)
 
+        # pull the base argument set from the job config,
+        baseargs = self.job.config_get('boot.default_args')
+        if baseargs:
+            args = baseargs + " " + args
 
-	def add_to_bootloader(self, tag='autotest', args=''):
-		""" add this kernel to bootloader, taking an
-		    optional parameter of space separated parameters
-		    e.g.:  kernel.add_to_bootloader('mykernel', 'ro acpi=off')
-		"""
+        # otherwise populate from /proc/cmdline
+        # if not baseargs:
+        #       baseargs = open('/proc/cmdline', 'r').readline().strip()
+        # NOTE: This is unnecessary, because boottool does it.
 
-		# remove existing entry if present
-		self.job.bootloader.remove_kernel(tag)
+        root = None
+        roots = [x for x in args.split() if x.startswith('root=')]
+        if roots:
+            root = re.sub('^root=', '', roots[0])
+        arglist = [x for x in args.split() if not x.startswith('root=')]
+        args = ' '.join(arglist)
 
-		# pull the base argument set from the job config,
-		baseargs = self.job.config_get('boot.default_args')
-		if baseargs:
-			args = baseargs + " " + args
-		
-		# otherwise populate from /proc/cmdline
-		# if not baseargs:
-		#	baseargs = open('/proc/cmdline', 'r').readline().strip()
-		# NOTE: This is unnecessary, because boottool does it.
+        # add the kernel entry
+        # add_kernel(image, title='autotest', initrd='')
+        self.job.bootloader.add_kernel(self.image, tag, self.initrd, \
+                                        args = args, root = root)
 
-		root = None
-		roots = [x for x in args.split() if x.startswith('root=')]
-		if roots:
-			root = re.sub('^root=', '', roots[0])
-		arglist = [x for x in args.split() if not x.startswith('root=')]
-		args = ' '.join(arglist)
 
-		# add the kernel entry
-		# add_kernel(image, title='autotest', initrd='')
-		self.job.bootloader.add_kernel(self.image, tag, self.initrd, \
-						args = args, root = root)
+    def get_kernel_build_arch(self, arch=None):
+        """
+        Work out the current kernel architecture (as a kernel arch)
+        """
+        if not arch:
+            arch = autotest_utils.get_current_kernel_arch()
+        if re.match('i.86', arch):
+            return 'i386'
+        elif re.match('sun4u', arch):
+            return 'sparc64'
+        elif re.match('arm.*', arch):
+            return 'arm'
+        elif re.match('sa110', arch):
+            return 'arm'
+        elif re.match('s390x', arch):
+            return 's390'
+        elif re.match('parisc64', arch):
+            return 'parisc'
+        elif re.match('ppc.*', arch):
+            return 'powerpc'
+        elif re.match('mips.*', arch):
+            return 'mips'
+        else:
+            return arch
 
 
-	def get_kernel_build_arch(self, arch=None):
-		"""
-		Work out the current kernel architecture (as a kernel arch)
-		"""
-		if not arch:
-			arch = autotest_utils.get_current_kernel_arch()
-		if re.match('i.86', arch):
-			return 'i386'
-		elif re.match('sun4u', arch):
-			return 'sparc64'
-		elif re.match('arm.*', arch):
-			return 'arm'
-		elif re.match('sa110', arch):
-			return 'arm'
-		elif re.match('s390x', arch):
-			return 's390'
-		elif re.match('parisc64', arch):
-			return 'parisc'
-		elif re.match('ppc.*', arch):
-			return 'powerpc'
-		elif re.match('mips.*', arch):
-			return 'mips'
-		else:
-			return arch
+    def get_kernel_build_release(self):
+        releasem = re.compile(r'.*UTS_RELEASE\s+"([^"]+)".*')
+        versionm = re.compile(r'.*UTS_VERSION\s+"([^"]+)".*')
 
+        release = None
+        version = None
 
-	def get_kernel_build_release(self):
-		releasem = re.compile(r'.*UTS_RELEASE\s+"([^"]+)".*');
-		versionm = re.compile(r'.*UTS_VERSION\s+"([^"]+)".*');
+        for file in [ self.build_dir + "/include/linux/version.h",
+                      self.build_dir + "/include/linux/utsrelease.h",
+                      self.build_dir + "/include/linux/compile.h" ]:
+            if os.path.exists(file):
+                fd = open(file, 'r')
+                for line in fd.readlines():
+                    m = releasem.match(line)
+                    if m:
+                        release = m.groups()[0]
+                    m = versionm.match(line)
+                    if m:
+                        version = m.groups()[0]
+                fd.close()
 
-		release = None
-		version = None
+        return (release, version)
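The header lines being matched look like the following (contents hypothetical); release and version capture the quoted strings:

    #define UTS_RELEASE "2.6.18.3-autotest"
    #define UTS_VERSION "#1 SMP Thu Nov 16 18:22:48 UTC 2006"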
 
-		for file in [ self.build_dir + "/include/linux/version.h",
-			      self.build_dir + "/include/linux/utsrelease.h",
-			      self.build_dir + "/include/linux/compile.h" ]:
-			if os.path.exists(file):
-				fd = open(file, 'r')
-				for line in fd.readlines():
-					m = releasem.match(line)
-					if m:
-						release = m.groups()[0]
-					m = versionm.match(line)
-					if m:
-						version = m.groups()[0]
-				fd.close()
 
-		return (release, version)
+    def get_kernel_build_ident(self):
+        (release, version) = self.get_kernel_build_release()
 
-	
-	def get_kernel_build_ident(self):
-		(release, version) = self.get_kernel_build_release()
+        if not release or not version:
+            raise error.JobError('kernel has no identity')
 
-		if not release or not version:
-			raise error.JobError('kernel has no identity')
+        return release + '::' + version
 
-		return release + '::' + version
 
+    def boot(self, args='', ident=1):
+        """ install and boot this kernel, do not care how
+            just make it happen.
+        """
 
-	def boot(self, args='', ident=1):
-		""" install and boot this kernel, do not care how
-		    just make it happen.
-		"""
+        # If we can check the kernel identity do so.
+        if ident:
+            when = int(time.time())
+            ident = self.get_kernel_build_ident()
+            args += " IDENT=%d" % (when)
 
-		# If we can check the kernel identity do so.
-		if ident:
-			when = int(time.time())
-			ident = self.get_kernel_build_ident()
-			args += " IDENT=%d" % (when)
+            self.job.next_step_prepend(["job.kernel_check_ident",
+                                        when, ident, self.subdir,
+                                        self.applied_patches])
 
-			self.job.next_step_prepend(["job.kernel_check_ident",
-						    when, ident, self.subdir,
-						    self.applied_patches])
+        # Check if the kernel has been installed, if not install
+        # as the default tag and boot that.
+        if not self.installed_as:
+            self.install()
 
-		# Check if the kernel has been installed, if not install
-		# as the default tag and boot that.
-		if not self.installed_as:
-			self.install()
+        # Boot the selected tag.
+        self.add_to_bootloader(args=args, tag=self.installed_as)
 
-		# Boot the selected tag.
-		self.add_to_bootloader(args=args, tag=self.installed_as)
+        # Boot it.
+        self.job.reboot(tag=self.installed_as)
 
-		# Boot it.
-		self.job.reboot(tag=self.installed_as)
 
+    def get_kernel_build_ver(self):
+        """Check Makefile and .config to return kernel version"""
+        version = patchlevel = sublevel = extraversion = localversion = ''
 
-	def get_kernel_build_ver(self):
-		"""Check Makefile and .config to return kernel version"""
-		version = patchlevel = sublevel = extraversion = localversion = ''
+        for line in open(self.build_dir + '/Makefile', 'r').readlines():
+            if line.startswith('VERSION'):
+                version = line[line.index('=') + 1:].strip()
+            if line.startswith('PATCHLEVEL'):
+                patchlevel = line[line.index('=') + 1:].strip()
+            if line.startswith('SUBLEVEL'):
+                sublevel = line[line.index('=') + 1:].strip()
+            if line.startswith('EXTRAVERSION'):
+                extraversion = line[line.index('=') + 1:].strip()
 
-		for line in open(self.build_dir + '/Makefile', 'r').readlines():
-			if line.startswith('VERSION'):
-				version = line[line.index('=') + 1:].strip()
-			if line.startswith('PATCHLEVEL'):
-				patchlevel = line[line.index('=') + 1:].strip()
-			if line.startswith('SUBLEVEL'):
-				sublevel = line[line.index('=') + 1:].strip()
-			if line.startswith('EXTRAVERSION'):
-				extraversion = line[line.index('=') + 1:].strip()
+        for line in open(self.build_dir + '/.config', 'r').readlines():
+            if line.startswith('CONFIG_LOCALVERSION='):
+                localversion = line.rstrip().split('"')[1]
 
-		for line in open(self.build_dir + '/.config', 'r').readlines():
-			if line.startswith('CONFIG_LOCALVERSION='):
-				localversion = line.rstrip().split('"')[1]
+        return "%s.%s.%s%s%s" %(version, patchlevel, sublevel, extraversion, localversion)
 
-		return "%s.%s.%s%s%s" %(version, patchlevel, sublevel, extraversion, localversion)
 
+    def set_build_target(self, build_target):
+        if build_target:
+            self.build_target = build_target
+            print 'BUILD TARGET: %s' % self.build_target
 
-	def set_build_target(self, build_target):
-		if build_target:
-			self.build_target = build_target
-			print 'BUILD TARGET: %s' % self.build_target
 
+    def set_cross_cc(self, target_arch=None, cross_compile=None,
+                     build_target='bzImage'):
+        """Set up to cross-compile.
+                This is broken. We need to work out whether the default
+                compiler produces what we want, and only if not, THEN set
+                the cross compiler.
+        """
 
-	def set_cross_cc(self, target_arch=None, cross_compile=None,
-			 build_target='bzImage'):
-		"""Set up to cross-compile.
-			This is broken. We need to work out what the default
-			compile produces, and if not, THEN set the cross
-			compiler.
-		"""
+        if self.target_arch:
+            return
 
-		if self.target_arch:
-			return
+        # if someone has set build_target, don't clobber it in set_cross_cc;
+        # run set_build_target before calling set_cross_cc
+        if not self.build_target:
+            self.set_build_target(build_target)
 
-		# if someone has set build_target, don't clobber in set_cross_cc
-		# run set_build_target before calling set_cross_cc
-		if not self.build_target:
-			self.set_build_target(build_target)
+        # If no 'target_arch' given assume native compilation
+        if target_arch == None:
+            target_arch = autotest_utils.get_current_kernel_arch()
+            if target_arch == 'ppc64':
+                if self.build_target == 'bzImage':
+                    self.build_target = 'vmlinux'
 
-		# If no 'target_arch' given assume native compilation
-		if target_arch == None:
-			target_arch = autotest_utils.get_current_kernel_arch()
-			if target_arch == 'ppc64':
-				if self.build_target == 'bzImage':
-					self.build_target = 'vmlinux'
+        if not cross_compile:
+            cross_compile = self.job.config_get('kernel.cross_cc')
 
-		if not cross_compile:
-			cross_compile = self.job.config_get('kernel.cross_cc')
+        if cross_compile:
+            os.environ['CROSS_COMPILE'] = cross_compile
+        else:
+            if os.environ.has_key('CROSS_COMPILE'):
+                del os.environ['CROSS_COMPILE']
 
-		if cross_compile:
-			os.environ['CROSS_COMPILE'] = cross_compile
-		else:
-			if os.environ.has_key('CROSS_COMPILE'):
-				del os.environ['CROSS_COMPILE']
-	
-		return                 # HACK. Crap out for now.
+        return                 # HACK. Crap out for now.
 
-		# At this point I know what arch I *want* to build for
-		# but have no way of working out what arch the default
-		# compiler DOES build for.
+        # At this point I know what arch I *want* to build for
+        # but have no way of working out what arch the default
+        # compiler DOES build for.
 
-		# Oh, and BTW, install_package() doesn't exist yet.
+        # Oh, and BTW, install_package() doesn't exist yet.
 
-		if target_arch == 'ppc64':
-			install_package('ppc64-cross')
-			cross_compile = os.path.join(self.autodir, 'sources/ppc64-cross/bin')
+        if target_arch == 'ppc64':
+            install_package('ppc64-cross')
+            cross_compile = os.path.join(self.autodir, 'sources/ppc64-cross/bin')
 
-		elif target_arch == 'x86_64':
-			install_package('x86_64-cross')
-			cross_compile = os.path.join(self.autodir, 'sources/x86_64-cross/bin')
+        elif target_arch == 'x86_64':
+            install_package('x86_64-cross')
+            cross_compile = os.path.join(self.autodir, 'sources/x86_64-cross/bin')
 
-		os.environ['ARCH'] = self.target_arch = target_arch
+        os.environ['ARCH'] = self.target_arch = target_arch
 
-		self.cross_compile = cross_compile
-		if self.cross_compile:
-			os.environ['CROSS_COMPILE'] = self.cross_compile
+        self.cross_compile = cross_compile
+        if self.cross_compile:
+            os.environ['CROSS_COMPILE'] = self.cross_compile
 
 
-	def pickle_dump(self, filename):
-		"""dump a pickle of ourself out to the specified filename
+    def pickle_dump(self, filename):
+        """dump a pickle of ourself out to the specified filename
 
-		we can't pickle the backreference to job (it contains fd's), 
-		nor would we want to. Same for logfile (fd's).
-		"""
-		temp = copy.copy(self)
-		temp.job = None
-		temp.logfile = None
-		pickle.dump(temp, open(filename, 'w'))
+        we can't pickle the backreference to job (it contains fd's),
+        nor would we want to. Same for logfile (fd's).
+        """
+        temp = copy.copy(self)
+        temp.job = None
+        temp.logfile = None
+        pickle.dump(temp, open(filename, 'w'))
 
 
 class rpm_kernel:
-	""" Class for installing rpm kernel package
-	"""
+    """ Class for installing rpm kernel package
+    """
 
-	def __init__(self, job, rpm_package, subdir):
-		self.job = job
-		self.rpm_package = rpm_package
-		self.log_dir = os.path.join(subdir, 'debug')
-		self.subdir = os.path.basename(subdir)
-		if os.path.exists(self.log_dir):
-			utils.system('rm -rf ' + self.log_dir)
-		os.mkdir(self.log_dir)
-		self.installed_as = None
+    def __init__(self, job, rpm_package, subdir):
+        self.job = job
+        self.rpm_package = rpm_package
+        self.log_dir = os.path.join(subdir, 'debug')
+        self.subdir = os.path.basename(subdir)
+        if os.path.exists(self.log_dir):
+            utils.system('rm -rf ' + self.log_dir)
+        os.mkdir(self.log_dir)
+        self.installed_as = None
 
 
-	@logging.record
-	@tee_output_logdir_mark
-	def install(self, tag='autotest'):
-		self.installed_as = tag
+    @logging.record
+    @tee_output_logdir_mark
+    def install(self, tag='autotest'):
+        self.installed_as = tag
 
-		self.rpm_name = utils.system_output('rpm -qp ' + self.rpm_package)
+        self.rpm_name = utils.system_output('rpm -qp ' + self.rpm_package)
 
-		# install
-		utils.system('rpm -i --force ' + self.rpm_package)
+        # install
+        utils.system('rpm -i --force ' + self.rpm_package)
 
-		# get file list
-		files = utils.system_output('rpm -ql ' + self.rpm_name).splitlines()
+        # get file list
+        files = utils.system_output('rpm -ql ' + self.rpm_name).splitlines()
 
-		# search for vmlinuz
-		for file in files:
-			if file.startswith('/boot/vmlinuz'):
-				self.image = file
-				break
-		else:
-			errmsg = "%s doesn't contain /boot/vmlinuz"
-			errmsg %= self.rpm_package
-			raise error.TestError(errmsg)
+        # search for vmlinuz
+        for file in files:
+            if file.startswith('/boot/vmlinuz'):
+                self.image = file
+                break
+        else:
+            errmsg = "%s doesn't contain /boot/vmlinuz"
+            errmsg %= self.rpm_package
+            raise error.TestError(errmsg)
 
-		# search for initrd
-		self.initrd = ''
-		for file in files:
-			if file.startswith('/boot/initrd'):
-				self.initrd = file
-				break
+        # search for initrd
+        self.initrd = ''
+        for file in files:
+            if file.startswith('/boot/initrd'):
+                self.initrd = file
+                break
 
-		# get version and release number
-		self.version, self.release = utils.system_output(
-			'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q ' + self.rpm_name).splitlines()[0:2]
+        # get version and release number
+        self.version, self.release = utils.system_output(
+                'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q ' + self.rpm_name).splitlines()[0:2]
 
 
-	def add_to_bootloader(self, tag='autotest', args=''):
-		""" Add this kernel to bootloader
-		"""
+    def add_to_bootloader(self, tag='autotest', args=''):
+        """ Add this kernel to bootloader
+        """
 
-		# remove existing entry if present
-		self.job.bootloader.remove_kernel(tag)
+        # remove existing entry if present
+        self.job.bootloader.remove_kernel(tag)
 
-		# pull the base argument set from the job config
-		baseargs = self.job.config_get('boot.default_args')
-		if baseargs:
-			args = baseargs + ' ' + args
+        # pull the base argument set from the job config
+        baseargs = self.job.config_get('boot.default_args')
+        if baseargs:
+            args = baseargs + ' ' + args
 
-		# otherwise populate from /proc/cmdline
-		# if not baseargs:
-		#	baseargs = open('/proc/cmdline', 'r').readline().strip()
-		# NOTE: This is unnecessary, because boottool does it.
+        # otherwise populate from /proc/cmdline
+        # if not baseargs:
+        #       baseargs = open('/proc/cmdline', 'r').readline().strip()
+        # NOTE: This is unnecessary, because boottool does it.
 
-		root = None
-		roots = [x for x in args.split() if x.startswith('root=')]
-		if roots:
-			root = re.sub('^root=', '', roots[0])
-		arglist = [x for x in args.split() if not x.startswith('root=')]
-		args = ' '.join(arglist)
+        root = None
+        roots = [x for x in args.split() if x.startswith('root=')]
+        if roots:
+            root = re.sub('^root=', '', roots[0])
+        arglist = [x for x in args.split() if not x.startswith('root=')]
+        args = ' '.join(arglist)
 
-		# add the kernel entry
-		self.job.bootloader.add_kernel(self.image, tag, self.initrd, args = args, root = root)
+        # add the kernel entry
+        self.job.bootloader.add_kernel(self.image, tag, self.initrd, args = args, root = root)
 
 
-	def boot(self, args='', ident=1):
-		""" install and boot this kernel
-		"""
+    def boot(self, args='', ident=1):
+        """ install and boot this kernel
+        """
 
-		# Check if the kernel has been installed, if not install
-		# as the default tag and boot that.
-		if not self.installed_as:
-			self.install()
+        # Check if the kernel has been installed, if not install
+        # as the default tag and boot that.
+        if not self.installed_as:
+            self.install()
 
-		# If we can check the kernel identity do so.
-		if ident:
-			when = int(time.time())
-			ident = '-'.join([self.version,
-					  self.rpm_name.split('-')[1],
-					  self.release])
-			args += " IDENT=%d" % (when)
+        # If we can check the kernel identity do so.
+        if ident:
+            when = int(time.time())
+            ident = '-'.join([self.version,
+                              self.rpm_name.split('-')[1],
+                              self.release])
+            args += " IDENT=%d" % (when)
 
-		self.job.next_step_prepend(["job.kernel_check_ident",
-			when, ident, self.subdir, 'rpm'])
+            self.job.next_step_prepend(["job.kernel_check_ident",
+                                        when, ident, self.subdir, 'rpm'])
 
-		# Boot the selected tag.
-		self.add_to_bootloader(args=args, tag=self.installed_as)
+        # Boot the selected tag.
+        self.add_to_bootloader(args=args, tag=self.installed_as)
 
-		# Boot it.
-		self.job.reboot(tag=self.installed_as)
+        # Boot it.
+        self.job.reboot(tag=self.installed_as)
 
 
 # pull in some optional site-specific path pre-processing
 try:
-	import site_kernel
-	preprocess_path = site_kernel.preprocess_path
-	del site_kernel
+    import site_kernel
+    preprocess_path = site_kernel.preprocess_path
+    del site_kernel
 except ImportError:
-	# just make the preprocessor a nop
-	def preprocess_path(path):
-		return path
+    # just make the preprocessor a nop
+    def preprocess_path(path):
+        return path
 
 def auto_kernel(job, path, subdir, tmp_dir, build_dir, leave=False):
-	"""\
-	Create a kernel object, dynamically selecting the appropriate class to use
-	based on the path provided.
-	"""
-	path = preprocess_path(path)
-	if path.endswith('.rpm'):
-		return rpm_kernel(job, path, subdir)
-	else:
-		return kernel(job, path, subdir, tmp_dir, build_dir, leave)
+    """\
+    Create a kernel object, dynamically selecting the appropriate class to use
+    based on the path provided.
+    """
+    path = preprocess_path(path)
+    if path.endswith('.rpm'):
+        return rpm_kernel(job, path, subdir)
+    else:
+        return kernel(job, path, subdir, tmp_dir, build_dir, leave)
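Hypothetical control-file calls showing the dispatch (all paths and names made up):

    k = auto_kernel(job, '/tmp/kernel-2.6.18-1.i686.rpm',
                    'build', tmp_dir, 'linux')     # rpm_kernel instance
    k = auto_kernel(job, '2.6.18-rc3', 'build', tmp_dir, 'linux')
    # kernel instance; the shorthand is resolved through kernelexpand()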
diff --git a/client/bin/kernel_config.py b/client/bin/kernel_config.py
index ed6321a..9b471b5 100755
--- a/client/bin/kernel_config.py
+++ b/client/bin/kernel_config.py
@@ -5,122 +5,122 @@
 from autotest_lib.client.common_lib import error, utils
 
 def apply_overrides(orig_file, changes_file, output_file):
-	override = dict()
+    override = dict()
 
-	# First suck all the changes into a dictionary.
-	input = file(changes_file, 'r')
-	for line in input.readlines():
-		if line.startswith('CONFIG_'):
-			key = line.split('=')[0]
-			override[key] = line;
-		elif line.startswith('# CONFIG_'):
-			key = line.split(' ')[1]
-			override[key] = line;
-	input.close()
+    # First suck all the changes into a dictionary.
+    input = file(changes_file, 'r')
+    for line in input.readlines():
+        if line.startswith('CONFIG_'):
+            key = line.split('=')[0]
+            override[key] = line
+        elif line.startswith('# CONFIG_'):
+            key = line.split(' ')[1]
+            override[key] = line
+    input.close()
 
-	# Now go through the input file, overriding lines where need be
-	input = file(orig_file, 'r')
-	output = file(output_file, 'w')
-	for line in input.readlines():
-		if line.startswith('CONFIG_'):
-			key = line.split('=')[0]
-		elif line.startswith('# CONFIG_'):
-			key = line.split(' ')[1]
-		else:
-			key = None
-		if key and key in override:
-			output.write(override[key])
-		else:
-			output.write(line)
-	input.close()
-	output.close()
+    # Now go through the input file, overriding lines where need be
+    input = file(orig_file, 'r')
+    output = file(output_file, 'w')
+    for line in input.readlines():
+        if line.startswith('CONFIG_'):
+            key = line.split('=')[0]
+        elif line.startswith('# CONFIG_'):
+            key = line.split(' ')[1]
+        else:
+            key = None
+        if key and key in override:
+            output.write(override[key])
+        else:
+            output.write(line)
+    input.close()
+    output.close()
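A toy illustration of the override semantics (file names made up): options present in the changes file win, and everything else passes through unchanged.

    open('config.orig', 'w').write('CONFIG_SMP=y\n'
                                   '# CONFIG_DEBUG_KERNEL is not set\n'
                                   'CONFIG_MODULES=y\n')
    open('config.changes', 'w').write('# CONFIG_SMP is not set\n'
                                      'CONFIG_DEBUG_KERNEL=y\n')
    apply_overrides('config.orig', 'config.changes', 'config.new')
    # config.new now contains:
    #   # CONFIG_SMP is not set
    #   CONFIG_DEBUG_KERNEL=y
    #   CONFIG_MODULES=y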
 
 
 def diff_configs(old, new):
-	utils.system('diff -u %s %s > %s' % (old, new, new + '.diff'),
-		     ignore_status=True)
+    utils.system('diff -u %s %s > %s' % (old, new, new + '.diff'),
+                 ignore_status=True)
 
 
 
 def modules_needed(config):
-	return (autotest_utils.grep('CONFIG_MODULES=y', config)
-		and autotest_utils.grep('=m', config))
+    return (autotest_utils.grep('CONFIG_MODULES=y', config)
+            and autotest_utils.grep('=m', config))
 
 
 def config_by_name(name, set):
-	version = kernel_versions.version_choose_config(name, set[1:])
-	if version:
-		return set[0] + version
-	return None
+    version = kernel_versions.version_choose_config(name, set[1:])
+    if version:
+        return set[0] + version
+    return None
 
 
 class kernel_config:
-	# Build directory must be ready before init'ing config.
-	# 
-	# Stages:
-	# 	1. Get original config file
-	#	2. Apply overrides
-	#	3. Do 'make oldconfig' to update it to current source code
-	#                  (gets done implicitly during the process)
-	#
-	# You may specifiy the a defconfig within the tree to build,
-	# or a custom config file you want, or None, to get machine's
-	# default config file from the repo.
+    # Build directory must be ready before init'ing config.
+    #
+    # Stages:
+    #       1. Get original config file
+    #       2. Apply overrides
+    #       3. Do 'make oldconfig' to update it to current source code
+    #                  (gets done implicitly during the process)
+    #
+    # You may specify a defconfig within the tree to build,
+    # or a custom config file you want, or None, to get the machine's
+    # default config file from the repo.
 
-	build_dir = ''		# the directory we're building in
-	config_dir = ''		# local repository for config_file data
+    build_dir = ''          # the directory we're building in
+    config_dir = ''         # local repository for config_file data
 
-	build_config = ''	# the config file in the build directory
-	orig_config = ''	# the original config file
-	over_config = ''	# config file + overrides
+    build_config = ''       # the config file in the build directory
+    orig_config = ''        # the original config file
+    over_config = ''        # config file + overrides
 
 
-	def __init__(self, job, build_dir, config_dir, orig_file,
-				overrides, defconfig = False, name = None):
-		self.build_dir = build_dir
-		self.config_dir = config_dir
+    def __init__(self, job, build_dir, config_dir, orig_file,
+                            overrides, defconfig = False, name = None):
+        self.build_dir = build_dir
+        self.config_dir = config_dir
 
-		# 	1. Get original config file
-		self.build_config = build_dir + '/.config'
-		if (orig_file == '' and not defconfig):	# use user default
-			set = job.config_get("kernel.default_config_set")
-			defconf = None
-			if set and name:
-				defconf = config_by_name(name, set)
-			if not defconf: 
-				defconf = job.config_get("kernel.default_config")
-			if defconf:
-				orig_file = defconf
-		if (orig_file == '' or defconfig):	# use defconfig
-			print "kernel_config: using defconfig to configure kernel"
-			os.chdir(build_dir)
-			utils.system('make defconfig')
-		else:
-			print "kernel_config: using " + orig_file + \
-							" to configure kernel"
-			self.orig_config = config_dir + '/config.orig'
-			utils.get_file(orig_file, self.orig_config)
-			self.update_config(self.orig_config, self.orig_config+'.new')
-			diff_configs(self.orig_config, self.orig_config+'.new')
+        #       1. Get original config file
+        self.build_config = build_dir + '/.config'
+        if (orig_file == '' and not defconfig): # use user default
+            set = job.config_get("kernel.default_config_set")
+            defconf = None
+            if set and name:
+                defconf = config_by_name(name, set)
+            if not defconf:
+                defconf = job.config_get("kernel.default_config")
+            if defconf:
+                orig_file = defconf
+        if (orig_file == '' or defconfig):      # use defconfig
+            print "kernel_config: using defconfig to configure kernel"
+            os.chdir(build_dir)
+            utils.system('make defconfig')
+        else:
+            print "kernel_config: using " + orig_file + \
+                                            " to configure kernel"
+            self.orig_config = config_dir + '/config.orig'
+            utils.get_file(orig_file, self.orig_config)
+            self.update_config(self.orig_config, self.orig_config+'.new')
+            diff_configs(self.orig_config, self.orig_config+'.new')
 
 
-		#	2. Apply overrides
-		if overrides:
-			print "kernel_config: using " + overrides + \
-							" to re-configure kernel"
-			self.over_config = config_dir + '/config.over'
-			overrides_local = self.over_config + '.changes'
-			get_file(overrides, overrides_local)
-			apply_overrides(self.build_config, overrides_local, self.over_config)
-			self.update_config(self.over_config, self.over_config+'.new')
-			diff_configs(self.over_config, self.over_config+'.new')
-		else:
-			self.over_config = self.orig_config
+        #       2. Apply overrides
+        if overrides:
+            print "kernel_config: using " + overrides + \
+                                            " to re-configure kernel"
+            self.over_config = config_dir + '/config.over'
+            overrides_local = self.over_config + '.changes'
+            get_file(overrides, overrides_local)
+            apply_overrides(self.build_config, overrides_local, self.over_config)
+            self.update_config(self.over_config, self.over_config+'.new')
+            diff_configs(self.over_config, self.over_config+'.new')
+        else:
+            self.over_config = self.orig_config
 
 
-	def update_config(self, old_config, new_config = 'None'):
-		os.chdir(self.build_dir)
-		shutil.copyfile(old_config, self.build_config)
-		utils.system('yes "" | make oldconfig > /dev/null')
-		if new_config:
-			shutil.copyfile(self.build_config, new_config)
+    def update_config(self, old_config, new_config=None):
+        os.chdir(self.build_dir)
+        shutil.copyfile(old_config, self.build_config)
+        utils.system('yes "" | make oldconfig > /dev/null')
+        if new_config:
+            shutil.copyfile(self.build_config, new_config)
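
For reference, a minimal usage sketch of the class above. The paths, the job
object, and the class name kernel_config (inferred from the messages it
prints) are assumptions for illustration, not part of the patch:

    # Hypothetical illustration: configure a tree from a fetched config,
    # no overrides, then refresh the result with 'make oldconfig'.
    conf = kernel_config(job, '/usr/local/src/linux', '/tmp/configs',
                         'http://example.com/config-2.6.18', None)
    conf.update_config('/tmp/configs/config.orig',
                       '/tmp/configs/config.orig.new')
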
diff --git a/client/bin/kernel_versions.py b/client/bin/kernel_versions.py
index 3af08fe..9b04beb 100644
--- a/client/bin/kernel_versions.py
+++ b/client/bin/kernel_versions.py
@@ -5,111 +5,109 @@
 
 import sys,re
 
-# 
+#
 # Sort key for ordering versions chronologically.  The key ordering
 # problem is between that introduced by -rcN.  These come _before_
 # their accompanying version.
-# 
+#
 #   2.6.0 -> 2.6.1-rc1 -> 2.6.1
-# 
+#
 # In order to sort them we convert all non-rc releases to a pseudo
 # -rc99 release.  We also convert all numbers to two digits.  The
 # result is then sortable textually.
-# 
+#
 #   02.06.00-rc99 -> 02.06.01-rc01 -> 02.06.01-rc99
-# 
+#
 encode_sep = re.compile(r'(\D+)')
 
 def version_encode(version):
-	bits = encode_sep.split(version)
-	n = 9
-	if len(bits[0]) == 0:
-		n += 2
-	if len(bits) == n or (len(bits) > n and bits[n] != '_rc'):
-		# Insert missing _rc99 after 2 . 6 . 18 -smp- 220 . 0 
-		bits.insert(n, '_rc')
-		bits.insert(n+1, '99')
-	n = 5
-	if len(bits[0]) == 0:
-		n += 2
-	if len(bits) <= n or bits[n] != '-rc':
-		bits.insert(n, '-rc')
-		bits.insert(n+1, '99')
-	for n in range(0, len(bits), 2):
-		if len(bits[n]) == 1:
-			bits[n] = '0' + bits[n]
+    bits = encode_sep.split(version)
+    n = 9
+    if len(bits[0]) == 0:
+        n += 2
+    if len(bits) == n or (len(bits) > n and bits[n] != '_rc'):
+        # Insert missing _rc99 after 2 . 6 . 18 -smp- 220 . 0
+        bits.insert(n, '_rc')
+        bits.insert(n+1, '99')
+    n = 5
+    if len(bits[0]) == 0:
+        n += 2
+    if len(bits) <= n or bits[n] != '-rc':
+        bits.insert(n, '-rc')
+        bits.insert(n+1, '99')
+    for n in range(0, len(bits), 2):
+        if len(bits[n]) == 1:
+            bits[n] = '0' + bits[n]
 
-	return ''.join(bits)
+    return ''.join(bits)
 
 
 def version_limit(version, n):
-	bits = encode_sep.split(version)
-	return ''.join(bits[0:n])
+    bits = encode_sep.split(version)
+    return ''.join(bits[0:n])
 
 
 def version_len(version):
-	return len(encode_sep.split(version))
+    return len(encode_sep.split(version))
 
 #
 # Given a list of versions find the nearest version which is deemed
 # less than or equal to the target.  Versions are in linux order
 # as follows:
-# 
+#
 #   2.6.0 -> 2.6.1 -> 2.6.2-rc1 -> 2.6.2-rc2 -> 2.6.2 -> 2.6.3-rc1
 #              |        |\
 #              |        | 2.6.2-rc1-mm1 -> 2.6.2-rc1-mm2
 #              |        \
 #              |         2.6.2-rc1-ac1 -> 2.6.2-rc1-ac2
-#              \   
+#              \
 #               2.6.1-mm1 -> 2.6.1-mm2
-# 
+#
 # Note that a 2.6.1-mm1 is not a predecessor of 2.6.2-rc1-mm1.
 #
 def version_choose_config(version, candidates):
-	# Check if we have an exact match ... if so magic
-	if version in candidates:
-		return version
+    # Check if we have an exact match ... if so magic
+    if version in candidates:
+        return version
 
-	# Sort the search key into the list ordered by 'age'
-	deco = [ (version_encode(v), i, v) for i, v in
-					enumerate(candidates + [ version ]) ]
-	deco.sort()
-	versions = [ v for _, _, v in deco ]
+    # Sort the search key into the list ordered by 'age'
+    deco = [ (version_encode(v), i, v) for i, v in
+                                    enumerate(candidates + [ version ]) ]
+    deco.sort()
+    versions = [ v for _, _, v in deco ]
 
-	# Everything sorted below us is of interst.
-	for n in range(len(versions) - 1, -1, -1):
-		if versions[n] == version:
-			break
-	n -= 1
+    # Everything sorted below us is of interest.
+    for n in range(len(versions) - 1, -1, -1):
+        if versions[n] == version:
+            break
+    n -= 1
 
-	# Try ever shorter 'prefixes' 2.6.20-rc3-mm, 2.6.20-rc, 2.6. etc
-	# to match against the ordered list newest to oldest.
-	length = version_len(version) - 1
-	version = version_limit(version, length)
-	while length > 1:
-		for o in range(n, -1, -1):
-			if version_len(versions[o]) == (length + 1) and \
-			   version_limit(versions[o], length) == version:
-				return versions[o]
-		length -= 2
-		version = version_limit(version, length)
+    # Try ever shorter 'prefixes' 2.6.20-rc3-mm, 2.6.20-rc, 2.6. etc
+    # to match against the ordered list newest to oldest.
+    length = version_len(version) - 1
+    version = version_limit(version, length)
+    while length > 1:
+        for o in range(n, -1, -1):
+            if version_len(versions[o]) == (length + 1) and \
+               version_limit(versions[o], length) == version:
+                return versions[o]
+        length -= 2
+        version = version_limit(version, length)
 
-	return None
+    return None
 
 
 def is_released_kernel(version):
-	# True if version name suggests a released kernel,  
-	#   not some release candidate or experimental kernel name
-	#   e.g.  2.6.18-smp-200.0  includes no other text, underscores, etc
-	version = version.strip('01234567890.-')
-	return version in ['', 'smp', 'smpx', 'pae']
+    # True if version name suggests a released kernel,
+    #   not some release candidate or experimental kernel name
+    #   e.g.  2.6.18-smp-200.0  includes no other text, underscores, etc
+    version = version.strip('01234567890.-')
+    return version in ['', 'smp', 'smpx', 'pae']
 
 
 def is_release_candidate(version):
-	# True if version names a released kernel or release candidate,
-	#   not some experimental name containing arbitrary text
-	#   e.g.  2.6.18-smp-220.0_rc3  but not  2.6.18_patched
-	version = re.sub(r'[_-]rc\d+', '', version)
-	return is_released_kernel(version)
-
-
+    # True if version names a released kernel or release candidate,
+    #   not some experimental name containing arbitrary text
+    #   e.g.  2.6.18-smp-220.0_rc3  but not  2.6.18_patched
+    version = re.sub(r'[_-]rc\d+', '', version)
+    return is_released_kernel(version)
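
A quick sanity check of the encoding and nearest-version lookup above; the
values follow directly from the code, assuming kernel_versions is importable:

    from kernel_versions import version_encode, version_choose_config

    # Non-rc releases become pseudo -rc99 and numbers are zero-padded,
    # so release candidates sort before their release:
    assert version_encode('2.6.0') == '02.06.00-rc99'
    assert version_encode('2.6.1-rc1') == '02.06.01-rc01'
    assert version_encode('2.6.1-rc1') < version_encode('2.6.1')

    # Exact matches are returned as-is; otherwise the nearest older
    # version with a matching prefix wins:
    assert version_choose_config('2.6.2', ['2.6.2', '2.6.3']) == '2.6.2'
    assert version_choose_config('2.6.3', ['2.6.2']) == '2.6.2'
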
diff --git a/client/bin/kernel_versions_unittest.py b/client/bin/kernel_versions_unittest.py
index fc21a65..b63ecc8 100755
--- a/client/bin/kernel_versions_unittest.py
+++ b/client/bin/kernel_versions_unittest.py
@@ -5,83 +5,83 @@
 
 class kernel_versions_test(unittest.TestCase):
 
-	def increases(self, kernels):
-		for i in xrange(len(kernels)-1):
-			k1 = kernels[i]
-			k2 = kernels[i+1]
-			ek1 = version_encode(k1)
-			ek2 = version_encode(k2)
-			self.assert_(ek1 < ek2, 
-				'%s (-> %s)  should sort <  %s (-> %s)'
-				% (k1, ek1, k2, ek2) )
+    def increases(self, kernels):
+        for i in xrange(len(kernels)-1):
+            k1 = kernels[i]
+            k2 = kernels[i+1]
+            ek1 = version_encode(k1)
+            ek2 = version_encode(k2)
+            self.assert_(ek1 < ek2,
+                    '%s (-> %s)  should sort <  %s (-> %s)'
+                    % (k1, ek1, k2, ek2) )
 
 
-	def test_version_encode(self):
-		series1 = [
-			'2.6',
-			'2.6.0',
-			'2.6.1-rc1',
-			'2.6.1-rc1_fix',
-			'2.6.1-rc1_patch',
-			'2.6.1-rc9',
-			'2.6.1-rc9-mm1',
-			'2.6.1-rc9-mm2',
-			'2.6.1-rc10',
-			'2.6.1-rc98',
-			'2.6.1',
-			'2.6.1_patch',
-			'2.6.9',
-			'2.6.10',
-			'2.6.99',
-			'2.7',
-			'2.9.99',
-			'2.10.0',
-			'99.99.99',
-			'UNKNOWN',
-			]
-		self.increases(series1)
-		self.increases(['pathX'+k for k in series1])
-		series2 = [
-			'2.6.18-smp-220',
-			'2.6.18-smp-220.0',
-			'2.6.18-smp-220.1_rc1',
-			'2.6.18-smp-220.1_rc1_fix',
-			'2.6.18-smp-220.1_rc1_patch',
-			'2.6.18-smp-220.1_rc9',
-			'2.6.18-smp-220.1_rc9_mm1',
-			'2.6.18-smp-220.1_rc9_mm2',
-			'2.6.18-smp-220.1_rc10',
-			'2.6.18-smp-220.1_rc98',
-			'2.6.18-smp-220.1',
-			'2.6.18-smp-220.1_patch',
-			'2.6.18-smp-220.9',
-			'2.6.18-smp-220.10',
-			'2.6.18-smp-220.99',
-			'2.6.18-smp-221',
-			'UNKNOWN',
-			]
-		self.increases(series2)
-		self.increases(['pathX'+k for k in series2])
+    def test_version_encode(self):
+        series1 = [
+                '2.6',
+                '2.6.0',
+                '2.6.1-rc1',
+                '2.6.1-rc1_fix',
+                '2.6.1-rc1_patch',
+                '2.6.1-rc9',
+                '2.6.1-rc9-mm1',
+                '2.6.1-rc9-mm2',
+                '2.6.1-rc10',
+                '2.6.1-rc98',
+                '2.6.1',
+                '2.6.1_patch',
+                '2.6.9',
+                '2.6.10',
+                '2.6.99',
+                '2.7',
+                '2.9.99',
+                '2.10.0',
+                '99.99.99',
+                'UNKNOWN',
+                ]
+        self.increases(series1)
+        self.increases(['pathX'+k for k in series1])
+        series2 = [
+                '2.6.18-smp-220',
+                '2.6.18-smp-220.0',
+                '2.6.18-smp-220.1_rc1',
+                '2.6.18-smp-220.1_rc1_fix',
+                '2.6.18-smp-220.1_rc1_patch',
+                '2.6.18-smp-220.1_rc9',
+                '2.6.18-smp-220.1_rc9_mm1',
+                '2.6.18-smp-220.1_rc9_mm2',
+                '2.6.18-smp-220.1_rc10',
+                '2.6.18-smp-220.1_rc98',
+                '2.6.18-smp-220.1',
+                '2.6.18-smp-220.1_patch',
+                '2.6.18-smp-220.9',
+                '2.6.18-smp-220.10',
+                '2.6.18-smp-220.99',
+                '2.6.18-smp-221',
+                'UNKNOWN',
+                ]
+        self.increases(series2)
+        self.increases(['pathX'+k for k in series2])
 
 
-	releases    = ['2.6.1'      , '2.6.18-smp-220.0'   ]
-	candidates  = ['2.6.1-rc1'  , '2.6.18-smp-220.0_rc1']
-	experiments = ['2.6.1-patch', '2.6.1-rc1_patch', 
-			'2.6.18-smp-220.0_patch', 'UNKNOWN']
+    releases    = ['2.6.1'      , '2.6.18-smp-220.0'   ]
+    candidates  = ['2.6.1-rc1'  , '2.6.18-smp-220.0_rc1']
+    experiments = ['2.6.1-patch', '2.6.1-rc1_patch',
+                    '2.6.18-smp-220.0_patch', 'UNKNOWN']
 
-	def test_is_released_kernel(self):
-		for v in self.releases:
-			self.assert_(    is_released_kernel(v))
-		for v in self.candidates + self.experiments:
-			self.assert_(not is_released_kernel(v))
+    def test_is_released_kernel(self):
+        for v in self.releases:
+            self.assert_(    is_released_kernel(v))
+        for v in self.candidates + self.experiments:
+            self.assert_(not is_released_kernel(v))
 
 
-	def test_is_release_candidate(self):
-		for v in self.releases + self.candidates:
-			self.assert_(    is_release_candidate(v))
-		for v in self.experiments:
-			self.assert_(not is_release_candidate(v))
+    def test_is_release_candidate(self):
+        for v in self.releases + self.candidates:
+            self.assert_(    is_release_candidate(v))
+        for v in self.experiments:
+            self.assert_(not is_release_candidate(v))
 
 
 if  __name__ == "__main__":
-	unittest.main()
+    unittest.main()
diff --git a/client/bin/kernelexpand-test.py b/client/bin/kernelexpand-test.py
index 3c34010..24b4ce0 100755
--- a/client/bin/kernelexpand-test.py
+++ b/client/bin/kernelexpand-test.py
@@ -11,136 +11,136 @@
 akpml = 'http://www.example.com/mirror/akpm/'
 
 mirrorA = [
-	[ akpm, akpml ],
-	[ km, kml ],
+        [ akpm, akpml ],
+        [ km, kml ],
 ]
 
 class kernelexpandTest(unittest.TestCase):
-	def test_decompose_simple(self):
-		correct = [
-		  [ km + 'v2.6/linux-2.6.23.tar.bz2' ]
-		]
-		sample = decompose_kernel('2.6.23')
-		self.assertEqual(sample, correct)
+    def test_decompose_simple(self):
+        correct = [
+          [ km + 'v2.6/linux-2.6.23.tar.bz2' ]
+        ]
+        sample = decompose_kernel('2.6.23')
+        self.assertEqual(sample, correct)
 
 
-	def test_decompose_fail(self):
-		success = False
-		try:
-			sample = decompose_kernel('1.0.0.0.0')
-			success = True
-		except NameError:
-			pass
-		except Exception, e:
-			self.fail('expected NameError, got something else')
+    def test_decompose_fail(self):
+        success = False
+        try:
+            sample = decompose_kernel('1.0.0.0.0')
+            success = True
+        except NameError:
+            pass
+        except Exception, e:
+            self.fail('expected NameError, got something else')
 
-		if success:
-			self.fail('expected NameError, was successful')
+        if success:
+            self.fail('expected NameError, was successful')
 
 
-	def test_decompose_rcN(self):
-		correct = [
-		  [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
-		    km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2']
-		]
-		sample = decompose_kernel('2.6.23-rc1')
-		self.assertEqual(sample, correct)
-
-	
-	def test_decompose_mmN(self):
-		correct = [
-		  [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
-		  [ akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
-		]
-		sample = decompose_kernel('2.6.23-mm1')
-		self.assertEqual(sample, correct)
+    def test_decompose_rcN(self):
+        correct = [
+          [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+            km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2']
+        ]
+        sample = decompose_kernel('2.6.23-rc1')
+        self.assertEqual(sample, correct)
 
 
-	def test_decompose_gitN(self):
-		correct = [
-		  [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
-		  [ km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
-		    km + 'v2.6/snapshots/patch-2.6.23-git1.bz2']
-		] 
-		sample = decompose_kernel('2.6.23-git1')
-		self.assertEqual(sample, correct)
+    def test_decompose_mmN(self):
+        correct = [
+          [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
+          [ akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
+        ]
+        sample = decompose_kernel('2.6.23-mm1')
+        self.assertEqual(sample, correct)
 
 
-	def test_decompose_rcN_mmN(self):
-		correct = [
-		  [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
-		    km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ],
-		  [ akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2']
-		]
-		sample = decompose_kernel('2.6.23-rc1-mm1')
-		self.assertEqual(sample, correct)
+    def test_decompose_gitN(self):
+        correct = [
+          [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
+          [ km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+            km + 'v2.6/snapshots/patch-2.6.23-git1.bz2']
+        ]
+        sample = decompose_kernel('2.6.23-git1')
+        self.assertEqual(sample, correct)
 
 
-	def test_mirrorA_simple(self):
-		correct = [
-		  [ kml + 'v2.6/linux-2.6.23.tar.bz2',
-		    km + 'v2.6/linux-2.6.23.tar.bz2' ]
-		]
-		sample = decompose_kernel('2.6.23')
-		sample = mirror_kernel_components(mirrorA, sample)
-
-		self.assertEqual(sample, correct)
+    def test_decompose_rcN_mmN(self):
+        correct = [
+          [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+            km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ],
+          [ akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2']
+        ]
+        sample = decompose_kernel('2.6.23-rc1-mm1')
+        self.assertEqual(sample, correct)
 
 
-	def test_mirrorA_rcN(self):
-		correct = [
-		  [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
-		    kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
-		    km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
-		    km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ]
-		]
-		sample = decompose_kernel('2.6.23-rc1')
-		sample = mirror_kernel_components(mirrorA, sample)
-		self.assertEqual(sample, correct)
+    def test_mirrorA_simple(self):
+        correct = [
+          [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+            km + 'v2.6/linux-2.6.23.tar.bz2' ]
+        ]
+        sample = decompose_kernel('2.6.23')
+        sample = mirror_kernel_components(mirrorA, sample)
 
-	
-	def test_mirrorA_mmN(self):
-		correct = [
-		  [ kml + 'v2.6/linux-2.6.23.tar.bz2',
-		    km + 'v2.6/linux-2.6.23.tar.bz2'],
-		  [ akpml + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
-		    kml + 'people/akpm/patches/2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
-		    akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
-		]
-
-		sample = decompose_kernel('2.6.23-mm1')
-		sample = mirror_kernel_components(mirrorA, sample)
-		self.assertEqual(sample, correct)
+        self.assertEqual(sample, correct)
 
 
-	def test_mirrorA_gitN(self):
-		correct = [
-		  [ kml + 'v2.6/linux-2.6.23.tar.bz2',
-		    km + 'v2.6/linux-2.6.23.tar.bz2'],
-		  [ kml + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
-		    kml + 'v2.6/snapshots/patch-2.6.23-git1.bz2',
-		    km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
-		    km + 'v2.6/snapshots/patch-2.6.23-git1.bz2' ]
-		]
-		sample = decompose_kernel('2.6.23-git1')
-		sample = mirror_kernel_components(mirrorA, sample)
-		self.assertEqual(sample, correct)
+    def test_mirrorA_rcN(self):
+        correct = [
+          [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+            kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
+            km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+            km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ]
+        ]
+        sample = decompose_kernel('2.6.23-rc1')
+        sample = mirror_kernel_components(mirrorA, sample)
+        self.assertEqual(sample, correct)
 
 
-	def test_mirrorA_rcN_mmN(self):
-		correct = [
-		  [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
-		    kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
-		    km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
-		    km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2'],
-		  [ akpml + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
-		    kml + 'people/akpm/patches/2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
-		    akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2' ]
-		]
-		sample = decompose_kernel('2.6.23-rc1-mm1')
-		sample = mirror_kernel_components(mirrorA, sample)
-		self.assertEqual(sample, correct)
+    def test_mirrorA_mmN(self):
+        correct = [
+          [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+            km + 'v2.6/linux-2.6.23.tar.bz2'],
+          [ akpml + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
+            kml + 'people/akpm/patches/2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
+            akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
+        ]
+
+        sample = decompose_kernel('2.6.23-mm1')
+        sample = mirror_kernel_components(mirrorA, sample)
+        self.assertEqual(sample, correct)
+
+
+    def test_mirrorA_gitN(self):
+        correct = [
+          [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+            km + 'v2.6/linux-2.6.23.tar.bz2'],
+          [ kml + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+            kml + 'v2.6/snapshots/patch-2.6.23-git1.bz2',
+            km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+            km + 'v2.6/snapshots/patch-2.6.23-git1.bz2' ]
+        ]
+        sample = decompose_kernel('2.6.23-git1')
+        sample = mirror_kernel_components(mirrorA, sample)
+        self.assertEqual(sample, correct)
+
+
+    def test_mirrorA_rcN_mmN(self):
+        correct = [
+          [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+            kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
+            km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+            km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2'],
+          [ akpml + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
+            kml + 'people/akpm/patches/2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
+            akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2' ]
+        ]
+        sample = decompose_kernel('2.6.23-rc1-mm1')
+        sample = mirror_kernel_components(mirrorA, sample)
+        self.assertEqual(sample, correct)
 
 
 if __name__ == '__main__':
-	unittest.main()
+    unittest.main()
diff --git a/client/bin/kernelexpand.py b/client/bin/kernelexpand.py
index 73028fa..e23b865 100755
--- a/client/bin/kernelexpand.py
+++ b/client/bin/kernelexpand.py
@@ -12,180 +12,180 @@
 
 kernel = 'http://www.kernel.org/pub/linux/kernel/'
 mappings = [
-	[ r'^\d+\.\d+\.\d+$', '', True, [
-		kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
-	]],
-	[ r'^\d+\.\d+\.\d+\.\d+$', '', True, [
-		kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
-	]],
-	[ r'-rc\d+$', '%(minor-prev)s', True, [
-		kernel + 'v%(major)s/testing/v%(minor)s/linux-%(full)s.tar.bz2',
-		kernel + 'v%(major)s/testing/linux-%(full)s.tar.bz2',
-	]],
-	[ r'-(git|bk)\d+$', '%(base)s', False, [
-		kernel + 'v%(major)s/snapshots/old/patch-%(full)s.bz2',
-		kernel + 'v%(major)s/snapshots/patch-%(full)s.bz2',
-	]],
-	[ r'-mm\d+$', '%(base)s', False, [
-		kernel + 'people/akpm/patches/' +
-			'%(major)s/%(base)s/%(full)s/%(full)s.bz2'
-	]],
-	[ r'-mjb\d+$', '%(base)s', False, [
-		kernel + 'people/mbligh/%(base)s/patch-%(full)s.bz2'
-	]]
+        [ r'^\d+\.\d+\.\d+$', '', True, [
+                kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
+        ]],
+        [ r'^\d+\.\d+\.\d+\.\d+$', '', True, [
+                kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
+        ]],
+        [ r'-rc\d+$', '%(minor-prev)s', True, [
+                kernel + 'v%(major)s/testing/v%(minor)s/linux-%(full)s.tar.bz2',
+                kernel + 'v%(major)s/testing/linux-%(full)s.tar.bz2',
+        ]],
+        [ r'-(git|bk)\d+$', '%(base)s', False, [
+                kernel + 'v%(major)s/snapshots/old/patch-%(full)s.bz2',
+                kernel + 'v%(major)s/snapshots/patch-%(full)s.bz2',
+        ]],
+        [ r'-mm\d+$', '%(base)s', False, [
+                kernel + 'people/akpm/patches/' +
+                        '%(major)s/%(base)s/%(full)s/%(full)s.bz2'
+        ]],
+        [ r'-mjb\d+$', '%(base)s', False, [
+                kernel + 'people/mbligh/%(base)s/patch-%(full)s.bz2'
+        ]]
 ];
 
 def decompose_kernel_once(kernel):
-	##print "S<" + kernel + ">"
-	for mapping in mappings:
-		(suffix, becomes, is_full, patch_templates) = mapping
+    ##print "S<" + kernel + ">"
+    for mapping in mappings:
+        (suffix, becomes, is_full, patch_templates) = mapping
 
-		params = {}
+        params = {}
 
-		match = re.search(r'^(.*)' + suffix, kernel)
-		if not match:
-			continue
+        match = re.search(r'^(.*)' + suffix, kernel)
+        if not match:
+            continue
 
-		# Generate the parameters for the patches:
-		#  full         => full kernel name
-		#  base         => all but the matches suffix
-		#  minor        => 2.n.m
-		#  major        => 2.n
-		#  minor-prev   => 2.n.m-1
-		params['full'] = kernel
-		params['base'] = match.group(1)
+        # Generate the parameters for the patches:
+        #  full         => full kernel name
+        #  base         => all but the matched suffix
+        #  minor        => 2.n.m
+        #  major        => 2.n
+        #  minor-prev   => 2.n.m-1
+        params['full'] = kernel
+        params['base'] = match.group(1)
 
-		match = re.search(r'^((\d+\.\d+)\.(\d+))', kernel)
-		if not match:
-			raise "unable to determine major/minor version"
-		params['minor'] = match.group(1)
-		params['major'] = match.group(2)
-		params['minor-prev'] = match.group(2) + \
-					'.%d' % (int(match.group(3)) - 1)
+        match = re.search(r'^((\d+\.\d+)\.(\d+))', kernel)
+        if not match:
+            raise "unable to determine major/minor version"
+        params['minor'] = match.group(1)
+        params['major'] = match.group(2)
+        params['minor-prev'] = match.group(2) + \
+                                '.%d' % (int(match.group(3)) - 1)
 
-		# Build the new kernel and patch list.
-		new_kernel = becomes % params
-		patch_list = []
-		for template in patch_templates:
-			patch_list.append(template % params)
-		
-		return (is_full, new_kernel, patch_list)
+        # Build the new kernel and patch list.
+        new_kernel = becomes % params
+        patch_list = []
+        for template in patch_templates:
+            patch_list.append(template % params)
 
-	return (True, kernel, None)
+        return (is_full, new_kernel, patch_list)
+
+    return (True, kernel, None)
 
 
 def decompose_kernel(kernel):
-	kernel_patches = []
+    kernel_patches = []
 
-	done = False
-	while not done:
-		(done, kernel, patch_list) = decompose_kernel_once(kernel)
-		if patch_list:
-			kernel_patches.insert(0, patch_list)
-	if not len(kernel_patches):
-		raise NameError('kernelexpand: %s: unknown kernel' % (kernel))
+    done = False
+    while not done:
+        (done, kernel, patch_list) = decompose_kernel_once(kernel)
+        if patch_list:
+            kernel_patches.insert(0, patch_list)
+    if not len(kernel_patches):
+        raise NameError('kernelexpand: %s: unknown kernel' % (kernel))
 
-	return kernel_patches
+    return kernel_patches
 
 
 # Look for and add potential mirrors.
 def mirror_kernel_components(mirrors, components):
-	new_components = []
-	for component in components:
-		new_patches = []
-		for mirror in mirrors:
-			(prefix, local) = mirror
-			for patch in component:
-				if patch.startswith(prefix):
-					new_patch = local + \
-							patch[len(prefix):]
-					new_patches.append(new_patch)
-		for patch in component:
-			new_patches.append(patch)
-		new_components.append(new_patches)
-	
-	return new_components
+    new_components = []
+    for component in components:
+        new_patches = []
+        for mirror in mirrors:
+            (prefix, local) = mirror
+            for patch in component:
+                if patch.startswith(prefix):
+                    new_patch = local + \
+                                    patch[len(prefix):]
+                    new_patches.append(new_patch)
+        for patch in component:
+            new_patches.append(patch)
+        new_components.append(new_patches)
+
+    return new_components
 
 
 def url_accessible(url):
-	status = os.system("wget --spider -q '%s'" % (url))
-	#print url + ": status=%d" % (status)
-	
-	return status == 0
+    status = os.system("wget --spider -q '%s'" % (url))
+    #print url + ": status=%d" % (status)
+
+    return status == 0
 
 
 def select_kernel_components(components):
-	new_components = []
-	for component in components:
-		new_patches = []
-		for patch in component:
-			if url_accessible(patch):
-				new_patches.append(patch)
-				break
-		if not len(new_patches):
-			new_patches.append(component[-1])
-		new_components.append(new_patches)
-	return new_components
+    new_components = []
+    for component in components:
+        new_patches = []
+        for patch in component:
+            if url_accessible(patch):
+                new_patches.append(patch)
+                break
+        if not len(new_patches):
+            new_patches.append(component[-1])
+        new_components.append(new_patches)
+    return new_components
 
 
 def expand_classic(kernel, mirrors):
-	components = decompose_kernel(kernel)
-	if mirrors:
-		components = mirror_kernel_components(mirrors, components)
-	components = select_kernel_components(components)
+    components = decompose_kernel(kernel)
+    if mirrors:
+        components = mirror_kernel_components(mirrors, components)
+    components = select_kernel_components(components)
 
-	patches = []
-	for component in components:
-		patches.append(component[0])
+    patches = []
+    for component in components:
+        patches.append(component[0])
 
-	return patches
+    return patches
 
 
 if __name__ == '__main__':
-	from optparse import OptionParser
+    from optparse import OptionParser
 
-	parser = OptionParser()
+    parser = OptionParser()
 
-	parser.add_option("-m", "--mirror",
-		type="string", dest="mirror", action="append", nargs=2,
-		help="mirror prefix")
-	parser.add_option("-v", "--no-validate", dest="validate",
-		action="store_false", default=True,
-		help="prune invalid entries")
+    parser.add_option("-m", "--mirror",
+            type="string", dest="mirror", action="append", nargs=2,
+            help="mirror prefix")
+    parser.add_option("-v", "--no-validate", dest="validate",
+            action="store_false", default=True,
+            help="prune invalid entries")
 
-	def usage():
-		parser.print_help()
-		sys.exit(1)
+    def usage():
+        parser.print_help()
+        sys.exit(1)
 
-	options, args = parser.parse_args()
+    options, args = parser.parse_args()
 
-	# Check for a kernel version
-	if len(args) != 1:
-		usage()
-	kernel = args[0]
+    # Check for a kernel version
+    if len(args) != 1:
+        usage()
+    kernel = args[0]
 
-	#mirrors = [
-	#	[ 'http://www.kernel.org/pub/linux/kernel/v2.4',
-	#	  'http://kernel.beaverton.ibm.com/mirror/v2.4' ],
-	#	[ 'http://www.kernel.org/pub/linux/kernel/v2.6',
-	#	  'http://kernel.beaverton.ibm.com/mirror/v2.6' ],
-	#	[ 'http://www.kernel.org/pub/linux/kernel/people/akpm/patches',
-	#	  'http://kernel.beaverton.ibm.com/mirror/akpm' ],
-	#]
-	mirrors = options.mirror
+    #mirrors = [
+    #       [ 'http://www.kernel.org/pub/linux/kernel/v2.4',
+    #         'http://kernel.beaverton.ibm.com/mirror/v2.4' ],
+    #       [ 'http://www.kernel.org/pub/linux/kernel/v2.6',
+    #         'http://kernel.beaverton.ibm.com/mirror/v2.6' ],
+    #       [ 'http://www.kernel.org/pub/linux/kernel/people/akpm/patches',
+    #         'http://kernel.beaverton.ibm.com/mirror/akpm' ],
+    #]
+    mirrors = options.mirror
 
-	try:
-		components = decompose_kernel(kernel)
-	except NameError, e:
-		sys.stderr.write(e.args[0] + "\n")
-		sys.exit(1)
+    try:
+        components = decompose_kernel(kernel)
+    except NameError, e:
+        sys.stderr.write(e.args[0] + "\n")
+        sys.exit(1)
 
-	if mirrors:
-		components = mirror_kernel_components(mirrors, components)
+    if mirrors:
+        components = mirror_kernel_components(mirrors, components)
 
-	if options.validate:
-		components = select_kernel_components(components)
+    if options.validate:
+        components = select_kernel_components(components)
 
-	# Dump them out.
-	for component in components:
-		print " ".join(component)
+    # Dump them out.
+    for component in components:
+        print " ".join(component)
diff --git a/client/bin/os_dep.py b/client/bin/os_dep.py
index f61d46d..9022913 100644
--- a/client/bin/os_dep.py
+++ b/client/bin/os_dep.py
@@ -8,30 +8,30 @@
 """
 
 def command(cmd):
-	# this could use '/usr/bin/which', I suppose. But this seems simpler
-	for dir in os.environ['PATH'].split(':'):
-		file = os.path.join(dir, cmd)
-		if os.path.exists(file):
-			return file
-	raise ValueError('Missing command: %s' % cmd)
+    # this could use '/usr/bin/which', I suppose. But this seems simpler
+    for dir in os.environ['PATH'].split(':'):
+        file = os.path.join(dir, cmd)
+        if os.path.exists(file):
+            return file
+    raise ValueError('Missing command: %s' % cmd)
 
 
 def commands(*cmds):
-	results = []
-	for cmd in cmds:
-		results.append(command(cmd))
+    results = []
+    for cmd in cmds:
+        results.append(command(cmd))
+    return results
 
 
 def library(lib):
-	lddirs = [x.rstrip() for x in open('/etc/ld.so.conf', 'r').readlines()]
-	for dir in ['/lib', '/usr/lib'] + lddirs:
-		file = os.path.join(dir, lib)
-		if os.path.exists(file):
-			return file
-	raise ValueError('Missing library: %s' % lib)
+    lddirs = [x.rstrip() for x in open('/etc/ld.so.conf', 'r').readlines()]
+    for dir in ['/lib', '/usr/lib'] + lddirs:
+        file = os.path.join(dir, lib)
+        if os.path.exists(file):
+            return file
+    raise ValueError('Missing library: %s' % lib)
 
 
 def libraries(*libs):
-	results = []
-	for lib in libs:
-		results.append(library(lib))
+    results = []
+    for lib in libs:
+        results.append(library(lib))
+    return results
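
Typical use is failing fast during test setup when a host dependency is
missing; this sketch assumes gcc and libc are actually present on the host:

    import os_dep

    gcc = os_dep.command('gcc')         # absolute path, or ValueError
    libc = os_dep.library('libc.so.6')  # searched in /lib, /usr/lib, ld.so.conf
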
diff --git a/client/bin/package.py b/client/bin/package.py
index c889a9c..42a8604 100644
--- a/client/bin/package.py
+++ b/client/bin/package.py
@@ -1,5 +1,5 @@
 """
-Functions to handle software packages. The functions covered here aim to be 
+Functions to handle software packages. The functions covered here aim to be
 generic, with implementations that deal with different package managers, such
 as dpkg and rpm.
 """
@@ -15,277 +15,277 @@
 
 
 def __rpm_info(rpm_package):
-	"""\
-	Private function that returns a dictionary with information about an 
-	RPM package file
-	- type: Package management program that handles the file
-	- system_support: If the package management program is installed on the
-	system or not
-	- source: If it is a source (True) our binary (False) package
-	- version: The package version (or name), that is used to check against the
-	package manager if the package is installed
-	- arch: The architecture for which a binary package was built
-	- installed: Whether the package is installed (True) on the system or not
-	(False)
-	"""
-	# We will make good use of what the file command has to tell us about the
-	# package :)
-	file_result = utils.system_output('file ' + rpm_package)
-	package_info = {}
-	package_info['type'] = 'rpm'
-	try:
-		os_dep.command('rpm')
-		# Build the command strings that will be used to get package info
-		# s_cmd - Command to determine if package is a source package
-		# a_cmd - Command to determine package architecture
-		# v_cmd - Command to determine package version
-		# i_cmd - Command to determiine if package is installed
-		s_cmd = 'rpm -qp --qf %{SOURCE} ' + rpm_package + ' 2>/dev/null'
-		a_cmd = 'rpm -qp --qf %{ARCH} ' + rpm_package + ' 2>/dev/null'
-		v_cmd = 'rpm -qp ' + rpm_package + ' 2>/dev/null' 
-		i_cmd = 'rpm -q ' + utils.system_output(v_cmd) + ' 2>&1 >/dev/null' 
+    """\
+    Private function that returns a dictionary with information about an
+    RPM package file
+    - type: Package management program that handles the file
+    - system_support: If the package management program is installed on the
+    system or not
+    - source: If it is a source (True) or binary (False) package
+    - version: The package version (or name), that is used to check against the
+    package manager if the package is installed
+    - arch: The architecture for which a binary package was built
+    - installed: Whether the package is installed (True) on the system or not
+    (False)
+    """
+    # We will make good use of what the file command has to tell us about the
+    # package :)
+    file_result = utils.system_output('file ' + rpm_package)
+    package_info = {}
+    package_info['type'] = 'rpm'
+    try:
+        os_dep.command('rpm')
+        # Build the command strings that will be used to get package info
+        # s_cmd - Command to determine if package is a source package
+        # a_cmd - Command to determine package architecture
+        # v_cmd - Command to determine package version
+        # i_cmd - Command to determine if package is installed
+        s_cmd = 'rpm -qp --qf %{SOURCE} ' + rpm_package + ' 2>/dev/null'
+        a_cmd = 'rpm -qp --qf %{ARCH} ' + rpm_package + ' 2>/dev/null'
+        v_cmd = 'rpm -qp ' + rpm_package + ' 2>/dev/null'
+        i_cmd = 'rpm -q ' + utils.system_output(v_cmd) + ' 2>&1 >/dev/null'
 
-		package_info['system_support'] = True
-		# Checking whether this is a source or src package
-		source = utils.system_output(s_cmd)
-		if source == '(none)':
-			package_info['source'] = False
-		else:
-			package_info['source'] = True
-		package_info['version'] = utils.system_output(v_cmd)
-		package_info['arch'] = utils.system_output(a_cmd)
-		# Checking if package is installed
-		try:
-			utils.system(i_cmd)
-			package_info['installed'] = True
-		except:
-			package_info['installed'] = False
+        package_info['system_support'] = True
+        # Check whether this is a source (src) package
+        source = utils.system_output(s_cmd)
+        if source == '(none)':
+            package_info['source'] = False
+        else:
+            package_info['source'] = True
+        package_info['version'] = utils.system_output(v_cmd)
+        package_info['arch'] = utils.system_output(a_cmd)
+        # Checking if package is installed
+        try:
+            utils.system(i_cmd)
+            package_info['installed'] = True
+        except:
+            package_info['installed'] = False
 
-	except:
-		package_info['system_support'] = False
-		package_info['installed'] = False
-		# File gives a wealth of information about rpm packages.
-		# However, we can't trust all this info, as incorrectly
-		# packaged rpms can report some wrong values.
-		# It's better than nothing though :)
-		if len(file_result.split(' ')) == 6:
-			# Figure if package is a source package
-			if file_result.split(' ')[3] == 'src':
-				package_info['source'] = True
-			elif file_result.split(' ')[3] == 'bin':
-				package_info['source'] = False
-			else:
-				package_info['source'] = False
-			# Get architecture
-			package_info['arch'] = file_result.split(' ')[4]
-			# Get version
-			package_info['version'] = file_result.split(' ')[5]
-		elif len(file_result.split(' ')) == 5:
-			# Figure if package is a source package
-			if file_result.split(' ')[3] == 'src':
-				package_info['source'] = True
-			elif file_result.split(' ')[3] == 'bin':
-				package_info['source'] = False
-			else:
-				package_info['source'] = False
-			# When the arch param is missing on file, we assume noarch
-			package_info['arch'] = 'noarch'
-			# Get version
-			package_info['version'] = file_result.split(' ')[4]
-		else:
-			# If everything else fails...
-			package_info['source'] =  False
-			package_info['arch'] = 'Not Available'
-			package_info['version'] = 'Not Available'
-	return package_info
+    except:
+        package_info['system_support'] = False
+        package_info['installed'] = False
+        # File gives a wealth of information about rpm packages.
+        # However, we can't trust all this info, as incorrectly
+        # packaged rpms can report some wrong values.
+        # It's better than nothing though :)
+        if len(file_result.split(' ')) == 6:
+            # Figure if package is a source package
+            if file_result.split(' ')[3] == 'src':
+                package_info['source'] = True
+            elif file_result.split(' ')[3] == 'bin':
+                package_info['source'] = False
+            else:
+                package_info['source'] = False
+            # Get architecture
+            package_info['arch'] = file_result.split(' ')[4]
+            # Get version
+            package_info['version'] = file_result.split(' ')[5]
+        elif len(file_result.split(' ')) == 5:
+            # Figure if package is a source package
+            if file_result.split(' ')[3] == 'src':
+                package_info['source'] = True
+            elif file_result.split(' ')[3] == 'bin':
+                package_info['source'] = False
+            else:
+                package_info['source'] = False
+            # When the arch param is missing on file, we assume noarch
+            package_info['arch'] = 'noarch'
+            # Get version
+            package_info['version'] = file_result.split(' ')[4]
+        else:
+            # If everything else fails...
+            package_info['source'] = False
+            package_info['arch'] = 'Not Available'
+            package_info['version'] = 'Not Available'
+    return package_info
 
 
 def __dpkg_info(dpkg_package):
-	"""\
-	Private function that returns a dictionary with information about a 
-	dpkg package file
-	- type: Package management program that handles the file
-	- system_support: If the package management program is installed on the
-	system or not
-	- source: If it is a source (True) our binary (False) package
-	- version: The package version (or name), that is used to check against the
-	package manager if the package is installed
-	- arch: The architecture for which a binary package was built
-	- installed: Whether the package is installed (True) on the system or not
-	(False)
-	"""
-	# We will make good use of what the file command has to tell us about the
-	# package :)
-	file_result = utils.system_output('file ' + dpkg_package)
-	package_info = {}
-	package_info['type'] = 'dpkg'
-	# There's no single debian source package as is the case
-	# with RPM
-	package_info['source'] = False
-	try:
-		os_dep.command('dpkg')
-		# Build the command strings that will be used to get package info
-		# a_cmd - Command to determine package architecture
-		# v_cmd - Command to determine package version
-		# i_cmd - Command to determiine if package is installed
-		a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
-		v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
-		i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'
+    """\
+    Private function that returns a dictionary with information about a
+    dpkg package file
+    - type: Package management program that handles the file
+    - system_support: If the package management program is installed on the
+    system or not
+    - source: If it is a source (True) or binary (False) package
+    - version: The package version (or name), that is used to check against the
+    package manager if the package is installed
+    - arch: The architecture for which a binary package was built
+    - installed: Whether the package is installed (True) on the system or not
+    (False)
+    """
+    # We will make good use of what the file command has to tell us about the
+    # package :)
+    file_result = utils.system_output('file ' + dpkg_package)
+    package_info = {}
+    package_info['type'] = 'dpkg'
+    # There's no single debian source package as is the case
+    # with RPM
+    package_info['source'] = False
+    try:
+        os_dep.command('dpkg')
+        # Build the command strings that will be used to get package info
+        # a_cmd - Command to determine package architecture
+        # v_cmd - Command to determine package version
+        # i_cmd - Command to determine if package is installed
+        a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
+        v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
+        i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'
 
-		package_info['system_support'] = True
-		package_info['version'] = utils.system_output(v_cmd)
-		package_info['arch'] = utils.system_output(a_cmd)
-		# Checking if package is installed
-		package_status = utils.system_output(i_cmd, ignore_status=True)
-		not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
-		dpkg_not_installed = re.search(not_inst_pattern, package_status)
-		if dpkg_not_installed:
-			package_info['installed'] = False
-		else:
-			package_info['installed'] = True
+        package_info['system_support'] = True
+        package_info['version'] = utils.system_output(v_cmd)
+        package_info['arch'] = utils.system_output(a_cmd)
+        # Checking if package is installed
+        package_status = utils.system_output(i_cmd, ignore_status=True)
+        not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
+        dpkg_not_installed = re.search(not_inst_pattern, package_status)
+        if dpkg_not_installed:
+            package_info['installed'] = False
+        else:
+            package_info['installed'] = True
 
-	except:
-		package_info['system_support'] = False
-		package_info['installed'] = False
-		# The output of file is not as generous for dpkg files as
-		# it is with rpm files
-		package_info['arch'] = 'Not Available'
-		package_info['version'] = 'Not Available'
+    except:
+        package_info['system_support'] = False
+        package_info['installed'] = False
+        # The output of file is not as generous for dpkg files as
+        # it is with rpm files
+        package_info['arch'] = 'Not Available'
+        package_info['version'] = 'Not Available'
 
-	return package_info
+    return package_info
 
 
 def info(package):
-	"""\
-	Returns a dictionary with package information about a given package file:
-	- type: Package management program that handles the file
-	- system_support: If the package management program is installed on the
-	system or not
-	- source: If it is a source (True) our binary (False) package
-	- version: The package version (or name), that is used to check against the
-	package manager if the package is installed
-	- arch: The architecture for which a binary package was built
-	- installed: Whether the package is installed (True) on the system or not
-	(False)
+    """\
+    Returns a dictionary with package information about a given package file:
+    - type: Package management program that handles the file
+    - system_support: If the package management program is installed on the
+    system or not
+    - source: If it is a source (True) or binary (False) package
+    - version: The package version (or name), that is used to check against the
+    package manager if the package is installed
+    - arch: The architecture for which a binary package was built
+    - installed: Whether the package is installed (True) on the system or not
+    (False)
 
-	Implemented package types:
-	- 'dpkg' - dpkg (debian, ubuntu) package files
-	- 'rpm' - rpm (red hat, suse) package files
-	Raises an exception if the package type is not one of the implemented
-	package types.
-	"""
-	if not os.path.isfile(package):
-		raise ValueError('invalid file %s to verify' % package)
-	# Use file and libmagic to determine the actual package file type.
-	file_result = utils.system_output('file ' + package)
-	for package_manager in KNOWN_PACKAGE_MANAGERS:
-		if package_manager == 'rpm':
-			package_pattern = re.compile('RPM', re.IGNORECASE)
-		elif package_manager == 'dpkg':
-			package_pattern = re.compile('Debian', re.IGNORECASE)
+    Implemented package types:
+    - 'dpkg' - dpkg (debian, ubuntu) package files
+    - 'rpm' - rpm (red hat, suse) package files
+    Raises an exception if the package type is not one of the implemented
+    package types.
+    """
+    if not os.path.isfile(package):
+        raise ValueError('invalid file %s to verify' % package)
+    # Use file and libmagic to determine the actual package file type.
+    file_result = utils.system_output('file ' + package)
+    for package_manager in KNOWN_PACKAGE_MANAGERS:
+        if package_manager == 'rpm':
+            package_pattern = re.compile('RPM', re.IGNORECASE)
+        elif package_manager == 'dpkg':
+            package_pattern = re.compile('Debian', re.IGNORECASE)
 
-		result = re.search(package_pattern, file_result)
+        result = re.search(package_pattern, file_result)
 
-		if result and package_manager == 'rpm':
-			return __rpm_info(package)
-		elif result and package_manager == 'dpkg':
-			return __dpkg_info(package)
+        if result and package_manager == 'rpm':
+            return __rpm_info(package)
+        elif result and package_manager == 'dpkg':
+            return __dpkg_info(package)
 
-	# If it's not one of the implemented package manager methods, there's
-	# not much that can be done, hence we throw an exception.
-	raise error.PackageError('Unknown package type %s' % file_result)
+    # If it's not one of the implemented package manager methods, there's
+    # not much that can be done, hence we throw an exception.
+    raise error.PackageError('Unknown package type %s' % file_result)
 
 
 def install(package, nodeps = False):
-	"""\
-	Tries to install a package file. If the package is already installed,
-	it prints a message to the user and ends gracefully. If nodeps is set to
-	true, it will ignore package dependencies.
-	"""
-	my_package_info = info(package)
-	type = my_package_info['type']
-	system_support = my_package_info['system_support']
-	source = my_package_info['source']
-	installed = my_package_info['installed']
+    """\
+    Tries to install a package file. If the package is already installed,
+    it prints a message to the user and ends gracefully. If nodeps is set to
+    true, it will ignore package dependencies.
+    """
+    my_package_info = info(package)
+    type = my_package_info['type']
+    system_support = my_package_info['system_support']
+    source = my_package_info['source']
+    installed = my_package_info['installed']
 
-	if not system_support:
-		e_msg = 'Client does not have package manager %s to handle %s install' \
-		% (type, package)
-		raise error.PackageError(e_msg)
+    if not system_support:
+        e_msg = 'Client does not have package manager %s to handle %s install' \
+                % (type, package)
+        raise error.PackageError(e_msg)
 
-	opt_args = ''
-	if type == 'rpm':
-		if nodeps:
-			opt_args = opt_args + '--nodeps'
-		install_command = 'rpm %s -U %s' % (opt_args, package)
-	if type == 'dpkg':
-		if nodeps:
-			opt_args = opt_args + '--force-depends'
-		install_command = 'dpkg %s -i %s' % (opt_args, package)
+    opt_args = ''
+    if type == 'rpm':
+        if nodeps:
+            opt_args = opt_args + '--nodeps'
+        install_command = 'rpm %s -U %s' % (opt_args, package)
+    if type == 'dpkg':
+        if nodeps:
+            opt_args = opt_args + '--force-depends'
+        install_command = 'dpkg %s -i %s' % (opt_args, package)
 
-	# RPM source packages can be installed along with the binary versions
-	# with this check
-	if installed and not source:
-		return 'Package %s is already installed' % package
+    # RPM source packages can be installed along with the binary versions
+    # with this check
+    if installed and not source:
+        return 'Package %s is already installed' % package
 
-	# At this point, the most likely thing to go wrong is that there are 
-	# unmet dependencies for the package. We won't cover this case, at 
-	# least for now.
-	utils.system(install_command)
-	return 'Package %s was installed successfuly' % package
+    # At this point, the most likely thing to go wrong is that there are
+    # unmet dependencies for the package. We won't cover this case, at
+    # least for now.
+    utils.system(install_command)
+    return 'Package %s was installed successfully' % package
 
 
 def convert(package, destination_format):
-	"""\
-	Convert packages with the 'alien' utility. If alien is not installed, it
-	throws a NotImplementedError exception.
-	returns: filename of the package generated.
-	"""
-	try:
-		os_dep.command('alien')
-	except:
-		e_msg = 'Cannot convert to %s, alien not installed' % destination_format
-		raise error.TestError(e_msg)
+    """\
+    Convert packages with the 'alien' utility. If alien is not installed, it
+    throws a NotImplementedError exception.
+    returns: filename of the package generated.
+    """
+    try:
+        os_dep.command('alien')
+    except:
+        e_msg = 'Cannot convert to %s, alien not installed' % destination_format
+        raise error.TestError(e_msg)
 
-	# alien supports converting to many formats, but its interesting to map
-	# convertions only for the implemented package types.
-	if destination_format == 'dpkg':
-		deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
-		conv_output = utils.system_output('alien --to-deb %s 2>/dev/null' % package)
-		converted_package = re.findall(deb_pattern, conv_output)[0]
-	elif destination_format == 'rpm':
-		rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
-		conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null' % package)
-		converted_package = re.findall(rpm_pattern, conv_output)[0]
-	else:
-		e_msg = 'Convertion to format %s not implemented' % destination_format
-		raise NotImplementedError(e_msg)
+    # alien supports converting to many formats, but it's interesting to map
+    # conversions only for the implemented package types.
+    if destination_format == 'dpkg':
+        deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
+        conv_output = utils.system_output('alien --to-deb %s 2>/dev/null' % package)
+        converted_package = re.findall(deb_pattern, conv_output)[0]
+    elif destination_format == 'rpm':
+        rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
+        conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null' % package)
+        converted_package = re.findall(rpm_pattern, conv_output)[0]
+    else:
+        e_msg = 'Conversion to format %s not implemented' % destination_format
+        raise NotImplementedError(e_msg)
 
-	print 'Package %s successfuly converted to %s' % \
-		(os.path.basename(package), os.path.basename(converted_package))
-	return os.path.abspath(converted_package)
+    print 'Package %s successfully converted to %s' % \
+            (os.path.basename(package), os.path.basename(converted_package))
+    return os.path.abspath(converted_package)
 
 
 def os_support():
-	"""\
-	Returns a dictionary with host os package support info:
-	- rpm: True if system supports rpm packages, False otherwise
-	- dpkg: True if system supports dpkg packages, False otherwise
-	- conversion: True if the system can convert packages (alien installed),
-	or False otherwise
-	"""
-	support_info = {}
-	for package_manager in KNOWN_PACKAGE_MANAGERS:
-		try:
-			os_dep.command(package_manager)
-			support_info[package_manager] = True
-		except:
-			support_info[package_manager] = False
+    """\
+    Returns a dictionary with host os package support info:
+    - rpm: True if system supports rpm packages, False otherwise
+    - dpkg: True if system supports dpkg packages, False otherwise
+    - conversion: True if the system can convert packages (alien installed),
+    or False otherwise
+    """
+    support_info = {}
+    for package_manager in KNOWN_PACKAGE_MANAGERS:
+        try:
+            os_dep.command(package_manager)
+            support_info[package_manager] = True
+        except:
+            support_info[package_manager] = False
 
-	try:
-		os_dep.command('alien')
-		support_info['conversion'] = True
-	except:
-		support_info['conversion'] = False
+    try:
+        os_dep.command('alien')
+        support_info['conversion'] = True
+    except:
+        support_info['conversion'] = False
 
-	return support_info
+    return support_info
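
A minimal sketch of the module's API; the package path is hypothetical:

    import package

    pkg = '/tmp/foo-1.0-1.i386.rpm'     # hypothetical file
    details = package.info(pkg)         # dict per the info() docstring
    if details['system_support'] and not details['installed']:
        print package.install(pkg)

    print package.os_support()          # {'rpm': ..., 'dpkg': ..., 'conversion': ...}
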
diff --git a/client/bin/parallel.py b/client/bin/parallel.py
index a95b643..13a8b51 100644
--- a/client/bin/parallel.py
+++ b/client/bin/parallel.py
@@ -6,42 +6,42 @@
 from autotest_lib.client.common_lib import error
 
 def fork_start(tmp, l):
-	sys.stdout.flush()
-	sys.stderr.flush()
-	pid = os.fork()
-	if pid:
-		# Parent
-		return pid
+    sys.stdout.flush()
+    sys.stderr.flush()
+    pid = os.fork()
+    if pid:
+        # Parent
+        return pid
 
-	try:
-		try:
-			l()
+    try:
+        try:
+            l()
 
-		except error.AutotestError:
-			raise
+        except error.AutotestError:
+            raise
 
-		except:
-			raise error.UnhandledError("test failed and threw:\n")
+        except:
+            raise error.UnhandledError("test failed and threw:\n")
 
-	except Exception, detail:
-		ename = tmp + "/debug/error-%d" % (os.getpid())
-		pickle.dump(detail, open(ename, "w"))
+    except Exception, detail:
+        ename = tmp + "/debug/error-%d" % (os.getpid())
+        pickle.dump(detail, open(ename, "w"))
 
-		sys.stdout.flush()
-		sys.stderr.flush()
-		os._exit(1)
+        sys.stdout.flush()
+        sys.stderr.flush()
+        os._exit(1)
 
-	sys.stdout.flush()
-	sys.stderr.flush()
-	os._exit(0)
+    sys.stdout.flush()
+    sys.stderr.flush()
+    os._exit(0)
 
 
 def fork_waitfor(tmp, pid):
-	(pid, status) = os.waitpid(pid, 0)
+    (pid, status) = os.waitpid(pid, 0)
 
-	ename = tmp + "/debug/error-%d" % pid
-	if (os.path.exists(ename)):
-		raise pickle.load(file(ename, 'r'))
+    ename = tmp + "/debug/error-%d" % pid
+    if (os.path.exists(ename)):
+        raise pickle.load(file(ename, 'r'))
 
-	if (status != 0):
-		raise error.TestError("test failed rc=%d" % (status))
+    if (status != 0):
+        raise error.TestError("test failed rc=%d" % (status))
diff --git a/client/bin/profiler.py b/client/bin/profiler.py
index b919542..ff821f9 100755
--- a/client/bin/profiler.py
+++ b/client/bin/profiler.py
@@ -1,25 +1,24 @@
 class profiler:
-	preserve_srcdir = False
+    preserve_srcdir = False
 
-	def __init__(self, job):
-		self.job = job
+    def __init__(self, job):
+        self.job = job
 
-	def setup(self, *args):
-		return
+    def setup(self, *args):
+        return
 
 
-	def initialize(self, *args):
-		return
+    def initialize(self, *args):
+        return
 
 
-	def start(self, test):
-		return
+    def start(self, test):
+        return
 
 
-	def stop(self, test):
-		return
+    def stop(self, test):
+        return
 
 
-	def report(self, test):
-		return
-
+    def report(self, test):
+        return
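
A concrete profiler overrides only the hooks it needs. A hypothetical
subclass (the version attribute is expected by profilers.add(), which
hands it to update_version()):

    import time

    class time_profiler(profiler):
        version = 1

        def start(self, test):
            self.t0 = time.time()

        def stop(self, test):
            self.elapsed = time.time() - self.t0

        def report(self, test):
            print "test ran for %.1fs" % self.elapsed
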
diff --git a/client/bin/profilers.py b/client/bin/profilers.py
index c080021..298cba9 100755
--- a/client/bin/profilers.py
+++ b/client/bin/profilers.py
@@ -4,71 +4,71 @@
 
 class profilers:
 
-	def __init__(self, job):
-		self.job = job
-		self.list = []
-		self.profdir = job.autodir + '/profilers'
-		self.tmpdir = job.tmpdir
-		self.profile_run_only = False
+    def __init__(self, job):
+        self.job = job
+        self.list = []
+        self.profdir = job.autodir + '/profilers'
+        self.tmpdir = job.tmpdir
+        self.profile_run_only = False
 
-	# add a profiler
-	def add(self, profiler, *args, **dargs):
-		try:
-			sys.path.insert(0, self.job.profdir + '/' + profiler)
-			exec 'import ' + profiler
-			exec 'newprofiler = %s.%s(self)' % (profiler, profiler)
-		finally:
-			sys.path.pop(0)
-		newprofiler.name = profiler
-		newprofiler.bindir = self.profdir + '/' + profiler
-		newprofiler.srcdir = newprofiler.bindir + '/src'
-		newprofiler.tmpdir = self.tmpdir + '/' + profiler
-		utils.update_version(newprofiler.srcdir, newprofiler.preserve_srcdir,
-				     newprofiler.version, newprofiler.setup,
-				     *args, **dargs)
-		newprofiler.initialize(*args, **dargs)
-		self.list.append(newprofiler)
+    # add a profiler
+    def add(self, profiler, *args, **dargs):
+        try:
+            sys.path.insert(0, self.job.profdir + '/' + profiler)
+            exec 'import ' + profiler
+            exec 'newprofiler = %s.%s(self)' % (profiler, profiler)
+        finally:
+            sys.path.pop(0)
+        newprofiler.name = profiler
+        newprofiler.bindir = self.profdir + '/' + profiler
+        newprofiler.srcdir = newprofiler.bindir + '/src'
+        newprofiler.tmpdir = self.tmpdir + '/' + profiler
+        utils.update_version(newprofiler.srcdir, newprofiler.preserve_srcdir,
+                             newprofiler.version, newprofiler.setup,
+                             *args, **dargs)
+        newprofiler.initialize(*args, **dargs)
+        self.list.append(newprofiler)
 
 
-	# remove a profiler
-	def delete(self, profiler):
-		nukeme = None
-		for p in self.list:
-			if (p.name == profiler):
-				nukeme = p
-		self.list.remove(p)
+    # remove a profiler
+    def delete(self, profiler):
+        nukeme = None
+        for p in self.list:
+            if (p.name == profiler):
+                nukeme = p
+        self.list.remove(nukeme)
 
 
-	# are any profilers enabled ?
-	def present(self):
-		if self.list:
-			return 1
-		else:
-			return 0
+    # are any profilers enabled ?
+    def present(self):
+        if self.list:
+            return 1
+        else:
+            return 0
 
-        # Returns True if job is supposed to be run only with profiling turned
-        # on, False otherwise
-        def only(self):
-                return self.profile_run_only
+    # Returns True if job is supposed to be run only with profiling turned
+    # on, False otherwise
+    def only(self):
+        return self.profile_run_only
 
-        # Changes the flag which determines whether or not the job is to be
-        # run without profilers at all
-        def set_only(self, value):
-                self.profile_run_only = value
+    # Changes the flag which determines whether or not the job is to be
+    # run without profilers at all
+    def set_only(self, value):
+        self.profile_run_only = value
 
-	# Start all enabled profilers
-	def start(self, test):
-		for p in self.list:
-			p.start(test)
+    # Start all enabled profilers
+    def start(self, test):
+        for p in self.list:
+            p.start(test)
 
 
-	# Stop all enabled profilers
-	def stop(self, test):
-		for p in self.list:
-			p.stop(test)
+    # Stop all enabled profilers
+    def stop(self, test):
+        for p in self.list:
+            p.stop(test)
 
 
-	# Report on all enabled profilers
-	def report(self, test):
-		for p in self.list:
-			p.report(test)
+    # Report on all enabled profilers
+    def report(self, test):
+        for p in self.list:
+            p.report(test)
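The add() method above imports a profiler module by name from under profdir
and instantiates the class of the same name, so control files can drive
profiling by name alone. A hedged usage sketch ('oprofile' and the job object
are illustrative):

    # job.profilers is an instance of the profilers class above.
    job.profilers.add('oprofile')     # imports profilers/oprofile/oprofile.py
    job.run_test('sleeptest')         # start()/stop()/report() wrap the test
    job.profilers.delete('oprofile')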
diff --git a/client/bin/sysinfo.py b/client/bin/sysinfo.py
index 2f605ea..107229c 100755
--- a/client/bin/sysinfo.py
+++ b/client/bin/sysinfo.py
@@ -6,14 +6,14 @@
 from autotest_lib.client.common_lib import utils
 
 try:
-	from autotest_lib.client.bin import site_sysinfo
-	local = True
+    from autotest_lib.client.bin import site_sysinfo
+    local = True
 except ImportError:
-	local = False
+    local = False
 
 # stuff to log per reboot
-files = ['/proc/pci', '/proc/meminfo', '/proc/slabinfo', '/proc/version', 
-	'/proc/cpuinfo', '/proc/cmdline', '/proc/modules']
+files = ['/proc/pci', '/proc/meminfo', '/proc/slabinfo', '/proc/version',
+        '/proc/cpuinfo', '/proc/cmdline', '/proc/modules']
 # commands = ['lshw']        # this causes problems triggering CDROM drives
 commands = ['uname -a', 'lspci -vvn', 'gcc --version', 'ld --version',
             'mount', 'hostname']
@@ -21,84 +21,84 @@
 
 
 def run_command(command, output):
-	parts = command.split(None, 1)
-	cmd = parts[0]
-	if len(parts) > 1:
-		args = parts[1]
-	else:
-		args = ''
-	for dir in path:
-		pathname = dir + '/' + cmd
-		if not os.path.exists(pathname):
-			continue
-		tmp_cmd = "%s %s > %s 2> /dev/null" % (pathname, args, output)
-		utils.system(tmp_cmd)
+    parts = command.split(None, 1)
+    cmd = parts[0]
+    if len(parts) > 1:
+        args = parts[1]
+    else:
+        args = ''
+    for dir in path:
+        pathname = dir + '/' + cmd
+        if not os.path.exists(pathname):
+            continue
+        tmp_cmd = "%s %s > %s 2> /dev/null" % (pathname, args, output)
+        utils.system(tmp_cmd)
 
 
 def reboot_count():
-	if not glob.glob('*'):
-		return -1          # No reboots, initial data not logged
-	else:
-		return len(glob.glob('reboot*'))
-			
-	
+    if not glob.glob('*'):
+        return -1          # No reboots, initial data not logged
+    else:
+        return len(glob.glob('reboot*'))
+
+
 def boot_subdir(reboot_count):
-	"""subdir of job sysinfo"""
-	if reboot_count == 0:
-		return '.'
-	else:
-		return 'reboot%d' % reboot_count
+    """subdir of job sysinfo"""
+    if reboot_count == 0:
+        return '.'
+    else:
+        return 'reboot%d' % reboot_count
 
 
 def log_per_reboot_data(sysinfo_dir):
-	"""we log this data when the job starts, and again after any reboot"""
-	pwd = os.getcwd()
-	try:
-		os.chdir(sysinfo_dir)
-		subdir = boot_subdir(reboot_count() + 1)
-		if not os.path.exists(subdir):
-			os.mkdir(subdir)
-		os.chdir(os.path.join(sysinfo_dir, subdir))
-		_log_per_reboot_data()
-	finally:
-		os.chdir(pwd)
+    """we log this data when the job starts, and again after any reboot"""
+    pwd = os.getcwd()
+    try:
+        os.chdir(sysinfo_dir)
+        subdir = boot_subdir(reboot_count() + 1)
+        if not os.path.exists(subdir):
+            os.mkdir(subdir)
+        os.chdir(os.path.join(sysinfo_dir, subdir))
+        _log_per_reboot_data()
+    finally:
+        os.chdir(pwd)
 
 
 def _log_per_reboot_data():
-	"""system info to log before each step of the job"""
-	for command in commands:
-		run_command(command, re.sub(r'\s', '_', command))
+    """system info to log before each step of the job"""
+    for command in commands:
+        run_command(command, re.sub(r'\s', '_', command))
 
-	for file in files:
-		if (os.path.exists(file)):
-			shutil.copyfile(file, os.path.basename(file))
+    for file in files:
+        if (os.path.exists(file)):
+            shutil.copyfile(file, os.path.basename(file))
 
-	utils.system('dmesg -c > dmesg', ignore_status=True)
-	utils.system('df -mP > df', ignore_status=True)
-	if local:
-		site_sysinfo.log_per_reboot_data()
+    utils.system('dmesg -c > dmesg', ignore_status=True)
+    utils.system('df -mP > df', ignore_status=True)
+    if local:
+        site_sysinfo.log_per_reboot_data()
 
 
 def log_after_each_test(test_sysinfo_dir, job_sysinfo_dir):
-	"""log things that change after each test (called from test.py)"""
-	pwd = os.getcwd()
-	try:
-		os.chdir(job_sysinfo_dir)
-		reboot_subdir = boot_subdir(reboot_count())
-		reboot_dir = os.path.join(job_sysinfo_dir, reboot_subdir)
-		assert os.path.exists(reboot_dir)
+    """log things that change after each test (called from test.py)"""
+    pwd = os.getcwd()
+    try:
+        os.chdir(job_sysinfo_dir)
+        reboot_subdir = boot_subdir(reboot_count())
+        reboot_dir = os.path.join(job_sysinfo_dir, reboot_subdir)
+        assert os.path.exists(reboot_dir)
 
-		os.makedirs(test_sysinfo_dir)
-		os.chdir(test_sysinfo_dir)
-		utils.system('ln -s %s reboot_current' % reboot_dir)
+        os.makedirs(test_sysinfo_dir)
+        os.chdir(test_sysinfo_dir)
+        utils.system('ln -s %s reboot_current' % reboot_dir)
 
-		utils.system('dmesg -c > dmesg', ignore_status=True)
-		utils.system('df -mP > df', ignore_status=True)
-		if local:
-			site_sysinfo.log_after_each_test()
-	finally:
-		os.chdir(pwd)
-	
-	
+        utils.system('dmesg -c > dmesg', ignore_status=True)
+        utils.system('df -mP > df', ignore_status=True)
+        if local:
+            site_sysinfo.log_after_each_test()
+    finally:
+        os.chdir(pwd)
+
+
 if __name__ == '__main__':
-	log_per_reboot_data()
+    log_per_reboot_data()
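The reboot_count()/boot_subdir() pair above fixes the on-disk layout of the
sysinfo directory; for illustration, the mapping they implement (note that
reboot_count() globs the current working directory, so it must be called from
inside the sysinfo directory):

    from autotest_lib.client.bin import sysinfo

    # boot_subdir() maps a reboot count to a subdirectory name:
    assert sysinfo.boot_subdir(0) == '.'        # initial, pre-reboot data
    assert sysinfo.boot_subdir(1) == 'reboot1'  # after the first reboot
    assert sysinfo.boot_subdir(2) == 'reboot2'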
diff --git a/client/bin/test.py b/client/bin/test.py
index 78dbdd1..5d5d4f9 100755
--- a/client/bin/test.py
+++ b/client/bin/test.py
@@ -3,20 +3,20 @@
 # Shell class for a test, inherited by all individual tests
 #
 # Methods:
-#	__init__	initialise
-#	initialize	run once for each job
-#	setup		run once for each new version of the test installed
-#	run		run the test (wrapped by job.run_test())
+#       __init__        initialise
+#       initialize      run once for each job
+#       setup           run once for each new version of the test installed
+#       run             run the test (wrapped by job.run_test())
 #
 # Data:
-#	job		backreference to the job this test instance is part of
-#	outputdir	eg. results/<job>/<testname.tag>
-#	resultsdir	eg. results/<job>/<testname.tag>/results
-#	profdir		eg. results/<job>/<testname.tag>/profiling
-#	debugdir	eg. results/<job>/<testname.tag>/debug
-#	bindir		eg. tests/<test>
-#	src		eg. tests/<test>/src
-#	tmpdir		eg. tmp/<testname.tag>
+#       job             backreference to the job this test instance is part of
+#       outputdir       eg. results/<job>/<testname.tag>
+#       resultsdir      eg. results/<job>/<testname.tag>/results
+#       profdir         eg. results/<job>/<testname.tag>/profiling
+#       debugdir        eg. results/<job>/<testname.tag>/debug
+#       bindir          eg. tests/<test>
+#       src             eg. tests/<test>/src
+#       tmpdir          eg. tmp/<testname.tag>
 
 import os, traceback
 
@@ -26,23 +26,23 @@
 
 
 class test(common_test.base_test):
-	pass
+    pass
 
 
 testname = common_test.testname
 
 
 def _grab_sysinfo(mytest):
-	try:
-		sysinfo_dir = os.path.join(mytest.outputdir, 'sysinfo')
-		sysinfo.log_after_each_test(sysinfo_dir, mytest.job.sysinfodir)
-		if os.path.exists(mytest.tmpdir):
-			utils.system('rm -rf ' + mytest.tmpdir)
-	except:
-		print 'after-test error:'
-		traceback.print_exc(file=sys.stdout)
+    try:
+        sysinfo_dir = os.path.join(mytest.outputdir, 'sysinfo')
+        sysinfo.log_after_each_test(sysinfo_dir, mytest.job.sysinfodir)
+        if os.path.exists(mytest.tmpdir):
+            utils.system('rm -rf ' + mytest.tmpdir)
+    except:
+        print 'after-test error:'
+        traceback.print_exc(file=sys.stdout)
 
 
 def runtest(job, url, tag, args, dargs):
-	common_test.runtest(job, url, tag, args, dargs,
-	                    locals(), globals(), _grab_sysinfo)
+    common_test.runtest(job, url, tag, args, dargs,
+                        locals(), globals(), _grab_sysinfo)
diff --git a/client/bin/test_config.py b/client/bin/test_config.py
index e926813..d8a5337 100644
--- a/client/bin/test_config.py
+++ b/client/bin/test_config.py
@@ -13,79 +13,79 @@
 __all__ = ['config_loader']
 
 class config_loader:
-	"""Base class of the configuration parser"""
-	def __init__(self, cfg, tmpdir = '/tmp'):
-		"""\
-		Instantiate ConfigParser and provide the file like object that we'll 
-		use to read configuration data from.
-		Args:
-			* cfg: Where we'll get configuration data. It can be either:
-				* A URL containing the file
-				* A valid file path inside the filesystem
-				* A string containing configuration data
-			* tmpdir: Where we'll dump the temporary conf files. The default
-			is the /tmp directory.
-		"""
-		# Base Parser
-		self.parser = ConfigParser()
-		# File is already a file like object
-		if hasattr(cfg, 'read'):
-			self.cfg = cfg
-			self.parser.readfp(self.cfg)
-		elif isinstance(cfg, types.StringTypes):
-			# Config file is a URL. Download it to a temp dir
-			if cfg.startswith('http') or cfg.startswith('ftp'):
-				self.cfg = path.join(tmpdir, path.basename(cfg))
-				utils.urlretrieve(cfg, self.cfg)
-				self.parser.read(self.cfg)
-			# Config is a valid filesystem path to a file.
-			elif path.exists(path.abspath(cfg)):
-				if path.isfile(cfg):
-					self.cfg = path.abspath(cfg)
-					self.parser.read(self.cfg)
-				else:
-					e_msg = 'Invalid config file path: %s' % cfg
-					raise IOError(e_msg)
-			# Config file is just a string, convert it to a python file like
-			# object using StringIO
-			else:
-				self.cfg = StringIO(cfg)
-				self.parser.readfp(self.cfg)
+    """Base class of the configuration parser"""
+    def __init__(self, cfg, tmpdir = '/tmp'):
+        """\
+        Instantiate ConfigParser and provide the file-like object that we'll
+        use to read configuration data from.
+        Args:
+                * cfg: Where we'll get configuration data. It can be either:
+                        * A URL containing the file
+                        * A valid file path inside the filesystem
+                        * A string containing configuration data
+                * tmpdir: Where we'll dump the temporary conf files. The default
+                is the /tmp directory.
+        """
+        # Base Parser
+        self.parser = ConfigParser()
+        # File is already a file-like object
+        if hasattr(cfg, 'read'):
+            self.cfg = cfg
+            self.parser.readfp(self.cfg)
+        elif isinstance(cfg, types.StringTypes):
+            # Config file is a URL. Download it to a temp dir
+            if cfg.startswith('http') or cfg.startswith('ftp'):
+                self.cfg = path.join(tmpdir, path.basename(cfg))
+                utils.urlretrieve(cfg, self.cfg)
+                self.parser.read(self.cfg)
+            # Config is a valid filesystem path to a file.
+            elif path.exists(path.abspath(cfg)):
+                if path.isfile(cfg):
+                    self.cfg = path.abspath(cfg)
+                    self.parser.read(self.cfg)
+                else:
+                    e_msg = 'Invalid config file path: %s' % cfg
+                    raise IOError(e_msg)
+            # Config file is just a string; convert it to a Python file-like
+            # object using StringIO
+            else:
+                self.cfg = StringIO(cfg)
+                self.parser.readfp(self.cfg)
 
 
-	def get(self, section, name, default=None):
-		"""Get the value of a option.
+    def get(self, section, name, default=None):
+        """Get the value of a option.
 
-		Section of the config file and the option name.
-		You can pass a default value if the option doesn't exist.
-		"""
-		if not self.parser.has_option(section, name):
-			return default
-		return self.parser.get(section, name)
+        Section of the config file and the option name.
+        You can pass a default value if the option doesn't exist.
+        """
+        if not self.parser.has_option(section, name):
+            return default
+        return self.parser.get(section, name)
 
 
-	def set(self, section, option, value):
-		"""Set an option.
+    def set(self, section, option, value):
+        """Set an option.
 
-		This change is not persistent unless saved with 'save()'.
-		"""
-		if not self.parser.has_section(section):
-			self.parser.add_section(section)
-		return self.parser.set(section, name, value)
+        This change is not persistent unless saved with 'save()'.
+        """
+        if not self.parser.has_section(section):
+            self.parser.add_section(section)
+        return self.parser.set(section, option, value)
 
 
-	def remove(self, section, name):
-		"""Remove an option."""
-		if self.parser.has_section(section):
-			self.parser.remove_option(section, name)
+    def remove(self, section, name):
+        """Remove an option."""
+        if self.parser.has_section(section):
+            self.parser.remove_option(section, name)
 
 
-	def save(self):
-		"""Save the configuration file with all modifications"""
-		if not self.filename:
-			return
-		fileobj = file(self.filename, 'w')
-		try:
-			self.parser.write(fileobj)
-		finally:
-			fileobj.close()
+    def save(self):
+        """Save the configuration file with all modifications"""
+        if not self.filename:
+            return
+        fileobj = file(self.filename, 'w')
+        try:
+            self.parser.write(fileobj)
+        finally:
+            fileobj.close()
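Since the constructor above accepts a URL, a filesystem path, a literal
string, or an open file-like object, a short sketch of each form may help
(paths, URLs, and section names are illustrative, and the module path is
assumed from the file's location):

    from autotest_lib.client.bin.test_config import config_loader

    # From a literal string of configuration data.
    cfg = config_loader("[defaults]\ntimeout = 30\n")
    print cfg.get('defaults', 'timeout')        # -> '30'
    print cfg.get('defaults', 'retries', 3)     # missing option -> default

    # From a file on disk.
    cfg = config_loader('/etc/mytest.cfg')

    # From a URL; the file is first fetched into tmpdir.
    cfg = config_loader('http://example.com/mytest.cfg', tmpdir='/tmp')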
diff --git a/client/bin/xen.py b/client/bin/xen.py
index d69b2c5..a792427 100644
--- a/client/bin/xen.py
+++ b/client/bin/xen.py
@@ -8,201 +8,201 @@
 
 class xen(kernel.kernel):
 
-	def log(self, msg):
-		print msg
-		self.logfile.write('%s\n' % msg)
+    def log(self, msg):
+        print msg
+        self.logfile.write('%s\n' % msg)
 
 
-	def __init__(self, job, base_tree, results_dir, tmp_dir, build_dir, \
-					leave = False, kjob = None):
-		# call base-class
-		kernel.kernel.__init__(self, job, base_tree, results_dir, \
-						tmp_dir, build_dir, leave)
-		self.kjob = kjob
+    def __init__(self, job, base_tree, results_dir, tmp_dir, build_dir, \
+                                    leave = False, kjob = None):
+        # call base-class
+        kernel.kernel.__init__(self, job, base_tree, results_dir, \
+                                        tmp_dir, build_dir, leave)
+        self.kjob = kjob
 
 
-	def config(self, config_file, config_list = None):
-		raise NotImplementedError('config() not implemented for xen')
+    def config(self, config_file, config_list = None):
+        raise NotImplementedError('config() not implemented for xen')
 
 
-	def build(self, make_opts = '', logfile = '', extraversion='autotest'):
-		"""build xen
+    def build(self, make_opts = '', logfile = '', extraversion='autotest'):
+        """build xen
 
-		make_opts
-			additional options to make, if any
-		"""
-		self.log('running build')
-		os_dep.commands('gcc', 'make')
-	        # build xen with extraversion flag
-		os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
-		if logfile == '':
-			logfile = os.path.join(self.log_dir, 'xen_build')
-		os.chdir(self.build_dir)
-		self.log('log_dir: %s ' % os.path.join(self.log_dir, 'stdout'))
-		self.job.stdout.tee_redirect(logfile + '.stdout')
-		self.job.stderr.tee_redirect(logfile + '.stderr')
+        make_opts
+                additional options to make, if any
+        """
+        self.log('running build')
+        os_dep.commands('gcc', 'make')
+        # build xen with extraversion flag
+        os.environ['XEN_EXTRAVERSION'] = '-unstable-%s' % extraversion
+        if logfile == '':
+            logfile = os.path.join(self.log_dir, 'xen_build')
+        os.chdir(self.build_dir)
+        self.log('log_dir: %s ' % os.path.join(self.log_dir, 'stdout'))
+        self.job.stdout.tee_redirect(logfile + '.stdout')
+        self.job.stderr.tee_redirect(logfile + '.stderr')
 
-		# build xen hypervisor and user-space tools
-		targets = ['xen', 'tools']
-		threads = 2 * autotest_utils.count_cpus()
-		for t in targets:
-			build_string = 'make -j %d %s %s' % (threads, make_opts, t)
-			self.log('build_string: %s' % build_string)
-			system(build_string)
+        # build xen hypervisor and user-space tools
+        targets = ['xen', 'tools']
+        threads = 2 * autotest_utils.count_cpus()
+        for t in targets:
+            build_string = 'make -j %d %s %s' % (threads, make_opts, t)
+            self.log('build_string: %s' % build_string)
+            system(build_string)
 
-		# make a kernel job out of the kernel from the xen src if one isn't provided
-		if self.kjob == None:
-			# get xen kernel tree ready
-			self.log("prep-ing xen'ified kernel source tree")
-			system('make prep-kernels')
+        # make a kernel job out of the kernel from the xen src if one isn't provided
+        if self.kjob == None:
+            # get xen kernel tree ready
+            self.log("prep-ing xen'ified kernel source tree")
+            system('make prep-kernels')
 
-			v = self.get_xen_kernel_build_ver()
-			self.log('building xen kernel version: %s' % v)
+            v = self.get_xen_kernel_build_ver()
+            self.log('building xen kernel version: %s' % v)
 
-			# build xen-ified kernel in xen tree
-			kernel_base_tree = os.path.join(self.build_dir, \
-				'linux-%s' % self.get_xen_kernel_build_ver())
+            # build xen-ified kernel in xen tree
+            kernel_base_tree = os.path.join(self.build_dir, \
+                    'linux-%s' % self.get_xen_kernel_build_ver())
 
-			self.log('kernel_base_tree = %s' % kernel_base_tree)
-			# fix up XENGUEST value in EXTRAVERSION; we can't have
-			# files with '$(XENGEUST)' in the name, =(
-			self.fix_up_xen_kernel_makefile(kernel_base_tree)
+            self.log('kernel_base_tree = %s' % kernel_base_tree)
+            # fix up XENGUEST value in EXTRAVERSION; we can't have
+            # files with '$(XENGUEST)' in the name, =(
+            self.fix_up_xen_kernel_makefile(kernel_base_tree)
 
-			# make the kernel job
-			self.kjob = self.job.kernel(kernel_base_tree)
+            # make the kernel job
+            self.kjob = self.job.kernel(kernel_base_tree)
 
-			# hardcoding dom0 config (no modules for testing, yay!)
-			# FIXME: probe host to determine which config to pick
-			c = self.build_dir + '/buildconfigs/linux-defconfig_xen0_x86_32'
-			self.log('using kernel config: %s ' % c)
-			self.kjob.config(c)
+            # hardcoding dom0 config (no modules for testing, yay!)
+            # FIXME: probe host to determine which config to pick
+            c = self.build_dir + '/buildconfigs/linux-defconfig_xen0_x86_32'
+            self.log('using kernel config: %s ' % c)
+            self.kjob.config(c)
 
-			# Xen's kernel tree sucks; doesn't use bzImage, but vmlinux 
-			self.kjob.set_build_target('vmlinuz')
+            # Xen's kernel tree sucks; doesn't use bzImage, but vmlinuz
+            self.kjob.set_build_target('vmlinuz')
 
-			# also, the vmlinuz is not out in arch/*/boot, ARGH! more hackery
-			self.kjob.set_build_image(self.job.tmpdir + '/build/linux/vmlinuz')
+            # also, the vmlinuz is not out in arch/*/boot, ARGH! more hackery
+            self.kjob.set_build_image(self.job.tmpdir + '/build/linux/vmlinuz')
 
-		self.kjob.build()
+        self.kjob.build()
 
-		self.job.stdout.restore()
-		self.job.stderr.restore()
+        self.job.stdout.restore()
+        self.job.stderr.restore()
 
-		xen_version = self.get_xen_build_ver()
-		self.log('BUILD VERSION: Xen: %s Kernel:%s' % \
-				(xen_version, self.kjob.get_kernel_build_ver()))
+        xen_version = self.get_xen_build_ver()
+        self.log('BUILD VERSION: Xen: %s Kernel:%s' % \
+                        (xen_version, self.kjob.get_kernel_build_ver()))
 
 
-	def build_timed(self, *args, **kwds):
-		raise NotImplementedError('build_timed() not implemented')
+    def build_timed(self, *args, **kwds):
+        raise NotImplementedError('build_timed() not implemented')
 
 
-	def install(self, tag='', prefix = '/', extraversion='autotest'):
-		"""make install in the kernel tree"""
-		self.log('Installing ...')
+    def install(self, tag='', prefix = '/', extraversion='autotest'):
+        """make install in the kernel tree"""
+        self.log('Installing ...')
 
-		os.chdir(self.build_dir)
+        os.chdir(self.build_dir)
 
-		if not os.path.isdir(prefix):
-			os.mkdir(prefix)
-		self.boot_dir = os.path.join(prefix, 'boot')
-		if not os.path.isdir(self.boot_dir):
-			os.mkdir(self.boot_dir)
+        if not os.path.isdir(prefix):
+            os.mkdir(prefix)
+        self.boot_dir = os.path.join(prefix, 'boot')
+        if not os.path.isdir(self.boot_dir):
+            os.mkdir(self.boot_dir)
 
-		# remember what we are going to install
-		xen_version = '%s-%s' % (self.get_xen_build_ver(), extraversion)
-		self.xen_image = self.boot_dir + '/xen-' + xen_version + '.gz'
-		self.xen_syms  = self.boot_dir + '/xen-syms-' + xen_version
+        # remember what we are going to install
+        xen_version = '%s-%s' % (self.get_xen_build_ver(), extraversion)
+        self.xen_image = self.boot_dir + '/xen-' + xen_version + '.gz'
+        self.xen_syms  = self.boot_dir + '/xen-syms-' + xen_version
 
-		self.log('Installing Xen ...')
-		os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
+        self.log('Installing Xen ...')
+        os.environ['XEN_EXTRAVERSION'] = '-unstable-%s' % extraversion
 
-		# install xen
-		system('make DESTDIR=%s -C xen install' % prefix)
+        # install xen
+        system('make DESTDIR=%s -C xen install' % prefix)
 
-		# install tools
-		system('make DESTDIR=%s -C tools install' % prefix)
+        # install tools
+        system('make DESTDIR=%s -C tools install' % prefix)
 
-		# install kernel
-		ktag = self.kjob.get_kernel_build_ver()
-		kprefix = prefix
-		self.kjob.install(tag=ktag, prefix=kprefix)
+        # install kernel
+        ktag = self.kjob.get_kernel_build_ver()
+        kprefix = prefix
+        self.kjob.install(tag=ktag, prefix=kprefix)
 
 
-	def add_to_bootloader(self, tag='autotest', args=''):
-		""" add this kernel to bootloader, taking an
-		    optional parameter of space separated parameters
-		    e.g.:  kernel.add_to_bootloader('mykernel', 'ro acpi=off')
-		"""
+    def add_to_bootloader(self, tag='autotest', args=''):
+        """ add this kernel to bootloader, taking an
+            optional parameter of space separated parameters
+            e.g.:  kernel.add_to_bootloader('mykernel', 'ro acpi=off')
+        """
 
-		# turn on xen mode
-		self.job.bootloader.enable_xen_mode()
+        # turn on xen mode
+        self.job.bootloader.enable_xen_mode()
 
-		# remove existing entry if present
-		self.job.bootloader.remove_kernel(tag)
+        # remove existing entry if present
+        self.job.bootloader.remove_kernel(tag)
 
-		# add xen and xen kernel
-		self.job.bootloader.add_kernel(self.kjob.image, tag, \
-					   self.kjob.initrd, self.xen_image)
+        # add xen and xen kernel
+        self.job.bootloader.add_kernel(self.kjob.image, tag, \
+                                   self.kjob.initrd, self.xen_image)
 
-		# if no args passed, populate from /proc/cmdline
-		if not args:
-			args = open('/proc/cmdline', 'r').readline().strip()
+        # if no args passed, populate from /proc/cmdline
+        if not args:
+            args = open('/proc/cmdline', 'r').readline().strip()
 
-		# add args to entry one at a time
-		for a in args.split(' '):
-			self.job.bootloader.add_args(tag, a)
+        # add args to entry one at a time
+        for a in args.split(' '):
+            self.job.bootloader.add_args(tag, a)
 
-		# turn off xen mode
-		self.job.bootloader.disable_xen_mode()
+        # turn off xen mode
+        self.job.bootloader.disable_xen_mode()
 
 
-	def get_xen_kernel_build_ver(self):
-		"""Check xen buildconfig for current kernel version"""
-		version = patchlevel = sublevel = ''
-		extraversion = localversion = ''
+    def get_xen_kernel_build_ver(self):
+        """Check xen buildconfig for current kernel version"""
+        version = patchlevel = sublevel = ''
+        extraversion = localversion = ''
 
-		version_file = self.build_dir + '/buildconfigs/mk.linux-2.6-xen'
+        version_file = self.build_dir + '/buildconfigs/mk.linux-2.6-xen'
 
-		for line in open(version_file, 'r').readlines():
-			if line.startswith('LINUX_VER'):
-				start = line.index('=') + 1
-				version = line[start:].strip() + "-xen"
-				break
+        for line in open(version_file, 'r').readlines():
+            if line.startswith('LINUX_VER'):
+                start = line.index('=') + 1
+                version = line[start:].strip() + "-xen"
+                break
 
-		return version
+        return version
 
 
-	def fix_up_xen_kernel_makefile(self, kernel_dir):
-		"""Fix up broken EXTRAVERSION in xen-ified Linux kernel Makefile"""
-		xenguest = ''
-		makefile = kernel_dir + '/Makefile'
+    def fix_up_xen_kernel_makefile(self, kernel_dir):
+        """Fix up broken EXTRAVERSION in xen-ified Linux kernel Makefile"""
+        xenguest = ''
+        makefile = kernel_dir + '/Makefile'
 
-		for line in open(makefile, 'r').readlines():
-			if line.startswith('XENGUEST'): 
-				start = line.index('=') + 1
-				xenguest = line[start:].strip()
-				break;
+        for line in open(makefile, 'r').readlines():
+            if line.startswith('XENGUEST'):
+                start = line.index('=') + 1
+                xenguest = line[start:].strip()
+                break
 
-		# change out $XENGUEST in EXTRAVERSION line
-		system('sed -i.old "s,\$(XENGUEST),%s," %s' % \
-							(xenguest, makefile))
+        # change out $XENGUEST in EXTRAVERSION line
+        system('sed -i.old "s,\$(XENGUEST),%s," %s' % \
+                                                (xenguest, makefile))
 
 
-	def get_xen_build_ver(self):
-		"""Check Makefile and .config to return kernel version"""
-		version = patchlevel = sublevel = ''
-		extraversion = localversion = ''
+    def get_xen_build_ver(self):
+        """Check Makefile and .config to return kernel version"""
+        version = patchlevel = sublevel = ''
+        extraversion = localversion = ''
 
-		for line in open(self.build_dir + '/xen/Makefile', 'r').readlines():
-			if line.startswith('export XEN_VERSION'):
-				start = line.index('=') + 1
-				version = line[start:].strip()
-			if line.startswith('export XEN_SUBVERSION'):
-				start = line.index('=') + 1
-				sublevel = line[start:].strip()
-			if line.startswith('export XEN_EXTRAVERSION'):
-				start = line.index('=') + 1
-				extraversion = line[start:].strip()
+        for line in open(self.build_dir + '/xen/Makefile', 'r').readlines():
+            if line.startswith('export XEN_VERSION'):
+                start = line.index('=') + 1
+                version = line[start:].strip()
+            if line.startswith('export XEN_SUBVERSION'):
+                start = line.index('=') + 1
+                sublevel = line[start:].strip()
+            if line.startswith('export XEN_EXTRAVERSION'):
+                start = line.index('=') + 1
+                extraversion = line[start:].strip()
 
-		return "%s.%s%s" % (version, sublevel, extraversion)
+        return "%s.%s%s" % (version, sublevel, extraversion)
diff --git a/client/common_lib/barrier.py b/client/common_lib/barrier.py
index bc6bb61..d9da330 100755
--- a/client/common_lib/barrier.py
+++ b/client/common_lib/barrier.py
@@ -6,462 +6,462 @@
 
 
 class BarrierError(error.JobError):
-	pass
+    pass
 
 
 class barrier:
-	""" Multi-machine barrier support
+    """ Multi-machine barrier support
 
-	Provides multi-machine barrier mechanism.  Execution
-	stopping until all members arrive at the barrier.
+    Provides a multi-machine barrier mechanism.  Execution
+    stops until all members arrive at the barrier.
 
-	When a barrier is forming the master node (first in sort
-	order) in the set accepts connections from each member
-	of the set.	As they arrive they indicate the barrier
-	they are joining and their identifier (their hostname
-	or IP address and optional tag).  They are then asked
-	to wait.  When all members are present the master node
-	then checks that each member is still responding via a
-	ping/pong exchange.	If this is successful then everyone
-	has checked in at the barrier.  We then tell everyone
-	they may continue via a rlse message.
+    When a barrier is forming the master node (first in sort
+    order) in the set accepts connections from each member
+    of the set.  As they arrive they indicate the barrier
+    they are joining and their identifier (their hostname
+    or IP address and optional tag).  They are then asked
+    to wait.  When all members are present the master node
+    then checks that each member is still responding via a
+    ping/pong exchange.  If this is successful then everyone
+    has checked in at the barrier.  We then tell everyone
+    they may continue via a rlse message.
 
-	Where the master is not the first to reach the barrier
-	the client connects will fail.  Client will retry until
-	they either succeed in connecting to master or the overal
-	timeout is exceeded.
+    Where the master is not the first to reach the barrier
+    the client connections will fail.  Clients will retry until
+    they either succeed in connecting to the master or the overall
+    timeout is exceeded.
 
-	As an example here is the exchange for a three node
-	barrier called 'TAG'
+    As an example here is the exchange for a three node
+    barrier called 'TAG'
 
-	  MASTER                        CLIENT1         CLIENT2
-	    <-------------TAG C1-------------
-	    --------------wait-------------->
-	                  [...]
-	    <-------------TAG C2-----------------------------
-	    --------------wait------------------------------>
-	                  [...]
-	    --------------ping-------------->
-	    <-------------pong---------------
-	    --------------ping------------------------------>
-	    <-------------pong-------------------------------
-	            ----- BARRIER conditions MET -----
-	    --------------rlse-------------->
-	    --------------rlse------------------------------>
+      MASTER                        CLIENT1         CLIENT2
+        <-------------TAG C1-------------
+        --------------wait-------------->
+                      [...]
+        <-------------TAG C2-----------------------------
+        --------------wait------------------------------>
+                      [...]
+        --------------ping-------------->
+        <-------------pong---------------
+        --------------ping------------------------------>
+        <-------------pong-------------------------------
+                ----- BARRIER conditions MET -----
+        --------------rlse-------------->
+        --------------rlse------------------------------>
 
-	Note that once the last client has responded to pong the
-	barrier is implicitly deemed satisifed, they have all
-	acknowledged their presence.  If we fail to send any
-	of the rlse messages the barrier is still a success,
-	the failed host has effectively broken 'right at the
-	beginning' of the post barrier execution window.
+    Note that once the last client has responded to pong the
+    barrier is implicitly deemed satisfied; they have all
+    acknowledged their presence.  If we fail to send any
+    of the rlse messages the barrier is still a success,
+    the failed host has effectively broken 'right at the
+    beginning' of the post barrier execution window.
 
-	In addition, there is another rendevous, that makes each slave a server
-	and the master a client. The connection process and usage is still the
-	same but allows barriers from machines that only have a one-way
-	connection initiation. This is called rendevous_servers.
+    In addition, there is another rendevous that makes each slave a server
+    and the master a client. The connection process and usage are still the
+    same but allows barriers from machines that only have a one-way
+    connection initiation. This is called rendevous_servers.
 
-	For example:
-	    if ME == SERVER:
-	        server start
+    For example:
+        if ME == SERVER:
+            server start
 
-	    b = job.barrier(ME, 'server-up', 120)
-	    b.rendevous(CLIENT, SERVER)
+        b = job.barrier(ME, 'server-up', 120)
+        b.rendevous(CLIENT, SERVER)
 
-	    if ME == CLIENT:
-	        client run
+        if ME == CLIENT:
+            client run
 
-	    b = job.barrier(ME, 'test-complete', 3600)
-	    b.rendevous(CLIENT, SERVER)
+        b = job.barrier(ME, 'test-complete', 3600)
+        b.rendevous(CLIENT, SERVER)
 
-	    if ME == SERVER:
-	        server stop
+        if ME == SERVER:
+            server stop
 
-	Properties:
-		hostid
-			My hostname/IP address + optional tag
-		tag
-			Symbolic name of the barrier in progress
-		port
-			TCP port used for this barrier
-		timeout
-			Maximum time to wait for a the barrier to meet
-		start
-			Timestamp when we started waiting
-		members
-			All members we expect to find in the barrier
-		seen
-			Number of clients seen (should be the length of waiting)
-		waiting
-			Clients who have checked in and are waiting (master)
-		masterid
-			Hostname/IP address + optional tag of selected master
-	"""
+    Properties:
+            hostid
+                    My hostname/IP address + optional tag
+            tag
+                    Symbolic name of the barrier in progress
+            port
+                    TCP port used for this barrier
+            timeout
+                    Maximum time to wait for the barrier to meet
+            start
+                    Timestamp when we started waiting
+            members
+                    All members we expect to find in the barrier
+            seen
+                    Number of clients seen (should be the length of waiting)
+            waiting
+                    Clients who have checked in and are waiting (master)
+            masterid
+                    Hostname/IP address + optional tag of selected master
+    """
 
-	def __init__(self, hostid, tag, timeout, port=63000):
-		self.hostid = hostid
-		self.tag = tag
-		self.port = port
-		self.timeout = timeout
+    def __init__(self, hostid, tag, timeout, port=63000):
+        self.hostid = hostid
+        self.tag = tag
+        self.port = port
+        self.timeout = timeout
 
-		self.report("tag=%s port=%d timeout=%d" \
-			% (self.tag, self.port, self.timeout))
+        self.report("tag=%s port=%d timeout=%d" \
+                % (self.tag, self.port, self.timeout))
 
 
-	def get_host_from_id(self, id):
-		# Remove any trailing local identifier following a #.
-		# This allows multiple members per host which is particularly
-		# helpful in testing.
-		return id.split('#')[0]
+    def get_host_from_id(self, id):
+        # Remove any trailing local identifier following a #.
+        # This allows multiple members per host which is particularly
+        # helpful in testing.
+        return id.split('#')[0]
 
 
-	def report(self, out):
-		print "barrier:", self.hostid, out
-		sys.stdout.flush()
+    def report(self, out):
+        print "barrier:", self.hostid, out
+        sys.stdout.flush()
 
 
-	def update_timeout(self, timeout):
-		self.timeout = (time() - self.start) + timeout
+    def update_timeout(self, timeout):
+        self.timeout = (time() - self.start) + timeout
 
 
-	def remaining(self):
-		timeout = self.timeout - (time() - self.start)
-		if (timeout <= 0):
-			raise BarrierError("timeout waiting for barrier")
+    def remaining(self):
+        timeout = self.timeout - (time() - self.start)
+        if (timeout <= 0):
+            raise BarrierError("timeout waiting for barrier")
 
-		self.report("remaining: %d" % (timeout))
-		return timeout
+        self.report("remaining: %d" % (timeout))
+        return timeout
 
 
-	def master_welcome(self, connection):
-		(client, addr) = connection
-		name = None
+    def master_welcome(self, connection):
+        (client, addr) = connection
+        name = None
 
-		client.settimeout(5)
-		try:
-			# Get the clients name.
-			intro = client.recv(1024)
-			intro = intro.strip("\r\n")
+        client.settimeout(5)
+        try:
+            # Get the client's name.
+            intro = client.recv(1024)
+            intro = intro.strip("\r\n")
 
-			(tag, name) = intro.split(' ')
+            (tag, name) = intro.split(' ')
 
-			self.report("new client tag=%s, name=%s" % (tag, name))
+            self.report("new client tag=%s, name=%s" % (tag, name))
 
-			# Ok, we know who is trying to attach.  Confirm that
-			# they are coming to the same meeting.  Also, everyone
-			# should be using a unique handle (their IP address).
-			# If we see a duplicate, something _bad_ has happened
-			# so drop them now.
-			if self.tag != tag:
-				self.report("client arriving for the " \
-								"wrong barrier")
-				client.settimeout(5)
-				client.send("!tag")
-				client.close()
-				return
-			elif name in self.waiting:
-				self.report("duplicate client")
-				client.settimeout(5)
-				client.send("!dup")
-				client.close()
-				return
-			
-			# Acknowledge the client
-			client.send("wait")
+            # Ok, we know who is trying to attach.  Confirm that
+            # they are coming to the same meeting.  Also, everyone
+            # should be using a unique handle (their IP address).
+            # If we see a duplicate, something _bad_ has happened
+            # so drop them now.
+            if self.tag != tag:
+                self.report("client arriving for the " \
+                                                "wrong barrier")
+                client.settimeout(5)
+                client.send("!tag")
+                client.close()
+                return
+            elif name in self.waiting:
+                self.report("duplicate client")
+                client.settimeout(5)
+                client.send("!dup")
+                client.close()
+                return
 
-		except socket.timeout:
-			# This is nominally an error, but as we do not know
-			# who that was we cannot do anything sane other
-			# than report it and let the normal timeout kill
-			# us when thats appropriate.
-			self.report("client handshake timeout: (%s:%d)" %\
-				(addr[0], addr[1]))
-			client.close()
-			return
+            # Acknowledge the client
+            client.send("wait")
 
-		self.report("client now waiting: %s (%s:%d)" % \
-						(name, addr[0], addr[1]))
+        except socket.timeout:
+            # This is nominally an error, but as we do not know
+            # who that was we cannot do anything sane other
+            # than report it and let the normal timeout kill
+            # us when that's appropriate.
+            self.report("client handshake timeout: (%s:%d)" %\
+                    (addr[0], addr[1]))
+            client.close()
+            return
 
-		# They seem to be valid record them.
-		self.waiting[name] = connection
-		self.seen += 1
+        self.report("client now waiting: %s (%s:%d)" % \
+                                        (name, addr[0], addr[1]))
 
+        # They seem to be valid; record them.
+        self.waiting[name] = connection
+        self.seen += 1
 
-	def slave_hello(self, connection):
-		(client, addr) = connection
-		name = None
 
-		client.settimeout(5)
-		try:
-			client.send(self.tag + " " + self.hostid)
+    def slave_hello(self, connection):
+        (client, addr) = connection
+        name = None
 
-			reply = client.recv(4)
-			reply = reply.strip("\r\n")
-			self.report("master said: " + reply)
+        client.settimeout(5)
+        try:
+            client.send(self.tag + " " + self.hostid)
 
-			# Confirm the master accepted the connection.
-			if reply != "wait":
-				self.report("Bad connection request to master")
-				client.close()
-				return
+            reply = client.recv(4)
+            reply = reply.strip("\r\n")
+            self.report("master said: " + reply)
 
-		except socket.timeout:
-			# This is nominally an error, but as we do not know
-			# who that was we cannot do anything sane other
-			# than report it and let the normal timeout kill
-			# us when thats appropriate.
-			self.report("master handshake timeout: (%s:%d)" %\
-				(addr[0], addr[1]))
-			client.close()
-			return
+            # Confirm the master accepted the connection.
+            if reply != "wait":
+                self.report("Bad connection request to master")
+                client.close()
+                return
 
-		self.report("slave now waiting: (%s:%d)" % \
-						(addr[0], addr[1]))
+        except socket.timeout:
+            # This is nominally an error, but as we do not know
+            # who that was we cannot do anything sane other
+            # than report it and let the normal timeout kill
+            # us when that's appropriate.
+            self.report("master handshake timeout: (%s:%d)" %\
+                    (addr[0], addr[1]))
+            client.close()
+            return
 
-		# They seem to be valid record them.
-		self.waiting[self.hostid] = connection
-		self.seen = 1
+        self.report("slave now waiting: (%s:%d)" % \
+                                        (addr[0], addr[1]))
 
+        # They seem to be valid; record them.
+        self.waiting[self.hostid] = connection
+        self.seen = 1
 
-	def master_release(self):
-		# Check everyone is still there, that they have not
-		# crashed or disconnected in the meantime.
-		allpresent = 1
-		for name in self.waiting:
-			(client, addr) = self.waiting[name]
 
-			self.report("checking client present: " + name)
+    def master_release(self):
+        # Check everyone is still there, that they have not
+        # crashed or disconnected in the meantime.
+        allpresent = 1
+        for name in self.waiting:
+            (client, addr) = self.waiting[name]
 
-			client.settimeout(5)
-			reply = 'none'
-			try:
-				client.send("ping")
-				reply = client.recv(1024)
-			except socket.timeout:
-				self.report("ping/pong timeout: " + name)
-				pass
+            self.report("checking client present: " + name)
+
+            client.settimeout(5)
+            reply = 'none'
+            try:
+                client.send("ping")
+                reply = client.recv(1024)
+            except socket.timeout:
+                self.report("ping/pong timeout: " + name)
+                pass
+
+            if reply != "pong":
+                allpresent = 0
+
+        if not allpresent:
+            raise BarrierError("master lost client")
 
-			if reply != "pong":
-				allpresent = 0
+        # If everyone checks in, commit the release.
+        for name in self.waiting:
+            (client, addr) = self.waiting[name]
 
-		if not allpresent:
-			raise BarrierError("master lost client")
-			
-		# If every ones checks in then commit the release.
-		for name in self.waiting:
-			(client, addr) = self.waiting[name]
+            self.report("releasing client: " + name)
 
-			self.report("releasing client: " + name)
+            client.settimeout(5)
+            try:
+                client.send("rlse")
+            except socket.timeout:
+                self.report("release timeout: " + name)
+                pass
 
-			client.settimeout(5)
-			try:
-				client.send("rlse")
-			except socket.timeout:
-				self.report("release timeout: " + name)
-				pass
 
+    def waiting_close(self):
+        # Either way, close out all the clients.  If we have
+        # not released them then they know to abort.
+        for name in self.waiting:
+            (client, addr) = self.waiting[name]
 
-	def waiting_close(self):
-		# Either way, close out all the clients.  If we have
-		# not released them then they know to abort.
-		for name in self.waiting:
-			(client, addr) = self.waiting[name]
+            self.report("closing client: " + name)
 
-			self.report("closing client: " + name)
-	
-			try:
-				client.close()
-			except:
-				pass
+            try:
+                client.close()
+            except:
+                pass
 
 
-	def run_server(self, is_master):
-		self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-		self.server.setsockopt(socket.SOL_SOCKET,
-							socket.SO_REUSEADDR, 1)
-		self.server.bind(('', self.port))
-		self.server.listen(10)
+    def run_server(self, is_master):
+        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.server.setsockopt(socket.SOL_SOCKET,
+                                                socket.SO_REUSEADDR, 1)
+        self.server.bind(('', self.port))
+        self.server.listen(10)
 
-		failed = 0
-		try:
-			while 1:
-				try:
-					# Wait for callers welcoming each.
-					self.server.settimeout(self.remaining())
-					connection = self.server.accept()
-					if is_master:
-						self.master_welcome(connection)
-					else:
-						self.slave_hello(connection)
-				except socket.timeout:
-					self.report("timeout waiting for " +
-						"remaining clients")
-					pass
+        failed = 0
+        try:
+            while 1:
+                try:
+                    # Wait for callers, welcoming each one.
+                    self.server.settimeout(self.remaining())
+                    connection = self.server.accept()
+                    if is_master:
+                        self.master_welcome(connection)
+                    else:
+                        self.slave_hello(connection)
+                except socket.timeout:
+                    self.report("timeout waiting for " +
+                            "remaining clients")
+                    pass
 
-				if is_master:
-					# Check if everyone is here.
-					self.report("master seen %d of %d" % \
-						(self.seen, len(self.members)))
-					if self.seen == len(self.members):
-						self.master_release()
-						break
-				else:
-					# Check if master connected.
-					if self.seen:
-						self.report("slave connected " +
-							"to master")
-						self.slave_wait()
-						break
+                if is_master:
+                    # Check if everyone is here.
+                    self.report("master seen %d of %d" % \
+                            (self.seen, len(self.members)))
+                    if self.seen == len(self.members):
+                        self.master_release()
+                        break
+                else:
+                    # Check if master connected.
+                    if self.seen:
+                        self.report("slave connected " +
+                                "to master")
+                        self.slave_wait()
+                        break
 
-			self.waiting_close()
-			self.server.close()
-		except:
-			self.waiting_close()
-			self.server.close()
-			raise
+            self.waiting_close()
+            self.server.close()
+        except:
+            self.waiting_close()
+            self.server.close()
+            raise
 
 
-	def run_client(self, is_master):
-		while self.remaining() > 0:
-			try:
-				remote = socket.socket(socket.AF_INET,
-					socket.SOCK_STREAM)
-				remote.settimeout(30)
-				if is_master:
-					# Connect to all slaves.
-					host = self.get_host_from_id(
-						self.members[self.seen])
-					self.report("calling slave: %s" % host)
-					connection = (remote, (host, self.port))
-					remote.connect(connection[1])
-					self.master_welcome(connection)
-				else:
-					# Just connect to the master.
-					host = self.get_host_from_id(
-						self.masterid)
-					self.report("calling master")
-					connection = (remote, (host, self.port))
-					remote.connect(connection[1])
-					self.slave_hello(connection)
-			except socket.timeout:
-				self.report("timeout calling host, retry")
-				sleep(10)
-				pass
-			except socket.error, err:
-				(code, str) = err
-				if (code != errno.ECONNREFUSED):
-					raise
-				sleep(10)
+    def run_client(self, is_master):
+        while self.remaining() > 0:
+            try:
+                remote = socket.socket(socket.AF_INET,
+                        socket.SOCK_STREAM)
+                remote.settimeout(30)
+                if is_master:
+                    # Connect to all slaves.
+                    host = self.get_host_from_id(
+                            self.members[self.seen])
+                    self.report("calling slave: %s" % host)
+                    connection = (remote, (host, self.port))
+                    remote.connect(connection[1])
+                    self.master_welcome(connection)
+                else:
+                    # Just connect to the master.
+                    host = self.get_host_from_id(
+                            self.masterid)
+                    self.report("calling master")
+                    connection = (remote, (host, self.port))
+                    remote.connect(connection[1])
+                    self.slave_hello(connection)
+            except socket.timeout:
+                self.report("timeout calling host, retry")
+                sleep(10)
+                pass
+            except socket.error, err:
+                (code, str) = err
+                if (code != errno.ECONNREFUSED):
+                    raise
+                sleep(10)
 
-			if is_master:
-				# Check if everyone is here.
-				self.report("master seen %d of %d" % \
-					(self.seen, len(self.members)))
-				if self.seen == len(self.members):
-					self.master_release()
-					break
-			else:
-				# Check if master connected.
-				if self.seen:
-					self.report("slave connected " +
-						"to master")
-					self.slave_wait()
-					break
+            if is_master:
+                # Check if everyone is here.
+                self.report("master seen %d of %d" % \
+                        (self.seen, len(self.members)))
+                if self.seen == len(self.members):
+                    self.master_release()
+                    break
+            else:
+                # Check if master connected.
+                if self.seen:
+                    self.report("slave connected " +
+                            "to master")
+                    self.slave_wait()
+                    break
 
-		self.waiting_close()
+        self.waiting_close()
 
 
-	def slave_wait(self):
-		remote = self.waiting[self.hostid][0]
-		mode = "wait"
-		while 1:
-			# All control messages are the same size to allow
-			# us to split individual messages easily.
-			remote.settimeout(self.remaining())
-			reply = remote.recv(4)
-			if not reply:
-				break
+    def slave_wait(self):
+        remote = self.waiting[self.hostid][0]
+        mode = "wait"
+        while 1:
+            # All control messages are the same size to allow
+            # us to split individual messages easily.
+            remote.settimeout(self.remaining())
+            reply = remote.recv(4)
+            if not reply:
+                break
 
-			reply = reply.strip("\r\n")
-			self.report("master said: " + reply)
+            reply = reply.strip("\r\n")
+            self.report("master said: " + reply)
 
-			mode = reply
-			if reply == "ping":
-				# Ensure we have sufficient time for the
-				# ping/pong/rlse cyle to complete normally.
-				self.update_timeout(10 + 10 * len(self.members))
+            mode = reply
+            if reply == "ping":
+                # Ensure we have sufficient time for the
+                # ping/pong/rlse cycle to complete normally.
+                self.update_timeout(10 + 10 * len(self.members))
 
-				self.report("pong")
-				remote.settimeout(self.remaining())
-				remote.send("pong")
+                self.report("pong")
+                remote.settimeout(self.remaining())
+                remote.send("pong")
 
-			elif reply == "rlse":
-				# Ensure we have sufficient time for the
-				# ping/pong/rlse cyle to complete normally.
-				self.update_timeout(10 + 10 * len(self.members))
+            elif reply == "rlse":
+                # Ensure we have sufficient time for the
+                # ping/pong/rlse cycle to complete normally.
+                self.update_timeout(10 + 10 * len(self.members))
 
-				self.report("was released, waiting for close")
+                self.report("was released, waiting for close")
 
-		if mode == "rlse":
-			pass
-		elif mode == "wait":
-			raise BarrierError("master abort -- barrier timeout")
-		elif mode == "ping":
-			raise BarrierError("master abort -- client lost")
-		elif mode == "!tag":
-			raise BarrierError("master abort -- incorrect tag")
-		elif mode == "!dup":
-			raise BarrierError("master abort -- duplicate client")
-		else:
-			raise BarrierError("master handshake failure: " + mode)
+        if mode == "rlse":
+            pass
+        elif mode == "wait":
+            raise BarrierError("master abort -- barrier timeout")
+        elif mode == "ping":
+            raise BarrierError("master abort -- client lost")
+        elif mode == "!tag":
+            raise BarrierError("master abort -- incorrect tag")
+        elif mode == "!dup":
+            raise BarrierError("master abort -- duplicate client")
+        else:
+            raise BarrierError("master handshake failure: " + mode)
 
 
-	def rendevous(self, *hosts):
-		self.start = time()
-		self.members = list(hosts)
-		self.members.sort()
-		self.masterid = self.members.pop(0)
+    def rendevous(self, *hosts):
+        self.start = time()
+        self.members = list(hosts)
+        self.members.sort()
+        self.masterid = self.members.pop(0)
 
-		self.report("masterid: %s" % self.masterid)
-		if not len(self.members):
-			self.report("No other members listed.")
-			return
-		self.report("members: %s" % ",".join(self.members))
+        self.report("masterid: %s" % self.masterid)
+        if not len(self.members):
+            self.report("No other members listed.")
+            return
+        self.report("members: %s" % ",".join(self.members))
 
-		self.seen = 0
-		self.waiting = {}
+        self.seen = 0
+        self.waiting = {}
 
-		# Figure out who is the master in this barrier.
-		if self.hostid == self.masterid:
-			self.report("selected as master")
-			self.run_server(is_master=True)
-		else:
-			self.report("selected as slave")
-			self.run_client(is_master=False)
+        # Figure out who is the master in this barrier.
+        if self.hostid == self.masterid:
+            self.report("selected as master")
+            self.run_server(is_master=True)
+        else:
+            self.report("selected as slave")
+            self.run_client(is_master=False)
 
 
-	def rendevous_servers(self, masterid, *hosts):
-		self.start = time()
-		self.members = list(hosts)
-		self.members.sort()
-		self.masterid = masterid
+    def rendevous_servers(self, masterid, *hosts):
+        self.start = time()
+        self.members = list(hosts)
+        self.members.sort()
+        self.masterid = masterid
 
-		self.report("masterid: %s" % self.masterid)
-		if not len(self.members):
-			self.report("No other members listed.")
-			return
-		self.report("members: %s" % ",".join(self.members))
+        self.report("masterid: %s" % self.masterid)
+        if not len(self.members):
+            self.report("No other members listed.")
+            return
+        self.report("members: %s" % ",".join(self.members))
 
-		self.seen = 0
-		self.waiting = {}
+        self.seen = 0
+        self.waiting = {}
 
-		# Figure out who is the master in this barrier.
-		if self.hostid == self.masterid:
-			self.report("selected as master")
-			self.run_client(is_master=True)
-		else:
-			self.report("selected as slave")
-			self.run_server(is_master=False)
+        # Figure out who is the master in this barrier.
+        if self.hostid == self.masterid:
+            self.report("selected as master")
+            self.run_client(is_master=True)
+        else:
+            self.report("selected as slave")
+            self.run_server(is_master=False)
 
 #
 # TESTING -- direct test harness.
@@ -472,28 +472,28 @@
 #   python bin/barrier.py 3 meeting
 #
 if __name__ == "__main__":
-	barrier = barrier('127.0.0.1#' + sys.argv[1], sys.argv[2], 60)
+    barrier = barrier('127.0.0.1#' + sys.argv[1], sys.argv[2], 60)
 
-	try:
-		all = [ '127.0.0.1#2', '127.0.0.1#1', '127.0.0.1#3' ]
-		barrier.rendevous(*all)
-	except BarrierError, err:
-		print "barrier: 127.0.0.1#" + sys.argv[1] + \
-						": barrier failed:", err
-		sys.exit(1)
-	else:
-		print "barrier: 127.0.0.1#" + sys.argv[1] + \
-					": all present and accounted for"
+    try:
+        all = [ '127.0.0.1#2', '127.0.0.1#1', '127.0.0.1#3' ]
+        barrier.rendevous(*all)
+    except BarrierError, err:
+        print "barrier: 127.0.0.1#" + sys.argv[1] + \
+                                        ": barrier failed:", err
+        sys.exit(1)
+    else:
+        print "barrier: 127.0.0.1#" + sys.argv[1] + \
+                                ": all present and accounted for"
 
-	try:
-		all = [ '127.0.0.1#2', '127.0.0.1#1' ]
-		if 1 <= int(sys.argv[1]) <= 2:
-			barrier.rendevous_servers(*all)
-	except BarrierError, err:
-		print "barrier: 127.0.0.1#" + sys.argv[1] + \
-						": barrier failed:", err
-		sys.exit(1)
-	else:
-		print "barrier: 127.0.0.1#" + sys.argv[1] + \
-					": all present and accounted for"
-		sys.exit(0)
+    try:
+        all = [ '127.0.0.1#2', '127.0.0.1#1' ]
+        if 1 <= int(sys.argv[1]) <= 2:
+            barrier.rendevous_servers(*all)
+    except BarrierError, err:
+        print "barrier: 127.0.0.1#" + sys.argv[1] + \
+                                        ": barrier failed:", err
+        sys.exit(1)
+    else:
+        print "barrier: 127.0.0.1#" + sys.argv[1] + \
+                                ": all present and accounted for"
+        sys.exit(0)
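Taken together, the hunks above implement the client side of a simple TCP rendezvous: slaves connect to the elected master, answer its ping with pong, and block until the rlse message releases them. A minimal sketch of driving this from two cooperating jobs, assuming barrier.py is importable; the hostids, tag and 120-second timeout are illustrative:

    from barrier import barrier, BarrierError

    # hostids carry a '#' discriminator, as in the self-test harness above
    b = barrier('host1#sync', 'post-install', 120)
    try:
        # blocks until every listed member has checked in, raises on timeout
        b.rendevous('host1#sync', 'host2#sync')
    except BarrierError, e:
        print "barrier failed:", e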
diff --git a/client/common_lib/check_version.py b/client/common_lib/check_version.py
index 981a21c..6971049 100755
--- a/client/common_lib/check_version.py
+++ b/client/common_lib/check_version.py
@@ -2,41 +2,41 @@
 
 
 def extract_version(path):
-	match = re.search(r'/python(\d+)\.(\d+)$', path)
-	if match:
-		return (int(match.group(1)), int(match.group(2)))
-	else:
-		return None
+    match = re.search(r'/python(\d+)\.(\d+)$', path)
+    if match:
+        return (int(match.group(1)), int(match.group(2)))
+    else:
+        return None
 
 
 def find_newest_python():
-	pythons = []
-	pythons.extend(glob.glob('/usr/bin/python*'))
-	pythons.extend(glob.glob('/usr/local/bin/python*'))
+    pythons = []
+    pythons.extend(glob.glob('/usr/bin/python*'))
+    pythons.extend(glob.glob('/usr/local/bin/python*'))
 
-	best_python = (0, 0), ''
-	for python in pythons:
-		version = extract_version(python)
-		if version > best_python[0] and version >= (2, 4):
-			best_python = version, python
+    best_python = (0, 0), ''
+    for python in pythons:
+        version = extract_version(python)
+        if version > best_python[0] and version >= (2, 4):
+            best_python = version, python
 
-	if best_python[0] == (0, 0):
-		raise ValueError('Python 2.4 or newer is needed')
-	return best_python[1]
-	
+    if best_python[0] == (0, 0):
+        raise ValueError('Python 2.4 or newer is needed')
+    return best_python[1]
+
 
 def restart():
-	python = find_newest_python()
-	sys.argv.insert(0, '-u')
-	sys.argv.insert(0, python)
-	os.execv(sys.argv[0], sys.argv)
+    python = find_newest_python()
+    sys.argv.insert(0, '-u')
+    sys.argv.insert(0, python)
+    os.execv(sys.argv[0], sys.argv)
 
 
 def check_python_version():
-	version = None
-	try:
-		version = sys.version_info[0:2]
-	except AttributeError:
-		pass # pre 2.0, no neat way to get the exact number
-	if not version or version < (2, 4):
-		restart()
+    version = None
+    try:
+        version = sys.version_info[0:2]
+    except AttributeError:
+        pass # pre 2.0, no neat way to get the exact number
+    if not version or version < (2, 4):
+        restart()
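The version regex above only accepts interpreter names ending in an explicit major.minor pair, which is easiest to see on concrete paths (the paths are illustrative):

    from check_version import extract_version

    print extract_version('/usr/bin/python2.4')        # -> (2, 4)
    print extract_version('/usr/local/bin/python2.5')  # -> (2, 5)
    print extract_version('/usr/bin/python')           # -> None, no version suffix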
diff --git a/client/common_lib/error.py b/client/common_lib/error.py
index 6eeb3fe..1fe3370 100644
--- a/client/common_lib/error.py
+++ b/client/common_lib/error.py
@@ -6,137 +6,137 @@
 from traceback import format_exception
 
 def format_error():
-	t, o, tb = sys.exc_info()
-	trace = format_exception(t, o, tb)
-	# Clear the backtrace to prevent a circular reference
-	# in the heap -- as per tutorial
-	tb = ''
+    t, o, tb = sys.exc_info()
+    trace = format_exception(t, o, tb)
+    # Clear the backtrace to prevent a circular reference
+    # in the heap -- as per tutorial
+    tb = ''
 
-	return ''.join(trace)
+    return ''.join(trace)
 
 
 class JobContinue(SystemExit):
-	"""Allow us to bail out requesting continuance."""
-	pass
+    """Allow us to bail out requesting continuance."""
+    pass
 
 
 class JobComplete(SystemExit):
-	"""Allow us to bail out indicating continuation not required."""
-	pass
+    """Allow us to bail out indicating continuation not required."""
+    pass
 
 
 class AutotestError(Exception):
-	"""The parent of all errors deliberatly thrown within the client code."""
-	pass
+    """The parent of all errors deliberatly thrown within the client code."""
+    pass
 
 
 class JobError(AutotestError):
-	"""Indicates an error which terminates and fails the whole job."""
-	pass
+    """Indicates an error which terminates and fails the whole job."""
+    pass
 
 
 class TestError(AutotestError):
-	"""Indicates an error which terminates and fails the test."""
-	pass
+    """Indicates an error which terminates and fails the test."""
+    pass
 
 
 class TestNAError(AutotestError):
-	"""Indictates that the test is Not Applicable.  Should be thrown
-	when various conditions are such that the test is inappropriate"""
-	pass
+    """Indictates that the test is Not Applicable.  Should be thrown
+    when various conditions are such that the test is inappropriate"""
+    pass
 
 
 class CmdError(TestError):
-	"""\
-	Indicates that a command failed, is fatal to the test unless caught.
-	"""
-	def __init__(self, command, result_obj, additional_text=None):
-		TestError.__init__(self, command, result_obj, additional_text)
+    """\
+    Indicates that a command failed, is fatal to the test unless caught.
+    """
+    def __init__(self, command, result_obj, additional_text=None):
+        TestError.__init__(self, command, result_obj, additional_text)
 
 
-	def __str__(self):
-		msg = "Command <%s> failed, rc=%d" % (self.args[0],
-						      self.args[1].exit_status)
-		if self.args[2]:
-			msg += ", " + self.args[2]
-		return msg
+    def __str__(self):
+        msg = "Command <%s> failed, rc=%d" % (self.args[0],
+                                              self.args[1].exit_status)
+        if self.args[2]:
+            msg += ", " + self.args[2]
+        return msg
 
 
 class PackageError(TestError):
-	"""Indicates an error trying to perform a package operation."""
-	pass
+    """Indicates an error trying to perform a package operation."""
+    pass
 
 
 class UnhandledError(TestError):
-	"""Indicates an unhandled exception in a test."""
-	def __init__(self, prefix):
-		msg = prefix + format_error()
-		TestError.__init__(self, msg)
+    """Indicates an unhandled exception in a test."""
+    def __init__(self, prefix):
+        msg = prefix + format_error()
+        TestError.__init__(self, msg)
 
 
 class InstallError(JobError):
-	"""Indicates an installation error which Terminates and fails the job."""
-	pass
+    """Indicates an installation error which Terminates and fails the job."""
+    pass
 
 
 class AutotestRunError(AutotestError):
-	pass
+    pass
 
 
 class AutotestTimeoutError(AutotestError):
-	"""This exception is raised when an autotest test exceeds the timeout
-	parameter passed to run_timed_test and is killed.
-	"""
+    """This exception is raised when an autotest test exceeds the timeout
+    parameter passed to run_timed_test and is killed.
+    """
 
 
 # server-specific errors
 
 class AutoservError(Exception):
-	pass
+    pass
 
 
 class AutoservSSHTimeout(AutoservError):
-	"""SSH experienced a connection timeout"""
-	pass
+    """SSH experienced a connection timeout"""
+    pass
 
 
 class AutoservRunError(AutoservError):
-	"""\
-	Errors raised by one of the run functions.  Should always be
-	constructed with a tuple of two args (error description (str),
-	run result object).
-	"""
-	def __init__(self, description, result_obj):
-		AutoservError.__init__(self, description, result_obj)
+    """\
+    Errors raised by one of the run functions.  Should always be
+    constructed with a tuple of two args (error description (str),
+    run result object).
+    """
+    def __init__(self, description, result_obj):
+        AutoservError.__init__(self, description, result_obj)
 
 
 class AutoservVirtError(AutoservError):
-	"""Vitualization related error"""
-	pass
+    """Vitualization related error"""
+    pass
 
 
 class AutoservUnsupportedError(AutoservError):
-	"""Error raised when you try to use an unsupported optional feature"""
-	pass
+    """Error raised when you try to use an unsupported optional feature"""
+    pass
 
 
 class AutoservHostError(AutoservError):
-	"""Error reaching a host"""
-	pass
+    """Error reaching a host"""
+    pass
 
 
 class AutoservRebootError(AutoservError):
-	"""Error occured while rebooting a machine"""
-	pass
+    """Error occured while rebooting a machine"""
+    pass
 
 
 class AutoservSubcommandError(AutoservError):
-	"""Indicates an error while executing a (forked) subcommand"""
-	def __init__(self, func, exit_code):
-		AutoservError.__init__(self, func, exit_code)
-		self.func = func
-		self.exit_code = exit_code
+    """Indicates an error while executing a (forked) subcommand"""
+    def __init__(self, func, exit_code):
+        AutoservError.__init__(self, func, exit_code)
+        self.func = func
+        self.exit_code = exit_code
 
-	def __str__(self):
-		return ("Subcommand %s failed with exit code %d" %
-			(self.func, self.exit_code))
+    def __str__(self):
+        return ("Subcommand %s failed with exit code %d" %
+                (self.func, self.exit_code))
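Because every client-side failure type descends from AutotestError, callers can catch one specific class or the whole family. A short sketch with an illustrative message:

    import error

    try:
        raise error.TestNAError("required hardware not present")
    except error.AutotestError, e:
        # JobError, TestError, TestNAError, ... all derive from AutotestError
        print "client error:", e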
diff --git a/client/common_lib/global_config.py b/client/common_lib/global_config.py
index 1036e3d..8db02e1 100644
--- a/client/common_lib/global_config.py
+++ b/client/common_lib/global_config.py
@@ -11,115 +11,115 @@
 import error
 
 dirname = os.path.dirname(sys.modules[__name__].__file__)
-DEFAULT_CONFIG_FILE = os.path.abspath(os.path.join(dirname, 
-						"../../global_config.ini"))
-DEFAULT_SHADOW_FILE = os.path.abspath(os.path.join(dirname, 
-						"../../shadow_config.ini"))
-						
+DEFAULT_CONFIG_FILE = os.path.abspath(os.path.join(dirname,
+                                                "../../global_config.ini"))
+DEFAULT_SHADOW_FILE = os.path.abspath(os.path.join(dirname,
+                                                "../../shadow_config.ini"))
+
 
 class ConfigError(error.AutotestError):
-	pass
+    pass
 
 
 class ConfigValueError(ConfigError):
-	pass
+    pass
 
 
 class global_config(object):
-	config = None
-	config_file = DEFAULT_CONFIG_FILE
-	shadow_file = DEFAULT_SHADOW_FILE
-	
-	
-	def set_config_files(self, config_file=DEFAULT_CONFIG_FILE, 
-				shadow_file=DEFAULT_SHADOW_FILE):
-		self.config_file = config_file
-		self.shadow_file = shadow_file
-		self.config = None
+    config = None
+    config_file = DEFAULT_CONFIG_FILE
+    shadow_file = DEFAULT_SHADOW_FILE
 
 
-	def get_config_value(self, section, key, type=str, default=None):
-	        if self.config == None:
-	        	self.parse_config_file()
-	        	
-	        try:
-                	val = self.config.get(section, key)
-                except:
-                        if default == None:
-                                msg = ("Value '%s' not found in section '%s'" %
-                                      (key, section))
-                                raise ConfigError(msg)
-                        else:
-                                return default
-
-		return self.convert_value(key, section, val, type, default)
+    def set_config_files(self, config_file=DEFAULT_CONFIG_FILE,
+                            shadow_file=DEFAULT_SHADOW_FILE):
+        self.config_file = config_file
+        self.shadow_file = shadow_file
+        self.config = None
 
 
-	def merge_configs(self, shadow_config):
-		# overwrite whats in config with whats in shadow_config
-		sections = shadow_config.sections()
-		for section in sections:
-			# add the section if need be
-			if not self.config.has_section(section):
-				self.config.add_section(section)
-			# now run through all options and set them
-			options = shadow_config.options(section)
-			for option in options:
-				val = shadow_config.get(section, option)
-				self.config.set(section, option, val)
+    def get_config_value(self, section, key, type=str, default=None):
+        if self.config == None:
+            self.parse_config_file()
+
+        try:
+            val = self.config.get(section, key)
+        except:
+            if default == None:
+                msg = ("Value '%s' not found in section '%s'" %
+                      (key, section))
+                raise ConfigError(msg)
+            else:
+                return default
+
+        return self.convert_value(key, section, val, type, default)
 
 
-	def parse_config_file(self):
-		if not os.path.exists(self.config_file):
-			raise ConfigError('%s not found' % (self.config_file))
-		self.config = ConfigParser.ConfigParser()
-		self.config.read(self.config_file)
+    def merge_configs(self, shadow_config):
+        # overwrite what's in config with what's in shadow_config
+        sections = shadow_config.sections()
+        for section in sections:
+            # add the section if need be
+            if not self.config.has_section(section):
+                self.config.add_section(section)
+            # now run through all options and set them
+            options = shadow_config.options(section)
+            for option in options:
+                val = shadow_config.get(section, option)
+                self.config.set(section, option, val)
 
-		# now also read the shadow file if there is one
-		# this will overwrite anything that is found in the 
-		# other config
-		if os.path.exists(self.shadow_file):
-			shadow_config = ConfigParser.ConfigParser()
-			shadow_config.read(self.shadow_file)
-			# now we merge shadow into global
-			self.merge_configs(shadow_config)
-			
-	
-	# the values that are pulled from ini
-	# are strings.  But we should attempt to
-	# convert them to other types if needed.
-	def convert_value(self, key, section, value, type, default):
-		# strip off leading and trailing white space
-		sval = value.strip()
-		
-		# if length of string is zero then return None
-		if len(sval) == 0:
-			if type == str:
-				return ""
-			elif type == bool:
-				return False
-			elif type == int:
-				return 0
-			elif type == float:
-				return 0.0
-			else:
-				return None
-		
-		if type == bool:
-			if sval.lower() == "false":
-				return False
-			else:
-				return True
-		
-		try:
-			conv_val = type(sval)
-			return conv_val
-		except:
-			msg = ("Could not covert %s in section %s" % 
-				(key, section))
-			raise ConfigValueError(msg)
 
-		
-# insure the class is a singleton.  Now the symbol global_config 
+    def parse_config_file(self):
+        if not os.path.exists(self.config_file):
+            raise ConfigError('%s not found' % (self.config_file))
+        self.config = ConfigParser.ConfigParser()
+        self.config.read(self.config_file)
+
+        # now also read the shadow file if there is one
+        # this will overwrite anything that is found in the
+        # other config
+        if os.path.exists(self.shadow_file):
+            shadow_config = ConfigParser.ConfigParser()
+            shadow_config.read(self.shadow_file)
+            # now we merge shadow into global
+            self.merge_configs(shadow_config)
+
+
+    # the values that are pulled from ini
+    # are strings.  But we should attempt to
+    # convert them to other types if needed.
+    def convert_value(self, key, section, value, type, default):
+        # strip off leading and trailing white space
+        sval = value.strip()
+
+        # if length of string is zero then return None
+        if len(sval) == 0:
+            if type == str:
+                return ""
+            elif type == bool:
+                return False
+            elif type == int:
+                return 0
+            elif type == float:
+                return 0.0
+            else:
+                return None
+
+        if type == bool:
+            if sval.lower() == "false":
+                return False
+            else:
+                return True
+
+        try:
+            conv_val = type(sval)
+            return conv_val
+        except:
+            msg = ("Could not covert %s in section %s" %
+                    (key, section))
+            raise ConfigValueError(msg)
+
+
+# ensure the class is a singleton.  Now the symbol global_config
 # will point to the one and only instance of the class
 global_config = global_config()
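Callers go through the module-level singleton created above rather than instantiating the class themselves. A minimal sketch; the section and key names are illustrative, not taken from a shipped global_config.ini:

    from global_config import global_config, ConfigError

    try:
        # type converts the raw ini string; a non-None default
        # is returned instead of raising when the key is missing
        port = global_config.get_config_value("SERVER", "port", type=int)
        debug = global_config.get_config_value("SERVER", "debug", type=bool,
                                               default=False)
    except ConfigError, e:
        print "bad configuration:", e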
diff --git a/client/common_lib/global_config_unittest.py b/client/common_lib/global_config_unittest.py
index e41d709..dab6825 100644
--- a/client/common_lib/global_config_unittest.py
+++ b/client/common_lib/global_config_unittest.py
@@ -30,90 +30,90 @@
 
 
 def create_config_files():
-	(fp, global_file) = tempfile.mkstemp(".ini", text=True)
-	os.write(fp, global_config_ini_contents)
-	os.close(fp)
-	
-	(fp, shadow_file) = tempfile.mkstemp(".ini", text=True)
-	os.write(fp, shadow_config_ini_contents)
-	os.close(fp)
-	
-	return (global_file, shadow_file)
+    (fp, global_file) = tempfile.mkstemp(".ini", text=True)
+    os.write(fp, global_config_ini_contents)
+    os.close(fp)
+
+    (fp, shadow_file) = tempfile.mkstemp(".ini", text=True)
+    os.write(fp, shadow_config_ini_contents)
+    os.close(fp)
+
+    return (global_file, shadow_file)
 
 
 class global_config_test(unittest.TestCase):
-	# grab the singelton
-	conf = global_config.global_config
-	
-	def setUp(self):
-		# set the config files to our test files
-		(self.global_file, self.shadow_file) = create_config_files()	
-		self.conf.set_config_files(self.global_file, self.shadow_file)
+    # grab the singleton
+    conf = global_config.global_config
 
-	
-	def tearDown(self):
-		os.remove(self.global_file)
-		os.remove(self.shadow_file)
-		self.conf.set_config_files(global_config.DEFAULT_CONFIG_FILE,
-					global_config.DEFAULT_SHADOW_FILE)
+    def setUp(self):
+        # set the config files to our test files
+        (self.global_file, self.shadow_file) = create_config_files()
+        self.conf.set_config_files(self.global_file, self.shadow_file)
 
 
-	def testFloat(self):
-		val = self.conf.get_config_value("SECTION_A", "value_1", float)
-		self.assertEquals(type(val), types.FloatType)
-		self.assertEquals(val, 6.0)
-
-		
-	def testInt(self):
-		val = self.conf.get_config_value("SECTION_B", "value_1", int)
-		self.assertEquals(type(val), types.IntType)
-		self.assertTrue(val < 0)
-		val = self.conf.get_config_value("SECTION_B", "value_3", int)
-		self.assertEquals(val, 0)
-		val = self.conf.get_config_value("SECTION_B", "value_4", int)
-		self.assertTrue(val > 0)
+    def tearDown(self):
+        os.remove(self.global_file)
+        os.remove(self.shadow_file)
+        self.conf.set_config_files(global_config.DEFAULT_CONFIG_FILE,
+                                global_config.DEFAULT_SHADOW_FILE)
 
 
-	def testString(self):
-		val = self.conf.get_config_value("SECTION_A", "value_2")
-		self.assertEquals(type(val),types.StringType)
-		self.assertEquals(val, "hello")
-
-	
-	def testOverride(self):
-		val = self.conf.get_config_value("SECTION_C", "value_1")
-		self.assertEquals(val, "somebody@remotehost")
+    def testFloat(self):
+        val = self.conf.get_config_value("SECTION_A", "value_1", float)
+        self.assertEquals(type(val), types.FloatType)
+        self.assertEquals(val, 6.0)
 
 
-	def testException(self):
-		error = 0
-		try:
-			val = self.conf.get_config_value("SECTION_B", 
-							"value_2", int)
-		except:
-			error = 1
-		self.assertEquals(error, 1)
+    def testInt(self):
+        val = self.conf.get_config_value("SECTION_B", "value_1", int)
+        self.assertEquals(type(val), types.IntType)
+        self.assertTrue(val < 0)
+        val = self.conf.get_config_value("SECTION_B", "value_3", int)
+        self.assertEquals(val, 0)
+        val = self.conf.get_config_value("SECTION_B", "value_4", int)
+        self.assertTrue(val > 0)
 
 
-	def testBoolean(self):
-		val = self.conf.get_config_value("SECTION_A", "value_3", bool)
-		self.assertEquals(val, True)
-		val = self.conf.get_config_value("SECTION_A", "value_4", bool)
-		self.assertEquals(val, False)
-		val = self.conf.get_config_value("SECTION_A", "value_5", bool)
-		self.assertEquals(val, True)
-		val = self.conf.get_config_value("SECTION_A", "value_6", bool)
-		self.assertEquals(val, False)
+    def testString(self):
+        val = self.conf.get_config_value("SECTION_A", "value_2")
+        self.assertEquals(type(val),types.StringType)
+        self.assertEquals(val, "hello")
 
 
-	def testDefaults(self):
-		val = self.conf.get_config_value("MISSING", "foo", float, 3.6)
-		self.assertEquals(val, 3.6)
-		val = self.conf.get_config_value("SECTION_A", "novalue", str, 
-							"default")
-		self.assertEquals(val, "default")
+    def testOverride(self):
+        val = self.conf.get_config_value("SECTION_C", "value_1")
+        self.assertEquals(val, "somebody@remotehost")
+
+
+    def testException(self):
+        error = 0
+        try:
+            val = self.conf.get_config_value("SECTION_B",
+                                            "value_2", int)
+        except:
+            error = 1
+        self.assertEquals(error, 1)
+
+
+    def testBoolean(self):
+        val = self.conf.get_config_value("SECTION_A", "value_3", bool)
+        self.assertEquals(val, True)
+        val = self.conf.get_config_value("SECTION_A", "value_4", bool)
+        self.assertEquals(val, False)
+        val = self.conf.get_config_value("SECTION_A", "value_5", bool)
+        self.assertEquals(val, True)
+        val = self.conf.get_config_value("SECTION_A", "value_6", bool)
+        self.assertEquals(val, False)
+
+
+    def testDefaults(self):
+        val = self.conf.get_config_value("MISSING", "foo", float, 3.6)
+        self.assertEquals(val, 3.6)
+        val = self.conf.get_config_value("SECTION_A", "novalue", str,
+                                                "default")
+        self.assertEquals(val, "default")
 
 
 # this is so the test can be run in standalone mode
 if __name__ == '__main__':
-	unittest.main()
+    unittest.main()
diff --git a/client/common_lib/logging.py b/client/common_lib/logging.py
index 004975a..64b8086 100644
--- a/client/common_lib/logging.py
+++ b/client/common_lib/logging.py
@@ -8,48 +8,48 @@
 
 
 job_statuses = ["TEST_NA", "ABORT", "ERROR", "FAIL", "WARN", "GOOD", "ALERT",
-		"NOSTATUS"]
+                "NOSTATUS"]
 
 def is_valid_status(status):
-	if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT|TEST_NA))$', 
-			status):
-		return False
-	else:
-		return True
+    if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT|TEST_NA))$',
+                    status):
+        return False
+    else:
+        return True
 
 
 def record(fn):
-	"""
-	Generic method decorator for logging calls under the
-	assumption that return=GOOD, exception=FAIL. The method
-	determines parameters as:
-		subdir = self.subdir if it exists, or None
-		operation = "class name"."method name"
-		status = None on GOOD, str(exception) on FAIL
-	The object using this method must have a job attribute
-	for the logging to actually occur, otherwise the logging
-	will silently fail.
+    """
+    Generic method decorator for logging calls under the
+    assumption that return=GOOD, exception=FAIL. The method
+    determines parameters as:
+            subdir = self.subdir if it exists, or None
+            operation = "class name"."method name"
+            status = None on GOOD, str(exception) on FAIL
+    The object using this method must have a job attribute
+    for the logging to actually occur, otherwise the logging
+    will silently fail.
 
-	Logging can explicitly be disabled for a call by passing
-	a logged=False parameter
-	"""
-	def recorded_func(self, *args, **dargs):
-		logged = dargs.pop('logged', True)
-		job = getattr(self, 'job', None)
-		# if logging is disabled/unavailable, just
-		# call the method
-		if not logged or job is None:
-			return fn(self, *args, **dargs)
-		# logging is available, so wrap the method call
-		# in success/failure logging
-		subdir = getattr(self, 'subdir', None)
-		operation = '%s.%s' % (self.__class__.__name__,
-				       fn.__name__)
-		try:
-			result = fn(self, *args, **dargs)
-			job.record('GOOD', subdir, operation)
-		except Exception, detail:
-			job.record('FAIL', subdir, operation, str(detail))
-			raise
-		return result
-	return recorded_func
+    Logging can explicitly be disabled for a call by passing
+    a logged=False parameter
+    """
+    def recorded_func(self, *args, **dargs):
+        logged = dargs.pop('logged', True)
+        job = getattr(self, 'job', None)
+        # if logging is disabled/unavailable, just
+        # call the method
+        if not logged or job is None:
+            return fn(self, *args, **dargs)
+        # logging is available, so wrap the method call
+        # in success/failure logging
+        subdir = getattr(self, 'subdir', None)
+        operation = '%s.%s' % (self.__class__.__name__,
+                               fn.__name__)
+        try:
+            result = fn(self, *args, **dargs)
+            job.record('GOOD', subdir, operation)
+        except Exception, detail:
+            job.record('FAIL', subdir, operation, str(detail))
+            raise
+        return result
+    return recorded_func
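A consumer of the record decorator only needs a job attribute (and, optionally, subdir) on the instance; the class and method names below are illustrative:

    from logging import record  # the common_lib logging module, not the stdlib one

    class example_host(object):
        def __init__(self, job):
            self.job = job     # without this the decorated call is not logged
            self.subdir = None

        @record
        def reboot(self):
            pass               # a normal return records GOOD, an exception FAIL

    # logging can be suppressed per call: host.reboot(logged=False)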
diff --git a/client/common_lib/mail.py b/client/common_lib/mail.py
index 697c209..0c641dd 100644
--- a/client/common_lib/mail.py
+++ b/client/common_lib/mail.py
@@ -1,39 +1,39 @@
-import os, email.Message, smtplib 
+import os, email.Message, smtplib
 
 
 def send(from_address, to_addresses, cc_addresses, subject, message_body):
-	"""
-	Send out a plain old text email. It uses sendmail by default, but
-	if that fails then it falls back to using smtplib.
+    """
+    Send out a plain old text email over SMTP, using smtplib against a
+    server on localhost.
 
-	Args:
-		from_address: the email address to put in the "From:" field
-		to_addresses: either a single string or an iterable of
-		              strings to put in the "To:" field of the email
-		cc_addresses: either a single string of an iterable of
-		              strings to put in the "Cc:" field of the email
-		subject: the email subject
-		message_body: the body of the email. there's no special
-		              handling of encoding here, so it's safest to
-			      stick to 7-bit ASCII text
-	"""
-	# addresses can be a tuple or a single string, so make them tuples
-	if isinstance(to_addresses, str):
-		to_addresses = [to_addresses]
-	else:
-		to_addresses = list(to_addresses)
-	if isinstance(cc_addresses, str):
-		cc_addresses = [cc_addresses]
-	else:
-		cc_addresses = list(cc_addresses)
+    Args:
+            from_address: the email address to put in the "From:" field
+            to_addresses: either a single string or an iterable of
+                          strings to put in the "To:" field of the email
+            cc_addresses: either a single string or an iterable of
+                          strings to put in the "Cc:" field of the email
+            subject: the email subject
+            message_body: the body of the email. there's no special
+                          handling of encoding here, so it's safest to
+                          stick to 7-bit ASCII text
+    """
+    # addresses can be a tuple or a single string, so make them tuples
+    if isinstance(to_addresses, str):
+        to_addresses = [to_addresses]
+    else:
+        to_addresses = list(to_addresses)
+    if isinstance(cc_addresses, str):
+        cc_addresses = [cc_addresses]
+    else:
+        cc_addresses = list(cc_addresses)
 
-	message = email.Message.Message()
-	message["To"] = ", ".join(to_addresses)
-	message["Cc"] = ", ".join(cc_addresses)
-	message["From"] = from_address
-	message["Subject"] = subject
-	message.set_payload(message_body)
+    message = email.Message.Message()
+    message["To"] = ", ".join(to_addresses)
+    message["Cc"] = ", ".join(cc_addresses)
+    message["From"] = from_address
+    message["Subject"] = subject
+    message.set_payload(message_body)
 
-	server = smtplib.SMTP("localhost")
-	server.sendmail(from_address, to_addresses + cc_addresses, message.as_string())
-	server.quit()
+    server = smtplib.SMTP("localhost")
+    server.sendmail(from_address, to_addresses + cc_addresses, message.as_string())
+    server.quit()
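A hedged usage sketch; the addresses are placeholders, and delivery depends on an SMTP server listening on localhost:

    import mail

    mail.send("[email protected]",                    # From:
              ["[email protected]", "[email protected]"],      # To: (string or iterable)
              "[email protected]",                         # Cc:
              "nightly run results",
              "All tests passed.")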
diff --git a/client/common_lib/mail_unittest.py b/client/common_lib/mail_unittest.py
index 95de782..cac7ee6 100644
--- a/client/common_lib/mail_unittest.py
+++ b/client/common_lib/mail_unittest.py
@@ -4,66 +4,66 @@
 import mail, email.Message
 
 class test_data:
-	mail_host = None
-	mail_port = None
-	mail_connect = False
-	mail_from_address = None
-	mail_to_address = None
-	mail_message = None
+    mail_host = None
+    mail_port = None
+    mail_connect = False
+    mail_from_address = None
+    mail_to_address = None
+    mail_message = None
 
 
 # we define our needed mock SMTP
 class SMTP:
-	def __init__(self, host=None, port=25):
-		test_data.mail_host = host
-		test_data.mail_port = port
-		
-		if test_data.mail_host:
-			self.connect(test_data.mail_host, test_data.mail_port)
-		
-		
-	def connect(self, host, port):
-		test_data.mail_connect = True
+    def __init__(self, host=None, port=25):
+        test_data.mail_host = host
+        test_data.mail_port = port
+
+        if test_data.mail_host:
+            self.connect(test_data.mail_host, test_data.mail_port)
 
 
-	def quit(self):
-		test_data.mail_connect = False
+    def connect(self, host, port):
+        test_data.mail_connect = True
 
 
-	def sendmail(self, from_address, to_address, message):
-		test_data.mail_from_address = from_address
-		test_data.mail_to_address = to_address
-		test_data.mail_message = message
+    def quit(self):
+        test_data.mail_connect = False
+
+
+    def sendmail(self, from_address, to_address, message):
+        test_data.mail_from_address = from_address
+        test_data.mail_to_address = to_address
+        test_data.mail_message = message
 
 
 class mail_test(unittest.TestCase):
-	cached_SMTP = None
+    cached_SMTP = None
 
-	def setUp(self):
-		# now perform the slip
-		self.cached_SMTP = mail.smtplib.SMTP
-		mail.smtplib.SMTP = SMTP
-
-	
-	def tearDown(self):
-		# now put things back
-		mail.smtplib.SMTP = self.cached_SMTP
+    def setUp(self):
+        # now perform the slip
+        self.cached_SMTP = mail.smtplib.SMTP
+        mail.smtplib.SMTP = SMTP
 
 
-	def test_send_message(self):
-		message = email.Message.Message()
-		message["To"] = "you"
-		message["Cc"] = "them"
-		message["From"] = "me"
-		message["Subject"] = "hello"
-		message.set_payload("Hello everybody!")
-		
-		mail.send("me", "you", "them", "hello", "Hello everybody!")
-		self.assertEquals("me", test_data.mail_from_address)
-		self.assertEquals(["you","them"], test_data.mail_to_address)
-		self.assertEquals(message.as_string(), test_data.mail_message)
-		
+    def tearDown(self):
+        # now put things back
+        mail.smtplib.SMTP = self.cached_SMTP
+
+
+    def test_send_message(self):
+        message = email.Message.Message()
+        message["To"] = "you"
+        message["Cc"] = "them"
+        message["From"] = "me"
+        message["Subject"] = "hello"
+        message.set_payload("Hello everybody!")
+
+        mail.send("me", "you", "them", "hello", "Hello everybody!")
+        self.assertEquals("me", test_data.mail_from_address)
+        self.assertEquals(["you","them"], test_data.mail_to_address)
+        self.assertEquals(message.as_string(), test_data.mail_message)
+
 
 # this is so the test can be run in standalone mode
 if __name__ == '__main__':
-	unittest.main()
+    unittest.main()
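The setUp/tearDown pair above is the usual swap-and-restore idiom for stubbing out a module attribute, and the same shape works for any dependency; the patched target below is illustrative:

    import os, unittest

    class example_test(unittest.TestCase):
        def setUp(self):
            self.cached_exists = os.path.exists   # remember the real function
            os.path.exists = lambda path: True    # install the stub

        def tearDown(self):
            os.path.exists = self.cached_exists   # always restore it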
diff --git a/client/common_lib/pexpect.py b/client/common_lib/pexpect.py
index 67c6389..260c1d8 100644
--- a/client/common_lib/pexpect.py
+++ b/client/common_lib/pexpect.py
@@ -336,12 +336,12 @@
         the input from the child and output sent to the child. Sometimes you
         don't want to see everything you write to the child. You only want to
         log what the child sends back. For example::
-        
+
             child = pexpect.spawn('some_command')
             child.logfile_read = sys.stdout
 
         To separately log output sent to the child use logfile_send::
-        
+
             self.logfile_send = fout
 
         The delaybeforesend helps overcome a weird behavior that many users
@@ -704,7 +704,7 @@
         if timeout == -1:
             timeout = self.timeout
         if timeout is not None:
-            end_time = time.time() + timeout 
+            end_time = time.time() + timeout
         while True:
             if not self.getecho():
                 return True
@@ -1355,7 +1355,7 @@
         if timeout == -1:
             timeout = self.timeout
         if timeout is not None:
-            end_time = time.time() + timeout 
+            end_time = time.time() + timeout
         if searchwindowsize == -1:
             searchwindowsize = self.searchwindowsize
 
@@ -1653,7 +1653,7 @@
         # rescanning until we've read three more bytes.
         #
         # Sadly, I don't know enough about this interesting topic. /grahn
-        
+
         for index, s in self._strings:
             if searchwindowsize is None:
                 # the match, if any, can only be in the fresh data,
@@ -1732,7 +1732,7 @@
         'buffer' which have not been searched before.
 
         See class spawn for the 'searchwindowsize' argument.
-        
+
         If there is a match this returns the index of that string, and sets
         'start', 'end' and 'match'. Otherwise, returns -1."""
 
diff --git a/client/common_lib/pxssh.py b/client/common_lib/pxssh.py
index f8add8d..1e5a6a4 100644
--- a/client/common_lib/pxssh.py
+++ b/client/common_lib/pxssh.py
@@ -30,10 +30,10 @@
     shells.
 
     Example that runs a few commands on a remote server and prints the result::
-        
+
         import pxssh
         import getpass
-        try:                                                            
+        try:
             s = pxssh.pxssh()
             hostname = raw_input('hostname: ')
             username = raw_input('username: ')
@@ -74,7 +74,7 @@
         spawn.__init__(self, None, timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize, logfile=logfile, cwd=cwd, env=env)
 
         self.name = '<pxssh>'
-        
+
         #SUBTLE HACK ALERT! Note that the command to set the prompt uses a
         #slightly different string than the regular expression to match it. This
         #is because when you set the prompt the command will echo back, but we
@@ -98,7 +98,7 @@
         # Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
         #self.SSH_OPTS = "-x -o'RSAAuthentication=no' -o 'PubkeyAuthentication=no'"
         self.force_password = False
-        self.auto_prompt_reset = True 
+        self.auto_prompt_reset = True
 
     def levenshtein_distance(self, a,b):
 
@@ -173,7 +173,7 @@
         to guess when we have reached the prompt. Then we hope for the best and
         blindly try to reset the prompt to something more unique. If that fails
         then login() raises an ExceptionPxssh exception.
-        
+
         In some situations it is not possible or desirable to reset the
         original prompt. In this case, set 'auto_prompt_reset' to False to
         inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
@@ -194,7 +194,7 @@
         i = self.expect(["(?i)are you sure you want to continue connecting", original_prompt, "(?i)(?:password)|(?:passphrase for key)", "(?i)permission denied", "(?i)terminal type", TIMEOUT, "(?i)connection closed by remote host"], timeout=login_timeout)
 
         # First phase
-        if i==0: 
+        if i==0:
             # New certificate -- always accept it.
             # This is what you get if SSH does not have the remote host's
             # public key stored in the 'known_hosts' cache.
@@ -212,14 +212,14 @@
             # This is weird. This should not happen twice in a row.
             self.close()
             raise ExceptionPxssh ('Weird error. Got "are you sure" prompt twice.')
-        elif i==1: # can occur if you have a public key pair set to authenticate. 
+        elif i==1: # can occur if you have a public key pair set to authenticate.
             ### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
             pass
         elif i==2: # password prompt again
             # For incorrect passwords, some ssh servers will
             # ask for the password again, others return 'denied' right away.
             # If we get the password prompt again then this means
-            # we didn't get the password right the first time. 
+            # we didn't get the password right the first time.
             self.close()
             raise ExceptionPxssh ('password refused')
         elif i==3: # permission denied -- password was bad.
@@ -239,7 +239,7 @@
         elif i==6: # Connection closed by remote host
             self.close()
             raise ExceptionPxssh ('connection closed')
-        else: # Unexpected 
+        else: # Unexpected
             self.close()
             raise ExceptionPxssh ('unexpected login response')
         if not self.synch_original_prompt():
@@ -278,7 +278,7 @@
         if i==1:
             return False
         return True
-        
+
     def set_unique_prompt (self):
 
         """This sets the remote prompt to something more unique than # or $.
diff --git a/client/common_lib/test.py b/client/common_lib/test.py
index 52aaf54..dc79371 100644
--- a/client/common_lib/test.py
+++ b/client/common_lib/test.py
@@ -1,20 +1,20 @@
 # Shell class for a test, inherited by all individual tests
 #
 # Methods:
-#	__init__	initialise
-#	initialize	run once for each job
-#	setup		run once for each new version of the test installed
-#	run		run the test (wrapped by job.run_test())
+#       __init__        initialise
+#       initialize      run once for each job
+#       setup           run once for each new version of the test installed
+#       run             run the test (wrapped by job.run_test())
 #
 # Data:
-#	job		backreference to the job this test instance is part of
-#	outputdir	eg. results/<job>/<testname.tag>
-#	resultsdir	eg. results/<job>/<testname.tag>/results
-#	profdir		eg. results/<job>/<testname.tag>/profiling
-#	debugdir	eg. results/<job>/<testname.tag>/debug
-#	bindir		eg. tests/<test>
-#	src		eg. tests/<test>/src
-#	tmpdir		eg. tmp/<testname.tag>
+#       job             backreference to the job this test instance is part of
+#       outputdir       eg. results/<job>/<testname.tag>
+#       resultsdir      eg. results/<job>/<testname.tag>/results
+#       profdir         eg. results/<job>/<testname.tag>/profiling
+#       debugdir        eg. results/<job>/<testname.tag>/debug
+#       bindir          eg. tests/<test>
+#       src             eg. tests/<test>/src
+#       tmpdir          eg. tmp/<testname.tag>
 
 import os, sys, re, fcntl, shutil, tarfile, warnings
 
@@ -22,243 +22,243 @@
 
 
 class base_test:
-	preserve_srcdir = False
+    preserve_srcdir = False
 
-	def __init__(self, job, bindir, outputdir):
-		self.job = job
-		self.autodir = job.autodir
+    def __init__(self, job, bindir, outputdir):
+        self.job = job
+        self.autodir = job.autodir
 
-		self.outputdir = outputdir
-		tagged_testname = os.path.basename(self.outputdir)
-		self.resultsdir = os.path.join(self.outputdir, 'results')
-		os.mkdir(self.resultsdir)
-		self.profdir = os.path.join(self.outputdir, 'profiling')
-		os.mkdir(self.profdir)
-		self.debugdir = os.path.join(self.outputdir, 'debug')
-		os.mkdir(self.debugdir)
-		self.bindir = bindir
-		if hasattr(job, 'libdir'):
-			self.libdir = job.libdir
-		self.srcdir = os.path.join(self.bindir, 'src')
+        self.outputdir = outputdir
+        tagged_testname = os.path.basename(self.outputdir)
+        self.resultsdir = os.path.join(self.outputdir, 'results')
+        os.mkdir(self.resultsdir)
+        self.profdir = os.path.join(self.outputdir, 'profiling')
+        os.mkdir(self.profdir)
+        self.debugdir = os.path.join(self.outputdir, 'debug')
+        os.mkdir(self.debugdir)
+        self.bindir = bindir
+        if hasattr(job, 'libdir'):
+            self.libdir = job.libdir
+        self.srcdir = os.path.join(self.bindir, 'src')
 
-		self.tmpdir = os.path.join(job.tmpdir, tagged_testname)
+        self.tmpdir = os.path.join(job.tmpdir, tagged_testname)
 
-		if os.path.exists(self.tmpdir):
-			shutil.rmtree(self.tmpdir)
-		os.mkdir(self.tmpdir)
+        if os.path.exists(self.tmpdir):
+            shutil.rmtree(self.tmpdir)
+        os.mkdir(self.tmpdir)
 
-		self.job.stdout.tee_redirect(
-			os.path.join(self.debugdir, 'stdout'))
-		self.job.stderr.tee_redirect(
-			os.path.join(self.debugdir, 'stderr'))
-		try:
-			self.initialize()
-			# compile and install the test, if needed.
-			utils.update_version(self.srcdir, self.preserve_srcdir,
-					     self.version, self.setup)
-		finally:
-			self.job.stderr.restore()
-			self.job.stdout.restore()
+        self.job.stdout.tee_redirect(
+                os.path.join(self.debugdir, 'stdout'))
+        self.job.stderr.tee_redirect(
+                os.path.join(self.debugdir, 'stderr'))
+        try:
+            self.initialize()
+            # compile and install the test, if needed.
+            utils.update_version(self.srcdir, self.preserve_srcdir,
+                                 self.version, self.setup)
+        finally:
+            self.job.stderr.restore()
+            self.job.stdout.restore()
 
 
-	def assert_(self, expr, msg='Assertion failed.'):
-		if not expr:
-			raise error.TestError(msg)
+    def assert_(self, expr, msg='Assertion failed.'):
+        if not expr:
+            raise error.TestError(msg)
 
 
-	def write_test_keyval(self, attr_dict):
-		utils.write_keyval(self.outputdir, attr_dict)
+    def write_test_keyval(self, attr_dict):
+        utils.write_keyval(self.outputdir, attr_dict)
 
 
-	@staticmethod
-	def _append_type_to_keys(dictionary, typename):
-		new_dict = {}
-		for key, value in dictionary.iteritems():
-			new_key = "%s{%s}" % (key, typename)
-			new_dict[new_key] = value
-		return new_dict
+    @staticmethod
+    def _append_type_to_keys(dictionary, typename):
+        new_dict = {}
+        for key, value in dictionary.iteritems():
+            new_key = "%s{%s}" % (key, typename)
+            new_dict[new_key] = value
+        return new_dict
 
 
-	def write_iteration_keyval(self, attr_dict, perf_dict):
-		attr_dict = self._append_type_to_keys(attr_dict, "attr")
-		perf_dict = self._append_type_to_keys(perf_dict, "perf")
+    def write_iteration_keyval(self, attr_dict, perf_dict):
+        attr_dict = self._append_type_to_keys(attr_dict, "attr")
+        perf_dict = self._append_type_to_keys(perf_dict, "perf")
 
-		utils.write_keyval(self.resultsdir, attr_dict,
-				   type_tag="attr")
-		utils.write_keyval(self.resultsdir, perf_dict,
-				   type_tag="perf")
+        utils.write_keyval(self.resultsdir, attr_dict,
+                           type_tag="attr")
+        utils.write_keyval(self.resultsdir, perf_dict,
+                           type_tag="perf")
 
-		keyval_path = os.path.join(self.resultsdir, "keyval")
-		print >> open(keyval_path, "a"), ""
+        keyval_path = os.path.join(self.resultsdir, "keyval")
+        print >> open(keyval_path, "a"), ""
 
 
-	# TODO: deprecate, remove from code in favour of
-	# the write_*_keyval methods
-        def write_keyval(self, dictionary):
-		warnings.warn("test.write_keyval is deprecated, use "
-			      "test.write_test_keyval or "
-			      "test.write_iteration_keyval instead",
-			      DeprecationWarning)
-		self.write_iteration_keyval({}, dictionary)
+    # TODO: deprecate, remove from code in favour of
+    # the write_*_keyval methods
+    def write_keyval(self, dictionary):
+        warnings.warn("test.write_keyval is deprecated, use "
+                      "test.write_test_keyval or "
+                      "test.write_iteration_keyval instead",
+                      DeprecationWarning)
+        self.write_iteration_keyval({}, dictionary)
 
 
-	def initialize(self):
-		pass
+    def initialize(self):
+        pass
 
 
-	def setup(self):
-		pass
+    def setup(self):
+        pass
 
 
-	def cleanup(self):
-		pass
+    def cleanup(self):
+        pass
 
 
-	def _exec(self, args, dargs):
-		try:
-			self.job.stdout.tee_redirect(
-			    os.path.join(self.debugdir, 'stdout'))
-			self.job.stderr.tee_redirect(
-			    os.path.join(self.debugdir, 'stderr'))
+    def _exec(self, args, dargs):
+        try:
+            self.job.stdout.tee_redirect(
+                os.path.join(self.debugdir, 'stdout'))
+            self.job.stderr.tee_redirect(
+                os.path.join(self.debugdir, 'stderr'))
 
-			try:
-				os.chdir(self.outputdir)
-				dargs   = dargs.copy()
-				keyvals = dargs.pop('test_attributes', dict())
-				keyvals = keyvals.copy()
-				keyvals['version'] = self.version
-				self.write_test_keyval(keyvals)
+            try:
+                os.chdir(self.outputdir)
+                dargs   = dargs.copy()
+                keyvals = dargs.pop('test_attributes', dict())
+                keyvals = keyvals.copy()
+                keyvals['version'] = self.version
+                self.write_test_keyval(keyvals)
 
-				self.execute(*args, **dargs)
-			finally:
-				self.cleanup()
-				self.job.stderr.restore()
-				self.job.stdout.restore()
-		except error.AutotestError:
-			raise
-		except Exception, e:
-			msg = "Unhandled %s error occured during test\n"
-			msg %= str(e.__class__.__name__)
-			raise error.UnhandledError(msg)
+                self.execute(*args, **dargs)
+            finally:
+                self.cleanup()
+                self.job.stderr.restore()
+                self.job.stdout.restore()
+        except error.AutotestError:
+            raise
+        except Exception, e:
+            msg = "Unhandled %s error occured during test\n"
+            msg %= str(e.__class__.__name__)
+            raise error.UnhandledError(msg)
 
 
 def testname(url):
-	# Extract the testname from the test url.
-	match = re.match('[^:]+://(.*)/([^/]*)$', url)
-	if not match:
-		return ('', url)
-	(group, filename) = match.groups()
+    # Extract the testname from the test url.
+    match = re.match('[^:]+://(.*)/([^/]*)$', url)
+    if not match:
+        return ('', url)
+    (group, filename) = match.groups()
 
-	# Generate the group prefix.
-	group = re.sub(r'\W', '_', group)
+    # Generate the group prefix.
+    group = re.sub(r'\W', '_', group)
 
-	# Drop the extension to get the raw test name.
-	testname = re.sub(r'\.tgz', '', filename)
+    # Drop the extension to get the raw test name.
+    testname = re.sub(r'\.tgz', '', filename)
 
-	return (group, testname)
+    return (group, testname)
 
 
 def _installtest(job, url):
-	(group, name) = testname(url)
+    (group, name) = testname(url)
 
-	# Bail if the test is already installed
-	group_dir = os.path.join(job.testdir, "download", group)
-	if os.path.exists(os.path.join(group_dir, name)):
-		return (group, name)
+    # Bail if the test is already installed
+    group_dir = os.path.join(job.testdir, "download", group)
+    if os.path.exists(os.path.join(group_dir, name)):
+        return (group, name)
 
-	# If the group directory is missing create it and add
-	# an empty  __init__.py so that sub-directories are
-	# considered for import.
-	if not os.path.exists(group_dir):
-		os.mkdir(group_dir)
-		f = file(os.path.join(group_dir, '__init__.py'), 'w+')
-		f.close()
+    # If the group directory is missing create it and add
+    # an empty  __init__.py so that sub-directories are
+    # considered for import.
+    if not os.path.exists(group_dir):
+        os.mkdir(group_dir)
+        f = file(os.path.join(group_dir, '__init__.py'), 'w+')
+        f.close()
 
-	print name + ": installing test url=" + url
-	get_file(url, os.path.join(group_dir, 'test.tgz'))
-	old_wd = os.getcwd()
-	os.chdir(group_dir)
-	tar = tarfile.open('test.tgz')
-	for member in tar.getmembers():
-		tar.extract(member)
-	tar.close()
-	os.chdir(old_wd)
-	os.remove(os.path.join(group_dir, 'test.tgz'))
+    print name + ": installing test url=" + url
+    get_file(url, os.path.join(group_dir, 'test.tgz'))
+    old_wd = os.getcwd()
+    os.chdir(group_dir)
+    tar = tarfile.open('test.tgz')
+    for member in tar.getmembers():
+        tar.extract(member)
+    tar.close()
+    os.chdir(old_wd)
+    os.remove(os.path.join(group_dir, 'test.tgz'))
 
-	# For this 'sub-object' to be importable via the name
-	# 'group.name' we need to provide an __init__.py,
-	# so link the main entry point to this.
-	os.symlink(name + '.py', os.path.join(group_dir, name,
-				'__init__.py'))
+    # For this 'sub-object' to be importable via the name
+    # 'group.name' we need to provide an __init__.py,
+    # so link the main entry point to this.
+    os.symlink(name + '.py', os.path.join(group_dir, name,
+                            '__init__.py'))
 
-	# The test is now installed.
-	return (group, name)
+    # The test is now installed.
+    return (group, name)
 
 
 def runtest(job, url, tag, args, dargs,
-	    local_namespace={}, global_namespace={}, after_test_hook=None):
-	local_namespace = local_namespace.copy()
-	global_namespace = global_namespace.copy()
+            local_namespace={}, global_namespace={}, after_test_hook=None):
+    local_namespace = local_namespace.copy()
+    global_namespace = global_namespace.copy()
 
-	# if this is not a plain test name then download and install the
-	# specified test
-	if utils.is_url(url):
-		(group, testname) = _installtest(job, url)
-		bindir = os.path.join(job.testdir, 'download', group, testname)
-		site_bindir = None
-	else:
-		# if the test is local, it can be found in either testdir
-		# or site_testdir. tests in site_testdir override tests
-		# defined in testdir
-		(group, testname) = ('', url)
-		bindir = os.path.join(job.testdir, group, testname)
-		if hasattr(job, 'site_testdir'):
-			site_bindir = os.path.join(job.site_testdir,
-						   group, testname)
-		else:
-			site_bindir = None
+    # if this is not a plain test name then download and install the
+    # specified test
+    if utils.is_url(url):
+        (group, testname) = _installtest(job, url)
+        bindir = os.path.join(job.testdir, 'download', group, testname)
+        site_bindir = None
+    else:
+        # if the test is local, it can be found in either testdir
+        # or site_testdir. tests in site_testdir override tests
+        # defined in testdir
+        (group, testname) = ('', url)
+        bindir = os.path.join(job.testdir, group, testname)
+        if hasattr(job, 'site_testdir'):
+            site_bindir = os.path.join(job.site_testdir,
+                                       group, testname)
+        else:
+            site_bindir = None
 
-	outputdir = os.path.join(job.resultdir, testname)
-	if tag:
-		outputdir += '.' + tag
+    outputdir = os.path.join(job.resultdir, testname)
+    if tag:
+        outputdir += '.' + tag
 
-	# if we can find the test in site_bindir, use this version
-	if site_bindir and os.path.exists(site_bindir):
-		bindir = site_bindir
-		testdir = job.site_testdir
-	elif os.path.exists(bindir):
-		testdir = job.testdir
-	elif not os.path.exists(bindir):
-		raise error.TestError(testname + ': test does not exist')
+    # if we can find the test in site_bindir, use this version
+    if site_bindir and os.path.exists(site_bindir):
+        bindir = site_bindir
+        testdir = job.site_testdir
+    elif os.path.exists(bindir):
+        testdir = job.testdir
+    elif not os.path.exists(bindir):
+        raise error.TestError(testname + ': test does not exist')
 
-	if group:
-		sys.path.insert(0, os.path.join(testdir, 'download'))
-		group += '.'
-	else:
-		sys.path.insert(0, os.path.join(testdir, testname))
+    if group:
+        sys.path.insert(0, os.path.join(testdir, 'download'))
+        group += '.'
+    else:
+        sys.path.insert(0, os.path.join(testdir, testname))
 
-	local_namespace['job'] = job
-	local_namespace['bindir'] = bindir
-	local_namespace['outputdir'] = outputdir
+    local_namespace['job'] = job
+    local_namespace['bindir'] = bindir
+    local_namespace['outputdir'] = outputdir
 
-	lockfile = open(os.path.join(job.tmpdir, '.testlock'), 'w')
-	try:
-		fcntl.flock(lockfile, fcntl.LOCK_EX)
-		exec ("import %s%s" % (group, testname),
-		      local_namespace, global_namespace)
-		exec ("mytest = %s%s.%s(job, bindir, outputdir)" %
-		      (group, testname, testname),
-		      local_namespace, global_namespace)
-	finally:
-		fcntl.flock(lockfile, fcntl.LOCK_UN)
-		lockfile.close()
-		sys.path.pop(0)
+    lockfile = open(os.path.join(job.tmpdir, '.testlock'), 'w')
+    try:
+        fcntl.flock(lockfile, fcntl.LOCK_EX)
+        exec ("import %s%s" % (group, testname),
+              local_namespace, global_namespace)
+        exec ("mytest = %s%s.%s(job, bindir, outputdir)" %
+              (group, testname, testname),
+              local_namespace, global_namespace)
+    finally:
+        fcntl.flock(lockfile, fcntl.LOCK_UN)
+        lockfile.close()
+        sys.path.pop(0)
 
-	pwd = os.getcwd()
-	os.chdir(outputdir)
-	try:
-		mytest = global_namespace['mytest']
-		mytest._exec(args, dargs)
-	finally:
-		if after_test_hook:
-			after_test_hook(mytest)
+    pwd = os.getcwd()
+    os.chdir(outputdir)
+    try:
+        mytest = global_namespace['mytest']
+        mytest._exec(args, dargs)
+    finally:
+        if after_test_hook:
+            after_test_hook(mytest)
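For orientation, a minimal usage sketch of the loader above, assuming this hunk is the usual runtest() entry point; the 'sleeptest' name and the argument order are illustrative assumptions:

    # sketch: local names resolve against job.site_testdir first, then
    # job.testdir; URL arguments are fetched into <testdir>/download
    runtest(job, 'sleeptest', 'quick', [], {})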
diff --git a/client/common_lib/test_utils/mock.py b/client/common_lib/test_utils/mock.py
index 556e165..9906c94 100644
--- a/client/common_lib/test_utils/mock.py
+++ b/client/common_lib/test_utils/mock.py
@@ -5,426 +5,426 @@
 
 
 class CheckPlaybackError(Exception):
-	'Raised when mock playback does not match recorded calls.'
+    'Raised when mock playback does not match recorded calls.'
 
 
 class ExitException(Exception):
-	'Raised when the mocked sys.exit() is called'
-	pass
+    'Raised when the mocked sys.exit() is called'
 
 
 class argument_comparator(object):
-	def is_satisfied_by(self, parameter):
-		raise NotImplementedError
+    def is_satisfied_by(self, parameter):
+        raise NotImplementedError
 
 
 class equality_comparator(argument_comparator):
-	def __init__(self, value):
-		self.value = value
+    def __init__(self, value):
+        self.value = value
 
 
-	def is_satisfied_by(self, parameter):
-		return parameter == self.value
+    def is_satisfied_by(self, parameter):
+        return parameter == self.value
 
 
-	def __str__(self):
-		return repr(self.value)
+    def __str__(self):
+        return repr(self.value)
 
 
 class regex_comparator(argument_comparator):
-	def __init__(self, pattern, flags=0):
-		self.regex = re.compile(pattern, flags)
+    def __init__(self, pattern, flags=0):
+        self.regex = re.compile(pattern, flags)
 
 
-	def is_satisfied_by(self, parameter):
-		return self.regex.search(parameter) is not None
+    def is_satisfied_by(self, parameter):
+        return self.regex.search(parameter) is not None
 
 
-	def __str__(self):
-		return self.regex.pattern
+    def __str__(self):
+        return self.regex.pattern
 
 
 class is_string_comparator(argument_comparator):
-	def is_satisfied_by(self, parameter):
-		return isinstance(parameter, basestring)
+    def is_satisfied_by(self, parameter):
+        return isinstance(parameter, basestring)
 
 
-	def __str__(self):
-		return "a string"
+    def __str__(self):
+        return "a string"
 
 
 class is_instance_comparator(argument_comparator):
-	def __init__(self, cls):
-		self.cls = cls
+    def __init__(self, cls):
+        self.cls = cls
 
 
-	def is_satisfied_by(self, parameter):
-		return isinstance(parameter, self.cls)
+    def is_satisfied_by(self, parameter):
+        return isinstance(parameter, self.cls)
 
 
-	def __str__(self):
-		return "is a %s" % self.cls
+    def __str__(self):
+        return "is a %s" % self.cls
 
 
 class function_map(object):
-	def __init__(self, symbol, return_val, *args, **dargs):
-		self.return_val = return_val
-		self.args = []
-		self.symbol = symbol
-		for arg in args:
-			if isinstance(arg, argument_comparator):
-				self.args.append(arg)
-			else:
-				self.args.append(equality_comparator(arg))
+    def __init__(self, symbol, return_val, *args, **dargs):
+        self.return_val = return_val
+        self.args = []
+        self.symbol = symbol
+        for arg in args:
+            if isinstance(arg, argument_comparator):
+                self.args.append(arg)
+            else:
+                self.args.append(equality_comparator(arg))
 
-		self.dargs = dargs
-		self.error = None
+        self.dargs = dargs
+        self.error = None
 
 
-	def and_return(self, return_val):
-		self.return_val = return_val
+    def and_return(self, return_val):
+        self.return_val = return_val
 
 
-	def and_raises(self, error):
-		self.error = error
+    def and_raises(self, error):
+        self.error = error
 
 
-	def match(self, *args, **dargs):
-		if len(args) != len(self.args) or len(dargs) != len(self.dargs):
-			return False
+    def match(self, *args, **dargs):
+        if len(args) != len(self.args) or len(dargs) != len(self.dargs):
+            return False
 
-		for i, expected_arg in enumerate(self.args):
-			if not expected_arg.is_satisfied_by(args[i]):
-				return False
+        for i, expected_arg in enumerate(self.args):
+            if not expected_arg.is_satisfied_by(args[i]):
+                return False
 
-		if self.dargs != dargs:
-			return False
+        if self.dargs != dargs:
+            return False
 
-		return True
+        return True
 
 
-	def __str__(self):
-		return _dump_function_call(self.symbol, self.args, self.dargs)
+    def __str__(self):
+        return _dump_function_call(self.symbol, self.args, self.dargs)
 
 
 class mock_function(object):
-	def __init__(self, symbol, default_return_val=None,
-		     record=None, playback=None):
-		self.default_return_val = default_return_val
-		self.num_calls = 0
-		self.args = []
-		self.dargs = []
-		self.symbol = symbol
-		self.record = record
-		self.playback = playback
-		self.__name__ = symbol
+    def __init__(self, symbol, default_return_val=None,
+                 record=None, playback=None):
+        self.default_return_val = default_return_val
+        self.num_calls = 0
+        self.args = []
+        self.dargs = []
+        self.symbol = symbol
+        self.record = record
+        self.playback = playback
+        self.__name__ = symbol
 
 
-	def __call__(self, *args, **dargs):
-		self.num_calls += 1
-		self.args.append(args)
-		self.dargs.append(dargs)
-		if self.playback:
-			return self.playback(self.symbol, *args, **dargs)
-		else:
-			return self.default_return_val
+    def __call__(self, *args, **dargs):
+        self.num_calls += 1
+        self.args.append(args)
+        self.dargs.append(dargs)
+        if self.playback:
+            return self.playback(self.symbol, *args, **dargs)
+        else:
+            return self.default_return_val
 
 
-	def expect_call(self, *args, **dargs):
-		mapping = function_map(self.symbol, None, *args, **dargs)
-		if self.record:
-			self.record(mapping)
+    def expect_call(self, *args, **dargs):
+        mapping = function_map(self.symbol, None, *args, **dargs)
+        if self.record:
+            self.record(mapping)
 
-		return mapping
+        return mapping
 
 
 class mask_function(mock_function):
-	def __init__(self, symbol, original_function, default_return_val=None,
-		     record=None, playback=None):
-		super(mask_function, self).__init__(symbol,
-		                                    default_return_val,
-		                                    record, playback)
-		self.original_function = original_function
+    def __init__(self, symbol, original_function, default_return_val=None,
+                 record=None, playback=None):
+        super(mask_function, self).__init__(symbol,
+                                            default_return_val,
+                                            record, playback)
+        self.original_function = original_function
 
 
-	def run_original_function(self, *args, **dargs):
-		return self.original_function(*args, **dargs)
+    def run_original_function(self, *args, **dargs):
+        return self.original_function(*args, **dargs)
 
 
 class mock_class(object):
-	def __init__(self, cls, name, default_ret_val=None,
-	             record=None, playback=None):
-		self.errors = []
-		self.name = name
-		self.record = record
-		self.playback = playback
+    def __init__(self, cls, name, default_ret_val=None,
+                 record=None, playback=None):
+        self.errors = []
+        self.name = name
+        self.record = record
+        self.playback = playback
 
-		for symbol in dir(cls):
-			if symbol.startswith("_"):
-				continue
+        for symbol in dir(cls):
+            if symbol.startswith("_"):
+                continue
 
-			orig_symbol = getattr(cls, symbol)
-			if callable(orig_symbol):
-				f_name = "%s.%s" % (self.name, symbol)
-				func = mock_function(f_name, default_ret_val,
-					             self.record, self.playback)
-				setattr(self, symbol, func)
-			else:
-				setattr(self, symbol, orig_symbol)
+            orig_symbol = getattr(cls, symbol)
+            if callable(orig_symbol):
+                f_name = "%s.%s" % (self.name, symbol)
+                func = mock_function(f_name, default_ret_val,
+                                     self.record, self.playback)
+                setattr(self, symbol, func)
+            else:
+                setattr(self, symbol, orig_symbol)
 
 
 class mock_god:
-	NONEXISTENT_ATTRIBUTE = object()
+    NONEXISTENT_ATTRIBUTE = object()
 
-	def __init__(self, debug=False):
-		"""
-		With debug=True, all recorded method calls will be printed as
-		they happen.
-		"""
-		self.recording = collections.deque()
-		self.errors = []
-		self._stubs = []
-		self._debug = debug
+    def __init__(self, debug=False):
+        """
+        With debug=True, all recorded method calls will be printed as
+        they happen.
+        """
+        self.recording = collections.deque()
+        self.errors = []
+        self._stubs = []
+        self._debug = debug
 
 
-	def create_mock_class_obj(self, cls, name, default_ret_val=None):
-		record = self.__record_call
-		playback = self.__method_playback
-		errors = self.errors
+    def create_mock_class_obj(self, cls, name, default_ret_val=None):
+        record = self.__record_call
+        playback = self.__method_playback
+        errors = self.errors
 
-		class cls_sub(cls):
-			cls_count = 0
-			creations = collections.deque()
+        class cls_sub(cls):
+            cls_count = 0
+            creations = collections.deque()
 
-			# overwrite the initializer
-			def __init__(self, *args, **dargs):
-				pass
+            # overwrite the initializer
+            def __init__(self, *args, **dargs):
+                pass
 
 
-			@classmethod
-			def expect_new(typ, *args, **dargs):
-				obj = typ.make_new(*args, **dargs)
-				typ.creations.append(obj)
-				return obj
+            @classmethod
+            def expect_new(typ, *args, **dargs):
+                obj = typ.make_new(*args, **dargs)
+                typ.creations.append(obj)
+                return obj
 
 
-			def __new__(typ, *args, **dargs):
-				if len(typ.creations) == 0:
-					msg = ("not expecting call to %s "
-					       "constructor" % (name))
-					errors.append(msg)
-					return None
-				else:
-					return typ.creations.popleft()
+            def __new__(typ, *args, **dargs):
+                if len(typ.creations) == 0:
+                    msg = ("not expecting call to %s "
+                           "constructor" % (name))
+                    errors.append(msg)
+                    return None
+                else:
+                    return typ.creations.popleft()
 
 
-			@classmethod
-			def make_new(typ, *args, **dargs):
-				obj = super(cls_sub, typ).__new__(typ, *args,
-								  **dargs)
+            @classmethod
+            def make_new(typ, *args, **dargs):
+                obj = super(cls_sub, typ).__new__(typ, *args,
+                                                  **dargs)
 
-				typ.cls_count += 1
-				obj_name = "%s_%s" % (name, typ.cls_count)
-				for symbol in dir(obj):
-					if (symbol.startswith("__") and
-                                            symbol.endswith("__")):
-						continue
+                typ.cls_count += 1
+                obj_name = "%s_%s" % (name, typ.cls_count)
+                for symbol in dir(obj):
+                    if (symbol.startswith("__") and
+                            symbol.endswith("__")):
+                        continue
 
-					orig_symbol = getattr(obj, symbol)
-					if callable(orig_symbol):
-						f_name = ("%s.%s" %
-                                                          (obj_name, symbol))
-						func = mock_function(f_name,
-                                                                default_ret_val,
-					             		record,
-							        playback)
-						setattr(obj, symbol, func)
-					else:
-						setattr(obj, symbol,
-                                                        orig_symbol)
+                    orig_symbol = getattr(obj, symbol)
+                    if callable(orig_symbol):
+                        f_name = "%s.%s" % (obj_name, symbol)
+                        func = mock_function(f_name, default_ret_val,
+                                             record, playback)
+                        setattr(obj, symbol, func)
+                    else:
+                        setattr(obj, symbol, orig_symbol)
 
-				return obj
+                return obj
 
 
 
-		return cls_sub
+        return cls_sub
 
 
-	def create_mock_class(self, cls, name, default_ret_val=None):
-		"""
-		Given something that defines a namespace cls (class, object,
-		module), and a (hopefully unique) name, will create a
-		mock_class object with that name and that possessess all
-		the public attributes of cls.  default_ret_val sets the
-		default_ret_val on all methods of the cls mock.
-		"""
-		return mock_class(cls, name, default_ret_val,
-		                  self.__record_call, self.__method_playback)
+    def create_mock_class(self, cls, name, default_ret_val=None):
+        """
+        Given something that defines a namespace cls (a class, object,
+        or module) and a (hopefully unique) name, create a mock_class
+        object with that name that possesses all the public attributes
+        of cls.  default_ret_val sets the default return value on all
+        methods of the cls mock.
+        """
+        return mock_class(cls, name, default_ret_val,
+                          self.__record_call, self.__method_playback)
 
 
-	def create_mock_function(self, symbol, default_return_val=None):
-		"""
-		create a mock_function with name symbol and default return
-		value of default_ret_val.
-		"""
-		return mock_function(symbol, default_return_val,
-		                  self.__record_call, self.__method_playback)
+    def create_mock_function(self, symbol, default_return_val=None):
+        """
+        Create a mock_function with name symbol and a default return
+        value of default_return_val.
+        """
+        return mock_function(symbol, default_return_val,
+                             self.__record_call, self.__method_playback)
 
 
-	def mock_up(self, obj, name, default_ret_val=None):
-		"""
-		Given an object (class instance or module) and a registration
-		name, then replace all its methods with mock function objects
-		(passing the orignal functions to the mock functions).
-		"""
-		for symbol in dir(obj):
-			if symbol.startswith("__"):
-				continue
+    def mock_up(self, obj, name, default_ret_val=None):
+        """
+        Given an object (class instance or module) and a registration
+        name, replace all its methods with mock function objects
+        (passing the original functions to the mock functions).
+        """
+        for symbol in dir(obj):
+            if symbol.startswith("__"):
+                continue
 
-			orig_symbol = getattr(obj, symbol)
-			if callable(orig_symbol):
-				f_name = "%s.%s" % (name, symbol)
-				func = mask_function(f_name, orig_symbol,
-						     default_ret_val,
-					             self.__record_call,
-					             self.__method_playback)
-				setattr(obj, symbol, func)
+            orig_symbol = getattr(obj, symbol)
+            if callable(orig_symbol):
+                f_name = "%s.%s" % (name, symbol)
+                func = mask_function(f_name, orig_symbol,
+                                     default_ret_val,
+                                     self.__record_call,
+                                     self.__method_playback)
+                setattr(obj, symbol, func)
 
 
-	def stub_with(self, namespace, symbol, new_attribute):
-		original_attribute = getattr(namespace, symbol,
-					     self.NONEXISTENT_ATTRIBUTE)
-		self._stubs.append((namespace, symbol, original_attribute))
-		setattr(namespace, symbol, new_attribute)
+    def stub_with(self, namespace, symbol, new_attribute):
+        original_attribute = getattr(namespace, symbol,
+                                     self.NONEXISTENT_ATTRIBUTE)
+        self._stubs.append((namespace, symbol, original_attribute))
+        setattr(namespace, symbol, new_attribute)
 
 
-	def stub_function(self, namespace, symbol):
-		mock_attribute = self.create_mock_function(symbol)
-		self.stub_with(namespace, symbol, mock_attribute)
+    def stub_function(self, namespace, symbol):
+        mock_attribute = self.create_mock_function(symbol)
+        self.stub_with(namespace, symbol, mock_attribute)
 
 
-	def stub_class_method(self, cls, symbol):
-		mock_attribute = self.create_mock_function(symbol)
-		self.stub_with(cls, symbol, staticmethod(mock_attribute))
+    def stub_class_method(self, cls, symbol):
+        mock_attribute = self.create_mock_function(symbol)
+        self.stub_with(cls, symbol, staticmethod(mock_attribute))
 
 
-	def unstub_all(self):
-		self._stubs.reverse()
-		for namespace, symbol, original_attribute in self._stubs:
-			if original_attribute == self.NONEXISTENT_ATTRIBUTE:
-				delattr(namespace, symbol)
-			else:
-				setattr(namespace, symbol, original_attribute)
-		self._stubs = []
+    def unstub_all(self):
+        self._stubs.reverse()
+        for namespace, symbol, original_attribute in self._stubs:
+            if original_attribute == self.NONEXISTENT_ATTRIBUTE:
+                delattr(namespace, symbol)
+            else:
+                setattr(namespace, symbol, original_attribute)
+        self._stubs = []
 
 
-	def __method_playback(self, symbol, *args, **dargs):
-		if self._debug:
-			print 'Mock call:', _dump_function_call(symbol,
-								args, dargs)
-		if len(self.recording) != 0:
-			func_call = self.recording[0]
-			if func_call.symbol != symbol:
-				msg = ("Unexpected call: %s. Expected %s"
-				    % (_dump_function_call(symbol, args, dargs),
-				       func_call))
-				self.errors.append(msg)
-				return None
+    def __method_playback(self, symbol, *args, **dargs):
+        if self._debug:
+            print 'Mock call:', _dump_function_call(symbol,
+                                                    args, dargs)
+        if len(self.recording) != 0:
+            func_call = self.recording[0]
+            if func_call.symbol != symbol:
+                msg = ("Unexpected call: %s. Expected %s"
+                    % (_dump_function_call(symbol, args, dargs),
+                       func_call))
+                self.errors.append(msg)
+                return None
 
-			if not func_call.match(*args, **dargs):
-				msg = ("%s called. Expected %s"
-				    % (_dump_function_call(symbol, args, dargs),
-				      func_call))
-				self.errors.append(msg)
-				return None
+            if not func_call.match(*args, **dargs):
+                msg = ("%s called. Expected %s"
+                    % (_dump_function_call(symbol, args, dargs),
+                      func_call))
+                self.errors.append(msg)
+                return None
 
-			# this is the expected call so pop it and return
-			self.recording.popleft()
-			if func_call.error:
-				raise func_call.error
-			else:
-				return func_call.return_val
-		else:
-			msg = ("unexpected call: %s"
-			       % (_dump_function_call(symbol, args, dargs)))
-			self.errors.append(msg)
-			return None
+            # this is the expected call so pop it and return
+            self.recording.popleft()
+            if func_call.error:
+                raise func_call.error
+            else:
+                return func_call.return_val
+        else:
+            msg = ("unexpected call: %s"
+                   % (_dump_function_call(symbol, args, dargs)))
+            self.errors.append(msg)
+            return None
 
 
-	def __record_call(self, mapping):
-		self.recording.append(mapping)
+    def __record_call(self, mapping):
+        self.recording.append(mapping)
 
 
-	def check_playback(self):
-		"""
-		Report any errors that were encounterd during calls
-		to __method_playback().
-		"""
-		if len(self.errors) > 0:
-			for error in self.errors:
-				print error
-			raise CheckPlaybackError
-		elif len(self.recording) != 0:
-			for func_call in self.recording:
-				print "%s not called" % (func_call)
-			raise CheckPlaybackError
+    def check_playback(self):
+        """
+        Report any errors that were encountered during calls
+        to __method_playback().
+        """
+        if len(self.errors) > 0:
+            for error in self.errors:
+                print error
+            raise CheckPlaybackError
+        elif len(self.recording) != 0:
+            for func_call in self.recording:
+                print "%s not called" % (func_call)
+            raise CheckPlaybackError
 
 
-	def mock_exit(self):
-		def mock_exit_handler(self):
-			raise ExitException
+    def mock_exit(self):
+        def mock_exit_handler(code=None):
+            raise ExitException
 
-		self.saved_exit = sys.exit
-		sys.exit = mock_exit_handler
+        self.saved_exit = sys.exit
+        sys.exit = mock_exit_handler
 
 
-	def unmock_exit(self):
-		sys.exit = self.saved_exit
-		self.saved_exit = None
+    def unmock_exit(self):
+        sys.exit = self.saved_exit
+        self.saved_exit = None
 
 
-	def mock_stdout_stderr(self):
-		"""Mocks and saves the stdout & stderr output"""
-		self.mock_streams_stdout = StringIO.StringIO('')
-		self.mock_streams_stderr = StringIO.StringIO('')
+    def mock_stdout_stderr(self):
+        """Mocks and saves the stdout & stderr output"""
+        self.mock_streams_stdout = StringIO.StringIO('')
+        self.mock_streams_stderr = StringIO.StringIO('')
 
-		sys.stdout = self.mock_streams_stdout
-		sys.stderr = self.mock_streams_stderr
+        sys.stdout = self.mock_streams_stdout
+        sys.stderr = self.mock_streams_stderr
 
 
-	def unmock_stdout_stderr(self):
-		"""Restores the stdout & stderr, and returns both
-		output strings"""
-		sys.stdout = sys.__stdout__
-		sys.stderr = sys.__stderr__
-		values = (self.mock_streams_stdout.getvalue(),
-			  self.mock_streams_stderr.getvalue())
+    def unmock_stdout_stderr(self):
+        """Restores the stdout & stderr, and returns both
+        output strings"""
+        sys.stdout = sys.__stdout__
+        sys.stderr = sys.__stderr__
+        values = (self.mock_streams_stdout.getvalue(),
+                  self.mock_streams_stderr.getvalue())
 
-		self.mock_streams_stdout.close()
-		self.mock_streams_stderr.close()
-		return values
+        self.mock_streams_stdout.close()
+        self.mock_streams_stderr.close()
+        return values
 
 
-	def mock_io_exit(self):
-		self.mock_exit()
-		self.mock_stdout_stderr()
+    def mock_io_exit(self):
+        self.mock_exit()
+        self.mock_stdout_stderr()
 
 
-	def unmock_io_exit(self):
-		self.unmock_exit()
-		return self.unmock_stdout_stderr()
+    def unmock_io_exit(self):
+        self.unmock_exit()
+        return self.unmock_stdout_stderr()
 
 
 def _arg_to_str(arg):
-	if isinstance(arg, argument_comparator):
-		return str(arg)
-	return repr(arg)
+    if isinstance(arg, argument_comparator):
+        return str(arg)
+    return repr(arg)
 
 
 def _dump_function_call(symbol, args, dargs):
-	arg_vec = []
-	for arg in args:
-		arg_vec.append(_arg_to_str(arg))
-	for key, val in dargs.iteritems():
-		arg_vec.append("%s=%s" % (key, _arg_to_stv(val)))
-	return "%s(%s)" % (symbol, ', '.join(arg_vec))
+    arg_vec = []
+    for arg in args:
+        arg_vec.append(_arg_to_str(arg))
+    for key, val in dargs.iteritems():
+        arg_vec.append("%s=%s" % (key, _arg_to_stv(val)))
+    return "%s(%s)" % (symbol, ', '.join(arg_vec))
diff --git a/client/common_lib/test_utils/mock_demo.py b/client/common_lib/test_utils/mock_demo.py
index fec116b..f01fe60 100644
--- a/client/common_lib/test_utils/mock_demo.py
+++ b/client/common_lib/test_utils/mock_demo.py
@@ -5,140 +5,140 @@
 import mock, mock_demo_MUT
 
 class MyError(Exception):
-	pass
+    pass
 
 
 class A(object):
-	var = 8
+    var = 8
 
-	def __init__(self):
-		self.x = 0
+    def __init__(self):
+        self.x = 0
 
-	def method1(self):
-		self.x += 1
-		return self.x
+    def method1(self):
+        self.x += 1
+        return self.x
 
-	def method2(self, y):
-		return y * self.x
+    def method2(self, y):
+        return y * self.x
 
 class B(A):
-	def method3(self, z):
-		return self.x + z
+    def method3(self, z):
+        return self.x + z
 
-	def method4(self, z, w):
-		return self.x * z + w
+    def method4(self, z, w):
+        return self.x * z + w
 
 
 class C(B):
-	def method5(self):
-		self.method1()
-		t = self.method2(4)
-		u = self.method3(t)
-		return u
+    def method5(self):
+        self.method1()
+        t = self.method2(4)
+        u = self.method3(t)
+        return u
 
 
 class D(C):
-	def method6(self, error):
-		if error:
-			raise MyError("woops")
-		else:
-			return 10
+    def method6(self, error):
+        if error:
+            raise MyError("woops")
+        else:
+            return 10
 
 class E(D):
-	def __init__(self, val):
-		self.val = val
+    def __init__(self, val):
+        self.val = val
 
 
 # say we want to test that do_stuff is doing what we think it is doing
 def do_stuff(a, b, func):
-	print b.method1()
-	print b.method3(10)
-	print func("how many")
-	print a.method2(5)
-	print b.method1()
-	print b.method4(1, 4)
-	print b.method2(3)
-	print b.method2("hello")
+    print b.method1()
+    print b.method3(10)
+    print func("how many")
+    print a.method2(5)
+    print b.method1()
+    print b.method4(1, 4)
+    print b.method2(3)
+    print b.method2("hello")
 
 
 def do_more_stuff(d):
-	print d.method6(False)
-	try:
-		d.method6(True)
-	except:
-		print "caught error"
+    print d.method6(False)
+    try:
+        d.method6(True)
+    except:
+        print "caught error"
 
 
 def main():
-	god = mock.mock_god()
+    god = mock.mock_god()
 
-	m1 = god.create_mock_class(A, "A")
-	print m1.var
-	m2 = god.create_mock_class(B, "B")
-	f = god.create_mock_function("func")
+    m1 = god.create_mock_class(A, "A")
+    print m1.var
+    m2 = god.create_mock_class(B, "B")
+    f = god.create_mock_function("func")
 
-	print dir(m1)
-	print dir(m2)
+    print dir(m1)
+    print dir(m2)
 
-	# sets up the "recording"
-	m2.method1.expect_call().and_return(1)
-	m2.method3.expect_call(10).and_return(10)
-	f.expect_call("how many").and_return(42)
-	m1.method2.expect_call(5).and_return(0)
-	m2.method1.expect_call().and_return(2)
-	m2.method4.expect_call(1, 4).and_return(6)
-	m2.method2.expect_call(3).and_return(6)
-	m2.method2.expect_call(mock.is_string_comparator()).and_return("foo")
+    # sets up the "recording"
+    m2.method1.expect_call().and_return(1)
+    m2.method3.expect_call(10).and_return(10)
+    f.expect_call("how many").and_return(42)
+    m1.method2.expect_call(5).and_return(0)
+    m2.method1.expect_call().and_return(2)
+    m2.method4.expect_call(1, 4).and_return(6)
+    m2.method2.expect_call(3).and_return(6)
+    m2.method2.expect_call(mock.is_string_comparator()).and_return("foo")
 
-	# check the recording order
-	for func_call in god.recording:
-		print func_call
+    # check the recording order
+    for func_call in god.recording:
+        print func_call
 
-	# once we start making calls into the methods we are in
-	# playback mode
-	do_stuff(m1, m2, f)
+    # once we start making calls into the methods we are in
+    # playback mode
+    do_stuff(m1, m2, f)
 
-	# we can now check that playback succeeded
-	god.check_playback()
+    # we can now check that playback succeeded
+    god.check_playback()
 
-	# now test the ability to mock out all methods of an object
-	# except those under test
-	c = C()
-	god.mock_up(c, "c")
+    # now test the ability to mock out all methods of an object
+    # except those under test
+    c = C()
+    god.mock_up(c, "c")
 
-	# setup recording
-	c.method1.expect_call()
-	c.method2.expect_call(4).and_return(4)
-	c.method3.expect_call(4).and_return(5)
+    # setup recording
+    c.method1.expect_call()
+    c.method2.expect_call(4).and_return(4)
+    c.method3.expect_call(4).and_return(5)
 
-	# perform the test
-	answer = c.method5.run_original_function()
+    # perform the test
+    answer = c.method5.run_original_function()
 
-	# check playback
-	print "answer = %s" % (answer)
-	god.check_playback()
+    # check playback
+    print "answer = %s" % (answer)
+    god.check_playback()
 
-	# check exception returns too
-	m3 = god.create_mock_class(D, "D")
-	m3.method6.expect_call(False).and_return(10)
-	m3.method6.expect_call(True).and_raises(MyError("woops"))
+    # check exception returns too
+    m3 = god.create_mock_class(D, "D")
+    m3.method6.expect_call(False).and_return(10)
+    m3.method6.expect_call(True).and_raises(MyError("woops"))
 
-	do_more_stuff(m3)
-	god.check_playback()
+    do_more_stuff(m3)
+    god.check_playback()
 
-	# now check we can mock out a whole class (rather than just an instance)
-	mockE = god.create_mock_class_obj(E, "E")
-	oldE = mock_demo_MUT.E
-	mock_demo_MUT.E = mockE
+    # now check we can mock out a whole class (rather than just an instance)
+    mockE = god.create_mock_class_obj(E, "E")
+    oldE = mock_demo_MUT.E
+    mock_demo_MUT.E = mockE
 
-	m4 = mockE.expect_new(val=7)
-	m4.method1.expect_call().and_return(1)
+    m4 = mockE.expect_new(val=7)
+    m4.method1.expect_call().and_return(1)
 
-	mock_demo_MUT.do_create_stuff()
-	god.check_playback()
+    mock_demo_MUT.do_create_stuff()
+    god.check_playback()
 
-	mock_demo_MUT.E = oldE
+    mock_demo_MUT.E = oldE
 
 
 if __name__ == "__main__":
-	main()
+    main()
diff --git a/client/common_lib/test_utils/mock_demo_MUT.py b/client/common_lib/test_utils/mock_demo_MUT.py
index c44e5a2..b2fde77 100644
--- a/client/common_lib/test_utils/mock_demo_MUT.py
+++ b/client/common_lib/test_utils/mock_demo_MUT.py
@@ -1,5 +1,5 @@
 from mock_demo import E
 
 def do_create_stuff():
-	obj = E(val=7)
-	print obj.method1()
+    obj = E(val=7)
+    print obj.method1()
diff --git a/client/common_lib/utils.py b/client/common_lib/utils.py
index 92e53f7..f19ecc5 100644
--- a/client/common_lib/utils.py
+++ b/client/common_lib/utils.py
@@ -8,360 +8,360 @@
 
 
 def read_one_line(filename):
-	return open(filename, 'r').readline().strip()
+    return open(filename, 'r').readline().strip()
 
 
 def write_one_line(filename, str):
-	open(filename, 'w').write(str.rstrip() + "\n")
+    open(filename, 'w').write(str.rstrip() + "\n")
 
 
 def read_keyval(path):
-	"""
-	Read a key-value pair format file into a dictionary, and return it.
-	Takes either a filename or directory name as input. If it's a
-	directory name, we assume you want the file to be called keyval.
-	"""
-	if os.path.isdir(path):
-		path = os.path.join(path, 'keyval')
-	keyval = {}
-	for line in open(path):
-		line = re.sub('#.*', '', line.rstrip())
-		if not re.search(r'^[-\w]+=', line):
-			raise ValueError('Invalid format line: %s' % line)
-		key, value = line.split('=', 1)
-		if re.search('^\d+$', value):
-			value = int(value)
-		elif re.search('^(\d+\.)?\d+$', value):
-			value = float(value)
-		keyval[key] = value
-	return keyval
+    """
+    Read a key-value pair format file into a dictionary, and return it.
+    Takes either a filename or directory name as input. If it's a
+    directory name, we assume you want the file to be called keyval.
+    """
+    if os.path.isdir(path):
+        path = os.path.join(path, 'keyval')
+    keyval = {}
+    for line in open(path):
+        line = re.sub('#.*', '', line.rstrip())
+        if not re.search(r'^[-\w]+=', line):
+            raise ValueError('Invalid format line: %s' % line)
+        key, value = line.split('=', 1)
+        if re.search('^\d+$', value):
+            value = int(value)
+        elif re.search('^(\d+\.)?\d+$', value):
+            value = float(value)
+        keyval[key] = value
+    return keyval
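As an illustration (the path is hypothetical): a keyval file containing the lines kernel=2.6.18, iterations=3 and load=0.75 reads back with ints and floats auto-converted:

    keyval = read_keyval('/tmp/results')    # reads /tmp/results/keyval
    assert keyval == {'kernel': '2.6.18', 'iterations': 3, 'load': 0.75}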
 
 
 def write_keyval(path, dictionary, type_tag=None):
-	"""
-	Write a key-value pair format file out to a file. This uses append
-	mode to open the file, so existing text will not be overwritten or
-	reparsed.
+    """
+    Write a key-value pair format file out to a file. This uses append
+    mode to open the file, so existing text will not be overwritten or
+    reparsed.
 
-	If type_tag is None, then the key must be composed of alphanumeric
-	characters (or dashes+underscores). However, if type-tag is not
-	null then the keys must also have "{type_tag}" as a suffix. At
-	the moment the only valid values of type_tag are "attr" and "perf".
-	"""
-	if os.path.isdir(path):
-		path = os.path.join(path, 'keyval')
-	keyval = open(path, 'a')
+    If type_tag is None, then the key must be composed of alphanumeric
+    characters (or dashes+underscores). However, if type_tag is not
+    None then the keys must also have "{type_tag}" as a suffix. At
+    the moment the only valid values of type_tag are "attr" and "perf".
+    """
+    if os.path.isdir(path):
+        path = os.path.join(path, 'keyval')
+    keyval = open(path, 'a')
 
-	if type_tag is None:
-		key_regex = re.compile(r'^[-\w]+$')
-	else:
-		if type_tag not in ('attr', 'perf'):
-			raise ValueError('Invalid type tag: %s' % type_tag)
-		escaped_tag = re.escape(type_tag)
-		key_regex = re.compile(r'^[-\w]+\{%s\}$' % escaped_tag)
-	try:
-		for key, value in dictionary.iteritems():
-			if not key_regex.search(key):
-				raise ValueError('Invalid key: %s' % key)
-			keyval.write('%s=%s\n' % (key, value))
-	finally:
-		keyval.close()
+    if type_tag is None:
+        key_regex = re.compile(r'^[-\w]+$')
+    else:
+        if type_tag not in ('attr', 'perf'):
+            raise ValueError('Invalid type tag: %s' % type_tag)
+        escaped_tag = re.escape(type_tag)
+        key_regex = re.compile(r'^[-\w]+\{%s\}$' % escaped_tag)
+    try:
+        for key, value in dictionary.iteritems():
+            if not key_regex.search(key):
+                raise ValueError('Invalid key: %s' % key)
+            keyval.write('%s=%s\n' % (key, value))
+    finally:
+        keyval.close()
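A sketch of the two key styles accepted above (job.resultdir is an assumed destination):

    # plain attribute keys (type_tag=None)
    write_keyval(job.resultdir, {'kernel-version': '2.6.18'})
    # perf keys must carry the matching {perf} suffix
    write_keyval(job.resultdir, {'throughput{perf}': 42.5}, type_tag='perf')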
 
 
 def is_url(path):
-	"""Return true if path looks like a URL"""
-	# for now, just handle http and ftp
-	url_parts = urlparse.urlparse(path)
-	return (url_parts[0] in ('http', 'ftp'))
+    """Return true if path looks like a URL"""
+    # for now, just handle http and ftp
+    url_parts = urlparse.urlparse(path)
+    return (url_parts[0] in ('http', 'ftp'))
 
 
 def urlopen(url, data=None, proxies=None, timeout=300):
-	"""Wrapper to urllib.urlopen with timeout addition."""
+    """Wrapper to urllib.urlopen with timeout addition."""
 
-	# Save old timeout
-	old_timeout = socket.getdefaulttimeout()
-	socket.setdefaulttimeout(timeout)
-	try:
-		return urllib.urlopen(url, data=data, proxies=proxies)
-	finally:
-		socket.setdefaulttimeout(old_timeout)
+    # Save old timeout
+    old_timeout = socket.getdefaulttimeout()
+    socket.setdefaulttimeout(timeout)
+    try:
+        return urllib.urlopen(url, data=data, proxies=proxies)
+    finally:
+        socket.setdefaulttimeout(old_timeout)
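Usage sketch (the URL is a placeholder):

    # fetch with a 60-second socket timeout instead of the 300s default
    data = urlopen('http://example.com/', timeout=60).read()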
 
 
 def urlretrieve(url, filename=None, reporthook=None, data=None, timeout=300):
-	"""Wrapper to urllib.urlretrieve with timeout addition."""
-	old_timeout = socket.getdefaulttimeout()
-	socket.setdefaulttimeout(timeout)
-	try:
-		return urllib.urlretrieve(url, filename=filename,
-		                          reporthook=reporthook, data=data)
-	finally:
-		socket.setdefaulttimeout(old_timeout)
-	
+    """Wrapper to urllib.urlretrieve with timeout addition."""
+    old_timeout = socket.getdefaulttimeout()
+    socket.setdefaulttimeout(timeout)
+    try:
+        return urllib.urlretrieve(url, filename=filename,
+                                  reporthook=reporthook, data=data)
+    finally:
+        socket.setdefaulttimeout(old_timeout)
+
 
 def get_file(src, dest, permissions=None):
-	"""Get a file from src, which can be local or a remote URL"""
-	if (src == dest):
-		return
-	if (is_url(src)):
-		print 'PWD: ' + os.getcwd()
-		print 'Fetching \n\t', src, '\n\t->', dest
-		try:
-			urllib.urlretrieve(src, dest)
-		except IOError, e:
-			raise error.AutotestError('Unable to retrieve %s (to %s)'
-					    % (src, dest), e)
-	else:
-		shutil.copyfile(src, dest)
-	if permissions:
-		os.chmod(dest, permissions)
-	return dest
+    """Get a file from src, which can be local or a remote URL"""
+    if src == dest:
+        return
+    if is_url(src):
+        print 'PWD: ' + os.getcwd()
+        print 'Fetching \n\t', src, '\n\t->', dest
+        try:
+            urllib.urlretrieve(src, dest)
+        except IOError, e:
+            raise error.AutotestError('Unable to retrieve %s (to %s)'
+                                      % (src, dest), e)
+    else:
+        shutil.copyfile(src, dest)
+    if permissions:
+        os.chmod(dest, permissions)
+    return dest
 
 
 def unmap_url(srcdir, src, destdir='.'):
-	"""
-	Receives either a path to a local file or a URL.
-	returns either the path to the local file, or the fetched URL
+    """
+    Receives either a path to a local file or a URL.
+    returns either the path to the local file, or the fetched URL
 
-	unmap_url('/usr/src', 'foo.tar', '/tmp')
-				= '/usr/src/foo.tar'
-	unmap_url('/usr/src', 'http://site/file', '/tmp')
-				= '/tmp/file'
-				(after retrieving it)
-	"""
-	if is_url(src):
-		url_parts = urlparse.urlparse(src)
-		filename = os.path.basename(url_parts[2])
-		dest = os.path.join(destdir, filename)
-		return get_file(src, dest)
-	else:
-		return os.path.join(srcdir, src)
+    unmap_url('/usr/src', 'foo.tar', '/tmp')
+                            = '/usr/src/foo.tar'
+    unmap_url('/usr/src', 'http://site/file', '/tmp')
+                            = '/tmp/file'
+                            (after retrieving it)
+    """
+    if is_url(src):
+        url_parts = urlparse.urlparse(src)
+        filename = os.path.basename(url_parts[2])
+        dest = os.path.join(destdir, filename)
+        return get_file(src, dest)
+    else:
+        return os.path.join(srcdir, src)
 
 
 def update_version(srcdir, preserve_srcdir, new_version, install,
-		   *args, **dargs):
-	"""
-	Make sure srcdir is version new_version
+                   *args, **dargs):
+    """
+    Make sure srcdir is version new_version
 
-	If not, delete it and install() the new version.
+    If not, delete it and install() the new version.
 
-	In the preserve_srcdir case, we just check it's up to date,
-	and if not, we rerun install, without removing srcdir
-	"""
-	versionfile = os.path.join(srcdir, '.version')
-	install_needed = True
+    In the preserve_srcdir case, we just check it's up to date,
+    and if not, we rerun install, without removing srcdir
+    """
+    versionfile = os.path.join(srcdir, '.version')
+    install_needed = True
 
-	if os.path.exists(versionfile):
-		old_version = pickle.load(open(versionfile))
-		if old_version == new_version:
-			install_needed = False
+    if os.path.exists(versionfile):
+        old_version = pickle.load(open(versionfile))
+        if old_version == new_version:
+            install_needed = False
 
-	if install_needed:
-		if not preserve_srcdir and os.path.exists(srcdir):
-			shutil.rmtree(srcdir)
-		install(*args, **dargs)
-		if os.path.exists(srcdir):
-			pickle.dump(new_version, open(versionfile, 'w'))
+    if install_needed:
+        if not preserve_srcdir and os.path.exists(srcdir):
+            shutil.rmtree(srcdir)
+        install(*args, **dargs)
+        if os.path.exists(srcdir):
+            pickle.dump(new_version, open(versionfile, 'w'))
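A sketch of the intended call pattern; install_foo and the paths are hypothetical:

    def install_foo(srcdir):
        # unpack and build the new version into srcdir
        pass

    update_version('/tmp/foo-src', False, '1.2', install_foo, '/tmp/foo-src')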
 
 
 def run(command, timeout=None, ignore_status=False,
-	stdout_tee=None, stderr_tee=None):
-	"""
-	Run a command on the host.
+        stdout_tee=None, stderr_tee=None):
+    """
+    Run a command on the host.
 
-	Args:
-		command: the command line string
-		timeout: time limit in seconds before attempting to
-			kill the running process. The run() function
-			will take a few seconds longer than 'timeout'
-			to complete if it has to kill the process.
-		ignore_status: do not raise an exception, no matter what
-			the exit code of the command is.
-		stdout_tee: optional file-like object to which stdout data
-		            will be written as it is generated (data will still
-			    be stored in result.stdout)
-		stderr_tee: likewise for stderr
+    Args:
+            command: the command line string
+            timeout: time limit in seconds before attempting to
+                    kill the running process. The run() function
+                    will take a few seconds longer than 'timeout'
+                    to complete if it has to kill the process.
+            ignore_status: do not raise an exception, no matter what
+                    the exit code of the command is.
+            stdout_tee: optional file-like object to which stdout data
+                        will be written as it is generated (data will still
+                        be stored in result.stdout)
+            stderr_tee: likewise for stderr
 
-	Returns:
-		a CmdResult object
+    Returns:
+            a CmdResult object
 
-	Raises:
-		CmdError: the exit code of the command
-			execution was not 0
-	"""
-	return join_bg_job(run_bg(command), command, timeout, ignore_status,
-			   stdout_tee, stderr_tee)
+    Raises:
+            CmdError: the exit code of the command
+                    execution was not 0
+    """
+    return join_bg_job(run_bg(command), command, timeout, ignore_status,
+                       stdout_tee, stderr_tee)
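Typical use, sketched with an arbitrary command:

    result = run('uname -r', timeout=10, ignore_status=True)
    print result.exit_status    # 0 on success
    print result.stdout         # captured output; see CmdResult below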
 
 
 def run_bg(command):
-	"""Run the command in a subprocess and return the subprocess."""
-	result = CmdResult(command)
-	sp = subprocess.Popen(command, stdout=subprocess.PIPE,
-			      stderr=subprocess.PIPE,
-			      shell=True, executable="/bin/bash")
-	return sp, result
+    """Run the command in a subprocess and return the subprocess."""
+    result = CmdResult(command)
+    sp = subprocess.Popen(command, stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          shell=True, executable="/bin/bash")
+    return sp, result
 
 
 def join_bg_job(bg_job, command, timeout=None, ignore_status=False,
-	stdout_tee=None, stderr_tee=None):
-	"""Join the subprocess with the current thread. See run description."""
-	sp, result = bg_job
-	stdout_file = StringIO.StringIO()
-	stderr_file = StringIO.StringIO()
-	(ret, timeouterr) = (0, False)
+        stdout_tee=None, stderr_tee=None):
+    """Join the subprocess with the current thread. See run description."""
+    sp, result = bg_job
+    stdout_file = StringIO.StringIO()
+    stderr_file = StringIO.StringIO()
+    (ret, timeouterr) = (0, False)
 
-	try:
-		# We are holding ends to stdin, stdout pipes
-		# hence we need to be sure to close those fds no mater what
-		start_time = time.time()
-		(ret, timeouterr) = _wait_for_command(sp, start_time,
-					timeout, stdout_file, stderr_file,
-					stdout_tee, stderr_tee)
-		result.exit_status = ret
-		result.duration = time.time() - start_time
-		# don't use os.read now, so we get all the rest of the output
-		_process_output(sp.stdout, stdout_file, stdout_tee,
-				use_os_read=False)
-		_process_output(sp.stderr, stderr_file, stderr_tee,
-				use_os_read=False)
-	finally:
-		# close our ends of the pipes to the sp no matter what
-		sp.stdout.close()
-		sp.stderr.close()
+    try:
+        # We are holding ends of the stdout and stderr pipes,
+        # hence we need to be sure to close those fds no matter what
+        start_time = time.time()
+        (ret, timeouterr) = _wait_for_command(sp, start_time, timeout,
+                                              stdout_file, stderr_file,
+                                              stdout_tee, stderr_tee)
+        result.exit_status = ret
+        result.duration = time.time() - start_time
+        # don't use os.read now, so we get all the rest of the output
+        _process_output(sp.stdout, stdout_file, stdout_tee,
+                        use_os_read=False)
+        _process_output(sp.stderr, stderr_file, stderr_tee,
+                        use_os_read=False)
+    finally:
+        # close our ends of the pipes to the sp no matter what
+        sp.stdout.close()
+        sp.stderr.close()
 
-	result.stdout = stdout_file.getvalue()
-	result.stderr = stderr_file.getvalue()
+    result.stdout = stdout_file.getvalue()
+    result.stderr = stderr_file.getvalue()
 
-	if result.exit_status != 0:
-		if timeouterr:
-			raise error.CmdError(command, result, "Command did not "
-					     "complete within %d seconds" % timeout)
-		elif not ignore_status:
-			raise error.CmdError(command, result,
-					     "Command returned non-zero exit status")
+    if result.exit_status != 0:
+        if timeouterr:
+            raise error.CmdError(command, result, "Command did not "
+                                 "complete within %d seconds" % timeout)
+        elif not ignore_status:
+            raise error.CmdError(command, result,
+                                 "Command returned non-zero exit status")
 
-	return result
+    return result
 
 # this returns a tuple with the return code and a flag to specify if the error
 # is due to the process not terminating within timeout
 def _wait_for_command(subproc, start_time, timeout, stdout_file, stderr_file,
-		      stdout_tee, stderr_tee):
-	if timeout:
-		stop_time = start_time + timeout
-		time_left = stop_time - time.time()
-	else:
-		time_left = None # so that select never times out
-	while not timeout or time_left > 0:
-		# select will return when stdout is ready (including when it is
-		# EOF, that is the process has terminated).
-		ready, _, _ = select.select([subproc.stdout, subproc.stderr],
-					     [], [], time_left)
-		# os.read() has to be used instead of
-		# subproc.stdout.read() which will otherwise block
-		if subproc.stdout in ready:
-			_process_output(subproc.stdout, stdout_file,
-					stdout_tee)
-		if subproc.stderr in ready:
-			_process_output(subproc.stderr, stderr_file,
-					stderr_tee)
+                      stdout_tee, stderr_tee):
+    exit_status_indication = None
+    if timeout:
+        stop_time = start_time + timeout
+        time_left = stop_time - time.time()
+    else:
+        time_left = None # so that select never times out
+    while not timeout or time_left > 0:
+        # select will return when stdout is ready (including when it is
+        # EOF, that is the process has terminated).
+        ready, _, _ = select.select([subproc.stdout, subproc.stderr],
+                                     [], [], time_left)
+        # os.read() has to be used instead of
+        # subproc.stdout.read() which will otherwise block
+        if subproc.stdout in ready:
+            _process_output(subproc.stdout, stdout_file,
+                            stdout_tee)
+        if subproc.stderr in ready:
+            _process_output(subproc.stderr, stderr_file,
+                            stderr_tee)
 
-		exit_status_indication = subproc.poll()
+        exit_status_indication = subproc.poll()
 
-		if exit_status_indication is not None:
-			return (exit_status_indication, False)
+        if exit_status_indication is not None:
+            return (exit_status_indication, False)
 
-		if timeout:
-			time_left = stop_time - time.time()
+        if timeout:
+            time_left = stop_time - time.time()
 
-	# the process has not terminated within timeout,
-	# kill it via an escalating series of signals.
-	if exit_status_indication is None:
-		exit_status_indication = nuke_subprocess(subproc)
+    # the process has not terminated within timeout,
+    # kill it via an escalating series of signals.
+    if exit_status_indication is None:
+        exit_status_indication = nuke_subprocess(subproc)
 
-	return (exit_status_indication, True)
+    return (exit_status_indication, True)
 
 
 def _process_output(pipe, fbuffer, teefile=None, use_os_read=True):
-	if use_os_read:
-		data = os.read(pipe.fileno(), 1024)
-	else:
-		data = pipe.read()
-	fbuffer.write(data)
-	if teefile:
-		teefile.write(data)
-		teefile.flush()
+    if use_os_read:
+        data = os.read(pipe.fileno(), 1024)
+    else:
+        data = pipe.read()
+    fbuffer.write(data)
+    if teefile:
+        teefile.write(data)
+        teefile.flush()
 
 
 def nuke_subprocess(subproc):
-       # the process has not terminated within timeout,
-       # kill it via an escalating series of signals.
-       signal_queue = [signal.SIGTERM, signal.SIGKILL]
-       for sig in signal_queue:
-	       try:
-		       os.kill(subproc.pid, sig)
-	       # The process may have died before we could kill it.
-	       except OSError:
-		       pass
+    # the process has not terminated within timeout,
+    # kill it via an escalating series of signals.
+    signal_queue = [signal.SIGTERM, signal.SIGKILL]
+    for sig in signal_queue:
+        try:
+            os.kill(subproc.pid, sig)
+        # The process may have died before we could kill it.
+        except OSError:
+            pass
 
-	       for i in range(5):
-		       rc = subproc.poll()
-		       if rc != None:
-			       return rc
-		       time.sleep(1)
+        for i in range(5):
+            rc = subproc.poll()
+            if rc is not None:
+                return rc
+            time.sleep(1)
 
 
 def nuke_pid(pid):
-       # the process has not terminated within timeout,
-       # kill it via an escalating series of signals.
-       signal_queue = [signal.SIGTERM, signal.SIGKILL]
-       for sig in signal_queue:
-	       try:
-		       os.kill(pid, sig)
+    # the process has not terminated within timeout,
+    # kill it via an escalating series of signals.
+    signal_queue = [signal.SIGTERM, signal.SIGKILL]
+    for sig in signal_queue:
+        try:
+            os.kill(pid, sig)
 
-	       # The process may have died before we could kill it.
-	       except OSError:
-		       pass
+        # The process may have died before we could kill it.
+        except OSError:
+            pass
 
-	       try:
-		       for i in range(5):
-			       status = os.waitpid(pid, os.WNOHANG)[0]
-			       if status == pid:
-				       return
-			       time.sleep(1)
+        try:
+            for i in range(5):
+                status = os.waitpid(pid, os.WNOHANG)[0]
+                if status == pid:
+                    return
+                time.sleep(1)
 
-		       if status != pid:
-			       raise error.AutoservRunError('Could not kill %d'
-				       % pid, None)
+            if status != pid:
+                raise error.AutoservRunError('Could not kill %d'
+                                             % pid, None)
 
-	       # the process died before we join it.
-	       except OSError:
-		       pass
+        # the process died before we join it.
+        except OSError:
+            pass
 
 
 def _process_output(pipe, fbuffer, teefile=None, use_os_read=True):
-	if use_os_read:
-		data = os.read(pipe.fileno(), 1024)
-	else:
-		data = pipe.read()
-	fbuffer.write(data)
-	if teefile:
-		teefile.write(data)
-		teefile.flush()
+    if use_os_read:
+        data = os.read(pipe.fileno(), 1024)
+    else:
+        data = pipe.read()
+    fbuffer.write(data)
+    if teefile:
+        teefile.write(data)
+        teefile.flush()
 
 
 def system(command, timeout=None, ignore_status=False):
-	return run(command, timeout, ignore_status,
-		stdout_tee=sys.stdout, stderr_tee=sys.stderr).exit_status
+    return run(command, timeout, ignore_status,
+               stdout_tee=sys.stdout, stderr_tee=sys.stderr).exit_status
 
 
 def system_output(command, timeout=None, ignore_status=False,
-		  retain_output=False):
-	if retain_output:
-		out = run(command, timeout, ignore_status,
-			  stdout_tee=sys.stdout, stderr_tee=sys.stderr).stdout
-	else:
-		out = run(command, timeout, ignore_status).stdout
-	if out[-1:] == '\n': out = out[:-1]
-	return out
+                  retain_output=False):
+    if retain_output:
+        out = run(command, timeout, ignore_status,
+                  stdout_tee=sys.stdout, stderr_tee=sys.stderr).stdout
+    else:
+        out = run(command, timeout, ignore_status).stdout
+    if out[-1:] == '\n':
+        out = out[:-1]
+    return out
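Sketched usage of the two wrappers above:

    system('make clean')                 # exit status only; output teed through
    kernel = system_output('uname -r')   # stdout, trailing newline stripped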
 
 """
 This function is used when there is a need to run more than one
@@ -400,134 +400,134 @@
 
 """
 def get_sync_control_file(control, host_name, host_num,
-			  instance, num_jobs, port_base=63100):
-	sc_bar_port = port_base
-	c_bar_port = port_base
-	if host_num < 0:
-		print "Please provide a non negative number for the host"
-		return None
-	s_bar_port = port_base + 1 + host_num # The set of s_bar_ports are
-                                              # the same for a given machine
+                          instance, num_jobs, port_base=63100):
+    sc_bar_port = port_base
+    c_bar_port = port_base
+    if host_num < 0:
+        print "Please provide a non negative number for the host"
+        return None
+    s_bar_port = port_base + 1 + host_num # The set of s_bar_ports are
+                                          # the same for a given machine
 
-	sc_bar_timeout = 180
-	s_bar_timeout = c_bar_timeout = 120
+    sc_bar_timeout = 180
+    s_bar_timeout = c_bar_timeout = 120
 
-	# The barrier code snippet is prepended into the conrol file
-	# dynamically before at.run() is called finally.
-	control_new = []
+    # The barrier code snippet is prepended to the control file
+    # dynamically, just before at.run() is finally called.
+    control_new = []
 
-       	# jobid is the unique name used to identify the processes
-	# trying to reach the barriers
-	jobid = "%s#%d" % (host_name, instance)
+    # jobid is the unique name used to identify the processes
+    # trying to reach the barriers
+    jobid = "%s#%d" % (host_name, instance)
 
-	rendv = []
-	# rendvstr is a temp holder for the rendezvous list of the processes
-	for n in range(num_jobs):
-		rendv.append("'%s#%d'" % (host_name, n))
-	rendvstr = ",".join(rendv)
+    rendv = []
+    # rendvstr is a temp holder for the rendezvous list of the processes
+    for n in range(num_jobs):
+        rendv.append("'%s#%d'" % (host_name, n))
+    rendvstr = ",".join(rendv)
 
-	if instance == 0:
-		# Do the setup and wait at the server barrier
-		# Clean up the tmp and the control dirs for the first instance
-		control_new.append('if os.path.exists(job.tmpdir):')
-		control_new.append("\t system('umount -f %s > /dev/null"
-				   "2> /dev/null' % job.tmpdir,"
-				   "ignore_status=True)")
-		control_new.append("\t system('rm -rf ' + job.tmpdir)")
-		control_new.append(
-		    'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
-		    % (jobid, sc_bar_timeout, sc_bar_port))
-		control_new.append(
-		'b0.rendevous_servers("PARALLEL_MASTER", "%s")'
-		 % jobid)
+    if instance == 0:
+        # Do the setup and wait at the server barrier
+        # Clean up the tmp and the control dirs for the first instance
+        control_new.append('if os.path.exists(job.tmpdir):')
+        control_new.append("\t system('umount -f %s > /dev/null"
+                           "2> /dev/null' % job.tmpdir,"
+                           "ignore_status=True)")
+        control_new.append("\t system('rm -rf ' + job.tmpdir)")
+        control_new.append(
+            'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
+            % (jobid, sc_bar_timeout, sc_bar_port))
+        control_new.append(
+            'b0.rendevous_servers("PARALLEL_MASTER", "%s")'
+            % jobid)
 
-	elif instance == 1:
-		# Wait at the server barrier to wait for instance=0
-		# process to complete setup
-		b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
-			     port=sc_bar_port)
-		b0.rendevous_servers("PARALLEL_MASTER", jobid)
+    elif instance == 1:
+        # Wait at the server barrier to wait for instance=0
+        # process to complete setup
+        b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
+                             port=sc_bar_port)
+        b0.rendevous_servers("PARALLEL_MASTER", jobid)
 
-		if(num_jobs > 2):
-			b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
-				     port=s_bar_port)
-	        	b1.rendevous(rendvstr)
+        if num_jobs > 2:
+            b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
+                                 port=s_bar_port)
+            b1.rendevous(rendvstr)
 
-	else:
-		# For the rest of the clients
-		b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
-		b2.rendevous(rendvstr)
+    else:
+        # For the rest of the clients
+        b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
+        b2.rendevous(rendvstr)
 
-	# Client side barrier for all the tests to start at the same time
-	control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
-			% (jobid, c_bar_timeout, c_bar_port))
-	control_new.append("b1.rendevous(%s)" % rendvstr)
+    # Client side barrier for all the tests to start at the same time
+    control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
+                    % (jobid, c_bar_timeout, c_bar_port))
+    control_new.append("b1.rendevous(%s)" % rendvstr)
 
-	# Stick in the rest of the control file
-	control_new.append(control)
+    # Stick in the rest of the control file
+    control_new.append(control)
 
-	return "\n".join(control_new)
+    return "\n".join(control_new)
 
 
 class CmdResult(object):
-	"""
-	Command execution result.
+    """
+    Command execution result.
 
-	command:     String containing the command line itself
-	exit_status: Integer exit code of the process
-	stdout:      String containing stdout of the process
-	stderr:      String containing stderr of the process
-	duration:    Elapsed wall clock time running the process
-	"""
+    command:     String containing the command line itself
+    exit_status: Integer exit code of the process
+    stdout:      String containing stdout of the process
+    stderr:      String containing stderr of the process
+    duration:    Elapsed wall clock time running the process
+    """
 
 
-	def __init__(self, command=None, stdout="", stderr="", 
-		     exit_status=None, duration=0):
-		self.command = command
-		self.exit_status = exit_status
-		self.stdout = stdout
-		self.stderr = stderr
-		self.duration = duration
+    def __init__(self, command=None, stdout="", stderr="",
+                 exit_status=None, duration=0):
+        self.command = command
+        self.exit_status = exit_status
+        self.stdout = stdout
+        self.stderr = stderr
+        self.duration = duration
 
 
-	def __repr__(self):
-		wrapper = textwrap.TextWrapper(width = 78, 
-					       initial_indent="\n    ",
-					       subsequent_indent="    ")
-		
-		stdout = self.stdout.rstrip()
-		if stdout:
-			stdout = "\nstdout:\n%s" % stdout
-		
-		stderr = self.stderr.rstrip()
-		if stderr:
-			stderr = "\nstderr:\n%s" % stderr
-		
-		return ("* Command: %s\n"
-			"Exit status: %s\n"
-			"Duration: %s\n"
-			"%s"
-			"%s"
-			% (wrapper.fill(self.command), self.exit_status, 
-			self.duration, stdout, stderr))
+    def __repr__(self):
+        wrapper = textwrap.TextWrapper(width = 78,
+                                       initial_indent="\n    ",
+                                       subsequent_indent="    ")
+
+        stdout = self.stdout.rstrip()
+        if stdout:
+            stdout = "\nstdout:\n%s" % stdout
+
+        stderr = self.stderr.rstrip()
+        if stderr:
+            stderr = "\nstderr:\n%s" % stderr
+
+        return ("* Command: %s\n"
+                "Exit status: %s\n"
+                "Duration: %s\n"
+                "%s"
+                "%s"
+                % (wrapper.fill(self.command), self.exit_status,
+                self.duration, stdout, stderr))
 
 
 class run_randomly:
-	def __init__(self, run_sequentially=False):
-		# Run sequentially is for debugging control files
-		self.test_list = []
-		self.run_sequentially = run_sequentially
+    def __init__(self, run_sequentially=False):
+        # Run sequentially is for debugging control files
+        self.test_list = []
+        self.run_sequentially = run_sequentially
 
 
-	def add(self, *args, **dargs):
-		test = (args, dargs)
-		self.test_list.append(test)
+    def add(self, *args, **dargs):
+        test = (args, dargs)
+        self.test_list.append(test)
 
 
-	def run(self, fn):
-		while self.test_list:
-			test_index = random.randint(0, len(self.test_list)-1)
-			if self.run_sequentially:
-				test_index = 0
-			(args, dargs) = self.test_list.pop(test_index)
-			fn(*args, **dargs)
+    def run(self, fn):
+        while self.test_list:
+            test_index = random.randint(0, len(self.test_list)-1)
+            if self.run_sequentially:
+                test_index = 0
+            (args, dargs) = self.test_list.pop(test_index)
+            fn(*args, **dargs)
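
The barrier bootstrapping above is easier to follow with concrete values; a
minimal sketch of the jobid/rendvstr construction for a hypothetical
three-client run (the host name and counts are illustrative only):

    # Mirrors the construction in the code above.
    host_name = 'node7'        # hypothetical host
    num_jobs = 3
    instance = 1
    jobid = "%s#%d" % (host_name, instance)    # -> 'node7#1'
    rendv = ["'%s#%d'" % (host_name, n) for n in range(num_jobs)]
    rendvstr = ",".join(rendv)                 # -> "'node7#0','node7#1','node7#2'"
    print jobid, rendvstr
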
diff --git a/client/deps/boottool/boottool.py b/client/deps/boottool/boottool.py
index e801357..43e5cb2 100755
--- a/client/deps/boottool/boottool.py
+++ b/client/deps/boottool/boottool.py
@@ -7,21 +7,21 @@
 from autotest_lib.client.bin import autotest_utils
 
 # To use this, you have to set PERL5LIB to:
-# 		autodir+'deps/boottool/lib/perl' 
+#               autodir+'deps/boottool/lib/perl'
 # or on Ubuntu we also need
-# 		autodir+'deps/boottool/share/perl'
+#               autodir+'deps/boottool/share/perl'
 # because it uses nonstandard locations
 
 version = 1
 
-def setup(tarball, topdir): 
-	srcdir = os.path.join(topdir, 'src')
-	autotest_utils.extract_tarball_to_dir(tarball, srcdir)
-	os.chdir(srcdir)
-	utils.system ('perl Makefile.PL PREFIX=' + topdir)
-	utils.system ('make')
-	utils.system ('make install')
-	os.chdir(topdir)
+def setup(tarball, topdir):
+    srcdir = os.path.join(topdir, 'src')
+    autotest_utils.extract_tarball_to_dir(tarball, srcdir)
+    os.chdir(srcdir)
+    utils.system ('perl Makefile.PL PREFIX=' + topdir)
+    utils.system ('make')
+    utils.system ('make install')
+    os.chdir(topdir)
 
 
 pwd = os.getcwd()
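
The PERL5LIB comment above translates to something like the following sketch;
the install prefix here is hypothetical:

    import os
    autodir = '/usr/local/autotest/'   # hypothetical install prefix
    os.environ['PERL5LIB'] = ':'.join([
        autodir + 'deps/boottool/lib/perl',     # usual location
        autodir + 'deps/boottool/share/perl',   # also needed on Ubuntu
    ])
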
diff --git a/client/deps/libaio/libaio.py b/client/deps/libaio/libaio.py
index cfb037d..e673678 100755
--- a/client/deps/libaio/libaio.py
+++ b/client/deps/libaio/libaio.py
@@ -9,12 +9,12 @@
 version = 1
 
 def setup(tarball, topdir):
-	srcdir = os.path.join(topdir, 'src')
-	autotest_utils.extract_tarball_to_dir(tarball, srcdir)
-	os.chdir(srcdir)
-	utils.system ('make')
-	utils.system ('make prefix=%s install' % topdir)
-	os.chdir(topdir)
+    srcdir = os.path.join(topdir, 'src')
+    autotest_utils.extract_tarball_to_dir(tarball, srcdir)
+    os.chdir(srcdir)
+    utils.system ('make')
+    utils.system ('make prefix=%s install' % topdir)
+    os.chdir(topdir)
 
 
 # old source was
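
All the dep scripts in this patch share one shape: define
setup(tarball, topdir), then hand it to utils.update_version, which is
assumed to re-run setup only when the recorded version changes. A minimal
sketch of that convention (the tarball name is a placeholder):

    import os
    from autotest_lib.client.common_lib import utils
    from autotest_lib.client.bin import autotest_utils

    version = 1

    def setup(tarball, topdir):
        srcdir = os.path.join(topdir, 'src')
        autotest_utils.extract_tarball_to_dir(tarball, srcdir)
        os.chdir(srcdir)
        utils.system('make')    # build steps vary per dep
        os.chdir(topdir)

    pwd = os.getcwd()
    tarball = os.path.join(pwd, 'example-dep.tar.gz')   # placeholder name
    utils.update_version(pwd + '/src', False, version, setup, tarball, pwd)
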
diff --git a/client/deps/libnet/libnet.py b/client/deps/libnet/libnet.py
index eac66b2..90ab023 100755
--- a/client/deps/libnet/libnet.py
+++ b/client/deps/libnet/libnet.py
@@ -8,19 +8,19 @@
 
 version = 1
 
-def setup(tarball, topdir): 
-	srcdir = os.path.join(topdir, 'src')
-	if not os.path.exists(tarball):
-		utils.get_file('http://www.packetfactory.net/libnet/dist/libnet.tar.gz',
-			       tarball)
-	autotest_utils.extract_tarball_to_dir(tarball, 'src')
-	os.chdir(srcdir)
-	utils.system ('./configure --prefix=%s/libnet' % topdir)
-	utils.system('make')
-	utils.system('make install')
+def setup(tarball, topdir):
+    srcdir = os.path.join(topdir, 'src')
+    if not os.path.exists(tarball):
+        utils.get_file('http://www.packetfactory.net/libnet/dist/libnet.tar.gz',
+                       tarball)
+    autotest_utils.extract_tarball_to_dir(tarball, 'src')
+    os.chdir(srcdir)
+    utils.system ('./configure --prefix=%s/libnet' % topdir)
+    utils.system('make')
+    utils.system('make install')
 
-	os.chdir(topdir)
-	
+    os.chdir(topdir)
+
 pwd = os.getcwd()
 tarball = os.path.join(pwd, 'libnet.tar.gz')
 utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
diff --git a/client/deps/mysql/mysql.py b/client/deps/mysql/mysql.py
index 0fafe27..9a9eeb6 100755
--- a/client/deps/mysql/mysql.py
+++ b/client/deps/mysql/mysql.py
@@ -8,32 +8,32 @@
 
 version = 3
 
-def setup(tarball, topdir): 
-	srcdir = os.path.join(topdir, 'src')
-	if not os.path.exists(tarball):
-		utils.get_file('http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/mysql-5.0.45.tar.gz', tarball)
-	autotest_utils.extract_tarball_to_dir(tarball, 'src')
-	os.chdir(srcdir)
-	utils.system ('./configure --prefix=%s/mysql --enable-thread-safe-client' \
-			% topdir)
-	utils.system('make -j %d' % count_cpus())
-	utils.system('make install')
+def setup(tarball, topdir):
+    srcdir = os.path.join(topdir, 'src')
+    if not os.path.exists(tarball):
+        utils.get_file('http://mirror.x10.com/mirror/mysql/Downloads/MySQL-5.0/mysql-5.0.45.tar.gz', tarball)
+    autotest_utils.extract_tarball_to_dir(tarball, 'src')
+    os.chdir(srcdir)
+    utils.system ('./configure --prefix=%s/mysql --enable-thread-safe-client' \
+                    % topdir)
+    utils.system('make -j %d' % count_cpus())
+    utils.system('make install')
 
-	#
-	# MySQL doesn't create this directory on it's own.  
-	# This is where database logs and files are created.
-	#
-	try:
-		os.mkdir(topdir + '/mysql/var')
-	except:
-		pass
-	#
-	# Initialize the database.
-	#
-	utils.system('%s/mysql/bin/mysql_install_db' % topdir)
-	
-	os.chdir(topdir)
-	
+    #
+    # MySQL doesn't create this directory on its own.
+    # This is where database logs and files are created.
+    #
+    try:
+        os.mkdir(topdir + '/mysql/var')
+    except:
+        pass
+    #
+    # Initialize the database.
+    #
+    utils.system('%s/mysql/bin/mysql_install_db' % topdir)
+
+    os.chdir(topdir)
+
 pwd = os.getcwd()
 tarball = os.path.join(pwd, 'mysql-5.0.45.tar.gz')
 utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
diff --git a/client/deps/pgpool/pgpool.py b/client/deps/pgpool/pgpool.py
index 3034b6a..24afc26 100755
--- a/client/deps/pgpool/pgpool.py
+++ b/client/deps/pgpool/pgpool.py
@@ -8,23 +8,23 @@
 
 version = 1
 
-def setup(tarball, topdir): 
-	# FIXME - Waiting to be able to specify dependency.
-	#self.job.setup_dep(['pgsql'])
-	srcdir = os.path.join(topdir, 'src')
-	if not os.path.exists(tarball):
-		utils.get_file('http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz', tarball)
-	autotest_utils.extract_tarball_to_dir(tarball, 'src')
-	os.chdir(srcdir)
-	# FIXEME - Waiting to be able to use self.autodir instead of
-	# os.environ['AUTODIR']
-	utils.system('./configure --prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql' \
-			% (topdir, os.environ['AUTODIR']))
-	utils.system('make -j %d' % count_cpus())
-	utils.system('make install')
+def setup(tarball, topdir):
+    # FIXME - Waiting to be able to specify dependency.
+    #self.job.setup_dep(['pgsql'])
+    srcdir = os.path.join(topdir, 'src')
+    if not os.path.exists(tarball):
+        utils.get_file('http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz', tarball)
+    autotest_utils.extract_tarball_to_dir(tarball, 'src')
+    os.chdir(srcdir)
+    # FIXME - Waiting to be able to use self.autodir instead of
+    # os.environ['AUTODIR']
+    utils.system('./configure --prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql' \
+                    % (topdir, os.environ['AUTODIR']))
+    utils.system('make -j %d' % count_cpus())
+    utils.system('make install')
 
-	os.chdir(topdir)
-	
+    os.chdir(topdir)
+
 pwd = os.getcwd()
 tarball = os.path.join(pwd, 'pgpool-II-1.0.1.tar.gz')
 utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
diff --git a/client/deps/pgsql/pgsql.py b/client/deps/pgsql/pgsql.py
index 1de96b8..cd53653 100755
--- a/client/deps/pgsql/pgsql.py
+++ b/client/deps/pgsql/pgsql.py
@@ -8,18 +8,18 @@
 
 version = 4
 
-def setup(tarball, topdir): 
-	srcdir = os.path.join(topdir, 'src')
-	if not os.path.exists(tarball):
-		utils.get_file('ftp://ftp.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2', tarball)
-	autotest_utils.extract_tarball_to_dir(tarball, 'src')
-	os.chdir(srcdir)
-	utils.system ('./configure --without-readline --without-zlib --enable-debug --prefix=%s/pgsql' % topdir)
-	utils.system('make -j %d' % count_cpus())
-	utils.system('make install')
-	
-	os.chdir(topdir)
-	
+def setup(tarball, topdir):
+    srcdir = os.path.join(topdir, 'src')
+    if not os.path.exists(tarball):
+        utils.get_file('ftp://ftp.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2', tarball)
+    autotest_utils.extract_tarball_to_dir(tarball, 'src')
+    os.chdir(srcdir)
+    utils.system ('./configure --without-readline --without-zlib --enable-debug --prefix=%s/pgsql' % topdir)
+    utils.system('make -j %d' % count_cpus())
+    utils.system('make install')
+
+    os.chdir(topdir)
+
 pwd = os.getcwd()
 tarball = os.path.join(pwd, 'postgresql-8.3.1.tar.bz2')
 utils.update_version(pwd+'/src', False, version, setup, tarball, pwd)
diff --git a/client/profilers/catprofile/catprofile.py b/client/profilers/catprofile/catprofile.py
index 30167fc..5ccc835 100755
--- a/client/profilers/catprofile/catprofile.py
+++ b/client/profilers/catprofile/catprofile.py
@@ -3,41 +3,40 @@
 import profiler,time,os
 
 class catprofile(profiler.profiler):
-	version = 1
+    version = 1
 
-	# filenames: list of filenames to cat
-	def initialize(self, filenames = ['/proc/meminfo', '/proc/slabinfo'], 
-				outfile = 'monitor', interval = 1):
-		self.filenames = filenames
-		self.outfile = outfile
-		self.interval = interval
+    # filenames: list of filenames to cat
+    def initialize(self, filenames = ['/proc/meminfo', '/proc/slabinfo'],
+                            outfile = 'monitor', interval = 1):
+        self.filenames = filenames
+        self.outfile = outfile
+        self.interval = interval
 
 
-	def start(self, test):
-		self.child_pid = os.fork()
-		if self.child_pid:			# parent
-			return None
-		else:					# child
-			while 1:
-				lines = []
-				for filename in self.filenames:
-					input = open(filename, 'r')
-					lines += '\n----- %s -----\n' % filename
-					lines += input.readlines()
-					input.close
-				outfile = test.profdir + '/' + self.outfile
-				output = open(outfile, 'a')
-				output.write(time.asctime() + '\n')
-				output.writelines(lines)
-				output.write('\n=========================\n')
-				output.close()
-				time.sleep(self.interval)
+    def start(self, test):
+        self.child_pid = os.fork()
+        if self.child_pid:                      # parent
+            return None
+        else:                                   # child
+            while 1:
+                lines = []
+                for filename in self.filenames:
+                    input = open(filename, 'r')
+                    lines.append('\n----- %s -----\n' % filename)
+                    lines += input.readlines()
+                    input.close()
+                outfile = test.profdir + '/' + self.outfile
+                output = open(outfile, 'a')
+                output.write(time.asctime() + '\n')
+                output.writelines(lines)
+                output.write('\n=========================\n')
+                output.close()
+                time.sleep(self.interval)
 
 
-	def stop(self, test):
-		os.kill(self.child_pid, 15)
+    def stop(self, test):
+        os.kill(self.child_pid, 15)
 
 
-	def report(self, test):
-		return None
-
+    def report(self, test):
+        return None
diff --git a/client/profilers/iostat/iostat.py b/client/profilers/iostat/iostat.py
index 11fb24e..a258c1f 100755
--- a/client/profilers/iostat/iostat.py
+++ b/client/profilers/iostat/iostat.py
@@ -3,24 +3,23 @@
 import profiler,time,os,subprocess
 
 class iostat(profiler.profiler):
-	version = 1
+    version = 1
 
-	def initialize(self, interval = 1):
-		self.interval = interval
+    def initialize(self, interval = 1):
+        self.interval = interval
 
 
-	def start(self, test):
-		cmd = "/usr/bin/iostat %d" % self.interval
-		logfile = open(os.path.join(test.profdir, "iostat"), 'w')
-		p = subprocess.Popen(cmd, shell=True, stdout=logfile, \
-						stderr=subprocess.STDOUT)
-		self.pid = p.pid
+    def start(self, test):
+        cmd = "/usr/bin/iostat %d" % self.interval
+        logfile = open(os.path.join(test.profdir, "iostat"), 'w')
+        p = subprocess.Popen(cmd, shell=True, stdout=logfile, \
+                                        stderr=subprocess.STDOUT)
+        self.pid = p.pid
 
 
-	def stop(self, test):
-		os.kill(self.pid, 15)
+    def stop(self, test):
+        os.kill(self.pid, 15)
 
 
-	def report(self, test):
-		return None
-
+    def report(self, test):
+        return None
diff --git a/client/profilers/lockmeter/lockmeter.py b/client/profilers/lockmeter/lockmeter.py
index bea3cc6..8c3d014 100755
--- a/client/profilers/lockmeter/lockmeter.py
+++ b/client/profilers/lockmeter/lockmeter.py
@@ -1,47 +1,47 @@
 # NOTE: if you get compile errors from config.h, referring you to a FAQ,
-# you might need to do 'cat < /dev/null > /usr/include/linux/config.h'. 
+# you might need to do 'cat < /dev/null > /usr/include/linux/config.h'.
 # But read the FAQ first.
 import os, profiler
 from autotest_lib.client.common_lib import utils
 from autotest_lib.client.bin import autotest_utils
 
 class lockmeter(profiler.profiler):
-	version = 1
+    version = 1
 
 # ftp://oss.sgi.com/projects/lockmeter/download/lockstat-1.4.11.tar.gz
 # patched with lockstat.diff
 # ftp://oss.sgi.com/projects/lockmeter/download/v2.6/patch.2.6.14-lockmeter-1.gz
 # is the kernel patch
 
-	def setup(self, tarball = 'lockstat-1.4.11.tar.bz2'):
-		self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    def setup(self, tarball = 'lockstat-1.4.11.tar.bz2'):
+        self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
-		self.cmd = self.srcdir + '/lockstat'
+        utils.system('make')
+        self.cmd = self.srcdir + '/lockstat'
 
 
-	def initialize(self):
-		try:
-			assert os.path.exists('/proc/lockmeter')
-		except:
-			print 'Lockmeter is not compiled into your kernel'
-			print 'Please fix and try again'
-			raise AssertionError
+    def initialize(self):
+        try:
+            assert os.path.exists('/proc/lockmeter')
+        except:
+            print 'Lockmeter is not compiled into your kernel'
+            print 'Please fix and try again'
+            raise AssertionError
 
 
-	def start(self, test):
-		utils.system(self.cmd + ' off')
-		utils.system(self.cmd + ' reset')
-		utils.system(self.cmd + ' on')
+    def start(self, test):
+        utils.system(self.cmd + ' off')
+        utils.system(self.cmd + ' reset')
+        utils.system(self.cmd + ' on')
 
 
-	def stop(self, test):
-		utils.system(self.cmd + ' off')
+    def stop(self, test):
+        utils.system(self.cmd + ' off')
 
 
-	def report(self, test):
-		args = ' -m ' + autotest_utils.get_systemmap()
-		self.output = self.profdir + '/results/lockstat'
-		utils.system(self.cmd + args + ' print > ' + self.output)
+    def report(self, test):
+        args = ' -m ' + autotest_utils.get_systemmap()
+        self.output = self.profdir + '/results/lockstat'
+        utils.system(self.cmd + args + ' print > ' + self.output)
diff --git a/client/profilers/mpstat/mpstat.py b/client/profilers/mpstat/mpstat.py
index 261bd25..5870c9c 100644
--- a/client/profilers/mpstat/mpstat.py
+++ b/client/profilers/mpstat/mpstat.py
@@ -2,24 +2,24 @@
 import profiler,time,os,subprocess
 
 class mpstat(profiler.profiler):
-	version = 1
+    version = 1
 
 
-	def initialize(self, interval = 1):
-		self.interval = interval
+    def initialize(self, interval = 1):
+        self.interval = interval
 
 
-	def start(self, test):
-		cmd = "mpstat -P ALL %d" % self.interval
-		logfile = open(os.path.join(test.profdir, "mpstat"), 'w')
-		p = subprocess.Popen(cmd, shell=True, stdout=logfile,
-						stderr=subprocess.STDOUT)
-		self.pid = p.pid
+    def start(self, test):
+        cmd = "mpstat -P ALL %d" % self.interval
+        logfile = open(os.path.join(test.profdir, "mpstat"), 'w')
+        p = subprocess.Popen(cmd, shell=True, stdout=logfile,
+                                        stderr=subprocess.STDOUT)
+        self.pid = p.pid
 
 
-	def stop(self, test):
-		os.kill(self.pid, 15)
+    def stop(self, test):
+        os.kill(self.pid, 15)
 
 
-	def report(self, test):
-		return None
+    def report(self, test):
+        return None
diff --git a/client/profilers/oprofile/oprofile.py b/client/profilers/oprofile/oprofile.py
index a36989a..8f55736 100755
--- a/client/profilers/oprofile/oprofile.py
+++ b/client/profilers/oprofile/oprofile.py
@@ -1,112 +1,112 @@
-# Will need some libaries to compile. Do 'apt-get build-dep oprofile' 
+# Will need some libraries to compile. Do 'apt-get build-dep oprofile'
 import os, profiler, shutil
 from autotest_lib.client.common_lib import utils
 from autotest_lib.client.bin import autotest_utils
 
 class oprofile(profiler.profiler):
-	version = 5
+    version = 5
 
 # Notes on whether to use the local copy or the builtin from source:
 # local = None
 #      Try to use source copy if it works, else use local
 # local = False
-#	Force use of the source copy
+#       Force use of the source copy
 # local = True
-#	Force use of the local copy
+#       Force use of the local copy
 
 # http://prdownloads.sourceforge.net/oprofile/oprofile-0.9.3.tar.gz
-	def setup(self, tarball = 'oprofile-0.9.3.tar.bz2', local = None,
-								*args, **dargs):
-		if local == True:
-			return
+    def setup(self, tarball = 'oprofile-0.9.3.tar.bz2', local = None,
+                                                            *args, **dargs):
+        if local == True:
+            return
 
-		try:
-			self.tarball = utils.unmap_url(self.bindir, tarball,
-								self.tmpdir)
-			autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
-			os.chdir(self.srcdir)
+        try:
+            self.tarball = utils.unmap_url(self.bindir, tarball,
+                                                    self.tmpdir)
+            autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+            os.chdir(self.srcdir)
 
-			patch = os.path.join(self.bindir,"oprofile-69455.patch")
-			utils.system('patch -p1 < %s' % patch)
-			utils.system('./configure --with-kernel-support --prefix=' + \
-								self.srcdir)
-			utils.system('make')
-			utils.system('make install')
-		except:
-			# Build from source failed.
-			# But maybe can still use the local copy
-			local_opcontrol = os.path.exists('/usr/bin/opcontrol')
-			local_opreport = os.path.exists('/usr/bin/opreport')
-			if local == False or not local_opcontrol or not local_opreport:
-				raise
+            patch = os.path.join(self.bindir,"oprofile-69455.patch")
+            utils.system('patch -p1 < %s' % patch)
+            utils.system('./configure --with-kernel-support --prefix=' + \
+                                                    self.srcdir)
+            utils.system('make')
+            utils.system('make install')
+        except:
+            # Build from source failed.
+            # But maybe we can still use the local copy
+            local_opcontrol = os.path.exists('/usr/bin/opcontrol')
+            local_opreport = os.path.exists('/usr/bin/opreport')
+            if local == False or not local_opcontrol or not local_opreport:
+                raise
 
 
-	def initialize(self, vmlinux = None, events = [], others = None,
-								local = None):
-		if not vmlinux:
-			self.vmlinux = get_vmlinux()
-		else:
-			self.vmlinux = vmlinux
-		if not len(events):
-			self.events = ['default']
-		else:
-			self.events = events
-		self.others = others
+    def initialize(self, vmlinux = None, events = [], others = None,
+                                                            local = None):
+        if not vmlinux:
+            self.vmlinux = get_vmlinux()
+        else:
+            self.vmlinux = vmlinux
+        if not len(events):
+            self.events = ['default']
+        else:
+            self.events = events
+        self.others = others
 
-		# If there is existing setup file, oprofile may fail to start with default parameters.
-		if os.path.isfile('/root/.oprofile/daemonrc'):
-			os.rename('/root/.oprofile/daemonrc', '/root/.oprofile/daemonrc.org')
+        # If there is an existing setup file, oprofile may fail to start with default parameters.
+        if os.path.isfile('/root/.oprofile/daemonrc'):
+            os.rename('/root/.oprofile/daemonrc', '/root/.oprofile/daemonrc.org')
 
-		setup = ' --setup'
-		if not self.vmlinux:
-			setup += ' --no-vmlinux'
-		else:
-			setup += ' --vmlinux=%s' % self.vmlinux
-		for e in self.events:
-			setup += ' --event=%s' % e
-		if self.others:
-			setup += ' ' + self.others
+        setup = ' --setup'
+        if not self.vmlinux:
+            setup += ' --no-vmlinux'
+        else:
+            setup += ' --vmlinux=%s' % self.vmlinux
+        for e in self.events:
+            setup += ' --event=%s' % e
+        if self.others:
+            setup += ' ' + self.others
 
-		src_opreport  = os.path.join(self.srcdir, '/bin/opreport')
-		src_opcontrol = os.path.join(self.srcdir, '/bin/opcontrol')
-		if local == False or (local == None and 
-					os.path.exists(src_opreport) and 
-					os.path.exists(src_opcontrol)):
-			print "Using source-built copy of oprofile"
-			self.opreport = src_opreport
-			self.opcontrol = src_opcontrol
-		else:
-			print "Using machine local copy of oprofile"
-			self.opreport = '/usr/bin/opreport'
-			self.opcontrol = '/usr/bin/opcontrol'
+        src_opreport  = os.path.join(self.srcdir, 'bin/opreport')
+        src_opcontrol = os.path.join(self.srcdir, 'bin/opcontrol')
+        if local == False or (local == None and
+                                os.path.exists(src_opreport) and
+                                os.path.exists(src_opcontrol)):
+            print "Using source-built copy of oprofile"
+            self.opreport = src_opreport
+            self.opcontrol = src_opcontrol
+        else:
+            print "Using machine local copy of oprofile"
+            self.opreport = '/usr/bin/opreport'
+            self.opcontrol = '/usr/bin/opcontrol'
 
-		utils.system(self.opcontrol + setup)
+        utils.system(self.opcontrol + setup)
 
 
-	def start(self, test):
-		utils.system(self.opcontrol + ' --shutdown')
-		utils.system(self.opcontrol + ' --reset')
-		utils.system(self.opcontrol + ' --start')
+    def start(self, test):
+        utils.system(self.opcontrol + ' --shutdown')
+        utils.system(self.opcontrol + ' --reset')
+        utils.system(self.opcontrol + ' --start')
 
 
-	def stop(self, test):
-		utils.system(self.opcontrol + ' --stop')
-		utils.system(self.opcontrol + ' --dump')
+    def stop(self, test):
+        utils.system(self.opcontrol + ' --stop')
+        utils.system(self.opcontrol + ' --dump')
 
 
-	def report(self, test):
-		# Output kernel per-symbol profile report
-		reportfile = test.profdir + '/oprofile.kernel'
-		if self.vmlinux:
-			report = self.opreport + ' -l ' + self.vmlinux
-			if os.path.exists(get_modules_dir()):
-				report += ' -p ' + get_modules_dir()
-			utils.system(report + ' > ' + reportfile)
-		else:
-			utils.system("echo 'no vmlinux found.' > %s" %reportfile)
+    def report(self, test):
+        # Output kernel per-symbol profile report
+        reportfile = test.profdir + '/oprofile.kernel'
+        if self.vmlinux:
+            report = self.opreport + ' -l ' + self.vmlinux
+            if os.path.exists(get_modules_dir()):
+                report += ' -p ' + get_modules_dir()
+            utils.system(report + ' > ' + reportfile)
+        else:
+            utils.system("echo 'no vmlinux found.' > %s" %reportfile)
 
-		# output profile summary report
-		reportfile = test.profdir + '/oprofile.user'
-		utils.system(self.opreport + ' --long-filenames ' + ' > ' + reportfile)
+        # output profile summary report
+        reportfile = test.profdir + '/oprofile.user'
+        utils.system(self.opreport + ' --long-filenames ' + ' > ' + reportfile)
 
-		utils.system(self.opcontrol + ' --shutdown')
+        utils.system(self.opcontrol + ' --shutdown')
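
Tying back to the local=None/False/True notes at the top of this file, a
control-file sketch of the three modes; job.profilers.add is the usual entry
point, but treat the exact keyword passing as an assumption:

    job.profilers.add('oprofile')                 # None: source build, else local
    job.profilers.add('oprofile', local=True)     # force the /usr/bin/op* binaries
    job.profilers.add('oprofile', local=False)    # force the source-built copy
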
diff --git a/client/profilers/readprofile/readprofile.py b/client/profilers/readprofile/readprofile.py
index 95b1c60..8ac1c37 100755
--- a/client/profilers/readprofile/readprofile.py
+++ b/client/profilers/readprofile/readprofile.py
@@ -3,45 +3,45 @@
 from autotest_lib.client.bin import autotest_utils
 
 class readprofile(profiler.profiler):
-	version = 1
+    version = 1
 
 # http://www.kernel.org/pub/linux/utils/util-linux/util-linux-2.12r.tar.bz2
-	def setup(self, tarball = 'util-linux-2.12r.tar.bz2'):
-		self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    def setup(self, tarball = 'util-linux-2.12r.tar.bz2'):
+        self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('./configure')
-		os.chdir('sys-utils')
-		utils.system('make readprofile')
+        utils.system('./configure')
+        os.chdir('sys-utils')
+        utils.system('make readprofile')
 
 
-	def initialize(self):
-		try:
-			utils.system('grep -iq " profile=" /proc/cmdline')
-		except CmdError:
-			raise AutotestError('readprofile not enabled')
+    def initialize(self):
+        try:
+            utils.system('grep -iq " profile=" /proc/cmdline')
+        except CmdError:
+            raise AutotestError('readprofile not enabled')
 
-		self.cmd = self.srcdir + '/sys-utils/readprofile'
+        self.cmd = self.srcdir + '/sys-utils/readprofile'
 
 
-	def start(self, test):
-		utils.system(self.cmd + ' -r')
+    def start(self, test):
+        utils.system(self.cmd + ' -r')
 
 
-	def stop(self, test):
-		# There's no real way to stop readprofile, so we stash the
-		# raw data at this point instead. BAD EXAMPLE TO COPY! ;-)
-		self.rawprofile = test.profdir + '/profile.raw'
-		print "STOP"
-		shutil.copyfile('/proc/profile', self.rawprofile)
+    def stop(self, test):
+        # There's no real way to stop readprofile, so we stash the
+        # raw data at this point instead. BAD EXAMPLE TO COPY! ;-)
+        self.rawprofile = test.profdir + '/profile.raw'
+        print "STOP"
+        shutil.copyfile('/proc/profile', self.rawprofile)
 
 
-	def report(self, test):
-		args  = ' -n'
-		args += ' -m ' + get_systemmap()
-		args += ' -p ' + self.rawprofile
-		cmd = self.cmd + ' ' + args
-		txtprofile = test.profdir + '/profile.text'
-		utils.system(cmd + ' | sort -nr > ' + txtprofile)
-		utils.system('bzip2 ' + self.rawprofile)
+    def report(self, test):
+        args  = ' -n'
+        args += ' -m ' + get_systemmap()
+        args += ' -p ' + self.rawprofile
+        cmd = self.cmd + ' ' + args
+        txtprofile = test.profdir + '/profile.text'
+        utils.system(cmd + ' | sort -nr > ' + txtprofile)
+        utils.system('bzip2 ' + self.rawprofile)
diff --git a/client/profilers/vmstat/vmstat.py b/client/profilers/vmstat/vmstat.py
index 632cc34..b381649 100755
--- a/client/profilers/vmstat/vmstat.py
+++ b/client/profilers/vmstat/vmstat.py
@@ -3,24 +3,23 @@
 import profiler,time,os,subprocess
 
 class vmstat(profiler.profiler):
-	version = 1
+    version = 1
 
-	def initialize(self, interval = 1):
-                self.interval = interval
+    def initialize(self, interval = 1):
+        self.interval = interval
 
 
-	def start(self, test):
-		cmd = "/usr/bin/vmstat %d" % self.interval
-		logfile = open(os.path.join(test.profdir, "vmstat"), 'w')
-		p = subprocess.Popen(cmd, shell=True, stdout=logfile, \
-						stderr=subprocess.STDOUT)
-		self.pid = p.pid
+    def start(self, test):
+        cmd = "/usr/bin/vmstat %d" % self.interval
+        logfile = open(os.path.join(test.profdir, "vmstat"), 'w')
+        p = subprocess.Popen(cmd, shell=True, stdout=logfile, \
+                                        stderr=subprocess.STDOUT)
+        self.pid = p.pid
 
 
-	def stop(self, test):
-		os.kill(self.pid, 15)
+    def stop(self, test):
+        os.kill(self.pid, 15)
 
 
-	def report(self, test):
-		return None
-
+    def report(self, test):
+        return None
diff --git a/client/setup_modules.py b/client/setup_modules.py
index 4e2185e..f2de48c 100644
--- a/client/setup_modules.py
+++ b/client/setup_modules.py
@@ -4,95 +4,95 @@
 
 
 def _create_module(name):
-	"""Create a single top-level module"""
-	module = new.module(name)
-	sys.modules[name] = module
-	return module
+    """Create a single top-level module"""
+    module = new.module(name)
+    sys.modules[name] = module
+    return module
 
 
 def _create_module_and_parents(name):
-	"""Create a module, and all the necessary parents"""
-	parts = name.split(".")
-	# first create the top-level module
-	parent = _create_module(parts[0])
-	created_parts = [parts[0]]
-	parts.pop(0)
-	# now, create any remaining child modules
-	while parts:
-		child_name = parts.pop(0)
-		module = new.module(child_name)
-		setattr(parent, child_name, module)
-		created_parts.append(child_name)
-		sys.modules[".".join(created_parts)] = module
-		parent = module
+    """Create a module, and all the necessary parents"""
+    parts = name.split(".")
+    # first create the top-level module
+    parent = _create_module(parts[0])
+    created_parts = [parts[0]]
+    parts.pop(0)
+    # now, create any remaining child modules
+    while parts:
+        child_name = parts.pop(0)
+        module = new.module(child_name)
+        setattr(parent, child_name, module)
+        created_parts.append(child_name)
+        sys.modules[".".join(created_parts)] = module
+        parent = module
 
 
 def _import_children_into_module(parent_module_name, path):
-	"""Import all the packages on a path into a parent module"""
-	# find all the packages at 'path'
-	names = []
-	for filename in os.listdir(path):
-		full_name = os.path.join(path, filename)
-		if not os.path.isdir(full_name):
-			continue   # skip files
-		if "__init__.py" in os.listdir(full_name):
-			names.append(filename)
-	# import all the packages and insert them into 'parent_module'
-	sys.path.insert(0, path)
-	for name in names:
-		module = __import__(name)
-		# add the package to the parent
-		parent_module = sys.modules[parent_module_name]
-		setattr(parent_module, name, module)
-		full_name = parent_module_name + "." + name
-		sys.modules[full_name] = module
-	# restore the system path
-	sys.path.pop(0)
+    """Import all the packages on a path into a parent module"""
+    # find all the packages at 'path'
+    names = []
+    for filename in os.listdir(path):
+        full_name = os.path.join(path, filename)
+        if not os.path.isdir(full_name):
+            continue   # skip files
+        if "__init__.py" in os.listdir(full_name):
+            names.append(filename)
+    # import all the packages and insert them into 'parent_module'
+    sys.path.insert(0, path)
+    for name in names:
+        module = __import__(name)
+        # add the package to the parent
+        parent_module = sys.modules[parent_module_name]
+        setattr(parent_module, name, module)
+        full_name = parent_module_name + "." + name
+        sys.modules[full_name] = module
+    # restore the system path
+    sys.path.pop(0)
 
 
 def _setup_common_library(root_module_name):
-	"""
-	Setup aliases for all the common libraries, e.g.
-		common -> autotest_lib.client.common_lib
-		common.error -> autotest_lib.client.common_lib.error
-	"""
-	# convert the root_module_name into a client module name
-	parts = root_module_name.split(".")
-	if parts[-1] == "client":
-		client_name = root_module_name
-	else:
-		client_name = root_module_name + ".client"
-	# import the top-level common library
-	common_lib = __import__(client_name, globals(), locals(),
-				["common_lib"]).common_lib
-	sys.modules["common"] = common_lib
-	# patch up all the root_module_name.*.common libs
-	for module_name in sys.modules.iterkeys():
-		if (module_name.startswith(root_module_name + ".") and
-		    module_name.endswith(".common")):
-			sys.modules[module_name] = common_lib
-	# import the specific common libraries
-	for library in common_lib.__all__:
-		module = __import__(client_name + ".common_lib", globals(),
-				    locals(), [library])
-		module = getattr(module, library)
-		setattr(common_lib, library, module)
-		sys.modules["common.%s" % library] = module
+    """
+    Setup aliases for all the common libraries, e.g.
+            common -> autotest_lib.client.common_lib
+            common.error -> autotest_lib.client.common_lib.error
+    """
+    # convert the root_module_name into a client module name
+    parts = root_module_name.split(".")
+    if parts[-1] == "client":
+        client_name = root_module_name
+    else:
+        client_name = root_module_name + ".client"
+    # import the top-level common library
+    common_lib = __import__(client_name, globals(), locals(),
+                            ["common_lib"]).common_lib
+    sys.modules["common"] = common_lib
+    # patch up all the root_module_name.*.common libs
+    for module_name in sys.modules.iterkeys():
+        if (module_name.startswith(root_module_name + ".") and
+            module_name.endswith(".common")):
+            sys.modules[module_name] = common_lib
+    # import the specific common libraries
+    for library in common_lib.__all__:
+        module = __import__(client_name + ".common_lib", globals(),
+                            locals(), [library])
+        module = getattr(module, library)
+        setattr(common_lib, library, module)
+        sys.modules["common.%s" % library] = module
 
 
 def setup(base_path, root_module_name=""):
-	"""
-	Perform all the necessary setup so that all the packages at
-	'base_path' can be imported via "import root_module_name.package".
-	If root_module_name is empty, then all the packages at base_path
-	are inserted as top-level packages.
+    """
+    Perform all the necessary setup so that all the packages at
+    'base_path' can be imported via "import root_module_name.package".
+    If root_module_name is empty, then all the packages at base_path
+    are inserted as top-level packages.
 
-	Also, setup all the common.* aliases for modules in the common
-	library.
-	"""
-	_create_module_and_parents(root_module_name)
-	_import_children_into_module(root_module_name, base_path)
-	_setup_common_library(root_module_name)
+    Also, setup all the common.* aliases for modules in the common
+    library.
+    """
+    _create_module_and_parents(root_module_name)
+    _import_children_into_module(root_module_name, base_path)
+    _setup_common_library(root_module_name)
 
 
 # This must run on Python versions less than 2.4.
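
The machinery above amounts to planting module objects directly in
sys.modules; a minimal sketch of the idea (package names are illustrative),
using the Python 2 'new' module just as the file itself does:

    import new, sys

    pkg = new.module('examplepkg')           # hypothetical top-level package
    sys.modules['examplepkg'] = pkg
    child = new.module('child')
    setattr(pkg, 'child', child)
    sys.modules['examplepkg.child'] = child

    import examplepkg.child                  # now resolves via sys.modules
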
diff --git a/client/tests/aborttest/aborttest.py b/client/tests/aborttest/aborttest.py
index 44ceb17..6e8dadd 100755
--- a/client/tests/aborttest/aborttest.py
+++ b/client/tests/aborttest/aborttest.py
@@ -2,7 +2,7 @@
 from autotest_lib.client.bin import test
 
 class aborttest(test.test):
-	version = 1
+    version = 1
 
-	def execute(self):
-		raise error.JobError('Arrrrrrrrggggh. You are DOOOMED')
+    def execute(self):
+        raise error.JobError('Arrrrrrrrggggh. You are DOOOMED')
diff --git a/client/tests/aio_dio_bugs/aio_dio_bugs.py b/client/tests/aio_dio_bugs/aio_dio_bugs.py
index a4563d8..05132db 100644
--- a/client/tests/aio_dio_bugs/aio_dio_bugs.py
+++ b/client/tests/aio_dio_bugs/aio_dio_bugs.py
@@ -5,36 +5,36 @@
 
 # tests is a simple array of "cmd" "arguments"
 tests = [["aio-dio-invalidate-failure", "poo"],
-	 ["aio-dio-subblock-eof-read", "eoftest"],
-	 ["aio-free-ring-with-bogus-nr-pages", ""],
-	 ["aio-io-setup-with-nonwritable-context-pointer", ""],
-	 ["aio-dio-extend-stat", "file"],
-	]
+         ["aio-dio-subblock-eof-read", "eoftest"],
+         ["aio-free-ring-with-bogus-nr-pages", ""],
+         ["aio-io-setup-with-nonwritable-context-pointer", ""],
+         ["aio-dio-extend-stat", "file"],
+        ]
 name = 0
 arglist = 1
 
 class aio_dio_bugs(test.test):
-	version = 5
-	preserve_srcdir = True
+    version = 5
+    preserve_srcdir = True
 
-	def initialize(self):
-		self.job.setup_dep(['libaio'])
-		ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
-		cflags = '-I ' + self.autodir + '/deps/libaio/include'
-		self.gcc_flags = ldflags + ' ' + cflags
+    def initialize(self):
+        self.job.setup_dep(['libaio'])
+        ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
+        cflags = '-I ' + self.autodir + '/deps/libaio/include'
+        self.gcc_flags = ldflags + ' ' + cflags
 
-	def setup(self):
-		os.chdir(self.srcdir)
-		utils.system('make ' + '"CFLAGS=' + self.gcc_flags + '"')
+    def setup(self):
+        os.chdir(self.srcdir)
+        utils.system('make ' + '"CFLAGS=' + self.gcc_flags + '"')
 
 
-	def execute(self, args = ''):
-		os.chdir(self.tmpdir)
-		libs = self.autodir + '/deps/libaio/lib/'
-		ld_path = autotest_utils.prepend_path(libs,
-                                      autotest_utils.environ('LD_LIBRARY_PATH'))
-		var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
-		for test in tests:
-			cmd = self.srcdir + '/' + test[name] + ' ' \
-			      + args + ' ' + test[arglist]
-			utils.system(var_ld_path + ' ' + cmd)
+    def execute(self, args = ''):
+        os.chdir(self.tmpdir)
+        libs = self.autodir + '/deps/libaio/lib/'
+        ld_path = autotest_utils.prepend_path(libs,
+                              autotest_utils.environ('LD_LIBRARY_PATH'))
+        var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
+        for test in tests:
+            cmd = self.srcdir + '/' + test[name] + ' ' \
+                  + args + ' ' + test[arglist]
+            utils.system(var_ld_path + ' ' + cmd)
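
The LD_LIBRARY_PATH handling above leans on two small helpers; a sketch of
the behaviour assumed of them here (not necessarily their actual
implementation):

    import os

    def environ(key):
        # assumed: unset variables read back as the empty string
        return os.environ.get(key, '')

    def prepend_path(newpath, oldpath):
        # assumed: colon-join, tolerating an empty existing path
        if oldpath:
            return newpath + ':' + oldpath
        return newpath
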
diff --git a/client/tests/aiostress/aiostress.py b/client/tests/aiostress/aiostress.py
index 14878f8..6d39edc 100755
--- a/client/tests/aiostress/aiostress.py
+++ b/client/tests/aiostress/aiostress.py
@@ -1,7 +1,7 @@
 # This requires aio headers to build.
 # Should work automagically out of deps now.
 
-# NOTE - this should also have the ability to mount a filesystem, 
+# NOTE - this should also have the ability to mount a filesystem,
 # run the tests, unmount it, then fsck the filesystem
 import os
 from autotest_lib.client.bin import test, autotest_utils
@@ -9,68 +9,68 @@
 
 
 class aiostress(test.test):
-	version = 2
+    version = 2
 
-	def initialize(self):
-		self.job.setup_dep(['libaio'])
-		ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
-		cflags = '-I ' + self.autodir + '/deps/libaio/include'
-		self.gcc_flags = ldflags + ' ' + cflags
+    def initialize(self):
+        self.job.setup_dep(['libaio'])
+        ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
+        cflags = '-I ' + self.autodir + '/deps/libaio/include'
+        self.gcc_flags = ldflags + ' ' + cflags
 
 
-	# ftp://ftp.suse.com/pub/people/mason/utils/aio-stress.c
-	def setup(self, tarball = None):
-		print self.srcdir, self.bindir, self.tmpdir
-		os.mkdir(self.srcdir)
-		os.chdir(self.srcdir)
-		utils.system('cp ' + self.bindir+'/aio-stress.c .')
-		os.chdir(self.srcdir)
-		self.gcc_flags += ' -Wall -lpthread -laio'
-		cmd = 'gcc ' + self.gcc_flags + ' aio-stress.c -o aio-stress'
-		utils.system(cmd)
+    # ftp://ftp.suse.com/pub/people/mason/utils/aio-stress.c
+    def setup(self, tarball = None):
+        print self.srcdir, self.bindir, self.tmpdir
+        os.mkdir(self.srcdir)
+        os.chdir(self.srcdir)
+        utils.system('cp ' + self.bindir+'/aio-stress.c .')
+        os.chdir(self.srcdir)
+        self.gcc_flags += ' -Wall -lpthread -laio'
+        cmd = 'gcc ' + self.gcc_flags + ' aio-stress.c -o aio-stress'
+        utils.system(cmd)
 
 
-	def execute(self, args = ''):
-		os.chdir(self.tmpdir)
-		libs = self.autodir+'/deps/libaio/lib/'
-		ld_path = autotest_utils.prepend_path(libs,
-		                      autotest_utils.environ('LD_LIBRARY_PATH'))
-		var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
-		cmd = self.srcdir + '/aio-stress ' + args + ' poo'
-		profilers = self.job.profilers
+    def execute(self, args = ''):
+        os.chdir(self.tmpdir)
+        libs = self.autodir+'/deps/libaio/lib/'
+        ld_path = autotest_utils.prepend_path(libs,
+                              autotest_utils.environ('LD_LIBRARY_PATH'))
+        var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
+        cmd = self.srcdir + '/aio-stress ' + args + ' poo'
+        profilers = self.job.profilers
 
-		if not profilers.only():
-			utils.system(var_ld_path + ' ' + cmd)
-			report = open(self.debugdir + '/stderr')
-			keyval = open(self.resultsdir + '/keyval', 'w')
-			_format_results(report, keyval)
+        if not profilers.only():
+            utils.system(var_ld_path + ' ' + cmd)
+            report = open(self.debugdir + '/stderr')
+            keyval = open(self.resultsdir + '/keyval', 'w')
+            _format_results(report, keyval)
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system(var_ld_path + ' ' + cmd)
-			profilers.stop(self)
-			profilers.report(self)
-			if profilers.only():
-				report = open(self.debugdir + '/stderr')
-				keyval = open(self.resultsdir + '/keyval', 'w')
-				_format_results(report, keyval)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system(var_ld_path + ' ' + cmd)
+            profilers.stop(self)
+            profilers.report(self)
+            if profilers.only():
+                report = open(self.debugdir + '/stderr')
+                keyval = open(self.resultsdir + '/keyval', 'w')
+                _format_results(report, keyval)
 
 
 
 def _format_results(report, keyval):
-	for line in report:
-		if 'threads' in line:
-			if 'files' in line:
-				if 'contexts' in line:
-					break
+    for line in report:
+        if 'threads' in line:
+            if 'files' in line:
+                if 'contexts' in line:
+                    break
 
-	for line in report:
-		line = line.split(')')[0]
-		key, value = line.split('(')
-		key = key.strip().replace(' ', '_')
-		value = value.split()[0]
-		print >> keyval, '%s=%s' % (key, value)
+    for line in report:
+        line = line.split(')')[0]
+        key, value = line.split('(')
+        key = key.strip().replace(' ', '_')
+        value = value.split()[0]
+        print >> keyval, '%s=%s' % (key, value)
 
 
 """
diff --git a/client/tests/barriertest/barriertest.py b/client/tests/barriertest/barriertest.py
index cf890ad..26bcab7 100644
--- a/client/tests/barriertest/barriertest.py
+++ b/client/tests/barriertest/barriertest.py
@@ -2,30 +2,30 @@
 from autotest_lib.client.bin import test
 
 class barriertest(test.test):
-	version = 1
+    version = 1
 
 
-	def execute(self, timeout_sync, timeout_start, timeout_stop,
-			hostid, masterid, all_ids):
-		profilers = self.job.profilers
+    def execute(self, timeout_sync, timeout_start, timeout_stop,
+                    hostid, masterid, all_ids):
+        profilers = self.job.profilers
 
-		b0 = self.job.barrier(hostid, "sync_profilers",
-			timeout_start, port=63100)
-		b0.rendevous_servers(masterid, hostid)
+        b0 = self.job.barrier(hostid, "sync_profilers",
+                timeout_start, port=63100)
+        b0.rendevous_servers(masterid, hostid)
 
-		b1 = self.job.barrier(hostid, "start_profilers",
-			timeout_start, port=63100)
-		b1.rendevous_servers(masterid, hostid)
+        b1 = self.job.barrier(hostid, "start_profilers",
+                timeout_start, port=63100)
+        b1.rendevous_servers(masterid, hostid)
 
-		b2 = self.job.barrier(hostid, "local_sync_profilers",
-			timeout_sync)
-		b2.rendevous(*all_ids)
+        b2 = self.job.barrier(hostid, "local_sync_profilers",
+                timeout_sync)
+        b2.rendevous(*all_ids)
 
-		profilers.start(self)
+        profilers.start(self)
 
-		b3 = self.job.barrier(hostid, "stop_profilers",
-			timeout_stop, port=63100)
-		b3.rendevous_servers(masterid, hostid)
+        b3 = self.job.barrier(hostid, "stop_profilers",
+                timeout_stop, port=63100)
+        b3.rendevous_servers(masterid, hostid)
 
-		profilers.stop(self)
-		profilers.report(self)
+        profilers.stop(self)
+        profilers.report(self)
diff --git a/client/tests/bash_shared_mapping/bash_shared_mapping.py b/client/tests/bash_shared_mapping/bash_shared_mapping.py
index e3fb69d..df993c1 100755
--- a/client/tests/bash_shared_mapping/bash_shared_mapping.py
+++ b/client/tests/bash_shared_mapping/bash_shared_mapping.py
@@ -3,39 +3,39 @@
 from autotest_lib.client.common_lib import utils
 
 class bash_shared_mapping(test.test):
-	version = 3
+    version = 3
 
-	# http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
-	def setup(self, tarball = 'ext3-tools.tar.gz'):
-		self.tarball = utils.unmap_url(self.bindir, tarball,
-		                               self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+    # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
+    def setup(self, tarball = 'ext3-tools.tar.gz'):
+        self.tarball = utils.unmap_url(self.bindir, tarball,
+                                       self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
 
-		os.chdir(self.srcdir)
-		utils.system('make bash-shared-mapping usemem')
+        os.chdir(self.srcdir)
+        utils.system('make bash-shared-mapping usemem')
 
 
-	def execute(self, testdir = None, iterations = 10000):
-		if not testdir:
-			testdir = self.tmpdir
-		os.chdir(testdir)
-		file = os.path.join(testdir, 'foo')
-		# Want to use 3/4 of all memory for each of 
-		# bash-shared-mapping and usemem
-		kilobytes = (3 * autotest_utils.memtotal()) / 4
+    def execute(self, testdir = None, iterations = 10000):
+        if not testdir:
+            testdir = self.tmpdir
+        os.chdir(testdir)
+        file = os.path.join(testdir, 'foo')
+        # Want to use 3/4 of all memory for each of
+        # bash-shared-mapping and usemem
+        kilobytes = (3 * autotest_utils.memtotal()) / 4
 
-		# Want two usemem -m megabytes in parallel in background.
-		pid = [None, None]
-		usemem = os.path.join(self.srcdir, 'usemem')
-		args = ('usemem', '-N', '-m', '%d' % (kilobytes / 1024))
-		# print_to_tty ('2 x ' + ' '.join(args))
-		for i in (0,1):
-			pid[i] = os.spawnv(os.P_NOWAIT, usemem, args)
+        # Want two usemem -m megabytes in parallel in background.
+        pid = [None, None]
+        usemem = os.path.join(self.srcdir, 'usemem')
+        args = ('usemem', '-N', '-m', '%d' % (kilobytes / 1024))
+        # print_to_tty ('2 x ' + ' '.join(args))
+        for i in (0,1):
+            pid[i] = os.spawnv(os.P_NOWAIT, usemem, args)
 
-		cmd = "%s/bash-shared-mapping %s %d -t %d -n %d" % \
-					(self.srcdir, file, kilobytes,
-					 count_cpus(), iterations)
-		os.system(cmd)
+        cmd = "%s/bash-shared-mapping %s %d -t %d -n %d" % \
+                                (self.srcdir, file, kilobytes,
+                                 count_cpus(), iterations)
+        os.system(cmd)
 
-		for i in (0,1):
-			os.kill(pid[i], signal.SIGKILL)
+        for i in (0,1):
+            os.kill(pid[i], signal.SIGKILL)
diff --git a/client/tests/bonnie/bonnie.py b/client/tests/bonnie/bonnie.py
index e025221..e6def25 100755
--- a/client/tests/bonnie/bonnie.py
+++ b/client/tests/bonnie/bonnie.py
@@ -4,73 +4,73 @@
 
 
 def convert_size(values):
-        values = values.split(':')
-        size = values[0]
-        if len(values) > 1:
-                chunk = values[1]
+    values = values.split(':')
+    size = values[0]
+    if len(values) > 1:
+        chunk = values[1]
+    else:
+        chunk = 0
+    if size.endswith('G') or size.endswith('g'):
+        size = int(size[:-1]) * 2**30
+    else:
+        if size.endswith('M') or size.endswith('m'):
+            size = int(size[:-1])
+        size = int(size) * 2**20
+    if chunk:
+        if chunk.endswith('K') or chunk.endswith('k'):
+            chunk = int(chunk[:-1]) * 2**10
         else:
-                chunk = 0
-        if size.endswith('G') or size.endswith('g'):
-                size = int(size[:-1]) * 2**30
-        else:
-                if size.endswith('M') or size.endswith('m'):
-                        size = int(size[:-1])
-                size = int(size) * 2**20
-        if chunk:
-                if chunk.endswith('K') or chunk.endswith('k'):
-                        chunk = int(chunk[:-1]) * 2**10
-                else:
-                        chunk = int(chunk)
-        return [size, chunk]
+            chunk = int(chunk)
+    return [size, chunk]
 
 
 class bonnie(test.test):
-	version = 1
+    version = 1
 
-	# http://www.coker.com.au/bonnie++/bonnie++-1.03a.tgz
-	def setup(self, tarball = 'bonnie++-1.03a.tgz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://www.coker.com.au/bonnie++/bonnie++-1.03a.tgz
+    def setup(self, tarball = 'bonnie++-1.03a.tgz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		os_dep.command('g++')
-		utils.system('./configure')
-		utils.system('make')
+        os_dep.command('g++')
+        utils.system('./configure')
+        utils.system('make')
 
-	def execute(self, testdir = None, iterations = 1, extra_args = '', user = 'root'):
-		if not testdir:
-			testdir = self.tmpdir
+    def execute(self, testdir = None, iterations = 1, extra_args = '', user = 'root'):
+        if not testdir:
+            testdir = self.tmpdir
 
-		args = '-d ' + testdir + ' -u ' + user + ' ' + extra_args
-		cmd = self.srcdir + '/bonnie++ ' + args
-		results = []
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				results.append(utils.system_output(cmd,
-							    retain_output=True))
+        args = '-d ' + testdir + ' -u ' + user + ' ' + extra_args
+        cmd = self.srcdir + '/bonnie++ ' + args
+        results = []
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                results.append(utils.system_output(cmd,
+                                            retain_output=True))
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			results.append(utils.system_output(cmd,
-			               retain_output=True))
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            results.append(utils.system_output(cmd,
+                           retain_output=True))
+            profilers.stop(self)
+            profilers.report(self)
 
-		self.__format_results("\n".join(results))
+        self.__format_results("\n".join(results))
 
-	def __format_results(self, results):
-		strip_plus = lambda s: re.sub(r"^\++$", "0", s)
-		out = open(self.resultsdir + '/keyval', 'w')
-		for line in results.split('\n'):
-			if len([c for c in line if c == ',']) != 26:
-				continue
-			fields = tuple(line.split(','))
-			fields = [strip_plus(f) for f in fields]
-			fields = tuple(convert_size(fields[1]) + fields[2:])
-			print >> out, """size=%s
+    def __format_results(self, results):
+        strip_plus = lambda s: re.sub(r"^\++$", "0", s)
+        out = open(self.resultsdir + '/keyval', 'w')
+        for line in results.split('\n'):
+            if len([c for c in line if c == ',']) != 26:
+                continue
+            fields = tuple(line.split(','))
+            fields = [strip_plus(f) for f in fields]
+            fields = tuple(convert_size(fields[1]) + fields[2:])
+            print >> out, """size=%s
 chnk=%s
 seqout_perchr_ksec=%s
 seqout_perchr_pctcp=%s
@@ -98,4 +98,4 @@
 randcreate_delete_ksec=%s
 randcreate_delete_pctcp=%s
 """ % fields
-		out.close()
+        out.close()
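
To make convert_size's size[:chunk] parsing concrete, the values below follow
directly from the function as written (bare numbers are taken as megabytes,
G/M and K suffixes as binary units):

    print convert_size('2G')        # [2147483648, 0]
    print convert_size('512M:8k')   # [536870912, 8192]
    print convert_size('64')        # [67108864, 0]
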
diff --git a/client/tests/btreplay/btreplay.py b/client/tests/btreplay/btreplay.py
index ba6e5cb..5bfc289 100644
--- a/client/tests/btreplay/btreplay.py
+++ b/client/tests/btreplay/btreplay.py
@@ -4,131 +4,130 @@
 
 
 class btreplay(test.test):
-	version = 1
+    version = 1
 
-	# http://brick.kernel.dk/snaps/blktrace-git-latest.tar.gz
-	def setup(self, tarball = 'blktrace-git-latest.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+    # http://brick.kernel.dk/snaps/blktrace-git-latest.tar.gz
+    def setup(self, tarball = 'blktrace-git-latest.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
 
-		self.job.setup_dep(['libaio'])
-		libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
-		cflags = '-I ' + self.autodir + '/deps/libaio/include'
-		var_libs = 'LIBS="' + libs + '"'
-		var_cflags  = 'CFLAGS="' + cflags + '"'
-		self.make_flags = var_libs + ' ' + var_cflags
+        self.job.setup_dep(['libaio'])
+        libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
+        cflags = '-I ' + self.autodir + '/deps/libaio/include'
+        var_libs = 'LIBS="' + libs + '"'
+        var_cflags  = 'CFLAGS="' + cflags + '"'
+        self.make_flags = var_libs + ' ' + var_cflags
 
-		os.chdir(self.srcdir)
-		utils.system('patch -p1 < ../Makefile.patch')
-		utils.system(self.make_flags + ' make')
+        os.chdir(self.srcdir)
+        utils.system('patch -p1 < ../Makefile.patch')
+        utils.system(self.make_flags + ' make')
 
 
-	def initialize(self):
-		self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
-
-        
-        def _run_btreplay(self, dev, devices, tmpdir, extra_args):
-                alldevs="-d /dev/"+dev
-                alldnames = dev
-                for d in devices.split():
-                        alldevs += " -d /dev/"+d
-                        alldnames += " "+d
-
-                # convert the trace (assumed to be in this test's base
-                # directory) into btreplay's required format
-                utils.system("./btreplay/btrecord -d .. -D "+tmpdir+" "+dev)
-
-                # time a replay that omits "thinktime" between requests
-                # (by use of the -N flag)
-                utils.system(self.ldlib+" /usr/bin/time ./btreplay/btreplay -d "+\
-                        tmpdir+" -N -W "+dev+" "+extra_args+" 2>&1")
-
-                # trace a replay that reproduces inter-request delays, and
-                # analyse the trace with btt to determine the average request
-                # completion latency
-                utils.system("./blktrace -D "+tmpdir+" "+alldevs+" >/dev/null &")
-                utils.system(self.ldlib+" ./btreplay/btreplay -d "+tmpdir+" -W "+\
-                        dev+" "+extra_args)
-                utils.system("killall -INT blktrace")
-		
-		# wait until blktrace is really done
-		slept = 0.0
-		while utils.system("ps -C blktrace > /dev/null",
-			     ignore_status=True) == 0:
-			time.sleep(0.1)
-			slept += 0.1
-			if slept > 30.0:
-				utils.system("killall -9 blktrace")
-				raise error.TestError("blktrace failed to exit after 30 seconds")
-                utils.system("./blkparse -q -D "+tmpdir+" -d "+tmpdir+\
-                        "/trace.bin -O "+alldnames+" >/dev/null")
-                utils.system("./btt/btt -i "+tmpdir+"/trace.bin")
-        
-	def execute(self, iterations = 1, dev="", devices="",
-			extra_args = '', tmpdir = None):
-                # @dev: The device against which the trace will be replayed.
-                #       e.g. "sdb" or "md_d1"
-                # @devices: A space-separated list of the underlying devices
-                #    which make up dev, e.g. "sdb sdc". You only need to set
-                #    devices if dev is an MD, LVM, or similar device;
-                #    otherwise leave it as an empty string.
-
-		if not tmpdir:
-			tmpdir = self.tmpdir
-
-		os.chdir(self.srcdir)
-
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				self._run_btreplay(dev, devices, tmpdir, extra_args)
-
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			self._run_btreplay(dev, devices, tmpdir, extra_args)
-			profilers.stop(self)
-			profilers.report(self)
-
-		self.job.stdout.filehandle.flush()
-		self.__format_results(open(self.debugdir + '/stdout').read())
+    def initialize(self):
+        self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
 
 
-	def __format_results(self, results):
-		out = open(self.resultsdir + '/keyval', 'w')
-		lines = results.split('\n')
+    def _run_btreplay(self, dev, devices, tmpdir, extra_args):
+        alldevs="-d /dev/"+dev
+        alldnames = dev
+        for d in devices.split():
+            alldevs += " -d /dev/"+d
+            alldnames += " "+d
 
-		for n in range(len(lines)):
-			if lines[n].strip() == "==================== All Devices ====================":
-				words = lines[n-2].split()
-				s = words[1].strip('sytem').split(':')
-				e = words[2].strip('elapsd').split(':')
-				break
+        # convert the trace (assumed to be in this test's base
+        # directory) into btreplay's required format
+        utils.system("./btreplay/btrecord -d .. -D "+tmpdir+" "+dev)
 
-		systime = 0.0
-		for n in range(len(s)):
-			i = (len(s)-1) - n
-			systime += float(s[i])*(60**n)
-		elapsed = 0.0
-		for n in range(len(e)):
-			i = (len(e)-1) - n
-			elapsed += float(e[i])*(60**n)
+        # time a replay that omits "thinktime" between requests
+        # (by use of the -N flag)
+        utils.system(self.ldlib+" /usr/bin/time ./btreplay/btreplay -d "+\
+                tmpdir+" -N -W "+dev+" "+extra_args+" 2>&1")
 
-		q2c = 0.0
-		for line in lines:
-			words = line.split()
-			if len(words) < 3:
-				continue
-			if words[0] == 'Q2C':
-				q2c = float(words[2])
-				break
+        # trace a replay that reproduces inter-request delays, and
+        # analyse the trace with btt to determine the average request
+        # completion latency
+        utils.system("./blktrace -D "+tmpdir+" "+alldevs+" >/dev/null &")
+        utils.system(self.ldlib+" ./btreplay/btreplay -d "+tmpdir+" -W "+\
+                dev+" "+extra_args)
+        utils.system("killall -INT blktrace")
 
-		
-		print >> out, """\
+        # wait until blktrace is really done
+        slept = 0.0
+        while utils.system("ps -C blktrace > /dev/null",
+                     ignore_status=True) == 0:
+            time.sleep(0.1)
+            slept += 0.1
+            if slept > 30.0:
+                utils.system("killall -9 blktrace")
+                raise error.TestError("blktrace failed to exit after 30 seconds")
+        utils.system("./blkparse -q -D "+tmpdir+" -d "+tmpdir+\
+                "/trace.bin -O "+alldnames+" >/dev/null")
+        utils.system("./btt/btt -i "+tmpdir+"/trace.bin")
+
+    def execute(self, iterations = 1, dev="", devices="",
+                    extra_args = '', tmpdir = None):
+        # @dev: The device against which the trace will be replayed.
+        #       e.g. "sdb" or "md_d1"
+        # @devices: A space-separated list of the underlying devices
+        #    which make up dev, e.g. "sdb sdc". You only need to set
+        #    devices if dev is an MD, LVM, or similar device;
+        #    otherwise leave it as an empty string.
+
+        if not tmpdir:
+            tmpdir = self.tmpdir
+
+        os.chdir(self.srcdir)
+
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                self._run_btreplay(dev, devices, tmpdir, extra_args)
+
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            self._run_btreplay(dev, devices, tmpdir, extra_args)
+            profilers.stop(self)
+            profilers.report(self)
+
+        self.job.stdout.filehandle.flush()
+        self.__format_results(open(self.debugdir + '/stdout').read())
+
+
+    def __format_results(self, results):
+        out = open(self.resultsdir + '/keyval', 'w')
+        lines = results.split('\n')
+
+        for n in range(len(lines)):
+            if lines[n].strip() == "==================== All Devices ====================":
+                words = lines[n-2].split()
+                s = words[1].strip('sytem').split(':')
+                e = words[2].strip('elapsd').split(':')
+                break
+
+        systime = 0.0
+        for n in range(len(s)):
+            i = (len(s)-1) - n
+            systime += float(s[i])*(60**n)
+        elapsed = 0.0
+        for n in range(len(e)):
+            i = (len(e)-1) - n
+            elapsed += float(e[i])*(60**n)
+
+        q2c = 0.0
+        for line in lines:
+            words = line.split()
+            if len(words) < 3:
+                continue
+            if words[0] == 'Q2C':
+                q2c = float(words[2])
+                break
+
+
+        print >> out, """\
 time=%f
 systime=%f
 avg_q2c_latency=%f
 """ % (elapsed, systime, q2c)
-		out.close()
-
+        out.close()
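
Note: two details of the __format_results hunk above are easy to misread. First,
strip('sytem') and strip('elapsd') strip a character set (the deduplicated letters
of 'system' and 'elapsed'), not a literal suffix; they happen to work on
/usr/bin/time tokens such as '12.34system' and '0:05.67elapsed'. Second, the
colon-separated times are converted to seconds by weighting each field with a
power of 60, counting from the right. A standalone sketch of that conversion:

    def to_seconds(stamp):
        """Convert '1:23.45' or '0:01:02' style timestamps to seconds.

        Fields are weighted right-to-left: seconds * 60**0,
        minutes * 60**1, hours * 60**2, and so on.
        """
        total = 0.0
        for n, part in enumerate(reversed(stamp.split(':'))):
            total += float(part) * (60 ** n)
        return total

    assert to_seconds('1:23.45') == 60 + 23.45
    assert to_seconds('0:01:02') == 62.0
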
diff --git a/client/tests/container_functional/container_functional.py b/client/tests/container_functional/container_functional.py
index 62bc3e8..eec0532 100644
--- a/client/tests/container_functional/container_functional.py
+++ b/client/tests/container_functional/container_functional.py
@@ -7,66 +7,66 @@
 
 
 class container_functional(test.test):
-	version = 1
+    version = 1
 
-	def execute(self, mbytes=None, cpus=None, root='', name=None):
-		"""Check that the container was setup.
-		The arguments must be the same than
-		job.new_container()"""
-		if not name:
-			raise error.TestError("Must have a container name")
+    def execute(self, mbytes=None, cpus=None, root='', name=None):
+        """Check that the container was setup.
+        The arguments must be the same than
+        job.new_container()"""
+        if not name:
+            raise error.TestError("Must have a container name")
 
-		# Do container exists?
-		for container in ['sys', name]:
-			try:
-				utils.system('ls %s > /dev/null' % \
-					     os.path.join('/dev/cpuset',
-							  container))
-			except error.CmdError:
-				raise error.TestError("Container %s not created." % \
-						      container)
+        # Do the containers exist?
+        for container in ['sys', name]:
+            try:
+                utils.system('ls %s > /dev/null' % \
+                             os.path.join('/dev/cpuset',
+                                          container))
+            except error.CmdError:
+                raise error.TestError("Container %s not created." % \
+                                      container)
 
-		# Did we get the CPUs?
-		if cpus:
-			actual_cpus = utils.system_output('cat %s' % \
-							  os.path.join('/dev/cpuset',
-								       name,
-								       'cpus'))
-			if cpus != cpuset.rangelist_to_list(actual_cpus):
-				raise error.TestError(("CPUs = %s, "
-						      "expecting: %s") %
-						      (actual_cpus, cpus))
+        # Did we get the CPUs?
+        if cpus:
+            actual_cpus = utils.system_output('cat %s' % \
+                                              os.path.join('/dev/cpuset',
+                                                           name,
+                                                           'cpus'))
+            if cpus != cpuset.rangelist_to_list(actual_cpus):
+                raise error.TestError(("CPUs = %s, "
+                                      "expecting: %s") %
+                                      (actual_cpus, cpus))
 
-		# Are we in this container?
-		actual_pid = utils.system_output('cat %s' % \
-						 os.path.join('/dev/cpuset',
-							      name,
-							      'tasks'))
+        # Are we in this container?
+        actual_pid = utils.system_output('cat %s' % \
+                                         os.path.join('/dev/cpuset',
+                                                      name,
+                                                      'tasks'))
 
- 		if str(os.getpid()) not in actual_pid:
- 			raise error.TestError("My pid %s is not in "
- 					      "container task list: %s" % \
- 					      (str(os.getpid()), actual_pid))
+        if str(os.getpid()) not in actual_pid:
+            raise error.TestError("My pid %s is not in "
+                                  "container task list: %s" % \
+                                  (str(os.getpid()), actual_pid))
 
-		# Our memory nodes != sys memory nodes
-		actual_mems = utils.system_output('cat %s' % \
-						  os.path.join('/dev/cpuset',
-							       name,
-							       'mems'))
-		sys_mems = utils.system_output('cat %s' % \
-					       os.path.join('/dev/cpuset',
-							    'sys',
-							    'mems'))
+        # Our memory nodes != sys memory nodes
+        actual_mems = utils.system_output('cat %s' % \
+                                          os.path.join('/dev/cpuset',
+                                                       name,
+                                                       'mems'))
+        sys_mems = utils.system_output('cat %s' % \
+                                       os.path.join('/dev/cpuset',
+                                                    'sys',
+                                                    'mems'))
 
-		actual_nodes = set(cpuset.rangelist_to_list(actual_mems))
-		sys_nodes = set(cpuset.rangelist_to_list(sys_mems))
+        actual_nodes = set(cpuset.rangelist_to_list(actual_mems))
+        sys_nodes = set(cpuset.rangelist_to_list(sys_mems))
 
-		if actual_nodes.intersection(sys_nodes):
-			raise error.TestError("Sys nodes = %s\n"
-					      "My nodes = %s" % \
-					      (sys_nodes, actual_nodes))
+        if actual_nodes.intersection(sys_nodes):
+            raise error.TestError("Sys nodes = %s\n"
+                                  "My nodes = %s" % \
+                                  (sys_nodes, actual_nodes))
 
-		# Should only have one node for 100MB
-		if len(actual_nodes) != 1:
-			raise error.TestError(("Got more than 1 node: %s" %
-					       actual_nodes))
+        # Should only have one node for 100MB
+        if len(actual_nodes) != 1:
+            raise error.TestError(("Got more than 1 node: %s" %
+                                   actual_nodes))
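
Note: the checks above lean on cpuset.rangelist_to_list() to expand the range
strings that the cpuset filesystem reports in its cpus and mems files (e.g.
'0-2,7'). The real helper lives in autotest's cpuset module; this is an assumed,
illustrative reimplementation of the behaviour the test relies on:

    def rangelist_to_list(rangelist):
        """Expand a cpuset range string such as '0-2,7' into [0, 1, 2, 7]."""
        result = []
        if not rangelist.strip():
            return result
        for chunk in rangelist.split(','):
            if '-' in chunk:
                low, high = chunk.split('-')
                result.extend(range(int(low), int(high) + 1))
            else:
                result.append(int(chunk))
        return result

    assert rangelist_to_list('0-2,7') == [0, 1, 2, 7]
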
diff --git a/client/tests/cpu_hotplug/cpu_hotplug.py b/client/tests/cpu_hotplug/cpu_hotplug.py
index 9d016cc..352145b 100644
--- a/client/tests/cpu_hotplug/cpu_hotplug.py
+++ b/client/tests/cpu_hotplug/cpu_hotplug.py
@@ -3,44 +3,44 @@
 from autotest_lib.client.common_lib import utils
 
 class cpu_hotplug(test.test):
-	version = 2
+    version = 2
 
-	# http://developer.osdl.org/dev/hotplug/tests/lhcs_regression-1.6.tgz
-	def setup(self, tarball = 'lhcs_regression-1.6.tgz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		
-	def execute(self):
-		# Check if the kernel supports cpu hotplug
-		if autotest_utils.running_config():
-			autotest_utils.check_for_kernel_feature('HOTPLUG_CPU')
-		
-		# Check cpu nums, if equals 1, quit.
-		if autotest_utils.count_cpus() == 1:
-			print 'Just only single cpu online, quiting...'
-			sys.exit()
-		
-		# Have a simple and quick check first, FIX me please.
-		utils.system('dmesg -c > /dev/null')
-		for cpu in autotest_utils.cpu_online_map():
-			if os.path.isfile('/sys/devices/system/cpu/cpu%s/online' % cpu):
-				utils.system('echo 0 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
-				utils.system('dmesg -c')
-				time.sleep(3)
-				utils.system('echo 1 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
-				utils.system('dmesg -c')
-				time.sleep(3)
-		
-		# Begin this cpu hotplug test big guru.
-		os.chdir(self.srcdir)
-		profilers = self.job.profilers
-		if not profilers.only():
-			utils.system('./runtests.sh')
+    # http://developer.osdl.org/dev/hotplug/tests/lhcs_regression-1.6.tgz
+    def setup(self, tarball = 'lhcs_regression-1.6.tgz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system('./runtests.sh')
-			profilers.stop(self)
-			profilers.report(self)
+    def execute(self):
+        # Check if the kernel supports cpu hotplug
+        if autotest_utils.running_config():
+            autotest_utils.check_for_kernel_feature('HOTPLUG_CPU')
+
+        # If only a single cpu is online, there is nothing to hotplug; quit.
+        if autotest_utils.count_cpus() == 1:
+            print 'Only a single cpu is online, quitting...'
+            sys.exit()
+
+        # FIXME: do a simple and quick sanity check first.
+        utils.system('dmesg -c > /dev/null')
+        for cpu in autotest_utils.cpu_online_map():
+            if os.path.isfile('/sys/devices/system/cpu/cpu%s/online' % cpu):
+                utils.system('echo 0 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
+                utils.system('dmesg -c')
+                time.sleep(3)
+                utils.system('echo 1 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
+                utils.system('dmesg -c')
+                time.sleep(3)
+
+        # Begin the main cpu hotplug test suite.
+        os.chdir(self.srcdir)
+        profilers = self.job.profilers
+        if not profilers.only():
+            utils.system('./runtests.sh')
+
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system('./runtests.sh')
+            profilers.stop(self)
+            profilers.report(self)
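
Note: the quick sanity check above offlines and re-onlines each hot-pluggable CPU
by echoing into /sys/devices/system/cpu/cpuN/online (cpu0 typically has no such
file and cannot be offlined). The same toggle without shelling out, as a sketch
that must run as root:

    import os, time

    def toggle_cpu(cpu, settle=3):
        """Offline, then re-online one CPU via sysfs (requires root)."""
        path = '/sys/devices/system/cpu/cpu%s/online' % cpu
        if not os.path.isfile(path):
            return                      # this CPU is not hot-pluggable
        for state in ('0', '1'):
            f = open(path, 'w')
            f.write(state)
            f.close()
            time.sleep(settle)          # let the kernel migrate tasks
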
diff --git a/client/tests/cyclictest/cyclictest.py b/client/tests/cyclictest/cyclictest.py
index 5f0f98e..5c9189d 100755
--- a/client/tests/cyclictest/cyclictest.py
+++ b/client/tests/cyclictest/cyclictest.py
@@ -4,14 +4,14 @@
 
 
 class cyclictest(test.test):
-	version = 2
-	preserve_srcdir = True
+    version = 2
+    preserve_srcdir = True
 
-	# git://git.kernel.org/pub/scm/linux/kernel/git/tglx/rt-tests.git
+    # git://git.kernel.org/pub/scm/linux/kernel/git/tglx/rt-tests.git
 
-	def setup(self):
-		os.chdir(self.srcdir)
-		utils.system('make')
+    def setup(self):
+        os.chdir(self.srcdir)
+        utils.system('make')
 
-	def execute(self, args = '-t 10 -l 100000'):
-		utils.system(self.srcdir + '/cyclictest ' + args)
+    def execute(self, args = '-t 10 -l 100000'):
+        utils.system(self.srcdir + '/cyclictest ' + args)
diff --git a/client/tests/dacapo/dacapo.py b/client/tests/dacapo/dacapo.py
index 38bb711..e7f10c2 100644
--- a/client/tests/dacapo/dacapo.py
+++ b/client/tests/dacapo/dacapo.py
@@ -1,10 +1,10 @@
 # Dacapo test suite wrapper
 #
-# This benchmark suite is intended as a tool for Java benchmarking by the 
+# This benchmark suite is intended as a tool for Java benchmarking by the
 # programming language, memory management and computer architecture communities.
-# It consists of a set of open source, real world applications with non-trivial 
-# memory loads. The suite is the culmination of over five years work at eight 
-# institutions, as part of the DaCapo research project, which was funded by a 
+# It consists of a set of open source, real world applications with non-trivial
+# memory loads. The suite is the culmination of over five years work at eight
+# institutions, as part of the DaCapo research project, which was funded by a
 # National Science Foundation ITR Grant, CCR-0085792.
 #
 import os
@@ -14,101 +14,100 @@
 
 
 class dacapo(test.test):
-	version = 1
+    version = 1
 
-	def set_java_environment(self, jvm, java_root):
-		'''\
-		Setup java environment variables (path and classpath in order to
-		execute a specific jvm specified by the java_root variable. 
-		java_root - Base of the java vm installation
-		'''
-		# Sun has changed the directory layout for java 6
-		# (now there's no jre directory). Let's work around this...
-		if jvm == 'sun16':
-			self.java_home = java_root
-		else:
-			self.java_home = os.path.join(java_root, 'jre')
-		self.java_bin = os.path.join(self.java_home, 'bin')
-		self.java_lib =  os.path.join(self.java_home, 'lib')
-		os.environ['JAVA_ROOT'] = java_root
-		os.environ['JAVA_HOME'] = self.java_home
-		os.environ['JRE_HOME'] = self.java_home
-		os.environ['CLASSPATH'] = self.java_lib
-		os.environ['JAVA_BINDIR'] = self.java_bin
-		os.environ['PATH'] = self.java_bin + ':' + os.environ['PATH']
+    def set_java_environment(self, jvm, java_root):
+        '''\
+        Set up the java environment variables (path and classpath) in order
+        to execute the specific jvm given by the java_root variable.
+        java_root - Base of the java vm installation
+        '''
+        # Sun has changed the directory layout for java 6
+        # (now there's no jre directory). Let's work around this...
+        if jvm == 'sun16':
+            self.java_home = java_root
+        else:
+            self.java_home = os.path.join(java_root, 'jre')
+        self.java_bin = os.path.join(self.java_home, 'bin')
+        self.java_lib =  os.path.join(self.java_home, 'lib')
+        os.environ['JAVA_ROOT'] = java_root
+        os.environ['JAVA_HOME'] = self.java_home
+        os.environ['JRE_HOME'] = self.java_home
+        os.environ['CLASSPATH'] = self.java_lib
+        os.environ['JAVA_BINDIR'] = self.java_bin
+        os.environ['PATH'] = self.java_bin + ':' + os.environ['PATH']
 
 
-	def execute(self, test = 'antlr', config = './dacapo.cfg', jvm = 'ibm14-ppc64'):
-		# Load the test configuration. If needed, use autotest tmpdir to write
-		# files.
-		my_config = config_loader(config, self.tmpdir)
-		# Directory where we will cache the dacapo jar file
-		# and the jvm package files
-		self.cachedir = os.path.join(self.bindir, 'cache')
-		if not os.path.isdir(self.cachedir):
-			os.makedirs(self.cachedir)
+    def execute(self, test = 'antlr', config = './dacapo.cfg', jvm = 'ibm14-ppc64'):
+        # Load the test configuration. If needed, use autotest tmpdir to write
+        # files.
+        my_config = config_loader(config, self.tmpdir)
+        # Directory where we will cache the dacapo jar file
+        # and the jvm package files
+        self.cachedir = os.path.join(self.bindir, 'cache')
+        if not os.path.isdir(self.cachedir):
+            os.makedirs(self.cachedir)
 
-		# Get dacapo jar URL
-		# (It's possible to override the default URL that points to the 
-		# sourceforge repository)
-		if my_config.get('dacapo', 'override_default_url') == 'no':
-			self.dacapo_url = my_config.get('dacapo', 'tarball_url')
-		else:
-			self.dacapo_url = my_config.get('dacapo', 'tarball_url_alt')
-		if not self.dacapo_url:
-			raise error.TestError('Could not read dacapo URL from conf file')
-		# We can cache the dacapo package file if we take some
-		# precautions (checking md5 sum of the downloaded file)
-		self.dacapo_md5 = my_config.get('dacapo', 'package_md5')
-		if not self.dacapo_md5:
-			e_msg = 'Could not read dacapo package md5sum from conf file'
-			raise error.TestError(e_msg)
-		self.dacapo_pkg = \
-		autotest_utils.unmap_url_cache(self.cachedir, self.dacapo_url,
-		                               self.dacapo_md5)
+        # Get dacapo jar URL
+        # (It's possible to override the default URL that points to the
+        # sourceforge repository)
+        if my_config.get('dacapo', 'override_default_url') == 'no':
+            self.dacapo_url = my_config.get('dacapo', 'tarball_url')
+        else:
+            self.dacapo_url = my_config.get('dacapo', 'tarball_url_alt')
+        if not self.dacapo_url:
+            raise error.TestError('Could not read dacapo URL from conf file')
+        # We can cache the dacapo package file if we take some
+        # precautions (checking md5 sum of the downloaded file)
+        self.dacapo_md5 = my_config.get('dacapo', 'package_md5')
+        if not self.dacapo_md5:
+            e_msg = 'Could not read dacapo package md5sum from conf file'
+            raise error.TestError(e_msg)
+        self.dacapo_pkg = \
+        autotest_utils.unmap_url_cache(self.cachedir, self.dacapo_url,
+                                       self.dacapo_md5)
 
-		# Get jvm package URL
-		self.jvm_pkg_url = my_config.get(jvm, 'jvm_pkg_url')
-		if not self.jvm_pkg_url:
-			raise error.TestError('Could not read java vm URL from conf file')
-		# Let's cache the jvm package as well
-		self.jvm_pkg_md5 = my_config.get(jvm, 'package_md5')
-		if not self.jvm_pkg_md5:
-			raise error.TestError('Could not read java package_md5 from conf file')
-		self.jvm_pkg = \
-		autotest_utils.unmap_url_cache(self.cachedir, self.jvm_pkg_url,
-		                               self.jvm_pkg_md5)
+        # Get jvm package URL
+        self.jvm_pkg_url = my_config.get(jvm, 'jvm_pkg_url')
+        if not self.jvm_pkg_url:
+            raise error.TestError('Could not read java vm URL from conf file')
+        # Let's cache the jvm package as well
+        self.jvm_pkg_md5 = my_config.get(jvm, 'package_md5')
+        if not self.jvm_pkg_md5:
+            raise error.TestError('Could not read java package_md5 from conf file')
+        self.jvm_pkg = \
+        autotest_utils.unmap_url_cache(self.cachedir, self.jvm_pkg_url,
+                                       self.jvm_pkg_md5)
 
-		# Install the jvm pakage
-		package.install(self.jvm_pkg)
+        # Install the jvm package
+        package.install(self.jvm_pkg)
 
-		# Basic Java environment variables setup
-		self.java_root = my_config.get(jvm, 'java_root')
-		if not self.java_root:
-			raise error.TestError('Could not read java root dir from conf file')
-		self.set_java_environment(jvm, self.java_root)
+        # Basic Java environment variables setup
+        self.java_root = my_config.get(jvm, 'java_root')
+        if not self.java_root:
+            raise error.TestError('Could not read java root dir from conf file')
+        self.set_java_environment(jvm, self.java_root)
 
-		# If use_global is set to 'yes', then we want to use the global
-		# setting instead of per test settings
-		if my_config.get('global', 'use_global') == 'yes':
-			self.iterations = my_config.get('global', 'iterations')
-			self.workload = my_config.get('global', 'workload')
-		else:
-			self.iterations = my_config.get(test, 'iterations')
-			self.workload = my_config.get(test, 'workload')
+        # If use_global is set to 'yes', then we want to use the global
+        # setting instead of per test settings
+        if my_config.get('global', 'use_global') == 'yes':
+            self.iterations = my_config.get('global', 'iterations')
+            self.workload = my_config.get('global', 'workload')
+        else:
+            self.iterations = my_config.get(test, 'iterations')
+            self.workload = my_config.get(test, 'workload')
 
-		self.verbose = '-v '
-		self.workload = '-s %s ' % self.workload
-		self.iterations = '-n %s ' % self.iterations
-		self.scratch = '-scratch %s ' % os.path.join(self.resultsdir, test)
-		# Compose the arguments string
-		self.args = self.verbose + self.workload + self.scratch \
-		+ self.iterations + test
-		# Execute the actual test
-		try:
-			utils.system('java -jar %s %s' % (self.dacapo_pkg, self.args))
-		except:
-			e_msg = \
-			'Test %s has failed, command line options "%s"' % (test, self.args)
-			raise error.TestError(e_msg)
-
+        self.verbose = '-v '
+        self.workload = '-s %s ' % self.workload
+        self.iterations = '-n %s ' % self.iterations
+        self.scratch = '-scratch %s ' % os.path.join(self.resultsdir, test)
+        # Compose the arguments string
+        self.args = self.verbose + self.workload + self.scratch \
+        + self.iterations + test
+        # Execute the actual test
+        try:
+            utils.system('java -jar %s %s' % (self.dacapo_pkg, self.args))
+        except:
+            e_msg = \
+            'Test %s has failed, command line options "%s"' % (test, self.args)
+            raise error.TestError(e_msg)
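
Note: unmap_url_cache() above is used as a download-once cache keyed by an md5
sum taken from the config file: re-download only when the cached copy is missing
or its checksum no longer matches. A sketch of that assumed contract (not the
actual autotest_utils implementation), in the Python 2 idiom of the surrounding
code:

    import hashlib, os, urllib

    def md5sum(path):
        m = hashlib.md5()
        f = open(path, 'rb')
        for block in iter(lambda: f.read(1 << 20), ''):
            m.update(block)
        f.close()
        return m.hexdigest()

    def unmap_url_cache(cachedir, url, expected_md5):
        """Assumed behaviour: fetch url into cachedir unless a copy with a
        matching md5 is already there; verify the checksum after download."""
        dest = os.path.join(cachedir, os.path.basename(url))
        if not (os.path.exists(dest) and md5sum(dest) == expected_md5):
            urllib.urlretrieve(url, dest)   # (re)fetch on miss or mismatch
            if md5sum(dest) != expected_md5:
                raise ValueError('md5 mismatch for %s' % url)
        return dest
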
diff --git a/client/tests/dbench/dbench.py b/client/tests/dbench/dbench.py
index a75adc6..085b4b8 100755
--- a/client/tests/dbench/dbench.py
+++ b/client/tests/dbench/dbench.py
@@ -3,47 +3,47 @@
 from autotest_lib.client.common_lib import utils
 
 class dbench(test.test):
-	version = 1
+    version = 1
 
-	# http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
-	def setup(self, tarball = 'dbench-3.04.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
+    def setup(self, tarball = 'dbench-3.04.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('./configure')
-		utils.system('make')
+        utils.system('./configure')
+        utils.system('make')
 
 
-	def execute(self, iterations = 1, dir = None, nprocs = None, args = ''):
-		if not nprocs:
-			nprocs = self.job.cpu_count()
-		profilers = self.job.profilers
-		args = args + ' -c '+self.srcdir+'/client.txt'
-		if dir:
-			args += ' -D ' + dir
-		args += ' %s' % nprocs
-		cmd = self.srcdir + '/dbench ' + args
-		results = []
-		if not profilers.only():
-			for i in range(iterations):
-				results.append(utils.system_output(cmd,
-							retain_output=True))
+    def execute(self, iterations = 1, dir = None, nprocs = None, args = ''):
+        if not nprocs:
+            nprocs = self.job.cpu_count()
+        profilers = self.job.profilers
+        args = args + ' -c '+self.srcdir+'/client.txt'
+        if dir:
+            args += ' -D ' + dir
+        args += ' %s' % nprocs
+        cmd = self.srcdir + '/dbench ' + args
+        results = []
+        if not profilers.only():
+            for i in range(iterations):
+                results.append(utils.system_output(cmd,
+                                        retain_output=True))
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			results.append(utils.system_output(cmd,
-			                                   retain_output=True))
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            results.append(utils.system_output(cmd,
+                                               retain_output=True))
+            profilers.stop(self)
+            profilers.report(self)
 
-		self.__format_results("\n".join(results))
+        self.__format_results("\n".join(results))
 
 
-	def __format_results(self, results):
-		out = open(self.resultsdir + '/keyval', 'w')
-		pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
-		for result in pattern.findall(results):
-			print >> out, "throughput=%s\nprocs=%s\n" % result
-		out.close()
+    def __format_results(self, results):
+        out = open(self.resultsdir + '/keyval', 'w')
+        pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
+        for result in pattern.findall(results):
+            print >> out, "throughput=%s\nprocs=%s\n" % result
+        out.close()
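
Note: dbench prints summary lines of the form 'Throughput 440.237 MB/sec 4 procs';
the non-greedy regex above captures both numbers from every such line in the
concatenated iteration output. For example:

    import re

    pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
    sample = ("Throughput 440.237 MB/sec 4 procs\n"
              "Throughput 438.102 MB/sec 4 procs")
    for throughput, procs in pattern.findall(sample):
        print("throughput=%s procs=%s" % (throughput, procs))
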
diff --git a/client/tests/dbt2/dbt2.py b/client/tests/dbt2/dbt2.py
index d35018b..5d6d6fc 100644
--- a/client/tests/dbt2/dbt2.py
+++ b/client/tests/dbt2/dbt2.py
@@ -3,72 +3,72 @@
 from autotest_lib.client.common_lib import utils
 
 
-# Dbt-2 is a fair-use implementation of the TPC-C benchmark.  The test is 
+# Dbt-2 is a fair-use implementation of the TPC-C benchmark.  The test is
 # currently hardcoded to use PostgreSQL but the kit also supports MySQL.
 
 class dbt2(test.test):
-	version = 2
+    version = 2
 
-	# http://osdn.dl.sourceforge.net/sourceforge/osdldbt/dbt2-0.39.tar.gz
-	def setup(self, tarball = 'dbt2-0.39.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		self.job.setup_dep(['pgsql', 'pgpool', 'mysql'])
+    # http://osdn.dl.sourceforge.net/sourceforge/osdldbt/dbt2-0.39.tar.gz
+    def setup(self, tarball = 'dbt2-0.39.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        self.job.setup_dep(['pgsql', 'pgpool', 'mysql'])
 
-		#
-		# Extract one copy of the kit for MySQL.
-		#
-		utils.system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.mysql')
-		os.chdir(self.srcdir + '.mysql')
-		utils.system('./configure --with-mysql=%s/deps/mysql/mysql' \
-				% self.autodir)
-		utils.system('make')
+        #
+        # Extract one copy of the kit for MySQL.
+        #
+        utils.system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.mysql')
+        os.chdir(self.srcdir + '.mysql')
+        utils.system('./configure --with-mysql=%s/deps/mysql/mysql' \
+                        % self.autodir)
+        utils.system('make')
 
-		#
-		# Extract one copy of the kit for PostgreSQL.
-		#
-		utils.system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.pgsql')
-		os.chdir(self.srcdir + '.pgsql')
-		utils.system('./configure --with-postgresql=%s/deps/pgsql/pgsql' \
-				% self.autodir)
-		utils.system('make')
+        #
+        # Extract one copy of the kit for PostgreSQL.
+        #
+        utils.system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.pgsql')
+        os.chdir(self.srcdir + '.pgsql')
+        utils.system('./configure --with-postgresql=%s/deps/pgsql/pgsql' \
+                        % self.autodir)
+        utils.system('make')
 
-		# Create symlinks to autotest's results directory from dbt-2's
-		# preferred results directory to self.resultsdir
-		utils.system('ln -s %s %s' % (self.resultsdir, \
-				self.srcdir + '.mysql/scripts/output'))
-		utils.system('ln -s %s %s' % (self.resultsdir, \
-				self.srcdir + '.pgsql/scripts/output'))
+        # Create symlinks to autotest's results directory from dbt-2's
+        # preferred results directory to self.resultsdir
+        utils.system('ln -s %s %s' % (self.resultsdir, \
+                        self.srcdir + '.mysql/scripts/output'))
+        utils.system('ln -s %s %s' % (self.resultsdir, \
+                        self.srcdir + '.pgsql/scripts/output'))
 
-	def execute(self, db_type, args = ''):
-		logfile = self.resultsdir + '/dbt2.log'
+    def execute(self, db_type, args = ''):
+        logfile = self.resultsdir + '/dbt2.log'
 
-		if (db_type == "mysql"):
-			self.execute_mysql(args)
-		elif (db_type == "pgpool"):
-			self.execute_pgpool(args)
-		elif (db_type == "pgsql"):
-			self.execute_pgsql(args)
+        if (db_type == "mysql"):
+            self.execute_mysql(args)
+        elif (db_type == "pgpool"):
+            self.execute_pgpool(args)
+        elif (db_type == "pgsql"):
+            self.execute_pgsql(args)
 
-	def execute_mysql(self, args = ''):
-		args = args
-		utils.system(self.srcdir + '.mysql/scripts/mysql/build_db.sh -g -w 1')
-		utils.system(self.srcdir + '.mysql/scripts/run_workload.sh ' + args)
+    def execute_mysql(self, args = ''):
+        args = args
+        utils.system(self.srcdir + '.mysql/scripts/mysql/build_db.sh -g -w 1')
+        utils.system(self.srcdir + '.mysql/scripts/run_workload.sh ' + args)
 
- 	def execute_pgpool(self, args = ''):
- 		utils.system('%s/deps/pgpool/pgpool/bin/pgpool -f %s/../pgpool.conf' \
- 				% (self.autodir, self.srcdir))
- 		self.execute_pgsql(args)
- 		utils.system('%s/deps/pgpool/pgpool/bin/pgpool stop' % self.autodir)
+    def execute_pgpool(self, args = ''):
+        utils.system('%s/deps/pgpool/pgpool/bin/pgpool -f %s/../pgpool.conf' \
+                        % (self.autodir, self.srcdir))
+        self.execute_pgsql(args)
+        utils.system('%s/deps/pgpool/pgpool/bin/pgpool stop' % self.autodir)
 
 
-	def execute_pgsql(self, args = ''):
-		utils.system(self.srcdir + '.pgsql/scripts/pgsql/build_db.sh -g -w 1')
-		utils.system(self.srcdir + '.pgsql/scripts/run_workload.sh ' + args)
-		#
-		# Clean up by dropping the database after the test.
-		#
-		utils.system(self.srcdir + '.pgsql/scripts/pgsql/start_db.sh')
-		utils.system(self.srcdir + '.pgsql/scripts/pgsql/drop_db.sh')
-		utils.system(self.srcdir + '.pgsql/scripts/pgsql/stop_db.sh')
+    def execute_pgsql(self, args = ''):
+        utils.system(self.srcdir + '.pgsql/scripts/pgsql/build_db.sh -g -w 1')
+        utils.system(self.srcdir + '.pgsql/scripts/run_workload.sh ' + args)
+        #
+        # Clean up by dropping the database after the test.
+        #
+        utils.system(self.srcdir + '.pgsql/scripts/pgsql/start_db.sh')
+        utils.system(self.srcdir + '.pgsql/scripts/pgsql/drop_db.sh')
+        utils.system(self.srcdir + '.pgsql/scripts/pgsql/stop_db.sh')
diff --git a/client/tests/disktest/disktest.py b/client/tests/disktest/disktest.py
index e021cb8..2019c00 100755
--- a/client/tests/disktest/disktest.py
+++ b/client/tests/disktest/disktest.py
@@ -4,58 +4,57 @@
 
 
 class disktest(test.test):
-	version = 1
+    version = 1
 
-	def setup(self):
-		os.mkdir(self.srcdir)
-		os.chdir(self.bindir)
-		utils.system('cp disktest.c src/')
-		os.chdir(self.srcdir)
-		cflags = '-D_FILE_OFFSET_BITS=64 -D _GNU_SOURCE -static -Wall'
-		utils.system('cc disktest.c ' + cflags + ' -o disktest')
+    def setup(self):
+        os.mkdir(self.srcdir)
+        os.chdir(self.bindir)
+        utils.system('cp disktest.c src/')
+        os.chdir(self.srcdir)
+        cflags = '-D_FILE_OFFSET_BITS=64 -D _GNU_SOURCE -static -Wall'
+        utils.system('cc disktest.c ' + cflags + ' -o disktest')
 
 
-	def test_one_disk_chunk(self, disk, chunk):
-		print "testing %d MB files on %s in %d MB memory" % \
-					(self.chunk_mb, disk, self.memory_mb)
-		cmd = "%s/disktest -m %d -f %s/testfile.%d -i -S" % \
-				(self.srcdir, self.chunk_mb, disk, chunk)
-		p = subprocess.Popen(cmd, shell=True)
-		return(p.pid)
+    def test_one_disk_chunk(self, disk, chunk):
+        print "testing %d MB files on %s in %d MB memory" % \
+                                (self.chunk_mb, disk, self.memory_mb)
+        cmd = "%s/disktest -m %d -f %s/testfile.%d -i -S" % \
+                        (self.srcdir, self.chunk_mb, disk, chunk)
+        p = subprocess.Popen(cmd, shell=True)
+        return(p.pid)
 
 
-	def execute(self, disks = None, gigabytes = None,
-				chunk_mb = autotest_utils.memtotal() / 1024):
-		os.chdir(self.srcdir)
+    def execute(self, disks = None, gigabytes = None,
+                            chunk_mb = autotest_utils.memtotal() / 1024):
+        os.chdir(self.srcdir)
 
-		if not disks:
-			disks = [self.tmpdir]
-		if not gigabytes:
-			free = 100       # cap it at 100GB by default
-			for disk in disks:
-				free = min(autotest_utils.freespace(disk) / 1024**3, free)
-			gigabytes = free
-			print "resizing to %s GB" % gigabytes
-			sys.stdout.flush()
+        if not disks:
+            disks = [self.tmpdir]
+        if not gigabytes:
+            free = 100       # cap it at 100GB by default
+            for disk in disks:
+                free = min(autotest_utils.freespace(disk) / 1024**3, free)
+            gigabytes = free
+            print "resizing to %s GB" % gigabytes
+            sys.stdout.flush()
 
-		self.chunk_mb = chunk_mb
-		self.memory_mb = autotest_utils.memtotal()/1024
-		if self.memory_mb > chunk_mb:
-			e_msg = "Too much RAM (%dMB) for this test to work" % self.memory_mb
-			raise error.TestError(e_msg)
+        self.chunk_mb = chunk_mb
+        self.memory_mb = autotest_utils.memtotal()/1024
+        if self.memory_mb > chunk_mb:
+            e_msg = "Too much RAM (%dMB) for this test to work" % self.memory_mb
+            raise error.TestError(e_msg)
 
-		chunks = (1024 * gigabytes) / chunk_mb
+        chunks = (1024 * gigabytes) / chunk_mb
 
-		for i in range(chunks):
-			pids = []
-			for disk in disks:
-				pid = self.test_one_disk_chunk(disk, i)
-				pids.append(pid)
-			errors = []
-			for pid in pids:
-				(junk, retval) = os.waitpid(pid, 0)
-				if (retval != 0):
-					errors.append(retval)
-			if errors:
-				raise error.TestError("Errors from children: %s" % errors)
-
+        for i in range(chunks):
+            pids = []
+            for disk in disks:
+                pid = self.test_one_disk_chunk(disk, i)
+                pids.append(pid)
+            errors = []
+            for pid in pids:
+                (junk, retval) = os.waitpid(pid, 0)
+                if (retval != 0):
+                    errors.append(retval)
+            if errors:
+                raise error.TestError("Errors from children: %s" % errors)
diff --git a/client/tests/fio/fio.py b/client/tests/fio/fio.py
index e1d9734..aad6bf7 100644
--- a/client/tests/fio/fio.py
+++ b/client/tests/fio/fio.py
@@ -4,36 +4,36 @@
 
 
 class fio(test.test):
-	version = 2
+    version = 2
 
-	# http://brick.kernel.dk/snaps/fio-1.16.5.tar.bz2
-	def setup(self, tarball = 'fio-1.16.5.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+    # http://brick.kernel.dk/snaps/fio-1.16.5.tar.bz2
+    def setup(self, tarball = 'fio-1.16.5.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
 
-		self.job.setup_dep(['libaio'])
-		ldflags = '-L' + self.autodir + '/deps/libaio/lib'
-		cflags = '-I' + self.autodir + '/deps/libaio/include'
-		var_ldflags = 'LDFLAGS="' + ldflags + '"'
-		var_cflags  = 'CFLAGS="' + cflags + '"'
+        self.job.setup_dep(['libaio'])
+        ldflags = '-L' + self.autodir + '/deps/libaio/lib'
+        cflags = '-I' + self.autodir + '/deps/libaio/include'
+        var_ldflags = 'LDFLAGS="' + ldflags + '"'
+        var_cflags  = 'CFLAGS="' + cflags + '"'
 
-		os.chdir(self.srcdir)
-		utils.system('patch -p1 < ../Makefile.patch')
-		utils.system('%s %s make' % (var_ldflags, var_cflags))
+        os.chdir(self.srcdir)
+        utils.system('patch -p1 < ../Makefile.patch')
+        utils.system('%s %s make' % (var_ldflags, var_cflags))
 
-	def execute(self, args = '', user = 'root'):
-		os.chdir(self.srcdir)
-		##vars = 'TMPDIR=\"%s\" RESULTDIR=\"%s\"' % (self.tmpdir, self.resultsdir)
-		vars = 'LD_LIBRARY_PATH="' + self.autodir + '/deps/libaio/lib"'
-		##args = '-m -o ' + self.resultsdir + '/fio-tio.log ' + self.srcdir + '/examples/tiobench-example';
-		args = '--output ' + self.resultsdir + '/fio-mixed.log ' + self.bindir + '/fio-mixed.job';
-		utils.system(vars + ' ./fio ' + args)
+    def execute(self, args = '', user = 'root'):
+        os.chdir(self.srcdir)
+        ##vars = 'TMPDIR=\"%s\" RESULTDIR=\"%s\"' % (self.tmpdir, self.resultsdir)
+        vars = 'LD_LIBRARY_PATH="' + self.autodir + '/deps/libaio/lib"'
+        ##args = '-m -o ' + self.resultsdir + '/fio-tio.log ' + self.srcdir + '/examples/tiobench-example';
+        args = '--output ' + self.resultsdir + '/fio-mixed.log ' + self.bindir + '/fio-mixed.job'
+        utils.system(vars + ' ./fio ' + args)
 
-		# Do a profiling run if necessary
-		profilers = self.job.profilers
-		if profilers.present():
-			profilers.start(self)
-			utils.system(vars + ' ./fio ' + args)
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        profilers = self.job.profilers
+        if profilers.present():
+            profilers.start(self)
+            utils.system(vars + ' ./fio ' + args)
+            profilers.stop(self)
+            profilers.report(self)
diff --git a/client/tests/fs_mark/fs_mark.py b/client/tests/fs_mark/fs_mark.py
index f38f011..6b52e19 100644
--- a/client/tests/fs_mark/fs_mark.py
+++ b/client/tests/fs_mark/fs_mark.py
@@ -4,30 +4,30 @@
 
 
 class fs_mark(test.test):
-	version = 1
+    version = 1
 
-	# http://developer.osdl.org/dev/doubt/fs_mark/archive/fs_mark-3.2.tgz
-	def setup(self, tarball = 'fs_mark-3.2.tgz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://developer.osdl.org/dev/doubt/fs_mark/archive/fs_mark-3.2.tgz
+    def setup(self, tarball = 'fs_mark-3.2.tgz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
-		
-	def execute(self, dir, iterations = 2, args = None):
-		os.chdir(self.srcdir)
-		if not args:
-			# Just provide a sample run parameters
-			args = '-s 10240 -n 1000'
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				utils.system('./fs_mark -d %s %s' %(dir, args))
+        utils.system('make')
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system('./fs_mark -d %s %s' %(dir, args))
-			profilers.stop(self)
-			profilers.report(self)
+    def execute(self, dir, iterations = 2, args = None):
+        os.chdir(self.srcdir)
+        if not args:
+            # Just provide sample run parameters
+            args = '-s 10240 -n 1000'
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                utils.system('./fs_mark -d %s %s' %(dir, args))
+
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system('./fs_mark -d %s %s' %(dir, args))
+            profilers.stop(self)
+            profilers.report(self)
diff --git a/client/tests/fsfuzzer/fsfuzzer.py b/client/tests/fsfuzzer/fsfuzzer.py
index 07d2c09..d8310e3 100755
--- a/client/tests/fsfuzzer/fsfuzzer.py
+++ b/client/tests/fsfuzzer/fsfuzzer.py
@@ -4,27 +4,27 @@
 
 
 class fsfuzzer(test.test):
-	version = 1
+    version = 1
 
-	# http://people.redhat.com/sgrubb/files/fsfuzzer-0.6.tar.gz
-	def setup(self, tarball = 'fsfuzzer-0.6.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://people.redhat.com/sgrubb/files/fsfuzzer-0.6.tar.gz
+    def setup(self, tarball = 'fsfuzzer-0.6.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
-		
-	def execute(self, iterations = 1, fstype = 'iso9660'):
-		profilers = self.job.profilers
-		args = fstype + ' 1'
-		if not profilers.only():
-			for i in range(iterations):
-				utils.system(self.srcdir + '/run_test ' + args)
+        utils.system('make')
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system(self.srcdir + '/run_test ' + args)
-			profilers.stop(self)
-			profilers.report(self)
+    def execute(self, iterations = 1, fstype = 'iso9660'):
+        profilers = self.job.profilers
+        args = fstype + ' 1'
+        if not profilers.only():
+            for i in range(iterations):
+                utils.system(self.srcdir + '/run_test ' + args)
+
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system(self.srcdir + '/run_test ' + args)
+            profilers.stop(self)
+            profilers.report(self)
diff --git a/client/tests/fsstress/fsstress.py b/client/tests/fsstress/fsstress.py
index 9b2a2db..6a6e20f 100644
--- a/client/tests/fsstress/fsstress.py
+++ b/client/tests/fsstress/fsstress.py
@@ -5,33 +5,33 @@
 
 
 class fsstress(test.test):
-	version = 1
+    version = 1
 
-	# http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
-	def setup(self, tarball = 'ext3-tools.tar.gz'):
-		self.tarball = utils.unmap_url(self.bindir, tarball,
-		                                        self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+    # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
+    def setup(self, tarball = 'ext3-tools.tar.gz'):
+        self.tarball = utils.unmap_url(self.bindir, tarball,
+                                                self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
 
-		os.chdir(self.srcdir)
-		utils.system('patch -p1 < ../fsstress-ltp.patch')
-		utils.system('make fsstress')
+        os.chdir(self.srcdir)
+        utils.system('patch -p1 < ../fsstress-ltp.patch')
+        utils.system('make fsstress')
 
 
-	def execute(self, testdir = None, extra_args = '', nproc = '1000', nops = '1000'):
-		if not testdir:
-			testdir = self.tmpdir
+    def execute(self, testdir = None, extra_args = '', nproc = '1000', nops = '1000'):
+        if not testdir:
+            testdir = self.tmpdir
 
-		args = '-d ' + testdir + ' -p ' + nproc + ' -n ' + nops + ' ' + extra_args
+        args = '-d ' + testdir + ' -p ' + nproc + ' -n ' + nops + ' ' + extra_args
 
-		cmd = self.srcdir + '/fsstress ' + args
-		profilers = self.job.profilers
-		if not profilers.only():
-			utils.system(cmd)
+        cmd = self.srcdir + '/fsstress ' + args
+        profilers = self.job.profilers
+        if not profilers.only():
+            utils.system(cmd)
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system(cmd)
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system(cmd)
+            profilers.stop(self)
+            profilers.report(self)
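
Note: fsstress is the smallest instance of the control flow shared by most tests
in this patch: run the measured iterations unless profilers.only() asks for
profiling alone, then, if any profiler is configured, do one extra run bracketed
by start/stop/report. The pattern, factored out as an illustrative sketch (the
real tests inline this logic):

    def run_with_profilers(job, test, run_once, iterations=1):
        """run_once is a zero-argument callable doing one benchmark run."""
        profilers = job.profilers
        if not profilers.only():        # the normal measured iterations
            for _ in range(iterations):
                run_once()
        if profilers.present():         # one extra run under the profilers
            profilers.start(test)
            run_once()
            profilers.stop(test)
            profilers.report(test)
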
diff --git a/client/tests/fsx/fsx.py b/client/tests/fsx/fsx.py
index 5e77cf1..08328be 100755
--- a/client/tests/fsx/fsx.py
+++ b/client/tests/fsx/fsx.py
@@ -1,7 +1,7 @@
 # This requires aio headers to build.
 # Should work automagically out of deps now.
 
-# NOTE - this should also have the ability to mount a filesystem, 
+# NOTE - this should also have the ability to mount a filesystem,
 # run the tests, unmount it, then fsck the filesystem
 import os
 from autotest_lib.client.bin import test, autotest_utils
@@ -9,43 +9,43 @@
 
 
 class fsx(test.test):
-	version = 3
+    version = 3
 
-	# http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
-	def setup(self, tarball = 'ext3-tools.tar.gz'):
-		self.tarball = utils.unmap_url(self.bindir, tarball,
-		                                        self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+    # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
+    def setup(self, tarball = 'ext3-tools.tar.gz'):
+        self.tarball = utils.unmap_url(self.bindir, tarball,
+                                                self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
 
-		self.job.setup_dep(['libaio'])
-		ldflags = '-L' + self.autodir + '/deps/libaio/lib'
-		cflags = '-I' + self.autodir + '/deps/libaio/include'
-		var_ldflags = 'LDFLAGS="' + ldflags + '"'
-		var_cflags  = 'CFLAGS="' + cflags + '"'
-		self.make_flags = var_ldflags + ' ' + var_cflags
-		
-		os.chdir(self.srcdir)
-		utils.system('patch -p1 < ../fsx-linux.diff')
-		utils.system(self.make_flags + ' make fsx-linux')
+        self.job.setup_dep(['libaio'])
+        ldflags = '-L' + self.autodir + '/deps/libaio/lib'
+        cflags = '-I' + self.autodir + '/deps/libaio/include'
+        var_ldflags = 'LDFLAGS="' + ldflags + '"'
+        var_cflags  = 'CFLAGS="' + cflags + '"'
+        self.make_flags = var_ldflags + ' ' + var_cflags
+
+        os.chdir(self.srcdir)
+        utils.system('patch -p1 < ../fsx-linux.diff')
+        utils.system(self.make_flags + ' make fsx-linux')
 
 
-	def execute(self, testdir = None, repeat = '100000'):
-		args = '-N ' + repeat
-		if not testdir:
-			testdir = self.tmpdir
-		os.chdir(testdir)
-		libs = self.autodir+'/deps/libaio/lib/'
-		ld_path = autotest_utils.prepend_path(libs,
-                                   autotest_utils.environ('LD_LIBRARY_PATH'))
-		var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
-		cmd = self.srcdir + '/fsx-linux ' + args + ' poo'
-		profilers = self.job.profilers
-		if not profilers.only():
-			utils.system(var_ld_path + ' ' + cmd)
+    def execute(self, testdir = None, repeat = '100000'):
+        args = '-N ' + repeat
+        if not testdir:
+            testdir = self.tmpdir
+        os.chdir(testdir)
+        libs = self.autodir+'/deps/libaio/lib/'
+        ld_path = autotest_utils.prepend_path(libs,
+                           autotest_utils.environ('LD_LIBRARY_PATH'))
+        var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
+        cmd = self.srcdir + '/fsx-linux ' + args + ' poo'
+        profilers = self.job.profilers
+        if not profilers.only():
+            utils.system(var_ld_path + ' ' + cmd)
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system(var_ld_path + ' ' + cmd)
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system(var_ld_path + ' ' + cmd)
+            profilers.stop(self)
+            profilers.report(self)
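
Note: fsx builds its LD_LIBRARY_PATH with autotest_utils.prepend_path(), which
the code above assumes puts the libaio directory ahead of any pre-existing
search path. An illustrative stand-in with that assumed behaviour:

    def prepend_path(newdir, oldpath):
        """Assumed contract: newdir first, any existing path after ':'."""
        if oldpath:
            return newdir + ':' + oldpath
        return newdir

    assert prepend_path('/a/lib', '') == '/a/lib'
    assert prepend_path('/a/lib', '/usr/lib') == '/a/lib:/usr/lib'
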
diff --git a/client/tests/interbench/interbench.py b/client/tests/interbench/interbench.py
index 0bbdec4..aca7834 100644
--- a/client/tests/interbench/interbench.py
+++ b/client/tests/interbench/interbench.py
@@ -4,31 +4,31 @@
 
 
 class interbench(test.test):
-	version = 1
+    version = 1
 
-	# http://www.kernel.org/pub/linux/kernel/people/ck/apps/interbench/interbench-0.30.tar.bz2
-	def setup(self, tarball = 'interbench-0.30.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://www.kernel.org/pub/linux/kernel/people/ck/apps/interbench/interbench-0.30.tar.bz2
+    def setup(self, tarball = 'interbench-0.30.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
-		
-	def execute(self, iterations = 1, args = ''):
-		os.chdir(self.tmpdir)
-		args += " -c"
+        utils.system('make')
 
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				utils.system("%s/interbench -m 'run #%s' %s" % \
-					(self.srcdir, i, args))
+    def execute(self, iterations = 1, args = ''):
+        os.chdir(self.tmpdir)
+        args += " -c"
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system("%s/interbench -m 'profile run' %s" % \
-				(self.srcdir, args))
-			profilers.stop(self)
-			profilers.report(self)
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                utils.system("%s/interbench -m 'run #%s' %s" % \
+                        (self.srcdir, i, args))
+
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system("%s/interbench -m 'profile run' %s" % \
+                    (self.srcdir, args))
+            profilers.stop(self)
+            profilers.report(self)
diff --git a/client/tests/iozone/iozone.py b/client/tests/iozone/iozone.py
index 26c23d6..8347a32 100644
--- a/client/tests/iozone/iozone.py
+++ b/client/tests/iozone/iozone.py
@@ -5,66 +5,66 @@
 
 
 class iozone(test.test):
-	version = 1
+    version = 1
 
-	# http://www.iozone.org/src/current/iozone3_283.tar
-	def setup(self, tarball = 'iozone3_283.tar'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(os.path.join(self.srcdir, 'src/current'))
+    # http://www.iozone.org/src/current/iozone3_283.tar
+    def setup(self, tarball = 'iozone3_283.tar'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(os.path.join(self.srcdir, 'src/current'))
 
-		arch = autotest_utils.get_current_kernel_arch()
-		if (arch == 'ppc'):
-			utils.system('make linux-powerpc')
-		elif (arch == 'ppc64'):
-			utils.system('make linux-powerpc64')
-		elif (arch == 'x86_64'):
-			utils.system('make linux-AMD64')
-		else: 
-			utils.system('make linux')
+        arch = autotest_utils.get_current_kernel_arch()
+        if (arch == 'ppc'):
+            utils.system('make linux-powerpc')
+        elif (arch == 'ppc64'):
+            utils.system('make linux-powerpc64')
+        elif (arch == 'x86_64'):
+            utils.system('make linux-AMD64')
+        else:
+            utils.system('make linux')
 
 
-	def execute(self, dir = None, iterations=1, args = None):
-		self.keyval = open(os.path.join(self.resultsdir, 'keyval'),
-		                   'w')
-		if not dir:
-			dir = self.tmpdir
-		os.chdir(dir)
-		if not args:
-			args = '-a'
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				output = utils.system_output('%s/src/current/iozone %s' %
-				                       (self.srcdir, args))
-				self.__format_results(output)
+    def execute(self, dir = None, iterations=1, args = None):
+        self.keyval = open(os.path.join(self.resultsdir, 'keyval'),
+                           'w')
+        if not dir:
+            dir = self.tmpdir
+        os.chdir(dir)
+        if not args:
+            args = '-a'
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                output = utils.system_output('%s/src/current/iozone %s' %
+                                       (self.srcdir, args))
+                self.__format_results(output)
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			output = utils.system_output('%s/src/current/iozone %s' %
-			                       (self.srcdir, args))
-			self.__format_results(output)
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            output = utils.system_output('%s/src/current/iozone %s' %
+                                   (self.srcdir, args))
+            self.__format_results(output)
+            profilers.stop(self)
+            profilers.report(self)
 
-		self.keyval.close()
+        self.keyval.close()
 
 
-	def __format_results(self, results):
-		labels = ('write', 'rewrite', 'read', 'reread', 'randread',
-			  'randwrite', 'bkwdread', 'recordrewrite',
-			  'strideread', 'fwrite', 'frewrite',
-			  'fread', 'freread')
-		for line in results.splitlines():
-			fields = line.split()
-			if len(fields) != 15:
-				continue
-			try:
-				fields = tuple([int(i) for i in fields])
-			except ValueError:
-				continue
-			for l, v in zip(labels, fields[2:]):
-				print >> self.keyval, "%d-%d-%s=%d" % (fields[0], fields[1], l, v)
-		print >> self.keyval
+    def __format_results(self, results):
+        labels = ('write', 'rewrite', 'read', 'reread', 'randread',
+                  'randwrite', 'bkwdread', 'recordrewrite',
+                  'strideread', 'fwrite', 'frewrite',
+                  'fread', 'freread')
+        for line in results.splitlines():
+            fields = line.split()
+            if len(fields) != 15:
+                continue
+            try:
+                fields = tuple([int(i) for i in fields])
+            except ValueError:
+                continue
+            for l, v in zip(labels, fields[2:]):
+                print >> self.keyval, "%d-%d-%s=%d" % (fields[0], fields[1], l, v)
+        print >> self.keyval
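
__format_results above keeps only the all-integer 15-field lines of iozone output (file size, record size, then thirteen throughput figures) and emits one size-recsize-label=value keyval per figure. A self-contained sketch of that parsing, with an illustrative input line rather than real iozone output:

labels = ('write', 'rewrite', 'read', 'reread', 'randread',
          'randwrite', 'bkwdread', 'recordrewrite',
          'strideread', 'fwrite', 'frewrite', 'fread', 'freread')

def keyvals_for_line(line):
    fields = line.split()
    if len(fields) != 15:
        return []                  # header and blank lines are skipped
    try:
        fields = [int(f) for f in fields]
    except ValueError:
        return []                  # non-numeric rows are skipped too
    size, recsize = fields[0], fields[1]
    return ['%d-%d-%s=%d' % (size, recsize, label, value)
            for label, value in zip(labels, fields[2:])]

# Illustrative 15-field row: 64K file, 4K records, 13 throughput figures.
print keyvals_for_line('64 4 ' + ' '.join(['100'] * 13))
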
diff --git a/client/tests/isic/isic.py b/client/tests/isic/isic.py
index 9378830..df616be 100644
--- a/client/tests/isic/isic.py
+++ b/client/tests/isic/isic.py
@@ -4,23 +4,23 @@
 
 
 class isic(test.test):
-	version = 2
+    version = 2
 
-	# http://www.packetfactory.net/Projects/ISIC/isic-0.06.tgz
-	# + http://www.stardust.webpages.pl/files/crap/isic-gcc41-fix.patch
+    # http://www.packetfactory.net/Projects/ISIC/isic-0.06.tgz
+    # + http://www.stardust.webpages.pl/files/crap/isic-gcc41-fix.patch
 
-        def initialize(self):
-		self.job.setup_dep(['libnet'])
+    def initialize(self):
+        self.job.setup_dep(['libnet'])
 
-	def setup(self, tarball = 'isic-0.06.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    def setup(self, tarball = 'isic-0.06.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('patch -p1 < ../build-fixes.patch')
-		utils.system('PREFIX=' + self.autodir + '/deps/libnet/libnet/ ./configure')
-		utils.system('make')
+        utils.system('patch -p1 < ../build-fixes.patch')
+        utils.system('PREFIX=' + self.autodir + '/deps/libnet/libnet/ ./configure')
+        utils.system('make')
 
-	def execute(self, args = '-s rand -d 127.0.0.1 -p 10000000'):
-		utils.system(self.srcdir + '/isic ' + args)
+    def execute(self, args = '-s rand -d 127.0.0.1 -p 10000000'):
+        utils.system(self.srcdir + '/isic ' + args)
diff --git a/client/tests/kernbench/kernbench.py b/client/tests/kernbench/kernbench.py
index 8abf6a3..de7d849 100755
--- a/client/tests/kernbench/kernbench.py
+++ b/client/tests/kernbench/kernbench.py
@@ -4,107 +4,107 @@
 
 
 class kernbench(test.test):
-	version = 2
+    version = 2
 
-	def setup(self, build_dir = None):
-		if not build_dir:
-			build_dir = self.srcdir
-		os.mkdir(build_dir)
+    def setup(self, build_dir = None):
+        if not build_dir:
+            build_dir = self.srcdir
+        os.mkdir(build_dir)
 
 
-	def __init_tree(self, build_dir, version = None):
-		#
-		# If we have a local copy of the 2.6.14 tarball use that
-		# else let the kernel object use the defined mirrors
-		# to obtain it.
-		#
-		# http://kernel.org/pub/linux/kernel/v2.6/linux-2.6.14.tar.bz2
-		#
-		# On ia64, we default to 2.6.20, as it can't compile 2.6.14.
-		if version:
-			default_ver = version
-		elif autotest_utils.get_current_kernel_arch() == 'ia64':
-			default_ver = '2.6.20'
-		else:
-			default_ver = '2.6.14'
+    def __init_tree(self, build_dir, version = None):
+        #
+        # If we have a local copy of the 2.6.14 tarball use that
+        # else let the kernel object use the defined mirrors
+        # to obtain it.
+        #
+        # http://kernel.org/pub/linux/kernel/v2.6/linux-2.6.14.tar.bz2
+        #
+        # On ia64, we default to 2.6.20, as it can't compile 2.6.14.
+        if version:
+            default_ver = version
+        elif autotest_utils.get_current_kernel_arch() == 'ia64':
+            default_ver = '2.6.20'
+        else:
+            default_ver = '2.6.14'
 
-		kversionfile = os.path.join(build_dir, ".kversion")
-		install_needed = True
-		if os.path.exists(kversionfile):
-			old_version = pickle.load(open(kversionfile, 'r'))
-			if (old_version == default_ver):
-				install_needed = False
+        kversionfile = os.path.join(build_dir, ".kversion")
+        install_needed = True
+        if os.path.exists(kversionfile):
+            old_version = pickle.load(open(kversionfile, 'r'))
+            if (old_version == default_ver):
+                install_needed = False
 
-		if not install_needed:
-			return
+        if not install_needed:
+            return
 
-		# Clear out the old version
-		utils.system("echo rm -rf '" + build_dir + "/*'")
+        # Clear out the old version
+        utils.system("echo rm -rf '" + build_dir + "/*'")
 
-		pickle.dump(default_ver, open(kversionfile, 'w'))
+        pickle.dump(default_ver, open(kversionfile, 'w'))
 
-		tarball = None
-		for dir in (self.bindir, '/usr/local/src'):
-			tar = 'linux-%s.tar.bz2' % default_ver
-			path = os.path.join(dir, tar)
-			if os.path.exists(path):
-				tarball = path
-				break
-		if not tarball:
-			tarball = default_ver
+        tarball = None
+        for dir in (self.bindir, '/usr/local/src'):
+            tar = 'linux-%s.tar.bz2' % default_ver
+            path = os.path.join(dir, tar)
+            if os.path.exists(path):
+                tarball = path
+                break
+        if not tarball:
+            tarball = default_ver
 
-		# Do the extraction of the kernel tree
-		kernel = self.job.kernel(tarball, self.tmpdir, build_dir)
-		kernel.config(defconfig=True, logged=False)
+        # Do the extraction of the kernel tree
+        kernel = self.job.kernel(tarball, self.tmpdir, build_dir)
+        kernel.config(defconfig=True, logged=False)
 
 
-	def execute(self, iterations = 1, threads = None, dir = None, version = None):
-		if not threads:
-			threads = self.job.cpu_count()*2
-		if dir:
-			build_dir = dir
-		else:
-			build_dir = os.path.join(self.tmpdir, "src")
-			if not os.path.exists(build_dir):
-				os.makedirs(build_dir)
+    def execute(self, iterations = 1, threads = None, dir = None, version = None):
+        if not threads:
+            threads = self.job.cpu_count()*2
+        if dir:
+            build_dir = dir
+        else:
+            build_dir = os.path.join(self.tmpdir, "src")
+            if not os.path.exists(build_dir):
+                os.makedirs(build_dir)
 
-		self.__init_tree(build_dir, version)
+        self.__init_tree(build_dir, version)
 
-		kernel = self.job.kernel(build_dir, self.tmpdir, build_dir,
-								leave = True)
-		print "kernbench x %d: %d threads" % (iterations, threads)
+        kernel = self.job.kernel(build_dir, self.tmpdir, build_dir,
+                                 leave = True)
+        print "kernbench x %d: %d threads" % (iterations, threads)
 
-		logfile = os.path.join(self.debugdir, 'build_log')
+        logfile = os.path.join(self.debugdir, 'build_log')
 
-		print "Warmup run ..."
-		kernel.build_timed(threads, output = logfile)      # warmup run
+        print "Warmup run ..."
+        kernel.build_timed(threads, output = logfile)      # warmup run
 
-		profilers = self.job.profilers
-                if not profilers.only():
-		        for i in range(iterations):
-				print "Performance run, iteration %d ..." % i
-			        timefile = os.path.join(self.resultsdir, 
-								'time.%d' % i)
-			        kernel.build_timed(threads, timefile)
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                print "Performance run, iteration %d ..." % i
+                timefile = os.path.join(self.resultsdir,
+                                        'time.%d' % i)
+                kernel.build_timed(threads, timefile)
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			print "Profiling run ..."
-			timefile = os.path.join(self.resultsdir, 'time.profile')
-			kernel.build_timed(threads, timefile)
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            print "Profiling run ..."
+            timefile = os.path.join(self.resultsdir, 'time.profile')
+            kernel.build_timed(threads, timefile)
+            profilers.stop(self)
+            profilers.report(self)
 
-		kernel.clean(logged=False)    # Don't leave litter lying around
-		os.chdir(self.resultsdir)
-		utils.system("grep -h elapsed time.* > time")
+        kernel.clean(logged=False)    # Don't leave litter lying around
+        os.chdir(self.resultsdir)
+        utils.system("grep -h elapsed time.* > time")
 
-		self.__format_results(open('time').read())
+        self.__format_results(open('time').read())
 
 
-	def __format_results(self, results):
-		out = open('keyval', 'w')
-		for result in autotest_utils.extract_all_time_results(results):
-			print >> out, "user=%s\nsystem=%s\nelapsed=%s\n" % result
-		out.close()
+    def __format_results(self, results):
+        out = open('keyval', 'w')
+        for result in autotest_utils.extract_all_time_results(results):
+            print >> out, "user=%s\nsystem=%s\nelapsed=%s\n" % result
+        out.close()
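
__init_tree avoids re-extracting the kernel tree on every run by pickling the tree version into a .kversion marker inside the build directory. The cache check in isolation, as a sketch under the same file layout:

import os, pickle

def tree_install_needed(build_dir, default_ver):
    # .kversion caches the version of the last tree extracted here.
    kversionfile = os.path.join(build_dir, '.kversion')
    if os.path.exists(kversionfile):
        old_version = pickle.load(open(kversionfile, 'r'))
        if old_version == default_ver:
            return False           # same version: reuse the existing tree
    return True

def remember_tree_version(build_dir, default_ver):
    # Written after clearing the old tree, before extracting the new one.
    pickle.dump(default_ver, open(os.path.join(build_dir, '.kversion'), 'w'))
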
diff --git a/client/tests/kernelbuild/kernelbuild.py b/client/tests/kernelbuild/kernelbuild.py
index 95b639a..836a766 100755
--- a/client/tests/kernelbuild/kernelbuild.py
+++ b/client/tests/kernelbuild/kernelbuild.py
@@ -2,12 +2,12 @@
 
 
 class kernelbuild(test.test):
-	version = 1
+    version = 1
 
-	def execute(self, base_tree, patches, config, config_list = None):
-		kernel = self.job.kernel(base_tree, self.outputdir)
-		if patches:
-			kernel.patch(*patches)
-		kernel.config(config, config_list)
+    def execute(self, base_tree, patches, config, config_list = None):
+        kernel = self.job.kernel(base_tree, self.outputdir)
+        if patches:
+            kernel.patch(*patches)
+        kernel.config(config, config_list)
 
-		kernel.build()
+        kernel.build()
diff --git a/client/tests/kvmtest/control.with_modbuild b/client/tests/kvmtest/control.with_modbuild
index 09c3992..a527c68 100644
--- a/client/tests/kvmtest/control.with_modbuild
+++ b/client/tests/kvmtest/control.with_modbuild
@@ -1,6 +1,6 @@
 # CHANGEME  - point to kvm release tarball
-# 
-# 
+#
+#
 # nightly kvm snapshot base URL
 SNAPBASE = 'http://people.qumranet.com/avi/snapshots/'
 
@@ -19,44 +19,44 @@
 
 
 def install_kvm_external_modules(tarball=None, base=SNAPBASE, daysold=DAYSOLD):
-	dldir = os.environ['AUTODIR']+'/tmp'
-	srcdir = os.environ['AUTODIR']+'/tmp/kvm'
-	print "kvm dldir->%s"%(dldir)
-	print "kvm srcdir->%s"%(srcdir)
-	
-	# ex: http://people.qumranet.com/avi/snapshots/kvm-snapshot-20071021.tar.gz
-	if tarball == None:
-		d = (date.today() - timedelta(days=daysold)).strftime('%Y%m%d')
-		tarball = base+'kvm-snapshot-%s.tar.gz' %(d)
-		sys.stderr.write("tarball url: %s\n" %(tarball))
+    dldir = os.environ['AUTODIR']+'/tmp'
+    srcdir = os.environ['AUTODIR']+'/tmp/kvm'
+    print "kvm dldir->%s"%(dldir)
+    print "kvm srcdir->%s"%(srcdir)
 
-	tarball = unmap_url("/", tarball, dldir)
-	extract_tarball_to_dir(tarball, srcdir)
-	os.chdir(srcdir)
+    # ex: http://people.qumranet.com/avi/snapshots/kvm-snapshot-20071021.tar.gz
+    if tarball == None:
+        d = (date.today() - timedelta(days=daysold)).strftime('%Y%m%d')
+        tarball = base+'kvm-snapshot-%s.tar.gz' %(d)
+        sys.stderr.write("tarball url: %s\n" %(tarball))
 
-	print "detecting cpu vendor..."
-	vendor = "intel"
-	if os.system("grep vmx /proc/cpuinfo 1>/dev/null") != 0:
-		vendor = "amd"
-	print "detected cpu vendor as '%s'" %(vendor)
+    tarball = unmap_url("/", tarball, dldir)
+    extract_tarball_to_dir(tarball, srcdir)
+    os.chdir(srcdir)
 
-	print "building kvm..."
-	system('./configure')
-	system('make')
-	system('make install')
-	print "done building and installing kvm"
+    print "detecting cpu vendor..."
+    vendor = "intel"
+    if os.system("grep vmx /proc/cpuinfo 1>/dev/null") != 0:
+        vendor = "amd"
+    print "detected cpu vendor as '%s'" %(vendor)
 
-	# remove existing in kernel kvm modules
-	print "unloading loaded kvm modules (if present) ..."
-	if system("grep kvm_%s /proc/modules 1>/dev/null" %(vendor), 1) == 0:
-		system("rmmod -f kvm_%s" %(vendor))
-	if system("grep kvm /proc/modules 1>/dev/null", 1) == 0:
-		system("rmmod -f kvm")
+    print "building kvm..."
+    system('./configure')
+    system('make')
+    system('make install')
+    print "done building and installing kvm"
 
-	# load new modules
-	print "loading new kvm modules..."
-	os.chdir(srcdir+'/kernel')
-	system("insmod ./kvm.ko && sleep 1 && insmod ./kvm-%s.ko" %(vendor))
+    # remove existing in kernel kvm modules
+    print "unloading loaded kvm modules (if present) ..."
+    if system("grep kvm_%s /proc/modules 1>/dev/null" %(vendor), 1) == 0:
+        system("rmmod -f kvm_%s" %(vendor))
+    if system("grep kvm /proc/modules 1>/dev/null", 1) == 0:
+        system("rmmod -f kvm")
+
+    # load new modules
+    print "loading new kvm modules..."
+    os.chdir(srcdir+'/kernel')
+    system("insmod ./kvm.ko && sleep 1 && insmod ./kvm-%s.ko" %(vendor))
 
 
 # build and install kvm external modules
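
When no tarball is given, install_kvm_external_modules derives the snapshot name from today's date minus DAYSOLD. The naming scheme on its own, with an illustrative daysold value:

from datetime import date, timedelta

SNAPBASE = 'http://people.qumranet.com/avi/snapshots/'
daysold = 1                        # illustrative stand-in for DAYSOLD

d = (date.today() - timedelta(days=daysold)).strftime('%Y%m%d')
tarball = SNAPBASE + 'kvm-snapshot-%s.tar.gz' % d
print tarball                      # e.g. .../kvm-snapshot-20071021.tar.gz
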
diff --git a/client/tests/kvmtest/kvmtest.py b/client/tests/kvmtest/kvmtest.py
index 96806c0..f776033 100644
--- a/client/tests/kvmtest/kvmtest.py
+++ b/client/tests/kvmtest/kvmtest.py
@@ -4,158 +4,160 @@
 
 
 class kvmtest(test.test):
-	version = 1
+    version = 1
 
-	def setup(self, tarball = 'kvm-test.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
-		utils.system('python setup.py install')
+    def setup(self, tarball = 'kvm-test.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
+        utils.system('python setup.py install')
 
 
-	def execute(self, testdir = '', args = ''):
-		dirs = []
-		results = []
-		passed = 0
-		failed = 0
+    def execute(self, testdir = '', args = ''):
+        dirs = []
+        results = []
+        passed = 0
+        failed = 0
 
-		# spawn vncserver if needed
-		if not os.environ.has_key('DISPLAY'):
-			print("No DISPLAY set in environment,"
-                              "spawning vncserver...")
-			display = self.__create_vncserver(os.environ['HOME']+"/.vnc")
-			print("Setting DISPLAY=%s"%(display))
-			os.environ['DISPLAY'] = display
+        # spawn vncserver if needed
+        if not os.environ.has_key('DISPLAY'):
+            print("No DISPLAY set in environment,"
+                  "spawning vncserver...")
+            display = self.__create_vncserver(os.environ['HOME']+"/.vnc")
+            print("Setting DISPLAY=%s"%(display))
+            os.environ['DISPLAY'] = display
 
-		# build a list of dirs with 'vm.log' files
-		os.path.walk(testdir, self.__has_vmlog, dirs)
+        # build a list of dirs with 'vm.log' files
+        os.path.walk(testdir, self.__has_vmlog, dirs)
 
-		for d in dirs:
-			replaydir = os.path.join(self.resultsdir,
-						 os.path.basename(d))
-			os.mkdir(replaydir)
-			logfile = replaydir + "/%s.log" %(os.path.basename(d))
+        for d in dirs:
+            replaydir = os.path.join(self.resultsdir,
+                                     os.path.basename(d))
+            os.mkdir(replaydir)
+            logfile = replaydir + "/%s.log" %(os.path.basename(d))
 
-			os.chdir(d)
-			rv = utils.system("kvm-test-replay > %s" %(logfile), 1)
+            os.chdir(d)
+            rv = utils.system("kvm-test-replay > %s" %(logfile), 1)
 
-			results.append((d, rv))
-			if rv != 0:
-				screenshot = self.__get_expected_file(logfile)
-				expected = "expected-%03d.png" %(
-					    random.randint(0, 999))
-				dest = os.path.join(replaydir,expected)
+            results.append((d, rv))
+            if rv != 0:
+                screenshot = self.__get_expected_file(logfile)
+                expected = "expected-%03d.png" %(
+                            random.randint(0, 999))
+                dest = os.path.join(replaydir,expected)
 
-				# make a copy of the screen shot
-				utils.system("cp %s %s" %(screenshot, dest), 1)
-					  
-				# move the failure
-				utils.system("mv failure-*.png %s" %(replaydir), 1)
+                # make a copy of the screen shot
+                utils.system("cp %s %s" %(screenshot, dest), 1)
 
-		# generate html output
-		self.__format_results(results)
+                # move the failure
+                utils.system("mv failure-*.png %s" %(replaydir), 1)
 
-		# produce pass/fail output
-		for (x, y) in results:
-			if y != 0:
-				print("FAIL: '%s' with rv %s" %(x, y))
-				failed = failed + 1
-			else:
-				print("pass: '%s' with rv %s" %(x, y))
-				passed = passed + 1
+        # generate html output
+        self.__format_results(results)
 
-		print("Summary: Passed %d Failed %d" %(passed, failed))
-		# if we had any tests not passed, fail entire test
-		if failed != 0:
-			raise error.TestError('kvm-test-replay')
+        # produce pass/fail output
+        for (x, y) in results:
+            if y != 0:
+                print("FAIL: '%s' with rv %s" %(x, y))
+                failed = failed + 1
+            else:
+                print("pass: '%s' with rv %s" %(x, y))
+                passed = passed + 1
+
+        print("Summary: Passed %d Failed %d" %(passed, failed))
+        # if we had any tests not passed, fail entire test
+        if failed != 0:
+            raise error.TestError('kvm-test-replay')
 
 
-	def __get_expected_file(self, logfile):
-		# pull out screeshot name from logfile
-		return filter(lambda x: "Expected" in x,
-			      open(logfile, 'r').readlines())\
-			      [0].split('{')[1].split('}')[0]
+    def __get_expected_file(self, logfile):
+        # pull out screenshot name from logfile
+        return filter(lambda x: "Expected" in x,
+                      open(logfile, 'r').readlines())\
+                      [0].split('{')[1].split('}')[0]
 
 
-	def __create_vncserver(self, dirname):
-		"""
-		this test may run without an X connection in kvm/qemu needs
-		a DISPLAY to push the vga buffer.  If a DISPLAY is not set
-		in the environment, then attempt to spawn a vncserver, and
-		change env DISPLAY so that kvmtest can run
-		"""
-		for pidfile in locate("*:*.pid", dirname):
-			pid = open(pidfile, 'r').readline().strip()
-			# if the server is still active, just use it for display
-			if os.path.exists('/proc/%s/status' % pid):
-				vncdisplay = os.path.basename(pidfile)\
-					       .split(":")[1].split(".")[0]
-				print("Found vncserver on port %s, using it"%(
-				      vncdisplay))
-				return ':%s.0' %(vncdisplay)
-			
-		# none of the vncserver were still alive, spawn our own and
-		# return the display whack existing server first, then spawn it
-		vncdisplay = "1"
-		print("Spawning vncserver on port %s"%(vncdisplay))
-		utils.system('vncserver :%s' %(vncdisplay))
-		return ':%s.0' %(vncdisplay)
+    def __create_vncserver(self, dirname):
+        """
+        this test may run without an X connection, but kvm/qemu needs
+        a DISPLAY to push the vga buffer.  If a DISPLAY is not set
+        in the environment, then attempt to spawn a vncserver, and
+        change env DISPLAY so that kvmtest can run
+        """
+        for pidfile in locate("*:*.pid", dirname):
+            pid = open(pidfile, 'r').readline().strip()
+            # if the server is still active, just use it for display
+            if os.path.exists('/proc/%s/status' % pid):
+                vncdisplay = os.path.basename(pidfile)\
+                               .split(":")[1].split(".")[0]
+                print("Found vncserver on port %s, using it"%(
+                      vncdisplay))
+                return ':%s.0' %(vncdisplay)
+
+        # none of the vncservers were still alive; spawn our own and return
+        # the display (whack any existing server first, then spawn it)
+        vncdisplay = "1"
+        print("Spawning vncserver on port %s"%(vncdisplay))
+        utils.system('vncserver :%s' %(vncdisplay))
+        return ':%s.0' %(vncdisplay)
 
 
-	def __has_vmlog(self, arg, dirname, names):
-		if os.path.exists(os.path.join(dirname, 'vm.log')):
-			arg.append(dirname)
+    def __has_vmlog(self, arg, dirname, names):
+        if os.path.exists(os.path.join(dirname, 'vm.log')):
+            arg.append(dirname)
 
 
-	def __gen_fail_html(self, testdir):
-		# generate a failure index.html to display the expected and failure
-		# images
-		fail_dir = os.path.join(self.resultsdir, os.path.basename(testdir))
-		fail_index = os.path.join(fail_dir, "index.html")
+    def __gen_fail_html(self, testdir):
+        # generate a failure index.html to display the expected and failure
+        # images
+        fail_dir = os.path.join(self.resultsdir, os.path.basename(testdir))
+        fail_index = os.path.join(fail_dir, "index.html")
 
-		# lambda helpers for pulling out image files
-		is_png = lambda x: x.endswith('.png')
-		failure_filter = lambda x: x.startswith('failure') and is_png(x)
-		expected_filter = lambda x: x.startswith('expected') and is_png(x)
+        # lambda helpers for pulling out image files
+        is_png = lambda x: x.endswith('.png')
+        failure_filter = lambda x: x.startswith('failure') and is_png(x)
+        expected_filter = lambda x: x.startswith('expected') and is_png(x)
 
-		failure_img = filter(failure_filter, os.listdir(fail_dir))[0]
-		expected_img = filter(expected_filter, os.listdir(fail_dir))[0]
-		if not failure_img or not expected_img:
-			raise "Failed to find images"
+        failure_imgs = filter(failure_filter, os.listdir(fail_dir))
+        expected_imgs = filter(expected_filter, os.listdir(fail_dir))
+        if not failure_imgs or not expected_imgs:
+            raise error.TestError('Failed to find images')
+        failure_img = failure_imgs[0]
+        expected_img = expected_imgs[0]
 
-		fail_buff = "<html><table border=1><tr><th>Barrier Diff</th>\n" + \
-					"<th>Expected Barrier</th><th>Failure</th></tr><tr><td></td>\n"
-		for img in expected_img, failure_img:
-			fail_buff = fail_buff + "<td><a href=\"%s\"><img width=320 " \
-						"height=200 src=\"%s\"></a></td>\n" %(img, img)
+        fail_buff = "<html><table border=1><tr><th>Barrier Diff</th>\n" + \
+                                "<th>Expected Barrier</th><th>Failure</th></tr><tr><td></td>\n"
+        for img in expected_img, failure_img:
+            fail_buff = fail_buff + "<td><a href=\"%s\"><img width=320 " \
+                                    "height=200 src=\"%s\"></a></td>\n" %(img, img)
 
-		fail_buff = fail_buff + "</tr></table></html>\n"
+        fail_buff = fail_buff + "</tr></table></html>\n"
 
-		fh = open(fail_index, "w+")
-		fh.write(fail_buff)
-		fh.close()
+        fh = open(fail_index, "w+")
+        fh.write(fail_buff)
+        fh.close()
 
-	def __format_results(self, results):
-		# generate kvmtest/index.html and an index.html for each fail
-		test_index = os.path.join(self.outputdir, "index.html")
-		test_buff = "<html><table border=1><tr><th>Test</th>\n"
+    def __format_results(self, results):
+        # generate kvmtest/index.html and an index.html for each fail
+        test_index = os.path.join(self.outputdir, "index.html")
+        test_buff = "<html><table border=1><tr><th>Test</th>\n"
 
-		for (x,y) in results:
-			test_buff = test_buff + "<th>%s</th>\n" %(os.path.basename(x))
+        for (x,y) in results:
+            test_buff = test_buff + "<th>%s</th>\n" %(os.path.basename(x))
 
-		test_buff = test_buff + "</tr><tr><td></td>\n"
+        test_buff = test_buff + "</tr><tr><td></td>\n"
 
-		for (x,y) in results:
-			if y != 0:
-				fail = "<td><a href=\"results/%s/\">FAIL</a></td>\n" %(os.path.basename(x))
-				test_buff = test_buff + fail
-				self.__gen_fail_html(x)
-			else:
-				test_buff = test_buff + "<td>GOOD</td>\n"
+        for (x,y) in results:
+            if y != 0:
+                fail = "<td><a href=\"results/%s/\">FAIL</a></td>\n" %(os.path.basename(x))
+                test_buff = test_buff + fail
+                self.__gen_fail_html(x)
+            else:
+                test_buff = test_buff + "<td>GOOD</td>\n"
 
-		test_buff = test_buff + "</tr></table></html>"
+        test_buff = test_buff + "</tr></table></html>"
 
-		fh = open(test_index, "w+")
-		fh.write(test_buff)
-		fh.close()
+        fh = open(test_index, "w+")
+        fh.write(test_buff)
+        fh.close()
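
__get_expected_file above takes the first replay-log line mentioning 'Expected' and returns the path between the braces. A standalone sketch with an illustrative log line (the line format is inferred from the parsing code, not from kvm-test-replay documentation):

def get_expected_file(log_lines):
    # First line mentioning 'Expected'; the path sits between '{' and '}'.
    line = [l for l in log_lines if 'Expected' in l][0]
    return line.split('{')[1].split('}')[0]

print get_expected_file(['replay started',
                         'Expected screenshot {/tmp/shot-001.png} differs'])
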
diff --git a/client/tests/libhugetlbfs/libhugetlbfs.py b/client/tests/libhugetlbfs/libhugetlbfs.py
index 1f0aed1..aa018c5 100644
--- a/client/tests/libhugetlbfs/libhugetlbfs.py
+++ b/client/tests/libhugetlbfs/libhugetlbfs.py
@@ -3,58 +3,58 @@
 from autotest_lib.client.common_lib import utils, error
 
 class libhugetlbfs(test.test):
-	version = 4
+    version = 4
 
-	# http://libhugetlbfs.ozlabs.org/releases/libhugetlbfs-1.3-pre1.tar.gz
-	def setup(self, tarball = 'libhugetlbfs-1.3-pre1.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://libhugetlbfs.ozlabs.org/releases/libhugetlbfs-1.3-pre1.tar.gz
+    def setup(self, tarball = 'libhugetlbfs-1.3-pre1.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		# make might fail if there are no proper headers for the 32 bit
-		# version, in that case try only for the 64 bit version
-		try:
-			utils.system('make')
-		except:
-			utils.system('make OBJDIRS=obj64')
+        # make might fail if there are no proper headers for the 32 bit
+        # version, in that case try only for the 64 bit version
+        try:
+            utils.system('make')
+        except:
+            utils.system('make OBJDIRS=obj64')
 
-	def execute(self, dir = None, pages_requested = 20):
-		autotest_utils.check_kernel_ver("2.6.16")
+    def execute(self, dir = None, pages_requested = 20):
+        autotest_utils.check_kernel_ver("2.6.16")
 
-		# Check huge page number
-		pages_available = 0
-		if os.path.exists('/proc/sys/vm/nr_hugepages'):
-			utils.write_one_line('/proc/sys/vm/nr_hugepages',
-						      str(pages_requested))
-			pages_available = int(open('/proc/sys/vm/nr_hugepages', 'r').readline())
-		else:
-			raise error.TestNAError('Kernel does not support hugepages')
+        # Check huge page number
+        pages_available = 0
+        if os.path.exists('/proc/sys/vm/nr_hugepages'):
+            utils.write_one_line('/proc/sys/vm/nr_hugepages',
+                                 str(pages_requested))
+            pages_available = int(open('/proc/sys/vm/nr_hugepages', 'r').readline())
+        else:
+            raise error.TestNAError('Kernel does not support hugepages')
 
-		if pages_available < pages_requested:
-			raise error.TestError('%d huge pages available, < %d pages requested' % (pages_available, pages_requested))
+        if pages_available < pages_requested:
+            raise error.TestError('%d huge pages available, < %d pages requested' % (pages_available, pages_requested))
 
-		# Check if hugetlbfs has been mounted
-		if not autotest_utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
-			if not dir:
-				dir = os.path.join(self.tmpdir, 'hugetlbfs')
-				os.makedirs(dir)
-			utils.system('mount -t hugetlbfs none %s' % dir)
+        # Check if hugetlbfs has been mounted
+        if not autotest_utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
+            if not dir:
+                dir = os.path.join(self.tmpdir, 'hugetlbfs')
+                os.makedirs(dir)
+            utils.system('mount -t hugetlbfs none %s' % dir)
 
-		os.chdir(self.srcdir)
+        os.chdir(self.srcdir)
 
-		profilers = self.job.profilers
-		if profilers.present():
-			profilers.start(self)
-			os.chdir(self.srcdir)
-		# make check might fail for 32 bit if the 32 bit compile earlier
-		# had failed. See if it passes for 64 bit in that case.
-		try:
-			utils.system('make check')
-		except:
-			utils.system('make check OBJDIRS=obj64')
-		if profilers.present():
-			profilers.stop(self)
-			profilers.report(self)
+        profilers = self.job.profilers
+        if profilers.present():
+            profilers.start(self)
+            os.chdir(self.srcdir)
+        # make check might fail for 32 bit if the 32 bit compile earlier
+        # had failed. See if it passes for 64 bit in that case.
+        try:
+            utils.system('make check')
+        except:
+            utils.system('make check OBJDIRS=obj64')
+        if profilers.present():
+            profilers.stop(self)
+            profilers.report(self)
 
-		utils.system('umount %s' % dir)
+        utils.system('umount %s' % dir)
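
The hugepage check above writes the requested count to /proc/sys/vm/nr_hugepages and then reads back how many pages the kernel actually reserved, since the write can silently under-allocate. The same request-and-verify step as a sketch (writing that file needs root, so this stays a function definition):

import os

def reserve_hugepages(pages_requested):
    path = '/proc/sys/vm/nr_hugepages'
    if not os.path.exists(path):
        raise EnvironmentError('Kernel does not support hugepages')
    # Ask for the pages, then read back what the kernel could reserve.
    open(path, 'w').write('%d\n' % pages_requested)
    pages_available = int(open(path).readline())
    if pages_available < pages_requested:
        raise ValueError('%d huge pages available, < %d pages requested'
                         % (pages_available, pages_requested))
    return pages_available
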
diff --git a/client/tests/linus_stress/linus_stress.py b/client/tests/linus_stress/linus_stress.py
index 3a81134..6160307 100755
--- a/client/tests/linus_stress/linus_stress.py
+++ b/client/tests/linus_stress/linus_stress.py
@@ -4,40 +4,40 @@
 
 
 class linus_stress(test.test):
-	version = 1
+    version = 1
 
-	def setup(self):
-		os.mkdir(self.srcdir)
-		os.chdir(self.bindir)
-		utils.system('cp linus_stress.c src/')
-		os.chdir(self.srcdir)
-		utils.system('cc linus_stress.c -D_POSIX_C_SOURCE=200112 -o linus_stress')
+    def setup(self):
+        os.mkdir(self.srcdir)
+        os.chdir(self.bindir)
+        utils.system('cp linus_stress.c src/')
+        os.chdir(self.srcdir)
+        utils.system('cc linus_stress.c -D_POSIX_C_SOURCE=200112 -o linus_stress')
 
 
-	def run_the_test(self, iterations):
-		utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
-		utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')
+    def run_the_test(self, iterations):
+        utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
+        utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')
 
-		cmd = os.path.join(self.srcdir, 'linus_stress')
-		args = "%d" % (autotest_utils.memtotal() / 32)
+        cmd = os.path.join(self.srcdir, 'linus_stress')
+        args = "%d" % (autotest_utils.memtotal() / 32)
 
-		profilers = self.job.profilers
-		if profilers.present():
-			profilers.start(self)
+        profilers = self.job.profilers
+        if profilers.present():
+            profilers.start(self)
 
-		for i in range(iterations):
-			utils.system(cmd + ' ' + args)
+        for i in range(iterations):
+            utils.system(cmd + ' ' + args)
 
-		if profilers.present():
-			profilers.stop(self)
-			profilers.report(self)
+        if profilers.present():
+            profilers.stop(self)
+            profilers.report(self)
 
 
-	def execute(self, iterations = 1):
-		dirty_ratio = utils.read_one_line('/proc/sys/vm/dirty_ratio')
-		dirty_background_ratio = utils.read_one_line('/proc/sys/vm/dirty_background_ratio')
-		try:
-			self.run_the_test(iterations)
-		finally:
-			utils.write_one_line('/proc/sys/vm/dirty_ratio', dirty_ratio)
-			utils.write_one_line('/proc/sys/vm/dirty_background_ratio', dirty_background_ratio)
+    def execute(self, iterations = 1):
+        dirty_ratio = utils.read_one_line('/proc/sys/vm/dirty_ratio')
+        dirty_background_ratio = utils.read_one_line('/proc/sys/vm/dirty_background_ratio')
+        try:
+            self.run_the_test(iterations)
+        finally:
+            utils.write_one_line('/proc/sys/vm/dirty_ratio', dirty_ratio)
+            utils.write_one_line('/proc/sys/vm/dirty_background_ratio', dirty_background_ratio)
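
execute() saves the two dirty-ratio sysctls and restores them in a finally block, so a failed stress run cannot leave the VM tuning behind. The same save/restore idiom in a generic form (path and value are illustrative; pointing it at /proc needs root):

def with_sysctl(path, value, body):
    # Save the current setting, apply the new one, always restore it.
    saved = open(path).readline().strip()
    open(path, 'w').write('%s\n' % value)
    try:
        return body()
    finally:
        open(path, 'w').write('%s\n' % saved)
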
diff --git a/client/tests/lmbench/lmbench.py b/client/tests/lmbench/lmbench.py
index fc4807b..a4bef45 100755
--- a/client/tests/lmbench/lmbench.py
+++ b/client/tests/lmbench/lmbench.py
@@ -5,44 +5,44 @@
 
 
 class lmbench(test.test):
-	version = 2
+    version = 2
 
-	def setup(self, tarball = 'lmbench3.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		# http://www.bitmover.com/lm/lmbench/lmbench3.tar.gz
-		# + lmbench3.diff 
-		#	removes Makefile references to bitkeeper
-		#	default mail to no, fix job placement defaults (masouds)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    def setup(self, tarball = 'lmbench3.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        # http://www.bitmover.com/lm/lmbench/lmbench3.tar.gz
+        # + lmbench3.diff
+        #       removes Makefile references to bitkeeper
+        #       default mail to no, fix job placement defaults (masouds)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
+        utils.system('make')
 
 
-	def execute(self, iterations = 1, mem = '', fastmem = 'NO', 
-			slowfs = 'NO', disks = '', disks_desc = '', 
-			mhz = '', remote = '', enough = '5000', sync_max = '1',
-			fsdir = None, file = None):
-		if not fsdir:
-			fsdir = self.tmpdir
-		if not file:
-			file = self.tmpdir+'XXX'
+    def execute(self, iterations = 1, mem = '', fastmem = 'NO',
+                    slowfs = 'NO', disks = '', disks_desc = '',
+                    mhz = '', remote = '', enough = '5000', sync_max = '1',
+                    fsdir = None, file = None):
+        if not fsdir:
+            fsdir = self.tmpdir
+        if not file:
+            file = self.tmpdir+'XXX'
 
-		os.chdir(self.srcdir)
-		cmd = "yes '' | make rerun"
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				utils.system(cmd)
+        os.chdir(self.srcdir)
+        cmd = "yes '' | make rerun"
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                utils.system(cmd)
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system(cmd)
-			profilers.stop(self)
-			profilers.report(self)
-		# Get the results:
-		outputdir = self.srcdir + "/results"
-		results = self.resultsdir + "/summary.txt"
-		utils.system("make -C " + outputdir + " summary > " + results)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system(cmd)
+            profilers.stop(self)
+            profilers.report(self)
+        # Get the results:
+        outputdir = self.srcdir + "/results"
+        results = self.resultsdir + "/summary.txt"
+        utils.system("make -C " + outputdir + " summary > " + results)
diff --git a/client/tests/lsb_dtk/lsb_dtk.py b/client/tests/lsb_dtk/lsb_dtk.py
index 0d1db50..f8d25df 100644
--- a/client/tests/lsb_dtk/lsb_dtk.py
+++ b/client/tests/lsb_dtk/lsb_dtk.py
@@ -12,154 +12,154 @@
 '''
 
 class lsb_dtk(test.test):
-	version = 1
-	def get_lsb_arch(self):
-		self.arch = autotest_utils.get_current_kernel_arch()
-		if self.arch in ['i386', 'i486', 'i586', 'i686', 'athlon']:
-			return 'ia32'
-		elif self.arch == 'ppc':
-			return 'ppc32'
-		elif self.arch in ['s390', 's390x', 'ia64', 'x86_64', 'ppc64']:
-			return self.arch
-		else:
-			e_msg = 'Architecture %s not supported by LSB' % self.arch
-			raise error.TestError(e_msg)
+    version = 1
+    def get_lsb_arch(self):
+        self.arch = autotest_utils.get_current_kernel_arch()
+        if self.arch in ['i386', 'i486', 'i586', 'i686', 'athlon']:
+            return 'ia32'
+        elif self.arch == 'ppc':
+            return 'ppc32'
+        elif self.arch in ['s390', 's390x', 'ia64', 'x86_64', 'ppc64']:
+            return self.arch
+        else:
+            e_msg = 'Architecture %s not supported by LSB' % self.arch
+            raise error.TestError(e_msg)
 
 
-	def install_lsb_packages(self, srcdir, cachedir, my_config):
-		# First, we download the LSB DTK manager package, worry about installing it later
-		self.dtk_manager_arch = my_config.get('dtk-manager', 'arch-%s' % self.get_lsb_arch())
-		self.dtk_manager_url = my_config.get('dtk-manager', 'tarball_url') % self.dtk_manager_arch
-		if not self.dtk_manager_url:
-			raise error.TestError('Could not get DTK manager URL from configuration file')
-		self.dtk_md5 = my_config.get('dtk-manager', 'md5-%s' % self.get_lsb_arch())
-		if self.dtk_md5:
-			print 'Caching LSB DTK manager RPM'
-			self.dtk_manager_pkg = autotest_utils.unmap_url_cache(cachedir, self.dtk_manager_url, self.dtk_md5)
-		else:
-			raise error.TestError('Could not find DTK manager package md5, cannot cache DTK manager tarball')
+    def install_lsb_packages(self, srcdir, cachedir, my_config):
+        # First, we download the LSB DTK manager package, worry about installing it later
+        self.dtk_manager_arch = my_config.get('dtk-manager', 'arch-%s' % self.get_lsb_arch())
+        self.dtk_manager_url = my_config.get('dtk-manager', 'tarball_url') % self.dtk_manager_arch
+        if not self.dtk_manager_url:
+            raise error.TestError('Could not get DTK manager URL from configuration file')
+        self.dtk_md5 = my_config.get('dtk-manager', 'md5-%s' % self.get_lsb_arch())
+        if self.dtk_md5:
+            print 'Caching LSB DTK manager RPM'
+            self.dtk_manager_pkg = autotest_utils.unmap_url_cache(cachedir, self.dtk_manager_url, self.dtk_md5)
+        else:
+            raise error.TestError('Could not find DTK manager package md5, cannot cache DTK manager tarball')
 
-		# Get LSB tarball, cache it and uncompress under autotest srcdir
-		if my_config.get('lsb', 'override_default_url') == 'no':
-			self.lsb_url = my_config.get('lsb', 'tarball_url') % self.get_lsb_arch()
-		else:
-			self.lsb_url = my_config.get('lsb', 'tarball_url_alt') % self.get_lsb_arch()
-		if not self.lsb_url:
-			raise TestError('Could not get lsb URL from configuration file')
-		self.md5_key = 'md5-%s' % self.get_lsb_arch()
-		self.lsb_md5 = my_config.get('lsb', self.md5_key)
-		if self.lsb_md5:
-			print 'Caching LSB tarball'
-			self.lsb_pkg = autotest_utils.unmap_url_cache(self.cachedir, self.lsb_url, self.lsb_md5)
-		else:
-			raise error.TestError('Could not find LSB package md5, cannot cache LSB tarball')
+        # Get LSB tarball, cache it and uncompress under autotest srcdir
+        if my_config.get('lsb', 'override_default_url') == 'no':
+            self.lsb_url = my_config.get('lsb', 'tarball_url') % self.get_lsb_arch()
+        else:
+            self.lsb_url = my_config.get('lsb', 'tarball_url_alt') % self.get_lsb_arch()
+        if not self.lsb_url:
+            raise error.TestError('Could not get lsb URL from configuration file')
+        self.md5_key = 'md5-%s' % self.get_lsb_arch()
+        self.lsb_md5 = my_config.get('lsb', self.md5_key)
+        if self.lsb_md5:
+            print 'Caching LSB tarball'
+            self.lsb_pkg = autotest_utils.unmap_url_cache(self.cachedir, self.lsb_url, self.lsb_md5)
+        else:
+            raise error.TestError('Could not find LSB package md5, cannot cache LSB tarball')
 
-		autotest_utils.extract_tarball_to_dir(self.lsb_pkg, srcdir)
+        autotest_utils.extract_tarball_to_dir(self.lsb_pkg, srcdir)
 
-		# Lets load a file that contains the list of RPMs
-		os.chdir(srcdir)
-		if not os.path.isfile('inst-config'):
-			raise IOError('Could not find file with package info, inst-config')
-		self.rpm_file_list = open('inst-config', 'r')
-		self.pkg_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
-		self.lsb_pkg_list = []
-		for self.line in self.rpm_file_list.readlines():
-			try:
-				# We will install lsb-dtk-manager separately, so we can remove
-				# it from the list of packages
-				if not 'lsb-dtk-manager' in self.line:
-					self.line = re.findall(self.pkg_pattern, self.line)[0]
-					self.lsb_pkg_list.append(self.line)
-			except:
-				# If we don't get a match, no problem
-				pass
+        # Let's load a file that contains the list of RPMs
+        os.chdir(srcdir)
+        if not os.path.isfile('inst-config'):
+            raise IOError('Could not find file with package info, inst-config')
+        self.rpm_file_list = open('inst-config', 'r')
+        self.pkg_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
+        self.lsb_pkg_list = []
+        for self.line in self.rpm_file_list.readlines():
+            try:
+                # We will install lsb-dtk-manager separately, so we can remove
+                # it from the list of packages
+                if not 'lsb-dtk-manager' in self.line:
+                    self.line = re.findall(self.pkg_pattern, self.line)[0]
+                    self.lsb_pkg_list.append(self.line)
+            except:
+                # If we don't get a match, no problem
+                pass
 
-		# Lets figure out the host distro
-		distro_pkg_support = package.os_support()
-		if os.path.isfile('/etc/debian_version') and distro_pkg_support['dpkg']:
-			print 'Debian based distro detected'
-			if distro_pkg_support['conversion']:
-				print 'Package conversion supported'
-				self.distro_type = 'debian-based'
-			else:
-				e_msg = 'Package conversion not supported. Cannot handle LSB package installation'
-				raise EnvironmentError(e_msg)
-		elif distro_pkg_support['rpm']:
-			print 'Red Hat based distro detected'
-			self.distro_type = 'redhat-based'
-		else:
-			print 'OS does not seem to be red hat or debian based'
-			e_msg = 'Cannot handle LSB package installation'
-			raise EnvironmentError(e_msg)
+        # Let's figure out the host distro
+        distro_pkg_support = package.os_support()
+        if os.path.isfile('/etc/debian_version') and distro_pkg_support['dpkg']:
+            print 'Debian based distro detected'
+            if distro_pkg_support['conversion']:
+                print 'Package conversion supported'
+                self.distro_type = 'debian-based'
+            else:
+                e_msg = 'Package conversion not supported. Cannot handle LSB package installation'
+                raise EnvironmentError(e_msg)
+        elif distro_pkg_support['rpm']:
+            print 'Red Hat based distro detected'
+            self.distro_type = 'redhat-based'
+        else:
+            print 'OS does not seem to be red hat or debian based'
+            e_msg = 'Cannot handle LSB package installation'
+            raise EnvironmentError(e_msg)
 
-		# According to the host distro detection, we can install the packages
-		# using the list previously assembled
-		if self.distro_type == 'redhat-based':
-			print 'Installing LSB RPM packages'
-			package.install(self.dtk_manager_pkg)
-			for self.lsb_rpm in self.lsb_pkg_list:
-				package.install(self.lsb_rpm, nodeps = True)
-		elif self.distro_type == 'debian-based':
-			print 'Remember that you must have the following lsb compliance packages installed:'
-			print 'lsb-core lsb-cxx lsb-graphics lsb-desktop lsb-qt4 lsb-languages lsb-multimedia lsb-printing'
-			print 'Converting and installing LSB packages'
-			self.dtk_manager_dpkg = package.convert(self.dtk_manager_pkg, 'dpkg')
-			package.install(self.dtk_manager_dpkg)
-			for self.lsb_rpm in self.lsb_pkg_list:
-				self.lsb_dpkg = package.convert(self.lsb_rpm, 'dpkg')
-				package.install(self.lsb_dpkg, nodeps = True)
+        # According to the host distro detection, we can install the packages
+        # using the list previously assembled
+        if self.distro_type == 'redhat-based':
+            print 'Installing LSB RPM packages'
+            package.install(self.dtk_manager_pkg)
+            for self.lsb_rpm in self.lsb_pkg_list:
+                package.install(self.lsb_rpm, nodeps = True)
+        elif self.distro_type == 'debian-based':
+            print 'Remember that you must have the following lsb compliance packages installed:'
+            print 'lsb-core lsb-cxx lsb-graphics lsb-desktop lsb-qt4 lsb-languages lsb-multimedia lsb-printing'
+            print 'Converting and installing LSB packages'
+            self.dtk_manager_dpkg = package.convert(self.dtk_manager_pkg, 'dpkg')
+            package.install(self.dtk_manager_dpkg)
+            for self.lsb_rpm in self.lsb_pkg_list:
+                self.lsb_dpkg = package.convert(self.lsb_rpm, 'dpkg')
+                package.install(self.lsb_dpkg, nodeps = True)
 
-	def link_lsb_libraries(self, config):
-		print 'Linking LSB libraries'
-		self.libdir_key = 'libdir-%s' % self.get_lsb_arch()
-		self.os_libdir = config.get('lib', self.libdir_key)
-		if not self.os_libdir:
-			raise TypeError('Could not find OS lib dir from conf file')
-		self.lib_key = 'lib-%s' % self.get_lsb_arch()
-		self.lib_list_raw = config.get('lib', self.lib_key)
-		if not self.lib_list_raw:
-			raise TypeError('Could not find library list from conf file')
-		self.lib_list = eval(self.lib_list_raw)
+    def link_lsb_libraries(self, config):
+        print 'Linking LSB libraries'
+        self.libdir_key = 'libdir-%s' % self.get_lsb_arch()
+        self.os_libdir = config.get('lib', self.libdir_key)
+        if not self.os_libdir:
+            raise TypeError('Could not find OS lib dir from conf file')
+        self.lib_key = 'lib-%s' % self.get_lsb_arch()
+        self.lib_list_raw = config.get('lib', self.lib_key)
+        if not self.lib_list_raw:
+            raise TypeError('Could not find library list from conf file')
+        self.lib_list = eval(self.lib_list_raw)
 
-		# Remove any previous ld-lsb*.so symbolic links
-		self.lsb_libs = glob.glob('%s/ld-lsb*.so*' % self.os_libdir)
-		for self.lib in self.lsb_libs:
-			os.remove(self.lib)
+        # Remove any previous ld-lsb*.so symbolic links
+        self.lsb_libs = glob.glob('%s/ld-lsb*.so*' % self.os_libdir)
+        for self.lib in self.lsb_libs:
+            os.remove(self.lib)
 
-		# Get the base library that we'll use to recreate the symbolic links
-		self.system_lib = glob.glob('%s/ld-2*.so*' % self.os_libdir)[0]
+        # Get the base library that we'll use to recreate the symbolic links
+        self.system_lib = glob.glob('%s/ld-2*.so*' % self.os_libdir)[0]
 
-		# Now just link the system lib that we just found to each one of the
-		# needed LSB libraries that we provided on the conf file
-		for self.lsb_lib in self.lib_list:
-			# Get the library absolute path
-			self.lsb_lib = os.path.join(self.os_libdir, self.lsb_lib)
-			# Link the library system_lib -> lsb_lib
-			os.symlink(self.system_lib, self.lsb_lib)
+        # Now just link the system lib that we just found to each one of the
+        # needed LSB libraries that we provided on the conf file
+        for self.lsb_lib in self.lib_list:
+            # Get the library absolute path
+            self.lsb_lib = os.path.join(self.os_libdir, self.lsb_lib)
+            # Link the library system_lib -> lsb_lib
+            os.symlink(self.system_lib, self.lsb_lib)
 
 
-	def execute(self, args = 'all', config = './lsb31.cfg'):
-		# Load configuration. Use autotest tmpdir if needed
-		my_config = config_loader(config, self.tmpdir)
-		# Cache directory, that will store LSB tarball and DTK manager RPM
-		self.cachedir = os.path.join(self.bindir, 'cache')
-		if not os.path.isdir(self.cachedir):
-			os.makedirs(self.cachedir)
+    def execute(self, args = 'all', config = './lsb31.cfg'):
+        # Load configuration. Use autotest tmpdir if needed
+        my_config = config_loader(config, self.tmpdir)
+        # Cache directory, that will store LSB tarball and DTK manager RPM
+        self.cachedir = os.path.join(self.bindir, 'cache')
+        if not os.path.isdir(self.cachedir):
+            os.makedirs(self.cachedir)
 
-		self.install_lsb_packages(self.srcdir, self.cachedir, my_config)
-		self.link_lsb_libraries(my_config)
+        self.install_lsb_packages(self.srcdir, self.cachedir, my_config)
+        self.link_lsb_libraries(my_config)
 
-		self.main_script_path = my_config.get('lsb', 'main_script_path')
-		logfile = os.path.join(self.resultsdir, 'lsb.log')
-		args2 = '-r %s' % (logfile)
-		args = args + ' ' + args2
-		cmd = os.path.join(self.srcdir, self.main_script_path) + ' ' + args
+        self.main_script_path = my_config.get('lsb', 'main_script_path')
+        logfile = os.path.join(self.resultsdir, 'lsb.log')
+        args2 = '-r %s' % (logfile)
+        args = args + ' ' + args2
+        cmd = os.path.join(self.srcdir, self.main_script_path) + ' ' + args
 
-		profilers = self.job.profilers
-		if profilers.present():
-			profilers.start(self)
-		print 'Executing LSB main test script'
-		utils.system(cmd)
-		if profilers.present():
-			profilers.stop(self)
-			profilers.report(self)
+        profilers = self.job.profilers
+        if profilers.present():
+            profilers.start(self)
+        print 'Executing LSB main test script'
+        utils.system(cmd)
+        if profilers.present():
+            profilers.stop(self)
+            profilers.report(self)
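
install_lsb_packages builds its package list by regex-matching .rpm file names out of inst-config, skipping lsb-dtk-manager because it is installed separately. That filter in isolation, over made-up input lines:

import re

pkg_pattern = re.compile('[A-Za-z0-9_.-]*[.]rpm')

def rpm_list(lines):
    pkgs = []
    for line in lines:
        if 'lsb-dtk-manager' in line:
            continue               # installed separately, skip it
        match = pkg_pattern.findall(line)
        if match:
            pkgs.append(match[0])
    return pkgs

print rpm_list(['install lsb-core-3.1.0.rpm now',
                'install lsb-dtk-manager-1.0.rpm separately',
                'no package on this line'])
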
diff --git a/client/tests/ltp/ltp-diff.py b/client/tests/ltp/ltp-diff.py
index 12031a3..d0f0f9f 100644
--- a/client/tests/ltp/ltp-diff.py
+++ b/client/tests/ltp/ltp-diff.py
@@ -4,106 +4,106 @@
 # Description:
 #  Input:  Two or more files containing results from different executions of
 #          the LTP. The input can either be file names or the url location
-#          of the ltp.results file.  
+#          of the ltp.results file.
 #  Output: A report on the following:
-#	   - The total number of tests executed in each run
-#	   - The testname, sequence number, and output of each run
+#          - The total number of tests executed in each run
+#          - The testname, sequence number, and output of each run
 #            where the results of those runs differ
 #  Return:
-#	   0 if all runs had identical results
-#	   Non-zero if results differ, or bad input
+#          0 if all runs had identical results
+#          Non-zero if results differ, or bad input
 
 
 import sys, string, re
 from autotest_lib.client.common_lib import utils
 
 def usage():
-	print "\nUsage: \n\
-  ltp-diff results1 results2 ... locationN \n\
-  Note: location[1,2,N] may be local files or URLs of LTP results\n"
-	sys.exit(1)
+    print "\nUsage: \n\
+ltp-diff results1 results2 ... locationN \n\
+Note: location[1,2,N] may be local files or URLs of LTP results\n"
+    sys.exit(1)
 
 def get_results(results_files):
-	"""
-	Download the results if needed.
-	Return results of each run in a numerically-indexed dictionary
-	of dictionaries keyed on testnames.
-	Return dictionary keyed on unique testnames across all runs.
-	"""
-	r = re.compile('(\S+\s+\S+)\s+(\S+)\s+:')
-	i = 0
-	runs = {}
-	testnames = {}
-	for file in results_files:
-		runs[i] = {}
-		try:
-			fh = utils.urlopen(file)
-			results = fh.readlines()
-			fh.close()
-		except:
-			print "ERROR: reading results resource [%s]" % (file)
-			usage()
-		for line in results:
-			try:
-				s = r.match(line)
-				testname = s.group(1)
-				status = s.group(2)
-				runs[i][testname] = status
-				testnames[testname] = 1
-			except:
-				pass
-		i += 1
-	return (runs, testnames)
+    """
+    Download the results if needed.
+    Return results of each run in a numerically-indexed dictionary
+    of dictionaries keyed on testnames.
+    Return dictionary keyed on unique testnames across all runs.
+    """
+    r = re.compile('(\S+\s+\S+)\s+(\S+)\s+:')
+    i = 0
+    runs = {}
+    testnames = {}
+    for file in results_files:
+        runs[i] = {}
+        try:
+            fh = utils.urlopen(file)
+            results = fh.readlines()
+            fh.close()
+        except:
+            print "ERROR: reading results resource [%s]" % (file)
+            usage()
+        for line in results:
+            try:
+                s = r.match(line)
+                testname = s.group(1)
+                status = s.group(2)
+                runs[i][testname] = status
+                testnames[testname] = 1
+            except:
+                pass
+        i += 1
+    return (runs, testnames)
 
 
 
 def compare_results(runs):
-	""" 
-	Loop through all testnames alpahbetically.
-	Print any testnames with differing results across runs.
-	Return 1 if any test results across runs differ.
-	Return 0 if all test results match.
-	"""
-	rc = 0
-	print "LTP Test Results to Compare"
-	for i in range(len(runs)):
-		print "  Run[%d]: %d" % (i, len(runs[i].keys()))
-	print ""
-	header = 0
-	all_testnames = testnames.keys()
-	all_testnames.sort()
-	for testname in all_testnames:
-		differ = 0
-		for i in range(1,len(runs)):
-			# Must handle testcases that executed in one run
-			# but not another by setting status to "null"
-			if not runs[i].has_key(testname):
-				runs[i][testname] = "null"
-			if not runs[i-1].has_key(testname):
-				runs[i-1][testname] = "null"
-			# Check for the results inconsistencies
-			if runs[i][testname] != runs[i-1][testname]:
-				differ = 1
-		if differ:
-			if header == 0:
-				# Print the differences header only once
-				print "Tests with Inconsistent Results across Runs"
-				print "  %-35s:\t%s" % ("Testname,Sequence", "Run Results")
-				header = 1
+    """
+    Loop through all testnames alphabetically.
+    Print any testnames with differing results across runs.
+    Return 1 if any test results across runs differ.
+    Return 0 if all test results match.
+    """
+    rc = 0
+    print "LTP Test Results to Compare"
+    for i in range(len(runs)):
+        print "  Run[%d]: %d" % (i, len(runs[i].keys()))
+    print ""
+    header = 0
+    all_testnames = testnames.keys()
+    all_testnames.sort()
+    for testname in all_testnames:
+        differ = 0
+        for i in range(1,len(runs)):
+            # Must handle testcases that executed in one run
+            # but not another by setting status to "null"
+            if not runs[i].has_key(testname):
+                runs[i][testname] = "null"
+            if not runs[i-1].has_key(testname):
+                runs[i-1][testname] = "null"
+            # Check for the results inconsistencies
+            if runs[i][testname] != runs[i-1][testname]:
+                differ = 1
+        if differ:
+            if header == 0:
+                # Print the differences header only once
+                print "Tests with Inconsistent Results across Runs"
+                print "  %-35s:\t%s" % ("Testname,Sequence", "Run Results")
+                header = 1
 
-			# Print info if results differ
-			rc = 1
-			testname_cleaned = re.sub('\s+', ',', testname)
-			print "  %-35s:\t" % (testname_cleaned),
-			all_results = ""
-			for i in range(len(runs)):
-				all_results += runs[i][testname]
-				if i+1<len(runs):
-					all_results += "/"
-			print all_results
-	if rc == 0:
-		print "All LTP results are identical"	
-	return rc
+            # Print info if results differ
+            rc = 1
+            testname_cleaned = re.sub(r'\s+', ',', testname)
+            print "  %-35s:\t" % (testname_cleaned),
+            all_results = ""
+            for i in range(len(runs)):
+                all_results += runs[i][testname]
+                if i+1<len(runs):
+                    all_results += "/"
+            print all_results
+    if rc == 0:
+        print "All LTP results are identical"
+    return rc
 
 
 ########
@@ -111,7 +111,7 @@
 ########
 sys.argv.pop(0)
 if (len(sys.argv) < 2):
-	usage()
+    usage()
 (runs, testnames) = get_results(sys.argv)
 rc = compare_results(runs)
 sys.exit(rc)
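
Note: for reference, a minimal sketch of the data shapes get_results() hands to
compare_results() (the run indices, testnames and statuses below are
illustrative, not taken from a real LTP log):

    # one dict per results file, keyed by run index
    runs = {
        0: {'math float_bessel': 'PASS', 'mm mmap01': 'PASS'},
        1: {'math float_bessel': 'FAIL'},   # mmap01 did not execute here
    }
    # union of every testname seen across all runs
    testnames = {'math float_bessel': 1, 'mm mmap01': 1}

    # compare_results() pads missing entries with "null" and prints each
    # testname whose statuses differ, e.g.
    #   math,float_bessel:    PASS/FAIL
    #   mm,mmap01:            PASS/null
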
diff --git a/client/tests/ltp/ltp.py b/client/tests/ltp/ltp.py
index 043ac00..2417f46 100755
--- a/client/tests/ltp/ltp.py
+++ b/client/tests/ltp/ltp.py
@@ -3,46 +3,46 @@
 from autotest_lib.client.common_lib import utils, error
 
 class ltp(test.test):
-	version = 4
+    version = 4
 
-	# http://prdownloads.sourceforge.net/ltp/ltp-full-20080229.tgz
-	def setup(self, tarball = 'ltp-full-20080229.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://prdownloads.sourceforge.net/ltp/ltp-full-20080229.tgz
+    def setup(self, tarball = 'ltp-full-20080229.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('patch -p1 < ../ltp.patch')
+        utils.system('patch -p1 < ../ltp.patch')
 
-		# comment the capability tests if we fail to load the capability module
-		try:
-			utils.system('modprobe capability')
-		except error.CmdError, detail:
-			utils.system('patch -p1 < ../ltp_capability.patch')
+        # comment the capability tests if we fail to load the capability module
+        try:
+            utils.system('modprobe capability')
+        except error.CmdError, detail:
+            utils.system('patch -p1 < ../ltp_capability.patch')
 
-		utils.system('cp ../scan.c pan/')   # saves having lex installed
-		utils.system('make -j %d' % autotest_utils.count_cpus())
-		utils.system('yes n | make install')
+        utils.system('cp ../scan.c pan/')   # saves having lex installed
+        utils.system('make -j %d' % autotest_utils.count_cpus())
+        utils.system('yes n | make install')
 
 
-	# Note: to run a specific test, try '-f cmdfile -s test' in the
-	# in the args (-f for test file and -s for the test case)
-	# eg, job.run_test('ltp', '-f math -s float_bessel')
-	def execute(self, args = '', script = 'runltp'):
+    # Note: to run a specific test, try '-f cmdfile -s test'
+    # in the args (-f for the test file and -s for the test case)
+    # eg, job.run_test('ltp', '-f math -s float_bessel')
+    def execute(self, args = '', script = 'runltp'):
 
-		# In case the user wants to run another test script
-		if script == 'runltp':
-			logfile = os.path.join(self.resultsdir, 'ltp.log')
-			failcmdfile = os.path.join(self.debugdir, 'failcmdfile')
-			args2 = '-q -l %s -C %s -d %s' % (logfile, failcmdfile, self.tmpdir)
-			args = args + ' ' + args2
+        # In case the user wants to run another test script
+        if script == 'runltp':
+            logfile = os.path.join(self.resultsdir, 'ltp.log')
+            failcmdfile = os.path.join(self.debugdir, 'failcmdfile')
+            args2 = '-q -l %s -C %s -d %s' % (logfile, failcmdfile, self.tmpdir)
+            args = args + ' ' + args2
 
-		cmd = os.path.join(self.srcdir, script) + ' ' + args
+        cmd = os.path.join(self.srcdir, script) + ' ' + args
 
-		profilers = self.job.profilers
-		if profilers.present():
-			profilers.start(self)
-		utils.system(cmd)
-		if profilers.present():
-			profilers.stop(self)
-			profilers.report(self)
+        profilers = self.job.profilers
+        if profilers.present():
+            profilers.start(self)
+        utils.system(cmd)
+        if profilers.present():
+            profilers.stop(self)
+            profilers.report(self)
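
Note: per the comment in execute() above, a control file can run either the
whole suite or a single testcase (the float_bessel example is the one from that
comment):

    # full default LTP run
    job.run_test('ltp')

    # a single testcase: -f picks the command file, -s the test
    job.run_test('ltp', '-f math -s float_bessel')
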
diff --git a/client/tests/netperf2/control.client b/client/tests/netperf2/control.client
index bfff402..7c2ecac 100644
--- a/client/tests/netperf2/control.client
+++ b/client/tests/netperf2/control.client
@@ -1,2 +1 @@
 job.run_test('netperf2', '10.10.1.2', '10.10.1.6', 'client', tag='client')
-
diff --git a/client/tests/netperf2/control.parallel b/client/tests/netperf2/control.parallel
index c845879..8414a3a 100644
--- a/client/tests/netperf2/control.parallel
+++ b/client/tests/netperf2/control.parallel
@@ -1,7 +1,7 @@
 def client():
-	job.run_test('netperf2', '127.0.0.1', '127.0.0.1', 'client', tag='client')
+    job.run_test('netperf2', '127.0.0.1', '127.0.0.1', 'client', tag='client')
 
 def server():
-	job.run_test('netperf2', '127.0.0.1', '127.0.0.1', 'server', tag='server')
+    job.run_test('netperf2', '127.0.0.1', '127.0.0.1', 'server', tag='server')
 
 job.parallel([server], [client])
diff --git a/client/tests/netperf2/netperf2.py b/client/tests/netperf2/netperf2.py
index 1f7fb66..e01f815 100755
--- a/client/tests/netperf2/netperf2.py
+++ b/client/tests/netperf2/netperf2.py
@@ -4,75 +4,75 @@
 
 
 class netperf2(test.test):
-	version = 1
+    version = 1
 
-	# ftp://ftp.netperf.org/netperf/netperf-2.4.1.tar.gz
-	def setup(self, tarball = 'netperf-2.4.1.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # ftp://ftp.netperf.org/netperf/netperf-2.4.1.tar.gz
+    def setup(self, tarball = 'netperf-2.4.1.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('./configure')
-		utils.system('make')
+        utils.system('./configure')
+        utils.system('make')
 
 
-	def initialize(self):
-		# netserver doesn't detach properly from the console. When 
-		# it is run from ssh, this causes the ssh command not to 
-		# return even though netserver meant to be backgrounded.
-		# This behavior is remedied by redirecting fd 0, 1 & 2
-		self.server_path = ('%s &>/dev/null </dev/null' 
-			% os.path.join(self.srcdir, 'src/netserver'))
-		self.client_path = os.path.join(self.srcdir, 'src/netperf')
+    def initialize(self):
+        # netserver doesn't detach properly from the console. When
+        # it is run from ssh, this causes the ssh command not to
+        # return even though netserver is meant to be backgrounded.
+        # This behavior is remedied by redirecting fds 0, 1 and 2.
+        self.server_path = ('%s &>/dev/null </dev/null'
+                % os.path.join(self.srcdir, 'src/netserver'))
+        self.client_path = os.path.join(self.srcdir, 'src/netperf')
 
 
-	def execute(self, server_ip, client_ip, role, 
-					script='snapshot_script', args=''):
-		server_tag = server_ip + '#netperf-server'
-		client_tag = client_ip + '#netperf-client'
-		all = [server_tag, client_tag]
-		job = self.job
-		if (role == 'server'):
-			self.server_start()
-			try:
-				job.barrier(server_tag, 'start',
-							600).rendevous(*all)
-				job.barrier(server_tag, 'stop',
-							3600).rendevous(*all)
-			finally:
-				self.server_stop()
-		elif (role == 'client'):
-			os.environ['NETPERF_CMD'] = self.client_path
-			job.barrier(client_tag, 'start', 600).rendevous(*all)
-			self.client(script, server_ip, args)
-			job.barrier(client_tag, 'stop',  30).rendevous(*all)
-		else:
-			raise error.UnhandledError('invalid role specified')
+    def execute(self, server_ip, client_ip, role,
+                                    script='snapshot_script', args=''):
+        server_tag = server_ip + '#netperf-server'
+        client_tag = client_ip + '#netperf-client'
+        all = [server_tag, client_tag]
+        job = self.job
+        if (role == 'server'):
+            self.server_start()
+            try:
+                job.barrier(server_tag, 'start',
+                                        600).rendevous(*all)
+                job.barrier(server_tag, 'stop',
+                                        3600).rendevous(*all)
+            finally:
+                self.server_stop()
+        elif (role == 'client'):
+            os.environ['NETPERF_CMD'] = self.client_path
+            job.barrier(client_tag, 'start', 600).rendevous(*all)
+            self.client(script, server_ip, args)
+            job.barrier(client_tag, 'stop',  30).rendevous(*all)
+        else:
+            raise error.UnhandledError('invalid role specified')
 
 
-	def server_start(self):
-		# we should really record the pid we forked off, but there
-		# was no obvious way to run the daemon in the foreground.
-		# Hacked it for now
-		system('killall netserver', ignore_status=True)
-		system(self.server_path)
+    def server_start(self):
+        # we should really record the pid we forked off, but there
+        # was no obvious way to run the daemon in the foreground.
+        # Hacked it for now
+        system('killall netserver', ignore_status=True)
+        system(self.server_path)
 
 
-	def server_stop(self):
-		# this should really just kill the pid I forked, but ...
-		system('killall netserver')
+    def server_stop(self):
+        # this should really just kill the pid I forked, but ...
+        system('killall netserver')
 
 
-	def client(self, script, server_ip, args = 'CPU'):
-		# run some client stuff
-		stdout_path = os.path.join(self.resultsdir, script + '.stdout')
-		stderr_path = os.path.join(self.resultsdir, script + '.stderr')
-		self.job.stdout.tee_redirect(stdout_path)
-		self.job.stderr.tee_redirect(stderr_path)
+    def client(self, script, server_ip, args = 'CPU'):
+        # run some client stuff
+        stdout_path = os.path.join(self.resultsdir, script + '.stdout')
+        stderr_path = os.path.join(self.resultsdir, script + '.stderr')
+        self.job.stdout.tee_redirect(stdout_path)
+        self.job.stderr.tee_redirect(stderr_path)
 
-		script_path = os.path.join(self.srcdir, 'doc/examples', script)
-		system('%s %s %s' % (script_path, server_ip, args))
+        script_path = os.path.join(self.srcdir, 'doc/examples', script)
+        system('%s %s %s' % (script_path, server_ip, args))
 
-		self.job.stdout.restore()
-		self.job.stderr.restore()
+        self.job.stdout.restore()
+        self.job.stderr.restore()
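
Note: execute() above is a small two-party protocol: each role lists the full
membership, meets the other side at a 'start' barrier before traffic flows, and
again at a 'stop' barrier before the server is torn down (the server's 3600s
'stop' timeout presumably has to span the whole client run, while the client's
30s only covers the final handshake). A sketch of the client half, with the
tags and timeouts from the code:

    all = [server_tag, client_tag]          # both sides must name everyone
    job.barrier(client_tag, 'start', 600).rendevous(*all)  # server is up
    self.client(script, server_ip, args)                    # run the load
    job.barrier(client_tag, 'stop', 30).rendevous(*all)     # let server exit
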
diff --git a/client/tests/parallel_dd/parallel_dd.py b/client/tests/parallel_dd/parallel_dd.py
index a524e6d..5d92667 100755
--- a/client/tests/parallel_dd/parallel_dd.py
+++ b/client/tests/parallel_dd/parallel_dd.py
@@ -4,119 +4,118 @@
 
 
 class parallel_dd(test.test):
-	version = 1
+    version = 1
 
 
-	def raw_write(self):
-		print "Timing raw write of %d megabytes" % self.megabytes
-		sys.stdout.flush()
-		dd = 'dd if=/dev/zero of=%s bs=4K count=%d' % \
-						(self.fs.device, self.blocks)
-		print dd
-		utils.system(dd + ' > /dev/null')
+    def raw_write(self):
+        print "Timing raw write of %d megabytes" % self.megabytes
+        sys.stdout.flush()
+        dd = 'dd if=/dev/zero of=%s bs=4K count=%d' % \
+                                        (self.fs.device, self.blocks)
+        print dd
+        utils.system(dd + ' > /dev/null')
 
 
-	def raw_read(self):
-		print "Timing raw read of %d megabytes" % self.megabytes
-		sys.stdout.flush()
-		dd = 'dd if=%s of=/dev/null bs=4K count=%d' % \
-						(self.fs.device, self.blocks)
-		print dd
-		utils.system(dd + ' > /dev/null')
+    def raw_read(self):
+        print "Timing raw read of %d megabytes" % self.megabytes
+        sys.stdout.flush()
+        dd = 'dd if=%s of=/dev/null bs=4K count=%d' % \
+                                        (self.fs.device, self.blocks)
+        print dd
+        utils.system(dd + ' > /dev/null')
 
 
-	def fs_write(self):
-		p = []
-		# Write out 'streams' files in parallel background tasks
-		for i in range(self.streams):
-			file = 'poo%d' % (i+1)
-			file = os.path.join(self.job.tmpdir, file)
-			dd = 'dd if=/dev/zero of=%s bs=4K count=%d' % \
-						(file, self.blocks_per_file)
-			print dd
-			p.append(subprocess.Popen(dd + ' > /dev/null',
-			                          shell=True))
-		print "Waiting for %d streams" % self.streams
-		# Wait for everyone to complete
-		for i in range(self.streams):
-			print "Waiting for %d" % p[i].pid
-			sys.stdout.flush()
-			os.waitpid(p[i].pid, 0)
-		sys.stdout.flush()
-		sys.stderr.flush()
+    def fs_write(self):
+        p = []
+        # Write out 'streams' files in parallel background tasks
+        for i in range(self.streams):
+            file = 'poo%d' % (i+1)
+            file = os.path.join(self.job.tmpdir, file)
+            dd = 'dd if=/dev/zero of=%s bs=4K count=%d' % \
+                                    (file, self.blocks_per_file)
+            print dd
+            p.append(subprocess.Popen(dd + ' > /dev/null',
+                                      shell=True))
+        print "Waiting for %d streams" % self.streams
+        # Wait for everyone to complete
+        for i in range(self.streams):
+            print "Waiting for %d" % p[i].pid
+            sys.stdout.flush()
+            os.waitpid(p[i].pid, 0)
+        sys.stdout.flush()
+        sys.stderr.flush()
 
 
-	def fs_read(self):
-		for i in range(self.streams):
-			file = os.path.join(self.job.tmpdir, 'poo%d' % (i+1))
-			dd = 'dd if=%s of=/dev/null bs=4K count=%d' % \
-						(file, self.blocks_per_file)
-			utils.system(dd + ' > /dev/null')
+    def fs_read(self):
+        for i in range(self.streams):
+            file = os.path.join(self.job.tmpdir, 'poo%d' % (i+1))
+            dd = 'dd if=%s of=/dev/null bs=4K count=%d' % \
+                                    (file, self.blocks_per_file)
+            utils.system(dd + ' > /dev/null')
 
 
-	def test(self, tag):
-		start = time.time()
-		self.raw_write()
-		self.raw_write_rate = self.megabytes / (time.time() - start)
+    def test(self, tag):
+        start = time.time()
+        self.raw_write()
+        self.raw_write_rate = self.megabytes / (time.time() - start)
 
-		start = time.time()
-		self.raw_read()
-		self.raw_read_rate = self.megabytes / (time.time() - start)
+        start = time.time()
+        self.raw_read()
+        self.raw_read_rate = self.megabytes / (time.time() - start)
 
-		self.fs.mkfs(self.fstype)
-		self.fs.mount()
-		start = time.time()
-		try:
-			self.fs_write()
-		except:
-			try:
-				self.fs.unmount()
-			finally:
-				raise
-		self.fs.unmount()
-		self.fs_write_rate = self.megabytes / (time.time() - start)
+        self.fs.mkfs(self.fstype)
+        self.fs.mount()
+        start = time.time()
+        try:
+            self.fs_write()
+        except:
+            try:
+                self.fs.unmount()
+            finally:
+                raise
+        self.fs.unmount()
+        self.fs_write_rate = self.megabytes / (time.time() - start)
 
-		self.fs.mount()
-		start = time.time()
-		try:
-			self.fs_read()
-		except:
-			try:
-				self.fs.unmount()
-			finally:
-				raise
-			read_in()
-		self.fs_read_rate = self.megabytes / (time.time() - start)
-		self.fs.unmount()
+        self.fs.mount()
+        start = time.time()
+        try:
+            self.fs_read()
+        except:
+            try:
+                self.fs.unmount()
+            finally:
+                raise
+        self.fs_read_rate = self.megabytes / (time.time() - start)
+        self.fs.unmount()
 
 
-	def execute(self, fs, fstype = 'ext2', iterations = 2, megabytes = 1000, streams = 2):
-		self.megabytes = megabytes
-		self.blocks = megabytes * 256
-		self.blocks_per_file = self.blocks / streams
-		self.fs = fs
-		self.fstype = fstype
-		self.streams = streams
-		
-		print "Dumping %d megabytes across %d streams, %d times" % \
-						(megabytes, streams, iterations)
+    def execute(self, fs, fstype = 'ext2', iterations = 2, megabytes = 1000, streams = 2):
+        self.megabytes = megabytes
+        self.blocks = megabytes * 256
+        self.blocks_per_file = self.blocks / streams
+        self.fs = fs
+        self.fstype = fstype
+        self.streams = streams
 
-		keyval = open(os.path.join(self.resultsdir, 'keyval'), 'w')
-		for i in range(iterations):
-			self.test('%d' % i)
-			t = "raw_write=%d\n" % self.raw_write_rate
-			t += "raw_read=%d\n" % self.raw_read_rate
-			t += "fs_write=%d\n" % self.fs_write_rate
-			t += "fs_read=%d\n" % self.fs_read_rate
-			t += "\n"
-			print t
-			keyval.write(t)
-		keyval.close()
+        print "Dumping %d megabytes across %d streams, %d times" % \
+                                        (megabytes, streams, iterations)
+
+        keyval = open(os.path.join(self.resultsdir, 'keyval'), 'w')
+        for i in range(iterations):
+            self.test('%d' % i)
+            t = "raw_write=%d\n" % self.raw_write_rate
+            t += "raw_read=%d\n" % self.raw_read_rate
+            t += "fs_write=%d\n" % self.fs_write_rate
+            t += "fs_read=%d\n" % self.fs_read_rate
+            t += "\n"
+            print t
+            keyval.write(t)
+        keyval.close()
 
 
-		profilers = self.job.profilers
-		if profilers.present():
-			profilers.start(self)
-			self.test('profile')
-			profilers.stop(self)
-			profilers.report(self)
+        profilers = self.job.profilers
+        if profilers.present():
+            profilers.start(self)
+            self.test('profile')
+            profilers.stop(self)
+            profilers.report(self)
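
Note: the block arithmetic in execute() is all in 4 KB dd blocks: one megabyte
is 256 of them. With the default arguments above:

    megabytes = 1000
    streams = 2
    blocks = megabytes * 256            # 256,000 4 KB blocks = 1000 MB
    blocks_per_file = blocks / streams  # 128,000 blocks per stream file
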
diff --git a/client/tests/pi_tests/pi_tests.py b/client/tests/pi_tests/pi_tests.py
index 54872f4..7f3fcf2 100644
--- a/client/tests/pi_tests/pi_tests.py
+++ b/client/tests/pi_tests/pi_tests.py
@@ -4,19 +4,19 @@
 
 
 class pi_tests(test.test):
-	version = 1
+    version = 1
 
-	# http://www.stardust.webpages.pl/files/patches/autotest/pi_tests.tar.bz2
+    # http://www.stardust.webpages.pl/files/patches/autotest/pi_tests.tar.bz2
 
-	def setup(self, tarball = 'pi_tests.tar.bz2'):
-		autotest_utils.check_glibc_ver('2.5')
-		tarball = autotest_utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    def setup(self, tarball = 'pi_tests.tar.bz2'):
+        autotest_utils.check_glibc_ver('2.5')
+        tarball = autotest_utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
+        utils.system('make')
 
-	def execute(self, args = '1 300'):
-		os.chdir(self.srcdir)
-		utils.system('./start.sh ' + args)
+    def execute(self, args = '1 300'):
+        os.chdir(self.srcdir)
+        utils.system('./start.sh ' + args)
diff --git a/client/tests/pktgen/pktgen.py b/client/tests/pktgen/pktgen.py
index c70cb43..aaef41d 100755
--- a/client/tests/pktgen/pktgen.py
+++ b/client/tests/pktgen/pktgen.py
@@ -4,51 +4,50 @@
 
 
 class pktgen(test.test):
-	version = 1
+    version = 1
 
-	def execute(self, eth='eth0', count=50000, clone_skb=1, \
-			dst_ip='192.168.210.210', dst_mac='01:02:03:04:05:07'):
-		if not os.path.exists('/proc/net/pktgen'):
-			utils.system('/sbin/modprobe pktgen')
-		if not os.path.exists('/proc/net/pktgen'):
-			raise error.UnhandledError('pktgen not loaded')
+    def execute(self, eth='eth0', count=50000, clone_skb=1, \
+                    dst_ip='192.168.210.210', dst_mac='01:02:03:04:05:07'):
+        if not os.path.exists('/proc/net/pktgen'):
+            utils.system('/sbin/modprobe pktgen')
+        if not os.path.exists('/proc/net/pktgen'):
+            raise error.UnhandledError('pktgen not loaded')
 
-		print 'Adding devices to run'
-		self.pgdev = '/proc/net/pktgen/kpktgend_0'
+        print 'Adding devices to run'
+        self.pgdev = '/proc/net/pktgen/kpktgend_0'
 
-		self.pgset('rem_device_all')
-		self.pgset('add_device ' + eth)
-		self.pgset('max_before_softirq 10000')
+        self.pgset('rem_device_all')
+        self.pgset('add_device ' + eth)
+        self.pgset('max_before_softirq 10000')
 
-		# Configure the individual devices
-		print 'Configuring devices'
+        # Configure the individual devices
+        print 'Configuring devices'
 
-		self.ethdev='/proc/net/pktgen/' + eth
-		self.pgdev=self.ethdev
+        self.ethdev='/proc/net/pktgen/' + eth
+        self.pgdev=self.ethdev
 
-		if clone_skb:
-			self.pgset('clone_skb %d' % (count))
-		self.pgset('min_pkt_size 60')
-		self.pgset('max_pkt_size 60')
-		self.pgset('dst ' + dst_ip)
-		self.pgset('dst_mac ' + dst_mac)
-		self.pgset('count %d' % (count))
+        if clone_skb:
+            self.pgset('clone_skb %d' % (count))
+        self.pgset('min_pkt_size 60')
+        self.pgset('max_pkt_size 60')
+        self.pgset('dst ' + dst_ip)
+        self.pgset('dst_mac ' + dst_mac)
+        self.pgset('count %d' % (count))
 
-		# Time to run
-		self.pgdev='/proc/net/pktgen/pgctrl'
-		self.pgset('start')
+        # Time to run
+        self.pgdev='/proc/net/pktgen/pgctrl'
+        self.pgset('start')
 
-		output = os.path.join(self.resultsdir, eth)
-		utils.system ('cp %s %s' % (self.ethdev, output))
+        output = os.path.join(self.resultsdir, eth)
+        utils.system('cp %s %s' % (self.ethdev, output))
 
 
-	def pgset(self, command):
-		file = open(self.pgdev, 'w')
-		file.write(command + '\n');
-		file.close
+    def pgset(self, command):
+        file = open(self.pgdev, 'w')
+        file.write(command + '\n')
+        file.close()
 
-		if not autotest_utils.grep('Result: OK', self.pgdev):
-			if not autotest_utils.grep('Result: NA', self.pgdev):
-				utils.system('cat ' + self.pgdev)
-				# raise UnhandledError('Result not OK')
-
+        if not autotest_utils.grep('Result: OK', self.pgdev):
+            if not autotest_utils.grep('Result: NA', self.pgdev):
+                utils.system('cat ' + self.pgdev)
+                # raise UnhandledError('Result not OK')
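
Note: pgset() is the standard pktgen control idiom: write one command per line
into a /proc/net/pktgen control file, then read the same file back and look for
the 'Result:' line. The same idiom standalone (device and interface names are
illustrative; needs root and the pktgen module loaded):

    def pgset(pgdev, command):
        # pktgen executes the command as soon as the line is written
        f = open(pgdev, 'w')
        f.write(command + '\n')
        f.close()

    pgset('/proc/net/pktgen/kpktgend_0', 'add_device eth0')
    pgset('/proc/net/pktgen/eth0', 'count 50000')
    pgset('/proc/net/pktgen/pgctrl', 'start')
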
diff --git a/client/tests/posixtest/posixtest.py b/client/tests/posixtest/posixtest.py
index e11989a..adec69c 100755
--- a/client/tests/posixtest/posixtest.py
+++ b/client/tests/posixtest/posixtest.py
@@ -8,22 +8,21 @@
 __author__ = '''[email protected] (Mohammed Omar)'''
 
 class posixtest(test.test):
-	version = 1
-	# http://ufpr.dl.sourceforge.net/sourceforge/posixtest/posixtestsuite-1.5.2.tar.gz
-	def setup(self, tarball = 'posixtestsuite-1.5.2.tar.gz'):
-		self.posix_tarball = utils.unmap_url(self.bindir,
-		                                              tarball,
-		                                              self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(self.posix_tarball,
-		                                      self.srcdir)
-		os.chdir(self.srcdir)
-		# Applying a small patch that introduces some linux specific
-		# linking options
-		utils.system('patch -p1 < ../posix-linux.patch')
-		utils.system('make')
+    version = 1
+    # http://ufpr.dl.sourceforge.net/sourceforge/posixtest/posixtestsuite-1.5.2.tar.gz
+    def setup(self, tarball = 'posixtestsuite-1.5.2.tar.gz'):
+        self.posix_tarball = utils.unmap_url(self.bindir,
+                                                      tarball,
+                                                      self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(self.posix_tarball,
+                                              self.srcdir)
+        os.chdir(self.srcdir)
+        # Applying a small patch that introduces some linux specific
+        # linking options
+        utils.system('patch -p1 < ../posix-linux.patch')
+        utils.system('make')
 
 
-	def execute(self):
-		os.chdir(self.srcdir)
-		utils.system('./run_tests THR')
-
+    def execute(self):
+        os.chdir(self.srcdir)
+        utils.system('./run_tests THR')
diff --git a/client/tests/raisetest/raisetest.py b/client/tests/raisetest/raisetest.py
index 1c968b4..e966585 100755
--- a/client/tests/raisetest/raisetest.py
+++ b/client/tests/raisetest/raisetest.py
@@ -3,7 +3,7 @@
 
 
 class raisetest(test.test):
-	version = 1
+    version = 1
 
-	def execute(self):
-		raise error.TestError('Arrrrrrrrggggh. You are DOOOMED')
+    def execute(self):
+        raise error.TestError('Arrrrrrrrggggh. You are DOOOMED')
diff --git a/client/tests/reaim/reaim.py b/client/tests/reaim/reaim.py
index 7e43723..9a5e832 100755
--- a/client/tests/reaim/reaim.py
+++ b/client/tests/reaim/reaim.py
@@ -5,87 +5,87 @@
 
 
 class reaim(test.test):
-	version = 1
+    version = 1
 
-	# http://prdownloads.sourceforge.net/re-aim-7/osdl-aim-7.0.1.13.tar.gz
-	def setup(self, tarball = 'osdl-aim-7.0.1.13.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+    # http://prdownloads.sourceforge.net/re-aim-7/osdl-aim-7.0.1.13.tar.gz
+    def setup(self, tarball = 'osdl-aim-7.0.1.13.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
 
-		self.job.setup_dep(['libaio'])
-		libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
-		cflags = '-I ' + self.autodir + '/deps/libaio/include'
-		var_libs = 'LIBS="' + libs + '"'
-		var_cflags  = 'CFLAGS="' + cflags + '"'
-		self.make_flags = var_libs + ' ' + var_cflags
+        self.job.setup_dep(['libaio'])
+        libs = '-L' + self.autodir + '/deps/libaio/lib -laio'
+        cflags = '-I ' + self.autodir + '/deps/libaio/include'
+        var_libs = 'LIBS="' + libs + '"'
+        var_cflags  = 'CFLAGS="' + cflags + '"'
+        self.make_flags = var_libs + ' ' + var_cflags
 
-		os_dep.commands('autoconf', 'automake', 'libtoolize')
-		os.chdir(self.srcdir)
-		utils.system('./bootstrap')
-		utils.system('./configure')
-		# we can't use patch here, as the Makefile is autogenerated
-		# so we can't tell exactly what it looks like.
-		# Perform some foul in-place sed hackery instead.
-		for file in ('Makefile', 'src/Makefile'):
-			utils.system('sed -i "s/^CFLAGS =/CFLAGS +=/" ' + file)
-			utils.system('sed -i "s/^LIBS =/LIBS +=/" ' + file)
-		utils.system(self.make_flags + ' make')
-		os.rename('src/reaim', 'reaim')
+        os_dep.commands('autoconf', 'automake', 'libtoolize')
+        os.chdir(self.srcdir)
+        utils.system('./bootstrap')
+        utils.system('./configure')
+        # we can't use patch here, as the Makefile is autogenerated
+        # so we can't tell exactly what it looks like.
+        # Perform some foul in-place sed hackery instead.
+        for file in ('Makefile', 'src/Makefile'):
+            utils.system('sed -i "s/^CFLAGS =/CFLAGS +=/" ' + file)
+            utils.system('sed -i "s/^LIBS =/LIBS +=/" ' + file)
+        utils.system(self.make_flags + ' make')
+        os.rename('src/reaim', 'reaim')
 
 
-	def initialize(self):
-		self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
+    def initialize(self):
+        self.ldlib = 'LD_LIBRARY_PATH=%s/deps/libaio/lib'%(self.autodir)
 
 
-	def execute(self, iterations = 1, workfile = 'workfile.short', 
-			start = 1, end = 10, increment = 2,
-			extra_args = '', tmpdir = None):
-		if not tmpdir:
-			tmpdir = self.tmpdir
+    def execute(self, iterations = 1, workfile = 'workfile.short',
+                    start = 1, end = 10, increment = 2,
+                    extra_args = '', tmpdir = None):
+        if not tmpdir:
+            tmpdir = self.tmpdir
 
-		# -f workfile
-		# -s <number of users to start with>
-		# -e <number of users to end with>
-		# -i <number of users to increment>
-		workfile = os.path.join('data', workfile)
-		args = "-f %s -s %d -e %d -i %d" %(workfile,start,end,increment)
-		config = os.path.join(self.srcdir, 'reaim.config')
-		utils.system('cp -f %s/reaim.config %s' % (self.bindir, config))
-		args += ' -c ./reaim.config'
-		open(config, 'a+').write("DISKDIR %s\n" % (tmpdir))
-		os.chdir(self.srcdir)
-		print os.getcwd()
-		cmd = self.ldlib + ' ./reaim ' + args + ' ' + extra_args
+        # -f workfile
+        # -s <number of users to start with>
+        # -e <number of users to end with>
+        # -i <number of users to increment>
+        workfile = os.path.join('data', workfile)
+        args = "-f %s -s %d -e %d -i %d" %(workfile,start,end,increment)
+        config = os.path.join(self.srcdir, 'reaim.config')
+        utils.system('cp -f %s/reaim.config %s' % (self.bindir, config))
+        args += ' -c ./reaim.config'
+        open(config, 'a+').write("DISKDIR %s\n" % (tmpdir))
+        os.chdir(self.srcdir)
+        print os.getcwd()
+        cmd = self.ldlib + ' ./reaim ' + args + ' ' + extra_args
 
-		results = []
+        results = []
 
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				results.append(utils.system_output(cmd,
-							retain_output=True))
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                results.append(utils.system_output(cmd,
+                                        retain_output=True))
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			resuls.append(utils.system_output(cmd,
-			                                  retain_output=True))
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            results.append(utils.system_output(cmd,
+                                               retain_output=True))
+            profilers.stop(self)
+            profilers.report(self)
 
-		self.__format_results("\n".join(results))
+        self.__format_results("\n".join(results))
 
 
-	def __format_results(self, results):
-		out = open(self.resultsdir + '/keyval', 'w')
-		for line in results.split('\n'):
-			m = re.match('Max Jobs per Minute (\d+)', line)
-			if m:
-				max_jobs_per_min = m.group(1)
-			if re.match(r"^[0-9\. ]+$", line):
-				fields = line.split()
-		print >> out, """\
+    def __format_results(self, results):
+        out = open(self.resultsdir + '/keyval', 'w')
+        for line in results.split('\n'):
+            m = re.match(r'Max Jobs per Minute (\d+)', line)
+            if m:
+                max_jobs_per_min = m.group(1)
+            if re.match(r"^[0-9\. ]+$", line):
+                fields = line.split()
+        print >> out, """\
 max_jobs_per_min=%s
 num_forked=%s
 parent_time=%s
@@ -97,4 +97,4 @@
 std_dev_pct=%s
 jti=%s
 """ % tuple([max_jobs_per_min] + fields)
-		out.close()
+        out.close()
diff --git a/client/tests/rmaptest/rmaptest.py b/client/tests/rmaptest/rmaptest.py
index f4cf3af..9b65547 100644
--- a/client/tests/rmaptest/rmaptest.py
+++ b/client/tests/rmaptest/rmaptest.py
@@ -5,24 +5,24 @@
 
 # tests is a simple array of "cmd" "arguments"
 tests = [["rmaptest", "-h -i100 -n100 -s100 -t100 -V10 -v file1.dat"],
-	 ["rmaptest", "-l -i100 -n100 -s100 -t100 -V10 -v file2.dat"],
-	 ["rmaptest", "-r -i100 -n100 -s100 -t100 -V10 -v file3.dat"],
-	]
+         ["rmaptest", "-l -i100 -n100 -s100 -t100 -V10 -v file2.dat"],
+         ["rmaptest", "-r -i100 -n100 -s100 -t100 -V10 -v file3.dat"],
+        ]
 name = 0
 arglist = 1
 
 class rmaptest(test.test):
-	version = 1
-	preserve_srcdir = True
+    version = 1
+    preserve_srcdir = True
 
-	def setup(self):
-		os.chdir(self.srcdir)
-		utils.system('gcc -Wall -o rmaptest rmap-test.c')
+    def setup(self):
+        os.chdir(self.srcdir)
+        utils.system('gcc -Wall -o rmaptest rmap-test.c')
 
 
-	def execute(self, args = ''):
-		os.chdir(self.tmpdir)
-		for test in tests:
-			cmd = self.srcdir + '/' + test[name] + ' ' \
-			      + args + ' ' + test[arglist]
-			utils.system(cmd)
+    def execute(self, args = ''):
+        os.chdir(self.tmpdir)
+        for test in tests:
+            cmd = self.srcdir + '/' + test[name] + ' ' \
+                  + args + ' ' + test[arglist]
+            utils.system(cmd)
diff --git a/client/tests/rtlinuxtests/rtlinuxtests.py b/client/tests/rtlinuxtests/rtlinuxtests.py
index e6df6e3..8c27432 100644
--- a/client/tests/rtlinuxtests/rtlinuxtests.py
+++ b/client/tests/rtlinuxtests/rtlinuxtests.py
@@ -5,27 +5,27 @@
 
 # tests is a simple array of "cmd" "arguments"
 tests = [["./run.sh", "tests=func"],
-	 ["./run.sh", "tests=pi-tests"],
-	]
+         ["./run.sh", "tests=pi-tests"],
+        ]
 name = 0
 arglist = 1
 
 class rtlinuxtests(test.test):
-	version = 1
-	preserve_srcdir = True
+    version = 1
+    preserve_srcdir = True
 
-	# http://www.kernel.org/pub/linux/kernel/people/dvhart/realtime/tests/tests.tar.bz2
+    # http://www.kernel.org/pub/linux/kernel/people/dvhart/realtime/tests/tests.tar.bz2
 
-	def setup(self, tarball = 'tests.tar.bz2'):
-		autotest_utils.check_glibc_ver('2.5')
-		self.tarball = utils.unmap_url(self.bindir, tarball,
-		                                        self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
-		os.chdir(self.srcdir)
-		utils.system('patch -p1 < ../path-fix.patch')
+    def setup(self, tarball = 'tests.tar.bz2'):
+        autotest_utils.check_glibc_ver('2.5')
+        self.tarball = utils.unmap_url(self.bindir, tarball,
+                                                self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+        os.chdir(self.srcdir)
+        utils.system('patch -p1 < ../path-fix.patch')
 
-	def execute(self, args = ''):
-		os.chdir(self.srcdir)
-		for test in tests:
-			cmd = 'echo y | ' + test[name] + ' ' + args + ' ' + test[arglist]
-			utils.system(cmd)
+    def execute(self, args = ''):
+        os.chdir(self.srcdir)
+        for test in tests:
+            cmd = 'echo y | ' + test[name] + ' ' + args + ' ' + test[arglist]
+            utils.system(cmd)
diff --git a/client/tests/rttester/rttester.py b/client/tests/rttester/rttester.py
index 97cea0a..69a2e9f 100644
--- a/client/tests/rttester/rttester.py
+++ b/client/tests/rttester/rttester.py
@@ -4,15 +4,15 @@
 
 
 class rttester(test.test):
-	version = 1
+    version = 1
 
-	# http://www.stardust.webpages.pl/files/patches/autotest/rttester.tar.bz2
+    # http://www.stardust.webpages.pl/files/patches/autotest/rttester.tar.bz2
 
-	def setup(self, tarball = 'rttester.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+    def setup(self, tarball = 'rttester.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
 
-	def execute(self):
-		os.chdir(self.srcdir)
-		utils.system(self.srcdir + '/check-all.sh')
+    def execute(self):
+        os.chdir(self.srcdir)
+        utils.system(self.srcdir + '/check-all.sh')
diff --git a/client/tests/scrashme/scrashme.py b/client/tests/scrashme/scrashme.py
index 3d8b237..30ace1c 100644
--- a/client/tests/scrashme/scrashme.py
+++ b/client/tests/scrashme/scrashme.py
@@ -4,31 +4,31 @@
 
 
 class scrashme(test.test):
-	version = 1
+    version = 1
 
-	# http://www.codemonkey.org.uk/projects/git-snapshots/scrashme/scrashme-2007-07-08.tar.gz
-	def setup(self, tarball = 'scrashme-2007-07-08.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://www.codemonkey.org.uk/projects/git-snapshots/scrashme/scrashme-2007-07-08.tar.gz
+    def setup(self, tarball = 'scrashme-2007-07-08.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
-		
-	def execute(self, iterations = 1, args_list = ''):
-		if len(args_list) != 0:
-			args = '' + args_list
-		else:
-			args = '-c100 -z'
+        utils.system('make')
 
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				utils.system(self.srcdir + '/scrashme ' + args)
+    def execute(self, iterations = 1, args_list = ''):
+        if len(args_list) != 0:
+            args = '' + args_list
+        else:
+            args = '-c100 -z'
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system(self.srcdir + '/scrashme ' + args)
-			profilers.stop(self)
-			profilers.report(self)
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                utils.system(self.srcdir + '/scrashme ' + args)
+
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system(self.srcdir + '/scrashme ' + args)
+            profilers.stop(self)
+            profilers.report(self)
diff --git a/client/tests/selftest/selftest.py b/client/tests/selftest/selftest.py
index 1baecf9..825b9cf 100644
--- a/client/tests/selftest/selftest.py
+++ b/client/tests/selftest/selftest.py
@@ -4,49 +4,49 @@
 
 
 class selftest(test.test):
-	version = 1
+    version = 1
 
-	def setup(self):
-		name = self.job.resultdir + '/sequence'
-		if (not os.path.exists(name)):
-			fd = file(name, 'w')
-			fd.write('0')
-			fd.close()
-	
-	def __mark(self, checkpoint):
-		name = self.job.resultdir + '/sequence'
-		fd = file(name, 'r')
-		current = int(fd.readline())
-		fd.close()
+    def setup(self):
+        name = self.job.resultdir + '/sequence'
+        if (not os.path.exists(name)):
+            fd = file(name, 'w')
+            fd.write('0')
+            fd.close()
 
-		current += 1
-		fd = file(name + '.new', 'w')
-		fd.write('%d' % current)
-		fd.close()
+    def __mark(self, checkpoint):
+        name = self.job.resultdir + '/sequence'
+        fd = file(name, 'r')
+        current = int(fd.readline())
+        fd.close()
 
-		os.rename(name + '.new', name)
+        current += 1
+        fd = file(name + '.new', 'w')
+        fd.write('%d' % current)
+        fd.close()
 
-		print "checkpoint %d %d" % (current, checkpoint)
+        os.rename(name + '.new', name)
 
-		if (current != checkpoint):
-			raise error.JobError("selftest: sequence was " +
-				"%d when %d expected" % (current, checkpoint))
+        print "checkpoint %d %d" % (current, checkpoint)
 
-	def __throw(self):
-		__does_not_exist = __does_not_exist_either
+        if (current != checkpoint):
+            raise error.JobError("selftest: sequence was " +
+                    "%d when %d expected" % (current, checkpoint))
 
-	def __print(self, msg):
-		sys.stdout.write(msg)
+    def __throw(self):
+        __does_not_exist = __does_not_exist_either  # deliberately undefined; raises NameError
 
-	def __warn(self, msg):
-		sys.stderr.write(msg)
+    def __print(self, msg):
+        sys.stdout.write(msg)
 
-	def execute(self, cmd, *args):
-		if cmd == 'mark':
-			self.__mark(*args)
-		elif cmd == 'throw':
-			self.__throw(*args)
-		elif cmd == 'print':
-			self.__print(*args)
-		elif cmd == 'warn':
-			self.__warn(*args)
+    def __warn(self, msg):
+        sys.stderr.write(msg)
+
+    def execute(self, cmd, *args):
+        if cmd == 'mark':
+            self.__mark(*args)
+        elif cmd == 'throw':
+            self.__throw(*args)
+        elif cmd == 'print':
+            self.__print(*args)
+        elif cmd == 'warn':
+            self.__warn(*args)
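
Note: __mark() uses the write-to-temp-then-rename idiom so the sequence file is
never observed half-written; os.rename() replaces the target atomically on
POSIX filesystems. The idiom in isolation:

    import os

    def atomic_write(path, data):
        tmp = path + '.new'
        fd = open(tmp, 'w')
        fd.write(data)
        fd.close()
        os.rename(tmp, path)    # atomic replace of the old file
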
diff --git a/client/tests/signaltest/signaltest.py b/client/tests/signaltest/signaltest.py
index 9c483c9..be4e095 100644
--- a/client/tests/signaltest/signaltest.py
+++ b/client/tests/signaltest/signaltest.py
@@ -4,14 +4,14 @@
 
 
 class signaltest(test.test):
-	version = 1
-	preserve_srcdir = True
+    version = 1
+    preserve_srcdir = True
 
-	# git://git.kernel.org/pub/scm/linux/kernel/git/tglx/rt-tests.git
+    # git://git.kernel.org/pub/scm/linux/kernel/git/tglx/rt-tests.git
 
-	def setup(self):
-		os.chdir(self.srcdir)
-		utils.system('make')
+    def setup(self):
+        os.chdir(self.srcdir)
+        utils.system('make')
 
-	def execute(self, args = '-t 10 -l 100000'):
-		utils.system(self.srcdir + '/signaltest ' + args)
+    def execute(self, args = '-t 10 -l 100000'):
+        utils.system(self.srcdir + '/signaltest ' + args)
diff --git a/client/tests/sleeptest/sleeptest.py b/client/tests/sleeptest/sleeptest.py
index 76c0383..35912f1 100755
--- a/client/tests/sleeptest/sleeptest.py
+++ b/client/tests/sleeptest/sleeptest.py
@@ -2,11 +2,11 @@
 from autotest_lib.client.bin import test
 
 class sleeptest(test.test):
-	version = 1
+    version = 1
 
-	def execute(self, seconds = 1):
-		profilers = self.job.profilers
-		profilers.start(self)
-		time.sleep(seconds)
-		profilers.stop(self)
-		profilers.report(self)
+    def execute(self, seconds = 1):
+        profilers = self.job.profilers
+        profilers.start(self)
+        time.sleep(seconds)
+        profilers.stop(self)
+        profilers.report(self)
diff --git a/client/tests/sparse/sparse.py b/client/tests/sparse/sparse.py
index 3a434cd..f6ff11d 100755
--- a/client/tests/sparse/sparse.py
+++ b/client/tests/sparse/sparse.py
@@ -4,25 +4,25 @@
 
 
 class sparse(test.test):
-	version = 1
+    version = 1
 
-	# http://www.codemonkey.org.uk/projects/git-snapshots/sparse/sparse-2006-04-28.tar.gz
-	def setup(self, tarball = 'sparse-2006-04-28.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://www.codemonkey.org.uk/projects/git-snapshots/sparse/sparse-2006-04-28.tar.gz
+    def setup(self, tarball = 'sparse-2006-04-28.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
-		utils.system('ln check sparse')
-	
-		self.top_dir = self.job.tmpdir+'/sparse'	
-		
-	def execute(self, base_tree, patches, config, config_list = None):
-		kernel = self.job.kernel(base_tree, self.resultsdir)
-		kernel.patch(patches)
-		kernel.config(config, config_list)
+        utils.system('make')
+        utils.system('ln check sparse')
 
-		os.environ['PATH'] = self.srcdir + ':' + os.environ['PATH']
-		results = os.path.join (self.resultsdir, 'sparse')
-		kernel.build(make_opts = 'C=1', logfile = results) 
+        self.top_dir = self.job.tmpdir+'/sparse'
+
+    def execute(self, base_tree, patches, config, config_list = None):
+        kernel = self.job.kernel(base_tree, self.resultsdir)
+        kernel.patch(patches)
+        kernel.config(config, config_list)
+
+        os.environ['PATH'] = self.srcdir + ':' + os.environ['PATH']
+        results = os.path.join(self.resultsdir, 'sparse')
+        kernel.build(make_opts = 'C=1', logfile = results)
diff --git a/client/tests/spew/spew.py b/client/tests/spew/spew.py
index 7c04cc8..4c1cb55 100755
--- a/client/tests/spew/spew.py
+++ b/client/tests/spew/spew.py
@@ -4,41 +4,41 @@
 
 
 class spew(test.test):
-	version = 1
+    version = 1
 
-	# ftp://ftp.berlios.de/pub/spew/1.0.5/spew-1.0.5.tgz
-	def setup(self, tarball = 'spew-1.0.5.tgz'):
-		self.tarball = utils.unmap_url(self.bindir, tarball,
-		                                        self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
+    # ftp://ftp.berlios.de/pub/spew/1.0.5/spew-1.0.5.tgz
+    def setup(self, tarball = 'spew-1.0.5.tgz'):
+        self.tarball = utils.unmap_url(self.bindir, tarball,
+                                                self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(self.tarball, self.srcdir)
 
-		os.chdir(self.srcdir)
-		utils.system('./configure')
-		utils.system('make')
+        os.chdir(self.srcdir)
+        utils.system('./configure')
+        utils.system('make')
 
 
-	def execute(self, testdir = None, iterations = 1, filesize='100M', type='write', pattern='random'):
-		cmd = os.path.join(self.srcdir, 'src/spew')
-		if not testdir:
-			testdir = self.tmpdir
-		tmpfile = os.path.join(testdir, 'spew-test.%d' % os.getpid())
-		results = os.path.join(self.resultsdir, 'stdout')
-		args = '--%s -i %d -p %s -b 2k -B 2M %s %s' % \
-				(type, iterations, pattern, filesize, tmpfile)
-		cmd += ' ' + args
+    def execute(self, testdir = None, iterations = 1, filesize='100M', type='write', pattern='random'):
+        cmd = os.path.join(self.srcdir, 'src/spew')
+        if not testdir:
+            testdir = self.tmpdir
+        tmpfile = os.path.join(testdir, 'spew-test.%d' % os.getpid())
+        results = os.path.join(self.resultsdir, 'stdout')
+        args = '--%s -i %d -p %s -b 2k -B 2M %s %s' % \
+                        (type, iterations, pattern, filesize, tmpfile)
+        cmd += ' ' + args
 
-		# Do a profiling run if necessary
-		profilers = self.job.profilers
-		if profilers.present():
-			profilers.start(self)
+        # Do a profiling run if necessary
+        profilers = self.job.profilers
+        if profilers.present():
+            profilers.start(self)
 
-		open(self.resultsdir + '/command', 'w').write(cmd + '\n')
-		self.job.stdout.redirect(results)
-		try:
-			utils.system(cmd)
-		finally:
-			self.job.stdout.restore()
+        open(self.resultsdir + '/command', 'w').write(cmd + '\n')
+        self.job.stdout.redirect(results)
+        try:
+            utils.system(cmd)
+        finally:
+            self.job.stdout.restore()
 
-		if profilers.present():
-			profilers.stop(self)
-			profilers.report(self)
+        if profilers.present():
+            profilers.stop(self)
+            profilers.report(self)
diff --git a/client/tests/stress/stress.py b/client/tests/stress/stress.py
index 5714045..d668866 100644
--- a/client/tests/stress/stress.py
+++ b/client/tests/stress/stress.py
@@ -4,56 +4,56 @@
 
 
 class stress(test.test):
-	version = 1
+    version = 1
 
-	# http://weather.ou.edu/~apw/projects/stress/stress-0.18.8.tar.gz
-	def setup(self, tarball = 'stress-0.18.8.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://weather.ou.edu/~apw/projects/stress/stress-0.18.8.tar.gz
+    def setup(self, tarball = 'stress-0.18.8.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('./configure')
-		utils.system('make')
+        utils.system('./configure')
+        utils.system('make')
 
 
-	def execute(self, iterations = 1, args = ''):
-		if not args:
-			threads = 2*autotest_utils.count_cpus()
-			args = '-c %d -i %d -m %d -d %d -t 60 -v' % \
-				(threads, threads, threads, threads)
+    def execute(self, iterations = 1, args = ''):
+        if not args:
+            threads = 2*autotest_utils.count_cpus()
+            args = '-c %d -i %d -m %d -d %d -t 60 -v' % \
+                    (threads, threads, threads, threads)
 
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				utils.system(self.srcdir + '/src/stress ' + args)
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                utils.system(self.srcdir + '/src/stress ' + args)
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system(self.srcdir + '/src/stress ' + args)
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system(self.srcdir + '/src/stress ' + args)
+            profilers.stop(self)
+            profilers.report(self)
 
-# -v			Turn up verbosity.
-# -q			Turn down verbosity.
-# -n			Show what would have been done (dry-run)
-# -t secs		Time out after secs seconds. 
-# --backoff usecs	Wait for factor of usecs microseconds before starting
-# -c forks		Spawn forks processes each spinning on sqrt().
-# -i forks		Spawn forks processes each spinning on sync().
-# -m forks		Spawn forks processes each spinning on malloc(). 
-# --vm-bytes bytes	Allocate bytes number of bytes. The default is 1. 
-# --vm-hang		Instruct each vm hog process to go to sleep after 
-#			allocating memory. This contrasts with their normal 
-#			behavior, which is to free the memory and reallocate 
-#			ad infinitum. This is useful for simulating low memory
-#			conditions on a machine. For example, the following
-#			command allocates 256M of RAM and holds it until killed.
+# -v                    Turn up verbosity.
+# -q                    Turn down verbosity.
+# -n                    Show what would have been done (dry-run)
+# -t secs               Time out after secs seconds.
+# --backoff usecs       Wait for factor of usecs microseconds before starting
+# -c forks              Spawn forks processes each spinning on sqrt().
+# -i forks              Spawn forks processes each spinning on sync().
+# -m forks              Spawn forks processes each spinning on malloc().
+# --vm-bytes bytes      Allocate bytes number of bytes. The default is 1.
+# --vm-hang             Instruct each vm hog process to go to sleep after
+#                       allocating memory. This contrasts with their normal
+#                       behavior, which is to free the memory and reallocate
+#                       ad infinitum. This is useful for simulating low memory
+#                       conditions on a machine. For example, the following
+#                       command allocates 256M of RAM and holds it until killed.
 #
-#				% stress --vm 2 --vm-bytes 128M --vm-hang
-# -d forks		Spawn forks processes each spinning on write(). 
-# --hdd-bytes bytes	Write bytes number of bytes. The default is 1GB. 
-# --hdd-noclean		Do not unlink file(s) to which random data is written. 
+#                               % stress --vm 2 --vm-bytes 128M --vm-hang
+# -d forks              Spawn forks processes each spinning on write().
+# --hdd-bytes bytes     Write bytes number of bytes. The default is 1GB.
+# --hdd-noclean         Do not unlink file(s) to which random data is written.
 #
-# Note: Suffixes may be s,m,h,d,y (time) or k,m,g (size). 
+# Note: Suffixes may be s,m,h,d,y (time) or k,m,g (size).
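
Note: with the default arguments computed in execute(), a 4-CPU machine (an
illustrative count) runs every hog type with twice as many workers as CPUs:

    threads = 2 * 4     # 2 * autotest_utils.count_cpus() on a 4-CPU box
    args = '-c %d -i %d -m %d -d %d -t 60 -v' % \
            (threads, threads, threads, threads)
    # -> '-c 8 -i 8 -m 8 -d 8 -t 60 -v'
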
diff --git a/client/tests/sysbench/sysbench.py b/client/tests/sysbench/sysbench.py
index 58b7d25..571c37f 100644
--- a/client/tests/sysbench/sysbench.py
+++ b/client/tests/sysbench/sysbench.py
@@ -4,188 +4,188 @@
 
 
 class sysbench(test.test):
-	version = 1
+    version = 1
 
-	# http://osdn.dl.sourceforge.net/sourceforge/sysbench/sysbench-0.4.8.tar.gz
-	def setup(self, tarball = 'sysbench-0.4.8.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		self.job.setup_dep(['pgsql', 'mysql'])
+    # http://osdn.dl.sourceforge.net/sourceforge/sysbench/sysbench-0.4.8.tar.gz
+    def setup(self, tarball = 'sysbench-0.4.8.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        self.job.setup_dep(['pgsql', 'mysql'])
 
-		os.chdir(self.srcdir)
+        os.chdir(self.srcdir)
 
-		pgsql_dir = os.path.join(self.autodir, 'deps/pgsql/pgsql')
-		mysql_dir = os.path.join(self.autodir, 'deps/mysql/mysql')
+        pgsql_dir = os.path.join(self.autodir, 'deps/pgsql/pgsql')
+        mysql_dir = os.path.join(self.autodir, 'deps/mysql/mysql')
 
-		# configure wants to get at pg_config, so add its path
-		utils.system('PATH=%s/bin:$PATH ./configure --with-mysql=%s --with-pgsql' % (pgsql_dir, mysql_dir))
-		utils.system('make -j %d' % autotest_utils.count_cpus())
+        # configure wants to get at pg_config, so add its path
+        utils.system('PATH=%s/bin:$PATH ./configure --with-mysql=%s --with-pgsql' % (pgsql_dir, mysql_dir))
+        utils.system('make -j %d' % autotest_utils.count_cpus())
 
 
-	def execute(self, db_type = 'pgsql', build = 1, \
-			num_threads = autotest_utils.count_cpus(), max_time = 60, \
-			read_only = 0, args = ''):
-		plib = os.path.join(self.autodir, 'deps/pgsql/pgsql/lib')
-		mlib = os.path.join(self.autodir, 'deps/mysql/mysql/lib/mysql')
-		ld_path = prepend_path(plib, environ('LD_LIBRARY_PATH'))
-		ld_path = prepend_path(mlib, ld_path)
-		os.environ['LD_LIBRARY_PATH'] = ld_path
+    def execute(self, db_type = 'pgsql', build = 1, \
+                    num_threads = autotest_utils.count_cpus(), max_time = 60, \
+                    read_only = 0, args = ''):
+        plib = os.path.join(self.autodir, 'deps/pgsql/pgsql/lib')
+        mlib = os.path.join(self.autodir, 'deps/mysql/mysql/lib/mysql')
+        ld_path = prepend_path(plib, environ('LD_LIBRARY_PATH'))
+        ld_path = prepend_path(mlib, ld_path)
+        os.environ['LD_LIBRARY_PATH'] = ld_path
 
-		# The databases don't want to run as root so run them as nobody 
-		self.dbuser = 'nobody'
-		self.dbuid = pwd.getpwnam(self.dbuser)[2]
-		self.sudo = 'sudo -u ' + self.dbuser + ' '
+        # The databases don't want to run as root so run them as nobody
+        self.dbuser = 'nobody'
+        self.dbuid = pwd.getpwnam(self.dbuser)[2]
+        self.sudo = 'sudo -u ' + self.dbuser + ' '
 
-		# Check for nobody user
-		try:
-			utils.system(self.sudo + '/bin/true')
-		except:
-			raise TestError('Unable to run as nobody')
+        # Check for nobody user
+        try:
+            utils.system(self.sudo + '/bin/true')
+        except:
+            raise TestError('Unable to run as nobody')
 
-		if (db_type == 'pgsql'):
-			self.execute_pgsql(build, num_threads, max_time, \
-				read_only, args)
-		elif (db_type == 'mysql'):
-			self.execute_mysql(build, num_threads, max_time, \
-				read_only, args)
+        if (db_type == 'pgsql'):
+            self.execute_pgsql(build, num_threads, max_time, \
+                    read_only, args)
+        elif (db_type == 'mysql'):
+            self.execute_mysql(build, num_threads, max_time, \
+                    read_only, args)
 
 
-	def execute_pgsql(self, build, num_threads, max_time, read_only, args):
-		bin = os.path.join(self.autodir, 'deps/pgsql/pgsql/bin')
-		data = os.path.join(self.autodir, 'deps/pgsql/pgsql/data')
-		log = os.path.join(self.debugdir, 'pgsql.log')
+    def execute_pgsql(self, build, num_threads, max_time, read_only, args):
+        bin = os.path.join(self.autodir, 'deps/pgsql/pgsql/bin')
+        data = os.path.join(self.autodir, 'deps/pgsql/pgsql/data')
+        log = os.path.join(self.debugdir, 'pgsql.log')
 
-		if build == 1:
-			utils.system('rm -rf ' + data)
-			os.mkdir(data)
-			os.chown(data, self.dbuid, 0)
-			utils.system(self.sudo + bin + '/initdb -D ' + data)
+        if build == 1:
+            utils.system('rm -rf ' + data)
+            os.mkdir(data)
+            os.chown(data, self.dbuid, 0)
+            utils.system(self.sudo + bin + '/initdb -D ' + data)
 
-		# Database must be able to write its output into debugdir
-		os.chown(self.debugdir, self.dbuid, 0)
-		utils.system(self.sudo + bin + '/pg_ctl -D ' + data + \
-			' -l ' + log + ' start')
+        # Database must be able to write its output into debugdir
+        os.chown(self.debugdir, self.dbuid, 0)
+        utils.system(self.sudo + bin + '/pg_ctl -D ' + data + \
+                ' -l ' + log + ' start')
 
-		# Wait for database to start
-		time.sleep(5)
+        # Wait for database to start
+        time.sleep(5)
 
-		try:
-			base_cmd = self.srcdir + '/sysbench/sysbench ' + \
-				'--test=oltp --db-driver=pgsql ' + \
-				'--pgsql-user=' + self.dbuser
+        try:
+            base_cmd = self.srcdir + '/sysbench/sysbench ' + \
+                    '--test=oltp --db-driver=pgsql ' + \
+                    '--pgsql-user=' + self.dbuser
 
-			if build == 1:
-				utils.system(self.sudo + bin + '/createdb sbtest')
-				cmd = base_cmd +' prepare'
-				utils.system(cmd)
+            if build == 1:
+                utils.system(self.sudo + bin + '/createdb sbtest')
+                cmd = base_cmd + ' prepare'
+                utils.system(cmd)
 
-			cmd = base_cmd + \
-				' --num-threads=' + str(num_threads) + \
-				' --max-time=' + str(max_time) + \
-				' --max-requests=0'
+            cmd = base_cmd + \
+                    ' --num-threads=' + str(num_threads) + \
+                    ' --max-time=' + str(max_time) + \
+                    ' --max-requests=0'
 
-			if read_only:
-				cmd = cmd + ' --oltp-read-only=on'
+            if read_only:
+                cmd = cmd + ' --oltp-read-only=on'
 
-			results = []
+            results = []
 
-			profilers = self.job.profilers
-			if not profilers.only():
-				results.append(utils.system_output(cmd + ' run',
-							retain_output=True))
+            profilers = self.job.profilers
+            if not profilers.only():
+                results.append(utils.system_output(cmd + ' run',
+                                        retain_output=True))
 
-			# Do a profiling run if necessary
-			if profilers.present():
-				profilers.start(self)
-				results.append("Profiling run ...")
-				results.append(utils.system_output(cmd + ' run',
-							retain_output=True))
-				profilers.stop(self)
-				profilers.report(self)
-		except:
-			utils.system(self.sudo + bin + '/pg_ctl -D ' + data + ' stop')
-			raise
+            # Do a profiling run if necessary
+            if profilers.present():
+                profilers.start(self)
+                results.append("Profiling run ...")
+                results.append(utils.system_output(cmd + ' run',
+                                        retain_output=True))
+                profilers.stop(self)
+                profilers.report(self)
+        except:
+            utils.system(self.sudo + bin + '/pg_ctl -D ' + data + ' stop')
+            raise
 
-		utils.system(self.sudo + bin + '/pg_ctl -D ' + data + ' stop')
+        utils.system(self.sudo + bin + '/pg_ctl -D ' + data + ' stop')
 
-		self.__format_results("\n".join(results))
+        self.__format_results("\n".join(results))
 
 
-	def execute_mysql(self, build, num_threads, max_time, read_only, args):
-		bin = os.path.join(self.autodir, 'deps/mysql/mysql/bin')
-		data = os.path.join(self.autodir, 'deps/mysql/mysql/var')
-		log = os.path.join(self.debugdir, 'mysql.log')
+    def execute_mysql(self, build, num_threads, max_time, read_only, args):
+        bin = os.path.join(self.autodir, 'deps/mysql/mysql/bin')
+        data = os.path.join(self.autodir, 'deps/mysql/mysql/var')
+        log = os.path.join(self.debugdir, 'mysql.log')
 
-		if build == 1:
-			utils.system('rm -rf ' + data)
-			os.mkdir(data)	
-			os.chown(data, self.dbuid, 0)
-			utils.system(bin + '/mysql_install_db --user=' + self.dbuser)
+        if build == 1:
+            utils.system('rm -rf ' + data)
+            os.mkdir(data)
+            os.chown(data, self.dbuid, 0)
+            utils.system(bin + '/mysql_install_db --user=' + self.dbuser)
 
-		utils.system(bin + '/mysqld_safe --log-error=' + log + \
-			' --user=' + self.dbuser + ' &')
+        utils.system(bin + '/mysqld_safe --log-error=' + log + \
+                ' --user=' + self.dbuser + ' &')
 
-		# Wait for database to start
-		time.sleep(5)
+        # Wait for database to start
+        time.sleep(5)
 
-		try:
-			base_cmd = self.srcdir + '/sysbench/sysbench ' + \
-				'--test=oltp --db-driver=mysql ' + \
-				'--mysql-user=root'
+        try:
+            base_cmd = self.srcdir + '/sysbench/sysbench ' + \
+                    '--test=oltp --db-driver=mysql ' + \
+                    '--mysql-user=root'
 
-			if build == 1:
-				utils.system('echo "create database sbtest" | ' + \
-					bin + '/mysql -u root')
-				cmd = base_cmd +' prepare'
-				utils.system(cmd)
+            if build == 1:
+                utils.system('echo "create database sbtest" | ' + \
+                        bin + '/mysql -u root')
+                cmd = base_cmd + ' prepare'
+                utils.system(cmd)
 
-			cmd = base_cmd + \
-				' --num-threads=' + str(num_threads) + \
-				' --max-time=' + str(max_time) + \
-				' --max-requests=0'
+            cmd = base_cmd + \
+                    ' --num-threads=' + str(num_threads) + \
+                    ' --max-time=' + str(max_time) + \
+                    ' --max-requests=0'
 
-			if read_only:
-				cmd = cmd + ' --oltp-read-only=on'
+            if read_only:
+                cmd = cmd + ' --oltp-read-only=on'
 
-			results = []
+            results = []
 
-			profilers = self.job.profilers
-                	if not profilers.only():
-				results.append(utils.system_output(cmd + ' run',
-							 retain_output=True))
+            profilers = self.job.profilers
+            if not profilers.only():
+                results.append(utils.system_output(cmd + ' run',
+                                         retain_output=True))
 
-			# Do a profiling run if necessary
-			if profilers.present():
-				profilers.start(self)
-				results.append("Profiling run ...")
-				results.append(utils.system_output(cmd + ' run',
-							retain_output=True))
-				profilers.stop(self)
-				profilers.report(self)
-		except:
-			utils.system(bin + '/mysqladmin shutdown')
-			raise
+            # Do a profiling run if necessary
+            if profilers.present():
+                profilers.start(self)
+                results.append("Profiling run ...")
+                results.append(utils.system_output(cmd + ' run',
+                                        retain_output=True))
+                profilers.stop(self)
+                profilers.report(self)
+        except:
+            utils.system(bin + '/mysqladmin shutdown')
+            raise
 
-		utils.system(bin + '/mysqladmin shutdown')
+        utils.system(bin + '/mysqladmin shutdown')
 
-		self.__format_results("\n".join(results))
+        self.__format_results("\n".join(results))
 
 
-	def __format_results(self, results):
-		threads = 0
-		tps = 0
+    def __format_results(self, results):
+        threads = 0
+        tps = 0
 
-		out = open(self.resultsdir + '/keyval', 'w')
-		for line in results.split('\n'):
-			threads_re = re.search('Number of threads: (\d+)', line)
-			if threads_re:
-				threads = threads_re.group(1)
+        out = open(self.resultsdir + '/keyval', 'w')
+        for line in results.split('\n'):
+            threads_re = re.search(r'Number of threads: (\d+)', line)
+            if threads_re:
+                threads = threads_re.group(1)
 
-			tps_re = re.search('transactions:\s+\d+\s+\((\S+) per sec.\)', line)
-			if tps_re:
-				tps = tps_re.group(1)
-				break
+            tps_re = re.search(r'transactions:\s+\d+\s+\((\S+) per sec.\)', line)
+            if tps_re:
+                tps = tps_re.group(1)
+                break
 
-		print >> out, 'threads=%s\ntps=%s' % (threads, tps)
-		out.close()
+        print >> out, 'threads=%s\ntps=%s' % (threads, tps)
+        out.close()
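
As a standalone illustration of the keyval extraction above, here is a
minimal sketch of what those two regexes pull out of sysbench's OLTP
summary (the sample lines are invented, but follow sysbench's output
format):

import re

# Hypothetical sysbench OLTP summary fragment.
sample = """\
Number of threads: 4
    transactions:                        10004  (166.69 per sec.)
"""

threads = tps = 0
for line in sample.split('\n'):
    threads_re = re.search(r'Number of threads: (\d+)', line)
    if threads_re:
        threads = threads_re.group(1)
    tps_re = re.search(r'transactions:\s+\d+\s+\((\S+) per sec.\)', line)
    if tps_re:
        tps = tps_re.group(1)
        break

assert (threads, tps) == ('4', '166.69')
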
diff --git a/client/tests/tbench/tbench.py b/client/tests/tbench/tbench.py
index 3bc0667..0aeb3cd 100755
--- a/client/tests/tbench/tbench.py
+++ b/client/tests/tbench/tbench.py
@@ -4,58 +4,58 @@
 
 
 class tbench(test.test):
-	version = 2 
+    version = 2
 
-	# http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
-	def setup(self, tarball = 'dbench-3.04.tar.gz'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz
+    def setup(self, tarball = 'dbench-3.04.tar.gz'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('./configure')
-		utils.system('make')
+        utils.system('./configure')
+        utils.system('make')
 
-	def execute(self, iterations = 1, nprocs = None, args = ''):
-		# only supports combined server+client model at the moment
-		# should support separate I suppose, but nobody uses it
-		if not nprocs:
-			nprocs = self.job.cpu_count()
-		args += ' %s' % nprocs
-		results = []
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				results.append(self.run_tbench(args))
+    def execute(self, iterations = 1, nprocs = None, args = ''):
+        # Only the combined server+client model is supported at the
+        # moment; separate server and client hosts could be supported,
+        # but nobody uses that mode.
+        if not nprocs:
+            nprocs = self.job.cpu_count()
+        args += ' %s' % nprocs
+        results = []
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                results.append(self.run_tbench(args))
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			results.append(self.run_tbench(args))
-			profilers.stop(self)
-			profilers.report(self)
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            results.append(self.run_tbench(args))
+            profilers.stop(self)
+            profilers.report(self)
 
-		self.__format_results("\n".join(results))
+        self.__format_results("\n".join(results))
 
 
-	def run_tbench(self, args):
-		pid = os.fork()
-		if pid:				# parent
-			time.sleep(1)
-			client = self.srcdir + '/client.txt'
-			args = '-c ' + client + ' ' + '%s' % args
-			cmd = os.path.join(self.srcdir, "tbench") + " " + args
-			results = utils.system_output(cmd, retain_output=True)
-			os.kill(pid, signal.SIGTERM)    # clean up the server
-		else:				# child
-			server = self.srcdir + '/tbench_srv'
-			os.execlp(server, server)
-		return results
+    def run_tbench(self, args):
+        pid = os.fork()
+        if pid:                         # parent
+            time.sleep(1)
+            client = self.srcdir + '/client.txt'
+            args = '-c ' + client + ' ' + '%s' % args
+            cmd = os.path.join(self.srcdir, "tbench") + " " + args
+            results = utils.system_output(cmd, retain_output=True)
+            os.kill(pid, signal.SIGTERM)    # clean up the server
+        else:                           # child
+            server = self.srcdir + '/tbench_srv'
+            os.execlp(server, server)
+        return results
 
 
-	def __format_results(self, results):
-		out = open(self.resultsdir + '/keyval', 'w')
-		pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
-		for result in pattern.findall(results):
-			print >> out, "throughput=%s\nprocs=%s\n" % result
-		out.close()
+    def __format_results(self, results):
+        out = open(self.resultsdir + '/keyval', 'w')
+        pattern = re.compile(r"Throughput (.*?) MB/sec (.*?) procs")
+        for result in pattern.findall(results):
+            print >> out, "throughput=%s\nprocs=%s\n" % result
+        out.close()
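
The fork/exec/SIGTERM pattern in run_tbench above is worth seeing in
isolation; here is a minimal self-contained sketch of the same
parent/child handshake, with /bin/sleep standing in for tbench_srv
(assumes a POSIX system):

import os, signal, time

pid = os.fork()
if pid:                                 # parent: drive the workload
    time.sleep(1)                       # give the "server" time to start
    # ... run the client benchmark here ...
    os.kill(pid, signal.SIGTERM)        # clean up the server
    os.waitpid(pid, 0)                  # reap the child; run_tbench skips this

else:                                   # child: becomes the server
    os.execlp('sleep', 'sleep', '60')   # stand-in for tbench_srv

Note that os.execlp never returns on success, so the child never falls
through into the parent's code path; run_tbench relies on the same
property.
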
diff --git a/client/tests/tiobench/tiobench.py b/client/tests/tiobench/tiobench.py
index 6c55567..8a4d5ba 100644
--- a/client/tests/tiobench/tiobench.py
+++ b/client/tests/tiobench/tiobench.py
@@ -4,32 +4,31 @@
 
 
 class tiobench(test.test):
-	version = 1
+    version = 1
 
-	# http://prdownloads.sourceforge.net/tiobench/tiobench-0.3.3.tar.gz
-	def setup(self, tarball = 'tiobench-0.3.3.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://prdownloads.sourceforge.net/tiobench/tiobench-0.3.3.tar.gz
+    def setup(self, tarball = 'tiobench-0.3.3.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('make')
-		
-	def execute(self, dir = None, iterations=1, args = None):
-		if not dir:
-			dir = self.tmpdir
-		os.chdir(self.srcdir)
-		if not args:
-			args = '--block=4096 --block=8192 --threads=10 --size=1024 --numruns=2'
-		profilers = self.job.profilers
-		if not profilers.only():
-			for i in range(iterations):
-				utils.system('./tiobench.pl --dir %s %s' %(dir, args))
+        utils.system('make')
 
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system('./tiobench.pl --dir %s %s' %(dir, args))
-			profilers.stop(self)
-			profilers.report(self)
-			
+    def execute(self, dir = None, iterations=1, args = None):
+        if not dir:
+            dir = self.tmpdir
+        os.chdir(self.srcdir)
+        if not args:
+            args = '--block=4096 --block=8192 --threads=10 --size=1024 --numruns=2'
+        profilers = self.job.profilers
+        if not profilers.only():
+            for i in range(iterations):
+                utils.system('./tiobench.pl --dir %s %s' %(dir, args))
+
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system('./tiobench.pl --dir %s %s' %(dir, args))
+            profilers.stop(self)
+            profilers.report(self)
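
tiobench, tbench and sysbench above all wrap their measurement loop in
the same profiler handshake; a distilled sketch of that shared pattern
(test and the profilers attributes mirror the autotest API used above;
run_once is a placeholder for whatever each test actually runs):

def run_with_profilers(test, iterations, run_once):
    profilers = test.job.profilers
    # Normal measurement runs, skipped when the job requests a
    # profiling-only execution.
    if not profilers.only():
        for _ in range(iterations):
            run_once()
    # One extra run under the profilers, if any are configured.
    if profilers.present():
        profilers.start(test)
        run_once()
        profilers.stop(test)
        profilers.report(test)
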
diff --git a/client/tests/tsc/tsc.py b/client/tests/tsc/tsc.py
index 79b5c26..54941f8 100755
--- a/client/tests/tsc/tsc.py
+++ b/client/tests/tsc/tsc.py
@@ -3,16 +3,16 @@
 from autotest_lib.client.common_lib import utils
 
 class tsc(test.test):
-	version = 1
+    version = 1
 
-	def setup(self, tarball = 'checktsc.tar'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
-		utils.system('make')
+    def setup(self, tarball = 'checktsc.tar'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
+        utils.system('make')
 
-	
-	def execute(self, iterations = 1, args = ''):
-		for i in range(iterations):
-			utils.system(self.srcdir + '/checktsc ' + args)
+
+    def execute(self, iterations = 1, args = ''):
+        for i in range(iterations):
+            utils.system(self.srcdir + '/checktsc ' + args)
diff --git a/client/tests/unixbench/unixbench.py b/client/tests/unixbench/unixbench.py
index 5685404..001ce66 100755
--- a/client/tests/unixbench/unixbench.py
+++ b/client/tests/unixbench/unixbench.py
@@ -4,91 +4,91 @@
 
 
 class unixbench(test.test):
-	version = 2
+    version = 2
 
-	# http://www.tux.org/pub/tux/niemi/unixbench/unixbench-4.1.0.tgz
-	def setup(self, tarball = 'unixbench-4.1.0.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # http://www.tux.org/pub/tux/niemi/unixbench/unixbench-4.1.0.tgz
+    def setup(self, tarball = 'unixbench-4.1.0.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('patch -p1 < ../unixbench.patch')
-		utils.system('make')
+        utils.system('patch -p1 < ../unixbench.patch')
+        utils.system('make')
 
 
-	def execute(self, iterations = 1, args = '', stepsecs=0):
-		vars = ('TMPDIR=\"%s\" RESULTDIR=\"%s\"' % 
-		       (self.tmpdir, self.resultsdir))
-		profilers = self.job.profilers
-		keyval = open(self.resultsdir + '/keyval', 'w')
-		self.err = None
-		if stepsecs:
-			# change time per subtest from unixbench's defaults of
-			#   10 secs for small tests, 30 secs for bigger tests
-			vars += ' systime=%i looper=%i seconds=%i'\
-				' dhrytime=%i arithtime=%i' \
-				% ((stepsecs,)*5) 
-		if not profilers.only():
-			for i in range(iterations):
-				os.chdir(self.srcdir)
-				utils.system(vars + ' ./Run ' + args)
-				report = open(self.resultsdir + '/report')
-				self.format_results(report, keyval)
-				
-		# Do a profiling run if necessary
-		if profilers.present():
-			profilers.start(self)
-			utils.system(vars + ' ./Run ' + args)
-			profilers.stop(self)
-			profilers.report(self)
-		
-		# check err string and possible throw
-		if self.err != None:
-			raise error.TestError(self.err)
+    def execute(self, iterations = 1, args = '', stepsecs=0):
+        vars = ('TMPDIR="%s" RESULTDIR="%s"' %
+               (self.tmpdir, self.resultsdir))
+        profilers = self.job.profilers
+        keyval = open(self.resultsdir + '/keyval', 'w')
+        self.err = None
+        if stepsecs:
+            # change time per subtest from unixbench's defaults of
+            #   10 secs for small tests, 30 secs for bigger tests
+            vars += ' systime=%i looper=%i seconds=%i' \
+                    ' dhrytime=%i arithtime=%i' \
+                    % ((stepsecs,)*5)
+        if not profilers.only():
+            for i in range(iterations):
+                os.chdir(self.srcdir)
+                utils.system(vars + ' ./Run ' + args)
+                report = open(self.resultsdir + '/report')
+                self.format_results(report, keyval)
 
-	
-	def check_for_error(self, words):
-		l = len(words)
-		if l >= 3 and words[-3:l] == ['no', 'measured', 'results']:
-			# found a problem so record it in err string
-			key = '_'.join(words[:-3])
-			if self.err == None: 
-				self.err = key
-			else:
-				self.err = self.err + " " + key
-			return True
-		else:
-			return False
+        # Do a profiling run if necessary
+        if profilers.present():
+            profilers.start(self)
+            utils.system(vars + ' ./Run ' + args)
+            profilers.stop(self)
+            profilers.report(self)
+
+        # Check the err string and possibly raise
+        if self.err != None:
+            raise error.TestError(self.err)
 
 
-	def format_results(self, report, keyval):
-		for i in range(9):
-			report.next()
-		for line in report:
-			if not line.strip():
-				break
+    def check_for_error(self, words):
+        l = len(words)
+        if l >= 3 and words[-3:l] == ['no', 'measured', 'results']:
+            # found a problem so record it in err string
+            key = '_'.join(words[:-3])
+            if self.err == None:
+                self.err = key
+            else:
+                self.err = self.err + " " + key
+            return True
+        else:
+            return False
 
-			words = line.split()
-			# look for problems first
-			if self.check_for_error(words):
-				continue
-					
-			# we should make sure that there are at least
-			# 6 guys before we start accessing the array
-			if len(words) >= 6:
-				key = '_'.join(words[:-6])
-				value = words[-6]
-				print >> keyval, '%s=%s' % (key, value)
-		for line in report:
-			if 'FINAL SCORE' in line:
-				print >> keyval, 'score=%s\n' % line.split()[-1]
-				break
+
+    def format_results(self, report, keyval):
+        for i in range(9):
+            report.next()
+        for line in report:
+            if not line.strip():
+                break
+
+            words = line.split()
+            # look for problems first
+            if self.check_for_error(words):
+                continue
+
+            # Make sure there are at least six fields before
+            # indexing into the array
+            if len(words) >= 6:
+                key = '_'.join(words[:-6])
+                value = words[-6]
+                print >> keyval, '%s=%s' % (key, value)
+        for line in report:
+            if 'FINAL SCORE' in line:
+                print >> keyval, 'score=%s\n' % line.split()[-1]
+                break
 
 
 if __name__ == '__main__':
-	import sys
-	unixbench.format_results(sys.stdin, sys.stdout)
+    import sys
+    unixbench.format_results(sys.stdin, sys.stdout)
 
 
 """ Here is a sample report file:
diff --git a/client/tests/xmtest/xmtest.py b/client/tests/xmtest/xmtest.py
index 32c649a..e921633 100644
--- a/client/tests/xmtest/xmtest.py
+++ b/client/tests/xmtest/xmtest.py
@@ -1,33 +1,33 @@
 # (C) Copyright IBM Corp. 2006
 # Author: Paul Larson <[email protected]>
 # Description:
-#	Autotest script for running Xen xm-test
-#	This should be run from a Xen domain0
+#       Autotest script for running Xen xm-test
+#       This should be run from a Xen domain0
 import os
 from autotest_lib.client.bin import test, autotest_utils
 from autotest_lib.client.common_lib import utils
 
 
 class xmtest(test.test):
-	version = 1
+    version = 1
 
-	# This test expects just the xm-test directory, as a tarball
-	# from the Xen source tree
-	# hg clone http://xenbits.xensource.com/xen-unstable.hg
-	# or wget http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/xen-unstable-src.tgz
-	# cd tools
-	# tar -czf xm-test.tgz xm-test
-	def setup(self, tarball = 'xm-test.tar.bz2'):
-		tarball = utils.unmap_url(self.bindir, tarball,
-		                                   self.tmpdir)
-		autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
-		os.chdir(self.srcdir)
+    # This test expects just the xm-test directory, as a tarball
+    # from the Xen source tree
+    # hg clone http://xenbits.xensource.com/xen-unstable.hg
+    # or wget http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/xen-unstable-src.tgz
+    # cd tools
+    # tar -czf xm-test.tgz xm-test
+    def setup(self, tarball = 'xm-test.tar.bz2'):
+        tarball = utils.unmap_url(self.bindir, tarball,
+                                           self.tmpdir)
+        autotest_utils.extract_tarball_to_dir(tarball, self.srcdir)
+        os.chdir(self.srcdir)
 
-		utils.system('./autogen')
-		utils.system('./configure')
-		utils.system('make existing')
+        utils.system('./autogen')
+        utils.system('./configure')
+        utils.system('make existing')
 
-	def execute(self, args = ''):
-		os.chdir(self.srcdir)
-		utils.system('./runtest.sh ' + args)
-		utils.system('mv xmtest.* ' + self.resultsdir)
+    def execute(self, args = ''):
+        os.chdir(self.srcdir)
+        utils.system('./runtest.sh ' + args)
+        utils.system('mv xmtest.* ' + self.resultsdir)
diff --git a/client/tools/autotest b/client/tools/autotest
index 619db5a..2b47b37 100755
--- a/client/tools/autotest
+++ b/client/tools/autotest
@@ -3,21 +3,21 @@
 
 autodir = None
 try:
-	autodir = os.path.dirname(os.path.realpath('/etc/autotest.conf'))
+    autodir = os.path.dirname(os.path.realpath('/etc/autotest.conf'))
 except:
-	pass
+    pass
 if not autodir:
-        for path in ['/usr/local/autotest', '/home/autotest']:
-                if os.path.exists(os.path.join(path, '/bin/autotest')):
-			autodir = path
+    for path in ['/usr/local/autotest', '/home/autotest']:
+        if os.path.exists(os.path.join(path, 'bin/autotest')):
+            autodir = path
 
 autotest = os.path.join(autodir, 'bin/autotest')
 control = os.path.join(autodir, 'control')
 state = os.path.join(autodir, 'control.state')
 
 if len(sys.argv) == 1 or sys.argv[1] == 'start':
-	if os.path.exists(state):
-		print "Restarting partially completed autotest job"
-		os.system(autotest + ' --continue ' + control)
-	else:
-		print "No autotest jobs outstanding"
+    if os.path.exists(state):
+        print "Restarting partially completed autotest job"
+        os.system(autotest + ' --continue ' + control)
+    else:
+        print "No autotest jobs outstanding"
diff --git a/client/tools/avgtime b/client/tools/avgtime
index d78a750..b5a1ed7 100755
--- a/client/tools/avgtime
+++ b/client/tools/avgtime
@@ -2,35 +2,34 @@
 import sys, os, re
 
 def avg_deviation(values):
-	sum = 0
-	count = 0
+    sum = 0
+    count = 0
 
-	if not values:
-		return (0, 0)
-	for x in values:
-		sum += x
-		count += 1
-	average = sum / count
-	sum_sq_dev = 0
-	for x in values:
-		sum_sq_dev += (x - average) ** 2
-	std_dev = (sum_sq_dev / count)**0.5
-	return (average, 100 * std_dev / average)
+    if not values:
+        return (0, 0)
+    for x in values:
+        sum += x
+        count += 1
+    average = sum / count
+    sum_sq_dev = 0
+    for x in values:
+        sum_sq_dev += (x - average) ** 2
+    std_dev = (sum_sq_dev / count)**0.5
+    return (average, 100 * std_dev / average)
 
 
-list = []	
+list = []
 for line in sys.stdin.readlines():
-	(user, system, elapsed, cpu) = line.split()[0:4]
-	user = float(re.match(r'([\d\.]+)', user).group(0))
-	system = float(re.match(r'([\d\.]+)', system).group(0))
-	m = re.match(r'(\d+):([\d\.]+)', elapsed)
-	elapsed = 60*int(m.group(1)) + float(m.group(2))
-	cpu = int(re.match(r'(\d+)', cpu).group(0))
+    (user, system, elapsed, cpu) = line.split()[0:4]
+    user = float(re.match(r'([\d\.]+)', user).group(0))
+    system = float(re.match(r'([\d\.]+)', system).group(0))
+    m = re.match(r'(\d+):([\d\.]+)', elapsed)
+    elapsed = 60*int(m.group(1)) + float(m.group(2))
+    cpu = int(re.match(r'(\d+)', cpu).group(0))
 
-	list.append((user, system, elapsed, cpu))
+    list.append((user, system, elapsed, cpu))
 
 print "   user: %0.2fs (%0.2f%%)" % avg_deviation([x[0] for x in list])
 print " system: %0.2fs (%0.2f%%)" % avg_deviation([x[1] for x in list])
 print "elapsed: %0.2fs (%0.2f%%)" % avg_deviation([x[2] for x in list])
 print "    cpu: %d%% (%0.2f%%)" % avg_deviation([x[3] for x in list])
-	
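
A worked example of avg_deviation above, on a hand-checkable input
(assumes the function as defined above is in scope):

# Mean of [2.0, 4.0, 6.0] is 4.0; the population standard deviation is
# sqrt(8/3) ~= 1.633, i.e. ~40.82% of the mean.
average, pct_dev = avg_deviation([2.0, 4.0, 6.0])
assert average == 4.0
assert abs(pct_dev - 40.82) < 0.01
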
diff --git a/client/tools/diffprofile b/client/tools/diffprofile
index 6bf2b6b..da57eb0 100755
--- a/client/tools/diffprofile
+++ b/client/tools/diffprofile
@@ -7,20 +7,20 @@
 results_per_sign = 10
 
 def parse_lines(filename):
-	results = []
-	start_key = 1
-	for line in open(filename).readlines():
-		try:
-			a = line.split()
-			key = ' '.join(a[start_key:])
-			count = int(a[0])
-			results.append((key, count))
-		except:		# presumably a header line
-			if re.match(r'samples\s*%\s*app name\s*symbol name', line):
-				start_key = 2
-			elif re.match(r'samples\s*%\s*image name\s*app name\s*symbol name', line):
-				start_key = 3
-	return results
+    results = []
+    start_key = 1
+    for line in open(filename).readlines():
+        try:
+            a = line.split()
+            key = ' '.join(a[start_key:])
+            count = int(a[0])
+            results.append((key, count))
+        except:         # presumably a header line
+            if re.match(r'samples\s*%\s*app name\s*symbol name', line):
+                start_key = 2
+            elif re.match(r'samples\s*%\s*image name\s*app name\s*symbol name', line):
+                start_key = 3
+    return results
 
 
 # Firstly, suck in both files.
@@ -29,43 +29,43 @@
 diff = {}
 
 for (key, count) in parse_lines(sys.argv[1]):
-	# Oprofile seems to be ... erm ... broken. Keys can appear > once ;-(
-	if orig.has_key(key):
-		orig[key] += count
-	else:
-		orig[key] = count
-	if diff.has_key(key):
-		diff[key] -= count
-	else:
-		diff[key] = -count
+    # Oprofile seems to be ... erm ... broken. Keys can appear > once ;-(
+    if orig.has_key(key):
+        orig[key] += count
+    else:
+        orig[key] = count
+    if diff.has_key(key):
+        diff[key] -= count
+    else:
+        diff[key] = -count
 
 for (key, count) in parse_lines(sys.argv[2]):
-	if new.has_key(key):
-		new[key] += count
-	else:
-		new[key] = count
-	if diff.has_key(key):
-		diff[key] += count
-	else:
-		diff[key] = count
+    if new.has_key(key):
+        new[key] += count
+    else:
+        new[key] = count
+    if diff.has_key(key):
+        diff[key] += count
+    else:
+        diff[key] = count
 
 if len(orig) < 2* results_per_sign or len(new) < 2 * results_per_sign:
-	sys.exit(1)		# one of the files was blank?
+    sys.exit(1)             # one of the files was blank?
 
 # Now sort and print the diffs.
 def print_key(key):
-	if orig.has_key(key) and orig[key] > 0:
-		pct = (100 * diff[key]) / orig[key]
-	else:
-		pct = 0
-	print "%10d  %6.1f%% %s" % (diff[key], pct, key)
+    if orig.has_key(key) and orig[key] > 0:
+        pct = (100 * diff[key]) / orig[key]
+    else:
+        pct = 0
+    print "%10d  %6.1f%% %s" % (diff[key], pct, key)
 
 keys = sorted(diff.keys(), key=lambda x : diff[x], reverse = True)
 
 for key in keys[:results_per_sign]:
-	print_key(key)
+    print_key(key)
 
 print "\n...\n"
 
 for key in keys[len(keys)-results_per_sign:]:
-	print_key(key)
+    print_key(key)
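
The has_key bookkeeping above is a multiset merge spelled out by hand;
the same accumulation in a compact, self-contained sketch (the symbol
names and counts are invented):

def accumulate(pairs):
    # Sum counts per key, tolerating keys that appear more than once
    # (as oprofile output can).
    totals = {}
    for key, count in pairs:
        totals[key] = totals.get(key, 0) + count
    return totals

orig = accumulate([('do_page_fault', 120), ('do_page_fault', 30)])
new = accumulate([('do_page_fault', 100)])
diff = dict((k, new.get(k, 0) - orig.get(k, 0))
            for k in set(orig) | set(new))

assert diff['do_page_fault'] == -50     # 100 new minus 150 original
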
diff --git a/client/tools/make_clean b/client/tools/make_clean
index 5f47476..f6853d4 100755
--- a/client/tools/make_clean
+++ b/client/tools/make_clean
@@ -2,25 +2,24 @@
 import os
 
 def purge_src(top_dir):
-	for dir in os.listdir(top_dir):
-		if dir.startswith('.'):
-			continue
-		py = os.path.join (top_dir, dir, dir + '.py')
-		if not os.path.exists(py):
-			continue
-		ret = os.system('grep -q "preserve_srcdir = " ' + py)
-		src_path = os.path.abspath(os.path.join('tests', dir, 'src'))
-		if not os.path.exists(src_path):
-			continue
-		if ret:			# This should have a replaceable src dir
-			cmd = 'rm -rf ' + src_path
-		else:
-			cmd = 'cd %s; make clean > /dev/null 2>&1 ' % src_path
+    for dir in os.listdir(top_dir):
+        if dir.startswith('.'):
+            continue
+        py = os.path.join(top_dir, dir, dir + '.py')
+        if not os.path.exists(py):
+            continue
+        ret = os.system('grep -q "preserve_srcdir = " ' + py)
+        src_path = os.path.abspath(os.path.join('tests', dir, 'src'))
+        if not os.path.exists(src_path):
+            continue
+        if ret:                 # This should have a replaceable src dir
+            cmd = 'rm -rf ' + src_path
+        else:
+            cmd = 'cd %s; make clean > /dev/null 2>&1 ' % src_path
 
-		print cmd
-		os.system(cmd)
+        print cmd
+        os.system(cmd)
 
 
 for dir in ['tests', 'profilers', 'deps']:
-	purge_src(dir)
-
+    purge_src(dir)
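
The ret test in purge_src above follows shell conventions: os.system
returns the command's exit status, so grep -q yields 0 (falsy) when
'preserve_srcdir' is found and nonzero (truthy) when it is absent,
which is exactly when src/ is replaceable. A minimal sketch of the
convention, using true and false as stand-ins for the grep call:

import os

assert os.system('true') == 0       # success: pattern would be present
assert os.system('false') != 0      # failure: pattern would be absent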