Convert all Python code to use four-space indents instead of eight-space tabs.

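The change is purely mechanical: each leading tab (previously rendered at
a tab width of eight) becomes one four-space indent level, and everything
after the indentation is left untouched. For reference, a conversion of
this kind can be scripted roughly as below; this is only an illustrative
sketch, not necessarily the tool actually used (CPython's
Tools/scripts/reindent.py performs a more careful version of the same job):

    import sys

    def retab(line):
        # one leading tab == one indent level -> four spaces
        stripped = line.lstrip('\t')
        return '    ' * (len(line) - len(stripped)) + stripped

    for path in sys.argv[1:]:
        lines = open(path).readlines()
        open(path, 'w').writelines(retab(l) for l in lines)
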
Signed-off-by: John Admanski <[email protected]>

git-svn-id: http://test.kernel.org/svn/autotest/trunk@1658 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/server/autoserv b/server/autoserv
index bf7dfa1..bab1a58 100755
--- a/server/autoserv
+++ b/server/autoserv
@@ -18,155 +18,155 @@
 
 
 class PidFileManager(object):
-	pid_file = None
+    pid_file = None
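+    # .autoserv_execute holds autoserv's pid on its first line; a
+    # second line with the encoded exit status is appended on exit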
 
-	def open_pid_file(self, results_dir):
-		pid_file_path = os.path.join(results_dir, '.autoserv_execute')
-		assert not os.path.exists(pid_file_path)
-		self.pid_file = open(pid_file_path, 'w')
-		self.pid_file.write(str(os.getpid()) + '\n')
-		self.pid_file.flush()
+    def open_pid_file(self, results_dir):
+        pid_file_path = os.path.join(results_dir, '.autoserv_execute')
+        assert not os.path.exists(pid_file_path)
+        self.pid_file = open(pid_file_path, 'w')
+        self.pid_file.write(str(os.getpid()) + '\n')
+        self.pid_file.flush()
 
 
-	def close_pid_file(self, exit_code, signal_code=0):
-		if not self.pid_file:
-			return
-		real_exit_code = (exit_code << 8) | (signal_code & 0xFF)
-		self.pid_file.write(str(real_exit_code) + '\n')
-		self.pid_file.close()
-		self.pid_file = None
+    def close_pid_file(self, exit_code, signal_code=0):
+        if not self.pid_file:
+            return
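+        # pack the status the way os.wait() reports it: exit code in
+        # the high byte, terminating signal in the low byte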
+        real_exit_code = (exit_code << 8) | (signal_code & 0xFF)
+        self.pid_file.write(str(real_exit_code) + '\n')
+        self.pid_file.close()
+        self.pid_file = None
 
 
 def run_autoserv(pid_file_manager, results, parser):
-	# send stdin to /dev/null
-	dev_null = os.open(os.devnull, os.O_RDONLY)
-	os.dup2(dev_null, sys.stdin.fileno())
-	os.close(dev_null)
+    # send stdin to /dev/null
+    dev_null = os.open(os.devnull, os.O_RDONLY)
+    os.dup2(dev_null, sys.stdin.fileno())
+    os.close(dev_null)
 
-	# Create separate process group
-	os.setpgrp()
+    # Create separate process group
+    os.setpgrp()
 
-	# Implement SIGTERM handler
-	def handle_sigint(signum, frame):
-		pid_file_manager.close_pid_file(1, signal.SIGTERM)
-		os.killpg(os.getpgrp(), signal.SIGKILL)
+    # Implement SIGTERM handler
+    def handle_sigint(signum, frame):
+        pid_file_manager.close_pid_file(1, signal.SIGTERM)
+        os.killpg(os.getpgrp(), signal.SIGKILL)
 
-	# Set signal handler
-	signal.signal(signal.SIGTERM, handle_sigint)
+    # Set signal handler
+    signal.signal(signal.SIGTERM, handle_sigint)
 
-	# Get a useful value for running 'USER'
-	realuser = os.environ.get('USER')
-	if not realuser:
-		realuser = 'anonymous'
+    # Get a useful value for running 'USER'
+    realuser = os.environ.get('USER')
+    if not realuser:
+        realuser = 'anonymous'
 
-	machines = parser.options.machines.split(',')
-	machines_file = parser.options.machines_file
-	label    = parser.options.label
-	user     = parser.options.user
-	client   = parser.options.client
-	reboot   = parser.options.reboot
-	install_before = parser.options.install_before
-	install_after  = parser.options.install_after
-	verify   = parser.options.verify
-	repair   = parser.options.repair
-	no_tee   = parser.options.no_tee
-	parse_job = parser.options.parse_job
-	ssh_user = parser.options.ssh_user
-	ssh_port = parser.options.ssh_port
-	ssh_pass = parser.options.ssh_pass
+    machines = parser.options.machines.split(',')
+    machines_file = parser.options.machines_file
+    label    = parser.options.label
+    user     = parser.options.user
+    client   = parser.options.client
+    reboot   = parser.options.reboot
+    install_before = parser.options.install_before
+    install_after  = parser.options.install_after
+    verify   = parser.options.verify
+    repair   = parser.options.repair
+    no_tee   = parser.options.no_tee
+    parse_job = parser.options.parse_job
+    ssh_user = parser.options.ssh_user
+    ssh_port = parser.options.ssh_port
+    ssh_pass = parser.options.ssh_pass
 
-	if len(parser.args) < 1 and not verify and not repair:
-		print parser.parser.print_help()
-		sys.exit(1)
+    if len(parser.args) < 1 and not verify and not repair:
+        parser.parser.print_help()
+        sys.exit(1)
 
-	# We have a control file unless it's just a verify/repair job
-	if len(parser.args) > 0:
-		control = parser.args[0]
-	else:
-		control = None
+    # We have a control file unless it's just a verify/repair job
+    if len(parser.args) > 0:
+        control = parser.args[0]
+    else:
+        control = None
 
-	if machines_file:
-		machines = []
-		for m in open(machines_file, 'r').readlines():
-			# remove comments, spaces
-			m = re.sub('#.*', '', m).strip()
-			if m:
-				machines.append(m)
-		print "Read list of machines from file: %s" % machines_file
-		print ','.join(machines)
+    if machines_file:
+        machines = []
+        for m in open(machines_file, 'r').readlines():
+            # remove comments, spaces
+            m = re.sub('#.*', '', m).strip()
+            if m:
+                machines.append(m)
+        print "Read list of machines from file: %s" % machines_file
+        print ','.join(machines)
 
-	if machines:
-		for machine in machines:
-			if not machine or re.search('\s', machine):
-				print "Invalid machine %s" % str(machine)
-				sys.exit(1)
-		machines = list(set(machines))
-		machines.sort()
+    if machines:
+        for machine in machines:
+            if not machine or re.search('\s', machine):
+                print "Invalid machine %s" % str(machine)
+                sys.exit(1)
+        machines = list(set(machines))
+        machines.sort()
 
-	job = server_job.server_job(control, parser.args[1:], results, label,
-				    user, machines, client, parse_job,
-				    ssh_user, ssh_port, ssh_pass)
-	debug_dir = os.path.join(results, 'debug')
-	stdout = os.path.join(debug_dir, 'autoserv.stdout')
-	stderr = os.path.join(debug_dir, 'autoserv.stderr')
-	if no_tee:
-		job.stdout.redirect(stdout)
-		job.stderr.redirect(stderr)
-	else:
-		job.stdout.tee_redirect(stdout)
-		job.stderr.tee_redirect(stderr)
+    job = server_job.server_job(control, parser.args[1:], results, label,
+                                user, machines, client, parse_job,
+                                ssh_user, ssh_port, ssh_pass)
+    debug_dir = os.path.join(results, 'debug')
+    stdout = os.path.join(debug_dir, 'autoserv.stdout')
+    stderr = os.path.join(debug_dir, 'autoserv.stderr')
+    if no_tee:
+        job.stdout.redirect(stdout)
+        job.stderr.redirect(stderr)
+    else:
+        job.stdout.tee_redirect(stdout)
+        job.stderr.tee_redirect(stderr)
 
-	# run the job
-	exit_code = 0
-	try:
-		if repair:
-			job.repair()
-		elif verify:
-			job.verify()
-		else:
-			try:
-				job.run(reboot, install_before, install_after)
-			finally:
-				job.cleanup_parser()
-	except:
-		job.aborted = True
-		traceback.print_exc()
+    # run the job
+    exit_code = 0
+    try:
+        if repair:
+            job.repair()
+        elif verify:
+            job.verify()
+        else:
+            try:
+                job.run(reboot, install_before, install_after)
+            finally:
+                job.cleanup_parser()
+    except:
+        job.aborted = True
+        traceback.print_exc()
 
-	if getattr(job, 'aborted', False):
-		sys.exit(1)
+    if getattr(job, 'aborted', False):
+        sys.exit(1)
 
 
 def main():
-	pid_file_manager = PidFileManager()
+    pid_file_manager = PidFileManager()
 
-	# grab the parser
-	parser = autoserv_parser.autoserv_parser
+    # grab the parser
+    parser = autoserv_parser.autoserv_parser
 
-	if len(sys.argv) == 1:
-		parser.parser.print_help()
-		sys.exit(1)
+    if len(sys.argv) == 1:
+        parser.parser.print_help()
+        sys.exit(1)
 
-	results  = parser.options.results
-	results  = os.path.abspath(results)
-	write_pidfile = parser.options.write_pidfile
-	if write_pidfile:
-		pid_file_manager.open_pid_file(results)
+    results  = parser.options.results
+    results  = os.path.abspath(results)
+    write_pidfile = parser.options.write_pidfile
+    if write_pidfile:
+        pid_file_manager.open_pid_file(results)
 
-	exit_code = 0
-	try:
-		try:
-			run_autoserv(pid_file_manager, results, parser)
-		except SystemExit, e:
-			exit_code = e.code
-		except:
-			traceback.print_exc()
-			# If we don't know what happened, we'll classify it as
-			# an 'abort' and return 1.
-			exit_code = 1
-	finally:
-		pid_file_manager.close_pid_file(exit_code)
-	sys.exit(exit_code)
+    exit_code = 0
+    try:
+        try:
+            run_autoserv(pid_file_manager, results, parser)
+        except SystemExit, e:
+            exit_code = e.code
+        except:
+            traceback.print_exc()
+            # If we don't know what happened, we'll classify it as
+            # an 'abort' and return 1.
+            exit_code = 1
+    finally:
+        pid_file_manager.close_pid_file(exit_code)
+    sys.exit(exit_code)
 
 
 if __name__ == '__main__':
-	main()
+    main()
diff --git a/server/autoserv_parser.py b/server/autoserv_parser.py
index 43e859f..b32e889 100644
--- a/server/autoserv_parser.py
+++ b/server/autoserv_parser.py
@@ -5,97 +5,97 @@
 
 
 class base_autoserv_parser(object):
-	"""Custom command-line options parser for autoserv.
+    """Custom command-line options parser for autoserv.
 
-	We can't use the general getopt methods here, as there will be unknown
-	extra arguments that we pass down into the control file instead.
-	Thus we process the arguments by hand, for which we are duly repentant.
-	Making a single function here just makes it harder to read. Suck it up.
-	"""
-	def __init__(self):
-		self.args = sys.argv[1:]
-		self.parser = optparse.OptionParser()
-		self.setup_options()
-		self.parse_args()
-		
-
-	def setup_options(self):
-		self.parser.add_option("-m", action="store", type="string",
-				       dest="machines",
-				       help="list of machines")
-		self.parser.add_option("-M", action="store", type="string",
-				       dest="machines_file",
-				       help="list of machines from file")
-		self.parser.add_option("-c", action="store_true",
-				       dest="client", default=False,
-				       help="control file is client side")
-		self.parser.add_option("-r", action="store", type="string",
-				       dest="results", default='.',
-				       help="specify results directory")
-		self.parser.add_option("-l", action="store", type="string",
-				       dest="label", default='',
-				       help="label for the job")
-		self.parser.add_option("-u", action="store", type="string",
-				       dest="user", 
-				       default=os.environ.get('USER'),
-				       help="username for the job")
-		self.parser.add_option("-P", action="store", type="string",
-				       dest="parse_job", 
-				       default='',
-				       help="parse the results of the job")
-		self.parser.add_option("-i", action="store_true",
-				       dest="install_before", default=False,
-			       help="reinstall machines before running the job")
-		self.parser.add_option("-I", action="store_true",
-				       dest="install_after", default=False,
-			        help="reinstall machines after running the job")
-	        self.parser.add_option("-b", action="store_true",
-				       dest="reboot", default=False,
-			               help="reboot all machines after job")
-		self.parser.add_option("-v", action="store_true",
-				       dest="verify", default=False,
-			               help="verify the machines only")
-		self.parser.add_option("-R", action="store_true",
-				       dest="repair", default=False,
-			               help="repair the machines")
-		self.parser.add_option("-n", action="store_true",
-				       dest="no_tee", default=False,
-			              help="no teeing the status to stdout/err")
-		self.parser.add_option("-p", action="store_true",
-				       dest="write_pidfile", default=False,
-			              help="write pidfile (.autoserv_execute)")
-		self.parser.add_option("--ssh-user", action="store",
-				       type="string", dest="ssh_user",
-				       default="root",
-				       help=("specify the user for ssh"
-				       "connections"))
-		self.parser.add_option("--ssh-port", action="store",
-				       type="int", dest="ssh_port",
-				       default=22,
-				       help=("specify the port to use for "
-					     "ssh connections"))
-		self.parser.add_option("--ssh-pass", action="store",
-				       type="string", dest="ssh_pass",
-				       default="",
-				       help=("specify the password to use "
-					     "for ssh connections"))
+    We can't use the general getopt methods here, as there will be unknown
+    extra arguments that we pass down into the control file instead.
+    Thus we process the arguments by hand, for which we are duly repentant.
+    Making a single function here just makes it harder to read. Suck it up.
+    """
+    def __init__(self):
+        self.args = sys.argv[1:]
+        self.parser = optparse.OptionParser()
+        self.setup_options()
+        self.parse_args()
 
 
-	def parse_args(self):
-		(self.options, self.args) = self.parser.parse_args()
+    def setup_options(self):
+        self.parser.add_option("-m", action="store", type="string",
+                               dest="machines",
+                               help="list of machines")
+        self.parser.add_option("-M", action="store", type="string",
+                               dest="machines_file",
+                               help="list of machines from file")
+        self.parser.add_option("-c", action="store_true",
+                               dest="client", default=False,
+                               help="control file is client side")
+        self.parser.add_option("-r", action="store", type="string",
+                               dest="results", default='.',
+                               help="specify results directory")
+        self.parser.add_option("-l", action="store", type="string",
+                               dest="label", default='',
+                               help="label for the job")
+        self.parser.add_option("-u", action="store", type="string",
+                               dest="user",
+                               default=os.environ.get('USER'),
+                               help="username for the job")
+        self.parser.add_option("-P", action="store", type="string",
+                               dest="parse_job",
+                               default='',
+                               help="parse the results of the job")
+        self.parser.add_option("-i", action="store_true",
+                               dest="install_before", default=False,
+                               help="reinstall machines before running the job")
+        self.parser.add_option("-I", action="store_true",
+                               dest="install_after", default=False,
+                               help="reinstall machines after running the job")
+        self.parser.add_option("-b", action="store_true",
+                               dest="reboot", default=False,
+                               help="reboot all machines after job")
+        self.parser.add_option("-v", action="store_true",
+                               dest="verify", default=False,
+                               help="verify the machines only")
+        self.parser.add_option("-R", action="store_true",
+                               dest="repair", default=False,
+                               help="repair the machines")
+        self.parser.add_option("-n", action="store_true",
+                               dest="no_tee", default=False,
+                               help="no teeing the status to stdout/err")
+        self.parser.add_option("-p", action="store_true",
+                               dest="write_pidfile", default=False,
+                               help="write pidfile (.autoserv_execute)")
+        self.parser.add_option("--ssh-user", action="store",
+                               type="string", dest="ssh_user",
+                               default="root",
+                               help=("specify the user for "
+                                     "ssh connections"))
+        self.parser.add_option("--ssh-port", action="store",
+                               type="int", dest="ssh_port",
+                               default=22,
+                               help=("specify the port to use for "
+                                     "ssh connections"))
+        self.parser.add_option("--ssh-pass", action="store",
+                               type="string", dest="ssh_pass",
+                               default="",
+                               help=("specify the password to use "
+                                     "for ssh connections"))
+
+
+    def parse_args(self):
+        (self.options, self.args) = self.parser.parse_args()
 
 
 
 try:
-	from autotest_lib.server.site_autoserv_parser \
-	     import site_autoserv_parser
+    from autotest_lib.server.site_autoserv_parser \
+         import site_autoserv_parser
 except ImportError:
-	class site_autoserv_parser(base_autoserv_parser):
-		pass
+    class site_autoserv_parser(base_autoserv_parser):
+        pass
 
 
 class autoserv_parser(site_autoserv_parser):
-	pass
+    pass
 
 
 # create the one and only one instance of autoserv_parser
diff --git a/server/autotest.py b/server/autotest.py
index 57a80a1..21e519b 100644
--- a/server/autotest.py
+++ b/server/autotest.py
@@ -5,7 +5,7 @@
 """
 This module defines the Autotest class
 
-	Autotest: software to run tests automatically
+        Autotest: software to run tests automatically
 """
 
 __author__ = """
@@ -31,462 +31,462 @@
 
 
 class BaseAutotest(installable_object.InstallableObject):
-	"""
-	This class represents the Autotest program.
+    """
+    This class represents the Autotest program.
 
-	Autotest is used to run tests automatically and collect the results.
-	It also supports profilers.
+    Autotest is used to run tests automatically and collect the results.
+    It also supports profilers.
 
-	Implementation details:
-	This is a leaf class in an abstract class hierarchy, it must
-	implement the unimplemented methods in parent classes.
-	"""
-	job = None
+    Implementation details:
+    This is a leaf class in an abstract class hierarchy, it must
+    implement the unimplemented methods in parent classes.
+    """
+    job = None
 
 
-	def __init__(self, host = None):
-		self.host = host
-		self.got = False
-		self.installed = False
-		self.serverdir = utils.get_server_dir()
-		super(BaseAutotest, self).__init__()
+    def __init__(self, host = None):
+        self.host = host
+        self.got = False
+        self.installed = False
+        self.serverdir = utils.get_server_dir()
+        super(BaseAutotest, self).__init__()
 
 
-	@logging.record
-	def install(self, host = None):
-		"""
-		Install autotest.  If get() was not called previously, an
-		attempt will be made to install from the autotest svn
-		repository.
+    @logging.record
+    def install(self, host = None):
+        """
+        Install autotest.  If get() was not called previously, an
+        attempt will be made to install from the autotest svn
+        repository.
 
-		Args:
-			host: a Host instance on which autotest will be
-				installed
+        Args:
+                host: a Host instance on which autotest will be
+                        installed
 
-		Raises:
-			AutoservError: if a tarball was not specified and
-				the target host does not have svn installed in its path
+        Raises:
+                AutoservError: if a tarball was not specified and
+                        the target host does not have svn installed in its path
 
-		TODO(poirier): check dependencies
-		autotest needs:
-		bzcat
-		liboptdev (oprofile)
-		binutils-dev (oprofile)
-		make
-		psutils (netperf)
-		"""
-		if not host:
-			host = self.host
-		if not self.got:
-			self.get()
-		host.wait_up(timeout=30)
-		host.setup()
-		print "Installing autotest on %s" % host.hostname
+        TODO(poirier): check dependencies
+        autotest needs:
+        bzcat
+        liboptdev (oprofile)
+        binutils-dev (oprofile)
+        make
+        psutils (netperf)
+        """
+        if not host:
+            host = self.host
+        if not self.got:
+            self.get()
+        host.wait_up(timeout=30)
+        host.setup()
+        print "Installing autotest on %s" % host.hostname
 
-		# Let's try to figure out where autotest is installed. If we can't,
-		# (autotest not installed) just assume '/usr/local/autotest' and 
-		# proceed.
-		try:
-			autodir = _get_autodir(host)
-		except error.AutotestRunError:
-			autodir = '/usr/local/autotest'
+        # Let's try to figure out where autotest is installed. If we can't,
+        # (autotest not installed) just assume '/usr/local/autotest' and
+        # proceed.
+        try:
+            autodir = _get_autodir(host)
+        except error.AutotestRunError:
+            autodir = '/usr/local/autotest'
 
-		host.run('mkdir -p "%s"' % utils.sh_escape(autodir))
+        host.run('mkdir -p "%s"' % utils.sh_escape(autodir))
 
-		if getattr(host, 'site_install_autotest', None):
-			if host.site_install_autotest():
-				self.installed = True
-				return
+        if getattr(host, 'site_install_autotest', None):
+            if host.site_install_autotest():
+                self.installed = True
+                return
 
-		# try to install from file or directory
-		if self.source_material:
-			if os.path.isdir(self.source_material):
-				# Copy autotest recursively
-				host.send_file(self.source_material, autodir)
-			else:
-				# Copy autotest via tarball
-				e_msg = 'Installation method not yet implemented!'
-				raise NotImplementedError(e_msg)
-			print "Installation of autotest completed"
-			self.installed = True
-			return
+        # try to install from file or directory
+        if self.source_material:
+            if os.path.isdir(self.source_material):
+                # Copy autotest recursively
+                host.send_file(self.source_material, autodir)
+            else:
+                # Copy autotest via tarball
+                e_msg = 'Installation method not yet implemented!'
+                raise NotImplementedError(e_msg)
+            print "Installation of autotest completed"
+            self.installed = True
+            return
 
-		# if that fails try to install using svn
-		if utils.run('which svn').exit_status:
-			raise error.AutoservError('svn not found in path on \
-			target machine: %s' % host.name)
-		try:
-			host.run('svn checkout %s %s' %
-				 (AUTOTEST_SVN, autodir))
-		except error.AutoservRunError, e:
-			host.run('svn checkout %s %s' %
-				 (AUTOTEST_HTTP, autodir))
-		print "Installation of autotest completed"
-		self.installed = True
+        # if that fails try to install using svn
+        if utils.run('which svn').exit_status:
+            raise error.AutoservError('svn not found in path on '
+                                      'target machine: %s' % host.hostname)
+        try:
+            host.run('svn checkout %s %s' %
+                     (AUTOTEST_SVN, autodir))
+        except error.AutoservRunError, e:
+            host.run('svn checkout %s %s' %
+                     (AUTOTEST_HTTP, autodir))
+        print "Installation of autotest completed"
+        self.installed = True
 
 
-	def get(self, location = None):
-		if not location:
-			location = os.path.join(self.serverdir, '../client')
-			location = os.path.abspath(location)
-		# If there's stuff run on our client directory already, it
-		# can cause problems. Try giving it a quick clean first.
-		cwd = os.getcwd()
-		os.chdir(location)
-		os.system('tools/make_clean')
-		os.chdir(cwd)
-		super(BaseAutotest, self).get(location)
-		self.got = True
+    def get(self, location = None):
+        if not location:
+            location = os.path.join(self.serverdir, '../client')
+            location = os.path.abspath(location)
+        # If there's stuff run on our client directory already, it
+        # can cause problems. Try giving it a quick clean first.
+        cwd = os.getcwd()
+        os.chdir(location)
+        os.system('tools/make_clean')
+        os.chdir(cwd)
+        super(BaseAutotest, self).get(location)
+        self.got = True
 
 
-	def run(self, control_file, results_dir = '.', host = None,
-		timeout=None, tag=None, parallel_flag=False):
-		"""
-		Run an autotest job on the remote machine.
+    def run(self, control_file, results_dir = '.', host = None,
+            timeout=None, tag=None, parallel_flag=False):
+        """
+        Run an autotest job on the remote machine.
 
-		Args:
-			control_file: an open file-like-obj of the control file
-			results_dir: a str path where the results should be stored
-				on the local filesystem
-			host: a Host instance on which the control file should
-				be run
-		        tag: tag name for the client side instance of autotest
-			parallel_flag: flag set when multiple jobs are run at the
-			          same time
-		Raises:
-			AutotestRunError: if there is a problem executing
-				the control file
-		"""
-		host = self._get_host_and_setup(host)
-		results_dir = os.path.abspath(results_dir)
+        Args:
+                control_file: an open file-like-obj of the control file
+                results_dir: a str path where the results should be stored
+                        on the local filesystem
+                host: a Host instance on which the control file should
+                        be run
+                tag: tag name for the client side instance of autotest
+                parallel_flag: flag set when multiple jobs are run at the
+                          same time
+        Raises:
+                AutotestRunError: if there is a problem executing
+                        the control file
+        """
+        host = self._get_host_and_setup(host)
+        results_dir = os.path.abspath(results_dir)
 
-		if tag:
-			results_dir = os.path.join(results_dir, tag)
+        if tag:
+            results_dir = os.path.join(results_dir, tag)
 
-		atrun = _Run(host, results_dir, tag, parallel_flag)
-		self._do_run(control_file, results_dir, host, atrun, timeout)
+        atrun = _Run(host, results_dir, tag, parallel_flag)
+        self._do_run(control_file, results_dir, host, atrun, timeout)
 
 
-	def _get_host_and_setup(self, host):
-		if not host:
-			host = self.host
-		if not self.installed:
-			self.install(host)
+    def _get_host_and_setup(self, host):
+        if not host:
+            host = self.host
+        if not self.installed:
+            self.install(host)
 
-		host.wait_up(timeout=30)
-		return host
+        host.wait_up(timeout=30)
+        return host
 
 
-	def prepare_for_copying_logs(self, src, dest, host):
-		keyval_path = ''
-		if not os.path.exists(os.path.join(dest, 'keyval')):
-			# Client-side keyval file can be copied directly
-			return keyval_path
-		# Copy client-side keyval to temporary location
-		try:
-			try:
-				# Create temp file
-				fd, keyval_path = tempfile.mkstemp(
-						'.keyval_%s' % host.hostname)
-				host.get_file(os.path.join(src, 'keyval'),
-					      keyval_path)
-			finally:
-				# We will squirrel away the client side keyval
-				# away and move it back when we are done
-				self.temp_keyval_path = tempfile.mktemp()
-				host.run('mv %s %s' %
-				         (os.path.join(src, 'keyval'),
-				         self.temp_keyval_path))
-		except (error.AutoservRunError, error.AutoservSSHTimeout):
-			print "Prepare for copying logs failed"
-		return keyval_path
+    def prepare_for_copying_logs(self, src, dest, host):
+        keyval_path = ''
+        if not os.path.exists(os.path.join(dest, 'keyval')):
+            # Client-side keyval file can be copied directly
+            return keyval_path
+        # Copy client-side keyval to temporary location
+        try:
+            try:
+                # Create temp file
+                fd, keyval_path = tempfile.mkstemp(
+                                '.keyval_%s' % host.hostname)
+                host.get_file(os.path.join(src, 'keyval'),
+                              keyval_path)
+            finally:
+                # squirrel the client-side keyval away and move it
+                # back when we are done
+                self.temp_keyval_path = tempfile.mktemp()
+                host.run('mv %s %s' %
+                         (os.path.join(src, 'keyval'),
+                         self.temp_keyval_path))
+        except (error.AutoservRunError, error.AutoservSSHTimeout):
+            print "Prepare for copying logs failed"
+        return keyval_path
 
 
-	def process_copied_logs(self, dest, host, keyval_path):
-		if not os.path.exists(os.path.join(dest, 'keyval')):
-			# Client-side keyval file was copied directly
-			return
-		# Append contents of keyval_<host> file to keyval file
-		try:
-			# Read in new and old keyval files
-			new_keyval = utils.read_keyval(keyval_path)
-			old_keyval = utils.read_keyval(dest)
-			# 'Delete' from new keyval entries that are in both
-			tmp_keyval = {}
-			for key, val in new_keyval.iteritems():
-				if key not in old_keyval:
-					tmp_keyval[key] = val
-			# Append new info to keyval file
-			utils.write_keyval(dest, tmp_keyval)
-			# Delete keyval_<host> file
-			os.remove(keyval_path)
-		except IOError:
-			print "Process copied logs failed"
+    def process_copied_logs(self, dest, host, keyval_path):
+        if not os.path.exists(os.path.join(dest, 'keyval')):
+            # Client-side keyval file was copied directly
+            return
+        # Append contents of keyval_<host> file to keyval file
+        try:
+            # Read in new and old keyval files
+            new_keyval = utils.read_keyval(keyval_path)
+            old_keyval = utils.read_keyval(dest)
+            # 'Delete' from new keyval entries that are in both
+            tmp_keyval = {}
+            for key, val in new_keyval.iteritems():
+                if key not in old_keyval:
+                    tmp_keyval[key] = val
+            # Append new info to keyval file
+            utils.write_keyval(dest, tmp_keyval)
+            # Delete keyval_<host> file
+            os.remove(keyval_path)
+        except IOError:
+            print "Process copied logs failed"
 
 
-	def postprocess_copied_logs(self, src, host):
-		# we can now put our keyval file back
-		try:
-			host.run('mv %s %s' % (self.temp_keyval_path,
-		                 os.path.join(src, 'keyval')))
-		except:
-			pass
+    def postprocess_copied_logs(self, src, host):
+        # we can now put our keyval file back
+        try:
+            host.run('mv %s %s' % (self.temp_keyval_path,
+                     os.path.join(src, 'keyval')))
+        except:
+            pass
 
 
-	def _do_run(self, control_file, results_dir, host, atrun, timeout):
-		try:
-			atrun.verify_machine()
-		except:
-			print "Verify machine failed on %s. Reinstalling" % \
-								host.hostname
-			self.install(host)
-		atrun.verify_machine()
-		debug = os.path.join(results_dir, 'debug')
-		try:
-			os.makedirs(debug)
-		except:
-			pass
+    def _do_run(self, control_file, results_dir, host, atrun, timeout):
+        try:
+            atrun.verify_machine()
+        except:
+            print "Verify machine failed on %s. Reinstalling" % \
+                                                    host.hostname
+            self.install(host)
+        atrun.verify_machine()
+        debug = os.path.join(results_dir, 'debug')
+        try:
+            os.makedirs(debug)
+        except:
+            pass
 
-		# Ready .... Aim ....
-		for control in [atrun.remote_control_file,
-				atrun.remote_control_file + '.state',
-				atrun.manual_control_file,
-				atrun.manual_control_file + '.state']:
-			host.run('rm -f ' + control)
+        # Ready .... Aim ....
+        for control in [atrun.remote_control_file,
+                        atrun.remote_control_file + '.state',
+                        atrun.manual_control_file,
+                        atrun.manual_control_file + '.state']:
+            host.run('rm -f ' + control)
 
-		# Copy control_file to remote_control_file on the host
-		tmppath = utils.get(control_file)
-		host.send_file(tmppath, atrun.remote_control_file)
-		if os.path.abspath(tmppath) != os.path.abspath(control_file):
-			os.remove(tmppath)
+        # Copy control_file to remote_control_file on the host
+        tmppath = utils.get(control_file)
+        host.send_file(tmppath, atrun.remote_control_file)
+        if os.path.abspath(tmppath) != os.path.abspath(control_file):
+            os.remove(tmppath)
 
-		try:
-			atrun.execute_control(timeout=timeout)
-		finally:
-			# make an effort to wait for the machine to come up
-			try:
-				host.wait_up(timeout=30)
-			except error.AutoservError:
-				# don't worry about any errors, we'll try and
-				# get the results anyway
-				pass
+        try:
+            atrun.execute_control(timeout=timeout)
+        finally:
+            # make an effort to wait for the machine to come up
+            try:
+                host.wait_up(timeout=30)
+            except error.AutoservError:
+                # don't worry about any errors, we'll try and
+                # get the results anyway
+                pass
 
-			# get the results
-			if not atrun.tag:
-				results = os.path.join(atrun.autodir,
-						       'results', 'default')
-			else:
-				results = os.path.join(atrun.autodir,
-						       'results', atrun.tag)
+            # get the results
+            if not atrun.tag:
+                results = os.path.join(atrun.autodir,
+                                       'results', 'default')
+            else:
+                results = os.path.join(atrun.autodir,
+                                       'results', atrun.tag)
 
-			# Copy all dirs in default to results_dir
-			keyval_path = self.prepare_for_copying_logs(results,
-						results_dir, host)
-			host.get_file(results + '/', results_dir)
-			self.process_copied_logs(results_dir, host, keyval_path)
-			self.postprocess_copied_logs(results, host)
+            # Copy all dirs in default to results_dir
+            keyval_path = self.prepare_for_copying_logs(results,
+                                    results_dir, host)
+            host.get_file(results + '/', results_dir)
+            self.process_copied_logs(results_dir, host, keyval_path)
+            self.postprocess_copied_logs(results, host)
 
 
-	def run_timed_test(self, test_name, results_dir='.', host=None,
-			   timeout=None, tag=None, *args, **dargs):
-		"""
-		Assemble a tiny little control file to just run one test,
-		and run it as an autotest client-side test
-		"""
-		if not host:
-			host = self.host
-		if not self.installed:
-			self.install(host)
-		opts = ["%s=%s" % (o[0], repr(o[1])) for o in dargs.items()]
-		cmd = ", ".join([repr(test_name)] + map(repr, args) + opts)
-		control = "job.run_test(%s)\n" % cmd
-		self.run(control, results_dir, host, timeout=timeout, tag=tag)
+    def run_timed_test(self, test_name, results_dir='.', host=None,
+                       timeout=None, tag=None, *args, **dargs):
+        """
+        Assemble a tiny little control file to just run one test,
+        and run it as an autotest client-side test
+        """
+        if not host:
+            host = self.host
+        if not self.installed:
+            self.install(host)
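+        # assemble a one-line control file of the form
+        #   job.run_test('test_name', arg..., key=val, ...)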
+        opts = ["%s=%s" % (o[0], repr(o[1])) for o in dargs.items()]
+        cmd = ", ".join([repr(test_name)] + map(repr, args) + opts)
+        control = "job.run_test(%s)\n" % cmd
+        self.run(control, results_dir, host, timeout=timeout, tag=tag)
 
 
-	def run_test(self, test_name, results_dir='.', host=None,
-		     tag=None, *args, **dargs):
-		self.run_timed_test(test_name, results_dir, host, timeout=None,
-				    tag=tag, *args, **dargs)
+    def run_test(self, test_name, results_dir='.', host=None,
+                 tag=None, *args, **dargs):
+        self.run_timed_test(test_name, results_dir, host, timeout=None,
+                            tag=tag, *args, **dargs)
 
 
 class _Run(object):
-	"""
-	Represents a run of autotest control file.  This class maintains
-	all the state necessary as an autotest control file is executed.
+    """
+    Represents a run of autotest control file.  This class maintains
+    all the state necessary as an autotest control file is executed.
 
-	It is not intended to be used directly, rather control files
-	should be run using the run method in Autotest.
-	"""
-	def __init__(self, host, results_dir, tag, parallel_flag):
-		self.host = host
-		self.results_dir = results_dir
-		self.env = host.env
-		self.tag = tag
-		self.parallel_flag = parallel_flag
-		self.autodir = _get_autodir(self.host)
-		if tag:
-			self.manual_control_file = os.path.join(self.autodir,
-							'control.%s' % tag)
-			self.remote_control_file = os.path.join(self.autodir,
-						'control.%s.autoserv' % tag)
-		else:
-			self.manual_control_file = os.path.join(self.autodir,
-								'control')
-			self.remote_control_file = os.path.join(self.autodir,
-							'control.autoserv')
+    It is not intended to be used directly, rather control files
+    should be run using the run method in Autotest.
+    """
+    def __init__(self, host, results_dir, tag, parallel_flag):
+        self.host = host
+        self.results_dir = results_dir
+        self.env = host.env
+        self.tag = tag
+        self.parallel_flag = parallel_flag
+        self.autodir = _get_autodir(self.host)
+        if tag:
+            self.manual_control_file = os.path.join(self.autodir,
+                                            'control.%s' % tag)
+            self.remote_control_file = os.path.join(self.autodir,
+                                    'control.%s.autoserv' % tag)
+        else:
+            self.manual_control_file = os.path.join(self.autodir,
+                                                    'control')
+            self.remote_control_file = os.path.join(self.autodir,
+                                            'control.autoserv')
 
 
-	def verify_machine(self):
-		binary = os.path.join(self.autodir, 'bin/autotest')
-		try:
-			self.host.run('ls %s > /dev/null 2>&1' % binary)
-		except:
-			raise "Autotest does not appear to be installed"
+    def verify_machine(self):
+        binary = os.path.join(self.autodir, 'bin/autotest')
+        try:
+            self.host.run('ls %s > /dev/null 2>&1' % binary)
+        except:
+            raise error.AutotestRunError(
+                "Autotest does not appear to be installed")
 
-		if not self.parallel_flag:
-			tmpdir = os.path.join(self.autodir, 'tmp')
-			download = os.path.join(self.autodir, 'tests/download')
-			self.host.run('umount %s' % tmpdir, ignore_status=True)
-			self.host.run('umount %s' % download, ignore_status=True)
+        if not self.parallel_flag:
+            tmpdir = os.path.join(self.autodir, 'tmp')
+            download = os.path.join(self.autodir, 'tests/download')
+            self.host.run('umount %s' % tmpdir, ignore_status=True)
+            self.host.run('umount %s' % download, ignore_status=True)
 
-	def get_full_cmd(self, section):
-		# build up the full command we want to run over the host
-		cmd = [os.path.join(self.autodir, 'bin/autotest_client')]
-		if section > 0:
-			cmd.append('-c')
-		if self.tag:
-			cmd.append('-t %s' % self.tag)
-		if self.host.job.use_external_logging():
-			cmd.append('-l')
-		cmd.append(self.remote_control_file)
-		return ' '.join(cmd)
+    def get_full_cmd(self, section):
+        # build up the full command we want to run over the host
+        cmd = [os.path.join(self.autodir, 'bin/autotest_client')]
+        if section > 0:
+            cmd.append('-c')
+        if self.tag:
+            cmd.append('-t %s' % self.tag)
+        if self.host.job.use_external_logging():
+            cmd.append('-l')
+        cmd.append(self.remote_control_file)
+        return ' '.join(cmd)
 
 
-	def get_client_log(self, section):
-		# open up the files we need for our logging
-		client_log_file = os.path.join(self.results_dir, 'debug',
-					       'client.log.%d' % section)
-		return open(client_log_file, 'w', 0)
+    def get_client_log(self, section):
+        # open up the files we need for our logging
+        client_log_file = os.path.join(self.results_dir, 'debug',
+                                       'client.log.%d' % section)
+        return open(client_log_file, 'w', 0)
 
 
-	def execute_section(self, section, timeout):
-		print "Executing %s/bin/autotest %s/control phase %d" % \
-					(self.autodir, self.autodir,
-					 section)
+    def execute_section(self, section, timeout):
+        print "Executing %s/bin/autotest %s/control phase %d" % \
+                                (self.autodir, self.autodir,
+                                 section)
 
-		full_cmd = self.get_full_cmd(section)
-		client_log = self.get_client_log(section)
-		redirector = server_job.client_logger(self.host.job)
+        full_cmd = self.get_full_cmd(section)
+        client_log = self.get_client_log(section)
+        redirector = server_job.client_logger(self.host.job)
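+        # the client's status log comes back on stderr; client_logger
+        # keeps the last status line so we can tell below how the
+        # section ended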
 
-		try:
-			old_resultdir = self.host.job.resultdir
-			self.host.job.resultdir = self.results_dir
-			result = self.host.run(full_cmd, ignore_status=True,
-					       timeout=timeout,
-					       stdout_tee=client_log,
-					       stderr_tee=redirector)
-		finally:
-			redirector.close()
-			self.host.job.resultdir = old_resultdir
+        try:
+            old_resultdir = self.host.job.resultdir
+            self.host.job.resultdir = self.results_dir
+            result = self.host.run(full_cmd, ignore_status=True,
+                                   timeout=timeout,
+                                   stdout_tee=client_log,
+                                   stderr_tee=redirector)
+        finally:
+            redirector.close()
+            self.host.job.resultdir = old_resultdir
 
-		if result.exit_status == 1:
-			self.host.job.aborted = True
-		if not result.stderr:
-  			raise error.AutotestRunError(
-			    "execute_section: %s failed to return anything\n"
-			    "stdout:%s\n" % (full_cmd, result.stdout))
+        if result.exit_status == 1:
+            self.host.job.aborted = True
+        if not result.stderr:
+            raise error.AutotestRunError(
+                "execute_section: %s failed to return anything\n"
+                "stdout:%s\n" % (full_cmd, result.stdout))
 
-		return redirector.last_line
+        return redirector.last_line
 
 
-	def execute_control(self, timeout=None):
-		section = 0
-		time_left = None
-		if timeout:
-			end_time = time.time() + timeout
-			time_left = end_time - time.time()
-		while not timeout or time_left > 0:
-			last = self.execute_section(section, time_left)
-			if timeout:
-				time_left = end_time - time.time()
-				if time_left <= 0:
-					break
-			section += 1
-			if re.match(r'^END .*\t----\t----\t.*$', last):
-				print "Client complete"
-				return
-			elif re.match('^\t*GOOD\t----\treboot\.start.*$', last):
-				print "Client is rebooting"
-				print "Waiting for client to halt"
-				if not self.host.wait_down(HALT_TIME):
-					raise error.AutotestRunError("%s \
-					failed to shutdown after %ds" %
-							(self.host.hostname,
-							HALT_TIME))
-				print "Client down, waiting for restart"
-				if not self.host.wait_up(BOOT_TIME):
-					# since reboot failed
-					# hardreset the machine once if possible
-					# before failing this control file
-					print "Hardresetting %s" % (
-					    self.host.hostname,)
-					try:
-						self.host.hardreset(wait=False)
-					except error.AutoservUnsupportedError:
-						print "Hardreset unsupported on %s" % (
-						    self.host.hostname,)
-					raise error.AutotestRunError("%s failed"
-						" to boot after %ds" % (
-						self.host.hostname,
-						BOOT_TIME,))
-				self.host.reboot_followup()
-				continue
-			self.host.job.record("ABORT", None, None,
-					     "Autotest client terminated " +
-					     "unexpectedly")
-			# give the client machine a chance to recover from
-			# possible crash
-			self.host.wait_up(CRASH_RECOVERY_TIME)
-			raise error.AutotestRunError("Aborting - unexpected "
-						     "final status message "
-						     "from client: %s\n"
-						     % last)
+    def execute_control(self, timeout=None):
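+        # run the client one section at a time; each reboot ends a
+        # section and the next run continues (-c) until the client
+        # logs a final END status or the timeout expires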
+        section = 0
+        time_left = None
+        if timeout:
+            end_time = time.time() + timeout
+            time_left = end_time - time.time()
+        while not timeout or time_left > 0:
+            last = self.execute_section(section, time_left)
+            if timeout:
+                time_left = end_time - time.time()
+                if time_left <= 0:
+                    break
+            section += 1
+            if re.match(r'^END .*\t----\t----\t.*$', last):
+                print "Client complete"
+                return
+            elif re.match('^\t*GOOD\t----\treboot\.start.*$', last):
+                print "Client is rebooting"
+                print "Waiting for client to halt"
+                if not self.host.wait_down(HALT_TIME):
+                    raise error.AutotestRunError(
+                        "%s failed to shut down after %ds" %
+                        (self.host.hostname, HALT_TIME))
+                print "Client down, waiting for restart"
+                if not self.host.wait_up(BOOT_TIME):
+                    # since reboot failed
+                    # hardreset the machine once if possible
+                    # before failing this control file
+                    print "Hardresetting %s" % (
+                        self.host.hostname,)
+                    try:
+                        self.host.hardreset(wait=False)
+                    except error.AutoservUnsupportedError:
+                        print "Hardreset unsupported on %s" % (
+                            self.host.hostname,)
+                    raise error.AutotestRunError("%s failed"
+                            " to boot after %ds" % (
+                            self.host.hostname,
+                            BOOT_TIME,))
+                self.host.reboot_followup()
+                continue
+            self.host.job.record("ABORT", None, None,
+                                 "Autotest client terminated " +
+                                 "unexpectedly")
+            # give the client machine a chance to recover from
+            # possible crash
+            self.host.wait_up(CRASH_RECOVERY_TIME)
+            raise error.AutotestRunError("Aborting - unexpected "
+                                         "final status message "
+                                         "from client: %s\n"
+                                         % last)
 
-		# should only get here if we timed out
-		assert timeout
-		raise error.AutotestTimeoutError()
+        # should only get here if we timed out
+        assert timeout
+        raise error.AutotestTimeoutError()
 
 
 def _get_autodir(host):
-	dir = host.get_autodir()
-	if dir:
-		return dir
-	try:
-		# There's no clean way to do this. readlink may not exist
-		cmd = "python -c 'import os,sys; print os.readlink(sys.argv[1])' /etc/autotest.conf 2> /dev/null"
-		dir = os.path.dirname(host.run(cmd).stdout)
-		if dir:
-			return dir
-	except error.AutoservRunError:
-		pass
-	for path in ['/usr/local/autotest', '/home/autotest']:
-		try:
-			host.run('ls %s > /dev/null 2>&1' % \
-					 os.path.join(path, 'bin/autotest'))
-			return path
-		except error.AutoservRunError:
-			pass
-	raise error.AutotestRunError("Cannot figure out autotest directory")
+    dir = host.get_autodir()
+    if dir:
+        return dir
+    try:
+        # There's no clean way to do this. readlink may not exist
+        cmd = "python -c 'import os,sys; print os.readlink(sys.argv[1])' /etc/autotest.conf 2> /dev/null"
+        dir = os.path.dirname(host.run(cmd).stdout)
+        if dir:
+            return dir
+    except error.AutoservRunError:
+        pass
+    for path in ['/usr/local/autotest', '/home/autotest']:
+        try:
+            host.run('ls %s > /dev/null 2>&1' % \
+                             os.path.join(path, 'bin/autotest'))
+            return path
+        except error.AutoservRunError:
+            pass
+    raise error.AutotestRunError("Cannot figure out autotest directory")
 
 
 # site_autotest.py may be non-existent or empty; make sure that an appropriate
 # SiteAutotest class is created nevertheless
 try:
-	from site_autotest import SiteAutotest
+    from site_autotest import SiteAutotest
 except ImportError:
-	class SiteAutotest(BaseAutotest):
-		pass
+    class SiteAutotest(BaseAutotest):
+        pass
 
 
 class Autotest(SiteAutotest):
-	pass
+    pass
diff --git a/server/autotest_unittest.py b/server/autotest_unittest.py
index f14ea21..3df353c 100644
--- a/server/autotest_unittest.py
+++ b/server/autotest_unittest.py
@@ -11,287 +11,287 @@
 
 
 class TestBaseAutotest(unittest.TestCase):
-	def setUp(self):
-		# create god
-		self.god = mock.mock_god()
+    def setUp(self):
+        # create god
+        self.god = mock.mock_god()
 
-		# stub out utils
-		self.utils_obj = self.god.create_mock_class(utils, "utils")
-		self.old_utils = autotest.utils
-		autotest.utils = self.utils_obj
+        # stub out utils
+        self.utils_obj = self.god.create_mock_class(utils, "utils")
+        self.old_utils = autotest.utils
+        autotest.utils = self.utils_obj
 
-		# stub out os
-		self.old_os = autotest.os
-		self.os_obj = self.god.create_mock_class(os, "os")
-		autotest.os = self.os_obj
+        # stub out os
+        self.old_os = autotest.os
+        self.os_obj = self.god.create_mock_class(os, "os")
+        autotest.os = self.os_obj
 
-		# stub out os.path
-		self.path_obj = self.god.create_mock_class(os.path, "os.path")
-		autotest.os.path = self.path_obj
+        # stub out os.path
+        self.path_obj = self.god.create_mock_class(os.path, "os.path")
+        autotest.os.path = self.path_obj
 
-		# need to set return of one function in utils called in constr.
-		self.server_dir = "autotest_lib.server"
-		func_call = self.utils_obj.get_server_dir.expect_call()
-		func_call.and_return(self.server_dir)
+        # need to set the return value of one utils function called in
+        # the constructor
+        self.server_dir = "autotest_lib.server"
+        func_call = self.utils_obj.get_server_dir.expect_call()
+        func_call.and_return(self.server_dir)
 
-		# create our host mock (and give it a hostname)
-		self.host = self.god.create_mock_class(ssh_host.SSHHost,
-		                                       "SSHHost")
-		self.host.hostname = "foo"
+        # create our host mock (and give it a hostname)
+        self.host = self.god.create_mock_class(ssh_host.SSHHost,
+                                               "SSHHost")
+        self.host.hostname = "foo"
 
-		# create the autotest object
-		self.base_autotest = autotest.BaseAutotest(self.host)
+        # create the autotest object
+        self.base_autotest = autotest.BaseAutotest(self.host)
 
 
-	def tearDown(self):
-		# put things back
-		autotest.utils = self.old_utils
-		autotest.os = self.old_os
+    def tearDown(self):
+        # put things back
+        autotest.utils = self.old_utils
+        autotest.os = self.old_os
 
 
-	def test_constructor(self):
-		# we should check the calls
-		self.god.check_playback()
+    def test_constructor(self):
+        # we should check the calls
+        self.god.check_playback()
 
-	def common_install_test_setup(self, autodir, is_site_install_autotest):
-	        # mock other methods
-		old_get_autodir = autotest._get_autodir
-		get_autodir_obj = self.god.create_mock_function("_get_autodir")
-		autotest._get_autodir = get_autodir_obj
+    def common_install_test_setup(self, autodir, is_site_install_autotest):
+        # mock other methods
+        old_get_autodir = autotest._get_autodir
+        get_autodir_obj = self.god.create_mock_function("_get_autodir")
+        autotest._get_autodir = get_autodir_obj
 
-		self.base_autotest.got = True
-		self.source_material = None
+        self.base_autotest.got = True
+        self.source_material = None
 
-		# record calls
-		self.host.wait_up.expect_call(timeout=30)
-		self.host.setup.expect_call()
-		get_autodir_obj.expect_call(self.host).and_return(autodir)
-		rt = self.utils_obj.sh_escape.expect_call(autodir)
-		rt.and_return(autodir)
-		self.host.run.expect_call('mkdir -p "%s"' % (autodir))
-		rt = self.host.site_install_autotest.expect_call()
-		rt.and_return(is_site_install_autotest)
+        # record calls
+        self.host.wait_up.expect_call(timeout=30)
+        self.host.setup.expect_call()
+        get_autodir_obj.expect_call(self.host).and_return(autodir)
+        rt = self.utils_obj.sh_escape.expect_call(autodir)
+        rt.and_return(autodir)
+        self.host.run.expect_call('mkdir -p "%s"' % (autodir))
+        rt = self.host.site_install_autotest.expect_call()
+        rt.and_return(is_site_install_autotest)
 
-		return old_get_autodir
+        return old_get_autodir
 
 
-	def common_install_test_teardown(self, old_get_autodir):
-		# put things back
-		autotest._get_autodir = old_get_autodir
+    def common_install_test_teardown(self, old_get_autodir):
+        # put things back
+        autotest._get_autodir = old_get_autodir
 
 
-	def test_install1(self):
-		# setup
-		autodir = "autodir"
-		old_get_autodir = self.common_install_test_setup(autodir, True)
+    def test_install1(self):
+        # setup
+        autodir = "autodir"
+        old_get_autodir = self.common_install_test_setup(autodir, True)
 
-		# run test
-		self.base_autotest.install()
+        # run test
+        self.base_autotest.install()
 
-		# check
-		self.assertTrue(self.base_autotest.installed)
-		self.god.check_playback()
+        # check
+        self.assertTrue(self.base_autotest.installed)
+        self.god.check_playback()
 
-		# put back
-		self.common_install_test_teardown(old_get_autodir)
+        # put back
+        self.common_install_test_teardown(old_get_autodir)
 
 
-	def test_install2(self):
-		# setup
-		autodir = "autodir"
-		old_get_autodir = self.common_install_test_setup(autodir, False)
-		cmd = 'which svn'
-		cmdresult = client_utils.CmdResult(cmd)
-		self.utils_obj.run.expect_call(cmd).and_return(cmdresult)
-		cmd = 'svn checkout %s %s' % (autotest.AUTOTEST_SVN, autodir)
-		self.host.run.expect_call(cmd)
+    def test_install2(self):
+        # setup
+        autodir = "autodir"
+        old_get_autodir = self.common_install_test_setup(autodir, False)
+        cmd = 'which svn'
+        cmdresult = client_utils.CmdResult(cmd)
+        self.utils_obj.run.expect_call(cmd).and_return(cmdresult)
+        cmd = 'svn checkout %s %s' % (autotest.AUTOTEST_SVN, autodir)
+        self.host.run.expect_call(cmd)
 
-		# run test
-		self.base_autotest.install()
+        # run test
+        self.base_autotest.install()
 
-		# check
-		self.assertTrue(self.base_autotest.installed)
-		self.god.check_playback()
+        # check
+        self.assertTrue(self.base_autotest.installed)
+        self.god.check_playback()
 
-		# put back
-		self.common_install_test_teardown(old_get_autodir)
+        # put back
+        self.common_install_test_teardown(old_get_autodir)
 
 
-	def test_get(self):
-		# setup
-		location = "autotest_lib.client"
-		cwd = "current_dir"
-		self.os_obj.getcwd.expect_call().and_return(cwd)
-		self.os_obj.chdir.expect_call(location)
-		self.os_obj.system.expect_call('tools/make_clean')
-		self.os_obj.chdir.expect_call(cwd)
+    def test_get(self):
+        # setup
+        location = "autotest_lib.client"
+        cwd = "current_dir"
+        self.os_obj.getcwd.expect_call().and_return(cwd)
+        self.os_obj.chdir.expect_call(location)
+        self.os_obj.system.expect_call('tools/make_clean')
+        self.os_obj.chdir.expect_call(cwd)
 
-		# call method under test
-		self.base_autotest.get(location)
+        # call method under test
+        self.base_autotest.get(location)
 
-		# do tests
-		self.assertTrue(self.base_autotest.got)
-		self.god.check_playback()
+        # do tests
+        self.assertTrue(self.base_autotest.got)
+        self.god.check_playback()
 
 
-	def test_get_default(self):
-		# setup the test
-		location = "autotest_lib.client"
-		self.path_obj.join.expect_call(self.base_autotest.serverdir,
-		                               '../client').and_return(location)
-		self.path_obj.abspath.expect_call(location).and_return(location)
-		cwd = "current_dir"
-		self.os_obj.getcwd.expect_call().and_return(cwd)
-		self.os_obj.chdir.expect_call(location)
-		self.os_obj.system.expect_call('tools/make_clean')
-		self.os_obj.chdir.expect_call(cwd)
+    def test_get_default(self):
+        # setup the test
+        location = "autotest_lib.client"
+        self.path_obj.join.expect_call(self.base_autotest.serverdir,
+                                       '../client').and_return(location)
+        self.path_obj.abspath.expect_call(location).and_return(location)
+        cwd = "current_dir"
+        self.os_obj.getcwd.expect_call().and_return(cwd)
+        self.os_obj.chdir.expect_call(location)
+        self.os_obj.system.expect_call('tools/make_clean')
+        self.os_obj.chdir.expect_call(cwd)
 
-		# call method under test
-		self.base_autotest.get()
+        # call method under test
+        self.base_autotest.get()
 
-		# do tests
-		self.assertTrue(self.base_autotest.got)
-		self.god.check_playback()
+        # do tests
+        self.assertTrue(self.base_autotest.got)
+        self.god.check_playback()
 
 
-	def test_run_default(self):
-		# need to stub out _get_host_and_setup
-		old_func = self.base_autotest._get_host_and_setup
-		name = "_get_host_and_setup"
-		new_func = self.god.create_mock_function(name)
-		self.base_autotest._get_host_and_setup = new_func
+    def test_run_default(self):
+        # need to stub out _get_host_and_setup
+        old_func = self.base_autotest._get_host_and_setup
+        name = "_get_host_and_setup"
+        new_func = self.god.create_mock_function(name)
+        self.base_autotest._get_host_and_setup = new_func
 
-		# need to stub out _do_run
-		old_do_run = self.base_autotest._do_run
-		do_run = self.god.create_mock_function("_do_run")
-		self.base_autotest._do_run = do_run
+        # need to stub out _do_run
+        old_do_run = self.base_autotest._do_run
+        do_run = self.god.create_mock_function("_do_run")
+        self.base_autotest._do_run = do_run
 
-		# need a mock of _Run object
-		run = self.god.create_mock_class(autotest._Run, "run")
+        # need a mock of _Run object
+        run = self.god.create_mock_class(autotest._Run, "run")
 
-		# need a mock for _Run constuctor
-		oldRun = autotest._Run
-		newRun = self.god.create_mock_function("_Run")
-		autotest._Run = newRun
+        # need a mock for the _Run constructor
+        oldRun = autotest._Run
+        newRun = self.god.create_mock_function("_Run")
+        autotest._Run = newRun
 
-		new_func.expect_call(None).and_return(self.host)
-		results_dir = "results_dir"
-		self.path_obj.abspath.expect_call(".").and_return(results_dir)
-		newRun.expect_call(self.host,
-		                   results_dir, None, False).and_return(run)
-		do_run.expect_call("control", results_dir, self.host, run, None)
+        new_func.expect_call(None).and_return(self.host)
+        results_dir = "results_dir"
+        self.path_obj.abspath.expect_call(".").and_return(results_dir)
+        newRun.expect_call(self.host,
+                           results_dir, None, False).and_return(run)
+        do_run.expect_call("control", results_dir, self.host, run, None)
 
-		# call method
-		self.base_autotest.run("control")
+        # call method
+        self.base_autotest.run("control")
 
-		# do test
-		self.god.check_playback()
+        # do test
+        self.god.check_playback()
 
-		# put things back
-		self.base_autotest._get_host_and_setup = old_func
-		self.base_autotest._do_run = old_do_run
-		autotest._Run = oldRun
+        # put things back
+        self.base_autotest._get_host_and_setup = old_func
+        self.base_autotest._do_run = old_do_run
+        autotest._Run = oldRun
 
 
-	def test_prepare_for_copying_logs1(self):
-		src = "src"
-		dest = "dest"
-		keyval_path = ''
-		dkeyval = "dest/keyval"
+    def test_prepare_for_copying_logs1(self):
+        src = "src"
+        dest = "dest"
+        keyval_path = ''
+        dkeyval = "dest/keyval"
 
-		# setup
-		self.path_obj.join.expect_call(dest,
-		                               'keyval').and_return(dkeyval)
-		self.path_obj.exists.expect_call(dkeyval).and_return(False)
+        # setup
+        self.path_obj.join.expect_call(dest,
+                                       'keyval').and_return(dkeyval)
+        self.path_obj.exists.expect_call(dkeyval).and_return(False)
 
-		# run test
-		self.base_autotest.prepare_for_copying_logs(src, dest,
-		                                            self.host)
+        # run test
+        self.base_autotest.prepare_for_copying_logs(src, dest,
+                                                    self.host)
 
-		# check
-		self.god.check_playback()
+        # check
+        self.god.check_playback()
 
 
-	def test_prepare_for_copying_logs2(self):
-		src = "src"
-		dest = "dest"
-		keyval_path = ''
-		dkeyval = "dest/keyval"
-		skeyval = "src/keyval"
-		file_path = (0, ".keyavl_host")
+    def test_prepare_for_copying_logs2(self):
+        src = "src"
+        dest = "dest"
+        keyval_path = ''
+        dkeyval = "dest/keyval"
+        skeyval = "src/keyval"
+        file_path = (0, ".keyavl_host")
 
-		# make stub for tempfile.mkstemp
-		old_mkstemp = autotest.tempfile.mkstemp
-		mkstemp_obj = self.god.create_mock_function("tempfile.mkstemp")
-		autotest.tempfile.mkstemp = mkstemp_obj
+        # make stub for tempfile.mkstemp
+        old_mkstemp = autotest.tempfile.mkstemp
+        mkstemp_obj = self.god.create_mock_function("tempfile.mkstemp")
+        autotest.tempfile.mkstemp = mkstemp_obj
 
-		# setup
-		self.path_obj.join.expect_call(dest,
-		                               'keyval').and_return(dkeyval)
-		self.path_obj.exists.expect_call(dkeyval).and_return(True)
-		mkstemp_obj.expect_call('.keyval_%s'
-		                     % self.host.hostname).and_return(file_path)
-		self.path_obj.join.expect_call(src,
-		                               'keyval').and_return(skeyval)
-		self.host.get_file.expect_call(skeyval, file_path[1])
-		self.path_obj.join.expect_call(src,
-		                               'keyval').and_return(skeyval)
-		self.host.run.expect_call('rm -rf %s' % (skeyval))
+        # setup
+        self.path_obj.join.expect_call(dest,
+                                       'keyval').and_return(dkeyval)
+        self.path_obj.exists.expect_call(dkeyval).and_return(True)
+        mkstemp_obj.expect_call('.keyval_%s'
+                                % self.host.hostname).and_return(file_path)
+        self.path_obj.join.expect_call(src,
+                                       'keyval').and_return(skeyval)
+        self.host.get_file.expect_call(skeyval, file_path[1])
+        self.path_obj.join.expect_call(src,
+                                       'keyval').and_return(skeyval)
+        self.host.run.expect_call('rm -rf %s' % (skeyval))
 
-		# run test
-		self.base_autotest.prepare_for_copying_logs(src, dest,
-		                                            self.host)
+        # run test
+        self.base_autotest.prepare_for_copying_logs(src, dest,
+                                                    self.host)
 
-		# check results
-		self.god.check_playback()
+        # check results
+        self.god.check_playback()
 
-		# set things back
-		autotest.tempfile.mkstemp = old_mkstemp
+        # set things back
+        autotest.tempfile.mkstemp = old_mkstemp
 
 
-	def test_process_copied_logs_no_dest_keyval(self):
-		# setup test
-		dest = "dest"
-		path = "keyval_path"
-		self.path_obj.join.expect_call(dest, 'keyval').and_return(path)
-		self.path_obj.exists.expect_call(path).and_return(False)
+    def test_process_copied_logs_no_dest_keyval(self):
+        # setup test
+        dest = "dest"
+        path = "keyval_path"
+        self.path_obj.join.expect_call(dest, 'keyval').and_return(path)
+        self.path_obj.exists.expect_call(path).and_return(False)
 
-		# run test
-		self.base_autotest.process_copied_logs(dest, self.host, path)
+        # run test
+        self.base_autotest.process_copied_logs(dest, self.host, path)
 
-		# run check
-		self.god.check_playback()
+        # run check
+        self.god.check_playback()
 
 
-	def test_process_copied_logs_with_dest_keyval(self):
-		# setup test
-		dest = "dest"
-		kpath = "keyval_path"
-		path = "path"
-		self.path_obj.join.expect_call(dest, 'keyval').and_return(path)
-		self.path_obj.exists.expect_call(path).and_return(True)
+    def test_process_copied_logs_with_dest_keyval(self):
+        # setup test
+        dest = "dest"
+        kpath = "keyval_path"
+        path = "path"
+        self.path_obj.join.expect_call(dest, 'keyval').and_return(path)
+        self.path_obj.exists.expect_call(path).and_return(True)
 
-		vals = {'version': 1, 'author': "wonder woman"}
-		kvals = {'version': 1}
-		mvals = {'author': "wonder woman"}
+        vals = {'version': 1, 'author': "wonder woman"}
+        kvals = {'version': 1}
+        mvals = {'author': "wonder woman"}
 
-		self.utils_obj.read_keyval.expect_call(path).and_return(vals)
-		self.path_obj.join.expect_call(dest, 'keyval').and_return(kpath)
-		self.utils_obj.read_keyval.expect_call(kpath).and_return(kvals)
-		self.path_obj.join.expect_call(dest, 'keyval').and_return(dest)
-		self.utils_obj.write_keyval.expect_call(dest, mvals)
-		self.os_obj.remove.expect_call(path)
+        self.utils_obj.read_keyval.expect_call(path).and_return(vals)
+        self.path_obj.join.expect_call(dest, 'keyval').and_return(kpath)
+        self.utils_obj.read_keyval.expect_call(kpath).and_return(kvals)
+        self.path_obj.join.expect_call(dest, 'keyval').and_return(dest)
+        self.utils_obj.write_keyval.expect_call(dest, mvals)
+        self.os_obj.remove.expect_call(path)
 
-		# call test
-		self.base_autotest.process_copied_logs(dest, self.host, path)
+        # call test
+        self.base_autotest.process_copied_logs(dest, self.host, path)
 
-		# run check
-		self.god.check_playback()
+        # run check
+        self.god.check_playback()
 
 
-	def test_run_timed_test(self):
-		pass
+    def test_run_timed_test(self):
+        pass
 
 
 if __name__ == "__main__":
-	unittest.main()
+    unittest.main()
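
All of the unit tests in this change follow the same record/replay pattern from the autotest mock library: stub out a dependency, record the calls you expect, exercise the code under test, then verify the playback. A minimal sketch of the pattern; the import paths are illustrative assumptions, not taken from this file:

    import unittest
    from autotest_lib.client.common_lib.test_utils import mock  # assumed path
    from autotest_lib.server import utils                       # assumed path

    class ExampleTest(unittest.TestCase):
        def setUp(self):
            self.god = mock.mock_god()
            # replace utils.get_server_dir with a recording stub
            self.god.stub_function(utils, "get_server_dir")

        def tearDown(self):
            # restore everything that was stubbed
            self.god.unstub_all()

        def test_example(self):
            # record: expect exactly one call, returning this value
            utils.get_server_dir.expect_call().and_return("/usr/local/autotest")
            # replay: exercise the stubbed dependency
            self.assertEquals(utils.get_server_dir(), "/usr/local/autotest")
            # verify that every recorded call happened, in order
            self.god.check_playback()
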
diff --git a/server/deb_kernel.py b/server/deb_kernel.py
index 26523b9..d651329 100644
--- a/server/deb_kernel.py
+++ b/server/deb_kernel.py
@@ -5,7 +5,7 @@
 """
 This module defines the Kernel class
 
-	Kernel: an os kernel
+        Kernel: an OS kernel
 """
 
 __author__ = """
@@ -22,132 +22,132 @@
 
 
 class DEBKernel(kernel.Kernel):
-	"""
-	This class represents a .deb pre-built kernel.
+    """
+    This class represents a .deb pre-built kernel.
 
-	It is used to obtain a built kernel and install it on a Host.
+    It is used to obtain a built kernel and install it on a Host.
 
-	Implementation details:
-	This is a leaf class in an abstract class hierarchy, it must
-	implement the unimplemented methods in parent classes.
-	"""
-	def __init__(self):
-		super(DEBKernel, self).__init__()
+    Implementation details:
+    This is a leaf class in an abstract class hierarchy; it must
+    implement the unimplemented methods in its parent classes.
+    """
+    def __init__(self):
+        super(DEBKernel, self).__init__()
 
 
-	def install(self, host, **kwargs):
-		"""
-		Install a kernel on the remote host.
-		
-		This will also invoke the guest's bootloader to set this
-		kernel as the default kernel.
-		
-		Args:
-			host: the host on which to install the kernel
-			[kwargs]: remaining keyword arguments will be passed 
-				to Bootloader.add_kernel()
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				DEBKernel.get() with a .deb package.
-		"""
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be "
-						  "specified via get()")
-		
-		remote_tmpdir = host.get_tmp_dir()
-		basename = os.path.basename(self.source_material)
-		remote_filename = os.path.join(remote_tmpdir, basename)
-		host.send_file(self.source_material, remote_filename)
-		host.run('dpkg -i "%s"' % (utils.sh_escape(remote_filename),))
-		host.run('mkinitramfs -o "%s" "%s"' % (
-			utils.sh_escape(self.get_initrd_name()), 
-			utils.sh_escape(self.get_version()),))
-		
-		host.bootloader.add_kernel(self.get_image_name(), 
-			initrd=self.get_initrd_name(), **kwargs)
+    def install(self, host, **kwargs):
+        """
+        Install a kernel on the remote host.
+
+        This will also invoke the guest's bootloader to set this
+        kernel as the default kernel.
+
+        Args:
+                host: the host on which to install the kernel
+                [kwargs]: remaining keyword arguments will be passed
+                        to Bootloader.add_kernel()
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        DEBKernel.get() with a .deb package.
+        """
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be "
+                                      "specified via get()")
+
+        remote_tmpdir = host.get_tmp_dir()
+        basename = os.path.basename(self.source_material)
+        remote_filename = os.path.join(remote_tmpdir, basename)
+        host.send_file(self.source_material, remote_filename)
+        host.run('dpkg -i "%s"' % (utils.sh_escape(remote_filename),))
+        host.run('mkinitramfs -o "%s" "%s"' % (
+                utils.sh_escape(self.get_initrd_name()),
+                utils.sh_escape(self.get_version()),))
+
+        host.bootloader.add_kernel(self.get_image_name(),
+                initrd=self.get_initrd_name(), **kwargs)
 
 
-	def get_version(self):
-		"""Get the version of the kernel to be installed.
-		
-		Returns:
-			The version string, as would be returned 
-			by 'make kernelrelease'.
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				DEBKernel.get() with a .deb package.
-		"""
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be "
-			                          "specified via get()")
-		
-		retval= utils.run('dpkg-deb -f "%s" version' % 
-			utils.sh_escape(self.source_material),)
-		return retval.stdout.strip()
+    def get_version(self):
+        """Get the version of the kernel to be installed.
+
+        Returns:
+                The version string, as would be returned
+                by 'make kernelrelease'.
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        DEBKernel.get() with a .deb package.
+        """
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be "
+                                      "specified via get()")
+
+        retval = utils.run('dpkg-deb -f "%s" version' %
+                           utils.sh_escape(self.source_material))
+        return retval.stdout.strip()
 
 
-	def get_image_name(self):
-		"""Get the name of the kernel image to be installed.
-		
-		Returns:
-			The full path to the kernel image file as it will be 
-			installed on the host.
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				DEBKernel.get() with a .deb package.
-		"""
-		return "/boot/vmlinuz-%s" % (self.get_version(),)
+    def get_image_name(self):
+        """Get the name of the kernel image to be installed.
+
+        Returns:
+                The full path to the kernel image file as it will be
+                installed on the host.
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        DEBKernel.get() with a .deb package.
+        """
+        return "/boot/vmlinuz-%s" % (self.get_version(),)
 
 
-	def get_initrd_name(self):
-		"""Get the name of the initrd file to be installed.
-		
-		Returns:
-			The full path to the initrd file as it will be 
-			installed on the host. If the package includes no 
-			initrd file, None is returned
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				DEBKernel.get() with a .deb package.
-		"""
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be "
-			                          "specified via get()")
-		
-		return "/boot/initrd.img-%s" % (self.get_version(),)
-	
-	def extract(self, host):
-		"""Extract the kernel package.
-		
-		This function is only useful to access the content of the 
-		package (for example the kernel image) without 
-		installing it. It is not necessary to run this function to
-		install the kernel.
-		
-		Args:
-			host: the host on which to extract the kernel package.
-		
-		Returns:
-			The full path to the temporary directory on host where 
-			the package was extracted.
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				DEBKernel.get() with a .deb package.
-		"""
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be "
-			                          "specified via get()")
-		
-		remote_tmpdir = host.get_tmp_dir()
-		basename = os.path.basename(self.source_material)
-		remote_filename = os.path.join(remote_tmpdir, basename)
-		host.send_file(self.source_material, remote_filename)
-		content_dir= os.path.join(remote_tmpdir, "contents")
-		host.run('dpkg -x "%s" "%s"' % (utils.sh_escape(remote_filename), utils.sh_escape(content_dir),))
-		
-		return content_dir
+    def get_initrd_name(self):
+        """Get the name of the initrd file to be installed.
+
+        Returns:
+                The full path to the initrd file as it will be
+                installed on the host.  If the package includes no
+                initrd file, None is returned.
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        DEBKernel.get() with a .deb package.
+        """
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be "
+                                      "specified via get()")
+
+        return "/boot/initrd.img-%s" % (self.get_version(),)
+
+    def extract(self, host):
+        """Extract the kernel package.
+
+        This function is only useful for accessing the content of the
+        package (for example the kernel image) without installing it.
+        It is not necessary to run this function in order to
+        install the kernel.
+
+        Args:
+                host: the host on which to extract the kernel package.
+
+        Returns:
+                The full path to the temporary directory on host where
+                the package was extracted.
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        DEBKernel.get() with a .deb package.
+        """
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be "
+                                      "specified via get()")
+
+        remote_tmpdir = host.get_tmp_dir()
+        basename = os.path.basename(self.source_material)
+        remote_filename = os.path.join(remote_tmpdir, basename)
+        host.send_file(self.source_material, remote_filename)
+        content_dir = os.path.join(remote_tmpdir, "contents")
+        host.run('dpkg -x "%s" "%s"' % (utils.sh_escape(remote_filename), utils.sh_escape(content_dir)))
+
+        return content_dir
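
Taken together, the DEBKernel methods above support a short workflow: fetch a .deb package, install it on a host, and let the bootloader pick it up on reboot. A usage sketch; the host name and package path are hypothetical, and get() is inherited from the parent Kernel class:

    from autotest_lib.server import deb_kernel, hosts  # illustrative imports

    host = hosts.SSHHost("target.example.com")      # hypothetical machine
    kernel = deb_kernel.DEBKernel()
    kernel.get("/tmp/linux-image-2.6.24_i386.deb")  # hypothetical package
    print kernel.get_version()                      # version from the .deb
    kernel.install(host)  # dpkg -i, mkinitramfs, bootloader.add_kernel()
    host.reboot()
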
diff --git a/server/git.py b/server/git.py
index 52812c4..72f5b81 100644
--- a/server/git.py
+++ b/server/git.py
@@ -18,168 +18,168 @@
 
 
 class GitRepo(installable_object.InstallableObject):
-	"""
-	This class represents a git repo.
+    """
+    This class represents a git repo.
 
-	It is used to pull down a local copy of a git repo, check if the local
-	repo is up-to-date, if not update.  It delegates the install to
-	implementation classes.
+    It is used to pull down a local copy of a git repo, check whether the
+    local repo is up-to-date, and update it if it is not.  It delegates
+    the install to implementation classes.
 
-	"""
+    """
 
-	def __init__(self, repodir, giturl, weburl):
-		super(installable_object.InstallableObject, self).__init__()
-		if repodir == None:
-			e_msg = 'You must provide a directory to hold the git repository'
-			raise ValueError(e_msg)
-		self.repodir = sh_escape(repodir)
-		if giturl == None:
-			raise ValueError('You must provide a git URL to the repository')
-		self.giturl = giturl
-		if weburl == None:
-			raise ValueError('You must provide a http URL to the repository')
-		self.weburl = weburl
+    def __init__(self, repodir, giturl, weburl):
+        super(GitRepo, self).__init__()
+        if repodir is None:
+            e_msg = 'You must provide a directory to hold the git repository'
+            raise ValueError(e_msg)
+        self.repodir = utils.sh_escape(repodir)
+        if giturl is None:
+            raise ValueError('You must provide a git URL to the repository')
+        self.giturl = giturl
+        if weburl is None:
+            raise ValueError('You must provide an HTTP URL to the repository')
+        self.weburl = weburl
 
-		# path to .git dir
-		self.gitpath = utils.sh_escape(os.path.join(self.repodir,'.git'))
+        # path to .git dir
+        self.gitpath = utils.sh_escape(os.path.join(self.repodir, '.git'))
 
-		# base git command , pointing to gitpath git dir
-		self.gitcmdbase = 'git --git-dir=%s' % self.gitpath
+        # base git command, pointing at the gitpath git dir
+        self.gitcmdbase = 'git --git-dir=%s' % self.gitpath
 
-		# default to same remote path as local
-		self.__build = os.path.dirname(self.repodir)
+        # default to same remote path as local
+        self.__build = os.path.dirname(self.repodir)
 
 
-	def run(self, command, timeout=None, ignore_status=False):
-		return utils.run(r'%s' % (utils.sh_escape(command)),
-                                  timeout, ignore_status)
+    def run(self, command, timeout=None, ignore_status=False):
+        return utils.run(r'%s' % (utils.sh_escape(command)),
+                         timeout, ignore_status)
 
 
-	# base install method
-	def install(self, host, builddir=None):
-		# allow override of target remote dir
-		if builddir:
-			self.__build = builddir
+    # base install method
+    def install(self, host, builddir=None):
+        # allow override of target remote dir
+        if builddir:
+            self.__build = builddir
 
-		# push source to host for install
-		print 'pushing %s to host:%s' %(self.source_material, self.__build)
-		host.send_file(self.source_material, self.__build)
+        # push source to host for install
+        print 'pushing %s to host:%s' % (self.source_material, self.__build)
+        host.send_file(self.source_material, self.__build)
 
 
-	def gitcmd(self, cmd, ignore_status=False):
-		return self.run('%s %s'%(self.gitcmdbase, cmd),
-						ignore_status=ignore_status)
+    def gitcmd(self, cmd, ignore_status=False):
+        return self.run('%s %s' % (self.gitcmdbase, cmd),
+                        ignore_status=ignore_status)
 
 
-	def get(self, **kwargs):
-		"""
-		This method overrides baseclass get so we can do proper git 
-		clone/pulls, and check for updated versions.  The result of
-		this method will leave an up-to-date version of git repo at
-		'giturl' in 'repodir' directory to be used by build/install
-		methods.
-		"""
+    def get(self, **kwargs):
+        """
+        This method overrides the base class get() so we can do proper
+        git clones/pulls and check for updated versions.  On return it
+        leaves an up-to-date copy of the repository from 'giturl' in
+        the 'repodir' directory, ready for use by the build/install
+        methods.
+        """
 
-		if not self.is_repo_initialized():
-			# this is your first time ...
-			print 'cloning repo...'
-			cmd = 'clone %s %s ' %(self.giturl, self.repodir)
-			rv = self.gitcmd(cmd, True)
-			if rv.exit_status != 0:
-				print rv.stderr
-				raise error.CmdError('Failed to clone git url', rv)
-			else:
-				print rv.stdout
+        if not self.is_repo_initialized():
+            # this is your first time ...
+            print 'cloning repo...'
+            cmd = 'clone %s %s' % (self.giturl, self.repodir)
+            rv = self.gitcmd(cmd, True)
+            if rv.exit_status != 0:
+                print rv.stderr
+                raise error.CmdError('Failed to clone git url', rv)
+            else:
+                print rv.stdout
 
-		else:
-			# exiting repo, check if we're up-to-date
-			if self.is_out_of_date():
-				print 'updating repo...'
-				rv = self.gitcmd('pull', True)
-				if rv.exit_status != 0:
-					print rv.stderr
-					e_msg = 'Failed to pull git repo data'
-					raise error.CmdError(e_msg, rv)
-			else:
-				print 'repo up-to-date'
+        else:
+            # existing repo, check if we're up-to-date
+            if self.is_out_of_date():
+                print 'updating repo...'
+                rv = self.gitcmd('pull', True)
+                if rv.exit_status != 0:
+                    print rv.stderr
+                    e_msg = 'Failed to pull git repo data'
+                    raise error.CmdError(e_msg, rv)
+            else:
+                print 'repo up-to-date'
 
 
-		# remember where the source is
-		self.source_material = self.repodir
+        # remember where the source is
+        self.source_material = self.repodir
 
 
-	def get_local_head(self):
-		cmd = 'log --max-count=1'
-		gitlog = self.gitcmd(cmd).stdout
+    def get_local_head(self):
+        cmd = 'log --max-count=1'
+        gitlog = self.gitcmd(cmd).stdout
 
-		# parsing the commit checksum out of git log 's first entry.
-		# Output looks like:
-		# 	
-		# 	commit 1dccba29b4e5bf99fb98c324f952386dda5b097f
-		# 	Merge: 031b69b... df6af41...
-		# 	Author: Avi Kivity <[email protected]>
-		# 	Date:   Tue Oct 23 10:36:11 2007 +0200
-		#
-		# 	    Merge home:/home/avi/kvm/linux-2.6
-		return str(gitlog.split('\n')[0]).split()[1]
+        # parse the commit checksum out of the first entry in the git log.
+        # Output looks like:
+        #
+        #       commit 1dccba29b4e5bf99fb98c324f952386dda5b097f
+        #       Merge: 031b69b... df6af41...
+        #       Author: Avi Kivity <[email protected]>
+        #       Date:   Tue Oct 23 10:36:11 2007 +0200
+        #
+        #           Merge home:/home/avi/kvm/linux-2.6
+        return str(gitlog.split('\n')[0]).split()[1]
 
 
-	def get_remote_head(self):
-		def __needs_refresh(lines):
-			tag = '<meta http-equiv="refresh" content="0"/>'
-			if len(filter(lambda x: x.startswith(tag), lines)) > 0:
-				return True
+    def get_remote_head(self):
+        def __needs_refresh(lines):
+            tag = '<meta http-equiv="refresh" content="0"/>'
+            if len(filter(lambda x: x.startswith(tag), lines)) > 0:
+                return True
 
-			return False
+            return False
 
 
-		# scan git web interface for revision HEAD's commit tag
-		gitwebaction=';a=commit;h=HEAD'
-		url = self.weburl+gitwebaction
-		max_refresh = 4
-		r = 0
+        # scan git web interface for revision HEAD's commit tag
+        gitwebaction = ';a=commit;h=HEAD'
+        url = self.weburl + gitwebaction
+        max_refresh = 4
+        r = 0
 
-		print 'checking %s for changes' %(url)
-		u = utils.urlopen(url)
-		lines = u.read().split('\n')
+        print 'checking %s for changes' % url
+        u = utils.urlopen(url)
+        lines = u.read().split('\n')
 
-		while __needs_refresh(lines) and r < max_refresh:
-			print 'refreshing url'
-			r = r+1
-			u = utils.urlopen(url)
-			lines = u.read().split('\n')
+        while __needs_refresh(lines) and r < max_refresh:
+            print 'refreshing url'
+            r = r+1
+            u = utils.urlopen(url)
+            lines = u.read().split('\n')
 
-		if r >= max_refresh:
-			e_msg = 'Failed to get remote repo status, refreshed %s times' % r
-			raise IndexError(e_msg)
+        if r >= max_refresh:
+            e_msg = 'Failed to get remote repo status, refreshed %s times' % r
+            raise IndexError(e_msg)
 
-		# looking for a line like:
-		# <tr><td>commit</td><td # class="sha1">aadea67210c8b9e7a57744a1c2845501d2cdbac7</td></tr>
-		commit_filter = lambda x: x.startswith('<tr><td>commit</td>')
-		commit_line = filter(commit_filter, lines)
+        # looking for a line like:
+        # <tr><td>commit</td><td class="sha1">aadea67210c8b9e7a57744a1c2845501d2cdbac7</td></tr>
+        commit_filter = lambda x: x.startswith('<tr><td>commit</td>')
+        commit_line = filter(commit_filter, lines)
 
-		# extract the sha1 sum from the commit line
-		return str(commit_line).split('>')[4].split('<')[0]
+        # extract the sha1 sum from the commit line
+        return str(commit_line).split('>')[4].split('<')[0]
 
 
-	def is_out_of_date(self):
-		local_head = self.get_local_head()
-		remote_head = self.get_remote_head()
+    def is_out_of_date(self):
+        local_head = self.get_local_head()
+        remote_head = self.get_remote_head()
 
-		# local is out-of-date, pull
-		if local_head != remote_head:
-			return True
+        # local is out-of-date, pull
+        if local_head != remote_head:
+            return True
 
-		return False
+        return False
 
 
-	def is_repo_initialized(self):
-		# if we fail to get a rv of 0 out of the git log command
-		# then the repo is bogus
+    def is_repo_initialized(self):
+        # if we fail to get a rv of 0 out of the git log command
+        # then the repo is bogus
 
-		cmd = 'log --max-count=1'
-		rv = self.gitcmd(cmd, True)
-		if rv.exit_status == 0:
-			return True
+        cmd = 'log --max-count=1'
+        rv = self.gitcmd(cmd, True)
+        if rv.exit_status == 0:
+            return True
 
-		return False
+        return False
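
A brief sketch of how GitRepo is meant to be driven; the repository locations below are purely illustrative:

    from autotest_lib.server import git  # illustrative import

    repo = git.GitRepo(repodir="/usr/local/src/linux-2.6",        # hypothetical
                       giturl="git://git.example.org/linux.git",  # hypothetical
                       weburl="http://git.example.org/?p=linux.git")
    repo.get()          # clone on first use; pull only if out-of-date
    repo.install(host)  # push source_material to a host set up elsewhere
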
diff --git a/server/git_kernel.py b/server/git_kernel.py
index f993732..1f4273c 100644
--- a/server/git_kernel.py
+++ b/server/git_kernel.py
@@ -17,56 +17,56 @@
 
 
 class GitKernel(git.GitRepo):
-	"""
-	This class represents a git kernel repo.
+    """
+    This class represents a git kernel repo.
 
-	It is used to pull down a local copy of a git repo, check if the local repo
-	is up-to-date, if not update and then build the kernel from the git repo.
+    It is used to pull down a local copy of a git repo, check whether the local
+    repo is up-to-date, update it if necessary, and then build the kernel.
 
-	"""
-	def __init__(self, repodir, giturl, weburl):
-		git.GitRepo.__init__(self, repodir, giturl, weburl)
-		self.__patches = []
-		self.__config = None
-		self.__build = None
+    """
+    def __init__(self, repodir, giturl, weburl):
+        git.GitRepo.__init__(self, repodir, giturl, weburl)
+        self.__patches = []
+        self.__config = None
+        self.__build = None
 
 
-	def configure(self, config):
-		self.__config = config
+    def configure(self, config):
+        self.__config = config
 
 
-	def patch(self, patch):
-		self.__patches.append(patch)
+    def patch(self, patch):
+        self.__patches.append(patch)
 
 
-	def install(self, host, build=True, builddir=None):
-		# use tmpdir if no builddir specified
-		# NB: pass a builddir to install() method if you
-		# need to ensure the build remains after the completion
-		# of a job 
-		if not builddir:
-			self.__build = os.path.join(host.get_tmp_dir(),"build")
-			print 'warning: builddir %s is not persistent' %(self.__build)
+    def install(self, host, build=True, builddir=None):
+        # use tmpdir if no builddir specified
+        # NB: pass a builddir to the install() method if you
+        # need to ensure the build remains after the completion
+        # of a job
+        if not builddir:
+            self.__build = os.path.join(host.get_tmp_dir(), "build")
+            print 'warning: builddir %s is not persistent' % self.__build
 
-		# push source to host for install
-		print 'pushing %s to host' %(self.source_material)
-		host.send_file(self.source_material, self.__build)
-		remote_source_material= os.path.join(self.__build,
-                                os.path.basename(self.source_material))
+        # push source to host for install
+        print 'pushing %s to host' % self.source_material
+        host.send_file(self.source_material, self.__build)
+        remote_source_material = os.path.join(
+                self.__build, os.path.basename(self.source_material))
 
-		# use a source_kernel to configure, patch, build and install.
-		sk = source_kernel.SourceKernel(remote_source_material)
+        # use a source_kernel to configure, patch, build and install.
+        sk = source_kernel.SourceKernel(remote_source_material)
 
-		if build:
-			# apply patches
-			for p in self.__patches:
-				sk.patch(p)
+        if build:
+            # apply patches
+            for p in self.__patches:
+                sk.patch(p)
 
-			# configure
-			sk.configure(self.__config)
+            # configure
+            sk.configure(self.__config)
 
-			# build
-			sk.build(host)
-			
-		# install
-		sk.install(host)
+            # build
+            sk.build(host)
+
+        # install
+        sk.install(host)
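
GitKernel layers configure/patch/build on top of the repository handling above. A usage sketch with hypothetical paths:

    from autotest_lib.server import git_kernel  # illustrative import

    k = git_kernel.GitKernel(repodir, giturl, weburl)  # same args as GitRepo
    k.get()                           # clone or update the repo
    k.configure("/path/to/.config")   # hypothetical kernel config
    k.patch("/path/to/fix.patch")     # queued; applied at install time
    k.install(host, builddir="/usr/local/build")  # persistent build dir
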
diff --git a/server/hosts/base_classes.py b/server/hosts/base_classes.py
index ddcfcdd..0f637bd 100644
--- a/server/hosts/base_classes.py
+++ b/server/hosts/base_classes.py
@@ -8,8 +8,8 @@
 Implementation details:
 You should import the "hosts" package instead of importing each type of host.
 
-	Host: a machine on which you can run programs
-	RemoteHost: a remote machine on which you can run programs
+        Host: a machine on which you can run programs
+        RemoteHost: a remote machine on which you can run programs
 """
 
 __author__ = """
@@ -26,91 +26,91 @@
 
 
 class Host(object):
-	"""
-	This class represents a machine on which you can run programs.
+    """
+    This class represents a machine on which you can run programs.
 
-	It may be a local machine, the one autoserv is running on, a remote 
-	machine or a virtual machine.
+    It may be a local machine (the one autoserv is running on), a remote
+    machine, or a virtual machine.
 
-	Implementation details:
-	This is an abstract class, leaf subclasses must implement the methods
-	listed here. You must not instantiate this class but should 
-	instantiate one of those leaf subclasses.
-	"""
+    Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+    listed here.  You must not instantiate this class directly; instead,
+    instantiate one of its leaf subclasses.
+    """
 
-	bootloader = None
+    bootloader = None
 
-	def __init__(self):
-		super(Host, self).__init__()
-		self.serverdir = utils.get_server_dir()
-		self.bootloader= bootloader.Bootloader(self)
-		self.env = {}
+    def __init__(self):
+        super(Host, self).__init__()
+        self.serverdir = utils.get_server_dir()
+        self.bootloader = bootloader.Bootloader(self)
+        self.env = {}
 
 
-	def run(self, command):
-		pass
+    def run(self, command):
+        pass
 
 
-	def reboot(self):
-		pass
+    def reboot(self):
+        pass
 
-	def reboot_setup(self):
-		pass
+    def reboot_setup(self):
+        pass
 
 
-	def reboot_followup(self):
-		pass
+    def reboot_followup(self):
+        pass
 
 
-	def get_file(self, source, dest):
-		pass
+    def get_file(self, source, dest):
+        pass
 
 
-	def send_file(self, source, dest):
-		pass
+    def send_file(self, source, dest):
+        pass
 
 
-	def get_tmp_dir(self):
-		pass
+    def get_tmp_dir(self):
+        pass
 
 
-	def is_up(self):
-		pass
+    def is_up(self):
+        pass
 
 
-	def get_wait_up_processes(self):
-		"""
-		Gets the list of local processes to wait for in wait_up.
-		"""
-		get_config = global_config.global_config.get_config_value
-		proc_list = get_config("HOSTS", "wait_up_processes",
-				       default="").strip()
-		processes = set(p.strip() for p in proc_list.split(","))
-		processes.discard("")
-		return processes
+    def get_wait_up_processes(self):
+        """
+        Gets the list of local processes to wait for in wait_up.
+        """
+        get_config = global_config.global_config.get_config_value
+        proc_list = get_config("HOSTS", "wait_up_processes",
+                               default="").strip()
+        processes = set(p.strip() for p in proc_list.split(","))
+        processes.discard("")
+        return processes
 
 
-	def wait_up(self, timeout):
-		pass
+    def wait_up(self, timeout):
+        pass
 
 
-	def wait_down(self, timeout):
-		pass
+    def wait_down(self, timeout):
+        pass
 
 
-	def get_num_cpu(self):
-		pass
+    def get_num_cpu(self):
+        pass
 
 
-	def machine_install(self):
-		raise NotImplementedError('Machine install not implemented!')
+    def machine_install(self):
+        raise NotImplementedError('Machine install not implemented!')
 
 
-	def install(self, installableObject):
-		installableObject.install(self)
+    def install(self, installableObject):
+        installableObject.install(self)
 
-	def get_crashdumps(self, test_start_time):
-		pass
+    def get_crashdumps(self, test_start_time):
+        pass
 
-	def get_autodir(self):
-		return None
+    def get_autodir(self):
+        return None
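
Because Host is abstract, a leaf subclass has to supply the actual transport. A minimal sketch of the pattern; LocalHost and the client utils import are hypothetical, not part of this change:

    from autotest_lib.server.hosts import base_classes          # illustrative
    from autotest_lib.client.common_lib import utils as c_utils  # assumed

    class LocalHost(base_classes.Host):
        # hypothetical leaf subclass running commands on the local machine
        def run(self, command):
            return c_utils.run(command)

        def is_up(self):
            return True
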
diff --git a/server/hosts/bootloader.py b/server/hosts/bootloader.py
index 7994a8a..4fb8d51 100644
--- a/server/hosts/bootloader.py
+++ b/server/hosts/bootloader.py
@@ -5,7 +5,7 @@
 """
 This module defines the Bootloader class.
 
-	Bootloader: a program to boot Kernels on a Host.
+        Bootloader: a program to boot Kernels on a Host.
 """
 
 __author__ = """
@@ -26,172 +26,172 @@
 
 
 class Bootloader(object):
-	"""
-	This class represents a bootloader.
+    """
+    This class represents a bootloader.
 
-	It can be used to add a kernel to the list of kernels that can be 
-	booted by a bootloader. It can also make sure that this kernel will 
-	be the one chosen at next reboot.
-	"""
+    It can be used to add a kernel to the list of kernels that can be
+    booted by a bootloader. It can also make sure that this kernel will
+    be the one chosen at the next reboot.
+    """
 
-	def __init__(self, host, xen_mode=False):
-		super(Bootloader, self).__init__()
-		self._host = weakref.ref(host)
-		self._boottool_path = None
-		self.xen_mode = xen_mode
+    def __init__(self, host, xen_mode=False):
+        super(Bootloader, self).__init__()
+        self._host = weakref.ref(host)
+        self._boottool_path = None
+        self.xen_mode = xen_mode
 
 
-	def get_type(self):
-		return self._run_boottool('--bootloader-probe').stdout.strip()
+    def get_type(self):
+        return self._run_boottool('--bootloader-probe').stdout.strip()
 
 
-	def get_architecture(self):
-		return self._run_boottool('--arch-probe').stdout.strip()
+    def get_architecture(self):
+        return self._run_boottool('--arch-probe').stdout.strip()
 
 
-	def get_titles(self):
-		return self._run_boottool('--info all | grep title | '
-			'cut -d " " -f2-').stdout.strip().split('\n')
+    def get_titles(self):
+        return self._run_boottool('--info all | grep title | '
+                'cut -d " " -f2-').stdout.strip().split('\n')
 
 
-	def get_default(self):
-		return self._run_boottool('--default').stdout.strip()
+    def get_default(self):
+        return self._run_boottool('--default').stdout.strip()
 
 
-	def _get_info(self, info_id):
-		retval = self._run_boottool('--info=%s' % info_id).stdout
+    def _get_info(self, info_id):
+        retval = self._run_boottool('--info=%s' % info_id).stdout
 
-		results = []
-		info = {}
-		for line in retval.splitlines():
-			if not line.strip():
-				if info:
-					results.append(info)
-					info = {}
-			else:
-				key, val = line.split(":", 1)
-				info[key.strip()] = val.strip()
-		if info:
-			results.append(info)
+        results = []
+        info = {}
+        for line in retval.splitlines():
+            if not line.strip():
+                if info:
+                    results.append(info)
+                    info = {}
+            else:
+                key, val = line.split(":", 1)
+                info[key.strip()] = val.strip()
+        if info:
+            results.append(info)
 
-		return results
+        return results
 
 
-	def get_info(self, index):
-		results = self._get_info(index)
-		if results:
-			return results[0]
-		else:
-			return {}
+    def get_info(self, index):
+        results = self._get_info(index)
+        if results:
+            return results[0]
+        else:
+            return {}
 
 
-	def get_all_info(self):
-		return self._get_info('all')
+    def get_all_info(self):
+        return self._get_info('all')
 
 
-	def set_default(self, index):
-		self._run_boottool('--set-default=%s' % index)
+    def set_default(self, index):
+        self._run_boottool('--set-default=%s' % index)
 
 
-	# 'kernel' can be a position number or a title
-	def add_args(self, kernel, args):
-		parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
-		
-		#add parameter if this is a Xen entry
-		if self.xen_mode:
-			parameters += ' --xen'
-		
-		self._run_boottool(parameters)
+    # 'kernel' can be a position number or a title
+    def add_args(self, kernel, args):
+        parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
+
+        #add parameter if this is a Xen entry
+        if self.xen_mode:
+            parameters += ' --xen'
+
+        self._run_boottool(parameters)
 
 
-	def add_xen_hypervisor_args(self, kernel, args):
-		self._run_boottool('--xen --update-xenhyper=%s --xha="%s"' \
-				    % (kernel, args))
+    def add_xen_hypervisor_args(self, kernel, args):
+        self._run_boottool('--xen --update-xenhyper=%s --xha="%s"'
+                           % (kernel, args))
 
 
-	def remove_args(self, kernel, args):
-		params = '--update-kernel=%s --remove-args="%s"' % (kernel, args)
-		
-		#add parameter if this is a Xen entry
-		if self.xen_mode:
-			params += ' --xen'
-		
-		self._run_boottool(params)
+    def remove_args(self, kernel, args):
+        params = '--update-kernel=%s --remove-args="%s"' % (kernel, args)
+
+        #add parameter if this is a Xen entry
+        if self.xen_mode:
+            params += ' --xen'
+
+        self._run_boottool(params)
 
 
-	def remove_xen_hypervisor_args(self, kernel, args):
-		self._run_boottool('--xen --update-xenhyper=%s '
-			'--remove-args="%s"') % (kernel, args)
+    def remove_xen_hypervisor_args(self, kernel, args):
+        self._run_boottool('--xen --update-xenhyper=%s '
+                           '--remove-args="%s"' % (kernel, args))
 
 
-	def add_kernel(self, path, title='autoserv', root=None, args=None, 
-		initrd=None, xen_hypervisor=None, default=True):
-		"""
-		If an entry with the same title is already present, it will be 
-		replaced.
-		"""
-		if title in self.get_titles():
-			self._run_boottool('--remove-kernel "%s"' % (
-				utils.sh_escape(title),))
-		
-		parameters = '--add-kernel "%s" --title "%s"' % (
-			utils.sh_escape(path), utils.sh_escape(title),)
-		
-		if root:
-			parameters += ' --root "%s"' % (utils.sh_escape(root),)
-		
-		if args:
-			parameters += ' --args "%s"' % (utils.sh_escape(args),)
-		
-		# add an initrd now or forever hold your peace
-		if initrd:
-			parameters += ' --initrd "%s"' % (
-				utils.sh_escape(initrd),)
-		
-		if default:
-			parameters += ' --make-default'
-		
-		# add parameter if this is a Xen entry
-		if self.xen_mode:
-			parameters += ' --xen'
-			if xen_hypervisor:
-				parameters += ' --xenhyper "%s"' % (
-					utils.sh_escape(xen_hypervisor),)
-		
-		self._run_boottool(parameters)
+    def add_kernel(self, path, title='autoserv', root=None, args=None,
+            initrd=None, xen_hypervisor=None, default=True):
+        """
+        If an entry with the same title is already present, it will be
+        replaced.
+        """
+        if title in self.get_titles():
+            self._run_boottool('--remove-kernel "%s"' % (
+                    utils.sh_escape(title),))
+
+        parameters = '--add-kernel "%s" --title "%s"' % (
+                utils.sh_escape(path), utils.sh_escape(title),)
+
+        if root:
+            parameters += ' --root "%s"' % (utils.sh_escape(root),)
+
+        if args:
+            parameters += ' --args "%s"' % (utils.sh_escape(args),)
+
+        # add an initrd now or forever hold your peace
+        if initrd:
+            parameters += ' --initrd "%s"' % (
+                    utils.sh_escape(initrd),)
+
+        if default:
+            parameters += ' --make-default'
+
+        # add parameter if this is a Xen entry
+        if self.xen_mode:
+            parameters += ' --xen'
+            if xen_hypervisor:
+                parameters += ' --xenhyper "%s"' % (
+                        utils.sh_escape(xen_hypervisor),)
+
+        self._run_boottool(parameters)
 
 
-	def remove_kernel(self, kernel):
-		self._run_boottool('--remove-kernel=%s' % kernel)
+    def remove_kernel(self, kernel):
+        self._run_boottool('--remove-kernel=%s' % kernel)
 
 
-	def boot_once(self, title):
-		self._run_boottool('--boot-once --title=%s' % title)
+    def boot_once(self, title):
+        self._run_boottool('--boot-once --title=%s' % title)
 
 
-	def install_boottool(self):
-		if self._host() is None:
-			raise error.AutoservError(
-			    "Host does not exist anymore")
-		tmpdir = self._host().get_tmp_dir()
-		self._host().send_file(os.path.abspath(os.path.join(
-			utils.get_server_dir(), BOOTTOOL_SRC)), tmpdir)
-		self._boottool_path= os.path.join(tmpdir, 
-			os.path.basename(BOOTTOOL_SRC))
+    def install_boottool(self):
+        if self._host() is None:
+            raise error.AutoservError(
+                "Host does not exist anymore")
+        tmpdir = self._host().get_tmp_dir()
+        self._host().send_file(os.path.abspath(os.path.join(
+                utils.get_server_dir(), BOOTTOOL_SRC)), tmpdir)
+        self._boottool_path = os.path.join(tmpdir,
+                os.path.basename(BOOTTOOL_SRC))
 
 
-	def _get_boottool_path(self):
-		if not self._boottool_path:
-			self.install_boottool()
-		return self._boottool_path
+    def _get_boottool_path(self):
+        if not self._boottool_path:
+            self.install_boottool()
+        return self._boottool_path
 
 
-	def _set_boottool_path(self, path):
-		self._boottool_path = path
-
-	
-	boottool_path = property(_get_boottool_path, _set_boottool_path)
+    def _set_boottool_path(self, path):
+        self._boottool_path = path
 
 
-	def _run_boottool(self, cmd):
-		return self._host().run(self.boottool_path + ' ' + cmd)
+    boottool_path = property(_get_boottool_path, _set_boottool_path)
+
+
+    def _run_boottool(self, cmd):
+        return self._host().run(self.boottool_path + ' ' + cmd)
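
Every Host constructs a Bootloader for itself in Host.__init__ (see base_classes.py above), so the usual entry point is host.bootloader rather than building one directly. A brief usage sketch against a hypothetical host; the kernel path and args are illustrative:

    loader = host.bootloader      # created in Host.__init__
    print loader.get_type()       # e.g. 'grub' or 'lilo', via --bootloader-probe
    loader.add_kernel("/boot/vmlinuz-test", title="autoserv",
                      args="ro single", default=True)
    loader.boot_once("autoserv")  # use the new entry for the next boot only
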
diff --git a/server/hosts/bootloader_unittest.py b/server/hosts/bootloader_unittest.py
index 9161a3e..a6f6756 100644
--- a/server/hosts/bootloader_unittest.py
+++ b/server/hosts/bootloader_unittest.py
@@ -10,347 +10,347 @@
 
 
 class test_bootloader_install(unittest.TestCase):
-	def setUp(self):
-		self.god = mock.mock_god()
+    def setUp(self):
+        self.god = mock.mock_god()
 
-		# mock out get_server_dir
-		self.god.stub_function(utils, "get_server_dir")
+        # mock out get_server_dir
+        self.god.stub_function(utils, "get_server_dir")
 
 
-	def tearDown(self):
-		self.god.unstub_all()
+    def tearDown(self):
+        self.god.unstub_all()
 
 
-	def create_mock_sshhost(self):
-		# useful for building disposable SSHHost mocks
-		return self.god.create_mock_class(ssh_host.SSHHost, "SSHHost")
+    def create_mock_sshhost(self):
+        # useful for building disposable SSHHost mocks
+        return self.god.create_mock_class(ssh_host.SSHHost, "SSHHost")
 
 
-	def create_install_boottool_mock(self, loader, dst_dir):
-		mock_install_boottool = \
-			self.god.create_mock_function("install_boottool")
-		def install_boottool():
-			loader._boottool_path = dst_dir
-			mock_install_boottool()
-		loader.install_boottool = install_boottool
-		return mock_install_boottool
+    def create_install_boottool_mock(self, loader, dst_dir):
+        mock_install_boottool = \
+                self.god.create_mock_function("install_boottool")
+        def install_boottool():
+            loader._boottool_path = dst_dir
+            mock_install_boottool()
+        loader.install_boottool = install_boottool
+        return mock_install_boottool
 
 
-	def test_install_fails_without_host(self):
-		host = self.create_mock_sshhost()
-		loader = bootloader.Bootloader(host)
-		del host
-		self.assertRaises(error.AutoservError, loader.install_boottool)
+    def test_install_fails_without_host(self):
+        host = self.create_mock_sshhost()
+        loader = bootloader.Bootloader(host)
+        del host
+        self.assertRaises(error.AutoservError, loader.install_boottool)
 
 
-	def test_installs_to_tmpdir(self):
-		TMPDIR = "/unittest/tmp"
-		SERVERDIR = "/unittest/server"
-		BOOTTOOL_SRC = os.path.join(SERVERDIR, bootloader.BOOTTOOL_SRC)
-		BOOTTOOL_SRC = os.path.abspath(BOOTTOOL_SRC)
-		BOOTTOOL_DST = os.path.join(TMPDIR, "boottool")
-		# set up the recording
-		host = self.create_mock_sshhost()
-		host.get_tmp_dir.expect_call().and_return(TMPDIR)
-		utils.get_server_dir.expect_call().and_return(SERVERDIR)
-		host.send_file.expect_call(BOOTTOOL_SRC, TMPDIR)
-		# run the test
-		loader = bootloader.Bootloader(host)
-		loader.install_boottool()
-		# assert the playback is correct
-		self.god.check_playback()
-		# assert the final dest is correct
-		self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
+    def test_installs_to_tmpdir(self):
+        TMPDIR = "/unittest/tmp"
+        SERVERDIR = "/unittest/server"
+        BOOTTOOL_SRC = os.path.join(SERVERDIR, bootloader.BOOTTOOL_SRC)
+        BOOTTOOL_SRC = os.path.abspath(BOOTTOOL_SRC)
+        BOOTTOOL_DST = os.path.join(TMPDIR, "boottool")
+        # set up the recording
+        host = self.create_mock_sshhost()
+        host.get_tmp_dir.expect_call().and_return(TMPDIR)
+        utils.get_server_dir.expect_call().and_return(SERVERDIR)
+        host.send_file.expect_call(BOOTTOOL_SRC, TMPDIR)
+        # run the test
+        loader = bootloader.Bootloader(host)
+        loader.install_boottool()
+        # assert the playback is correct
+        self.god.check_playback()
+        # assert the final dest is correct
+        self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
 
 
-	def test_get_path_automatically_installs(self):
-		BOOTTOOL_DST = "/unittest/tmp/boottool"
-		host = self.create_mock_sshhost()
-		loader = bootloader.Bootloader(host)
-		# mock out loader.install_boottool
-		mock_install = \
-			self.create_install_boottool_mock(loader, BOOTTOOL_DST)
-		# set up the recording
-		mock_install.expect_call()
-		# run the test
-		self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
-		self.god.check_playback()
+    def test_get_path_automatically_installs(self):
+        BOOTTOOL_DST = "/unittest/tmp/boottool"
+        host = self.create_mock_sshhost()
+        loader = bootloader.Bootloader(host)
+        # mock out loader.install_boottool
+        mock_install = \
+                self.create_install_boottool_mock(loader, BOOTTOOL_DST)
+        # set up the recording
+        mock_install.expect_call()
+        # run the test
+        self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
+        self.god.check_playback()
 
 
-	def test_install_is_only_called_once(self):
-		BOOTTOOL_DST = "/unittest/tmp/boottool"
-		host = self.create_mock_sshhost()
-		loader = bootloader.Bootloader(host)
-		# mock out loader.install_boottool
-		mock_install = \
-			self.create_install_boottool_mock(loader, BOOTTOOL_DST)
-		# set up the recording
-		mock_install.expect_call()
-		# run the test
-		self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
-		self.god.check_playback()
-		self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
-		self.god.check_playback()
+    def test_install_is_only_called_once(self):
+        BOOTTOOL_DST = "/unittest/tmp/boottool"
+        host = self.create_mock_sshhost()
+        loader = bootloader.Bootloader(host)
+        # mock out loader.install_boottool
+        mock_install = \
+                self.create_install_boottool_mock(loader, BOOTTOOL_DST)
+        # set up the recording
+        mock_install.expect_call()
+        # run the test
+        self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
+        self.god.check_playback()
+        self.assertEquals(loader.boottool_path, BOOTTOOL_DST)
+        self.god.check_playback()
 
 
 class test_bootloader_methods(unittest.TestCase):
-	def setUp(self):
-		self.god = mock.mock_god()
-		self.host = self.god.create_mock_class(ssh_host.SSHHost,
-						       "SSHHost")
-		# creates a bootloader with _run_boottool mocked out
-		self.loader = bootloader.Bootloader(self.host)
-		self.god.stub_function(self.loader, "_run_boottool")
+    def setUp(self):
+        self.god = mock.mock_god()
+        self.host = self.god.create_mock_class(ssh_host.SSHHost,
+                                               "SSHHost")
+        # creates a bootloader with _run_boottool mocked out
+        self.loader = bootloader.Bootloader(self.host)
+        self.god.stub_function(self.loader, "_run_boottool")
 
 
-	def tearDown(self):
-		self.god.unstub_all()
+    def tearDown(self):
+        self.god.unstub_all()
 
 
-	def expect_run_boottool(self, arg, result):
-		result = common_utils.CmdResult(stdout=result, exit_status=0)
-		self.loader._run_boottool.expect_call(arg).and_return(result)
+    def expect_run_boottool(self, arg, result):
+        result = common_utils.CmdResult(stdout=result, exit_status=0)
+        self.loader._run_boottool.expect_call(arg).and_return(result)
 
 
-	def test_get_type(self):
-		# set up the recording
-		self.expect_run_boottool("--bootloader-probe", "lilo\n")
-		# run the test
-		self.assertEquals(self.loader.get_type(), "lilo")
-		self.god.check_playback()
+    def test_get_type(self):
+        # set up the recording
+        self.expect_run_boottool("--bootloader-probe", "lilo\n")
+        # run the test
+        self.assertEquals(self.loader.get_type(), "lilo")
+        self.god.check_playback()
 
 
-	def test_get_arch(self):
-		# set up the recording
-		self.expect_run_boottool("--arch-probe", "x86_64\n")
-		# run the test
-		self.assertEquals(self.loader.get_architecture(), "x86_64")
-		self.god.check_playback()
+    def test_get_arch(self):
+        # set up the recording
+        self.expect_run_boottool("--arch-probe", "x86_64\n")
+        # run the test
+        self.assertEquals(self.loader.get_architecture(), "x86_64")
+        self.god.check_playback()
 
 
-	def test_get_default(self):
-		# set up the recording
-		self.expect_run_boottool("--default", "0\n")
-		# run the test
-		self.assertEquals(self.loader.get_default(), "0")
-		self.god.check_playback()
+    def test_get_default(self):
+        # set up the recording
+        self.expect_run_boottool("--default", "0\n")
+        # run the test
+        self.assertEquals(self.loader.get_default(), "0")
+        self.god.check_playback()
 
 
-	def test_get_titles(self):
-		# set up the recording
-		self.expect_run_boottool(mock.regex_comparator(
-		    r"^--info all \|"), "title #1\ntitle #2\n")
-		# run the test
-		self.assertEquals(self.loader.get_titles(),
-				  ["title #1", "title #2"])
-		self.god.check_playback()
+    def test_get_titles(self):
+        # set up the recording
+        self.expect_run_boottool(mock.regex_comparator(
+            r"^--info all \|"), "title #1\ntitle #2\n")
+        # run the test
+        self.assertEquals(self.loader.get_titles(),
+                          ["title #1", "title #2"])
+        self.god.check_playback()
 
 
-	def test_get_info_single_result(self):
-		RESULT = (
-		"index\t: 5\n"
-		"args\t: ro single\n"
-		"boot\t: (hd0,0)\n"
-		"initrd\t: /boot/initrd.img-2.6.15-23-386\n"
-		"kernel\t: /boot/vmlinuz-2.6.15-23-386\n"
-		"root\t: UUID=07D7-0714\n"
-		"savedefault\t:   \n"
-		"title\t: Distro, kernel 2.6.15-23-386\n"
-		)
-		# set up the recording
-		self.expect_run_boottool("--info=5", RESULT)
-		# run the test
-		info = self.loader.get_info(5)
-		self.god.check_playback()
-		expected_info = {"index": "5", "args": "ro single",
-				 "boot": "(hd0,0)",
-				 "initrd": "/boot/initrd.img-2.6.15-23-386",
-				 "kernel": "/boot/vmlinuz-2.6.15-23-386",
-				 "root": "UUID=07D7-0714", "savedefault": "",
-				 "title": "Distro, kernel 2.6.15-23-386"}
-		self.assertEquals(expected_info, info)
+    def test_get_info_single_result(self):
+        RESULT = (
+        "index\t: 5\n"
+        "args\t: ro single\n"
+        "boot\t: (hd0,0)\n"
+        "initrd\t: /boot/initrd.img-2.6.15-23-386\n"
+        "kernel\t: /boot/vmlinuz-2.6.15-23-386\n"
+        "root\t: UUID=07D7-0714\n"
+        "savedefault\t:   \n"
+        "title\t: Distro, kernel 2.6.15-23-386\n"
+        )
+        # set up the recording
+        self.expect_run_boottool("--info=5", RESULT)
+        # run the test
+        info = self.loader.get_info(5)
+        self.god.check_playback()
+        expected_info = {"index": "5", "args": "ro single",
+                         "boot": "(hd0,0)",
+                         "initrd": "/boot/initrd.img-2.6.15-23-386",
+                         "kernel": "/boot/vmlinuz-2.6.15-23-386",
+                         "root": "UUID=07D7-0714", "savedefault": "",
+                         "title": "Distro, kernel 2.6.15-23-386"}
+        self.assertEquals(expected_info, info)
 
 
-	def test_get_info_missing_result(self):
-		# set up the recording
-		self.expect_run_boottool("--info=4", "")
-		# run the test
-		info = self.loader.get_info(4)
-		self.god.check_playback()
-		self.assertEquals({}, info)
+    def test_get_info_missing_result(self):
+        # set up the recording
+        self.expect_run_boottool("--info=4", "")
+        # run the test
+        info = self.loader.get_info(4)
+        self.god.check_playback()
+        self.assertEquals({}, info)
 
 
-	def test_get_info_multiple_results(self):
-		RESULT = (
-		"index\t: 5\n"
-		"args\t: ro single\n"
-		"boot\t: (hd0,0)\n"
-		"initrd\t: /boot/initrd.img-2.6.15-23-386\n"
-		"kernel\t: /boot/vmlinuz-2.6.15-23-386\n"
-		"root\t: UUID=07D7-0714\n"
-		"savedefault\t:   \n"
-		"title\t: Distro, kernel 2.6.15-23-386\n"
-		"\n"
-		"index\t: 7\n"
-		"args\t: ro single\n"
-		"boot\t: (hd0,0)\n"
-		"initrd\t: /boot/initrd.img-2.6.15-23-686\n"
-		"kernel\t: /boot/vmlinuz-2.6.15-23-686\n"
-		"root\t: UUID=07D7-0714\n"
-		"savedefault\t:   \n"
-		"title\t: Distro, kernel 2.6.15-23-686\n"
-		)
-		# set up the recording
-		self.expect_run_boottool("--info=all", RESULT)
-		# run the test
-		info = self.loader.get_all_info()
-		self.god.check_playback()
-		expected_info = [{"index": "5", "args": "ro single",
-				  "boot": "(hd0,0)",
-				  "initrd": "/boot/initrd.img-2.6.15-23-386",
-				  "kernel": "/boot/vmlinuz-2.6.15-23-386",
-				  "root": "UUID=07D7-0714", "savedefault": "",
-				  "title": "Distro, kernel 2.6.15-23-386"},
-				 {"index": "7", "args": "ro single",
-				  "boot": "(hd0,0)",
-				  "initrd": "/boot/initrd.img-2.6.15-23-686",
-				  "kernel": "/boot/vmlinuz-2.6.15-23-686",
-				  "root": "UUID=07D7-0714", "savedefault": "",
-				  "title": "Distro, kernel 2.6.15-23-686"}]
-		self.assertEquals(expected_info, info)
+    def test_get_info_multiple_results(self):
+        RESULT = (
+        "index\t: 5\n"
+        "args\t: ro single\n"
+        "boot\t: (hd0,0)\n"
+        "initrd\t: /boot/initrd.img-2.6.15-23-386\n"
+        "kernel\t: /boot/vmlinuz-2.6.15-23-386\n"
+        "root\t: UUID=07D7-0714\n"
+        "savedefault\t:   \n"
+        "title\t: Distro, kernel 2.6.15-23-386\n"
+        "\n"
+        "index\t: 7\n"
+        "args\t: ro single\n"
+        "boot\t: (hd0,0)\n"
+        "initrd\t: /boot/initrd.img-2.6.15-23-686\n"
+        "kernel\t: /boot/vmlinuz-2.6.15-23-686\n"
+        "root\t: UUID=07D7-0714\n"
+        "savedefault\t:   \n"
+        "title\t: Distro, kernel 2.6.15-23-686\n"
+        )
+        # set up the recording
+        self.expect_run_boottool("--info=all", RESULT)
+        # run the test
+        info = self.loader.get_all_info()
+        self.god.check_playback()
+        expected_info = [{"index": "5", "args": "ro single",
+                          "boot": "(hd0,0)",
+                          "initrd": "/boot/initrd.img-2.6.15-23-386",
+                          "kernel": "/boot/vmlinuz-2.6.15-23-386",
+                          "root": "UUID=07D7-0714", "savedefault": "",
+                          "title": "Distro, kernel 2.6.15-23-386"},
+                         {"index": "7", "args": "ro single",
+                          "boot": "(hd0,0)",
+                          "initrd": "/boot/initrd.img-2.6.15-23-686",
+                          "kernel": "/boot/vmlinuz-2.6.15-23-686",
+                          "root": "UUID=07D7-0714", "savedefault": "",
+                          "title": "Distro, kernel 2.6.15-23-686"}]
+        self.assertEquals(expected_info, info)
 
 
-	def test_set_default(self):
-		# set up the recording
-		self.loader._run_boottool.expect_call("--set-default=41")
-		# run the test
-		self.loader.set_default(41)
-		self.god.check_playback()
+    def test_set_default(self):
+        # set up the recording
+        self.loader._run_boottool.expect_call("--set-default=41")
+        # run the test
+        self.loader.set_default(41)
+        self.god.check_playback()
 
 
-	def test_add_args(self):
-		# set up the recording
-		self.loader._run_boottool.expect_call(
-		    "--update-kernel=10 --args=\"some kernel args\"")
-		# run the test
-		self.loader.add_args(10, "some kernel args")
-		self.god.check_playback()
+    def test_add_args(self):
+        # set up the recording
+        self.loader._run_boottool.expect_call(
+            "--update-kernel=10 --args=\"some kernel args\"")
+        # run the test
+        self.loader.add_args(10, "some kernel args")
+        self.god.check_playback()
 
 
-	def test_remove_args(self):
-		# set up the recording
-		self.loader._run_boottool.expect_call(
-		    "--update-kernel=12 --remove-args=\"some kernel args\"")
-		# run the test
-		self.loader.remove_args(12, "some kernel args")
-		self.god.check_playback()
+    def test_remove_args(self):
+        # set up the recording
+        self.loader._run_boottool.expect_call(
+            "--update-kernel=12 --remove-args=\"some kernel args\"")
+        # run the test
+        self.loader.remove_args(12, "some kernel args")
+        self.god.check_playback()
 
 
-	def test_add_kernel_basic(self):
-		self.loader.get_titles = self.god.create_mock_function(
-		    "get_titles")
-		# set up the recording
-		self.loader.get_titles.expect_call().and_return(["notmylabel"])
-		self.loader._run_boottool.expect_call(
-		    "--add-kernel \"/unittest/kernels/vmlinuz\" "
-		    "--title \"mylabel\" --make-default")
-		# run the test
-		self.loader.add_kernel("/unittest/kernels/vmlinuz",
-				       "mylabel")
-		self.god.check_playback()
+    def test_add_kernel_basic(self):
+        self.loader.get_titles = self.god.create_mock_function(
+            "get_titles")
+        # set up the recording
+        self.loader.get_titles.expect_call().and_return(["notmylabel"])
+        self.loader._run_boottool.expect_call(
+            "--add-kernel \"/unittest/kernels/vmlinuz\" "
+            "--title \"mylabel\" --make-default")
+        # run the test
+        self.loader.add_kernel("/unittest/kernels/vmlinuz",
+                               "mylabel")
+        self.god.check_playback()
 
 
-	def test_add_kernel_adds_root(self):
-		self.loader.get_titles = self.god.create_mock_function(
-		    "get_titles")
-		# set up the recording
-		self.loader.get_titles.expect_call().and_return(["notmylabel"])
-		self.loader._run_boottool.expect_call(
-		    "--add-kernel \"/unittest/kernels/vmlinuz\" "
-		    "--title \"mylabel\" --root \"/unittest/root\" "
-		    "--make-default")
-		# run the test
-		self.loader.add_kernel("/unittest/kernels/vmlinuz",
-				       "mylabel", root="/unittest/root")
-		self.god.check_playback()
+    def test_add_kernel_adds_root(self):
+        self.loader.get_titles = self.god.create_mock_function(
+            "get_titles")
+        # set up the recording
+        self.loader.get_titles.expect_call().and_return(["notmylabel"])
+        self.loader._run_boottool.expect_call(
+            "--add-kernel \"/unittest/kernels/vmlinuz\" "
+            "--title \"mylabel\" --root \"/unittest/root\" "
+            "--make-default")
+        # run the test
+        self.loader.add_kernel("/unittest/kernels/vmlinuz",
+                               "mylabel", root="/unittest/root")
+        self.god.check_playback()
 
 
-	def test_add_kernel_adds_args(self):
-		self.loader.get_titles = self.god.create_mock_function(
-		    "get_titles")
-		# set up the recording
-		self.loader.get_titles.expect_call().and_return(["notmylabel"])
-		self.loader._run_boottool.expect_call(
-		    "--add-kernel \"/unittest/kernels/vmlinuz\" "
-		    "--title \"mylabel\" --args \"my kernel args\" "
-		    "--make-default")
-		# run the test
-		self.loader.add_kernel("/unittest/kernels/vmlinuz",
-				       "mylabel", args="my kernel args")
-		self.god.check_playback()
+    def test_add_kernel_adds_args(self):
+        self.loader.get_titles = self.god.create_mock_function(
+            "get_titles")
+        # set up the recording
+        self.loader.get_titles.expect_call().and_return(["notmylabel"])
+        self.loader._run_boottool.expect_call(
+            "--add-kernel \"/unittest/kernels/vmlinuz\" "
+            "--title \"mylabel\" --args \"my kernel args\" "
+            "--make-default")
+        # run the test
+        self.loader.add_kernel("/unittest/kernels/vmlinuz",
+                               "mylabel", args="my kernel args")
+        self.god.check_playback()
 
 
-	def test_add_kernel_adds_initrd(self):
-		self.loader.get_titles = self.god.create_mock_function(
-		    "get_titles")
-		# set up the recording
-		self.loader.get_titles.expect_call().and_return(["notmylabel"])
-		self.loader._run_boottool.expect_call(
-		    "--add-kernel \"/unittest/kernels/vmlinuz\" "
-		    "--title \"mylabel\" --initrd \"/unittest/initrd\" "
-		    "--make-default")
-		# run the test
-		self.loader.add_kernel("/unittest/kernels/vmlinuz",
-				       "mylabel", initrd="/unittest/initrd")
-		self.god.check_playback()
+    def test_add_kernel_adds_initrd(self):
+        self.loader.get_titles = self.god.create_mock_function(
+            "get_titles")
+        # set up the recording
+        self.loader.get_titles.expect_call().and_return(["notmylabel"])
+        self.loader._run_boottool.expect_call(
+            "--add-kernel \"/unittest/kernels/vmlinuz\" "
+            "--title \"mylabel\" --initrd \"/unittest/initrd\" "
+            "--make-default")
+        # run the test
+        self.loader.add_kernel("/unittest/kernels/vmlinuz",
+                               "mylabel", initrd="/unittest/initrd")
+        self.god.check_playback()
 
 
-	def test_add_kernel_disables_make_default(self):
-		self.loader.get_titles = self.god.create_mock_function(
-		    "get_titles")
-		# set up the recording
-		self.loader.get_titles.expect_call().and_return(["notmylabel"])
-		self.loader._run_boottool.expect_call(
-		    "--add-kernel \"/unittest/kernels/vmlinuz\" "
-		    "--title \"mylabel\"")
-		# run the test
-		self.loader.add_kernel("/unittest/kernels/vmlinuz",
-				       "mylabel", default=False)
-		self.god.check_playback()
+    def test_add_kernel_disables_make_default(self):
+        self.loader.get_titles = self.god.create_mock_function(
+            "get_titles")
+        # set up the recording
+        self.loader.get_titles.expect_call().and_return(["notmylabel"])
+        self.loader._run_boottool.expect_call(
+            "--add-kernel \"/unittest/kernels/vmlinuz\" "
+            "--title \"mylabel\"")
+        # run the test
+        self.loader.add_kernel("/unittest/kernels/vmlinuz",
+                               "mylabel", default=False)
+        self.god.check_playback()
 
 
-	def test_add_kernel_removes_old(self):
-		self.loader.get_titles = self.god.create_mock_function(
-		    "get_titles")
-		# set up the recording
-		self.loader.get_titles.expect_call().and_return(["mylabel"])
-		self.loader._run_boottool.expect_call(
-		    "--remove-kernel \"mylabel\"")
-		self.loader._run_boottool.expect_call(
-		    "--add-kernel \"/unittest/kernels/vmlinuz\" "
-		    "--title \"mylabel\" --make-default")
-		# run the test
-		self.loader.add_kernel("/unittest/kernels/vmlinuz",
-				       "mylabel")
-		self.god.check_playback()
+    def test_add_kernel_removes_old(self):
+        self.loader.get_titles = self.god.create_mock_function(
+            "get_titles")
+        # set up the recording
+        self.loader.get_titles.expect_call().and_return(["mylabel"])
+        self.loader._run_boottool.expect_call(
+            "--remove-kernel \"mylabel\"")
+        self.loader._run_boottool.expect_call(
+            "--add-kernel \"/unittest/kernels/vmlinuz\" "
+            "--title \"mylabel\" --make-default")
+        # run the test
+        self.loader.add_kernel("/unittest/kernels/vmlinuz",
+                               "mylabel")
+        self.god.check_playback()
 
 
-	def test_remove_kernel(self):
-		# set up the recording
-		self.loader._run_boottool.expect_call("--remove-kernel=14")
-		# run the test
-		self.loader.remove_kernel(14)
-		self.god.check_playback()
+    def test_remove_kernel(self):
+        # set up the recording
+        self.loader._run_boottool.expect_call("--remove-kernel=14")
+        # run the test
+        self.loader.remove_kernel(14)
+        self.god.check_playback()
 
 
-	def test_boot_once(self):
-		# set up the recording
-		self.loader._run_boottool.expect_call(
-		    "--boot-once --title=autotest")
-		# run the test
-		self.loader.boot_once("autotest")
-		self.god.check_playback()
+    def test_boot_once(self):
+        # set up the recording
+        self.loader._run_boottool.expect_call(
+            "--boot-once --title=autotest")
+        # run the test
+        self.loader.boot_once("autotest")
+        self.god.check_playback()
 
 
 if __name__ == "__main__":
-	unittest.main()
+    unittest.main()
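
The tests above lean on autotest's record/playback mock: expectations are queued up front with expect_call() and and_return(), the code under test runs, and check_playback() verifies that exactly the recorded calls happened. A minimal sketch of the pattern (this is not the real mock module the tests import, only an illustration of the idea):

    class MockFunction(object):
        def __init__(self, name):
            self.name = name
            self.expected = []      # FIFO of [args, canned_return] pairs
            self.errors = []

        def expect_call(self, *args):
            self.expected.append([args, None])
            return self

        def and_return(self, value):
            self.expected[-1][1] = value
            return self

        def __call__(self, *args):
            if not self.expected:
                self.errors.append('unexpected call: %r' % (args,))
                return None
            expected_args, result = self.expected.pop(0)
            if expected_args != args:
                self.errors.append('expected %r, got %r'
                                   % (expected_args, args))
            return result

        def check_playback(self):
            assert not self.errors and not self.expected

    # usage, mirroring test_get_type above
    run = MockFunction('_run_boottool')
    run.expect_call('--bootloader-probe').and_return('lilo\n')
    assert run('--bootloader-probe') == 'lilo\n'
    run.check_playback()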
diff --git a/server/hosts/guest.py b/server/hosts/guest.py
index 0d8270c..8fe6bc2 100644
--- a/server/hosts/guest.py
+++ b/server/hosts/guest.py
@@ -8,7 +8,7 @@
 Implementation details:
 You should import the "hosts" package instead of importing each type of host.
 
-	Guest: a virtual machine on which you can run programs
+        Guest: a virtual machine on which you can run programs
 """
 
 __author__ = """
@@ -22,49 +22,49 @@
 
 
 class Guest(ssh_host.SSHHost):
-	"""
-	This class represents a virtual machine on which you can run 
-	programs.
-	
-	It is not the machine autoserv is running on.
-	
-	Implementation details:
-	This is an abstract class, leaf subclasses must implement the methods
-	listed here and in parent classes which have no implementation. They 
-	may reimplement methods which already have an implementation. You 
-	must not instantiate this class but should instantiate one of those 
-	leaf subclasses.
-	"""
-	
-	controlling_hypervisor = None
+    """
+    This class represents a virtual machine on which you can run
+    programs.
 
-	
-	def __init__(self, controlling_hypervisor):
-		"""
-		Construct a Guest object
-		
-		Args:
-			controlling_hypervisor: Hypervisor object that is 
-				responsible for the creation and management of 
-				this guest
-		"""
-		hostname= controlling_hypervisor.new_guest()
-		super(Guest, self).__init__(hostname)
-		self.controlling_hypervisor= controlling_hypervisor
+    It is not the machine autoserv is running on.
 
-	
-	def __del__(self):
-		"""
-		Destroy a Guest object
-		"""
-		self.controlling_hypervisor.delete_guest(self.hostname)
+    Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+    listed here and in parent classes which have no implementation. They
+    may reimplement methods which already have an implementation. You
+    must not instantiate this class but should instantiate one of those
+    leaf subclasses.
+    """
 
-	
-	def hardreset(self, timeout=600, wait=True):
-		"""
-		Perform a "hardreset" of the guest.
-		
-		It is restarted through the hypervisor. That will restart it 
-		even if the guest otherwise innaccessible through ssh.
-		"""
-		return self.controlling_hypervisor.reset_guest(self.hostname)
+    controlling_hypervisor = None
+
+
+    def __init__(self, controlling_hypervisor):
+        """
+        Construct a Guest object
+
+        Args:
+                controlling_hypervisor: Hypervisor object that is
+                        responsible for the creation and management of
+                        this guest
+        """
+        hostname = controlling_hypervisor.new_guest()
+        super(Guest, self).__init__(hostname)
+        self.controlling_hypervisor = controlling_hypervisor
+
+
+    def __del__(self):
+        """
+        Destroy a Guest object
+        """
+        self.controlling_hypervisor.delete_guest(self.hostname)
+
+
+    def hardreset(self, timeout=600, wait=True):
+        """
+        Perform a "hardreset" of the guest.
+
+        It is restarted through the hypervisor. That will restart it
+        even if the guest is otherwise inaccessible through ssh.
+        """
+        return self.controlling_hypervisor.reset_guest(self.hostname)
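
Guest pushes its whole lifecycle out to the hypervisor object it is handed: new_guest() at construction, delete_guest() in __del__, and reset_guest() for hardreset. The method names below come from the code above, but the stub class itself is hypothetical, shown only to make the assumed interface explicit:

    class StubHypervisor(object):
        # Illustrative stand-in for the hypervisor objects Guest expects.

        def new_guest(self):
            # create a fresh VM and hand back its network hostname
            return 'guest0.example.net'

        def delete_guest(self, hostname):
            # tear the VM down when the Guest object goes away
            pass

        def reset_guest(self, hostname):
            # power-cycle the VM from the outside; no ssh required
            pass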
diff --git a/server/hosts/kvm_guest.py b/server/hosts/kvm_guest.py
index a676cba..ae0acc8 100644
--- a/server/hosts/kvm_guest.py
+++ b/server/hosts/kvm_guest.py
@@ -8,7 +8,7 @@
 Implementation details:
 You should import the "hosts" package instead of importing each type of host.
 
-	KVMGuest: a KVM virtual machine on which you can run programs
+        KVMGuest: a KVM virtual machine on which you can run programs
 """
 
 __author__ = """
@@ -22,26 +22,26 @@
 
 
 class KVMGuest(guest.Guest):
-	"""This class represents a KVM virtual machine on which you can run 
-	programs.
-	
-	Implementation details:
-	This is a leaf class in an abstract class hierarchy, it must 
-	implement the unimplemented methods in parent classes.
-	"""
-	
-	def __init__(self, controlling_hypervisor, qemu_options):
-		"""
-		Construct a KVMGuest object
-		
-		Args:
-			controlling_hypervisor: hypervisor object that is 
-				responsible for the creation and management of 
-				this guest
-			qemu_options: options to pass to qemu, these should be
-				appropriately shell escaped, if need be.
-		"""
-		hostname= controlling_hypervisor.new_guest(qemu_options)
-		# bypass Guest's __init__
-		super(guest.Guest, self).__init__(hostname)
-		self.controlling_hypervisor= controlling_hypervisor
+    """This class represents a KVM virtual machine on which you can run
+    programs.
+
+    Implementation details:
+    This is a leaf class in an abstract class hierarchy; it must
+    implement the unimplemented methods in parent classes.
+    """
+
+    def __init__(self, controlling_hypervisor, qemu_options):
+        """
+        Construct a KVMGuest object
+
+        Args:
+                controlling_hypervisor: hypervisor object that is
+                        responsible for the creation and management of
+                        this guest
+                qemu_options: options to pass to qemu; these should be
+                        appropriately shell-escaped, if need be.
+        """
+        hostname = controlling_hypervisor.new_guest(qemu_options)
+        # bypass Guest's __init__
+        super(guest.Guest, self).__init__(hostname)
+        self.controlling_hypervisor = controlling_hypervisor
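
One subtlety worth calling out: KVMGuest passes guest.Guest, its own parent, as the first argument to super(), so attribute lookup starts after Guest in the MRO and Guest.__init__ (which would call new_guest() with no options) never runs. The same trick in a self-contained sketch:

    class A(object):
        def __init__(self):
            print('A.__init__')

    class B(A):
        def __init__(self):
            print('B.__init__')         # never runs for C instances

    class C(B):
        def __init__(self):
            super(B, self).__init__()   # starts after B, so A.__init__ runs

    C()     # prints only 'A.__init__'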
diff --git a/server/hosts/remote.py b/server/hosts/remote.py
index 9f5e955..84bb400 100644
--- a/server/hosts/remote.py
+++ b/server/hosts/remote.py
@@ -1,32 +1,32 @@
 """This class defines the Remote host class, mixing in the SiteHost class
 if it is available."""
 
-# site_host.py may be non-existant or empty, make sure that an appropriate 
+# site_host.py may be non-existent or empty; make sure that an appropriate
 # SiteHost class is created nevertheless
 try:
-	from site_host import SiteHost
+    from site_host import SiteHost
 except ImportError:
-	import base_classes
-	class SiteHost(base_classes.Host):
-		def __init__(self):
-			super(SiteHost, self).__init__()
+    import base_classes
+    class SiteHost(base_classes.Host):
+        def __init__(self):
+            super(SiteHost, self).__init__()
 
 
 class RemoteHost(SiteHost):
-	"""This class represents a remote machine on which you can run 
-	programs.
+    """This class represents a remote machine on which you can run
+    programs.
 
-	It may be accessed through a network, a serial line, ...
-	It is not the machine autoserv is running on.
+    It may be accessed through a network, a serial line, ...
+    It is not the machine autoserv is running on.
 
-	Implementation details:
-	This is an abstract class, leaf subclasses must implement the methods
-	listed here and in parent classes which have no implementation. They 
-	may reimplement methods which already have an implementation. You 
-	must not instantiate this class but should instantiate one of those 
-	leaf subclasses."""
+    Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+    listed here and in parent classes which have no implementation. They
+    may reimplement methods which already have an implementation. You
+    must not instantiate this class but should instantiate one of those
+    leaf subclasses."""
 
-	hostname= None
+    hostname = None
 
-	def __init__(self):
-		super(RemoteHost, self).__init__()
+    def __init__(self):
+        super(RemoteHost, self).__init__()
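
The try/except ImportError dance above is a small plugin hook: if a deployment ships a site_host.py, its SiteHost is used; if not, a do-nothing SiteHost takes its place so the rest of the hierarchy is unaffected. The same idiom in isolation, with hypothetical module and class names:

    # fall back to a harmless default when the optional module is absent
    try:
        from site_tweaks import Tweaks      # hypothetical site module
    except ImportError:
        class Tweaks(object):
            def apply(self):
                pass                        # no site customizations

    Tweaks().apply()    # safe whether or not site_tweaks exists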
diff --git a/server/hosts/site_host.py b/server/hosts/site_host.py
index 4be6a4d..c0b6693 100644
--- a/server/hosts/site_host.py
+++ b/server/hosts/site_host.py
@@ -11,7 +11,7 @@
 Implementation details:
 You should import the "hosts" package instead of importing each type of host.
 
-	SiteHost: Host containing site-specific customizations.
+        SiteHost: Host containing site-specific customizations.
 """
 
 __author__ = """
@@ -24,14 +24,14 @@
 import base_classes, utils
 
 class SiteHost(base_classes.Host):
-	"""
-	Custom host to containing site-specific methods or attributes.
-	"""
-	
-	def __init__(self):
-		super(SiteHost, self).__init__()
-		self.serverdir = utils.get_server_dir()
+    """
+    Custom host containing site-specific methods or attributes.
+    """
 
-	
-	def setup(self):
-		return
+    def __init__(self):
+        super(SiteHost, self).__init__()
+        self.serverdir = utils.get_server_dir()
+
+
+    def setup(self):
+        return
diff --git a/server/hosts/ssh_host.py b/server/hosts/ssh_host.py
index 378d048..c4ce4ab 100644
--- a/server/hosts/ssh_host.py
+++ b/server/hosts/ssh_host.py
@@ -8,7 +8,7 @@
 Implementation details:
 You should import the "hosts" package instead of importing each type of host.
 
-	SSHHost: a remote machine with a ssh access
+        SSHHost: a remote machine with a ssh access
 """
 
 __author__ = """
@@ -26,869 +26,869 @@
 
 
 class PermissionDeniedError(error.AutoservRunError):
-	pass
+    pass
 
 
 class SSHHost(remote.RemoteHost):
-	"""
-	This class represents a remote machine controlled through an ssh 
-	session on which you can run programs.
+    """
+    This class represents a remote machine controlled through an ssh
+    session on which you can run programs.
 
-	It is not the machine autoserv is running on. The machine must be 
-	configured for password-less login, for example through public key 
-	authentication.
+    It is not the machine autoserv is running on. The machine must be
+    configured for password-less login, for example through public key
+    authentication.
 
-	It includes support for controlling the machine through a serial
-	console on which you can run programs. If such a serial console is
-	set up on the machine then capabilities such as hard reset and
-	boot strap monitoring are available. If the machine does not have a
-	serial console available then ordinary SSH-based commands will
-	still be available, but attempts to use extensions such as
-	console logging or hard reset will fail silently.
+    It includes support for controlling the machine through a serial
+    console on which you can run programs. If such a serial console is
+    set up on the machine then capabilities such as hard reset and
+    boot strap monitoring are available. If the machine does not have a
+    serial console available then ordinary SSH-based commands will
+    still be available, but attempts to use extensions such as
+    console logging or hard reset will fail silently.
 
-	Implementation details:
-	This is a leaf class in an abstract class hierarchy, it must 
-	implement the unimplemented methods in parent classes.
-	"""
+    Implementation details:
+    This is a leaf class in an abstract class hierarchy; it must
+    implement the unimplemented methods in parent classes.
+    """
 
-	DEFAULT_REBOOT_TIMEOUT = 1800
-	job = None
+    DEFAULT_REBOOT_TIMEOUT = 1800
+    job = None
 
-	def __init__(self, hostname, user="root", port=22, initialize=True,
-		     conmux_log="console.log",
-		     conmux_server=None, conmux_attach=None,
-		     netconsole_log=None, netconsole_port=6666, autodir=None,
-		     password=''):
-		"""
-		Construct a SSHHost object
-		
-		Args:
-			hostname: network hostname or address of remote machine
-			user: user to log in as on the remote machine
-			port: port the ssh daemon is listening on on the remote 
-				machine
-		""" 
-		self.hostname= hostname
-		self.user= user
-		self.port= port
-		self.tmp_dirs= []
-		self.initialize = initialize
-		self.autodir = autodir
-		self.password = password
+    def __init__(self, hostname, user="root", port=22, initialize=True,
+                 conmux_log="console.log",
+                 conmux_server=None, conmux_attach=None,
+                 netconsole_log=None, netconsole_port=6666, autodir=None,
+                 password=''):
+        """
+        Construct a SSHHost object
 
-		super(SSHHost, self).__init__()
+        Args:
+                hostname: network hostname or address of remote machine
+                user: user to log in as on the remote machine
+                port: port the ssh daemon is listening on, on the
+                        remote machine
+        """
+        self.hostname = hostname
+        self.user = user
+        self.port = port
+        self.tmp_dirs = []
+        self.initialize = initialize
+        self.autodir = autodir
+        self.password = password
 
-		self.conmux_server = conmux_server
-		if conmux_attach:
-			self.conmux_attach = conmux_attach
-		else:
-			self.conmux_attach = os.path.abspath(os.path.join(
-						self.serverdir, '..',
-						'conmux', 'conmux-attach'))
-		self.logger_popen = None
-		self.warning_stream = None
-		self.__start_console_log(conmux_log)
+        super(SSHHost, self).__init__()
 
-		self.bootloader = bootloader.Bootloader(self)
+        self.conmux_server = conmux_server
+        if conmux_attach:
+            self.conmux_attach = conmux_attach
+        else:
+            self.conmux_attach = os.path.abspath(os.path.join(
+                                    self.serverdir, '..',
+                                    'conmux', 'conmux-attach'))
+        self.logger_popen = None
+        self.warning_stream = None
+        self.__start_console_log(conmux_log)
 
-		self.__netconsole_param = ""
-		self.netlogger_popen = None
-		if netconsole_log:
-			self.__init_netconsole_params(netconsole_port)
-			self.__start_netconsole_log(netconsole_log, netconsole_port)
-			self.__load_netconsole_module()
+        self.bootloader = bootloader.Bootloader(self)
 
+        self.__netconsole_param = ""
+        self.netlogger_popen = None
+        if netconsole_log:
+            self.__init_netconsole_params(netconsole_port)
+            self.__start_netconsole_log(netconsole_log, netconsole_port)
+            self.__load_netconsole_module()
 
-	@staticmethod
-	def __kill(popen):
-		return_code = popen.poll()
-		if return_code is None:
-			try:
-				os.kill(popen.pid, signal.SIGTERM)
-			except OSError:
-				pass
 
+    @staticmethod
+    def __kill(popen):
+        return_code = popen.poll()
+        if return_code is None:
+            try:
+                os.kill(popen.pid, signal.SIGTERM)
+            except OSError:
+                pass
 
-	def __del__(self):
-		"""
-		Destroy a SSHHost object
-		"""
-		for dir in self.tmp_dirs:
-			try:
-				self.run('rm -rf "%s"' % (utils.sh_escape(dir)))
-			except error.AutoservRunError:
-				pass
-		# kill the console logger
-		if getattr(self, 'logger_popen', None):
-			self.__kill(self.logger_popen)
-			if self.job:
-				self.job.warning_loggers.discard(
-				    self.warning_stream)
-			self.warning_stream.close()
-		# kill the netconsole logger
-		if getattr(self, 'netlogger_popen', None):
-			self.__unload_netconsole_module()
-			self.__kill(self.netlogger_popen)
 
+    def __del__(self):
+        """
+        Destroy a SSHHost object
+        """
+        for dir in self.tmp_dirs:
+            try:
+                self.run('rm -rf "%s"' % (utils.sh_escape(dir)))
+            except error.AutoservRunError:
+                pass
+        # kill the console logger
+        if getattr(self, 'logger_popen', None):
+            self.__kill(self.logger_popen)
+            if self.job:
+                self.job.warning_loggers.discard(
+                    self.warning_stream)
+            self.warning_stream.close()
+        # kill the netconsole logger
+        if getattr(self, 'netlogger_popen', None):
+            self.__unload_netconsole_module()
+            self.__kill(self.netlogger_popen)
 
-	def __init_netconsole_params(self, port):
-		"""
-		Connect to the remote machine and determine the values to use for the
-		required netconsole parameters.
-		"""
-		# PROBLEM: on machines with multiple IPs this may not make any sense
-		# It also doesn't work with IPv6
-		remote_ip = socket.gethostbyname(self.hostname)
-		local_ip = socket.gethostbyname(socket.gethostname())
-		# Get the gateway of the remote machine
-		try:
-			traceroute = self.run('traceroute -n %s' % local_ip)
-		except error.AutoservRunError:
-			return
-		first_node = traceroute.stdout.split("\n")[0]
-		match = re.search(r'\s+((\d+\.){3}\d+)\s+', first_node)
-		if match:
-			router_ip = match.group(1)
-		else:
-			return
-		# Look up the MAC address of the gateway
-		try:
-			self.run('ping -c 1 %s' % router_ip)
-			arp = self.run('arp -n -a %s' % router_ip)
-		except error.AutoservRunError:
-			return
-		match = re.search(r'\s+(([0-9A-F]{2}:){5}[0-9A-F]{2})\s+', arp.stdout)
-		if match:
-			gateway_mac = match.group(1)
-		else:
-			return
-		self.__netconsole_param = 'netconsole=@%s/,%s@%s/%s' % (remote_ip,
-									port,
-									local_ip,
-									gateway_mac)
 
+    def __init_netconsole_params(self, port):
+        """
+        Connect to the remote machine and determine the values to use for the
+        required netconsole parameters.
+        """
+        # PROBLEM: on machines with multiple IPs this may not make any sense
+        # It also doesn't work with IPv6
+        remote_ip = socket.gethostbyname(self.hostname)
+        local_ip = socket.gethostbyname(socket.gethostname())
+        # Get the gateway of the remote machine
+        try:
+            traceroute = self.run('traceroute -n %s' % local_ip)
+        except error.AutoservRunError:
+            return
+        first_node = traceroute.stdout.split("\n")[0]
+        match = re.search(r'\s+((\d+\.){3}\d+)\s+', first_node)
+        if match:
+            router_ip = match.group(1)
+        else:
+            return
+        # Look up the MAC address of the gateway
+        try:
+            self.run('ping -c 1 %s' % router_ip)
+            arp = self.run('arp -n -a %s' % router_ip)
+        except error.AutoservRunError:
+            return
+        match = re.search(r'\s+(([0-9A-F]{2}:){5}[0-9A-F]{2})\s+', arp.stdout)
+        if match:
+            gateway_mac = match.group(1)
+        else:
+            return
+        self.__netconsole_param = 'netconsole=@%s/,%s@%s/%s' % (remote_ip,
+                                                                port,
+                                                                local_ip,
+                                                                gateway_mac)
 
-	def __start_netconsole_log(self, logfilename, port):
-		"""
-		Log the output of netconsole to a specified file
-		"""
-		if logfilename == None:
-			return
-		cmd = ['nc', '-u', '-l', '-p', str(port)]
-		logfile = open(logfilename, 'a', 0)
-		self.netlogger_popen = subprocess.Popen(cmd, stdout=logfile)
 
+    def __start_netconsole_log(self, logfilename, port):
+        """
+        Log the output of netconsole to a specified file
+        """
+        if logfilename is None:
+            return
+        cmd = ['nc', '-u', '-l', '-p', str(port)]
+        logfile = open(logfilename, 'a', 0)
+        self.netlogger_popen = subprocess.Popen(cmd, stdout=logfile)
 
-	def __load_netconsole_module(self):
-		"""
-		Make a best effort to load the netconsole module.
 
-		Note that loading the module can fail even when the remote machine is
-		working correctly if netconsole is already compiled into the kernel
-		and started.
-		"""
-		if not self.__netconsole_param:
-			return
-		try:
-			self.run('modprobe netconsole %s' % self.__netconsole_param)
-		except error.AutoservRunError:
-			# if it fails there isn't much we can do, just keep going
-			pass
+    def __load_netconsole_module(self):
+        """
+        Make a best effort to load the netconsole module.
 
+        Note that loading the module can fail even when the remote machine is
+        working correctly if netconsole is already compiled into the kernel
+        and started.
+        """
+        if not self.__netconsole_param:
+            return
+        try:
+            self.run('modprobe netconsole %s' % self.__netconsole_param)
+        except error.AutoservRunError:
+            # if it fails there isn't much we can do, just keep going
+            pass
 
-	def __unload_netconsole_module(self):
-		try:
-			self.run('modprobe -r netconsole')
-		except error.AutoservRunError:
-			pass
 
+    def __unload_netconsole_module(self):
+        try:
+            self.run('modprobe -r netconsole')
+        except error.AutoservRunError:
+            pass
 
-	def wait_for_restart(self, timeout=DEFAULT_REBOOT_TIMEOUT):
-		if not self.wait_down(300):	# Make sure he's dead, Jim
-			self.__record("ABORT", None, "reboot.verify", "shutdown failed")
-			raise error.AutoservRebootError(
-			    "Host did not shut down")
-		self.wait_up(timeout)
-		time.sleep(2) # this is needed for complete reliability
-		if self.wait_up(timeout):
-			self.__record("GOOD", None, "reboot.verify")
-		else:
-			self.__record("ABORT", None, "reboot.verify", "Host did not return from reboot")
-			raise error.AutoservRebootError(
-			    "Host did not return from reboot")
-		print "Reboot complete"
 
+    def wait_for_restart(self, timeout=DEFAULT_REBOOT_TIMEOUT):
+        if not self.wait_down(300):     # Make sure he's dead, Jim
+            self.__record("ABORT", None, "reboot.verify", "shutdown failed")
+            raise error.AutoservRebootError(
+                "Host did not shut down")
+        self.wait_up(timeout)
+        time.sleep(2) # this is needed for complete reliability
+        if self.wait_up(timeout):
+            self.__record("GOOD", None, "reboot.verify")
+        else:
+            self.__record("ABORT", None, "reboot.verify", "Host did not return from reboot")
+            raise error.AutoservRebootError(
+                "Host did not return from reboot")
+        print "Reboot complete"
 
-	def hardreset(self, timeout=DEFAULT_REBOOT_TIMEOUT, wait=True,
-	              conmux_command='hardreset'):
-		"""
-		Reach out and slap the box in the power switch.
-		Args:
-			conmux_command: The command to run via the conmux interface
-			timeout: timelimit in seconds before the machine is considered unreachable
-			wait: Whether or not to wait for the machine to reboot
-		
-		"""
-		conmux_command = r"'~$%s'" % conmux_command
-		if not self.__console_run(conmux_command):
-			self.__record("ABORT", None, "reboot.start", "hard reset unavailable")
-			raise error.AutoservUnsupportedError(
-			    'Hard reset unavailable')
 
-		if wait:
-			self.wait_for_restart(timeout)
-		self.__record("GOOD", None, "reboot.start", "hard reset")
+    def hardreset(self, timeout=DEFAULT_REBOOT_TIMEOUT, wait=True,
+                  conmux_command='hardreset'):
+        """
+        Reach out and slap the box in the power switch.
+        Args:
+                conmux_command: The command to run via the conmux interface
+                timeout: time limit in seconds before the machine is considered unreachable
+                wait: Whether or not to wait for the machine to reboot
 
+        """
+        conmux_command = r"'~$%s'" % conmux_command
+        if not self.__console_run(conmux_command):
+            self.__record("ABORT", None, "reboot.start", "hard reset unavailable")
+            raise error.AutoservUnsupportedError(
+                'Hard reset unavailable')
 
-	def __conmux_hostname(self):
-		if self.conmux_server:
-			return '%s/%s' % (self.conmux_server, self.hostname)
-		else:
-			return self.hostname
+        if wait:
+            self.wait_for_restart(timeout)
+        self.__record("GOOD", None, "reboot.start", "hard reset")
 
 
-	def __start_console_log(self, logfilename):
-		"""
-		Log the output of the console session to a specified file
-		"""
-		if logfilename == None:
-			return
-		if not self.conmux_attach or not os.path.exists(self.conmux_attach):
-			return
+    def __conmux_hostname(self):
+        if self.conmux_server:
+            return '%s/%s' % (self.conmux_server, self.hostname)
+        else:
+            return self.hostname
 
-		r, w = os.pipe()
-		script_path = os.path.join(self.serverdir,
-					   'warning_monitor.py')
-		cmd = [self.conmux_attach, self.__conmux_hostname(),
-		       '%s %s %s %d' % (sys.executable, script_path,
-					logfilename, w)]
-		dev_null = open(os.devnull, 'w')
 
-		self.warning_stream = os.fdopen(r, 'r', 0)
-		if self.job:
-			self.job.warning_loggers.add(self.warning_stream)
-		self.logger_popen = subprocess.Popen(cmd, stderr=dev_null)
-		os.close(w)
+    def __start_console_log(self, logfilename):
+        """
+        Log the output of the console session to a specified file
+        """
+        if logfilename is None:
+            return
+        if not self.conmux_attach or not os.path.exists(self.conmux_attach):
+            return
 
+        r, w = os.pipe()
+        script_path = os.path.join(self.serverdir,
+                                   'warning_monitor.py')
+        cmd = [self.conmux_attach, self.__conmux_hostname(),
+               '%s %s %s %d' % (sys.executable, script_path,
+                                logfilename, w)]
+        dev_null = open(os.devnull, 'w')
 
-	def __console_run(self, cmd):
-		"""
-		Send a command to the conmux session
-		"""
-		if not self.conmux_attach or not os.path.exists(self.conmux_attach):
-			return False
-		cmd = '%s %s echo %s 2> /dev/null' % (self.conmux_attach,
-						      self.__conmux_hostname(),
-						      cmd)
-		result = utils.system(cmd, ignore_status=True)
-		return result == 0
+        self.warning_stream = os.fdopen(r, 'r', 0)
+        if self.job:
+            self.job.warning_loggers.add(self.warning_stream)
+        self.logger_popen = subprocess.Popen(cmd, stderr=dev_null)
+        os.close(w)
 
 
-	def __run_reboot_group(self, reboot_func):
-		if self.job:
-			self.job.run_reboot(reboot_func, self.get_kernel_ver)
-		else:
-			reboot_func()
+    def __console_run(self, cmd):
+        """
+        Send a command to the conmux session
+        """
+        if not self.conmux_attach or not os.path.exists(self.conmux_attach):
+            return False
+        cmd = '%s %s echo %s 2> /dev/null' % (self.conmux_attach,
+                                              self.__conmux_hostname(),
+                                              cmd)
+        result = utils.system(cmd, ignore_status=True)
+        return result == 0
 
 
-	def __record(self, status_code, subdir, operation, status = ''):
-		if self.job:
-			self.job.record(status_code, subdir, operation, status)
-		else:
-			if not subdir:
-				subdir = "----"
-			msg = "%s\t%s\t%s\t%s" % (status_code, subdir, operation, status)
-			sys.stderr.write(msg + "\n")
+    def __run_reboot_group(self, reboot_func):
+        if self.job:
+            self.job.run_reboot(reboot_func, self.get_kernel_ver)
+        else:
+            reboot_func()
 
 
-	def ssh_base_command(self, connect_timeout=30):
-		SSH_BASE_COMMAND = '/usr/bin/ssh -a -x -o ' + \
-				   'BatchMode=yes -o ConnectTimeout=%d ' + \
-				   '-o ServerAliveInterval=300'
-		assert isinstance(connect_timeout, (int, long))
-		assert connect_timeout > 0 # can't disable the timeout
-		return SSH_BASE_COMMAND % connect_timeout
+    def __record(self, status_code, subdir, operation, status=''):
+        if self.job:
+            self.job.record(status_code, subdir, operation, status)
+        else:
+            if not subdir:
+                subdir = "----"
+            msg = "%s\t%s\t%s\t%s" % (status_code, subdir, operation, status)
+            sys.stderr.write(msg + "\n")
 
 
-	def ssh_command(self, connect_timeout=30, options=''):
-		"""Construct an ssh command with proper args for this host."""
-		ssh = self.ssh_base_command(connect_timeout)
-		return r'%s %s -l %s -p %d %s' % (ssh,
-		                                  options,
-		                                  self.user,
-		                                  self.port,
-		                                  self.hostname)
+    def ssh_base_command(self, connect_timeout=30):
+        SSH_BASE_COMMAND = '/usr/bin/ssh -a -x -o ' + \
+                           'BatchMode=yes -o ConnectTimeout=%d ' + \
+                           '-o ServerAliveInterval=300'
+        assert isinstance(connect_timeout, (int, long))
+        assert connect_timeout > 0 # can't disable the timeout
+        return SSH_BASE_COMMAND % connect_timeout
 
 
-	def _run(self, command, timeout, ignore_status, stdout, stderr,
-	         connect_timeout, env, options):
-		"""Helper function for run()."""
+    def ssh_command(self, connect_timeout=30, options=''):
+        """Construct an ssh command with proper args for this host."""
+        ssh = self.ssh_base_command(connect_timeout)
+        return r'%s %s -l %s -p %d %s' % (ssh,
+                                          options,
+                                          self.user,
+                                          self.port,
+                                          self.hostname)
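
For reference, with the defaults above (user 'root', port 22) and a hypothetical hostname, ssh_command() renders to a flat string; the doubled space is where the empty options argument is interpolated:

    base = ('/usr/bin/ssh -a -x -o BatchMode=yes -o ConnectTimeout=%d '
            '-o ServerAliveInterval=300') % 30
    cmd = r'%s %s -l %s -p %d %s' % (base, '', 'root', 22, 'box.example.net')
    # /usr/bin/ssh -a -x -o BatchMode=yes -o ConnectTimeout=30 \
    #     -o ServerAliveInterval=300  -l root -p 22 box.example.net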
 
-		ssh_cmd = self.ssh_command(connect_timeout, options)
-		echo_cmd = 'echo Connected. >&2'
-		full_cmd = '%s "%s;%s %s"' % (ssh_cmd, echo_cmd, env,
-		                              utils.sh_escape(command))
-		result = utils.run(full_cmd, timeout, True, stdout, stderr)
 
-		# The error messages will show up in band (indistinguishable
-		# from stuff sent through the SSH connection), so we have the
-		# remote computer echo the message "Connected." before running
-		# any command.  Since the following 2 errors have to do with
-		# connecting, it's safe to do these checks.
-		if result.exit_status == 255:
-			if re.search(r'^ssh: connect to host .* port .*: '
-			             r'Connection timed out\r$', result.stderr):
-				raise error.AutoservSSHTimeout("ssh timed out",
-							       result)
-			if result.stderr == "Permission denied.\r\n":
-				msg = "ssh permission denied"
-				raise PermissionDeniedError(msg, result)
+    def _run(self, command, timeout, ignore_status, stdout, stderr,
+             connect_timeout, env, options):
+        """Helper function for run()."""
 
-		if not ignore_status and result.exit_status > 0:
-			raise error.AutoservRunError("command execution error",
-						     result)
+        ssh_cmd = self.ssh_command(connect_timeout, options)
+        echo_cmd = 'echo Connected. >&2'
+        full_cmd = '%s "%s;%s %s"' % (ssh_cmd, echo_cmd, env,
+                                      utils.sh_escape(command))
+        result = utils.run(full_cmd, timeout, True, stdout, stderr)
 
-		return result
+        # The error messages will show up in band (indistinguishable
+        # from stuff sent through the SSH connection), so we have the
+        # remote computer echo the message "Connected." before running
+        # any command.  Since the following 2 errors have to do with
+        # connecting, it's safe to do these checks.
+        if result.exit_status == 255:
+            if re.search(r'^ssh: connect to host .* port .*: '
+                         r'Connection timed out\r$', result.stderr):
+                raise error.AutoservSSHTimeout("ssh timed out",
+                                               result)
+            if result.stderr == "Permission denied.\r\n":
+                msg = "ssh permission denied"
+                raise PermissionDeniedError(msg, result)
+
+        if not ignore_status and result.exit_status > 0:
+            raise error.AutoservRunError("command execution error",
+                                         result)
+
+        return result
+
+
+    def run(self, command, timeout=3600, ignore_status=False,
+            stdout_tee=None, stderr_tee=None, connect_timeout=30):
+        """
+        Run a command on the remote host.
+
+        Args:
+                command: the command line string
+                timeout: time limit in seconds before attempting to
+                        kill the running process. The run() function
+                        will take a few seconds longer than 'timeout'
+                        to complete if it has to kill the process.
+                ignore_status: do not raise an exception, no matter
+                        what the exit code of the command is.
+
+        Returns:
+                a hosts.base_classes.CmdResult object
+
+        Raises:
+                AutoservRunError: the exit code of the command
+                        execution was not 0
+                AutoservSSHTimeout: ssh connection has timed out
+        """
+        stdout = stdout_tee or sys.stdout
+        stderr = stderr_tee or sys.stdout
+        print "ssh: %s" % command
+        env = " ".join("=".join(pair) for pair in self.env.iteritems())
+        try:
+            try:
+                return self._run(command, timeout,
+                                 ignore_status, stdout,
+                                 stderr, connect_timeout,
+                                 env, '')
+            except PermissionDeniedError:
+                print("Permission denied to ssh; re-running"
+                      "with increased logging:")
+                return self._run(command, timeout,
+                                 ignore_status, stdout,
+                                 stderr, connect_timeout,
+                                 env, '-v -v -v')
+        except error.CmdError, cmderr:
+            # We get a CmdError here only if there is timeout of
+            # that command.  Catch that and stuff it into
+            # AutoservRunError and raise it.
+            raise error.AutoservRunError(cmderr.args[0],
+                                         cmderr.args[1])
+
+
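
A couple of hypothetical calls, assuming an already-constructed SSHHost bound to the name host, to show how run() and its CmdResult fit together:

    result = host.run('uname -r', timeout=60)
    print(result.stdout)        # kernel version of the remote machine

    # with ignore_status a non-zero exit code is returned, not raised
    result = host.run('grep needle /etc/haystack', ignore_status=True)
    if result.exit_status != 0:
        print('pattern not found')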
+    def run_short(self, command, **kwargs):
+        """
+        Calls the run() command with a short default timeout.
+
+        Args:
+                Takes the same arguments as does run(),
+                with the exception of the timeout argument which
+                here is fixed at 60 seconds.
+                It returns the result of run.
+        """
+        return self.run(command, timeout=60, **kwargs)
+
+
+    def run_grep(self, command, timeout=30, ignore_status=False,
+                 stdout_ok_regexp=None, stdout_err_regexp=None,
+                 stderr_ok_regexp=None, stderr_err_regexp=None,
+                 connect_timeout=30):
+        """
+        Run a command on the remote host and look for regexp
+        in stdout or stderr to determine if the command was
+        successful or not.
+
+        Args:
+                command: the command line string
+                timeout: time limit in seconds before attempting to
+                        kill the running process. The run() function
+                        will take a few seconds longer than 'timeout'
+                        to complete if it has to kill the process.
+                ignore_status: do not raise an exception, no matter
+                        what the exit code of the command is.
+                stdout_ok_regexp: regexp that should be in stdout
+                        if the command was successful.
+                stdout_err_regexp: regexp that should be in stdout
+                        if the command failed.
+                stderr_ok_regexp: regexp that should be in stderr
+                        if the command was successful.
+                stderr_err_regexp: regexp that should be in stderr
+                        if the command failed.
+
+        Returns:
+                None if the command was successful; raises an
+                exception otherwise.
+
+        Raises:
+                AutoservRunError:
+                - if the exit code of the command execution was not 0
+                - if stderr_err_regexp is found in stderr
+                - if stdout_err_regexp is found in stdout
+                - if stderr_ok_regexp is not found in stderr
+                - if stdout_ok_regexp is not found in stdout
+        """
+
+        # We ignore the status, because we will handle it at the end.
+        result = self.run(command, timeout, ignore_status=True,
+                          connect_timeout=connect_timeout)
+
+        # Look for the patterns, in order
+        for (regexp, stream) in ((stderr_err_regexp, result.stderr),
+                                 (stdout_err_regexp, result.stdout)):
+            if regexp and stream:
+                err_re = re.compile(regexp)
+                if err_re.search(stream):
+                    raise error.AutoservRunError(
+                        '%s failed, found error pattern: '
+                        '"%s"' % (command, regexp), result)
+
+        for (regexp, stream) in ((stderr_ok_regexp, result.stderr),
+                                 (stdout_ok_regexp, result.stdout)):
+            if regexp and stream:
+                ok_re = re.compile(regexp)
+                if ok_re.search(stream):
+                    return
+
+        if not ignore_status and result.exit_status > 0:
+            raise error.AutoservRunError("command execution error",
+                                         result)
+
+
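
A hypothetical run_grep call against the same host object: succeed only when a known bootloader name shows up in stdout, and fail fast on an error pattern in stderr regardless of the exit code:

    host.run_grep('boottool --bootloader-probe',
                  stdout_ok_regexp=r'grub|lilo',
                  stderr_err_regexp=r'(?i)error')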
+    def reboot(self, timeout=DEFAULT_REBOOT_TIMEOUT, label=None,
+               kernel_args=None, wait=True):
+        """
+        Reboot the remote host.
+
+        Args:
+                timeout: time limit in seconds to wait for the
+                        machine to restart (passed on to
+                        wait_for_restart)
+        """
+        self.reboot_setup()
+
+        # forcibly include the "netconsole" kernel arg
+        if self.__netconsole_param:
+            if kernel_args is None:
+                kernel_args = self.__netconsole_param
+            else:
+                kernel_args += " " + self.__netconsole_param
+            # unload the (possibly loaded) module to avoid shutdown issues
+            self.__unload_netconsole_module()
+        if label or kernel_args:
+            self.bootloader.install_boottool()
+        if label:
+            self.bootloader.set_default(label)
+        if kernel_args:
+            if not label:
+                default = int(self.bootloader.get_default())
+                label = self.bootloader.get_titles()[default]
+            self.bootloader.add_args(label, kernel_args)
+
+        # define a function for the reboot and run it in a group
+        print "Reboot: initiating reboot"
+        def reboot():
+            self.__record("GOOD", None, "reboot.start")
+            try:
+                self.run('(sleep 5; reboot) '
+                         '</dev/null >/dev/null 2>&1 &')
+            except error.AutoservRunError:
+                self.__record("ABORT", None, "reboot.start",
+                              "reboot command failed")
+                raise
+            if wait:
+                self.wait_for_restart(timeout)
+                self.reboot_followup()
+        self.__run_reboot_group(reboot)
+
+
+    def reboot_followup(self):
+        super(SSHHost, self).reboot_followup()
+        self.__load_netconsole_module() # if the builtin fails
+
+
+    def __copy_files(self, sources, dest):
+        """
+        Copy files from one machine to another.
+
+        This is for internal use by other methods that intend to move
+        files between machines. It expects a list of source files and
+        a destination (a filename if the source is a single file, a
+        destination otherwise). The names must already be
+        pre-processed into the appropriate rsync/scp friendly
+        format (%s@%s:%s).
+        """
+        # wait until there are only a small number of copies running
+        # before starting this one
+        get_config = global_config.global_config.get_config_value
+        max_simultaneous = get_config("HOSTS",
+                                      "max_simultaneous_file_copies",
+                                      type=int)
+        while True:
+            copy_count = 0
+            procs = utils.system_output('ps -ef')
+            for line in procs.splitlines():
+                if 'rsync ' in line or 'scp ' in line:
+                    copy_count += 1
+            if copy_count < max_simultaneous:
+                break
+            time.sleep(60)
+
+        print '__copy_files: copying %s to %s' % (sources, dest)
+        try:
+            utils.run('rsync --rsh="%s" -az %s %s' % (
+                self.ssh_base_command(), ' '.join(sources), dest))
+        except Exception:
+            try:
+                utils.run('scp -rpq -P %d %s "%s"' % (
+                    self.port, ' '.join(sources), dest))
+            except error.CmdError, cmderr:
+                raise error.AutoservRunError(cmderr.args[0],
+                                             cmderr.args[1])
 
+    def get_file(self, source, dest):
+        """
+        Copy files from the remote host to a local path.
 
-	def run(self, command, timeout=3600, ignore_status=False,
-		stdout_tee=None, stderr_tee=None, connect_timeout=30):
-		"""
-		Run a command on the remote host.
-		
-		Args:
-			command: the command line string
-			timeout: time limit in seconds before attempting to 
-				kill the running process. The run() function
-				will take a few seconds longer than 'timeout'
-				to complete if it has to kill the process.
-			ignore_status: do not raise an exception, no matter 
-				what the exit code of the command is.
-		
-		Returns:
-			a hosts.base_classes.CmdResult object
-		
-		Raises:
-			AutoservRunError: the exit code of the command 
-				execution was not 0
-			AutoservSSHTimeout: ssh connection has timed out
-		"""
-		stdout = stdout_tee or sys.stdout
-		stderr = stderr_tee or sys.stdout
-		print "ssh: %s" % command
-		env = " ".join("=".join(pair) for pair in self.env.iteritems())
-		try:
-			try:
-				return self._run(command, timeout,
-				                 ignore_status, stdout,
-				                 stderr, connect_timeout,
-				                 env, '')
-			except PermissionDeniedError:
-				print("Permission denied to ssh; re-running"
-				      "with increased logging:")
-				return self._run(command, timeout,
-				                 ignore_status, stdout,
-				                 stderr, connect_timeout,
-				                 env, '-v -v -v')
-		except error.CmdError, cmderr:
-			# We get a CmdError here only if there is timeout of
-			# that command.  Catch that and stuff it into
-			# AutoservRunError and raise it.
-			raise error.AutoservRunError(cmderr.args[0],
-			                             cmderr.args[1])
+        Directories will be copied recursively.
+        If a source component is a directory with a trailing slash,
+        the content of the directory will be copied, otherwise, the
+        directory itself and its content will be copied. This
+        behavior is similar to that of the program 'rsync'.
 
+        Args:
+                source: either
+                        1) a single file or directory, as a string
+                        2) a list of one or more (possibly mixed)
+                                files or directories
+                dest: a file or a directory (if source contains a
+                        directory or more than one element, you must
+                        supply a directory dest)
 
-	def run_short(self, command, **kwargs):
-		"""
-		Calls the run() command with a short default timeout.
-		
-		Args:
-			Takes the same arguments as does run(),
-			with the exception of the timeout argument which 
-			here is fixed at 60 seconds.
-			It returns the result of run.
-		"""
-		return self.run(command, timeout=60, **kwargs)
+        Raises:
+                AutoservRunError: the scp command failed
+        """
+        if isinstance(source, types.StringTypes):
+            source = [source]
 
+        processed_source = []
+        for entry in source:
+            if entry.endswith('/'):
+                format_string = '%s@%s:"%s*"'
+            else:
+                format_string = '%s@%s:"%s"'
+            entry = format_string % (self.user, self.hostname,
+                    utils.scp_remote_escape(entry))
+            processed_source.append(entry)
 
-	def run_grep(self, command, timeout=30, ignore_status=False,
-				 stdout_ok_regexp=None, stdout_err_regexp=None,
-				 stderr_ok_regexp=None, stderr_err_regexp=None,
-				 connect_timeout=30):
-		"""
-		Run a command on the remote host and look for regexp
-		in stdout or stderr to determine if the command was
-		successul or not.
+        processed_dest= os.path.abspath(dest)
+        if os.path.isdir(dest):
+            processed_dest= "%s/" % (utils.sh_escape(processed_dest),)
+        else:
+            processed_dest= utils.sh_escape(processed_dest)
 
-		Args:
-			command: the command line string
-			timeout: time limit in seconds before attempting to
-				kill the running process. The run() function
-				will take a few seconds longer than 'timeout'
-				to complete if it has to kill the process.
-			ignore_status: do not raise an exception, no matter
-				what the exit code of the command is.
-			stdout_ok_regexp: regexp that should be in stdout
-				if the command was successul.
-			stdout_err_regexp: regexp that should be in stdout
-				if the command failed.
-			stderr_ok_regexp: regexp that should be in stderr
-				if the command was successul.
-			stderr_err_regexp: regexp that should be in stderr
-				if the command failed.
+        self.__copy_files(processed_source, processed_dest)
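
Hypothetical calls (host and the paths are illustrative) showing the
trailing-slash semantics documented above:

    host.get_file('/var/log/messages', '/tmp/results/messages')
    # trailing slash: fetch the contents of /var/log, not the directory itself
    host.get_file(['/etc/fstab', '/var/log/'], '/tmp/results/')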
 
-		Returns:
-			if the command was successul, raises an exception
-			otherwise.
 
-		Raises:
-			AutoservRunError:
-			- the exit code of the command execution was not 0.
-			- If stderr_err_regexp is found in stderr,
-			- If stdout_err_regexp is found in stdout,
-			- If stderr_ok_regexp is not found in stderr.
-			- If stdout_ok_regexp is not found in stdout,
-		"""
+    def send_file(self, source, dest):
+        """
+        Copy files from a local path to the remote host.
 
-		# We ignore the status, because we will handle it at the end.
-		result = self.run(command, timeout, ignore_status=True,
-				  connect_timeout=connect_timeout)
+        Directories will be copied recursively.
+        If a source component is a directory with a trailing slash,
+        the content of the directory will be copied, otherwise, the
+        directory itself and its content will be copied. This
+        behavior is similar to that of the program 'rsync'.
 
-		# Look for the patterns, in order
-		for (regexp, stream) in ((stderr_err_regexp, result.stderr),
-					 (stdout_err_regexp, result.stdout)):
-			if regexp and stream:
-				err_re = re.compile (regexp)
-				if err_re.search(stream):
-					raise error.AutoservRunError(
-					    '%s failed, found error pattern: '
-					    '"%s"' % (command, regexp), result)
+        Args:
+                source: either
+                        1) a single file or directory, as a string
+                        2) a list of one or more (possibly mixed)
+                                files or directories
+                dest: a file or a directory (if source contains a
+                        directory or more than one element, you must
+                        supply a directory dest)
 
-		for (regexp, stream) in ((stderr_ok_regexp, result.stderr),
-					 (stdout_ok_regexp, result.stdout)):
-			if regexp and stream:
-				ok_re = re.compile (regexp)
-				if ok_re.search(stream):
-					if ok_re.search(stream):
-						return
+        Raises:
+                AutoservRunError: the scp command failed
+        """
+        if isinstance(source, types.StringTypes):
+            source= [source]
 
-		if not ignore_status and result.exit_status > 0:
-			raise error.AutoservRunError("command execution error",
-						     result)
+        processed_source= []
+        for entry in source:
+            if entry.endswith('/'):
+                format_string= '"%s/"*'
+            else:
+                format_string= '"%s"'
+            entry= format_string % (utils.sh_escape(os.path.abspath(entry)),)
+            processed_source.append(entry)
 
+        remote_dest = '%s@%s:"%s"' % (
+                    self.user, self.hostname,
+                    utils.scp_remote_escape(dest))
 
-	def reboot(self, timeout=DEFAULT_REBOOT_TIMEOUT, label=None,
-		   kernel_args=None, wait=True):
-		"""
-		Reboot the remote host.
-		
-		Args:
-			timeout
-		"""
-		self.reboot_setup()
+        self.__copy_files(processed_source, remote_dest)
+        self.run('find "%s" -type d | xargs -i -r chmod o+rx "{}"' % dest)
+        self.run('find "%s" -type f | xargs -i -r chmod o+r "{}"' % dest)
 
-		# forcibly include the "netconsole" kernel arg
-		if self.__netconsole_param:
-			if kernel_args is None:
-				kernel_args = self.__netconsole_param
-			else:
-				kernel_args += " " + self.__netconsole_param
-			# unload the (possibly loaded) module to avoid shutdown issues
-			self.__unload_netconsole_module()
-		if label or kernel_args:
-			self.bootloader.install_boottool()
-		if label:
-			self.bootloader.set_default(label)
-		if kernel_args:
-			if not label:
-				default = int(self.bootloader.get_default())
-				label = self.bootloader.get_titles()[default]
-			self.bootloader.add_args(label, kernel_args)
+    def get_tmp_dir(self):
+        """
+        Return the pathname of a directory on the host suitable
+        for temporary file storage.
 
-		# define a function for the reboot and run it in a group
-		print "Reboot: initiating reboot"
-		def reboot():
-			self.__record("GOOD", None, "reboot.start")
-			try:
-				self.run('(sleep 5; reboot) '
-					 '</dev/null >/dev/null 2>&1 &')
-			except error.AutoservRunError:
-				self.__record("ABORT", None, "reboot.start",
-					      "reboot command failed")
-				raise
-			if wait:
-				self.wait_for_restart(timeout) 
-				self.reboot_followup()
-		self.__run_reboot_group(reboot)
+        The directory and its content will be deleted automatically
+        on the destruction of the Host object that was used to obtain
+        it.
+        """
+        dir_name= self.run("mktemp -d /tmp/autoserv-XXXXXX").stdout.rstrip(" \n")
+        self.tmp_dirs.append(dir_name)
+        return dir_name
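
Illustrative usage; the suffix of the returned path comes from mktemp, so the
value shown here is made up:

    tmp = host.get_tmp_dir()    # e.g. '/tmp/autoserv-Xa91bQ'
    host.run('tar -C "%s" -xzf /tmp/src.tar.gz' % tmp)
    # the directory is deleted when the Host object is destroyed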
 
 
-	def reboot_followup(self):
-		super(SSHHost, self).reboot_followup()
-		self.__load_netconsole_module() # if the builtin fails
+    def is_up(self):
+        """
+        Check if the remote host is up.
 
+        Returns:
+                True if the remote host is up, False otherwise
+        """
+        try:
+            self.ssh_ping()
+        except:
+            return False
+        return True
 
-	def __copy_files(self, sources, dest):
-		"""
-		Copy files from one machine to another.
 
-		This is for internal use by other methods that intend to move
-		files between machines. It expects a list of source files and
-		a destination (a filename if the source is a single file, a
-		destination otherwise). The names must already be
-		pre-processed into the appropriate rsync/scp friendly
-		format (%s@%s:%s).
-		"""
-		# wait until there are only a small number of copies running
-		# before starting this one
-		get_config = global_config.global_config.get_config_value
-		max_simultaneous = get_config("HOSTS",
-					      "max_simultaneous_file_copies",
-					      type=int)
-		while True:
-			copy_count = 0
-			procs = utils.system_output('ps -ef')
-			for line in procs.splitlines():
-				if 'rsync ' in line or 'scp ' in line:
-					copy_count += 1
-			if copy_count < max_simultaneous:
-				break
-			time.sleep(60)
+    def _is_wait_up_process_up(self):
+        """
+        Checks if any SSHHost waitup processes are running yet on the
+        remote host.
 
-		print '__copy_files: copying %s to %s' % (sources, dest)
-		try:
-			utils.run('rsync --rsh="%s" -az %s %s' % (
-			    self.ssh_base_command(), ' '.join(sources), dest))
-		except Exception:
-			try:
-				utils.run('scp -rpq -P %d %s "%s"' % (
-				    self.port, ' '.join(sources), dest))
-			except error.CmdError, cmderr:
-				raise error.AutoservRunError(cmderr.args[0],
-							     cmderr.args[1])
+        Returns True if any of the waitup processes are running, False
+        otherwise.
+        """
+        processes = self.get_wait_up_processes()
+        if len(processes) == 0:
+            return True # wait up processes aren't being used
+        for procname in processes:
+            exit_status = self.run("ps -e | grep '%s'" % procname,
+                                   ignore_status=True).exit_status
+            if exit_status == 0:
+                return True
+        return False
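
The check leans on grep's exit status, which is 0 only when a match is found.
A sketch of configuring a wait-up process, assuming a setter counterpart to
get_wait_up_processes() (the setter name is hypothetical):

    host.set_wait_up_processes(['getty'])    # hypothetical setter
    # wait_up() will now only succeed once 'ps -e | grep getty' matches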
 
-	def get_file(self, source, dest):
-		"""
-		Copy files from the remote host to a local path.
-		
-		Directories will be copied recursively.
-		If a source component is a directory with a trailing slash, 
-		the content of the directory will be copied, otherwise, the 
-		directory itself and its content will be copied. This 
-		behavior is similar to that of the program 'rsync'.
-		
-		Args:
-			source: either
-				1) a single file or directory, as a string
-				2) a list of one or more (possibly mixed) 
-					files or directories
-			dest: a file or a directory (if source contains a 
-				directory or more than one element, you must 
-				supply a directory dest)
-		
-		Raises:
-			AutoservRunError: the scp command failed
-		"""
-		if isinstance(source, types.StringTypes):
-			source= [source]
-		
-		processed_source= []
-		for entry in source:
-			if entry.endswith('/'):
-				format_string= '%s@%s:"%s*"'
-			else:
-				format_string= '%s@%s:"%s"'
-			entry= format_string % (self.user, self.hostname, 
-				utils.scp_remote_escape(entry))
-			processed_source.append(entry)
-		
-		processed_dest= os.path.abspath(dest)
-		if os.path.isdir(dest):
-			processed_dest= "%s/" % (utils.sh_escape(processed_dest),)
-		else:
-			processed_dest= utils.sh_escape(processed_dest)
 
-		self.__copy_files(processed_source, processed_dest)
+    def wait_up(self, timeout=None):
+        """
+        Wait until the remote host is up or the timeout expires.
 
+        In fact, it will wait until an ssh connection to the remote
+        host can be established, and getty is running.
 
-	def send_file(self, source, dest):
-		"""
-		Copy files from a local path to the remote host.
-		
-		Directories will be copied recursively.
-		If a source component is a directory with a trailing slash, 
-		the content of the directory will be copied, otherwise, the 
-		directory itself and its content will be copied. This 
-		behavior is similar to that of the program 'rsync'.
-		
-		Args:
-			source: either
-				1) a single file or directory, as a string
-				2) a list of one or more (possibly mixed) 
-					files or directories
-			dest: a file or a directory (if source contains a 
-				directory or more than one element, you must 
-				supply a directory dest)
-		
-		Raises:
-			AutoservRunError: the scp command failed
-		"""
-		if isinstance(source, types.StringTypes):
-			source= [source]
-		
-		processed_source= []
-		for entry in source:
-			if entry.endswith('/'):
-				format_string= '"%s/"*'
-			else:
-				format_string= '"%s"'
-			entry= format_string % (utils.sh_escape(os.path.abspath(entry)),)
-			processed_source.append(entry)
+        Args:
+                timeout: time limit in seconds before returning even
+                        if the host is not up.
 
-		remote_dest = '%s@%s:"%s"' % (
-			    self.user, self.hostname,
-			    utils.scp_remote_escape(dest))
+        Returns:
+                True if the host was found to be up, False otherwise
+        """
+        if timeout:
+            end_time= time.time() + timeout
 
-		self.__copy_files(processed_source, remote_dest)
-		self.run('find "%s" -type d | xargs -i -r chmod o+rx "{}"' % dest)
-		self.run('find "%s" -type f | xargs -i -r chmod o+r "{}"' % dest)
+        while not timeout or time.time() < end_time:
+            try:
+                self.ssh_ping()
+            except (error.AutoservRunError,
+                    error.AutoservSSHTimeout):
+                pass
+            else:
+                try:
+                    if self._is_wait_up_process_up():
+                        return True
+                except (error.AutoservRunError,
+                        error.AutoservSSHTimeout):
+                    pass
+            time.sleep(1)
 
-	def get_tmp_dir(self):
-		"""
-		Return the pathname of a directory on the host suitable 
-		for temporary file storage.
-		
-		The directory and its content will be deleted automatically
-		on the destruction of the Host object that was used to obtain
-		it.
-		"""
-		dir_name= self.run("mktemp -d /tmp/autoserv-XXXXXX").stdout.rstrip(" \n")
-		self.tmp_dirs.append(dir_name)
-		return dir_name
+        return False
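
Typical polling usage, with an illustrative timeout:

    if not host.wait_up(timeout=120):
        raise error.AutoservHostError('host never came back up')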
 
 
-	def is_up(self):
-		"""
-		Check if the remote host is up.
-		
-		Returns:
-			True if the remote host is up, False otherwise
-		"""
-		try:
-			self.ssh_ping()
-		except:
-			return False
-		return True
+    def wait_down(self, timeout=None):
+        """
+        Wait until the remote host is down or the timeout expires.
 
+        In fact, it will wait until an ssh connection to the remote
+        host fails.
 
-	def _is_wait_up_process_up(self):
-		"""
-		Checks if any SSHHOST waitup processes are running yet on the
-		remote host.
+        Args:
+                timeout: time limit in seconds before returning even
+                        if the host is not down.
 
-		Returns True if any the waitup processes are running, False
-		otherwise.
-		"""
-		processes = self.get_wait_up_processes()
-		if len(processes) == 0:
-			return True # wait up processes aren't being used
-		for procname in processes:
-			exit_status = self.run("ps -e | grep '%s'" % procname,
-					       ignore_status=True).exit_status
-			if exit_status == 0:
-				return True
-		return False
+        Returns:
+                True if the host was found to be down, False otherwise
+        """
+        if timeout:
+            end_time= time.time() + timeout
 
+        while not timeout or time.time() < end_time:
+            try:
+                self.ssh_ping()
+            except:
+                return True
+            time.sleep(1)
 
-	def wait_up(self, timeout=None):
-		"""
-		Wait until the remote host is up or the timeout expires.
-		
-		In fact, it will wait until an ssh connection to the remote 
-		host can be established, and getty is running.
-		
-		Args:
-			timeout: time limit in seconds before returning even
-				if the host is not up.
-		
-		Returns:
-			True if the host was found to be up, False otherwise
-		"""
-		if timeout:
-			end_time= time.time() + timeout
-		
-		while not timeout or time.time() < end_time:
-			try:
-				self.ssh_ping()
-			except (error.AutoservRunError,
-				error.AutoservSSHTimeout):
-				pass
-			else:
-				try:
-					if self._is_wait_up_process_up():
-						return True
-				except (error.AutoservRunError,
-					error.AutoservSSHTimeout):
-					pass
-			time.sleep(1)
-		
-		return False
+        return False
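
A common pairing around a reboot, with illustrative timeouts: confirm that
the machine actually went down before waiting for it to come back:

    host.run('(sleep 5; reboot) </dev/null >/dev/null 2>&1 &')
    if host.wait_down(timeout=120):
        host.wait_up(timeout=1800)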
 
 
-	def wait_down(self, timeout=None):
-		"""
-		Wait until the remote host is down or the timeout expires.
-		
-		In fact, it will wait until an ssh connection to the remote 
-		host fails.
-		
-		Args:
-			timeout: time limit in seconds before returning even
-				if the host is not up.
-		
-		Returns:
-			True if the host was found to be down, False otherwise
-		"""
-		if timeout:
-			end_time= time.time() + timeout
-		
-		while not timeout or time.time() < end_time:
-			try:
-				self.ssh_ping()
-			except:
-				return True
-			time.sleep(1)
-		
-		return False
+    def ensure_up(self):
+        """
+        Ensure the host is up; if it is not, do not proceed. This
+        prevents cascading failures of tests.
+        """
+        print 'Ensuring that %s is up before continuing' % self.hostname
+        if hasattr(self, 'hardreset') and not self.wait_up(300):
+            print "Performing a hardreset on %s" % self.hostname
+            try:
+                self.hardreset()
+            except error.AutoservUnsupportedError:
+                print "Hardreset is unsupported on %s" % self.hostname
+        if not self.wait_up(60 * 30):
+            # 30 minutes should be more than enough
+            raise error.AutoservHostError
+        print 'Host up, continuing'
 
 
-	def ensure_up(self):
-		"""
-		Ensure the host is up if it is not then do not proceed;
-		this prevents cacading failures of tests
-		"""
-		print 'Ensuring that %s is up before continuing' % self.hostname
-		if hasattr(self, 'hardreset') and not self.wait_up(300):
-			print "Performing a hardreset on %s" % self.hostname
-			try:
-				self.hardreset()
-			except error.AutoservUnsupportedError:
-				print "Hardreset is unsupported on %s" % self.hostname
-		if not self.wait_up(60 * 30):
-			# 30 minutes should be more than enough
-			raise error.AutoservHostError
-		print 'Host up, continuing'
+    def get_num_cpu(self):
+        """
+        Get the number of CPUs in the host according to
+        /proc/cpuinfo.
 
+        Returns:
+                The number of CPUs
+        """
 
-	def get_num_cpu(self):
-		"""
-		Get the number of CPUs in the host according to 
-		/proc/cpuinfo.
-		
-		Returns:
-			The number of CPUs
-		"""
-		
-		proc_cpuinfo = self.run("cat /proc/cpuinfo",
-				stdout_tee=open('/dev/null', 'w')).stdout
-		cpus = 0
-		for line in proc_cpuinfo.splitlines():
-			if line.startswith('processor'):
-				cpus += 1
-		return cpus
+        proc_cpuinfo = self.run("cat /proc/cpuinfo",
+                        stdout_tee=open('/dev/null', 'w')).stdout
+        cpus = 0
+        for line in proc_cpuinfo.splitlines():
+            if line.startswith('processor'):
+                cpus += 1
+        return cpus
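
Usage as in the KVM build further below, scaling a parallel make to the
remote CPU count (build_dir is an assumed variable):

    host.run('make -j%d -C "%s"' % (host.get_num_cpu() * 2,
                                    utils.sh_escape(build_dir)))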
 
 
-	def check_uptime(self):
-		"""
-		Check that uptime is available and monotonically increasing.
-		"""
-		if not self.ping():
-			raise error.AutoservHostError('Client is not pingable')
-		result = self.run("/bin/cat /proc/uptime", 30)
-		return result.stdout.strip().split()[0]
+    def check_uptime(self):
+        """
+        Check that uptime is available and monotonically increasing.
+        """
+        if not self.ping():
+            raise error.AutoservHostError('Client is not pingable')
+        result = self.run("/bin/cat /proc/uptime", 30)
+        return result.stdout.strip().split()[0]
 
 
-	def get_arch(self):
-		"""
-		Get the hardware architecture of the remote machine
-		"""
-		arch = self.run('/bin/uname -m').stdout.rstrip()
-		if re.match(r'i\d86$', arch):
-			arch = 'i386'
-		return arch
+    def get_arch(self):
+        """
+        Get the hardware architecture of the remote machine
+        """
+        arch = self.run('/bin/uname -m').stdout.rstrip()
+        if re.match(r'i\d86$', arch):
+            arch = 'i386'
+        return arch
 
 
-	def get_kernel_ver(self):
-		"""
-		Get the kernel version of the remote machine
-		"""
-		return self.run('/bin/uname -r').stdout.rstrip()
+    def get_kernel_ver(self):
+        """
+        Get the kernel version of the remote machine
+        """
+        return self.run('/bin/uname -r').stdout.rstrip()
 
 
-	def get_cmdline(self):
-		"""
-		Get the kernel command line of the remote machine
-		"""
-		return self.run('cat /proc/cmdline').stdout.rstrip()
+    def get_cmdline(self):
+        """
+        Get the kernel command line of the remote machine
+        """
+        return self.run('cat /proc/cmdline').stdout.rstrip()
 
 
-	def ping(self):
-		"""
-		Ping the remote system, and return whether it's available
-		"""
-		fpingcmd = "%s -q %s" % ('/usr/bin/fping', self.hostname)
-		rc = utils.system(fpingcmd, ignore_status = 1)
-		return (rc == 0)
+    def ping(self):
+        """
+        Ping the remote system, and return whether it's available
+        """
+        fpingcmd = "%s -q %s" % ('/usr/bin/fping', self.hostname)
+        rc = utils.system(fpingcmd, ignore_status = 1)
+        return (rc == 0)
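
fping exits non-zero when the target is unreachable, so the boolean falls
straight out of the return code, e.g.:

    if not host.ping():
        raise error.AutoservHostError('Client is not pingable')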
 
 
-	def ssh_ping(self, timeout = 60):
-		try:
-			self.run('true', timeout = timeout, connect_timeout = timeout)
-		except error.AutoservSSHTimeout:
-			msg = "ssh ping timed out. timeout = %s" % timeout
-			raise error.AutoservSSHTimeout(msg)
-		except error.AutoservRunError, exc:
-			msg = "command true failed in ssh ping"
-			raise error.AutoservRunError(msg, exc.args[1])
+    def ssh_ping(self, timeout = 60):
+        try:
+            self.run('true', timeout = timeout, connect_timeout = timeout)
+        except error.AutoservSSHTimeout:
+            msg = "ssh ping timed out. timeout = %s" % timeout
+            raise error.AutoservSSHTimeout(msg)
+        except error.AutoservRunError, exc:
+            msg = "command true failed in ssh ping"
+            raise error.AutoservRunError(msg, exc.args[1])
 
 
-	def get_autodir(self):
-		return self.autodir
+    def get_autodir(self):
+        return self.autodir
 
 
-	def ssh_setup_key(self):
-		try:
-			print 'Performing ssh key setup on %s:%d as %s' % \
-			    (self.hostname, self.port, self.user)
+    def ssh_setup_key(self):
+        try:
+            print 'Performing ssh key setup on %s:%d as %s' % \
+                (self.hostname, self.port, self.user)
 
-			host = pxssh.pxssh()
-			host.login(self.hostname, self.user, self.password,
-				port=self.port)
+            host = pxssh.pxssh()
+            host.login(self.hostname, self.user, self.password,
+                    port=self.port)
 
-			try:
-				public_key = utils.get_public_key()
+            try:
+                public_key = utils.get_public_key()
 
-				host.sendline('mkdir -p ~/.ssh')
-				host.prompt()
-				host.sendline('chmod 700 ~/.ssh')
-				host.prompt()
-				host.sendline("echo '%s' >> ~/.ssh/authorized_keys; " %
-					(public_key))
-				host.prompt()
-				host.sendline('chmod 600 ~/.ssh/authorized_keys')
-				host.prompt()
+                host.sendline('mkdir -p ~/.ssh')
+                host.prompt()
+                host.sendline('chmod 700 ~/.ssh')
+                host.prompt()
+                host.sendline("echo '%s' >> ~/.ssh/authorized_keys; " %
+                        (public_key))
+                host.prompt()
+                host.sendline('chmod 600 ~/.ssh/authorized_keys')
+                host.prompt()
 
-				print 'SSH key setup complete'
+                print 'SSH key setup complete'
 
-			finally:
-				host.logout()
+            finally:
+                host.logout()
 
-		except:
-			pass
+        except:
+            pass
 
 
-	def setup(self):
-		if not self.password == '':
-			try:
-				self.ssh_ping()
-			except error.AutoservRunError:
-				self.ssh_setup_key()
+    def setup(self):
+        if not self.password == '':
+            try:
+                self.ssh_ping()
+            except error.AutoservRunError:
+                self.ssh_setup_key()
diff --git a/server/hypervisor.py b/server/hypervisor.py
index ac213cc..75ad321 100644
--- a/server/hypervisor.py
+++ b/server/hypervisor.py
@@ -5,7 +5,7 @@
 """
 This module defines the Hypervisor class
 
-	Hypervisor: a virtual machine monitor
+        Hypervisor: a virtual machine monitor
 """
 
 __author__ = """
@@ -19,32 +19,32 @@
 
 
 class Hypervisor(installable_object.InstallableObject):
-	"""
-	This class represents a virtual machine monitor.
+    """
+    This class represents a virtual machine monitor.
 
-	Implementation details:
-	This is an abstract class, leaf subclasses must implement the methods
-	listed here and in parent classes which have no implementation. They 
-	may reimplement methods which already have an implementation. You 
-	must not instantiate this class but should instantiate one of those 
-	leaf subclasses.
-	"""
+    Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+    listed here and in parent classes which have no implementation. They
+    may reimplement methods which already have an implementation. You
+    must not instantiate this class but should instantiate one of those
+    leaf subclasses.
+    """
 
-	host = None
-	guests = None
+    host = None
+    guests = None
 
-	def __init__(self, host):
-		super(Hypervisor, self).__init__()
-		self.host= host
+    def __init__(self, host):
+        super(Hypervisor, self).__init__()
+        self.host= host
 
 
-	def new_guest(self):
-		pass
+    def new_guest(self):
+        pass
 
 
-	def delete_guest(self, guest_hostname):
-		pass
+    def delete_guest(self, guest_hostname):
+        pass
 
 
-	def reset_guest(self, guest_hostname):
-		pass
+    def reset_guest(self, guest_hostname):
+        pass
diff --git a/server/installable_object.py b/server/installable_object.py
index 0492595..3c61e05 100644
--- a/server/installable_object.py
+++ b/server/installable_object.py
@@ -5,7 +5,7 @@
 """
 This module defines the InstallableObject class
 
-	InstallableObject: a software package that can be installed on a Host
+        InstallableObject: a software package that can be installed on a Host
 """
 
 __author__ = """
@@ -19,37 +19,37 @@
 
 
 class InstallableObject(object):
-	"""
-	This class represents a software package that can be installed on 
-	a Host.
-	
-	Implementation details:
-	This is an abstract class, leaf subclasses must implement the methods
-	listed here. You must not instantiate this class but should 
-	instantiate one of those leaf subclasses.
-	"""
-	
-	source_material= None
-	
-	def __init__(self):
-		super(InstallableObject, self).__init__()
+    """
+    This class represents a software package that can be installed on
+    a Host.
 
-	
-	def get(self, location):
-		"""
-		Get the source material required to install the object.
-		
-		Through the utils.get() function, the argument passed will be 
-		saved in a temporary location on the LocalHost. That location 
-		is saved in the source_material attribute.
-		
-		Args:
-			location: the path to the source material. This path
-				may be of any type that the utils.get() 
-				function will accept.
-		"""
-		self.source_material= utils.get(location)
+    Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+    listed here. You must not instantiate this class but should
+    instantiate one of those leaf subclasses.
+    """
 
-	
-	def install(self, host):
-		pass
+    source_material= None
+
+    def __init__(self):
+        super(InstallableObject, self).__init__()
+
+
+    def get(self, location):
+        """
+        Get the source material required to install the object.
+
+        Through the utils.get() function, the argument passed will be
+        saved in a temporary location on the LocalHost. That location
+        is saved in the source_material attribute.
+
+        Args:
+                location: the path to the source material. This path
+                        may be of any type that the utils.get()
+                        function will accept.
+        """
+        self.source_material= utils.get(location)
+
+
+    def install(self, host):
+        pass
diff --git a/server/kernel.py b/server/kernel.py
index 63743cd..5fcaba7 100644
--- a/server/kernel.py
+++ b/server/kernel.py
@@ -5,7 +5,7 @@
 """
 This module defines the Kernel class
 
-	Kernel: an os kernel
+        Kernel: an os kernel
 """
 
 __author__ = """
@@ -19,27 +19,27 @@
 
 
 class Kernel(installable_object.InstallableObject):
-	"""
-	This class represents a kernel.
-	
-	It is used to obtain a built kernel or create one from source and 
-	install it on a Host.
-	
-	Implementation details:
-	This is an abstract class, leaf subclasses must implement the methods
-	listed here and in parent classes which have no implementation. They 
-	may reimplement methods which already have an implementation. You 
-	must not instantiate this class but should instantiate one of those 
-	leaf subclasses.
-	"""
-	
-	def get_version():
-		pass
+    """
+    This class represents a kernel.
+
+    It is used to obtain a built kernel or create one from source and
+    install it on a Host.
+
+    Implementation details:
+    This is an abstract class; leaf subclasses must implement the methods
+    listed here and in parent classes which have no implementation. They
+    may reimplement methods which already have an implementation. You
+    must not instantiate this class but should instantiate one of those
+    leaf subclasses.
+    """
+
+    def get_version():
+        pass
 
 
-	def get_image_name():
-		pass
+    def get_image_name():
+        pass
 
 
-	def get_initrd_name():
-		pass
+    def get_initrd_name():
+        pass
diff --git a/server/kvm.py b/server/kvm.py
index 72e3e7d..0e57731 100644
--- a/server/kvm.py
+++ b/server/kvm.py
@@ -5,7 +5,7 @@
 """
 This module defines the KVM class
 
-	KVM: a KVM virtual machine monitor
+        KVM: a KVM virtual machine monitor
 """
 
 __author__ = """
@@ -31,15 +31,15 @@
 _check_process_script= """\
 if [ -f "%(pid_file_name)s" ]
 then
-	pid=$(cat "%(pid_file_name)s")
-	if [ -L /proc/$pid/exe ] && stat /proc/$pid/exe | 
-		grep -q --  "-> \`%(qemu_binary)s\'\$"
-	then
-		echo "process present"
-	else
-		rm -f "%(pid_file_name)s"
-		rm -f "%(monitor_file_name)s"
-	fi
+        pid=$(cat "%(pid_file_name)s")
+        if [ -L /proc/$pid/exe ] && stat /proc/$pid/exe |
+                grep -q --  "-> \`%(qemu_binary)s\'\$"
+        then
+                echo "process present"
+        else
+                rm -f "%(pid_file_name)s"
+                rm -f "%(monitor_file_name)s"
+        fi
 fi
 """
 
@@ -54,430 +54,430 @@
 _remove_modules_script= """\
 if $(grep -q "^kvm_intel [[:digit:]]\+ 0" /proc/modules)
 then
-	rmmod kvm-intel
+        rmmod kvm-intel
 fi
 
 if $(grep -q "^kvm_amd [[:digit:]]\+ 0" /proc/modules)
 then
-	rmmod kvm-amd
+        rmmod kvm-amd
 fi
 
 if $(grep -q "^kvm [[:digit:]]\+ 0" /proc/modules)
 then
-	rmmod kvm
+        rmmod kvm
 fi
 """
 
 
 class KVM(hypervisor.Hypervisor):
-	"""
-	This class represents a KVM virtual machine monitor.
+    """
+    This class represents a KVM virtual machine monitor.
 
-	Implementation details:
-	This is a leaf class in an abstract class hierarchy, it must 
-	implement the unimplemented methods in parent classes.
-	"""
+    Implementation details:
+    This is a leaf class in an abstract class hierarchy; it must
+    implement the unimplemented methods in parent classes.
+    """
 
-	build_dir= None
-	pid_dir= None
-	support_dir= None
-	addresses= []
-	insert_modules= True
-	modules= {}
+    build_dir= None
+    pid_dir= None
+    support_dir= None
+    addresses= []
+    insert_modules= True
+    modules= {}
 
 
-	def __del__(self):
-		"""
-		Destroy a KVM object.
+    def __del__(self):
+        """
+        Destroy a KVM object.
 
-		Guests managed by this hypervisor that are still running will 
-		be killed.
-		"""
-		self.deinitialize()
+        Guests managed by this hypervisor that are still running will
+        be killed.
+        """
+        self.deinitialize()
 
 
-	def _insert_modules(self):
-		"""
-		Insert the kvm modules into the kernel.
+    def _insert_modules(self):
+        """
+        Insert the kvm modules into the kernel.
 
-		The modules inserted are the ones from the build directory, NOT 
-		the ones from the kernel.
+        The modules inserted are the ones from the build directory, NOT
+        the ones from the kernel.
 
-		This function should only be called after install(). It will
-		check that the modules are not already loaded before attempting
-		to insert them.
-		"""
-		cpu_flags= self.host.run('cat /proc/cpuinfo | '
-			'grep -e "^flags" | head -1 | cut -d " " -f 2-'
-			).stdout.strip()
+        This function should only be called after install(). It will
+        check that the modules are not already loaded before attempting
+        to insert them.
+        """
+        cpu_flags= self.host.run('cat /proc/cpuinfo | '
+                'grep -e "^flags" | head -1 | cut -d " " -f 2-'
+                ).stdout.strip()
 
-		if cpu_flags.find('vmx') != -1:
-			module_type= "intel"
-		elif cpu_flags.find('svm') != -1:
-			module_type= "amd"
-		else:
-			raise error.AutoservVirtError("No harware "
-				"virtualization extensions found, "
-				"KVM cannot run")
+        if cpu_flags.find('vmx') != -1:
+            module_type= "intel"
+        elif cpu_flags.find('svm') != -1:
+            module_type= "amd"
+        else:
+            raise error.AutoservVirtError("No harware "
+                    "virtualization extensions found, "
+                    "KVM cannot run")
 
-		self.host.run('if ! $(grep -q "^kvm " /proc/modules); '
-			'then insmod "%s"; fi' % (utils.sh_escape(
-			os.path.join(self.build_dir, "kernel/kvm.ko")),))
-		if module_type == "intel":
-			self.host.run('if ! $(grep -q "^kvm_intel " '
-				'/proc/modules); then insmod "%s"; fi' % 
-				(utils.sh_escape(os.path.join(self.build_dir, 
-				"kernel/kvm-intel.ko")),))
-		elif module_type == "amd":
-			self.host.run('if ! $(grep -q "^kvm_amd " '
-				'/proc/modules); then insmod "%s"; fi' % 
-				(utils.sh_escape(os.path.join(self.build_dir, 
-				"kernel/kvm-amd.ko")),))
+        self.host.run('if ! $(grep -q "^kvm " /proc/modules); '
+                'then insmod "%s"; fi' % (utils.sh_escape(
+                os.path.join(self.build_dir, "kernel/kvm.ko")),))
+        if module_type == "intel":
+            self.host.run('if ! $(grep -q "^kvm_intel " '
+                    '/proc/modules); then insmod "%s"; fi' %
+                    (utils.sh_escape(os.path.join(self.build_dir,
+                    "kernel/kvm-intel.ko")),))
+        elif module_type == "amd":
+            self.host.run('if ! $(grep -q "^kvm_amd " '
+                    '/proc/modules); then insmod "%s"; fi' %
+                    (utils.sh_escape(os.path.join(self.build_dir,
+                    "kernel/kvm-amd.ko")),))
 
 
-	def _remove_modules(self):
-		"""
-		Remove the kvm modules from the kernel.
+    def _remove_modules(self):
+        """
+        Remove the kvm modules from the kernel.
 
-		This function checks that they're not in use before trying to 
-		remove them.
-		"""
-		self.host.run(_remove_modules_script)
+        This function checks that they're not in use before trying to
+        remove them.
+        """
+        self.host.run(_remove_modules_script)
 
 
-	def install(self, addresses, build=True, insert_modules=True, syncdir=None):
-		"""
-		Compile the kvm software on the host that the object was 
-		initialized with.
+    def install(self, addresses, build=True, insert_modules=True, syncdir=None):
+        """
+        Compile the kvm software on the host that the object was
+        initialized with.
 
-		The kvm kernel modules are compiled, for this, the kernel
-		sources must be available. A custom qemu is also compiled.
-		Note that 'make install' is not run, the kernel modules and 
-		qemu are run from where they were built, therefore not 
-		conflicting with what might already be installed.
+        The kvm kernel modules are compiled, for this, the kernel
+        sources must be available. A custom qemu is also compiled.
+        Note that 'make install' is not run, the kernel modules and
+        qemu are run from where they were built, therefore not
+        conflicting with what might already be installed.
 
-		Args:
-			addresses: a list of dict entries of the form 
-				{"mac" : "xx:xx:xx:xx:xx:xx", 
-				"ip" : "yyy.yyy.yyy.yyy"} where x and y 
-				are replaced with sensible values. The ip 
-				address may be a hostname or an IPv6 instead.
+        Args:
+                addresses: a list of dict entries of the form
+                        {"mac" : "xx:xx:xx:xx:xx:xx",
+                        "ip" : "yyy.yyy.yyy.yyy"} where x and y
+                        are replaced with sensible values. The ip
+                        address may be a hostname or an IPv6 instead.
 
-				When a new virtual machine is created, the 
-				first available entry in that list will be 
-				used. The network card in the virtual machine 
-				will be assigned the specified mac address and
-				autoserv will use the specified ip address to 
-				connect to the virtual host via ssh. The virtual
-				machine os must therefore be configured to 
-				configure its network with the ip corresponding 
-				to the mac.
-			build: build kvm from the source material, if False,
-				it is assumed that the package contains the 
-				source tree after a 'make'.
-			insert_modules: build kvm modules from the source 
-				material and insert them. Otherwise, the 
-				running kernel is assumed to already have
-				kvm support and nothing will be done concerning
-				the modules.
+                        When a new virtual machine is created, the
+                        first available entry in that list will be
+                        used. The network card in the virtual machine
+                        will be assigned the specified mac address and
+                        autoserv will use the specified ip address to
+                        connect to the virtual host via ssh. The virtual
+                        machine os must therefore be set up to
+                        configure its network with the ip corresponding
+                        to the mac.
+                build: build kvm from the source material, if False,
+                        it is assumed that the package contains the
+                        source tree after a 'make'.
+                insert_modules: build kvm modules from the source
+                        material and insert them. Otherwise, the
+                        running kernel is assumed to already have
+                        kvm support and nothing will be done concerning
+                        the modules.
 
-		TODO(poirier): check dependencies before building
-		kvm needs:
-		libasound2-dev
-		libsdl1.2-dev (or configure qemu with --disable-gfx-check, how?)
-		bridge-utils
-		"""
-		self.addresses= [
-			{"mac" : address["mac"], 
-			"ip" : address["ip"],
-			"is_used" : False} for address in addresses]
+        TODO(poirier): check dependencies before building
+        kvm needs:
+        libasound2-dev
+        libsdl1.2-dev (or configure qemu with --disable-gfx-check, how?)
+        bridge-utils
+        """
+        self.addresses= [
+                {"mac" : address["mac"],
+                "ip" : address["ip"],
+                "is_used" : False} for address in addresses]
 
-		self.build_dir = self.host.get_tmp_dir()
-		self.support_dir= self.host.get_tmp_dir()
+        self.build_dir = self.host.get_tmp_dir()
+        self.support_dir= self.host.get_tmp_dir()
 
-		self.host.run('echo "%s" > "%s"' % (
-			utils.sh_escape(_qemu_ifup_script),
-			utils.sh_escape(os.path.join(self.support_dir, 
-				"qemu-ifup.sh")),))
-		self.host.run('chmod a+x "%s"' % (
-			utils.sh_escape(os.path.join(self.support_dir, 
-				"qemu-ifup.sh")),))
+        self.host.run('echo "%s" > "%s"' % (
+                utils.sh_escape(_qemu_ifup_script),
+                utils.sh_escape(os.path.join(self.support_dir,
+                        "qemu-ifup.sh")),))
+        self.host.run('chmod a+x "%s"' % (
+                utils.sh_escape(os.path.join(self.support_dir,
+                        "qemu-ifup.sh")),))
 
-		self.host.send_file(self.source_material, self.build_dir)
-		remote_source_material= os.path.join(self.build_dir, 
-				os.path.basename(self.source_material))
+        self.host.send_file(self.source_material, self.build_dir)
+        remote_source_material= os.path.join(self.build_dir,
+                        os.path.basename(self.source_material))
 
-		self.build_dir= utils.unarchive(self.host, 
-			remote_source_material)
+        self.build_dir= utils.unarchive(self.host,
+                remote_source_material)
 
-		if insert_modules:
-			configure_modules= ""
-			self.insert_modules= True
-		else:
-			configure_modules= "--with-patched-kernel "
-			self.insert_modules= False
+        if insert_modules:
+            configure_modules= ""
+            self.insert_modules= True
+        else:
+            configure_modules= "--with-patched-kernel "
+            self.insert_modules= False
 
-		# build
-		if build:
-			try:
-				self.host.run('make -C "%s" clean' % (
-					utils.sh_escape(self.build_dir),),
-					timeout=600)
-			except error.AutoservRunError:
-				# directory was already clean and contained 
-				# no makefile
-				pass
-			self.host.run('cd "%s" && ./configure %s' % (
-				utils.sh_escape(self.build_dir), 
-				configure_modules,), timeout=600)
-			if syncdir:
-				cmd = 'cd "%s/kernel" && make sync LINUX=%s' % (
-				utils.sh_escape(self.build_dir),
-				utils.sh_escape(syncdir))
-				self.host.run(cmd)
-			self.host.run('make -j%d -C "%s"' % (
-				self.host.get_num_cpu() * 2, 
-				utils.sh_escape(self.build_dir),), timeout=3600)
-			# remember path to modules
-			self.modules['kvm'] = "%s" %(
-				utils.sh_escape(os.path.join(self.build_dir, 
-				"kernel/kvm.ko")))
-			self.modules['kvm-intel'] = "%s" %(
-				utils.sh_escape(os.path.join(self.build_dir, 
-				"kernel/kvm-intel.ko")))
-			self.modules['kvm-amd'] = "%s" %(
-				utils.sh_escape(os.path.join(self.build_dir, 
-				"kernel/kvm-amd.ko")))
-			print self.modules
+        # build
+        if build:
+            try:
+                self.host.run('make -C "%s" clean' % (
+                        utils.sh_escape(self.build_dir),),
+                        timeout=600)
+            except error.AutoservRunError:
+                # directory was already clean and contained
+                # no makefile
+                pass
+            self.host.run('cd "%s" && ./configure %s' % (
+                    utils.sh_escape(self.build_dir),
+                    configure_modules,), timeout=600)
+            if syncdir:
+                cmd = 'cd "%s/kernel" && make sync LINUX=%s' % (
+                utils.sh_escape(self.build_dir),
+                utils.sh_escape(syncdir))
+                self.host.run(cmd)
+            self.host.run('make -j%d -C "%s"' % (
+                    self.host.get_num_cpu() * 2,
+                    utils.sh_escape(self.build_dir),), timeout=3600)
+            # remember path to modules
+            self.modules['kvm'] = "%s" %(
+                    utils.sh_escape(os.path.join(self.build_dir,
+                    "kernel/kvm.ko")))
+            self.modules['kvm-intel'] = "%s" %(
+                    utils.sh_escape(os.path.join(self.build_dir,
+                    "kernel/kvm-intel.ko")))
+            self.modules['kvm-amd'] = "%s" %(
+                    utils.sh_escape(os.path.join(self.build_dir,
+                    "kernel/kvm-amd.ko")))
+            print self.modules
 
-		self.initialize()
+        self.initialize()
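
A hypothetical end-to-end setup; the tarball path, mac addresses and ips are
made up:

    kvm = KVM(host)
    kvm.get('/usr/local/src/kvm-33.tar.gz')
    kvm.install(addresses=[
            {'mac': '02:00:00:00:01:01', 'ip': '192.168.1.101'},
            {'mac': '02:00:00:00:01:02', 'ip': '192.168.1.102'}])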
 
 
-	def initialize(self):
-		"""
-		Initialize the hypervisor.
+    def initialize(self):
+        """
+        Initialize the hypervisor.
 
-		Loads needed kernel modules and creates temporary directories.
-		The logic is that you could compile once and 
-		initialize - deinitialize many times. But why you would do that
-		has yet to be figured.
+        Loads needed kernel modules and creates temporary directories.
+        The logic is that you could compile once and then
+        initialize/deinitialize many times; why you would want to do
+        that has yet to be figured out.
 
-		Raises:
-			AutoservVirtError: cpuid doesn't report virtualization 
-				extentions (vmx for intel or svm for amd), in
-				this case, kvm cannot run.
-		"""
-		self.pid_dir= self.host.get_tmp_dir()
+        Raises:
+                AutoservVirtError: cpuid doesn't report virtualization
+                        extensions (vmx for intel or svm for amd), in
+                        this case, kvm cannot run.
+        """
+        self.pid_dir= self.host.get_tmp_dir()
 
-		if self.insert_modules:
-			self._remove_modules()
-			self._insert_modules()
+        if self.insert_modules:
+            self._remove_modules()
+            self._insert_modules()
 
 
-	def deinitialize(self):
-		"""
-		Terminate the hypervisor.
+    def deinitialize(self):
+        """
+        Terminate the hypervisor.
 
-		Kill all the virtual machines that are still running and
-		unload the kernel modules.
-		"""
-		self.refresh_guests()
-		for address in self.addresses:
-			if address["is_used"]:
-				self.delete_guest(address["ip"])
-		self.pid_dir= None
+        Kill all the virtual machines that are still running and
+        unload the kernel modules.
+        """
+        self.refresh_guests()
+        for address in self.addresses:
+            if address["is_used"]:
+                self.delete_guest(address["ip"])
+        self.pid_dir= None
 
-		if self.insert_modules:
-			self._remove_modules()
+        if self.insert_modules:
+            self._remove_modules()
 
 
-	def new_guest(self, qemu_options):
-		"""
-		Start a new guest ("virtual machine").
+    def new_guest(self, qemu_options):
+        """
+        Start a new guest ("virtual machine").
 
-		Returns:
-			The ip that was picked from the list supplied to 
-			install() and assigned to this guest.
+        Returns:
+                The ip that was picked from the list supplied to
+                install() and assigned to this guest.
 
-		Raises:
-			AutoservVirtError: no more addresses are available.
-		"""
-		for address in self.addresses:
-			if not address["is_used"]:
-				break
-		else:
-			raise error.AutoservVirtError(
-				"No more addresses available")
+        Raises:
+                AutoservVirtError: no more addresses are available.
+        """
+        for address in self.addresses:
+            if not address["is_used"]:
+                break
+        else:
+            raise error.AutoservVirtError(
+                    "No more addresses available")
 
-		retval= self.host.run(
-			'%s'
-			# this is the line of options that can be modified
-			' %s '
-			'-pidfile "%s" -daemonize -nographic '
-			#~ '-serial telnet::4444,server '
-			'-monitor unix:"%s",server,nowait '
-			'-net nic,macaddr="%s" -net tap,script="%s" -L "%s"' % (
-			utils.sh_escape(os.path.join(
-				self.build_dir, 
-				"qemu/x86_64-softmmu/qemu-system-x86_64")),
-			qemu_options, 
-			utils.sh_escape(os.path.join(
-				self.pid_dir, 
-				"vhost%s_pid" % (address["ip"],))), 
-			utils.sh_escape(os.path.join(
-				self.pid_dir, 
-				"vhost%s_monitor" % (address["ip"],))), 
-			utils.sh_escape(address["mac"]),
-			utils.sh_escape(os.path.join(
-				self.support_dir, 
-				"qemu-ifup.sh")),
-			utils.sh_escape(os.path.join(
-				self.build_dir, 
-				"qemu/pc-bios")),))
+        retval= self.host.run(
+                '%s'
+                # this is the line of options that can be modified
+                ' %s '
+                '-pidfile "%s" -daemonize -nographic '
+                #~ '-serial telnet::4444,server '
+                '-monitor unix:"%s",server,nowait '
+                '-net nic,macaddr="%s" -net tap,script="%s" -L "%s"' % (
+                utils.sh_escape(os.path.join(
+                        self.build_dir,
+                        "qemu/x86_64-softmmu/qemu-system-x86_64")),
+                qemu_options,
+                utils.sh_escape(os.path.join(
+                        self.pid_dir,
+                        "vhost%s_pid" % (address["ip"],))),
+                utils.sh_escape(os.path.join(
+                        self.pid_dir,
+                        "vhost%s_monitor" % (address["ip"],))),
+                utils.sh_escape(address["mac"]),
+                utils.sh_escape(os.path.join(
+                        self.support_dir,
+                        "qemu-ifup.sh")),
+                utils.sh_escape(os.path.join(
+                        self.build_dir,
+                        "qemu/pc-bios")),))
 
-		address["is_used"]= True
-		return address["ip"]
+        address["is_used"]= True
+        return address["ip"]
 
 
-	def refresh_guests(self):
-		"""
-		Refresh the list of guests addresses.
+    def refresh_guests(self):
+        """
+        Refresh the list of guest addresses.
 
-		The is_used status will be updated according to the presence
-		of the process specified in the pid file that was written when
-		the virtual machine was started.
+        The is_used status will be updated according to the presence
+        of the process specified in the pid file that was written when
+        the virtual machine was started.
 
-		TODO(poirier): there are a lot of race conditions in this code
-		because the process might terminate on its own anywhere in 
-		between
-		"""
-		for address in self.addresses:
-			if address["is_used"]:
-				pid_file_name= utils.sh_escape(os.path.join(
-					self.pid_dir, 
-					"vhost%s_pid" % (address["ip"],)))
-				monitor_file_name= utils.sh_escape(os.path.join(
-					self.pid_dir, 
-					"vhost%s_monitor" % (address["ip"],)))
-				retval= self.host.run(
-					_check_process_script % {
-					"pid_file_name" : pid_file_name, 
-					"monitor_file_name" : monitor_file_name,
-					"qemu_binary" : utils.sh_escape(
-						os.path.join(self.build_dir, 
-						"qemu/x86_64-softmmu/"
-						"qemu-system-x86_64")),})
-				if (retval.stdout.strip() != 
-					"process present"):
-					address["is_used"]= False
+        TODO(poirier): there are a lot of race conditions in this code
+        because the process might terminate on its own anywhere in
+        between
+        """
+        for address in self.addresses:
+            if address["is_used"]:
+                pid_file_name= utils.sh_escape(os.path.join(
+                        self.pid_dir,
+                        "vhost%s_pid" % (address["ip"],)))
+                monitor_file_name= utils.sh_escape(os.path.join(
+                        self.pid_dir,
+                        "vhost%s_monitor" % (address["ip"],)))
+                retval= self.host.run(
+                        _check_process_script % {
+                        "pid_file_name" : pid_file_name,
+                        "monitor_file_name" : monitor_file_name,
+                        "qemu_binary" : utils.sh_escape(
+                                os.path.join(self.build_dir,
+                                "qemu/x86_64-softmmu/"
+                                "qemu-system-x86_64")),})
+                if (retval.stdout.strip() !=
+                        "process present"):
+                    address["is_used"]= False
 
 
-	def delete_guest(self, guest_hostname):
-		"""
-		Terminate a virtual machine.
+    def delete_guest(self, guest_hostname):
+        """
+        Terminate a virtual machine.
 
-		Args:
-			guest_hostname: the ip (as it was specified in the 
-				address list given to install()) of the guest 
-				to terminate.
+        Args:
+                guest_hostname: the ip (as it was specified in the
+                        address list given to install()) of the guest
+                        to terminate.
 
-		Raises:
-			AutoservVirtError: the guest_hostname argument is
-				invalid
+        Raises:
+                AutoservVirtError: the guest_hostname argument is
+                        invalid
 
-		TODO(poirier): is there a difference in qemu between 
-		sending SIGTEM or quitting from the monitor?
-		TODO(poirier): there are a lot of race conditions in this code
-		because the process might terminate on its own anywhere in 
-		between
-		"""
-		for address in self.addresses:
-			if address["ip"] == guest_hostname:
-				if address["is_used"]:
-					break
-				else:
-					# Will happen if deinitialize() is 
-					# called while guest objects still
-					# exit and these are del'ed after.
-					# In that situation, nothing is to 
-					# be done here, don't throw an error
-					# either because it will print an
-					# ugly message during garbage 
-					# collection. The solution would be to
-					# delete the guest objects before 
-					# calling deinitialize(), this can't be
-					# done by the KVM class, it has no 
-					# reference to those objects and it 
-					# cannot have any either. The Guest 
-					# objects already need to have a 
-					# reference to their managing 
-					# hypervisor. If the hypervisor had a 
-					# reference to the Guest objects it 
-					# manages, it would create a circular 
-					# reference and those objects would 
-					# not be elligible for garbage 
-					# collection. In turn, this means that 
-					# the KVM object would not be 
-					# automatically del'ed at the end of 
-					# the program and guests that are still
-					# running would be left unattended.
-					# Note that this circular reference 
-					# problem could be avoided by using 
-					# weakref's in class KVM but the 
-					# control file will most likely also
-					# have references to the guests.
-					return
-		else:
-			raise error.AutoservVirtError("Unknown guest hostname")
+        TODO(poirier): is there a difference in qemu between
+        sending SIGTERM or quitting from the monitor?
+        TODO(poirier): there are a lot of race conditions in this code
+        because the process might terminate on its own anywhere in
+        between
+        """
+        for address in self.addresses:
+            if address["ip"] == guest_hostname:
+                if address["is_used"]:
+                    break
+                else:
+                    # Will happen if deinitialize() is
+                    # called while guest objects still
+                    # exist and these are del'ed after.
+                    # In that situation, nothing is to
+                    # be done here, don't throw an error
+                    # either because it will print an
+                    # ugly message during garbage
+                    # collection. The solution would be to
+                    # delete the guest objects before
+                    # calling deinitialize(), this can't be
+                    # done by the KVM class, it has no
+                    # reference to those objects and it
+                    # cannot have any either. The Guest
+                    # objects already need to have a
+                    # reference to their managing
+                    # hypervisor. If the hypervisor had a
+                    # reference to the Guest objects it
+                    # manages, it would create a circular
+                    # reference and those objects would
+                    # not be elligible for garbage
+                    # collection. In turn, this means that
+                    # the KVM object would not be
+                    # automatically del'ed at the end of
+                    # the program and guests that are still
+                    # running would be left unattended.
+                    # Note that this circular reference
+                    # problem could be avoided by using
+                    # weakref's in class KVM but the
+                    # control file will most likely also
+                    # have references to the guests.
+                    return
+        else:
+            raise error.AutoservVirtError("Unknown guest hostname")
 
-		pid_file_name= utils.sh_escape(os.path.join(self.pid_dir, 
-			"vhost%s_pid" % (address["ip"],)))
-		monitor_file_name= utils.sh_escape(os.path.join(self.pid_dir, 
-			"vhost%s_monitor" % (address["ip"],)))
+        pid_file_name= utils.sh_escape(os.path.join(self.pid_dir,
+                "vhost%s_pid" % (address["ip"],)))
+        monitor_file_name= utils.sh_escape(os.path.join(self.pid_dir,
+                "vhost%s_monitor" % (address["ip"],)))
 
-		retval= self.host.run(
-			_check_process_script % {
-			"pid_file_name" : pid_file_name, 
-			"monitor_file_name" : monitor_file_name, 
-			"qemu_binary" : utils.sh_escape(os.path.join(
-				self.build_dir, 
-				"qemu/x86_64-softmmu/qemu-system-x86_64")),})
-		if retval.stdout.strip() == "process present":
-			self.host.run('kill $(cat "%s")' %(
-				pid_file_name,))
-			self.host.run('rm -f "%s"' %(
-				pid_file_name,))
-			self.host.run('rm -f "%s"' %(
-				monitor_file_name,))
-		address["is_used"]= False
+        retval= self.host.run(
+                _check_process_script % {
+                "pid_file_name" : pid_file_name,
+                "monitor_file_name" : monitor_file_name,
+                "qemu_binary" : utils.sh_escape(os.path.join(
+                        self.build_dir,
+                        "qemu/x86_64-softmmu/qemu-system-x86_64")),})
+        if retval.stdout.strip() == "process present":
+            self.host.run('kill $(cat "%s")' %(
+                    pid_file_name,))
+            self.host.run('rm -f "%s"' %(
+                    pid_file_name,))
+            self.host.run('rm -f "%s"' %(
+                    monitor_file_name,))
+        address["is_used"]= False
 
 
-	def reset_guest(self, guest_hostname):
-		"""
-		Perform a hard reset on a virtual machine.
+    def reset_guest(self, guest_hostname):
+        """
+        Perform a hard reset on a virtual machine.
 
-		Args:
-			guest_hostname: the ip (as it was specified in the 
-				address list given to install()) of the guest 
-				to terminate.
+        Args:
+                guest_hostname: the ip (as it was specified in the
+                        address list given to install()) of the guest
+                        to reset.
 
-		Raises:
-			AutoservVirtError: the guest_hostname argument is
-				invalid
-		"""
-		for address in self.addresses:
-			if address["ip"] is guest_hostname:
-				if address["is_used"]:
-					break
-				else:
-					raise error.AutoservVirtError("guest "
-						"hostname not in use")
-		else:
-			raise error.AutoservVirtError("Unknown guest hostname")
+        Raises:
+                AutoservVirtError: the guest_hostname argument is
+                        invalid
+        """
+        for address in self.addresses:
+            if address["ip"] is guest_hostname:
+                if address["is_used"]:
+                    break
+                else:
+                    raise error.AutoservVirtError("guest "
+                            "hostname not in use")
+        else:
+            raise error.AutoservVirtError("Unknown guest hostname")
 
-		monitor_file_name= utils.sh_escape(os.path.join(self.pid_dir, 
-			"vhost%s_monitor" % (address["ip"],)))
+        monitor_file_name= utils.sh_escape(os.path.join(self.pid_dir,
+                "vhost%s_monitor" % (address["ip"],)))
 
-		self.host.run('python -c "%s"' % (utils.sh_escape(
-			_hard_reset_script % {
-			"monitor_file_name" : monitor_file_name,}),))
+        self.host.run('python -c "%s"' % (utils.sh_escape(
+                _hard_reset_script % {
+                "monitor_file_name" : monitor_file_name,}),))
diff --git a/server/rpm_kernel.py b/server/rpm_kernel.py
index adaf180..5b8c8c0 100644
--- a/server/rpm_kernel.py
+++ b/server/rpm_kernel.py
@@ -5,7 +5,7 @@
 """
 This module defines the Kernel class
 
-	Kernel: an os kernel
+        Kernel: an OS kernel
 """
 
 __author__ = """
@@ -20,147 +20,147 @@
 
 
 class RPMKernel(kernel.Kernel):
-	"""
-	This class represents a .rpm pre-built kernel.
+    """
+    This class represents a .rpm pre-built kernel.
 
-	It is used to obtain a built kernel and install it on a Host.
+    It is used to obtain a built kernel and install it on a Host.
 
-	Implementation details:
-	This is a leaf class in an abstract class hierarchy, it must
-	implement the unimplemented methods in parent classes.
-	"""
-	def __init__(self):
-		super(RPMKernel, self).__init__()
+    Implementation details:
+    This is a leaf class in an abstract class hierarchy; it must
+    implement the methods left unimplemented by its parent classes.
+    """
+    def __init__(self):
+        super(RPMKernel, self).__init__()
 
-	def install(self, host, label='autoserv',
-		    default=False, kernel_args = '', install_vmlinux=False):
-		"""
-		Install a kernel on the remote host.
-		
-		This will also invoke the guest's bootloader to set this
-		kernel as the default kernel if default=True.
-		
-		Args:
-			host: the host on which to install the kernel
-			[kwargs]: remaining keyword arguments will be passed 
-				to Bootloader.add_kernel()
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				RPMKernel.get() with a .rpm package.
-		"""
-		if len(label) > 15:
-			raise error.AutoservError("label for kernel is too long \
-			(> 15 chars): %s" % label)
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be \
-			specified via get()")
-		rpm = self.source_material
+    def install(self, host, label='autoserv',
+                default=False, kernel_args = '', install_vmlinux=False):
+        """
+        Install a kernel on the remote host.
 
-		remote_tmpdir = host.get_tmp_dir()	
-		remote_rpm = os.path.join(remote_tmpdir, os.path.basename(rpm))
-		rpm_package = utils.run('/usr/bin/rpm -q -p %s' % rpm).stdout
-		vmlinuz = self.get_image_name()
-		host.send_file(rpm, remote_rpm)
-		host.run('rpm -e ' + rpm_package, ignore_status = True)
-		host.run('rpm --force -i ' + remote_rpm)
+        This will also invoke the host's bootloader to set this
+        kernel as the default kernel if default=True.
 
-		# Copy over the uncompressed image if there is one
-                if install_vmlinux:
-			vmlinux = self.get_vmlinux_name()
-			host.run('cd /;rpm2cpio %s | cpio -imuv .%s'
-				% (remote_rpm, vmlinux))
-			host.run('ls ' + vmlinux) # Verify
+        Args:
+                host: the host on which to install the kernel
+                [kwargs]: remaining keyword arguments will be passed
+                        to Bootloader.add_kernel()
 
-		host.bootloader.remove_kernel(label)
-		host.bootloader.add_kernel(vmlinuz, label,
-					   args=kernel_args, default=default)
-		if kernel_args:
-			host.bootloader.add_args(label, kernel_args)
-		if not default:
-			host.bootloader.boot_once(label)
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        RPMKernel.get() with a .rpm package.
+        """
+        if len(label) > 15:
+            raise error.AutoservError("label for kernel is too long \
+            (> 15 chars): %s" % label)
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be \
+            specified via get()")
+        rpm = self.source_material
+
+        remote_tmpdir = host.get_tmp_dir()
+        remote_rpm = os.path.join(remote_tmpdir, os.path.basename(rpm))
+        rpm_package = utils.run('/usr/bin/rpm -q -p %s' % rpm).stdout
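+        # ('rpm -q -p' resolves the rpm file to its installed package
+        # name, so any previously-installed copy can be erased before
+        # the forced reinstall below.)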
+        vmlinuz = self.get_image_name()
+        host.send_file(rpm, remote_rpm)
+        host.run('rpm -e ' + rpm_package, ignore_status = True)
+        host.run('rpm --force -i ' + remote_rpm)
+
+        # Copy over the uncompressed image if there is one
+        if install_vmlinux:
+            vmlinux = self.get_vmlinux_name()
+            host.run('cd /;rpm2cpio %s | cpio -imuv .%s'
+                    % (remote_rpm, vmlinux))
+            host.run('ls ' + vmlinux) # Verify
+
+        host.bootloader.remove_kernel(label)
+        host.bootloader.add_kernel(vmlinuz, label,
+                                   args=kernel_args, default=default)
+        if kernel_args:
+            host.bootloader.add_args(label, kernel_args)
+        if not default:
+            host.bootloader.boot_once(label)
 
 
-	def get_version(self):
-		"""Get the version of the kernel to be installed.
-		
-		Returns:
-			The version string, as would be returned 
-			by 'make kernelrelease'.
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				RPMKernel.get() with a .rpm package.
-		"""
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be \
-			specified via get()")
-		
-		retval = utils.run('rpm -qpi %s | grep Version | \
-		awk \'{print($3);}\'' % utils.sh_escape(self.source_material))
-		return retval.stdout.strip()
+    def get_version(self):
+        """Get the version of the kernel to be installed.
+
+        Returns:
+                The version string, as would be returned
+                by 'make kernelrelease'.
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        RPMKernel.get() with a .rpm package.
+        """
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be \
+            specified via get()")
+
+        retval = utils.run('rpm -qpi %s | grep Version | \
+        awk \'{print($3);}\'' % utils.sh_escape(self.source_material))
+        return retval.stdout.strip()
 
 
-	def get_image_name(self):
-		"""Get the name of the kernel image to be installed.
-		
-		Returns:
-			The full path to the kernel image file as it will be 
-			installed on the host.
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				RPMKernel.get() with a .rpm package.
-		"""
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be \
-			specified via get()")
-		
-		vmlinuz = utils.run('rpm -q -l -p %s \
-		| grep /boot/vmlinuz' % self.source_material).stdout.strip()
-		return vmlinuz
+    def get_image_name(self):
+        """Get the name of the kernel image to be installed.
+
+        Returns:
+                The full path to the kernel image file as it will be
+                installed on the host.
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        RPMKernel.get() with a .rpm package.
+        """
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be \
+            specified via get()")
+
+        vmlinuz = utils.run('rpm -q -l -p %s \
+        | grep /boot/vmlinuz' % self.source_material).stdout.strip()
+        return vmlinuz
 
 
-	def get_vmlinux_name(self):
-		"""Get the name of the kernel image to be installed.
-		
-		Returns:
-			The full path to the kernel image file as it will be 
-			installed on the host. It is the uncompressed and
-			unstripped version of the kernel that can be used with
-			oprofile.
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				RPMKernel.get() with a .rpm package.
-		"""
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be \
-			specified via get()")
-		
-		vmlinux = utils.run('rpm -q -l -p %s \
-		| grep /boot/vmlinux' % self.source_material).stdout.strip()
-		return vmlinux
+    def get_vmlinux_name(self):
+        """Get the name of the kernel image to be installed.
+
+        Returns:
+                The full path to the kernel image file as it will be
+                installed on the host. It is the uncompressed and
+                unstripped version of the kernel that can be used with
+                oprofile.
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        RPMKernel.get() with a .rpm package.
+        """
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be \
+            specified via get()")
+
+        vmlinux = utils.run('rpm -q -l -p %s \
+        | grep /boot/vmlinux' % self.source_material).stdout.strip()
+        return vmlinux
 
 
-	def get_initrd_name(self):
-		"""Get the name of the initrd file to be installed.
-		
-		Returns:
-			The full path to the initrd file as it will be 
-			installed on the host. If the package includes no 
-			initrd file, None is returned
-		
-		Raises:
-			AutoservError: no package has yet been obtained. Call
-				RPMKernel.get() with a .rpm package.
-		"""
-		if self.source_material is None:
-			raise error.AutoservError("A kernel must first be \
-			specified via get()")
+    def get_initrd_name(self):
+        """Get the name of the initrd file to be installed.
 
-		res = utils.run('rpm -q -l -p %s \
-		| grep /boot/initrd' % self.source_material, ignore_status=True)
-		if res.exit_status:
-			return None
-		return res.stdout.strip()
+        Returns:
+                The full path to the initrd file as it will be
+                installed on the host. If the package includes no
+                initrd file, None is returned.
+
+        Raises:
+                AutoservError: no package has yet been obtained. Call
+                        RPMKernel.get() with a .rpm package.
+        """
+        if self.source_material is None:
+            raise error.AutoservError("A kernel must first be \
+            specified via get()")
+
+        res = utils.run('rpm -q -l -p %s \
+        | grep /boot/initrd' % self.source_material, ignore_status=True)
+        if res.exit_status:
+            return None
+        return res.stdout.strip()
diff --git a/server/samples/autoserv_console.srv b/server/samples/autoserv_console.srv
index 50bb62a..b0c69e6 100755
--- a/server/samples/autoserv_console.srv
+++ b/server/samples/autoserv_console.srv
@@ -9,12 +9,12 @@
 # -Steve Howard ([email protected])
 
 if machines:
-	host = hosts.SSHHost(machines[0])
+    host = hosts.SSHHost(machines[0])
 
 try:
-	import IPython
-	ipshell = IPython.Shell.IPShellEmbed(argv=[])
-	ipshell()
+    import IPython
+    ipshell = IPython.Shell.IPShellEmbed(argv=[])
+    ipshell()
 except ImportError:
-	import code
-	code.interact("Autoserv console", raw_input, locals())
+    import code
+    code.interact("Autoserv console", raw_input, locals())
diff --git a/server/samples/continuous_reboot.srv b/server/samples/continuous_reboot.srv
index 900273b..adbfc43 100644
--- a/server/samples/continuous_reboot.srv
+++ b/server/samples/continuous_reboot.srv
@@ -1,6 +1,6 @@
 def run(machine):
-	host = hosts.SSHHost(machine)
-	while True:
-		host.reboot()
+    host = hosts.SSHHost(machine)
+    while True:
+        host.reboot()
 
 job.parallel_simple(run, machines)
diff --git a/server/samples/failtest.srv b/server/samples/failtest.srv
index 975ab83..a793e59 100644
--- a/server/samples/failtest.srv
+++ b/server/samples/failtest.srv
@@ -1,6 +1,6 @@
 def run(machine):
-	host = hosts.SSHHost(machine)
-	at = autotest.Autotest(host)
-	at.run_test('failtest')
+    host = hosts.SSHHost(machine)
+    at = autotest.Autotest(host)
+    at.run_test('failtest')
 
 job.parallel_simple(run, machines)
diff --git a/server/samples/info.srv b/server/samples/info.srv
index 0dd7f88..a5d2350 100644
--- a/server/samples/info.srv
+++ b/server/samples/info.srv
@@ -1,8 +1,8 @@
 def run(machine):
-	host = hosts.SSHHost(machine, initialize = False)
-	print 'Uptime:         ' + host.check_uptime()
-	print 'Arch:           ' + host.get_arch()
-	print 'Kernel ver:     ' + host.get_kernel_ver()
-	print 'Kernel cmdline: ' + host.get_cmdline()
+    host = hosts.SSHHost(machine, initialize = False)
+    print 'Uptime:         ' + host.check_uptime()
+    print 'Arch:           ' + host.get_arch()
+    print 'Kernel ver:     ' + host.get_kernel_ver()
+    print 'Kernel cmdline: ' + host.get_cmdline()
 
 job.parallel_simple(run, machines)
diff --git a/server/samples/kernbench.srv b/server/samples/kernbench.srv
index 174e6e7..a8d84e3 100644
--- a/server/samples/kernbench.srv
+++ b/server/samples/kernbench.srv
@@ -1,6 +1,6 @@
 def run(machine):
-	host = hosts.SSHHost(machine)
-	at = autotest.Autotest(host)
-	at.run_test('kernbench', iterations=2, threads=5)
+    host = hosts.SSHHost(machine)
+    at = autotest.Autotest(host)
+    at.run_test('kernbench', iterations=2, threads=5)
 
 job.parallel_simple(run, machines)
diff --git a/server/samples/netperf-guest-to-host-far.srv b/server/samples/netperf-guest-to-host-far.srv
index 57b67ae..10ac8c7 100644
--- a/server/samples/netperf-guest-to-host-far.srv
+++ b/server/samples/netperf-guest-to-host-far.srv
@@ -2,19 +2,19 @@
 
 
 def check_kernel(host, version, package):
-	if host.run("uname -r").stdout.strip() != version:
-		print "XXXX installing kernel on %s" % (host.hostname,)
-		package.install(host)
-		
-		host.reboot()
-		host.wait_up()
+    if host.run("uname -r").stdout.strip() != version:
+        print "XXXX installing kernel on %s" % (host.hostname,)
+        package.install(host)
+
+        host.reboot()
+        host.wait_up()
 
 
 def install_kvm(kvm_on_host_var_name, host, source, addresses):
-	exec ("global %(var_name)s\n"
-		"%(var_name)s= kvm.KVM(host)\n"
-		"%(var_name)s.get(source)\n"
-		"%(var_name)s.install(addresses)\n" % {"var_name": kvm_on_host_var_name})
+    exec ("global %(var_name)s\n"
+            "%(var_name)s= kvm.KVM(host)\n"
+            "%(var_name)s.get(source)\n"
+            "%(var_name)s.install(addresses)\n" % {"var_name": kvm_on_host_var_name})
 
 
 print "XXXX creating SSHHost objects"
diff --git a/server/samples/parallel.srv b/server/samples/parallel.srv
index 2fdfc57..24dc1f0 100644
--- a/server/samples/parallel.srv
+++ b/server/samples/parallel.srv
@@ -1,7 +1,7 @@
 at = autotest.Autotest()
 
 def run(machine):
-	host = hosts.SSHHost(machine)
-	at.run_test('sleeptest', host = host)
+    host = hosts.SSHHost(machine)
+    at.run_test('sleeptest', host = host)
 
 job.parallel_simple(run, machines)
diff --git a/server/samples/parallel_kvm.srv b/server/samples/parallel_kvm.srv
index 682c020..101b51d 100644
--- a/server/samples/parallel_kvm.srv
+++ b/server/samples/parallel_kvm.srv
@@ -21,9 +21,9 @@
 num_guests= 5
 g= []
 for i in range(num_guests):
-	g.append(hosts.KVMGuest(kvm_on_remote_host, qemu_options))
+    g.append(hosts.KVMGuest(kvm_on_remote_host, qemu_options))
 for i in range(num_guests):
-	g[i].wait_up()
+    g[i].wait_up()
 
 
 print "XXXX running transfers"
@@ -35,10 +35,10 @@
 args= range(1, num_guests)
 
 def f(i):
-	print "This is %s" % i
-	tmp_dir= g[i].get_tmp_dir()
-	g[i].run('scp "%s":"%s" "%s"' % (g[0].hostname, big_file, tmp_dir,))
-	print g[i].run('sha1sum "%s"' % (os.path.join(tmp_dir, "big_file"),)).stdout.strip()
+    print "This is %s" % i
+    tmp_dir= g[i].get_tmp_dir()
+    g[i].run('scp "%s":"%s" "%s"' % (g[0].hostname, big_file, tmp_dir,))
+    print g[i].run('sha1sum "%s"' % (os.path.join(tmp_dir, "big_file"),)).stdout.strip()
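+    # (the printed sha1sum allows the transferred copy to be checked by
+    # eye against the source file's checksum in the job output)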
 
 job.parallel_simple(f, args)
 
diff --git a/server/samples/parallel_sleeptest.srv b/server/samples/parallel_sleeptest.srv
index 49bb0d1..c46353e 100644
--- a/server/samples/parallel_sleeptest.srv
+++ b/server/samples/parallel_sleeptest.srv
@@ -22,34 +22,34 @@
 # Specify the path to the client control files and the tag names
 # for the respective jobs here.
 tests = [("client/tests/sleeptest/control", "sleeptag0"),
-	 ("client/tests/sleeptest/control", "sleeptag1"),
-	 ]
+         ("client/tests/sleeptest/control", "sleeptag1"),
+         ]
 
 def run_client(at, machine_name, machine_num, instance):
-	control = open(os.path.join(job.autodir,tests[instance][0])).read()
-	'''
-	The get_sync_control_file method basically does the setup of the barriers
-	required to start these multiple tests at the same time and returns the
-	modified control file (that contains the barrier code added to it)
-	Check client/common_lib/utils.py for detailed documentation of how this
-	method sets up the barriers.
-	'''
-	control_new = utils.get_sync_control_file(control, machine_name,
-					machine_num, instance, len(tests))
-	'''
-	This control file is now simply passed in to the run method along with
-	a tag name of the test and a 'parallel_flag' that identifies this scenario
-	of running multiple tests on the same machine at the same time.
-	'''
-	at.run(control_new, tag='%s' % tests[instance][1], parallel_flag=True)
+    control = open(os.path.join(job.autodir,tests[instance][0])).read()
+    '''
+    The get_sync_control_file method basically does the setup of the barriers
+    required to start these multiple tests at the same time and returns the
+    modified control file (that contains the barrier code added to it)
+    Check client/common_lib/utils.py for detailed documentation of how this
+    method sets up the barriers.
+    '''
+    control_new = utils.get_sync_control_file(control, machine_name,
+                                    machine_num, instance, len(tests))
+    '''
+    This control file is now simply passed in to the run method along with
+    a tag name of the test and a 'parallel_flag' that identifies this scenario
+    of running multiple tests on the same machine at the same time.
+    '''
+    at.run(control_new, tag='%s' % tests[instance][1], parallel_flag=True)
 
 def main(machine_name, machine_num):
-	host = hosts.SSHHost(machine_name)
-	at = autotest.Autotest(host)
-	at.install()
+    host = hosts.SSHHost(machine_name)
+    at = autotest.Autotest(host)
+    at.install()
 
-	parallel([subcommand(run_client, [at, machine_name, machine_num, i])
-                  for i in range(len(tests))])
+    parallel([subcommand(run_client, [at, machine_name, machine_num, i])
+              for i in range(len(tests))])
 
 parallel([subcommand(main, [machines[i], i], machines[i])
-	  for i in range(len(machines))])
+          for i in range(len(machines))])
diff --git a/server/samples/profilertest.srv b/server/samples/profilertest.srv
index 2743b76..42f32f2 100644
--- a/server/samples/profilertest.srv
+++ b/server/samples/profilertest.srv
@@ -5,63 +5,63 @@
 
 
 def add_profilers(at, profilers, timeout_sync, timeout_start, timeout_stop,
-			machines, name):
-	control_file = []
-	for profiler in profilers:
-		control_file.append("job.profilers.add(%s)"
-					% str(profiler)[1:-1])
+                        machines, name):
+    control_file = []
+    for profiler in profilers:
+        control_file.append("job.profilers.add(%s)"
+                                % str(profiler)[1:-1])
 
-	control_file.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
-			% (timeout_sync, timeout_start, timeout_stop,
-				at.host.hostname, "PROF_MASTER", str(machines)))
+    control_file.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
+                    % (timeout_sync, timeout_start, timeout_stop,
+                            at.host.hostname, "PROF_MASTER", str(machines)))
 
-	for profiler in profilers:
-		control_file.append("job.profilers.delete('%s')" % profiler[0])
+    for profiler in profilers:
+        control_file.append("job.profilers.delete('%s')" % profiler[0])
 
-	params = ["\n".join(control_file), "profile-" + profiler[0], at.host]
-	return subcommand(at.run, params, name)
+    params = ["\n".join(control_file), "profile-" + profiler[0], at.host]
+    return subcommand(at.run, params, name)
 
 
 def wait_for_profilers(machines, timeout = 180):
-	# wait until the profilers have started
-	sync_bar = barrier("PROF_MASTER", "sync_profilers",
-		timeout, port=63100)
-	sync_bar.rendevous_servers("PROF_MASTER", *machines)
+    # synchronize with all machines before the profilers start
+    sync_bar = barrier("PROF_MASTER", "sync_profilers",
+            timeout, port=63100)
+    sync_bar.rendevous_servers("PROF_MASTER", *machines)
 
 
 def start_profilers(machines, timeout = 180):
-	# wait until the profilers have started
-	start_bar = barrier("PROF_MASTER", "start_profilers",
-		timeout, port=63100)
-	start_bar.rendevous_servers("PROF_MASTER", *machines)
+    # wait until the profilers have started
+    start_bar = barrier("PROF_MASTER", "start_profilers",
+            timeout, port=63100)
+    start_bar.rendevous_servers("PROF_MASTER", *machines)
 
 
 def stop_profilers(machines, timeout = 120):
-	stop_bar = barrier("PROF_MASTER", "stop_profilers", timeout, port=63100)
-	stop_bar.rendevous_servers("PROF_MASTER", *machines)
+    stop_bar = barrier("PROF_MASTER", "stop_profilers", timeout, port=63100)
+    stop_bar.rendevous_servers("PROF_MASTER", *machines)
 
 
 def server_sleep_test(seconds):
-	wait_for_profilers(machines)
-	start_profilers(machines)
-	for i in range(seconds):
-		print "%d of %d" % (i, seconds)
-		time.sleep(1)
-	stop_profilers(machines)
+    wait_for_profilers(machines)
+    start_profilers(machines)
+    for i in range(seconds):
+        print "%d of %d" % (i, seconds)
+        time.sleep(1)
+    stop_profilers(machines)
 
 
 def main():
-	timeout_sync = 180
-	timeout_start = 60
-	timeout_stop = 60
-	profilers = [["vmstat"], ["iostat"]]
+    timeout_sync = 180
+    timeout_start = 60
+    timeout_stop = 60
+    profilers = [["vmstat"], ["iostat"]]
 
-	tests = [subcommand(server_sleep_test, [20], "server_sleep_test")]
-	for at in at_hosts:
-		name = "profiled-%s" % at.host.hostname
-		tests.append(add_profilers(at, profilers, timeout_sync,
-				timeout_start, timeout_stop, machines, name))
-	parallel(tests)
+    tests = [subcommand(server_sleep_test, [20], "server_sleep_test")]
+    for at in at_hosts:
+        name = "profiled-%s" % at.host.hostname
+        tests.append(add_profilers(at, profilers, timeout_sync,
+                        timeout_start, timeout_stop, machines, name))
+    parallel(tests)
 
 
 main()
diff --git a/server/samples/reboot.srv b/server/samples/reboot.srv
index 670bf5c..5837570 100644
--- a/server/samples/reboot.srv
+++ b/server/samples/reboot.srv
@@ -1,6 +1,6 @@
 def run(machine):
-	host = hosts.SSHHost(machine)
-	print host.is_up()
-	host.reboot()
+    host = hosts.SSHHost(machine)
+    print host.is_up()
+    host.reboot()
 
 job.parallel_simple(run, machines)
diff --git a/server/samples/run_test.srv b/server/samples/run_test.srv
index 1220445..e8b1f36 100644
--- a/server/samples/run_test.srv
+++ b/server/samples/run_test.srv
@@ -5,38 +5,38 @@
 
 
 def usage():
-	print "usage: -t <test name> -m <machines> -l <log dir>"
+    print "usage: -t <test name> -m <machines> -l <log dir>"
 
 def run(client):
-	m = hosts.SSHHost(client)
-	at = autotest.Autotest()
+    m = hosts.SSHHost(client)
+    at = autotest.Autotest()
 
-	results_dir = os.path.join(logdir, client)
-	at.run_test(test, results_dir, m)
+    results_dir = os.path.join(logdir, client)
+    at.run_test(test, results_dir, m)
 
 
 def main():
-	global test, logdir, args
+    global test, logdir, args
 
-	try:
-		opts, args = getopt.getopt(args, 't:l:', [])
-	except getopt.GetoptError, e:
-		usage()
-		print e
-		sys.exit(1)
+    try:
+        opts, args = getopt.getopt(args, 't:l:', [])
+    except getopt.GetoptError, e:
+        usage()
+        print e
+        sys.exit(1)
 
-	for flag, value in opts:
-		if flag == '-t':
-			test = value
-		elif flag == '-l':
-			logdir = value
-	
-	if test == None or logdir == None:
-		usage()
-		sys.exit(1)
+    for flag, value in opts:
+        if flag == '-t':
+            test = value
+        elif flag == '-l':
+            logdir = value
 
-	print "Going to launch %s on %r with log dir of %s." % (test, machines, logdir)
-	parallel_simple(run, machines)
+    if test is None or logdir is None:
+        usage()
+        sys.exit(1)
+
+    print "Going to launch %s on %r with log dir of %s." % (test, machines, logdir)
+    parallel_simple(run, machines)
 
 
 main()
diff --git a/server/samples/sleeptest.srv b/server/samples/sleeptest.srv
index 805ac097..85cbf08 100644
--- a/server/samples/sleeptest.srv
+++ b/server/samples/sleeptest.srv
@@ -1,6 +1,6 @@
 def run(machine):
-	host = hosts.SSHHost(machine)
-	at = autotest.Autotest(host)
-	at.run_test('sleeptest')
+    host = hosts.SSHHost(machine)
+    at = autotest.Autotest(host)
+    at.run_test('sleeptest')
 
 job.parallel_simple(run, machines)
diff --git a/server/samples/uname.srv b/server/samples/uname.srv
index b141d5c..f67d626 100644
--- a/server/samples/uname.srv
+++ b/server/samples/uname.srv
@@ -1,5 +1,5 @@
 hosts = [hosts.SSHHost(h, initialize=False) for h in machines]
 
 for host in hosts:
-	print host.hostname
-	print host.run('uname -a').stdout.rstrip()
+    print host.hostname
+    print host.run('uname -a').stdout.rstrip()
diff --git a/server/self-test/alltests_suite.py b/server/self-test/alltests_suite.py
index 14ffc1d..1d9a8b0 100644
--- a/server/self-test/alltests_suite.py
+++ b/server/self-test/alltests_suite.py
@@ -12,7 +12,7 @@
 # Adjust the path so Python can find the autoserv modules
 src = os.path.abspath("%s/.." % (os.path.dirname(sys.argv[0]),))
 if src not in sys.path:
-	sys.path.insert(1, src)
+    sys.path.insert(1, src)
 
 import unittest
 
@@ -22,9 +22,9 @@
 
 
 def suite():
-	return unittest.TestSuite([autotest_test.suite(),
-                                   utils_test.suite()])
+    return unittest.TestSuite([autotest_test.suite(),
+                               utils_test.suite()])
 
 
 if __name__ == '__main__':
-	unittest.TextTestRunner(verbosity=2).run(suite())
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/server/self-test/autotest_test.py b/server/self-test/autotest_test.py
index 033b091..08dc482 100644
--- a/server/self-test/autotest_test.py
+++ b/server/self-test/autotest_test.py
@@ -14,7 +14,7 @@
 # Adjust the path so Python can find the autoserv modules
 src = os.path.abspath("%s/.." % (os.path.dirname(sys.argv[0]),))
 if src not in sys.path:
-	sys.path.insert(1, src)
+    sys.path.insert(1, src)
 
 import utils
 import autotest
@@ -22,119 +22,119 @@
 
 
 class AutotestTestCase(unittest.TestCase):
-	def setUp(self):
-		self.autotest = autotest.Autotest()
-	
-	def tearDown(self):
-		pass
+    def setUp(self):
+        self.autotest = autotest.Autotest()
+
+    def tearDown(self):
+        pass
 
 
-	def testGetAutoDir(self):
-		class MockInstallHost:
-			def __init__(self):
-				self.commands = []
-				self.result = "autodir='/stuff/autotest'\n"
-			
-			def run(self, command):
-				if command == "grep autodir= /etc/autotest.conf":
-					result = hosts.CmdResult()
-					result.stdout = self.result
-					return result
-				else:
-					self.commands.append(command)
-		
-		host = MockInstallHost()
-		self.assertEqual('/stuff/autotest',
-				 autotest._get_autodir(host))
-		host.result = "autodir=/stuff/autotest\n"
-		self.assertEqual('/stuff/autotest',
-				 autotest._get_autodir(host))
-		host.result = 'autodir="/stuff/auto test"\n'
-		self.assertEqual('/stuff/auto test',
-				 autotest._get_autodir(host))
+    def testGetAutoDir(self):
+        class MockInstallHost:
+            def __init__(self):
+                self.commands = []
+                self.result = "autodir='/stuff/autotest'\n"
+
+            def run(self, command):
+                if command == "grep autodir= /etc/autotest.conf":
+                    result = hosts.CmdResult()
+                    result.stdout = self.result
+                    return result
+                else:
+                    self.commands.append(command)
+
+        host = MockInstallHost()
+        self.assertEqual('/stuff/autotest',
+                         autotest._get_autodir(host))
+        host.result = "autodir=/stuff/autotest\n"
+        self.assertEqual('/stuff/autotest',
+                         autotest._get_autodir(host))
+        host.result = 'autodir="/stuff/auto test"\n'
+        self.assertEqual('/stuff/auto test',
+                         autotest._get_autodir(host))
 
 
-	def testInstallFromDir(self):
-		class MockInstallHost:
-			def __init__(self):
-				self.commands = []
-			
-			def run(self, command):
-				if command == "grep autodir= /etc/autotest.conf":
-					result= hosts.CmdResult()
-					result.stdout = "autodir=/usr/local/autotest\n"
-					return result
-				else:
-					self.commands.append(command)
+    def testInstallFromDir(self):
+        class MockInstallHost:
+            def __init__(self):
+                self.commands = []
 
-			def send_file(self, src, dst):
-				self.commands.append("send_file: %s %s" % (src,
-									   dst))
-				
-		host = MockInstallHost()
-		tmpdir = utils.get_tmp_dir()
-		self.autotest.get(tmpdir)
-		self.autotest.install(host)
-		self.assertEqual(host.commands[0],
-				 'mkdir -p /usr/local/autotest')
-		self.assertTrue(host.commands[1].startswith('send_file: /tmp/'))
-		self.assertTrue(host.commands[1].endswith(
-			'/ /usr/local/autotest'))
+            def run(self, command):
+                if command == "grep autodir= /etc/autotest.conf":
+                    result= hosts.CmdResult()
+                    result.stdout = "autodir=/usr/local/autotest\n"
+                    return result
+                else:
+                    self.commands.append(command)
 
-		
+            def send_file(self, src, dst):
+                self.commands.append("send_file: %s %s" % (src,
+                                                           dst))
 
-	
-	def testInstallFromSVN(self):
-		class MockInstallHost:
-			def __init__(self):
-				self.commands = []
-			
-			def run(self, command):
-				if command == "grep autodir= /etc/autotest.conf":
-					result= hosts.CmdResult()
-					result.stdout = "autodir=/usr/local/autotest\n"
-					return result
-				else:
-					self.commands.append(command)
-		
-		host = MockInstallHost()
-		self.autotest.install(host)
-		self.assertEqual(host.commands,
-				 ['svn checkout '
-				  + autotest.AUTOTEST_SVN + ' '
-				  + "/usr/local/autotest"])
+        host = MockInstallHost()
+        tmpdir = utils.get_tmp_dir()
+        self.autotest.get(tmpdir)
+        self.autotest.install(host)
+        self.assertEqual(host.commands[0],
+                         'mkdir -p /usr/local/autotest')
+        self.assertTrue(host.commands[1].startswith('send_file: /tmp/'))
+        self.assertTrue(host.commands[1].endswith(
+                '/ /usr/local/autotest'))
 
-	
-	def testFirstInstallFromSVNFails(self):
-		class MockFirstInstallFailsHost:
-			def __init__(self):
-				self.commands = []
-			
-			def run(self, command):
-				if command == "grep autodir= /etc/autotest.conf":
-					result= hosts.CmdResult()
-					result.stdout = "autodir=/usr/local/autotest\n"
-					return result
-				else:
-					self.commands.append(command)
-					first = ('svn checkout ' +
-					    autotest.AUTOTEST_SVN + ' ' +
-					    "/usr/local/autotest")
-					if (command == first):
-						raise autotest.AutoservRunError(
-							"svn not found")
-		
-		host = MockFirstInstallFailsHost()
-		self.autotest.install(host)
-		self.assertEqual(host.commands,
-				 ['svn checkout ' + autotest.AUTOTEST_SVN +
-				  ' ' + "/usr/local/autotest",
-				  'svn checkout ' + autotest.AUTOTEST_HTTP +
-				  ' ' + "/usr/local/autotest"])
+
+
+
+    def testInstallFromSVN(self):
+        class MockInstallHost:
+            def __init__(self):
+                self.commands = []
+
+            def run(self, command):
+                if command == "grep autodir= /etc/autotest.conf":
+                    result= hosts.CmdResult()
+                    result.stdout = "autodir=/usr/local/autotest\n"
+                    return result
+                else:
+                    self.commands.append(command)
+
+        host = MockInstallHost()
+        self.autotest.install(host)
+        self.assertEqual(host.commands,
+                         ['svn checkout '
+                          + autotest.AUTOTEST_SVN + ' '
+                          + "/usr/local/autotest"])
+
+
+    def testFirstInstallFromSVNFails(self):
+        class MockFirstInstallFailsHost:
+            def __init__(self):
+                self.commands = []
+
+            def run(self, command):
+                if command == "grep autodir= /etc/autotest.conf":
+                    result= hosts.CmdResult()
+                    result.stdout = "autodir=/usr/local/autotest\n"
+                    return result
+                else:
+                    self.commands.append(command)
+                    first = ('svn checkout ' +
+                        autotest.AUTOTEST_SVN + ' ' +
+                        "/usr/local/autotest")
+                    if (command == first):
+                        raise autotest.AutoservRunError(
+                                "svn not found")
+
+        host = MockFirstInstallFailsHost()
+        self.autotest.install(host)
+        self.assertEqual(host.commands,
+                         ['svn checkout ' + autotest.AUTOTEST_SVN +
+                          ' ' + "/usr/local/autotest",
+                          'svn checkout ' + autotest.AUTOTEST_HTTP +
+                          ' ' + "/usr/local/autotest"])
 
 
 def suite():
-	return unittest.TestLoader().loadTestsFromTestCase(AutotestTestCase)
+    return unittest.TestLoader().loadTestsFromTestCase(AutotestTestCase)
 
 if __name__ == '__main__':
-	unittest.TextTestRunner(verbosity=2).run(suite())
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/server/self-test/utils_test.py b/server/self-test/utils_test.py
index 0978752..fffbf9a 100644
--- a/server/self-test/utils_test.py
+++ b/server/self-test/utils_test.py
@@ -15,62 +15,62 @@
 # Adjust the path so Python can find the autoserv modules
 src = os.path.abspath("%s/.." % (os.path.dirname(sys.argv[0]),))
 if src not in sys.path:
-	sys.path.insert(1, src)
+    sys.path.insert(1, src)
 
 import utils
 
 
 
 class UtilsTestCase(unittest.TestCase):
-	def setUp(self):
-		pass
+    def setUp(self):
+        pass
 
 
-	def tearDown(self):
-		pass
+    def tearDown(self):
+        pass
 
 
-	def testGetWithOpenFile(self):
-		tmpdir = utils.get_tmp_dir()
-		tmppath = os.path.join(tmpdir, 'testfile')
-		tmpfile = file(tmppath, 'w')
-		print >> tmpfile, 'Test string'
-		tmpfile.close()
-		tmpfile = file(tmppath)
-		newtmppath = utils.get(tmpfile)
-		self.assertEqual(file(newtmppath).read(), 'Test string\n')
+    def testGetWithOpenFile(self):
+        tmpdir = utils.get_tmp_dir()
+        tmppath = os.path.join(tmpdir, 'testfile')
+        tmpfile = file(tmppath, 'w')
+        print >> tmpfile, 'Test string'
+        tmpfile.close()
+        tmpfile = file(tmppath)
+        newtmppath = utils.get(tmpfile)
+        self.assertEqual(file(newtmppath).read(), 'Test string\n')
 
 
-	def testGetWithHTTP(self):
-		# Yeah, this test is a bad idea, oh well
-		url = 'http://www.kernel.org/pub/linux/kernel/README'
-		tmppath = utils.get(url)
-		f = file(tmppath)
-		f.readline()
-		self.assertTrue('Linux' in f.readline().split())
+    def testGetWithHTTP(self):
+        # Yeah, this test is a bad idea, oh well
+        url = 'http://www.kernel.org/pub/linux/kernel/README'
+        tmppath = utils.get(url)
+        f = file(tmppath)
+        f.readline()
+        self.assertTrue('Linux' in f.readline().split())
 
 
-	def testGetWithPath(self):
-		path = utils.get('/proc/cpuinfo')
-		self.assertTrue(file(path).readline().startswith('processor'))
+    def testGetWithPath(self):
+        path = utils.get('/proc/cpuinfo')
+        self.assertTrue(file(path).readline().startswith('processor'))
 
 
-	def testGetWithString(self):
-		path = utils.get('/tmp loves rabbits!')
-		self.assertTrue(file(path).readline().startswith('/tmp loves'))
+    def testGetWithString(self):
+        path = utils.get('/tmp loves rabbits!')
+        self.assertTrue(file(path).readline().startswith('/tmp loves'))
 
 
-	def testGetWithDir(self):
-		tmpdir = utils.get_tmp_dir()
-		origpath = os.path.join(tmpdir, 'testGetWithDir')
-		os.mkdir(origpath)
-		dstpath = utils.get(origpath)
-		self.assertTrue(dstpath.endswith('/'))
-		self.assertTrue(os.path.isdir(dstpath))
+    def testGetWithDir(self):
+        tmpdir = utils.get_tmp_dir()
+        origpath = os.path.join(tmpdir, 'testGetWithDir')
+        os.mkdir(origpath)
+        dstpath = utils.get(origpath)
+        self.assertTrue(dstpath.endswith('/'))
+        self.assertTrue(os.path.isdir(dstpath))
 
 
 def suite():
-	return unittest.TestLoader().loadTestsFromTestCase(UtilsTestCase)
+    return unittest.TestLoader().loadTestsFromTestCase(UtilsTestCase)
 
 if __name__ == '__main__':
-	unittest.TextTestRunner(verbosity=2).run(suite())
+    unittest.TextTestRunner(verbosity=2).run(suite())
diff --git a/server/server_job.py b/server/server_job.py
index ed91713..8b6c2c6 100755
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -23,12 +23,12 @@
 # load up a control segment
 # these are all stored in <server_dir>/control_segments
 def load_control_segment(name):
-	server_dir = os.path.dirname(os.path.abspath(__file__))
-	script_file = os.path.join(server_dir, "control_segments", name)
-	if os.path.exists(script_file):
-		return file(script_file).read()
-	else:
-		return ""
+    server_dir = os.path.dirname(os.path.abspath(__file__))
+    script_file = os.path.join(server_dir, "control_segments", name)
+    if os.path.exists(script_file):
+        return file(script_file).read()
+    else:
+        return ""
 
 
 preamble = """\
@@ -47,54 +47,54 @@
 hosts.SSHHost.job = job
 barrier = barrier.barrier
 if len(machines) > 1:
-	open('.machines', 'w').write('\\n'.join(machines) + '\\n')
+        open('.machines', 'w').write('\\n'.join(machines) + '\\n')
 """
 
 client_wrapper = """
 at = autotest.Autotest()
 
 def run_client(machine):
-	hostname, user, password, port = parse_machine(machine,
-		ssh_user, ssh_port, ssh_pass)
+        hostname, user, password, port = parse_machine(machine,
+                ssh_user, ssh_port, ssh_pass)
 
-	host = hosts.SSHHost(hostname, user, port, password=password)
-	at.run(control, host=host)
+        host = hosts.SSHHost(hostname, user, port, password=password)
+        at.run(control, host=host)
 
 job.parallel_simple(run_client, machines)
 """
 
 crashdumps = """
 def crashdumps(machine):
-	hostname, user, password, port = parse_machine(machine,
-		ssh_user, ssh_port, ssh_pass)
+        hostname, user, password, port = parse_machine(machine,
+                ssh_user, ssh_port, ssh_pass)
 
-	host = hosts.SSHHost(hostname, user, port, initialize=False, \
-	    password=password)
-	host.get_crashdumps(test_start_time)
+        host = hosts.SSHHost(hostname, user, port, initialize=False, \
+            password=password)
+        host.get_crashdumps(test_start_time)
 
 job.parallel_simple(crashdumps, machines, log=False)
 """
 
 reboot_segment="""\
 def reboot(machine):
-	hostname, user, password, port = parse_machine(machine,
-		ssh_user, ssh_port, ssh_pass)
+        hostname, user, password, port = parse_machine(machine,
+                ssh_user, ssh_port, ssh_pass)
 
-	host = hosts.SSHHost(hostname, user, port, initialize=False, \
-	    password=password)
-	host.reboot()
+        host = hosts.SSHHost(hostname, user, port, initialize=False, \
+            password=password)
+        host.reboot()
 
 job.parallel_simple(reboot, machines, log=False)
 """
 
 install="""\
 def install(machine):
-	hostname, user, password, port = parse_machine(machine,
-		ssh_user, ssh_port, ssh_pass)
+        hostname, user, password, port = parse_machine(machine,
+                ssh_user, ssh_port, ssh_pass)
 
-	host = hosts.SSHHost(hostname, user, port, initialize=False, \
-	    password=password)
-	host.machine_install()
+        host = hosts.SSHHost(hostname, user, port, initialize=False, \
+            password=password)
+        host.machine_install()
 
 job.parallel_simple(install, machines, log=False)
 """
@@ -110,701 +110,701 @@
 
 # load up site-specific code for generating site-specific job data
 try:
-	import site_job
-	get_site_job_data = site_job.get_site_job_data
-	del site_job
+    import site_job
+    get_site_job_data = site_job.get_site_job_data
+    del site_job
 except ImportError:
-	# by default provide a stub that generates no site data
-	def get_site_job_data(job):
-		return {}
+    # by default provide a stub that generates no site data
+    def get_site_job_data(job):
+        return {}
 
 
 class base_server_job:
-	"""The actual job against which we do everything.
+    """The actual job against which we do everything.
 
-	Properties:
-		autodir
-			The top level autotest directory (/usr/local/autotest).
-		serverdir
-			<autodir>/server/
-		clientdir
-			<autodir>/client/
-		conmuxdir
-			<autodir>/conmux/
-		testdir
-			<autodir>/server/tests/
-		control
-			the control file for this job
-	"""
+    Properties:
+            autodir
+                    The top level autotest directory (/usr/local/autotest).
+            serverdir
+                    <autodir>/server/
+            clientdir
+                    <autodir>/client/
+            conmuxdir
+                    <autodir>/conmux/
+            testdir
+                    <autodir>/server/tests/
+            control
+                    the control file for this job
+    """
 
-	STATUS_VERSION = 1
+    STATUS_VERSION = 1
 
 
-	def __init__(self, control, args, resultdir, label, user, machines,
-		     client=False, parse_job="",
-		     ssh_user='root', ssh_port=22, ssh_pass=''):
-		"""
-			control
-				The control file (pathname of)
-			args
-				args to pass to the control file
-			resultdir
-				where to throw the results
-			label
-				label for the job
-			user	
-				Username for the job (email address)
-			client
-				True if a client-side control file
-		"""
-		path = os.path.dirname(__file__)
-		self.autodir = os.path.abspath(os.path.join(path, '..'))
-		self.serverdir = os.path.join(self.autodir, 'server')
-		self.testdir   = os.path.join(self.serverdir, 'tests')
-		self.tmpdir    = os.path.join(self.serverdir, 'tmp')
-		self.conmuxdir = os.path.join(self.autodir, 'conmux')
-		self.clientdir = os.path.join(self.autodir, 'client')
-		self.toolsdir = os.path.join(self.autodir, 'client/tools')
-		if control:
-			self.control = open(control, 'r').read()
-			self.control = re.sub('\r', '', self.control)
-		else:
-			self.control = None
-		self.resultdir = resultdir
-		if not os.path.exists(resultdir):
-			os.mkdir(resultdir)
-		self.debugdir = os.path.join(resultdir, 'debug')
-		if not os.path.exists(self.debugdir):
-			os.mkdir(self.debugdir)
-		self.status = os.path.join(resultdir, 'status')
-		self.label = label
-		self.user = user
-		self.args = args
-		self.machines = machines
-		self.client = client
-		self.record_prefix = ''
-		self.warning_loggers = set()
-		self.ssh_user = ssh_user
-		self.ssh_port = ssh_port
-		self.ssh_pass = ssh_pass
+    def __init__(self, control, args, resultdir, label, user, machines,
+                 client=False, parse_job="",
+                 ssh_user='root', ssh_port=22, ssh_pass=''):
+        """
+                control
+                        The control file (a pathname)
+                args
+                        args to pass to the control file
+                resultdir
+                        where to throw the results
+                label
+                        label for the job
+                user
+                        Username for the job (email address)
+                client
+                        True if a client-side control file
+        """
+        path = os.path.dirname(__file__)
+        self.autodir = os.path.abspath(os.path.join(path, '..'))
+        self.serverdir = os.path.join(self.autodir, 'server')
+        self.testdir   = os.path.join(self.serverdir, 'tests')
+        self.tmpdir    = os.path.join(self.serverdir, 'tmp')
+        self.conmuxdir = os.path.join(self.autodir, 'conmux')
+        self.clientdir = os.path.join(self.autodir, 'client')
+        self.toolsdir = os.path.join(self.autodir, 'client/tools')
+        if control:
+            self.control = open(control, 'r').read()
+            self.control = re.sub('\r', '', self.control)
+        else:
+            self.control = None
+        self.resultdir = resultdir
+        if not os.path.exists(resultdir):
+            os.mkdir(resultdir)
+        self.debugdir = os.path.join(resultdir, 'debug')
+        if not os.path.exists(self.debugdir):
+            os.mkdir(self.debugdir)
+        self.status = os.path.join(resultdir, 'status')
+        self.label = label
+        self.user = user
+        self.args = args
+        self.machines = machines
+        self.client = client
+        self.record_prefix = ''
+        self.warning_loggers = set()
+        self.ssh_user = ssh_user
+        self.ssh_port = ssh_port
+        self.ssh_pass = ssh_pass
 
-		self.stdout = fd_stack.fd_stack(1, sys.stdout)
-		self.stderr = fd_stack.fd_stack(2, sys.stderr)
+        self.stdout = fd_stack.fd_stack(1, sys.stdout)
+        self.stderr = fd_stack.fd_stack(2, sys.stderr)
 
-		if os.path.exists(self.status):
-			os.unlink(self.status)
-		job_data = {'label' : label, 'user' : user,
-                            'hostname' : ','.join(machines),
-                            'status_version' : str(self.STATUS_VERSION)}
-		job_data.update(get_site_job_data(self))
-		utils.write_keyval(self.resultdir, job_data)
+        if os.path.exists(self.status):
+            os.unlink(self.status)
+        job_data = {'label' : label, 'user' : user,
+                    'hostname' : ','.join(machines),
+                    'status_version' : str(self.STATUS_VERSION)}
+        job_data.update(get_site_job_data(self))
+        utils.write_keyval(self.resultdir, job_data)
 
-		self.parse_job = parse_job
-		if self.parse_job and len(machines) == 1:
-			self.using_parser = True
-			self.init_parser(resultdir)
-		else:
-			self.using_parser = False
+        self.parse_job = parse_job
+        if self.parse_job and len(machines) == 1:
+            self.using_parser = True
+            self.init_parser(resultdir)
+        else:
+            self.using_parser = False
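+        # (Continuous parsing is only set up here for single-machine
+        # jobs; multi-machine jobs get a per-machine parser from the
+        # wrapper installed by parallel_simple() below.)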
 
 
-	def init_parser(self, resultdir):
-		"""Start the continuous parsing of resultdir. This sets up
-		the database connection and inserts the basic job object into
-		the database if necessary."""
-		# redirect parser debugging to .parse.log
-		parse_log = os.path.join(resultdir, '.parse.log')
-		parse_log = open(parse_log, 'w', 0)
-		tko_utils.redirect_parser_debugging(parse_log)
-		# create a job model object and set up the db
-		self.results_db = tko_db.db(autocommit=True)
-		self.parser = status_lib.parser(self.STATUS_VERSION)
-		self.job_model = self.parser.make_job(resultdir)
-		self.parser.start(self.job_model)
-		# check if a job already exists in the db and insert it if
-		# it does not
-		job_idx = self.results_db.find_job(self.parse_job)
-		if job_idx is None:
-			self.results_db.insert_job(self.parse_job,
-						   self.job_model)
-		else:
-			machine_idx = self.results_db.lookup_machine(
-			    self.job_model.machine)
-			self.job_model.index = job_idx
-			self.job_model.machine_idx = machine_idx
+    def init_parser(self, resultdir):
+        """Start the continuous parsing of resultdir. This sets up
+        the database connection and inserts the basic job object into
+        the database if necessary."""
+        # redirect parser debugging to .parse.log
+        parse_log = os.path.join(resultdir, '.parse.log')
+        parse_log = open(parse_log, 'w', 0)
+        tko_utils.redirect_parser_debugging(parse_log)
+        # create a job model object and set up the db
+        self.results_db = tko_db.db(autocommit=True)
+        self.parser = status_lib.parser(self.STATUS_VERSION)
+        self.job_model = self.parser.make_job(resultdir)
+        self.parser.start(self.job_model)
+        # check if a job already exists in the db and insert it if
+        # it does not
+        job_idx = self.results_db.find_job(self.parse_job)
+        if job_idx is None:
+            self.results_db.insert_job(self.parse_job,
+                                       self.job_model)
+        else:
+            machine_idx = self.results_db.lookup_machine(
+                self.job_model.machine)
+            self.job_model.index = job_idx
+            self.job_model.machine_idx = machine_idx
 
 
-	def cleanup_parser(self):
-		"""This should be called after the server job is finished
-		to carry out any remaining cleanup (e.g. flushing any
-		remaining test results to the results db)"""
-		if not self.using_parser:
-			return
-		final_tests = self.parser.end()
-		for test in final_tests:
-			self.__insert_test(test)
-		self.using_parser = False
+    def cleanup_parser(self):
+        """This should be called after the server job is finished
+        to carry out any remaining cleanup (e.g. flushing any
+        remaining test results to the results db)"""
+        if not self.using_parser:
+            return
+        final_tests = self.parser.end()
+        for test in final_tests:
+            self.__insert_test(test)
+        self.using_parser = False
 
 
-	def verify(self):
-		if not self.machines:
-			raise error.AutoservError(
-			    'No machines specified to verify')
-		try:
-			namespace = {'machines' : self.machines, 'job' : self, \
-						 'ssh_user' : self.ssh_user, \
-						 'ssh_port' : self.ssh_port, \
-						 'ssh_pass' : self.ssh_pass}
-			exec(preamble + verify, namespace, namespace)
-		except Exception, e:
-			msg = ('Verify failed\n' + str(e) + '\n' 
-				+ traceback.format_exc())
-			self.record('ABORT', None, None, msg)
-			raise
+    def verify(self):
+        if not self.machines:
+            raise error.AutoservError(
+                'No machines specified to verify')
+        try:
+            namespace = {'machines' : self.machines, 'job' : self,
+                         'ssh_user' : self.ssh_user,
+                         'ssh_port' : self.ssh_port,
+                         'ssh_pass' : self.ssh_pass}
+            exec(preamble + verify, namespace, namespace)
+        except Exception, e:
+            msg = ('Verify failed\n' + str(e) + '\n'
+                    + traceback.format_exc())
+            self.record('ABORT', None, None, msg)
+            raise
 
 
-	def repair(self):
-		if not self.machines:
-			raise error.AutoservError(
-			    'No machines specified to repair')
-		namespace = {'machines' : self.machines, 'job' : self, \
-					 'ssh_user' : self.ssh_user, \
-					 'ssh_port' : self.ssh_port, \
-					 'ssh_pass' : self.ssh_pass}
-		# no matter what happens during repair, go on to try to reverify
-		try:
-			exec(preamble + repair, namespace, namespace)
-		except Exception, exc:
-			print 'Exception occured during repair'
-			traceback.print_exc()
-		self.verify()
+    def repair(self):
+        if not self.machines:
+            raise error.AutoservError(
+                'No machines specified to repair')
+        namespace = {'machines' : self.machines, 'job' : self,
+                     'ssh_user' : self.ssh_user,
+                     'ssh_port' : self.ssh_port,
+                     'ssh_pass' : self.ssh_pass}
+        # no matter what happens during repair, go on to try to reverify
+        try:
+            exec(preamble + repair, namespace, namespace)
+        except Exception, exc:
+            print 'Exception occurred during repair'
+            traceback.print_exc()
+        self.verify()
 
 
-	def enable_external_logging(self):
-		"""Start or restart external logging mechanism.
-		"""
-		pass
+    def enable_external_logging(self):
+        """Start or restart external logging mechanism.
+        """
+        pass
 
 
-	def disable_external_logging(self):
-		""" Pause or stop external logging mechanism.
-		"""
-		pass
+    def disable_external_logging(self):
+        """ Pause or stop external logging mechanism.
+        """
+        pass
 
 
-	def use_external_logging(self):
-		"""Return True if external logging should be used.
-		"""
-		return False
+    def use_external_logging(self):
+        """Return True if external logging should be used.
+        """
+        return False
 
 
-	def parallel_simple(self, function, machines, log=True, timeout=None):
-		"""Run 'function' using parallel_simple, with an extra
-		wrapper to handle the necessary setup for continuous parsing,
-		if possible. If continuous parsing is already properly
-		initialized then this should just work."""
-		is_forking = not (len(machines) == 1 and
-				  self.machines == machines)
-		if self.parse_job and is_forking:
-			def wrapper(machine):
-				self.parse_job += "/" + machine
-				self.using_parser = True
-				self.machines = [machine]
-				self.resultdir = os.path.join(self.resultdir,
-							      machine)
-				self.init_parser(self.resultdir)
-				result = function(machine)
-				self.cleanup_parser()
-				return result
-		else:
-			wrapper = function
-		subcommand.parallel_simple(wrapper, machines, log, timeout)
+    def parallel_simple(self, function, machines, log=True, timeout=None):
+        """Run 'function' using parallel_simple, with an extra
+        wrapper to handle the necessary setup for continuous parsing,
+        if possible. If continuous parsing is already properly
+        initialized then this should just work."""
+        is_forking = not (len(machines) == 1 and
+                          self.machines == machines)
+        if self.parse_job and is_forking:
+            def wrapper(machine):
+                self.parse_job += "/" + machine
+                self.using_parser = True
+                self.machines = [machine]
+                self.resultdir = os.path.join(self.resultdir,
+                                              machine)
+                self.init_parser(self.resultdir)
+                result = function(machine)
+                self.cleanup_parser()
+                return result
+        else:
+            wrapper = function
+        subcommand.parallel_simple(wrapper, machines, log, timeout)
 
 
-	def run(self, reboot = False, install_before = False,
-		install_after = False, collect_crashdumps = True,
-		namespace = {}):
-		# use a copy so changes don't affect the original dictionary
-		namespace = namespace.copy()
-		machines = self.machines
+    def run(self, reboot = False, install_before = False,
+            install_after = False, collect_crashdumps = True,
+            namespace = {}):
+        # use a copy so changes don't affect the original dictionary
+        namespace = namespace.copy()
+        machines = self.machines
 
-		self.aborted = False
-		namespace['machines'] = machines
-		namespace['args'] = self.args
-		namespace['job'] = self
-		namespace['ssh_user'] = self.ssh_user
-		namespace['ssh_port'] = self.ssh_port
-		namespace['ssh_pass'] = self.ssh_pass
-		test_start_time = int(time.time())
+        self.aborted = False
+        namespace['machines'] = machines
+        namespace['args'] = self.args
+        namespace['job'] = self
+        namespace['ssh_user'] = self.ssh_user
+        namespace['ssh_port'] = self.ssh_port
+        namespace['ssh_pass'] = self.ssh_pass
+        test_start_time = int(time.time())
 
-		os.chdir(self.resultdir)
-		
-		self.enable_external_logging()
-		status_log = os.path.join(self.resultdir, 'status.log')
-		try:
-			if install_before and machines:
-				exec(preamble + install, namespace, namespace)
-			if self.client:
-				namespace['control'] = self.control
-				open('control', 'w').write(self.control)
-				open('control.srv', 'w').write(client_wrapper)
-				server_control = client_wrapper
-			else:
-				open('control.srv', 'w').write(self.control)
-				server_control = self.control
-			exec(preamble + server_control, namespace, namespace)
+        os.chdir(self.resultdir)
 
-		finally:
-			if machines and collect_crashdumps:
-				namespace['test_start_time'] = test_start_time
-				exec(preamble + crashdumps, 
-					namespace, namespace)
-			self.disable_external_logging()
-			if reboot and machines:
-				exec(preamble + reboot_segment,
-				     namespace, namespace)
-			if install_after and machines:
-				exec(preamble + install, namespace, namespace)
+        self.enable_external_logging()
+        status_log = os.path.join(self.resultdir, 'status.log')
+        try:
+            if install_before and machines:
+                exec(preamble + install, namespace, namespace)
+            if self.client:
+                namespace['control'] = self.control
+                open('control', 'w').write(self.control)
+                open('control.srv', 'w').write(client_wrapper)
+                server_control = client_wrapper
+            else:
+                open('control.srv', 'w').write(self.control)
+                server_control = self.control
+            exec(preamble + server_control, namespace, namespace)
 
+        finally:
+            if machines and collect_crashdumps:
+                namespace['test_start_time'] = test_start_time
+                exec(preamble + crashdumps,
+                     namespace, namespace)
+            self.disable_external_logging()
+            if reboot and machines:
+                exec(preamble + reboot_segment,
+                     namespace, namespace)
+            if install_after and machines:
+                exec(preamble + install, namespace, namespace)
 
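Because run() executes the control file inside the namespace populated above, a server-side control file can reference those names directly. A hypothetical minimal control file body (the preamble supplies additional imports; only the names set above are guaranteed by this code):

    # hypothetical control file body; 'machines', 'args', 'job', 'ssh_user',
    # 'ssh_port' and 'ssh_pass' are injected by server_job.run()
    print 'running on %s (ssh as %s, port %s)' % (machines, ssh_user, ssh_port)
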
-	def run_test(self, url, *args, **dargs):
-		"""Summon a test object and run it.
-		
-		tag
-			tag to add to testname
-		url
-			url of the test to run
-		"""
 
-		(group, testname) = test.testname(url)
-		tag = None
-		subdir = testname
+    def run_test(self, url, *args, **dargs):
+        """Summon a test object and run it.
 
-		if dargs.has_key('tag'):
-			tag = dargs['tag']
-			del dargs['tag']
-			if tag:
-				subdir += '.' + tag
+        tag
+                tag to add to testname
+        url
+                url of the test to run
+        """
+
+        (group, testname) = test.testname(url)
+        tag = None
+        subdir = testname
+
+        if dargs.has_key('tag'):
+            tag = dargs['tag']
+            del dargs['tag']
+            if tag:
+                subdir += '.' + tag
 
-		outputdir = os.path.join(self.resultdir, subdir)
-		if os.path.exists(outputdir):
-			msg = ("%s already exists, test <%s> may have"
-			       " already run with tag <%s>"
-			       % (outputdir, testname, tag) )
-			raise error.TestError(msg)
-		os.mkdir(outputdir)
+        outputdir = os.path.join(self.resultdir, subdir)
+        if os.path.exists(outputdir):
+            msg = ("%s already exists, test <%s> may have"
+                   " already run with tag <%s>"
+                   % (outputdir, testname, tag))
+            raise error.TestError(msg)
+        os.mkdir(outputdir)
 
-		try:
-			test.runtest(self, url, tag, args, dargs)
-			self.record('GOOD', subdir, testname, 'completed successfully')
-		except error.TestNAError, detail:
-			self.record('TEST_NA', subdir, testname, str(detail))
-		except Exception, detail:
-			info = str(detail) + "\n" + traceback.format_exc()
-			self.record('FAIL', subdir, testname, info)
+        try:
+            test.runtest(self, url, tag, args, dargs)
+            self.record('GOOD', subdir, testname, 'completed successfully')
+        except error.TestNAError, detail:
+            self.record('TEST_NA', subdir, testname, str(detail))
+        except Exception, detail:
+            info = str(detail) + "\n" + traceback.format_exc()
+            self.record('FAIL', subdir, testname, info)
 
 
-	def run_group(self, function, *args, **dargs):
-		"""\
-		function:
-			subroutine to run
-		*args:
-			arguments for the function
-		"""
+    def run_group(self, function, *args, **dargs):
+        """\
+        function:
+                subroutine to run
+        *args:
+                arguments for the function
+        """
 
-		result = None
-		name = function.__name__
+        result = None
+        name = function.__name__
 
-		# Allow the tag for the group to be specified.
-		if dargs.has_key('tag'):
-			tag = dargs['tag']
-			del dargs['tag']
-			if tag:
-				name = tag
+        # Allow the tag for the group to be specified.
+        if dargs.has_key('tag'):
+            tag = dargs['tag']
+            del dargs['tag']
+            if tag:
+                name = tag
 
-		old_record_prefix = self.record_prefix
-		try:
-			try:
-				self.record('START', None, name)
-				self.record_prefix += '\t'
-				result = function(*args, **dargs)
-			except Exception, e:
-				self.record_prefix = old_record_prefix
-				err_msg = str(e) + '\n'
-				err_msg += traceback.format_exc()
-				self.record('END FAIL', None, name, err_msg)
-			else:
-				self.record_prefix = old_record_prefix
-				self.record('END GOOD', None, name)
+        old_record_prefix = self.record_prefix
+        try:
+            try:
+                self.record('START', None, name)
+                self.record_prefix += '\t'
+                result = function(*args, **dargs)
+            except Exception, e:
+                self.record_prefix = old_record_prefix
+                err_msg = str(e) + '\n'
+                err_msg += traceback.format_exc()
+                self.record('END FAIL', None, name, err_msg)
+            else:
+                self.record_prefix = old_record_prefix
+                self.record('END GOOD', None, name)
 
-		# We don't want to raise up an error higher if it's just
-		# a TestError - we want to carry on to other tests. Hence
-		# this outer try/except block.
-		except error.TestError:
-			pass
-		except:
-			raise error.TestError(name + ' failed\n' +
-					      traceback.format_exc())
+        # We don't want to raise up an error higher if it's just
+        # a TestError - we want to carry on to other tests. Hence
+        # this outer try/except block.
+        except error.TestError:
+            pass
+        except:
+            raise error.TestError(name + ' failed\n' +
+                                  traceback.format_exc())
 
-		return result
+        return result
 
 
-	def run_reboot(self, reboot_func, get_kernel_func):
-		"""\
-		A specialization of run_group meant specifically for handling
-		a reboot. Includes support for capturing the kernel version
-		after the reboot.
+    def run_reboot(self, reboot_func, get_kernel_func):
+        """\
+        A specialization of run_group meant specifically for handling
+        a reboot. Includes support for capturing the kernel version
+        after the reboot.
 
-		reboot_func: a function that carries out the reboot
+        reboot_func: a function that carries out the reboot
 
-		get_kernel_func: a function that returns a string
-		representing the kernel version.
-		"""
+        get_kernel_func: a function that returns a string
+        representing the kernel version.
+        """
 
-		old_record_prefix = self.record_prefix
-		try:
-			self.record('START', None, 'reboot')
-			self.record_prefix += '\t'
-			reboot_func()
-		except Exception, e:
-			self.record_prefix = old_record_prefix
-			err_msg = str(e) + '\n' + traceback.format_exc()
-			self.record('END FAIL', None, 'reboot', err_msg)
-		else:
-			kernel = get_kernel_func()
-			self.record_prefix = old_record_prefix
-			self.record('END GOOD', None, 'reboot',
-				    optional_fields={"kernel": kernel})
+        old_record_prefix = self.record_prefix
+        try:
+            self.record('START', None, 'reboot')
+            self.record_prefix += '\t'
+            reboot_func()
+        except Exception, e:
+            self.record_prefix = old_record_prefix
+            err_msg = str(e) + '\n' + traceback.format_exc()
+            self.record('END FAIL', None, 'reboot', err_msg)
+        else:
+            kernel = get_kernel_func()
+            self.record_prefix = old_record_prefix
+            self.record('END GOOD', None, 'reboot',
+                        optional_fields={"kernel": kernel})
 
 
-	def record(self, status_code, subdir, operation, status='',
-		   optional_fields=None):
-		"""
-		Record job-level status
+    def record(self, status_code, subdir, operation, status='',
+               optional_fields=None):
+        """
+        Record job-level status
 
-		The intent is to make this file both machine parseable and
-		human readable. That involves a little more complexity, but
-		really isn't all that bad ;-)
+        The intent is to make this file both machine parseable and
+        human readable. That involves a little more complexity, but
+        really isn't all that bad ;-)
 
-		Format is <status code>\t<subdir>\t<operation>\t<status>
+        Format is <status code>\t<subdir>\t<operation>\t<status>
 
-		status code: see common_lib.logging.is_valid_status()
-		             for valid status definition
+        status code: see common_lib.logging.is_valid_status()
+                     for valid status definition
 
-		subdir: MUST be a relevant subdirectory in the results,
-		or None, which will be represented as '----'
+        subdir: MUST be a relevant subdirectory in the results,
+        or None, which will be represented as '----'
 
-		operation: description of what you ran (e.g. "dbench", or
-						"mkfs -t foobar /dev/sda9")
+        operation: description of what you ran (e.g. "dbench", or
+                                        "mkfs -t foobar /dev/sda9")
 
-		status: error message or "completed sucessfully"
+        status: error message or "completed successfully"
 
-		------------------------------------------------------------
+        ------------------------------------------------------------
 
-		Initial tabs indicate indent levels for grouping, and is
-		governed by self.record_prefix
+        Initial tabs indicate indent levels for grouping, and are
+        governed by self.record_prefix
 
-		multiline messages have secondary lines prefaced by a double
-		space ('  ')
+        multiline messages have secondary lines prefaced by a double
+        space ('  ')
 
-		Executing this method will trigger the logging of all new
-		warnings to date from the various console loggers.
-		"""
-		# poll all our warning loggers for new warnings
-		warnings = self._read_warnings()
-		for timestamp, msg in warnings:
-			self.__record("WARN", None, None, msg, timestamp)
+        Executing this method will trigger the logging of all new
+        warnings to date from the various console loggers.
+        """
+        # poll all our warning loggers for new warnings
+        warnings = self._read_warnings()
+        for timestamp, msg in warnings:
+            self.__record("WARN", None, None, msg, timestamp)
 
-		# write out the actual status log line
-		self.__record(status_code, subdir, operation, status,
-			      optional_fields=optional_fields)
+        # write out the actual status log line
+        self.__record(status_code, subdir, operation, status,
+                      optional_fields=optional_fields)
 
 
-	def _read_warnings(self):
-		warnings = []
-		while True:
-			# pull in a line of output from every logger that has
-			# output ready to be read
-			loggers, _, _ = select.select(self.warning_loggers,
-						      [], [], 0)
-			closed_loggers = set()
-			for logger in loggers:
-				line = logger.readline()
-				# record any broken pipes (aka line == empty)
-				if len(line) == 0:
-					closed_loggers.add(logger)
-					continue
-				timestamp, msg = line.split('\t', 1)
-				warnings.append((int(timestamp), msg.strip()))
+    def _read_warnings(self):
+        warnings = []
+        while True:
+            # pull in a line of output from every logger that has
+            # output ready to be read
+            loggers, _, _ = select.select(self.warning_loggers,
+                                          [], [], 0)
+            closed_loggers = set()
+            for logger in loggers:
+                line = logger.readline()
+                # record any broken pipes (aka line == empty)
+                if len(line) == 0:
+                    closed_loggers.add(logger)
+                    continue
+                timestamp, msg = line.split('\t', 1)
+                warnings.append((int(timestamp), msg.strip()))
 
-			# stop listening to loggers that are closed
-			self.warning_loggers -= closed_loggers
+            # stop listening to loggers that are closed
+            self.warning_loggers -= closed_loggers
 
-			# stop if none of the loggers have any output left
-			if not loggers:
-				break
+            # stop if none of the loggers have any output left
+            if not loggers:
+                break
 
-		# sort into timestamp order
-		warnings.sort()
-		return warnings
+        # sort into timestamp order
+        warnings.sort()
+        return warnings
 
 
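_read_warnings() expects each entry in self.warning_loggers to be a readable file object that emits one '<epoch>\t<message>' line per warning. A sketch of a compatible producer, assuming the read end has been registered as a warning logger elsewhere in the server code:

    import os, time

    read_fd, write_fd = os.pipe()
    # assumption: os.fdopen(read_fd) has been added to job.warning_loggers
    os.write(write_fd, '%d\tmachine reported a soft lockup\n' % int(time.time()))
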
-	def _render_record(self, status_code, subdir, operation, status='',
-			   epoch_time=None, record_prefix=None,
-			   optional_fields=None):
-		"""
-		Internal Function to generate a record to be written into a
-		status log. For use by server_job.* classes only.
-		"""
-		if subdir:
-			if re.match(r'[\n\t]', subdir):
-				raise ValueError(
-				    'Invalid character in subdir string')
-			substr = subdir
-		else:
-			substr = '----'
+    def _render_record(self, status_code, subdir, operation, status='',
+                       epoch_time=None, record_prefix=None,
+                       optional_fields=None):
+        """
+        Internal Function to generate a record to be written into a
+        status log. For use by server_job.* classes only.
+        """
+        if subdir:
+            if re.search(r'[\n\t]', subdir):
+                raise ValueError(
+                    'Invalid character in subdir string')
+            substr = subdir
+        else:
+            substr = '----'
 
-		if not logging.is_valid_status(status_code):
-			raise ValueError('Invalid status code supplied: %s' %
-					 status_code)
-		if not operation:
-			operation = '----'
-		if re.match(r'[\n\t]', operation):
-			raise ValueError(
-			    'Invalid character in operation string')
-		operation = operation.rstrip()
-		status = status.rstrip()
-		status = re.sub(r"\t", "  ", status)
-		# Ensure any continuation lines are marked so we can
-		# detect them in the status file to ensure it is parsable.
-		status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)
+        if not logging.is_valid_status(status_code):
+            raise ValueError('Invalid status code supplied: %s' %
+                             status_code)
+        if not operation:
+            operation = '----'
+        if re.search(r'[\n\t]', operation):
+            raise ValueError(
+                'Invalid character in operation string')
+        operation = operation.rstrip()
+        status = status.rstrip()
+        status = re.sub(r"\t", "  ", status)
+        # Ensure any continuation lines are marked so we can
+        # detect them in the status file to ensure it is parsable.
+        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)
 
-		if not optional_fields:
-			optional_fields = {}
+        if not optional_fields:
+            optional_fields = {}
 
-		# Generate timestamps for inclusion in the logs
-		if epoch_time is None:
-			epoch_time = int(time.time())
-		local_time = time.localtime(epoch_time)
-		optional_fields["timestamp"] = str(epoch_time)
-		optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
-							     local_time)
+        # Generate timestamps for inclusion in the logs
+        if epoch_time is None:
+            epoch_time = int(time.time())
+        local_time = time.localtime(epoch_time)
+        optional_fields["timestamp"] = str(epoch_time)
+        optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
+                                                     local_time)
 
-		fields = [status_code, substr, operation]
-		fields += ["%s=%s" % x for x in optional_fields.iteritems()]
-		fields.append(status)
+        fields = [status_code, substr, operation]
+        fields += ["%s=%s" % x for x in optional_fields.iteritems()]
+        fields.append(status)
 
-		if record_prefix is None:
-			record_prefix = self.record_prefix
+        if record_prefix is None:
+            record_prefix = self.record_prefix
 
-		msg = '\t'.join(str(x) for x in fields)
+        msg = '\t'.join(str(x) for x in fields)
 
-		return record_prefix + msg + '\n'
+        return record_prefix + msg + '\n'
 
 
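Putting the pieces together, a record rendered by _render_record() is one tab-separated line; the optional key=value fields land between the operation and the status, in whatever order the dictionary yields them. An illustrative reconstruction of a GOOD record for a 'dbench' run (values invented):

    # roughly what _render_record('GOOD', 'dbench', 'dbench',
    # 'completed successfully') emits with an empty record_prefix
    line = '\t'.join(['GOOD', 'dbench', 'dbench',
                      'timestamp=1199187720',
                      'localtime=Jan 01 12:22:00',
                      'completed successfully']) + '\n'
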
-	def _record_prerendered(self, msg):
-		"""
-		Record a pre-rendered msg into the status logs. The only
-		change this makes to the message is to add on the local
-		indentation. Should not be called outside of server_job.*
-		classes. Unlike __record, this does not write the message
-		to standard output.
-		"""
-		lines = []
-		status_file = os.path.join(self.resultdir, 'status.log')
-		status_log = open(status_file, 'a')
-		for line in msg.splitlines():
-			line = self.record_prefix + line + '\n'
-			lines.append(line)
-			status_log.write(line)
-		status_log.close()
-		self.__parse_status(lines)
+    def _record_prerendered(self, msg):
+        """
+        Record a pre-rendered msg into the status logs. The only
+        change this makes to the message is to add on the local
+        indentation. Should not be called outside of server_job.*
+        classes. Unlike __record, this does not write the message
+        to standard output.
+        """
+        lines = []
+        status_file = os.path.join(self.resultdir, 'status.log')
+        status_log = open(status_file, 'a')
+        for line in msg.splitlines():
+            line = self.record_prefix + line + '\n'
+            lines.append(line)
+            status_log.write(line)
+        status_log.close()
+        self.__parse_status(lines)
 
 
-	def __record(self, status_code, subdir, operation, status='',
-		     epoch_time=None, optional_fields=None):
-		"""
-		Actual function for recording a single line into the status
-		logs. Should never be called directly, only by job.record as
-		this would bypass the console monitor logging.
-		"""
+    def __record(self, status_code, subdir, operation, status='',
+                 epoch_time=None, optional_fields=None):
+        """
+        Actual function for recording a single line into the status
+        logs. Should never be called directly, only by job.record as
+        this would bypass the console monitor logging.
+        """
 
-		msg = self._render_record(status_code, subdir, operation,
-					  status, epoch_time,
-					  optional_fields=optional_fields)
+        msg = self._render_record(status_code, subdir, operation,
+                                  status, epoch_time,
+                                  optional_fields=optional_fields)
 
 
-		status_file = os.path.join(self.resultdir, 'status.log')
-		sys.stdout.write(msg)
-		open(status_file, "a").write(msg)
-		if subdir:
-			test_dir = os.path.join(self.resultdir, subdir)
-			status_file = os.path.join(test_dir, 'status')
-			open(status_file, "a").write(msg)
-		self.__parse_status(msg.splitlines())
+        status_file = os.path.join(self.resultdir, 'status.log')
+        sys.stdout.write(msg)
+        open(status_file, "a").write(msg)
+        if subdir:
+            test_dir = os.path.join(self.resultdir, subdir)
+            status_file = os.path.join(test_dir, 'status')
+            open(status_file, "a").write(msg)
+        self.__parse_status(msg.splitlines())
 
 
-	def __parse_status(self, new_lines):
-		if not self.using_parser:
-			return
-		new_tests = self.parser.process_lines(new_lines)
-		for test in new_tests:
-			self.__insert_test(test)
+    def __parse_status(self, new_lines):
+        if not self.using_parser:
+            return
+        new_tests = self.parser.process_lines(new_lines)
+        for test in new_tests:
+            self.__insert_test(test)
 
 
-	def __insert_test(self, test):
-		""" An internal method to insert a new test result into the
-		database. This method will not raise an exception, even if an
-		error occurs during the insert, to avoid failing a test
-		simply because of unexpected database issues."""
-		try:
-			self.results_db.insert_test(self.job_model, test)
-		except Exception:
-			msg = ("WARNING: An unexpected error occured while "
-			       "inserting test results into the database. "
-			       "Ignoring error.\n" + traceback.format_exc())
-			print >> sys.stderr, msg
+    def __insert_test(self, test):
+        """ An internal method to insert a new test result into the
+        database. This method will not raise an exception, even if an
+        error occurs during the insert, to avoid failing a test
+        simply because of unexpected database issues."""
+        try:
+            self.results_db.insert_test(self.job_model, test)
+        except Exception:
+            msg = ("WARNING: An unexpected error occured while "
+                   "inserting test results into the database. "
+                   "Ignoring error.\n" + traceback.format_exc())
+            print >> sys.stderr, msg
 
 
 # a file-like object for catching stderr from an autotest client and
 # extracting status logs from it
 class client_logger(object):
-	"""Partial file object to write to both stdout and
-	the status log file.  We only implement those methods
-	utils.run() actually calls.
-	"""
-	parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
-	extract_indent = re.compile(r"^(\t*).*$")
+    """Partial file object to write to both stdout and
+    the status log file.  We only implement those methods
+    utils.run() actually calls.
+    """
+    parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
+    extract_indent = re.compile(r"^(\t*).*$")
 
-	def __init__(self, job):
-		self.job = job
-		self.leftover = ""
-		self.last_line = ""
-		self.logs = {}
+    def __init__(self, job):
+        self.job = job
+        self.leftover = ""
+        self.last_line = ""
+        self.logs = {}
 
 
-	def _process_log_dict(self, log_dict):
-		log_list = log_dict.pop("logs", [])
-		for key in sorted(log_dict.iterkeys()):
-			log_list += self._process_log_dict(log_dict.pop(key))
-		return log_list
+    def _process_log_dict(self, log_dict):
+        log_list = log_dict.pop("logs", [])
+        for key in sorted(log_dict.iterkeys()):
+            log_list += self._process_log_dict(log_dict.pop(key))
+        return log_list
 
 
-	def _process_logs(self):
-		"""Go through the accumulated logs in self.log and print them
-		out to stdout and the status log. Note that this processes
-		logs in an ordering where:
+    def _process_logs(self):
+        """Go through the accumulated logs in self.log and print them
+        out to stdout and the status log. Note that this processes
+        logs in an ordering where:
 
-		1) logs to different tags are never interleaved
-		2) logs to x.y come before logs to x.y.z for all z
-		3) logs to x.y come before x.z whenever y < z
+        1) logs to different tags are never interleaved
+        2) logs to x.y come before logs to x.y.z for all z
+        3) logs to x.y come before x.z whenever y < z
 
-		Note that this will in general not be the same as the
-		chronological ordering of the logs. However, if a chronological
-		ordering is desired that one can be reconstructed from the
-		status log by looking at timestamp lines."""
-		log_list = self._process_log_dict(self.logs)
-		for line in log_list:
-			self.job._record_prerendered(line + '\n')
-		if log_list:
-			self.last_line = log_list[-1]
+        Note that this will in general not be the same as the
+        chronological ordering of the logs. However, if a chronological
+        ordering is desired, one can be reconstructed from the
+        status log by looking at timestamp lines."""
+        log_list = self._process_log_dict(self.logs)
+        for line in log_list:
+            self.job._record_prerendered(line + '\n')
+        if log_list:
+            self.last_line = log_list[-1]
 
 
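For example, _process_log_dict() flattens a nested tag dictionary in exactly the order the docstring above describes; a sketch (note the method pops entries, so the dictionary is emptied as a side effect):

    logs = {'logs': ['root line'],
            1: {'logs': ['tag 1 line'],
                2: {'logs': ['tag 1.2 line']}},
            3: {'logs': ['tag 3 line']}}
    # flattening order: root, 1, 1.2, 3, i.e.
    # ['root line', 'tag 1 line', 'tag 1.2 line', 'tag 3 line']
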
-	def _process_quoted_line(self, tag, line):
-		"""Process a line quoted with an AUTOTEST_STATUS flag. If the
-		tag is blank then we want to push out all the data we've been
-		building up in self.logs, and then the newest line. If the
-		tag is not blank, then push the line into the logs for handling
-		later."""
-		print line
-		if tag == "":
-			self._process_logs()
-			self.job._record_prerendered(line + '\n')
-			self.last_line = line
-		else:
-			tag_parts = [int(x) for x in tag.split(".")]
-			log_dict = self.logs
-			for part in tag_parts:
-				log_dict = log_dict.setdefault(part, {})
-			log_list = log_dict.setdefault("logs", [])
-			log_list.append(line)
+    def _process_quoted_line(self, tag, line):
+        """Process a line quoted with an AUTOTEST_STATUS flag. If the
+        tag is blank then we want to push out all the data we've been
+        building up in self.logs, and then the newest line. If the
+        tag is not blank, then push the line into the logs for handling
+        later."""
+        print line
+        if tag == "":
+            self._process_logs()
+            self.job._record_prerendered(line + '\n')
+            self.last_line = line
+        else:
+            tag_parts = [int(x) for x in tag.split(".")]
+            log_dict = self.logs
+            for part in tag_parts:
+                log_dict = log_dict.setdefault(part, {})
+            log_list = log_dict.setdefault("logs", [])
+            log_list.append(line)
 
 
-	def _process_line(self, line):
-		"""Write out a line of data to the appropriate stream. Status
-		lines sent by autotest will be prepended with
-		"AUTOTEST_STATUS", and all other lines are ssh error
-		messages."""
-		match = self.parser.search(line)
-		if match:
-			tag, line = match.groups()
-			self._process_quoted_line(tag, line)
-		else:
-			print line
+    def _process_line(self, line):
+        """Write out a line of data to the appropriate stream. Status
+        lines sent by autotest will be prepended with
+        "AUTOTEST_STATUS", and all other lines are ssh error
+        messages."""
+        match = self.parser.search(line)
+        if match:
+            tag, line = match.groups()
+            self._process_quoted_line(tag, line)
+        else:
+            print line
 
 
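Concretely, a status line forwarded from the client looks like the following: everything after the second colon is a pre-rendered status log line, and the tag ('1.2' here, purely illustrative) selects the nesting bucket used by _process_quoted_line():

    line = 'AUTOTEST_STATUS:1.2:GOOD\tdbench\tdbench\tcompleted successfully'
    # parser.search(line).groups() -> ('1.2', <the rendered status line>)
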
-	def _format_warnings(self, last_line, warnings):
-		# use the indentation of whatever the last log line was
-		indent = self.extract_indent.match(last_line).group(1)
-		# if the last line starts a new group, add an extra indent
-		if last_line.lstrip('\t').startswith("START\t"):
-			indent += '\t'
-		return [self.job._render_record("WARN", None, None, msg,
-						timestamp, indent).rstrip('\n')
-			for timestamp, msg in warnings]
+    def _format_warnings(self, last_line, warnings):
+        # use the indentation of whatever the last log line was
+        indent = self.extract_indent.match(last_line).group(1)
+        # if the last line starts a new group, add an extra indent
+        if last_line.lstrip('\t').startswith("START\t"):
+            indent += '\t'
+        return [self.job._render_record("WARN", None, None, msg,
+                                        timestamp, indent).rstrip('\n')
+                for timestamp, msg in warnings]
 
 
-	def _process_warnings(self, last_line, log_dict, warnings):
-		if log_dict.keys() in ([], ["logs"]):
-			# there are no sub-jobs, just append the warnings here
-			warnings = self._format_warnings(last_line, warnings)
-			log_list = log_dict.setdefault("logs", [])
-			log_list += warnings
-			for warning in warnings:
-				sys.stdout.write(warning + '\n')
-		else:
-			# there are sub-jobs, so put the warnings in there
-			log_list = log_dict.get("logs", [])
-			if log_list:
-				last_line = log_list[-1]
-			for key in sorted(log_dict.iterkeys()):
-				if key != "logs":
-					self._process_warnings(last_line,
-							       log_dict[key],
-							       warnings)
+    def _process_warnings(self, last_line, log_dict, warnings):
+        if log_dict.keys() in ([], ["logs"]):
+            # there are no sub-jobs, just append the warnings here
+            warnings = self._format_warnings(last_line, warnings)
+            log_list = log_dict.setdefault("logs", [])
+            log_list += warnings
+            for warning in warnings:
+                sys.stdout.write(warning + '\n')
+        else:
+            # there are sub-jobs, so put the warnings in there
+            log_list = log_dict.get("logs", [])
+            if log_list:
+                last_line = log_list[-1]
+            for key in sorted(log_dict.iterkeys()):
+                if key != "logs":
+                    self._process_warnings(last_line,
+                                           log_dict[key],
+                                           warnings)
 
 
-	def write(self, data):
-		# first check for any new console warnings
-		warnings = self.job._read_warnings()
-		self._process_warnings(self.last_line, self.logs, warnings)
-		# now process the newest data written out
-		data = self.leftover + data
-		lines = data.split("\n")
-		# process every line but the last one
-		for line in lines[:-1]:
-			self._process_line(line)
-		# save the last line for later processing
-		# since we may not have the whole line yet
-		self.leftover = lines[-1]
+    def write(self, data):
+        # first check for any new console warnings
+        warnings = self.job._read_warnings()
+        self._process_warnings(self.last_line, self.logs, warnings)
+        # now process the newest data written out
+        data = self.leftover + data
+        lines = data.split("\n")
+        # process every line but the last one
+        for line in lines[:-1]:
+            self._process_line(line)
+        # save the last line for later processing
+        # since we may not have the whole line yet
+        self.leftover = lines[-1]
 
 
-	def flush(self):
-		sys.stdout.flush()
+    def flush(self):
+        sys.stdout.flush()
 
 
-	def close(self):
-		if self.leftover:
-			self._process_line(self.leftover)
-		self._process_logs()
-		self.flush()
+    def close(self):
+        if self.leftover:
+            self._process_line(self.leftover)
+        self._process_logs()
+        self.flush()
 
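client_logger is meant to stand in for a real output stream when the autotest client is run remotely; only the write(), flush() and close() methods above are required. A hedged sketch of the intended wiring (the job object, providing _read_warnings() and _record_prerendered(), is assumed to come from the surrounding server code):

    logger = client_logger(job)
    # an empty tag flushes buffered logs and records the line immediately
    logger.write('AUTOTEST_STATUS::GOOD\t----\tboot\tkernel booted\n')
    logger.close()
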
 # site_server_job.py may be non-existent or empty, make sure that an
 # appropriate site_server_job class is created nevertheless
 try:
-	from autotest_lib.server.site_server_job import site_server_job
+    from autotest_lib.server.site_server_job import site_server_job
 except ImportError:
-	class site_server_job(base_server_job):
-		pass
-	
+    class site_server_job(base_server_job):
+        pass
+
 class server_job(site_server_job):
-	pass
+    pass
diff --git a/server/site_autoserv_parser.py b/server/site_autoserv_parser.py
index f8703f7..03b8c4e 100644
--- a/server/site_autoserv_parser.py
+++ b/server/site_autoserv_parser.py
@@ -9,6 +9,6 @@
 
 
 class site_autoserv_parser(base_autoserv_parser):
-	def get_usage(self):
-		usage = super(site_autoserv_parser, self).get_usage()
-		return usage+add_usage
+    def get_usage(self):
+        usage = super(site_autoserv_parser, self).get_usage()
+        return usage+add_usage
diff --git a/server/source_kernel.py b/server/source_kernel.py
index 8d27d11..ae7f032 100644
--- a/server/source_kernel.py
+++ b/server/source_kernel.py
@@ -5,7 +5,7 @@
 """
 This module defines the SourceKernel class
 
-	SourceKernel: an linux kernel built from source
+        SourceKernel: a linux kernel built from source
 """
 
 __author__ = """
@@ -21,60 +21,60 @@
 
 
 class SourceKernel(kernel.Kernel):
-	"""
-	This class represents a linux kernel built from source.
-	
-	It is used to obtain a built kernel or create one from source and 
-	install it on a Host.
-	
-	Implementation details:
-	This is a leaf class in an abstract class hierarchy, it must 
-	implement the unimplemented methods in parent classes.
-	"""
-	def __init__(self, k):
-		super(kernel.Kernel, self).__init__()
-		self.__kernel = k
-		self.__patch_list = []
-		self.__config_file = None
-		self.__autotest = autotest.Autotest()
+    """
+    This class represents a linux kernel built from source.
+
+    It is used to obtain a built kernel or create one from source and
+    install it on a Host.
+
+    Implementation details:
+    This is a leaf class in an abstract class hierarchy; it must
+    implement the unimplemented methods in parent classes.
+    """
+    def __init__(self, k):
+        super(SourceKernel, self).__init__()
+        self.__kernel = k
+        self.__patch_list = []
+        self.__config_file = None
+        self.__autotest = autotest.Autotest()
 
 
-	def configure(self, configFile):
-		self.__config_file = configFile
+    def configure(self, configFile):
+        self.__config_file = configFile
 
 
-	def patch(self, patchFile):
-		self.__patch_list.append(patchFile)
+    def patch(self, patchFile):
+        self.__patch_list.append(patchFile)
 
 
-	def build(self, host):
-		ctlfile = self.__control_file(self.__kernel, self.__patch_list,
-					    self.__config_file)
-		self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
+    def build(self, host):
+        ctlfile = self.__control_file(self.__kernel, self.__patch_list,
+                                      self.__config_file)
+        self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
 
 
-	def install(self, host):
-		self.__autotest.install(host)
-		ctlfile = ("testkernel = job.kernel('%s')\n"
-			   "testkernel.install()\n"
-			   "testkernel.add_to_bootloader()\n" %(self.__kernel))
-		self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
-		
+    def install(self, host):
+        self.__autotest.install(host)
+        ctlfile = ("testkernel = job.kernel('%s')\n"
+                   "testkernel.install()\n"
+                   "testkernel.add_to_bootloader()\n" %(self.__kernel))
+        self.__autotest.run(ctlfile, host.get_tmp_dir(), host)
 
-	def __control_file(self, kernel, patch_list, config):
-		ctl = ("testkernel = job.kernel('%s')\n" % kernel)
 
-		if len(patch_list):
-			patches = ', '.join(["'%s'" % x for x in patch_list])
-			ctl += "testkernel.patch(%s)\n" % patches
+    def __control_file(self, kernel, patch_list, config):
+        ctl = ("testkernel = job.kernel('%s')\n" % kernel)
 
-		if config:
-			ctl += "testkernel.config('%s')\n" % config
-		else:
-			ctl += "testkernel.config('', None, True)\n"
+        if len(patch_list):
+            patches = ', '.join(["'%s'" % x for x in patch_list])
+            ctl += "testkernel.patch(%s)\n" % patches
 
-		ctl += "testkernel.build()\n"
+        if config:
+            ctl += "testkernel.config('%s')\n" % config
+        else:
+            ctl += "testkernel.config('', None, True)\n"
 
-		# copy back to server
+        ctl += "testkernel.build()\n"
 
-		return ctl
+        # copy back to server
+
+        return ctl
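For reference, the control file assembled by __control_file() for a kernel with two patches and an explicit config would read as follows (paths illustrative):

    testkernel = job.kernel('/usr/local/src/linux-2.6.18.tar.bz2')
    testkernel.patch('fix-a.patch', 'fix-b.patch')
    testkernel.config('/usr/local/src/config')
    testkernel.build()
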
diff --git a/server/standalone_profiler.py b/server/standalone_profiler.py
index 81bf769..f681ff4 100644
--- a/server/standalone_profiler.py
+++ b/server/standalone_profiler.py
@@ -15,35 +15,35 @@
 
 
 def generate_test(machines, hostname, profilers, timeout_start, timeout_stop,
-			timeout_sync=180):
-	control_file = []
-	for profiler in profilers:
-		control_file.append("job.profilers.add(%s)"
-					% str(profiler)[1:-1])  # Remove parens
+                  timeout_sync=180):
+    control_file = []
+    for profiler in profilers:
+        control_file.append("job.profilers.add(%s)"
+                                % str(profiler)[1:-1])  # Remove parens
 
-	control_file.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
-			% (timeout_sync, timeout_start, timeout_stop,
-				hostname, "PROF_MASTER", str(machines)))
+    control_file.append("job.run_test('barriertest',%d,%d,%d,'%s','%s',%s)"
+                    % (timeout_sync, timeout_start, timeout_stop,
+                            hostname, "PROF_MASTER", str(machines)))
 
-	for profiler in profilers:
-		control_file.append("job.profilers.delete('%s')" % profiler[0])
+    for profiler in profilers:
+        control_file.append("job.profilers.delete('%s')" % profiler[0])
 
-	return "\n".join(control_file)
+    return "\n".join(control_file)
 
 
 def wait_for_profilers(machines, timeout = 300):
-	sb = barrier.barrier("PROF_MASTER", "sync_profilers",
-		timeout, port=63100)
-	sb.rendevous_servers("PROF_MASTER", *machines)
+    sb = barrier.barrier("PROF_MASTER", "sync_profilers",
+            timeout, port=63100)
+    sb.rendevous_servers("PROF_MASTER", *machines)
 
 
 def start_profilers(machines, timeout = 120):
-	sb = barrier.barrier("PROF_MASTER", "start_profilers",
-		timeout, port=63100)
-	sb.rendevous_servers("PROF_MASTER", *machines)
+    sb = barrier.barrier("PROF_MASTER", "start_profilers",
+            timeout, port=63100)
+    sb.rendevous_servers("PROF_MASTER", *machines)
 
 
 def stop_profilers(machines, timeout = 120):
-	sb = barrier.barrier("PROF_MASTER", "stop_profilers",
-		timeout, port=63100)
-	sb.rendevous_servers("PROF_MASTER", *machines)
+    sb = barrier.barrier("PROF_MASTER", "stop_profilers",
+            timeout, port=63100)
+    sb.rendevous_servers("PROF_MASTER", *machines)
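As a worked example, generate_test(['host1', 'host2'], 'master', [('oprofile',)], 60, 120) returns the control file below. The trailing comma inside add() falls out of stripping the parens from the tuple repr:

    job.profilers.add('oprofile',)
    job.run_test('barriertest',180,60,120,'master','PROF_MASTER',['host1', 'host2'])
    job.profilers.delete('oprofile')
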
diff --git a/server/status.py b/server/status.py
index 382626a..b0f4408 100644
--- a/server/status.py
+++ b/server/status.py
@@ -3,194 +3,194 @@
 
 
 class Machine:
-	"""
-	Represents the current state of a machine. Possible values are:
-		TESTING     currently running a test
-		REBOOTING   currently rebooting
-		BROKEN      busted somehow (e.g. reboot timed out)
-		OTHER       none of the above
+    """
+    Represents the current state of a machine. Possible values are:
+            TESTING     currently running a test
+            REBOOTING   currently rebooting
+            BROKEN      busted somehow (e.g. reboot timed out)
+            OTHER       none of the above
 
-	The implementation is basically that of a state machine. From an
-	external point of view the only relevant attributes are:
-	        details     text description of the current status
-		test_count  number of tests run
-	"""
-	def __init__(self):
-		self.state = "OTHER"
-		self.details = "Running"
-		self.test_name = ""
-		self.test_count = 0
+    The implementation is basically that of a state machine. From an
+    external point of view the only relevant attributes are:
+            details     text description of the current status
+            test_count  number of tests run
+    """
+    def __init__(self):
+        self.state = "OTHER"
+        self.details = "Running"
+        self.test_name = ""
+        self.test_count = 0
 
 
-	def process_line(self, line):
-		self.handlers[self.state](self, line)
+    def process_line(self, line):
+        self.handlers[self.state](self, line)
 
 
-	def _OTHER_handler(self, line):
-		match = self.job_start.match(line)
-		if match and match.group(2) != "----":
-			self.state = "TESTING"
-			self.tab_level = len(match.group(1))
-			self.test_name = match.group(2)
-			self.test_status = "GOOD"
-			self.details = "Running %s" % self.test_name
-			return
+    def _OTHER_handler(self, line):
+        match = self.job_start.match(line)
+        if match and match.group(2) != "----":
+            self.state = "TESTING"
+            self.tab_level = len(match.group(1))
+            self.test_name = match.group(2)
+            self.test_status = "GOOD"
+            self.details = "Running %s" % self.test_name
+            return
 
-		match = self.reboot_start.match(line)
-		if match:
-			self.boot_status = match.group(1)
-			if self.worse_status("GOOD", self.boot_status) == "GOOD":
-				self.state = "REBOOTING"
-				self.details = "Rebooting"
-			else:
-				self.state = "BROKEN"
-				self.details = "Reboot failed - machine broken"
-			return
+        match = self.reboot_start.match(line)
+        if match:
+            self.boot_status = match.group(1)
+            if self.worse_status("GOOD", self.boot_status) == "GOOD":
+                self.state = "REBOOTING"
+                self.details = "Rebooting"
+            else:
+                self.state = "BROKEN"
+                self.details = "Reboot failed - machine broken"
+            return
 
 
-	def _TESTING_handler(self, line):
-		match = self.job_status.match(line)
-		if match:
-			if len(match.group(1)) != self.tab_level + 1:
-				return   # we don't care about subgroups
-			if self.test_name != match.group(3):
-				return   # we don't care about other tests
-			self.test_status = self.worse_status(self.test_status,
-							     match.group(2))
-			self.details = "Running %s: %s" % (self.test_name,
-							    match.group(4))
-			return
+    def _TESTING_handler(self, line):
+        match = self.job_status.match(line)
+        if match:
+            if len(match.group(1)) != self.tab_level + 1:
+                return   # we don't care about subgroups
+            if self.test_name != match.group(3):
+                return   # we don't care about other tests
+            self.test_status = self.worse_status(self.test_status,
+                                                 match.group(2))
+            self.details = "Running %s: %s" % (self.test_name,
+                                                match.group(4))
+            return
 
-		match = self.job_end.match(line)
-		if match:
-			if len(match.group(1)) != self.tab_level:
-				return   # we don't care about subgroups
-			if self.test_name != match.group(3):
-				raise ValueError('Group START and END name mismatch')
-			self.state = "OTHER"
-			self.test_status = self.worse_status(self.test_status,
-							     match.group(2))
-			self.test_name = ""
-			del self.test_status
-			self.details = "Running"
-			self.test_count += 1
-			return
+        match = self.job_end.match(line)
+        if match:
+            if len(match.group(1)) != self.tab_level:
+                return   # we don't care about subgroups
+            if self.test_name != match.group(3):
+                raise ValueError('Group START and END name mismatch')
+            self.state = "OTHER"
+            self.test_status = self.worse_status(self.test_status,
+                                                 match.group(2))
+            self.test_name = ""
+            del self.test_status
+            self.details = "Running"
+            self.test_count += 1
+            return
 
 
-	def _REBOOTING_handler(self, line):
-		match = self.reboot_done.match(line)
-		if match:
-			status = self.worse_status(self.boot_status,
-						   match.group(1))
-			del self.boot_status
-			if status == "GOOD":
-				self.state = "OTHER"
-				self.details = "Running"
-			else:
-				self.state = "BROKEN"
-				self.details = "Reboot failed - machine broken"
-			return
+    def _REBOOTING_handler(self, line):
+        match = self.reboot_done.match(line)
+        if match:
+            status = self.worse_status(self.boot_status,
+                                       match.group(1))
+            del self.boot_status
+            if status == "GOOD":
+                self.state = "OTHER"
+                self.details = "Running"
+            else:
+                self.state = "BROKEN"
+                self.details = "Reboot failed - machine broken"
+            return
 
 
-	def _BROKEN_handler(self, line):
-		pass    # just do nothing - we're broken and staying broken
+    def _BROKEN_handler(self, line):
+        pass    # just do nothing - we're broken and staying broken
 
 
-	handlers = {"OTHER": _OTHER_handler,
-		    "TESTING": _TESTING_handler,
-		    "REBOOTING": _REBOOTING_handler,
-		    "BROKEN": _BROKEN_handler}
+    handlers = {"OTHER": _OTHER_handler,
+                "TESTING": _TESTING_handler,
+                "REBOOTING": _REBOOTING_handler,
+                "BROKEN": _BROKEN_handler}
 
 
-	status_list = ["GOOD", "WARN", "FAIL", "ABORT", "ERROR"]
-	order_dict = {None: -1}
-	order_dict.update((status, i)
-			  for i, status in enumerate(status_list))
+    status_list = ["GOOD", "WARN", "FAIL", "ABORT", "ERROR"]
+    order_dict = {None: -1}
+    order_dict.update((status, i)
+                      for i, status in enumerate(status_list))
 
 
-	job_start = re.compile(r"^(\t*)START\t----\t([^\t]+).*$")
-	job_status = re.compile(r"^(\t*)(%s)\t([^\t]+)\t(?:[^\t]+).*\t([^\t]+)$" %
-				"|".join(status_list))
-	job_end = re.compile(r"^(\t*)END (%s)\t----\t([^\t]+).*$" %
-			     "|".join(status_list))
-	reboot_start = re.compile(r"^\t?(%s)\t[^\t]+\treboot\.start.*$" %
-				  "|".join(status_list))
-	reboot_done = re.compile(r"^\t?(%s)\t[^\t]+\treboot\.verify.*$" %
-				 "|".join(status_list))
+    job_start = re.compile(r"^(\t*)START\t----\t([^\t]+).*$")
+    job_status = re.compile(r"^(\t*)(%s)\t([^\t]+)\t(?:[^\t]+).*\t([^\t]+)$" %
+                            "|".join(status_list))
+    job_end = re.compile(r"^(\t*)END (%s)\t----\t([^\t]+).*$" %
+                         "|".join(status_list))
+    reboot_start = re.compile(r"^\t?(%s)\t[^\t]+\treboot\.start.*$" %
+                              "|".join(status_list))
+    reboot_done = re.compile(r"^\t?(%s)\t[^\t]+\treboot\.verify.*$" %
+                             "|".join(status_list))
 
-	@classmethod
-	def worse_status(cls, old_status, new_status):
-		if cls.order_dict[new_status] > cls.order_dict[old_status]:
-			return new_status
-		else:
-			return old_status
+    @classmethod
+    def worse_status(cls, old_status, new_status):
+        if cls.order_dict[new_status] > cls.order_dict[old_status]:
+            return new_status
+        else:
+            return old_status
 
 
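A short walkthrough of the state machine on a minimal, single-test status log (lines shown without trailing newlines for clarity):

    m = Machine()
    m.process_line('START\t----\tdbench')                # OTHER -> TESTING
    m.process_line('\tGOOD\tdbench\tdbench\tcompleted')  # updates details
    m.process_line('END GOOD\t----\tdbench')             # TESTING -> OTHER
    # now m.test_count == 1 and m.details == 'Running'
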
 def parse_status(status_log):
-	"""\
-	Parse the status from a single status log.
-	Do not use with status logs from multi-machine tests.
-	"""
-	parser = Machine()
-	for line in file(status_log):
-		parser.process_line(line)
-	result = {
-	    "status": parser.details,
-	    "test_on": parser.test_name,
-	    "test_num_complete": parser.test_count
-	    }
-	return result
+    """\
+    Parse the status from a single status log.
+    Do not use with status logs from multi-machine tests.
+    """
+    parser = Machine()
+    for line in file(status_log):
+        parser.process_line(line)
+    result = {
+        "status": parser.details,
+        "test_on": parser.test_name,
+        "test_num_complete": parser.test_count
+        }
+    return result
 
 
 def _file_iterator(filename):
-	"""\
-	Return an iterator over file(filename), or an empty iterator
-	if the file does not exist.
-	"""
-	if os.path.exists(filename):
-		return iter(file(filename))
-	else:
-		return ()
+    """\
+    Return an iterator over file(filename), or an empty iterator
+    if the file does not exist.
+    """
+    if os.path.exists(filename):
+        return iter(file(filename))
+    else:
+        return ()
 
 
 def parse_machine_status(root_path, name):
-	"""Parse the status for one machine (of a multi-machine test)"""
-	general_log = _file_iterator(os.path.join(root_path, "status.log"))
-	machine_log = _file_iterator(os.path.join(root_path, name, "status.log"))
-	timestamp_regex = re.compile("\ttimestamp=(\d+)")
-	# collect all the lines from both the root & machine-specific log
-	lines = []
-	timestamp = 0
-	for line in itertools.chain(general_log, machine_log):
-		timestamp_match = timestamp_regex.search(line)
-		# if the log line has a timestamp, use it
-		# otherwise, just use the timestamp from the previous line
-		if timestamp_match:
-			timestamp = int(timestamp_match.group(1))
-		lines.append((timestamp, line))
-	lines.sort()  # this will sort the lines by timestamp
-	# now actually run the lines through the parser
-	parser = Machine()
-	for timestamp, line in lines:
-		parser.process_line(line)
-	return {
-	    "status": parser.details,
-	    "test_on": parser.test_name,
-	    "test_num_complete": parser.test_count
-	    }
+    """Parse the status for one machine (of a multi-machine test)"""
+    general_log = _file_iterator(os.path.join(root_path, "status.log"))
+    machine_log = _file_iterator(os.path.join(root_path, name, "status.log"))
+    timestamp_regex = re.compile(r"\ttimestamp=(\d+)")
+    # collect all the lines from both the root & machine-specific log
+    lines = []
+    timestamp = 0
+    for line in itertools.chain(general_log, machine_log):
+        timestamp_match = timestamp_regex.search(line)
+        # if the log line has a timestamp, use it
+        # otherwise, just use the timestamp from the previous line
+        if timestamp_match:
+            timestamp = int(timestamp_match.group(1))
+        lines.append((timestamp, line))
+    lines.sort()  # this will sort the lines by timestamp
+    # now actually run the lines through the parser
+    parser = Machine()
+    for timestamp, line in lines:
+        parser.process_line(line)
+    return {
+        "status": parser.details,
+        "test_on": parser.test_name,
+        "test_num_complete": parser.test_count
+        }
 
 
 def parse_multimachine_status(root_path, machine_names):
-	"""Parse the status for a set of machines."""
-	results = {}
-	for name in machine_names:
-		results[name] = parse_machine_status(root_path, name)
-	return results
+    """Parse the status for a set of machines."""
+    results = {}
+    for name in machine_names:
+        results[name] = parse_machine_status(root_path, name)
+    return results
 
 
 if __name__ == "__main__":
- 	args = sys.argv[1:]
- 	if len(args) != 1:
- 		print "USAGE: status.py status_log"
- 		sys.exit(1)
-	print parse_status(args[0])
+    args = sys.argv[1:]
+    if len(args) != 1:
+        print "USAGE: status.py status_log"
+        sys.exit(1)
+    print parse_status(args[0])
diff --git a/server/subcommand.py b/server/subcommand.py
index 85898a1..8b29b82 100644
--- a/server/subcommand.py
+++ b/server/subcommand.py
@@ -6,196 +6,196 @@
 
 
 def parallel(tasklist, timeout=None):
-	"""Run an set of predefined subcommands in parallel"""
-	pids = []
-	run_error = False
-	for task in tasklist:
-		task.fork_start()
+    """Run an set of predefined subcommands in parallel"""
+    pids = []
+    run_error = False
+    for task in tasklist:
+        task.fork_start()
 
-	remaining_timeout = None
-	if timeout:
-		endtime = time.time() + timeout
+    remaining_timeout = None
+    if timeout:
+        endtime = time.time() + timeout
 
-	for task in tasklist:
-		if timeout:
-			remaining_timeout = max(endtime - time.time(), 1)
-		try:
-			status = task.fork_waitfor(remaining_timeout)
-		except error.AutoservSubcommandError:
-			run_error = True
-		else:
-			if status != 0:
-				run_error = True
+    for task in tasklist:
+        if timeout:
+            remaining_timeout = max(endtime - time.time(), 1)
+        try:
+            status = task.fork_waitfor(remaining_timeout)
+        except error.AutoservSubcommandError:
+            run_error = True
+        else:
+            if status != 0:
+                run_error = True
 
-	if run_error:
-		raise error.AutoservError('One or more subcommands failed')
+    if run_error:
+        raise error.AutoservError('One or more subcommands failed')
 
 
 def parallel_simple(function, arglist, log=True, timeout=None):
-	"""Each element in the arglist used to create a subcommand object,
-	where that arg is used both as a subdir name, and a single argument
-	to pass to "function".
-	We create a subcommand object for each element in the list,
-	then execute those subcommand objects in parallel."""
+    """Each element in the arglist used to create a subcommand object,
+    where that arg is used both as a subdir name, and a single argument
+    to pass to "function".
+    We create a subcommand object for each element in the list,
+    then execute those subcommand objects in parallel."""
 
-	# Bypass the multithreading if only one machine.
-	if len (arglist) == 1:
-		function(arglist[0])
-		return
-	
-	subcommands = []
-	for arg in arglist:
-		args = [arg]
-		if log:
-			subdir = str(arg)
-		else:
-			subdir = None
-		subcommands.append(subcommand(function, args, subdir))
-	parallel(subcommands, timeout)
+    # Bypass the multithreading if only one machine.
+    if len(arglist) == 1:
+        function(arglist[0])
+        return
+
+    subcommands = []
+    for arg in arglist:
+        args = [arg]
+        if log:
+            subdir = str(arg)
+        else:
+            subdir = None
+        subcommands.append(subcommand(function, args, subdir))
+    parallel(subcommands, timeout)
 
 
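Each argument doubles as both the results subdirectory name and the single argument passed to the function, and every call runs in its own forked child. A usage sketch:

    def reboot_machine(machine):
        print 'rebooting %s' % machine      # runs in a forked subprocess

    parallel_simple(reboot_machine, ['host1', 'host2'], log=True, timeout=600)
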
 def _where_art_thy_filehandles():
-	os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
+    os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
 
 
 def _print_to_tty(string):
-	open('/dev/tty', 'w').write(string + '\n')
+    open('/dev/tty', 'w').write(string + '\n')
 
 
 def _redirect_stream(fd, output):
-	newfd = os.open(output, os.O_WRONLY | os.O_CREAT)
-	os.dup2(newfd, fd)
-	os.close(newfd)
-	if fd == 1:
-		sys.stdout = os.fdopen(fd, 'w')
-	if fd == 2:
-		sys.stderr = os.fdopen(fd, 'w')
+    newfd = os.open(output, os.O_WRONLY | os.O_CREAT)
+    os.dup2(newfd, fd)
+    os.close(newfd)
+    if fd == 1:
+        sys.stdout = os.fdopen(fd, 'w')
+    if fd == 2:
+        sys.stderr = os.fdopen(fd, 'w')
 
 
 def _redirect_stream_tee(fd, output, tag):
-	"""Use the low-level fork & pipe operations here to get a fd,
-	not a filehandle. This ensures that we get both the 
-	filehandle and fd for stdout/stderr redirected correctly."""
-	r, w = os.pipe()
-	pid = os.fork()
-	if pid:                 		# Parent
-		os.dup2(w, fd)
-		os.close(r)
-		os.close(w)
-		if fd == 1:
-			sys.stdout = os.fdopen(fd, 'w', 1)
-		if fd == 2:
-			sys.stderr = os.fdopen(fd, 'w', 1)
-		return
-	else:					# Child
-		os.close(w)
-		log = open(output, 'w')
-		f = os.fdopen(r, 'r')
-		for line in iter(f.readline, ''):
-			# Tee straight to file
-			log.write(line)
-			log.flush()
-			# Prepend stdout with the tag
-			print tag + ' : ' + line,
-			sys.stdout.flush()
-		log.close()
-		os._exit(0)
+    """Use the low-level fork & pipe operations here to get a fd,
+    not a filehandle. This ensures that we get both the
+    filehandle and fd for stdout/stderr redirected correctly."""
+    r, w = os.pipe()
+    pid = os.fork()
+    if pid:                                 # Parent
+        os.dup2(w, fd)
+        os.close(r)
+        os.close(w)
+        if fd == 1:
+            sys.stdout = os.fdopen(fd, 'w', 1)
+        if fd == 2:
+            sys.stderr = os.fdopen(fd, 'w', 1)
+        return
+    else:                                   # Child
+        os.close(w)
+        log = open(output, 'w')
+        f = os.fdopen(r, 'r')
+        for line in iter(f.readline, ''):
+            # Tee straight to file
+            log.write(line)
+            log.flush()
+            # Prepend stdout with the tag
+            print tag + ' : ' + line,
+            sys.stdout.flush()
+        log.close()
+        os._exit(0)
 
 
 class subcommand:
-	def __init__(self, func, args, subdir = None, stdprint = True):
-		# func(args) - the subcommand to run
-		# subdir     - the subdirectory to log results in
-		# stdprint   - whether to print results to stdout/stderr
-		if subdir:
-			self.subdir = os.path.abspath(subdir)
-			if not os.path.exists(self.subdir):
-				os.mkdir(self.subdir)
-			self.debug = os.path.join(self.subdir, 'debug')
-			if not os.path.exists(self.debug):
-				os.mkdir(self.debug)
-			self.stdout = os.path.join(self.debug, 'stdout')
-			self.stderr = os.path.join(self.debug, 'stderr')
-		else:
-			self.subdir = None
-			self.debug = '/dev/null'
-			self.stdout = '/dev/null'
-			self.stderr = '/dev/null'
+    def __init__(self, func, args, subdir=None, stdprint=True):
+        # func(args) - the subcommand to run
+        # subdir     - the subdirectory to log results in
+        # stdprint   - whether to print results to stdout/stderr
+        if subdir:
+            self.subdir = os.path.abspath(subdir)
+            if not os.path.exists(self.subdir):
+                os.mkdir(self.subdir)
+            self.debug = os.path.join(self.subdir, 'debug')
+            if not os.path.exists(self.debug):
+                os.mkdir(self.debug)
+            self.stdout = os.path.join(self.debug, 'stdout')
+            self.stderr = os.path.join(self.debug, 'stderr')
+        else:
+            self.subdir = None
+            self.debug = '/dev/null'
+            self.stdout = '/dev/null'
+            self.stderr = '/dev/null'
 
-		self.func = func
-		self.args = args
-		self.lambda_function = lambda: func(*args)
-		self.pid = None
-		self.stdprint = stdprint
+        self.func = func
+        self.args = args
+        self.lambda_function = lambda: func(*args)
+        self.pid = None
+        self.stdprint = stdprint
 
 
-	def redirect_output(self):
-		if self.stdprint:
-			if self.subdir:
-				tag = os.path.basename(self.subdir)
-				_redirect_stream_tee(1, self.stdout, tag)
-				_redirect_stream_tee(2, self.stderr, tag)
-		else:
-			_redirect_stream(1, self.stdout)
-			_redirect_stream(2, self.stderr)
+    def redirect_output(self):
+        if self.stdprint:
+            if self.subdir:
+                tag = os.path.basename(self.subdir)
+                _redirect_stream_tee(1, self.stdout, tag)
+                _redirect_stream_tee(2, self.stderr, tag)
+        else:
+            _redirect_stream(1, self.stdout)
+            _redirect_stream(2, self.stderr)
 
 
-	def fork_start(self):
-		sys.stdout.flush()
-		sys.stderr.flush()
-		self.pid = os.fork()
+    def fork_start(self):
+        sys.stdout.flush()
+        sys.stderr.flush()
+        self.pid = os.fork()
 
-		if self.pid:				# I am the parent
-			return
+        if self.pid:                            # I am the parent
+            return
 
-		# We are the child from this point on. Never return.
-		signal.signal(signal.SIGTERM, signal.SIG_DFL) # clear handler
-		if self.subdir:
-			os.chdir(self.subdir)
-		self.redirect_output()
+        # We are the child from this point on. Never return.
+        signal.signal(signal.SIGTERM, signal.SIG_DFL) # clear handler
+        if self.subdir:
+            os.chdir(self.subdir)
+        self.redirect_output()
 
-		try:
-			self.lambda_function()
+        try:
+            self.lambda_function()
 
-		except:
-			traceback.print_exc()
-			sys.stdout.flush()
-			sys.stderr.flush()
-			os._exit(1)
+        except:
+            traceback.print_exc()
+            sys.stdout.flush()
+            sys.stderr.flush()
+            os._exit(1)
 
-		sys.stdout.flush()
-		sys.stderr.flush()
-		os._exit(0)
+        sys.stdout.flush()
+        sys.stderr.flush()
+        os._exit(0)
 
 
-	def fork_waitfor(self, timeout=None):
-		if not timeout:
-			(pid, status) = os.waitpid(self.pid, 0)
-		else:
-			pid = None
-			start_time = time.time()
-			while time.time() <= start_time + timeout:
-				(pid, status) = os.waitpid(self.pid, os.WNOHANG)
-				if pid:
-					break
-				time.sleep(1)
+    def fork_waitfor(self, timeout=None):
+        if not timeout:
+            (pid, status) = os.waitpid(self.pid, 0)
+        else:
+            pid = None
+            start_time = time.time()
+            while time.time() <= start_time + timeout:
+                (pid, status) = os.waitpid(self.pid, os.WNOHANG)
+                if pid:
+                    break
+                time.sleep(1)
 
-			if not pid:
-				utils.nuke_pid(self.pid)
-				print "subcommand failed pid %d" % self.pid
-				print "%s" % (self.func,)
-				print "timeout after %ds" % timeout
-				print
-				return None
+            if not pid:
+                utils.nuke_pid(self.pid)
+                print "subcommand failed pid %d" % self.pid
+                print "%s" % (self.func,)
+                print "timeout after %ds" % timeout
+                print
+                return None
 
-		if status != 0:
-			print "subcommand failed pid %d" % pid
-			print "%s" % (self.func,)
-			print "rc=%d" % status
-			print
-			if os.path.exists(self.stderr):
-				for line in open(self.stderr).readlines():
-					print line,
-			print "\n--------------------------------------------\n"
-			raise error.AutoservSubcommandError(self.func, status)
-		return status
+        if status != 0:
+            print "subcommand failed pid %d" % pid
+            print "%s" % (self.func,)
+            print "rc=%d" % status
+            print
+            if os.path.exists(self.stderr):
+                for line in open(self.stderr).readlines():
+                    print line,
+            print "\n--------------------------------------------\n"
+            raise error.AutoservSubcommandError(self.func, status)
+        return status
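+
+# Typical lifecycle, as a sketch (my_func and the timeout are hypothetical):
+#
+#     cmd = subcommand(my_func, [arg], subdir='results/host1')
+#     cmd.fork_start()               # child runs my_func(arg), output tee'd
+#     cmd.fork_waitfor(timeout=600)  # raises AutoservSubcommandError on rc != 0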
diff --git a/server/test.py b/server/test.py
index 34de6a6..3176857 100755
--- a/server/test.py
+++ b/server/test.py
@@ -9,12 +9,12 @@
 
 
 class test(common_test.base_test):
-	pass
+    pass
 
 
 testname = common_test.testname
 
 
 def runtest(job, url, tag, args, dargs):
-	common_test.runtest(job, url, tag, args, dargs,
-			    locals(), globals())
+    common_test.runtest(job, url, tag, args, dargs,
+                        locals(), globals())
diff --git a/server/tests/sleeptest/sleeptest.py b/server/tests/sleeptest/sleeptest.py
index 7b9d12c..1ae3013 100755
--- a/server/tests/sleeptest/sleeptest.py
+++ b/server/tests/sleeptest/sleeptest.py
@@ -1,7 +1,7 @@
 import test, time
 
 class sleeptest(test.test):
-	version = 1
+    version = 1
 
-	def execute(self, seconds = 1):
-		time.sleep(seconds)
+    def execute(self, seconds=1):
+        time.sleep(seconds)
diff --git a/server/utils.py b/server/utils.py
index 56a102e..e5e0dd6 100644
--- a/server/utils.py
+++ b/server/utils.py
@@ -22,368 +22,368 @@
 
 
 ############# we need pass throughs for the methods in client/common_lib/utils
-def run(command, timeout=None, ignore_status=False, 
-	stdout_tee=None, stderr_tee=None):
-	return utils.run(command, timeout, ignore_status, 
-			 stdout_tee, stderr_tee)
+def run(command, timeout=None, ignore_status=False,
+        stdout_tee=None, stderr_tee=None):
+    return utils.run(command, timeout, ignore_status,
+                     stdout_tee, stderr_tee)
 
 
 def system(command, timeout=None, ignore_status=False):
-	return utils.system(command, timeout, ignore_status)
+    return utils.system(command, timeout, ignore_status)
 
 
 def system_output(command, timeout=None, ignore_status=False,
-		  retain_output=False):
-	return utils.system_output(command, timeout, ignore_status, 
-				   retain_output)
+                  retain_output=False):
+    return utils.system_output(command, timeout, ignore_status,
+                               retain_output)
 
 
 def urlopen(url, data=None, proxies=None, timeout=300):
-	return utils.urlopen(url, data=data, proxies=proxies, timeout=timeout)
+    return utils.urlopen(url, data=data, proxies=proxies, timeout=timeout)
 
 
 def urlretrieve(url, filename=None, reporthook=None, data=None, timeout=300):
-	return utils.urlretrieve(url, filename=filename, reporthook=reporthook,
-	                         data=data, timeout=timeout)
+    return utils.urlretrieve(url, filename=filename, reporthook=reporthook,
+                             data=data, timeout=timeout)
 
 
 def read_keyval(path):
-	return utils.read_keyval(path)
+    return utils.read_keyval(path)
 
 
 def write_keyval(path, dictionary):
-	return utils.write_keyval(path, dictionary)
+    return utils.write_keyval(path, dictionary)
 
 
 ####################################################################
 
 def sh_escape(command):
-	"""
-	Escape special characters from a command so that it can be passed 
-	as a double quoted (" ") string in a (ba)sh command.
+    """
+    Escape special characters from a command so that it can be passed
+    as a double quoted (" ") string in a (ba)sh command.
 
-	Args:
-		command: the command string to escape. 
+    Args:
+            command: the command string to escape.
 
-	Returns:
-		The escaped command string. The required englobing double 
-		quotes are NOT added and so should be added at some point by 
-		the caller.
+    Returns:
+            The escaped command string. The required enclosing double
+            quotes are NOT added and so should be added at some point by
+            the caller.
 
-	See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
-	"""
-	command = command.replace("\\", "\\\\")
-	command = command.replace("$", r'\$')
-	command = command.replace('"', r'\"')
-	command = command.replace('`', r'\`')
-	return command
+    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
+    """
+    command = command.replace("\\", "\\\\")
+    command = command.replace("$", r'\$')
+    command = command.replace('"', r'\"')
+    command = command.replace('`', r'\`')
+    return command
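+
+# Usage sketch, assuming the caller supplies the enclosing double quotes:
+#
+#     cmd = 'echo "%s"' % sh_escape('costs $5 for a "quoted" `item`')
+#     # as seen by the shell: echo "costs \$5 for a \"quoted\" \`item\`"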
 
 
 def scp_remote_escape(filename):
-	"""
-	Escape special characters from a filename so that it can be passed 
-	to scp (within double quotes) as a remote file.
+    """
+    Escape special characters from a filename so that it can be passed
+    to scp (within double quotes) as a remote file.
 
-	Bis-quoting has to be used with scp for remote files, "bis-quoting" 
-	as in quoting x 2
-	scp does not support a newline in the filename
+    "Bis-quoting" (quoting twice over) has to be used with scp for
+    remote files. Note that scp does not support a newline in the
+    filename.
 
-	Args:
-		filename: the filename string to escape. 
+    Args:
+            filename: the filename string to escape.
 
-	Returns:
-		The escaped filename string. The required englobing double 
-		quotes are NOT added and so should be added at some point by 
-		the caller.
-	"""
-	escape_chars= r' !"$&' "'" r'()*,:;<=>?[\]^`{|}'
+    Returns:
+            The escaped filename string. The required enclosing double
+            quotes are NOT added and so should be added at some point by
+            the caller.
+    """
+    escape_chars = r' !"$&' "'" r'()*,:;<=>?[\]^`{|}'
 
-	new_name= []
-	for char in filename:
-		if char in escape_chars:
-			new_name.append("\\%s" % (char,))
-		else:
-			new_name.append(char)
+    new_name = []
+    for char in filename:
+        if char in escape_chars:
+            new_name.append("\\%s" % (char,))
+        else:
+            new_name.append(char)
 
-	return sh_escape("".join(new_name))
+    return sh_escape("".join(new_name))
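+
+# Illustration of the "bis-quoting": one escaping pass for the remote shell
+# that scp invokes, then sh_escape doubles the backslashes for local quoting:
+#
+#     scp_remote_escape('my file(1).txt')   # -> r'my\\ file\\(1\\).txt'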
 
 
 def get(location, local_copy = False):
-	"""Get a file or directory to a local temporary directory.
+    """Get a file or directory to a local temporary directory.
 
-	Args:
-		location: the source of the material to get. This source may 
-			be one of:
-			* a local file or directory
-			* a URL (http or ftp)
-			* a python file-like object
+    Args:
+            location: the source of the material to get. This source may
+                    be one of:
+                    * a local file or directory
+                    * a URL (http or ftp)
+                    * a python file-like object
 
-	Returns:
-		The location of the file or directory where the requested
-		content was saved. This will be contained in a temporary 
-		directory on the local host. If the material to get was a 
-		directory, the location will contain a trailing '/'
-	"""
-	tmpdir = get_tmp_dir()
+    Returns:
+            The location of the file or directory where the requested
+            content was saved. This will be contained in a temporary
+            directory on the local host. If the material to get was a
+            directory, the location will contain a trailing '/'
+    """
+    tmpdir = get_tmp_dir()
 
-	# location is a file-like object
-	if hasattr(location, "read"):
-		tmpfile = os.path.join(tmpdir, "file")
-		tmpfileobj = file(tmpfile, 'w')
-		shutil.copyfileobj(location, tmpfileobj)
-		tmpfileobj.close()
-		return tmpfile
+    # location is a file-like object
+    if hasattr(location, "read"):
+        tmpfile = os.path.join(tmpdir, "file")
+        tmpfileobj = file(tmpfile, 'w')
+        shutil.copyfileobj(location, tmpfileobj)
+        tmpfileobj.close()
+        return tmpfile
 
-	if isinstance(location, types.StringTypes):
-		# location is a URL
-		if location.startswith('http') or location.startswith('ftp'):
-			tmpfile = os.path.join(tmpdir, os.path.basename(location))
-			utils.urlretrieve(location, tmpfile)
-			return tmpfile
-		# location is a local path
-		elif os.path.exists(os.path.abspath(location)):
-			if not local_copy:
-				if os.path.isdir(location):
-					return location.rstrip('/') + '/'
-				else:
-					return location
-			tmpfile = os.path.join(tmpdir, os.path.basename(location))
-			if os.path.isdir(location):
-				tmpfile += '/'
-				shutil.copytree(location, tmpfile, symlinks=True)
-				return tmpfile
-			shutil.copyfile(location, tmpfile)
-			return tmpfile
-		# location is just a string, dump it to a file
-		else:
-			tmpfd, tmpfile = tempfile.mkstemp(dir=tmpdir)
-			tmpfileobj = os.fdopen(tmpfd, 'w')
-			tmpfileobj.write(location)
-			tmpfileobj.close()
-			return tmpfile
+    if isinstance(location, types.StringTypes):
+        # location is a URL
+        if location.startswith('http') or location.startswith('ftp'):
+            tmpfile = os.path.join(tmpdir, os.path.basename(location))
+            utils.urlretrieve(location, tmpfile)
+            return tmpfile
+        # location is a local path
+        elif os.path.exists(os.path.abspath(location)):
+            if not local_copy:
+                if os.path.isdir(location):
+                    return location.rstrip('/') + '/'
+                else:
+                    return location
+            tmpfile = os.path.join(tmpdir, os.path.basename(location))
+            if os.path.isdir(location):
+                tmpfile += '/'
+                shutil.copytree(location, tmpfile, symlinks=True)
+                return tmpfile
+            shutil.copyfile(location, tmpfile)
+            return tmpfile
+        # location is just a string, dump it to a file
+        else:
+            tmpfd, tmpfile = tempfile.mkstemp(dir=tmpdir)
+            tmpfileobj = os.fdopen(tmpfd, 'w')
+            tmpfileobj.write(location)
+            tmpfileobj.close()
+            return tmpfile
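+
+# The three source types, sketched (URL and paths here are hypothetical):
+#
+#     get(open('/etc/hostname'))               # file-like object -> tmp file
+#     get('http://example.com/patch.diff')     # URL -> downloaded tmp file
+#     get('/usr/src/linux/', local_copy=True)  # dir -> copy, trailing '/'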
 
 
 def get_tmp_dir():
-	"""Return the pathname of a directory on the host suitable 
-	for temporary file storage.
+    """Return the pathname of a directory on the host suitable
+    for temporary file storage.
 
-	The directory and its content will be deleted automatically
-	at the end of the program execution if they are still present.
-	"""
-	global __tmp_dirs
+    The directory and its content will be deleted automatically
+    at the end of the program execution if they are still present.
+    """
+    global __tmp_dirs
 
-	dir_name= tempfile.mkdtemp(prefix="autoserv-")
-	pid = os.getpid()
-	if not pid in __tmp_dirs:
-		__tmp_dirs[pid] = []
-	__tmp_dirs[pid].append(dir_name)
-	return dir_name
+    dir_name = tempfile.mkdtemp(prefix="autoserv-")
+    pid = os.getpid()
+    if pid not in __tmp_dirs:
+        __tmp_dirs[pid] = []
+    __tmp_dirs[pid].append(dir_name)
+    return dir_name
 
 
 @atexit.register
 def __clean_tmp_dirs():
-	"""Erase temporary directories that were created by the get_tmp_dir() 
-	function and that are still present.
-	"""
-	global __tmp_dirs
+    """Erase temporary directories that were created by the get_tmp_dir()
+    function and that are still present.
+    """
+    global __tmp_dirs
 
-	pid = os.getpid()
-	if pid not in __tmp_dirs:
-		return
-	for dir in __tmp_dirs[pid]:
-		try:
-			shutil.rmtree(dir)
-		except OSError, e:
-			if e.errno == 2:
-				pass
-	__tmp_dirs[pid] = []
+    pid = os.getpid()
+    if pid not in __tmp_dirs:
+        return
+    for dir_name in __tmp_dirs[pid]:
+        try:
+            shutil.rmtree(dir_name)
+        except OSError, e:
+            # errno 2 (ENOENT) means the directory is already gone;
+            # anything else is a real failure and should propagate
+            if e.errno != 2:
+                raise
+    __tmp_dirs[pid] = []
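+
+# The registry is keyed by pid so that a forked child's atexit handler only
+# removes the directories that the child itself created. Sketch:
+#
+#     scratch = get_tmp_dir()    # e.g. /tmp/autoserv-XXXXXX
+#     # ... write results into scratch; it is removed at process exit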
 
 
 def unarchive(host, source_material):
-	"""Uncompress and untar an archive on a host.
+    """Uncompress and untar an archive on a host.
 
-	If the "source_material" is compresses (according to the file 
-	extension) it will be uncompressed. Supported compression formats 
-	are gzip and bzip2. Afterwards, if the source_material is a tar 
-	archive, it will be untarred.
+    If the "source_material" is compresses (according to the file
+    extension) it will be uncompressed. Supported compression formats
+    are gzip and bzip2. Afterwards, if the source_material is a tar
+    archive, it will be untarred.
 
-	Args:
-		host: the host object on which the archive is located
-		source_material: the path of the archive on the host
+    Args:
+            host: the host object on which the archive is located
+            source_material: the path of the archive on the host
 
-	Returns:
-		The file or directory name of the unarchived source material. 
-		If the material is a tar archive, it will be extracted in the
-		directory where it is and the path returned will be the first
-		entry in the archive, assuming it is the topmost directory.
-		If the material is not an archive, nothing will be done so this
-		function is "harmless" when it is "useless".
-	"""
-	# uncompress
-	if (source_material.endswith(".gz") or 
-		source_material.endswith(".gzip")):
-		host.run('gunzip "%s"' % (sh_escape(source_material)))
-		source_material= ".".join(source_material.split(".")[:-1])
-	elif source_material.endswith("bz2"):
-		host.run('bunzip2 "%s"' % (sh_escape(source_material)))
-		source_material= ".".join(source_material.split(".")[:-1])
+    Returns:
+            The file or directory name of the unarchived source material.
+            If the material is a tar archive, it will be extracted in the
+            directory where it is and the path returned will be the first
+            entry in the archive, assuming it is the topmost directory.
+            If the material is not an archive, nothing will be done so this
+            function is "harmless" when it is "useless".
+    """
+    # uncompress
+    if (source_material.endswith(".gz") or
+            source_material.endswith(".gzip")):
+        host.run('gunzip "%s"' % (sh_escape(source_material)))
+        source_material= ".".join(source_material.split(".")[:-1])
+    elif source_material.endswith("bz2"):
+        host.run('bunzip2 "%s"' % (sh_escape(source_material)))
+        source_material= ".".join(source_material.split(".")[:-1])
 
-	# untar
-	if source_material.endswith(".tar"):
-		retval= host.run('tar -C "%s" -xvf "%s"' % (
-			sh_escape(os.path.dirname(source_material)),
-			sh_escape(source_material),))
-		source_material= os.path.join(os.path.dirname(source_material), 
-			retval.stdout.split()[0])
+    # untar
+    if source_material.endswith(".tar"):
+        retval = host.run('tar -C "%s" -xvf "%s"' % (
+                sh_escape(os.path.dirname(source_material)),
+                sh_escape(source_material),))
+        source_material = os.path.join(os.path.dirname(source_material),
+                                       retval.stdout.split()[0])
 
-	return source_material
+    return source_material
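+
+# Hypothetical call, assuming host is an object whose run() method executes
+# shell commands on the remote machine:
+#
+#     src = unarchive(host, '/tmp/linux-2.6.22.tar.bz2')
+#     # runs bunzip2 then tar -xvf on the host; src becomes
+#     # /tmp/linux-2.6.22 (the topmost entry of the archive)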
 
 
 def get_server_dir():
-	path = os.path.dirname(sys.modules['autotest_lib.server.utils'].__file__)
-	return os.path.abspath(path)
+    path = os.path.dirname(sys.modules['autotest_lib.server.utils'].__file__)
+    return os.path.abspath(path)
 
 
 def find_pid(command):
-	for line in utils.system_output('ps -eo pid,cmd').rstrip().split('\n'):
-		(pid, cmd) = line.split(None, 1)
-		if re.search(command, cmd):
-			return int(pid)
-	return None
+    for line in utils.system_output('ps -eo pid,cmd').rstrip().split('\n'):
+        (pid, cmd) = line.split(None, 1)
+        if re.search(command, cmd):
+            return int(pid)
+    return None
 
 
 def nohup(command, stdout='/dev/null', stderr='/dev/null', background=True,
-								env = {}):
-	cmd = ' '.join(key+'='+val for key, val in env.iteritems())
-	cmd += ' nohup ' + command
-	cmd += ' > %s' % stdout
-	if stdout == stderr:
-		cmd += ' 2>&1'
-	else:
-		cmd += ' 2> %s' % stderr
-	if background:
-		cmd += ' &'
-	utils.system(cmd)
+          env={}):
+    cmd = ' '.join(key+'='+val for key, val in env.iteritems())
+    cmd += ' nohup ' + command
+    cmd += ' > %s' % stdout
+    if stdout == stderr:
+        cmd += ' 2>&1'
+    else:
+        cmd += ' 2> %s' % stderr
+    if background:
+        cmd += ' &'
+    utils.system(cmd)
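+
+# Example of the shell command this assembles (env values are spliced in
+# unescaped, so they must already be shell-safe):
+#
+#     nohup('sleep 60', stdout='/tmp/out', stderr='/tmp/out',
+#           env={'DISPLAY': ':0'})
+#     # runs: DISPLAY=:0 nohup sleep 60 > /tmp/out 2>&1 &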
 
 
 def default_mappings(machines):
-	"""
-	Returns a simple mapping in which all machines are assigned to the
-	same key.  Provides the default behavior for 
-	form_ntuples_from_machines. """
-	mappings = {}
-	failures = []
-	
-	mach = machines[0]
-	mappings['ident'] = [mach]
-	if len(machines) > 1:	
-		machines = machines[1:]
-		for machine in machines:
-			mappings['ident'].append(machine)
-		
-	return (mappings, failures)
+    """
+    Returns a simple mapping in which all machines are assigned to the
+    same key.  Provides the default behavior for
+    form_ntuples_from_machines. """
+    mappings = {}
+    failures = []
+
+    mach = machines[0]
+    mappings['ident'] = [mach]
+    if len(machines) > 1:
+        machines = machines[1:]
+        for machine in machines:
+            mappings['ident'].append(machine)
+
+    return (mappings, failures)
 
 
 def form_ntuples_from_machines(machines, n=2, mapping_func=default_mappings):
-	"""Returns a set of ntuples from machines where the machines in an
-	   ntuple are in the same mapping, and a set of failures which are
-	   (machine name, reason) tuples."""
-	ntuples = []
-	(mappings, failures) = mapping_func(machines)
-	
-	# now run through the mappings and create n-tuples.
-	# throw out the odd guys out
-	for key in mappings:
-		key_machines = mappings[key]
-		total_machines = len(key_machines)
+    """Returns a set of ntuples from machines where the machines in an
+       ntuple are in the same mapping, and a set of failures which are
+       (machine name, reason) tuples."""
+    ntuples = []
+    (mappings, failures) = mapping_func(machines)
 
-		# form n-tuples 
-		while len(key_machines) >= n:
-			ntuples.append(key_machines[0:n])
-			key_machines = key_machines[n:]
+    # now run through the mappings and create n-tuples,
+    # throwing out any machines left over
+    for key in mappings:
+        key_machines = mappings[key]
+        total_machines = len(key_machines)
 
-		for mach in key_machines:
-			failures.append((mach, "machine can not be tupled"))
+        # form n-tuples
+        while len(key_machines) >= n:
+            ntuples.append(key_machines[0:n])
+            key_machines = key_machines[n:]
 
-	return (ntuples, failures)
+        for mach in key_machines:
+            failures.append((mach, "machine can not be tupled"))
+
+    return (ntuples, failures)
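+
+# With the default mapping all machines share one group, so for n=2 an odd
+# machine out is reported as a failure rather than paired:
+#
+#     ntuples, failures = form_ntuples_from_machines(['a', 'b', 'c'])
+#     # ntuples  == [['a', 'b']]
+#     # failures == [('c', 'machine can not be tupled')]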
 
 
 def parse_machine(machine, user = 'root', port = 22, password = ''):
-	"""
-	Parse the machine string user:pass@host:port and return it separately,
-	if the machine string is not complete, use the default parameters
-	when appropriate.
-	"""
+    """
+    Parse the machine string user:pass@host:port and return it separately,
+    if the machine string is not complete, use the default parameters
+    when appropriate.
+    """
 
-	user = user
-	port = port
-	password = password
 
-	if re.search('@', machine):
-		machine = machine.split('@')
+    if re.search('@', machine):
+        machine = machine.split('@')
 
-		if re.search(':', machine[0]):
-			machine[0] = machine[0].split(':')
-			user = machine[0][0]
-			password = machine[0][1]
+        if re.search(':', machine[0]):
+            machine[0] = machine[0].split(':')
+            user = machine[0][0]
+            password = machine[0][1]
 
-		else:
-			user = machine[0]
+        else:
+            user = machine[0]
 
-		if re.search(':', machine[1]):
-			machine[1] = machine[1].split(':')
-			hostname = machine[1][0]
-			port = int(machine[1][1])
+        if re.search(':', machine[1]):
+            machine[1] = machine[1].split(':')
+            hostname = machine[1][0]
+            port = int(machine[1][1])
 
-		else:
-			hostname = machine[1]
+        else:
+            hostname = machine[1]
 
-	elif re.search(':', machine):
-		machine = machine.split(':')
-		hostname = machine[0]
-		port = int(machine[1])
+    elif re.search(':', machine):
+        machine = machine.split(':')
+        hostname = machine[0]
+        port = int(machine[1])
 
-	else:
-		hostname = machine
+    else:
+        hostname = machine
 
-	return hostname, user, password, port
+    return hostname, user, password, port
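+
+# The four accepted forms and their results, for illustration:
+#
+#     parse_machine('mach1')                # ('mach1', 'root', '', 22)
+#     parse_machine('mach1:2222')           # ('mach1', 'root', '', 2222)
+#     parse_machine('user@mach1')           # ('mach1', 'user', '', 22)
+#     parse_machine('user:pw@mach1:2222')   # ('mach1', 'user', 'pw', 2222)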
 
 
 def get_public_key():
-	"""
-	Return a valid string ssh public key for the user executing autoserv or
-	autotest. If there's no DSA or RSA public key, create a DSA keypair with
-	ssh-keygen and return it.
-	"""
+    """
+    Return a valid string ssh public key for the user executing autoserv or
+    autotest. If there's no DSA or RSA public key, create a DSA keypair with
+    ssh-keygen and return it.
+    """
 
-	ssh_conf_path = os.path.join(os.environ['HOME'], '.ssh')
+    ssh_conf_path = os.path.join(os.environ['HOME'], '.ssh')
 
-	dsa_public_key_path = os.path.join(ssh_conf_path, 'id_dsa.pub')
-	dsa_private_key_path = os.path.join(ssh_conf_path, 'id_dsa')
+    dsa_public_key_path = os.path.join(ssh_conf_path, 'id_dsa.pub')
+    dsa_private_key_path = os.path.join(ssh_conf_path, 'id_dsa')
 
-	rsa_public_key_path = os.path.join(ssh_conf_path, 'id_rsa.pub')
-	rsa_private_key_path = os.path.join(ssh_conf_path, 'id_rsa')
+    rsa_public_key_path = os.path.join(ssh_conf_path, 'id_rsa.pub')
+    rsa_private_key_path = os.path.join(ssh_conf_path, 'id_rsa')
 
-	has_dsa_keypair = os.path.isfile(dsa_public_key_path) and \
-	    os.path.isfile(dsa_private_key_path)
-	has_rsa_keypair = os.path.isfile(rsa_public_key_path) and \
-	    os.path.isfile(rsa_private_key_path)
+    has_dsa_keypair = os.path.isfile(dsa_public_key_path) and \
+        os.path.isfile(dsa_private_key_path)
+    has_rsa_keypair = os.path.isfile(rsa_public_key_path) and \
+        os.path.isfile(rsa_private_key_path)
 
-	if has_dsa_keypair:
-		print 'DSA keypair found, using it'
-		public_key_path = dsa_public_key_path
+    if has_dsa_keypair:
+        print 'DSA keypair found, using it'
+        public_key_path = dsa_public_key_path
 
-	elif has_rsa_keypair:
-		print 'RSA keypair found, using it'
-		public_key_path = rsa_public_key_path
+    elif has_rsa_keypair:
+        print 'RSA keypair found, using it'
+        public_key_path = rsa_public_key_path
 
-	else:
-		print 'Neither RSA nor DSA keypair found, creating DSA ssh key pair'
-		system('ssh-keygen -t dsa -q -N "" -f %s' % dsa_private_key_path)
-		public_key_path = dsa_public_key_path
+    else:
+        print 'Neither RSA nor DSA keypair found, creating DSA ssh key pair'
+        system('ssh-keygen -t dsa -q -N "" -f %s' % dsa_private_key_path)
+        public_key_path = dsa_public_key_path
 
-	public_key = open(public_key_path, 'r')
-	public_key_str = public_key.read()
-	public_key.close()
+    public_key = open(public_key_path, 'r')
+    public_key_str = public_key.read()
+    public_key.close()
 
-	return public_key_str
+    return public_key_str
diff --git a/server/utils_unittest.py b/server/utils_unittest.py
index d28ec16..002164e 100644
--- a/server/utils_unittest.py
+++ b/server/utils_unittest.py
@@ -8,23 +8,23 @@
 
 
 class UtilsTest(unittest.TestCase):
-	
-	def setUp(self):
-		# define out machines here
-		self.machines = ['mach1', 'mach2', 'mach3', 'mach4', 'mach5', 
-					'mach6', 'mach7']
-		
-		self.ntuples = [['mach1', 'mach2'], ['mach3', 'mach4'],
-				['mach5', 'mach6']]
-		self.failures = []
-		self.failures.append(('mach7', "machine can not be tupled"))
+
+    def setUp(self):
+        # define our machines here
+        self.machines = ['mach1', 'mach2', 'mach3', 'mach4', 'mach5',
+                         'mach6', 'mach7']
+
+        self.ntuples = [['mach1', 'mach2'], ['mach3', 'mach4'],
+                        ['mach5', 'mach6']]
+        self.failures = []
+        self.failures.append(('mach7', "machine can not be tupled"))
 
 
-	def test_form_cell_mappings(self):
-		(ntuples, failures) = utils.form_ntuples_from_machines(self.machines)
-		self.assertEquals(self.ntuples, ntuples)
-		self.assertEquals(self.failures, failures)
+    def test_form_cell_mappings(self):
+        (ntuples, failures) = utils.form_ntuples_from_machines(self.machines)
+        self.assertEquals(self.ntuples, ntuples)
+        self.assertEquals(self.failures, failures)
 
 
 if __name__ == "__main__":
-	unittest.main()
+    unittest.main()
diff --git a/server/warning_monitor.py b/server/warning_monitor.py
index 6cd15c7..3b216b9 100644
--- a/server/warning_monitor.py
+++ b/server/warning_monitor.py
@@ -8,11 +8,11 @@
 # the format for a warning used here is:
 #   <timestamp (integer)> <tab> <status (string)> <newline>
 def make_alert(msg):
-	def alert(*params):
-		formatted_msg = msg % params
-	        timestamped_msg = "%d\t%s" % (time.time(), formatted_msg)
-		print >> warnfile, timestamped_msg
-	return alert
+    def alert(*params):
+        formatted_msg = msg % params
+        timestamped_msg = "%d\t%s" % (time.time(), formatted_msg)
+        print >> warnfile, timestamped_msg
+    return alert
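+
+# make_alert is a closure factory: each warning pattern gets a formatter
+# bound to its own message template. Illustration:
+#
+#     alert = make_alert('BUG: soft lockup detected on CPU#%s')
+#     alert('3')  # writes "<epoch>\tBUG: soft lockup detected on CPU#3"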
 
 
 pattern_file = os.path.join(os.path.dirname(__file__), 'warning_patterns')
@@ -28,15 +28,15 @@
 
 # assert that the patterns are separated by empty lines
 if sum(len(line.strip()) for line in pattern_lines[2::3]) > 0:
-	raise ValueError('warning patterns are not separated by blank lines')
+    raise ValueError('warning patterns are not separated by blank lines')
 
 hooks = [(re.compile(regex.rstrip('\n')), make_alert(alert.rstrip('\n')))
-	 for regex, alert in patterns]
+         for regex, alert in patterns]
 
 while True:
-	line = sys.stdin.readline()
-	logfile.write(line)
-	for regex, callback in hooks:
-		match = re.match(regex, line.strip())
-		if match:
-			callback(*match.groups())
+    line = sys.stdin.readline()
+    if not line:
+        break    # EOF on stdin: the monitored process is gone
+    logfile.write(line)
+    for regex, callback in hooks:
+        match = re.match(regex, line.strip())
+        if match:
+            callback(*match.groups())