Convert all Python code to use four-space indents instead of eight-space tabs.
Signed-off-by: John Admanski <[email protected]>
git-svn-id: http://test.kernel.org/svn/autotest/trunk@1658 592f7852-d20e-0410-864c-8624ca9c26a4
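
A conversion like this is normally scripted rather than done by hand. As a
rough illustration only (a hypothetical sketch, not the script actually used
for this commit), leading tabs can be rewritten as four-space indents as
below; CPython's Tools/scripts/reindent.py is the usual, more robust tool
for this job:

    import os, re

    def retab(path):
        # Rewrite each leading tab as four spaces. Assumes indentation
        # uses tabs only, at the start of each line (as this tree did).
        lines = open(path).readlines()
        fixed = [re.sub(r'^\t+', lambda m: '    ' * len(m.group(0)), line)
                 for line in lines]
        open(path, 'w').writelines(fixed)

    for root, dirs, files in os.walk('client'):
        for name in files:
            if name.endswith('.py'):
                retab(os.path.join(root, name))
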
diff --git a/client/bin/autotest b/client/bin/autotest
index 9d3a968..34c1e26 100755
--- a/client/bin/autotest
+++ b/client/bin/autotest
@@ -28,27 +28,27 @@
parser = OptionParser()
parser.add_option("-c", "--continue", dest="cont", action="store_true",
- default=False, help="continue previously started job")
+ default=False, help="continue previously started job")
parser.add_option("-t", "--tag", dest="tag", type="string", default="default",
- help="set the job tag")
+ help="set the job tag")
parser.add_option("-H", "--harness", dest="harness", type="string", default='',
- help="set the harness type")
+ help="set the harness type")
parser.add_option("-l", "--external_logging", dest="log", action="store_true",
- default=False, help="enable external logging")
+ default=False, help="enable external logging")
def usage():
- parser.print_help()
- sys.exit(1)
+ parser.print_help()
+ sys.exit(1)
options, args = parser.parse_args()
# Check for a control file.
if len(args) != 1:
- usage()
+ usage()
# JOB: run the specified job control file.
job.runjob(os.path.abspath(args[0]), options.cont, options.tag, options.harness,
- options.log)
+ options.log)
diff --git a/client/bin/autotest.py b/client/bin/autotest.py
index 9d4faa7..7695892 100755
--- a/client/bin/autotest.py
+++ b/client/bin/autotest.py
@@ -1,17 +1,17 @@
import os, sys
class system:
- def __init__(self):
- self.autodir = os.environ['AUTODIR']
- self.resultdir = self.autodir + '/results'
- self.tmpdir = self.autodir + '/tmp'
+ def __init__(self):
+ self.autodir = os.environ['AUTODIR']
+ self.resultdir = self.autodir + '/results'
+ self.tmpdir = self.autodir + '/tmp'
- if not os.path.isdir(self.resultdir):
- os.mkdir(self.resultdir)
- if not os.path.isdir(self.tmpdir):
- os.mkdir(self.tmpdir)
- return None
+ if not os.path.isdir(self.resultdir):
+ os.mkdir(self.resultdir)
+ if not os.path.isdir(self.tmpdir):
+ os.mkdir(self.tmpdir)
+ return None
- def boot(self, tag=None):
- print "I OUGHT TO REBOOT NOW!"
+ def boot(self, tag=None):
+ print "I OUGHT TO REBOOT NOW!"
diff --git a/client/bin/autotest_client b/client/bin/autotest_client
index 762d0cb..1e275a4 100755
--- a/client/bin/autotest_client
+++ b/client/bin/autotest_client
@@ -17,11 +17,11 @@
# If we're using cpusets, run inside the root one by default
if os.path.exists("/dev/cpuset/tasks") and getpass.getuser() == "root":
- utils.write_one_line("/dev/cpuset/tasks", str(os.getpid()))
+ utils.write_one_line("/dev/cpuset/tasks", str(os.getpid()))
autodir = os.path.dirname(sys.argv[0])
autotest = os.path.join(autodir, 'autotest')
cmd = ' '.join([autotest, '-H simple'] + sys.argv[1:])
exit_code = subprocess.call(cmd, shell=True, stderr=subprocess.STDOUT,
- close_fds=False)
+ close_fds=False)
sys.exit(exit_code) # pass on the exit status from autotest
diff --git a/client/bin/autotest_utils.py b/client/bin/autotest_utils.py
index 1e6c725..f6eb86e 100755
--- a/client/bin/autotest_utils.py
+++ b/client/bin/autotest_utils.py
@@ -8,593 +8,593 @@
def grep(pattern, file):
- """
- This is mainly to fix the return code inversion from grep
- Also handles compressed files.
+ """
+ This is mainly to fix the return code inversion from grep.
+ It also handles compressed files.
- returns 1 if the pattern is present in the file, 0 if not.
- """
- command = 'grep "%s" > /dev/null' % pattern
- ret = cat_file_to_cmd(file, command, ignore_status=True)
- return not ret
+ Returns True if the pattern is present in the file, False if not.
+ """
+ command = 'grep "%s" > /dev/null' % pattern
+ ret = cat_file_to_cmd(file, command, ignore_status=True)
+ return not ret
def difflist(list1, list2):
- """returns items in list2 that are not in list1"""
- diff = [];
- for x in list2:
- if x not in list1:
- diff.append(x)
- return diff
+ """returns items in list2 that are not in list1"""
+ diff = []
+ for x in list2:
+ if x not in list1:
+ diff.append(x)
+ return diff
def cat_file_to_cmd(file, command, ignore_status=0, return_output=False):
- """
- equivalent to 'cat file | command' but knows to use
- zcat or bzcat if appropriate
- """
- if return_output:
- run_cmd = utils.system_output
- else:
- run_cmd = utils.system
+ """
+ equivalent to 'cat file | command' but knows to use
+ zcat or bzcat if appropriate
+ """
+ if return_output:
+ run_cmd = utils.system_output
+ else:
+ run_cmd = utils.system
- if not os.path.isfile(file):
- raise NameError('invalid file %s to cat to command %s'
- % (file, command))
- if file.endswith('.bz2'):
- return run_cmd('bzcat ' + file + ' | ' + command, ignore_status)
- elif (file.endswith('.gz') or file.endswith('.tgz')):
- return run_cmd('zcat ' + file + ' | ' + command, ignore_status)
- else:
- return run_cmd('cat ' + file + ' | ' + command, ignore_status)
+ if not os.path.isfile(file):
+ raise NameError('invalid file %s to cat to command %s'
+ % (file, command))
+ if file.endswith('.bz2'):
+ return run_cmd('bzcat ' + file + ' | ' + command, ignore_status)
+ elif (file.endswith('.gz') or file.endswith('.tgz')):
+ return run_cmd('zcat ' + file + ' | ' + command, ignore_status)
+ else:
+ return run_cmd('cat ' + file + ' | ' + command, ignore_status)
def extract_tarball_to_dir(tarball, dir):
- """
- Extract a tarball to a specified directory name instead of whatever
- the top level of a tarball is - useful for versioned directory names, etc
- """
- if os.path.exists(dir):
- raise NameError, 'target %s already exists' % dir
- pwd = os.getcwd()
- os.chdir(os.path.dirname(os.path.abspath(dir)))
- newdir = extract_tarball(tarball)
- os.rename(newdir, dir)
- os.chdir(pwd)
+ """
+ Extract a tarball to a specified directory name instead of whatever
+ the top level of a tarball is - useful for versioned directory names, etc
+ """
+ if os.path.exists(dir):
+ raise NameError, 'target %s already exists' % dir
+ pwd = os.getcwd()
+ os.chdir(os.path.dirname(os.path.abspath(dir)))
+ newdir = extract_tarball(tarball)
+ os.rename(newdir, dir)
+ os.chdir(pwd)
def extract_tarball(tarball):
- """Returns the directory extracted by the tarball."""
- extracted = cat_file_to_cmd(tarball, 'tar xvf - 2>/dev/null',
- return_output=True).splitlines()
+ """Returns the directory extracted by the tarball."""
+ extracted = cat_file_to_cmd(tarball, 'tar xvf - 2>/dev/null',
+ return_output=True).splitlines()
- dir = None
+ dir = None
- for line in extracted:
- line = re.sub(r'^./', '', line)
- if not line or line == '.':
- continue
- topdir = line.split('/')[0]
- if os.path.isdir(topdir):
- if dir:
- assert(dir == topdir)
- else:
- dir = topdir
- if dir:
- return dir
- else:
- raise NameError('extracting tarball produced no dir')
+ for line in extracted:
+ line = re.sub(r'^./', '', line)
+ if not line or line == '.':
+ continue
+ topdir = line.split('/')[0]
+ if os.path.isdir(topdir):
+ if dir:
+ assert(dir == topdir)
+ else:
+ dir = topdir
+ if dir:
+ return dir
+ else:
+ raise NameError('extracting tarball produced no dir')
def get_md5sum(file_path):
- """Gets the md5sum of a file. You must provide a valid path to the file"""
- if not os.path.isfile(file_path):
- raise ValueError, 'invalid file %s to verify' % file_path
- return utils.system_output("md5sum " + file_path + " | awk '{print $1}'")
+ """Gets the md5sum of a file. You must provide a valid path to the file"""
+ if not os.path.isfile(file_path):
+ raise ValueError, 'invalid file %s to verify' % file_path
+ return utils.system_output("md5sum " + file_path + " | awk '{print $1}'")
def unmap_url_cache(cachedir, url, expected_md5):
- """\
- Downloads a file from a URL to a cache directory. If the file is already
- at the expected position and has the expected md5 number, let's not
- download it again.
- """
- # Let's convert cachedir to a canonical path, if it's not already
- cachedir = os.path.realpath(cachedir)
- if not os.path.isdir(cachedir):
- try:
- system('mkdir -p ' + cachedir)
- except:
- raise ValueError('Could not create cache directory %s' % cachedir)
- file_from_url = os.path.basename(url)
- file_local_path = os.path.join(cachedir, file_from_url)
- if os.path.isfile(file_local_path):
- file_md5 = get_md5sum(file_local_path)
- if file_md5 == expected_md5:
- # File is already at the expected position and ready to go
- src = file_from_url
- else:
- # Let's download the package again, it's corrupted...
- src = url
- else:
- # File is not there, let's download it
- src = url
- return utils.unmap_url(cachedir, src, cachedir)
+ """\
+ Downloads a file from a URL to a cache directory. If the file is already
+ at the expected position and has the expected md5 number, let's not
+ download it again.
+ """
+ # Let's convert cachedir to a canonical path, if it's not already
+ cachedir = os.path.realpath(cachedir)
+ if not os.path.isdir(cachedir):
+ try:
+ utils.system('mkdir -p ' + cachedir)
+ except:
+ raise ValueError('Could not create cache directory %s' % cachedir)
+ file_from_url = os.path.basename(url)
+ file_local_path = os.path.join(cachedir, file_from_url)
+ if os.path.isfile(file_local_path):
+ file_md5 = get_md5sum(file_local_path)
+ if file_md5 == expected_md5:
+ # File is already at the expected position and ready to go
+ src = file_from_url
+ else:
+ # Let's download the package again, it's corrupted...
+ src = url
+ else:
+ # File is not there, let's download it
+ src = url
+ return utils.unmap_url(cachedir, src, cachedir)
def basename(path):
- i = path.rfind('/');
- return path[i+1:]
+ i = path.rfind('/')
+ return path[i+1:]
def force_copy(src, dest):
- """Replace dest with a new copy of src, even if it exists"""
- if os.path.isfile(dest):
- os.remove(dest)
- if os.path.isdir(dest):
- dest = os.path.join(dest, os.path.basename(src))
- shutil.copyfile(src, dest)
- return dest
+ """Replace dest with a new copy of src, even if it exists"""
+ if os.path.isfile(dest):
+ os.remove(dest)
+ if os.path.isdir(dest):
+ dest = os.path.join(dest, os.path.basename(src))
+ shutil.copyfile(src, dest)
+ return dest
def force_link(src, dest):
- """Link src to dest, overwriting it if it exists"""
- return utils.system("ln -sf %s %s" % (src, dest))
+ """Link src to dest, overwriting it if it exists"""
+ return utils.system("ln -sf %s %s" % (src, dest))
def file_contains_pattern(file, pattern):
- """Return true if file contains the specified egrep pattern"""
- if not os.path.isfile(file):
- raise NameError('file %s does not exist' % file)
- return not utils.system('egrep -q "' + pattern + '" ' + file, ignore_status=True)
+ """Return true if file contains the specified egrep pattern"""
+ if not os.path.isfile(file):
+ raise NameError('file %s does not exist' % file)
+ return not utils.system('egrep -q "' + pattern + '" ' + file, ignore_status=True)
def list_grep(list, pattern):
- """True if any item in list matches the specified pattern."""
- compiled = re.compile(pattern)
- for line in list:
- match = compiled.search(line)
- if (match):
- return 1
- return 0
+ """True if any item in list matches the specified pattern."""
+ compiled = re.compile(pattern)
+ for line in list:
+ match = compiled.search(line)
+ if (match):
+ return 1
+ return 0
def get_os_vendor():
- """Try to guess what's the os vendor
- """
- issue = '/etc/issue'
+ """Try to guess what's the os vendor
+ """
+ issue = '/etc/issue'
- if not os.path.isfile(issue):
- return 'Unknown'
+ if not os.path.isfile(issue):
+ return 'Unknown'
- if file_contains_pattern(issue, 'Red Hat'):
- return 'Red Hat'
- elif file_contains_pattern(issue, 'Fedora Core'):
- return 'Fedora Core'
- elif file_contains_pattern(issue, 'SUSE'):
- return 'SUSE'
- elif file_contains_pattern(issue, 'Ubuntu'):
- return 'Ubuntu'
- elif file_contains_pattern(issue, 'Debian'):
- return 'Debian'
- else:
- return 'Unknown'
+ if file_contains_pattern(issue, 'Red Hat'):
+ return 'Red Hat'
+ elif file_contains_pattern(issue, 'Fedora Core'):
+ return 'Fedora Core'
+ elif file_contains_pattern(issue, 'SUSE'):
+ return 'SUSE'
+ elif file_contains_pattern(issue, 'Ubuntu'):
+ return 'Ubuntu'
+ elif file_contains_pattern(issue, 'Debian'):
+ return 'Debian'
+ else:
+ return 'Unknown'
def get_vmlinux():
- """Return the full path to vmlinux
+ """Return the full path to vmlinux
- Ahem. This is crap. Pray harder. Bad Martin.
- """
- vmlinux = '/boot/vmlinux-%s' % utils.system_output('uname -r')
- if os.path.isfile(vmlinux):
- return vmlinux
- vmlinux = '/lib/modules/%s/build/vmlinux' % utils.system_output('uname -r')
- if os.path.isfile(vmlinux):
- return vmlinux
- return None
+ Ahem. This is crap. Pray harder. Bad Martin.
+ """
+ vmlinux = '/boot/vmlinux-%s' % utils.system_output('uname -r')
+ if os.path.isfile(vmlinux):
+ return vmlinux
+ vmlinux = '/lib/modules/%s/build/vmlinux' % utils.system_output('uname -r')
+ if os.path.isfile(vmlinux):
+ return vmlinux
+ return None
def get_systemmap():
- """Return the full path to System.map
+ """Return the full path to System.map
- Ahem. This is crap. Pray harder. Bad Martin.
- """
- map = '/boot/System.map-%s' % utils.system_output('uname -r')
- if os.path.isfile(map):
- return map
- map = '/lib/modules/%s/build/System.map' % utils.system_output('uname -r')
- if os.path.isfile(map):
- return map
- return None
+ Ahem. This is crap. Pray harder. Bad Martin.
+ """
+ map = '/boot/System.map-%s' % utils.system_output('uname -r')
+ if os.path.isfile(map):
+ return map
+ map = '/lib/modules/%s/build/System.map' % utils.system_output('uname -r')
+ if os.path.isfile(map):
+ return map
+ return None
def get_modules_dir():
- """Return the modules dir for the running kernel version"""
- kernel_version = utils.system_output('uname -r')
- return '/lib/modules/%s/kernel' % kernel_version
+ """Return the modules dir for the running kernel version"""
+ kernel_version = utils.system_output('uname -r')
+ return '/lib/modules/%s/kernel' % kernel_version
def get_cpu_arch():
- """Work out which CPU architecture we're running on"""
- f = open('/proc/cpuinfo', 'r')
- cpuinfo = f.readlines()
- f.close()
- if list_grep(cpuinfo, '^cpu.*(RS64|POWER3|Broadband Engine)'):
- return 'power'
- elif list_grep(cpuinfo, '^cpu.*POWER4'):
- return 'power4'
- elif list_grep(cpuinfo, '^cpu.*POWER5'):
- return 'power5'
- elif list_grep(cpuinfo, '^cpu.*POWER6'):
- return 'power6'
- elif list_grep(cpuinfo, '^cpu.*PPC970'):
- return 'power970'
- elif list_grep(cpuinfo, 'Opteron'):
- return 'x86_64'
- elif list_grep(cpuinfo, 'GenuineIntel') and list_grep(cpuinfo, '48 bits virtual'):
- return 'x86_64'
- else:
- return 'i386'
+ """Work out which CPU architecture we're running on"""
+ f = open('/proc/cpuinfo', 'r')
+ cpuinfo = f.readlines()
+ f.close()
+ if list_grep(cpuinfo, '^cpu.*(RS64|POWER3|Broadband Engine)'):
+ return 'power'
+ elif list_grep(cpuinfo, '^cpu.*POWER4'):
+ return 'power4'
+ elif list_grep(cpuinfo, '^cpu.*POWER5'):
+ return 'power5'
+ elif list_grep(cpuinfo, '^cpu.*POWER6'):
+ return 'power6'
+ elif list_grep(cpuinfo, '^cpu.*PPC970'):
+ return 'power970'
+ elif list_grep(cpuinfo, 'Opteron'):
+ return 'x86_64'
+ elif list_grep(cpuinfo, 'GenuineIntel') and list_grep(cpuinfo, '48 bits virtual'):
+ return 'x86_64'
+ else:
+ return 'i386'
def get_current_kernel_arch():
- """Get the machine architecture, now just a wrap of 'uname -m'."""
- return os.popen('uname -m').read().rstrip()
+ """Get the machine architecture, now just a wrap of 'uname -m'."""
+ return os.popen('uname -m').read().rstrip()
def get_file_arch(filename):
- # -L means follow symlinks
- file_data = utils.system_output('file -L ' + filename)
- if file_data.count('80386'):
- return 'i386'
- return None
+ # -L means follow symlinks
+ file_data = utils.system_output('file -L ' + filename)
+ if file_data.count('80386'):
+ return 'i386'
+ return None
def count_cpus():
- """number of CPUs in the local machine according to /proc/cpuinfo"""
- f = file('/proc/cpuinfo', 'r')
- cpus = 0
- for line in f.readlines():
- if line.startswith('processor'):
- cpus += 1
- return cpus
+ """number of CPUs in the local machine according to /proc/cpuinfo"""
+ f = file('/proc/cpuinfo', 'r')
+ cpus = 0
+ for line in f.readlines():
+ if line.startswith('processor'):
+ cpus += 1
+ return cpus
# Returns total memory in kb
def read_from_meminfo(key):
- meminfo = utils.system_output('grep %s /proc/meminfo' % key)
- return int(re.search(r'\d+', meminfo).group(0))
+ meminfo = utils.system_output('grep %s /proc/meminfo' % key)
+ return int(re.search(r'\d+', meminfo).group(0))
def memtotal():
- return read_from_meminfo('MemTotal')
+ return read_from_meminfo('MemTotal')
def freememtotal():
- return read_from_meminfo('MemFree')
+ return read_from_meminfo('MemFree')
def sysctl_kernel(key, value=None):
- """(Very) partial implementation of sysctl, for kernel params"""
- if value:
- # write
- utils.write_one_line('/proc/sys/kernel/%s' % key, str(value))
- else:
- # read
- out = utils.read_one_line('/proc/sys/kernel/%s' % key)
- return int(re.search(r'\d+', out).group(0))
+ """(Very) partial implementation of sysctl, for kernel params"""
+ if value:
+ # write
+ utils.write_one_line('/proc/sys/kernel/%s' % key, str(value))
+ else:
+ # read
+ out = utils.read_one_line('/proc/sys/kernel/%s' % key)
+ return int(re.search(r'\d+', out).group(0))
def _convert_exit_status(sts):
- if os.WIFSIGNALED(sts):
- return -os.WTERMSIG(sts)
- elif os.WIFEXITED(sts):
- return os.WEXITSTATUS(sts)
- else:
- # impossible?
- raise RuntimeError("Unknown exit status %d!" % sts)
+ if os.WIFSIGNALED(sts):
+ return -os.WTERMSIG(sts)
+ elif os.WIFEXITED(sts):
+ return os.WEXITSTATUS(sts)
+ else:
+ # impossible?
+ raise RuntimeError("Unknown exit status %d!" % sts)
def where_art_thy_filehandles():
- """Dump the current list of filehandles"""
- os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
+ """Dump the current list of filehandles"""
+ os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
def print_to_tty(string):
- """Output string straight to the tty"""
- open('/dev/tty', 'w').write(string + '\n')
+ """Output string straight to the tty"""
+ open('/dev/tty', 'w').write(string + '\n')
def dump_object(object):
- """Dump an object's attributes and methods
+ """Dump an object's attributes and methods
- kind of like dir()
- """
- for item in object.__dict__.iteritems():
- print item
- try:
- (key,value) = item
- dump_object(value)
- except:
- continue
+ kind of like dir()
+ """
+ for item in object.__dict__.iteritems():
+ print item
+ try:
+ (key,value) = item
+ dump_object(value)
+ except:
+ continue
def environ(env_key):
- """return the requested environment variable, or '' if unset"""
- if (os.environ.has_key(env_key)):
- return os.environ[env_key]
- else:
- return ''
+ """return the requested environment variable, or '' if unset"""
+ if (os.environ.has_key(env_key)):
+ return os.environ[env_key]
+ else:
+ return ''
def prepend_path(newpath, oldpath):
- """prepend newpath to oldpath"""
- if (oldpath):
- return newpath + ':' + oldpath
- else:
- return newpath
+ """prepend newpath to oldpath"""
+ if (oldpath):
+ return newpath + ':' + oldpath
+ else:
+ return newpath
def append_path(oldpath, newpath):
- """append newpath to oldpath"""
- if (oldpath):
- return oldpath + ':' + newpath
- else:
- return newpath
+ """append newpath to oldpath"""
+ if (oldpath):
+ return oldpath + ':' + newpath
+ else:
+ return newpath
def avgtime_print(dir):
- """ Calculate some benchmarking statistics.
- Input is a directory containing a file called 'time'.
- File contains one-per-line results of /usr/bin/time.
- Output is average Elapsed, User, and System time in seconds,
- and average CPU percentage.
- """
- f = open(dir + "/time")
- user = system = elapsed = cpu = count = 0
- r = re.compile('([\d\.]*)user ([\d\.]*)system (\d*):([\d\.]*)elapsed (\d*)%CPU')
- for line in f.readlines():
- try:
- s = r.match(line);
- user += float(s.group(1))
- system += float(s.group(2))
- elapsed += (float(s.group(3)) * 60) + float(s.group(4))
- cpu += float(s.group(5))
- count += 1
- except:
- raise ValueError("badly formatted times")
+ """ Calculate some benchmarking statistics.
+ Input is a directory containing a file called 'time'.
+ File contains one-per-line results of /usr/bin/time.
+ Output is average Elapsed, User, and System time in seconds,
+ and average CPU percentage.
+ """
+ f = open(dir + "/time")
+ user = system = elapsed = cpu = count = 0
+ r = re.compile('([\d\.]*)user ([\d\.]*)system (\d*):([\d\.]*)elapsed (\d*)%CPU')
+ for line in f.readlines():
+ try:
+ s = r.match(line)
+ user += float(s.group(1))
+ system += float(s.group(2))
+ elapsed += (float(s.group(3)) * 60) + float(s.group(4))
+ cpu += float(s.group(5))
+ count += 1
+ except:
+ raise ValueError("badly formatted times")
- f.close()
- return "Elapsed: %0.2fs User: %0.2fs System: %0.2fs CPU: %0.0f%%" % \
- (elapsed/count, user/count, system/count, cpu/count)
+ f.close()
+ return "Elapsed: %0.2fs User: %0.2fs System: %0.2fs CPU: %0.0f%%" % \
+ (elapsed/count, user/count, system/count, cpu/count)
def running_config():
- """
- Return path of config file of the currently running kernel
- """
- version = utils.system_output('uname -r')
- for config in ('/proc/config.gz', \
- '/boot/config-%s' % version,
- '/lib/modules/%s/build/.config' % version):
- if os.path.isfile(config):
- return config
- return None
+ """
+ Return path of config file of the currently running kernel
+ """
+ version = utils.system_output('uname -r')
+ for config in ('/proc/config.gz', \
+ '/boot/config-%s' % version,
+ '/lib/modules/%s/build/.config' % version):
+ if os.path.isfile(config):
+ return config
+ return None
def check_for_kernel_feature(feature):
- config = running_config()
+ config = running_config()
- if not config:
- raise TypeError("Can't find kernel config file")
+ if not config:
+ raise TypeError("Can't find kernel config file")
- if config.endswith('.gz'):
- grep = 'zgrep'
- else:
- grep = 'grep'
- grep += ' ^CONFIG_%s= %s' % (feature, config)
+ if config.endswith('.gz'):
+ grep = 'zgrep'
+ else:
+ grep = 'grep'
+ grep += ' ^CONFIG_%s= %s' % (feature, config)
- if not utils.system_output(grep, ignore_status=True):
- raise ValueError("Kernel doesn't have a %s feature" % (feature))
+ if not utils.system_output(grep, ignore_status=True):
+ raise ValueError("Kernel doesn't have a %s feature" % (feature))
def cpu_online_map():
- """
- Check out the available cpu online map
- """
- cpus = []
- for line in open('/proc/cpuinfo', 'r').readlines():
- if line.startswith('processor'):
- cpus.append(line.split()[2]) # grab cpu number
- return cpus
+ """
+ Check out the available cpu online map
+ """
+ cpus = []
+ for line in open('/proc/cpuinfo', 'r').readlines():
+ if line.startswith('processor'):
+ cpus.append(line.split()[2]) # grab cpu number
+ return cpus
def check_glibc_ver(ver):
- glibc_ver = commands.getoutput('ldd --version').splitlines()[0]
- glibc_ver = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver).group()
- if glibc_ver.split('.') < ver.split('.'):
- raise error.TestError("Glibc is too old (%s). Glibc >= %s is needed." % \
- (glibc_ver, ver))
+ glibc_ver = commands.getoutput('ldd --version').splitlines()[0]
+ glibc_ver = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver).group()
+ if glibc_ver.split('.') < ver.split('.'):
+ raise error.TestError("Glibc is too old (%s). Glibc >= %s is needed." % \
+ (glibc_ver, ver))
def check_kernel_ver(ver):
- kernel_ver = utils.system_output('uname -r')
- kv_tmp = re.split(r'[-]', kernel_ver)[0:3]
- if kv_tmp[0].split('.') < ver.split('.'):
- raise error.TestError("Kernel is too old (%s). Kernel > %s is needed." % \
- (kernel_ver, ver))
+ kernel_ver = utils.system_output('uname -r')
+ kv_tmp = re.split(r'[-]', kernel_ver)[0:3]
+ if kv_tmp[0].split('.') < ver.split('.'):
+ raise error.TestError("Kernel is too old (%s). Kernel > %s is needed." % \
+ (kernel_ver, ver))
def human_format(number):
- # Convert number to kilo / mega / giga format.
- if number < 1024:
- return "%d" % number
- kilo = float(number) / 1024.0
- if kilo < 1024:
- return "%.2fk" % kilo
- meg = kilo / 1024.0
- if meg < 1024:
- return "%.2fM" % meg
- gig = meg / 1024.0
- return "%.2fG" % gig
+ # Convert number to kilo / mega / giga format.
+ if number < 1024:
+ return "%d" % number
+ kilo = float(number) / 1024.0
+ if kilo < 1024:
+ return "%.2fk" % kilo
+ meg = kilo / 1024.0
+ if meg < 1024:
+ return "%.2fM" % meg
+ gig = meg / 1024.0
+ return "%.2fG" % gig
def numa_nodes():
- node_paths = glob.glob('/sys/devices/system/node/node*')
- nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths]
- return (sorted(nodes))
+ node_paths = glob.glob('/sys/devices/system/node/node*')
+ nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths]
+ return (sorted(nodes))
def node_size():
- nodes = max(len(numa_nodes()), 1)
- return ((memtotal() * 1024) / nodes)
+ nodes = max(len(numa_nodes()), 1)
+ return ((memtotal() * 1024) / nodes)
def to_seconds(time_string):
- """Converts a string in M+:SS.SS format to S+.SS"""
- elts = time_string.split(':')
- if len(elts) == 1:
- return time_string
- return str(int(elts[0]) * 60 + float(elts[1]))
+ """Converts a string in M+:SS.SS format to S+.SS"""
+ elts = time_string.split(':')
+ if len(elts) == 1:
+ return time_string
+ return str(int(elts[0]) * 60 + float(elts[1]))
def extract_all_time_results(results_string):
- """Extract user, system, and elapsed times into a list of tuples"""
- pattern = re.compile(r"(.*?)user (.*?)system (.*?)elapsed")
- results = []
- for result in pattern.findall(results_string):
- results.append(tuple([to_seconds(elt) for elt in result]))
- return results
+ """Extract user, system, and elapsed times into a list of tuples"""
+ pattern = re.compile(r"(.*?)user (.*?)system (.*?)elapsed")
+ results = []
+ for result in pattern.findall(results_string):
+ results.append(tuple([to_seconds(elt) for elt in result]))
+ return results
def pickle_load(filename):
- return pickle.load(open(filename, 'r'))
+ return pickle.load(open(filename, 'r'))
# Return the kernel version and build timestamp.
def running_os_release():
- return os.uname()[2:4]
+ return os.uname()[2:4]
def running_os_ident():
- (version, timestamp) = running_os_release()
- return version + '::' + timestamp
+ (version, timestamp) = running_os_release()
+ return version + '::' + timestamp
# much like find . -name 'pattern'
def locate(pattern, root=os.getcwd()):
- for path, dirs, files in os.walk(root):
- for f in [os.path.abspath(os.path.join(path, f))
- for f in files if fnmatch.fnmatch(f, pattern)]:
- yield f
+ for path, dirs, files in os.walk(root):
+ for f in [os.path.abspath(os.path.join(path, f))
+ for f in files if fnmatch.fnmatch(f, pattern)]:
+ yield f
def freespace(path):
- """Return the disk free space, in bytes"""
- s = os.statvfs(path)
- return s.f_bavail * s.f_bsize
+ """Return the disk free space, in bytes"""
+ s = os.statvfs(path)
+ return s.f_bavail * s.f_bsize
def disk_block_size(path):
- """Return the disk block size, in bytes"""
- return os.statvfs(path).f_bsize
+ """Return the disk block size, in bytes"""
+ return os.statvfs(path).f_bsize
def get_cpu_family():
- procinfo = utils.system_output('cat /proc/cpuinfo')
- CPU_FAMILY_RE = re.compile(r'^cpu family\s+:\s+(\S+)', re.M)
- matches = CPU_FAMILY_RE.findall(procinfo)
- if matches:
- return int(matches[0])
- else:
- raise error.TestError('Could not get valid cpu family data')
+ procinfo = utils.system_output('cat /proc/cpuinfo')
+ CPU_FAMILY_RE = re.compile(r'^cpu family\s+:\s+(\S+)', re.M)
+ matches = CPU_FAMILY_RE.findall(procinfo)
+ if matches:
+ return int(matches[0])
+ else:
+ raise error.TestError('Could not get valid cpu family data')
def get_disks():
- df_output = utils.system_output('df')
- disk_re = re.compile(r'^(/dev/hd[a-z]+)3', re.M)
- return disk_re.findall(df_output)
+ df_output = utils.system_output('df')
+ disk_re = re.compile(r'^(/dev/hd[a-z]+)3', re.M)
+ return disk_re.findall(df_output)
def load_module(module_name):
- # Checks if a module has already been loaded
- if module_is_loaded(module_name):
- return False
+ # Checks if a module has already been loaded
+ if module_is_loaded(module_name):
+ return False
- utils.system('/sbin/modprobe ' + module_name)
- return True
+ utils.system('/sbin/modprobe ' + module_name)
+ return True
def unload_module(module_name):
- utils.system('/sbin/rmmod ' + module_name)
+ utils.system('/sbin/rmmod ' + module_name)
def module_is_loaded(module_name):
- module_name = module_name.replace('-', '_')
- modules = utils.system_output('/sbin/lsmod').splitlines()
- for module in modules:
- if module.startswith(module_name) and module[len(module_name)] == ' ':
- return True
- return False
+ module_name = module_name.replace('-', '_')
+ modules = utils.system_output('/sbin/lsmod').splitlines()
+ for module in modules:
+ if module.startswith(module_name) and module[len(module_name)] == ' ':
+ return True
+ return False
def get_loaded_modules():
- lsmod_output = utils.system_output('/sbin/lsmod').splitlines()[1:]
- return [line.split(None, 1)[0] for line in lsmod_output]
+ lsmod_output = utils.system_output('/sbin/lsmod').splitlines()[1:]
+ return [line.split(None, 1)[0] for line in lsmod_output]
def get_huge_page_size():
- output = utils.system_output('grep Hugepagesize /proc/meminfo')
- return int(output.split()[1]) # Assumes units always in kB. :(
+ output = utils.system_output('grep Hugepagesize /proc/meminfo')
+ return int(output.split()[1]) # Assumes units always in kB. :(
def get_num_huge_pages():
- raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
- return int(raw_hugepages.split()[2])
+ raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
+ return int(raw_hugepages.split()[2])
def set_num_huge_pages(num):
- utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
+ utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
def get_system_nodes():
- nodes = os.listdir('/sys/devices/system/node')
- nodes.sort()
- return nodes
+ nodes = os.listdir('/sys/devices/system/node')
+ nodes.sort()
+ return nodes
def get_cpu_vendor():
- cpuinfo = open('/proc/cpuinfo').read()
- vendors = re.findall(r'(?m)^vendor_id\s*:\s*(\S+)\s*$', cpuinfo)
- for i in xrange(1, len(vendors)):
- if vendors[i] != vendors[0]:
- raise error.TestError('multiple cpu vendors found: ' + str(vendors))
- return vendors[0]
+ cpuinfo = open('/proc/cpuinfo').read()
+ vendors = re.findall(r'(?m)^vendor_id\s*:\s*(\S+)\s*$', cpuinfo)
+ for i in xrange(1, len(vendors)):
+ if vendors[i] != vendors[0]:
+ raise error.TestError('multiple cpu vendors found: ' + str(vendors))
+ return vendors[0]
def probe_cpus():
- """
- This routine returns a list of cpu devices found under /sys/devices/system/cpu.
- """
- output = utils.system_output(
- 'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*')
- return output.splitlines()
+ """
+ This routine returns a list of cpu devices found under /sys/devices/system/cpu.
+ """
+ output = utils.system_output(
+ 'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*')
+ return output.splitlines()
def ping_default_gateway():
- """Ping the default gateway."""
-
- network = open('/etc/sysconfig/network')
- m = re.search('GATEWAY=(\S+)', network.read())
+ """Ping the default gateway."""
- if m:
- gw = m.group(1)
- cmd = 'ping %s -c 5 > /dev/null' % gw
- return utils.system(cmd, ignore_status=True)
-
- raise error.TestError('Unable to find default gateway')
+ network = open('/etc/sysconfig/network')
+ m = re.search('GATEWAY=(\S+)', network.read())
+
+ if m:
+ gw = m.group(1)
+ cmd = 'ping %s -c 5 > /dev/null' % gw
+ return utils.system(cmd, ignore_status=True)
+
+ raise error.TestError('Unable to find default gateway')
try:
- from site_utils import *
+ from site_utils import *
except ImportError:
- pass
+ pass
diff --git a/client/bin/boottool.py b/client/bin/boottool.py
index 43f7f39..7c10618 100644
--- a/client/bin/boottool.py
+++ b/client/bin/boottool.py
@@ -2,125 +2,124 @@
from autotest_lib.client.common_lib import utils, error
class boottool:
- def __init__(self, boottool_exec=None):
- #variable to indicate if in mode to write entries for Xen
- self.xen_mode = False
+ def __init__(self, boottool_exec=None):
+ #variable to indicate if in mode to write entries for Xen
+ self.xen_mode = False
- if boottool_exec:
- self.boottool_exec = boottool_exec
- else:
- autodir = os.environ['AUTODIR']
- self.boottool_exec = autodir + '/tools/boottool'
+ if boottool_exec:
+ self.boottool_exec = boottool_exec
+ else:
+ autodir = os.environ['AUTODIR']
+ self.boottool_exec = autodir + '/tools/boottool'
- if not self.boottool_exec:
- raise error.AutotestError('Failed to set boottool_exec')
+ if not self.boottool_exec:
+ raise error.AutotestError('Failed to set boottool_exec')
- def run_boottool(self, params):
- return utils.system_output('%s %s' % (self.boottool_exec, params))
+ def run_boottool(self, params):
+ return utils.system_output('%s %s' % (self.boottool_exec, params))
- def bootloader(self):
- return self.run_boottool('--bootloader-probe')
+ def bootloader(self):
+ return self.run_boottool('--bootloader-probe')
- def architecture(self):
- return self.run_boottool('--arch-probe')
+ def architecture(self):
+ return self.run_boottool('--arch-probe')
- def list_titles(self):
- print self.run_boottool('--info all | grep title')
+ def list_titles(self):
+ print self.run_boottool('--info all | grep title')
- def print_entry(self, index):
- print self.run_boottool('--info=%s' % index)
+ def print_entry(self, index):
+ print self.run_boottool('--info=%s' % index)
- def get_default(self):
- self.run_boottool('--default')
+ def get_default(self):
+ return self.run_boottool('--default')
- def set_default(self, index):
- print self.run_boottool('--set-default=%s' % index)
+ def set_default(self, index):
+ print self.run_boottool('--set-default=%s' % index)
- def enable_xen_mode(self):
- self.xen_mode = True
+ def enable_xen_mode(self):
+ self.xen_mode = True
- def disable_xen_mode(self):
- self.xen_mode = False
+ def disable_xen_mode(self):
+ self.xen_mode = False
- def get_xen_mode(self):
- return self.xen_mode
+ def get_xen_mode(self):
+ return self.xen_mode
- # 'kernel' can be an position number or a title
- def add_args(self, kernel, args):
- parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
+ # 'kernel' can be a position number or a title
+ def add_args(self, kernel, args):
+ parameters = '--update-kernel=%s --args="%s"' % (kernel, args)
- #add parameter if this is a Xen entry
- if self.xen_mode:
- parameters += ' --xen'
+ #add parameter if this is a Xen entry
+ if self.xen_mode:
+ parameters += ' --xen'
- print self.run_boottool(parameters)
+ print self.run_boottool(parameters)
- def add_xen_hypervisor_args(self, kernel, args):
- self.run_boottool('--xen --update-xenhyper=%s --xha="%s"') %(kernel, args)
-
-
- def remove_args(self, kernel, args):
- parameters = '--update-kernel=%s --remove-args=%s' % (kernel, args)
-
- #add parameter if this is a Xen entry
- if self.xen_mode:
- parameters += ' --xen'
-
- print self.run_boottool(parameters)
+ def add_xen_hypervisor_args(self, kernel, args):
+ self.run_boottool('--xen --update-xenhyper=%s --xha="%s"' % (kernel, args))
- def remove_xen_hypervisor_args(self, kernel, args):
- self.run_boottool('--xen --update-xenhyper=%s --remove-args="%s"') \
- % (kernel, args)
+ def remove_args(self, kernel, args):
+ parameters = '--update-kernel=%s --remove-args=%s' % (kernel, args)
+
+ #add parameter if this is a Xen entry
+ if self.xen_mode:
+ parameters += ' --xen'
+
+ print self.run_boottool(parameters)
- def add_kernel(self, path, title='autotest', initrd='', xen_hypervisor='', args=None, root=None, position='end'):
- parameters = '--add-kernel=%s --title=%s' % (path, title)
-
- # add an initrd now or forever hold your peace
- if initrd:
- parameters += ' --initrd=%s' % initrd
-
- # add parameter if this is a Xen entry
- if self.xen_mode:
- parameters += ' --xen'
- if xen_hypervisor:
- parameters += ' --xenhyper=%s' % xen_hypervisor
-
- if args:
- parameters += ' --args="%s"' % args
- if root:
- parameters += ' --root="%s"' % root
- if position:
- parameters += ' --position="%s"' % position
-
- print self.run_boottool(parameters)
+ def remove_xen_hypervisor_args(self, kernel, args):
+ self.run_boottool('--xen --update-xenhyper=%s --remove-args="%s"'
+ % (kernel, args))
- def remove_kernel(self, kernel):
- print self.run_boottool('--remove-kernel=%s' % kernel)
+ def add_kernel(self, path, title='autotest', initrd='', xen_hypervisor='', args=None, root=None, position='end'):
+ parameters = '--add-kernel=%s --title=%s' % (path, title)
+
+ # add an initrd now or forever hold your peace
+ if initrd:
+ parameters += ' --initrd=%s' % initrd
+
+ # add parameter if this is a Xen entry
+ if self.xen_mode:
+ parameters += ' --xen'
+ if xen_hypervisor:
+ parameters += ' --xenhyper=%s' % xen_hypervisor
+
+ if args:
+ parameters += ' --args="%s"' % args
+ if root:
+ parameters += ' --root="%s"' % root
+ if position:
+ parameters += ' --position="%s"' % position
+
+ print self.run_boottool(parameters)
- def boot_once(self, title):
- print self.run_boottool('--boot-once --title=%s' % title)
+ def remove_kernel(self, kernel):
+ print self.run_boottool('--remove-kernel=%s' % kernel)
- def info(self, index):
- return self.run_boottool('--info=%s' % index)
+ def boot_once(self, title):
+ print self.run_boottool('--boot-once --title=%s' % title)
+
+
+ def info(self, index):
+ return self.run_boottool('--info=%s' % index)
# TODO: backup()
# TODO: set_timeout()
-
diff --git a/client/bin/common.py b/client/bin/common.py
index 74ed759..6881386 100644
--- a/client/bin/common.py
+++ b/client/bin/common.py
@@ -5,4 +5,4 @@
import setup_modules
sys.path.pop(0)
setup_modules.setup(base_path=client_dir,
- root_module_name="autotest_lib.client")
+ root_module_name="autotest_lib.client")
diff --git a/client/bin/config.py b/client/bin/config.py
index 59b8279..477bece 100644
--- a/client/bin/config.py
+++ b/client/bin/config.py
@@ -8,7 +8,7 @@
Please no StudlyCaps.
For example:
- boot.default_args
+ boot.default_args
"""
__author__ = """Copyright Andy Whitcroft 2006"""
@@ -16,33 +16,33 @@
import os
class config:
- """The BASIC job configuration
+ """The BASIC job configuration
- Properties:
- job
- The job object for this job
- config
- The job configuration dictionary
- """
+ Properties:
+ job
+ The job object for this job
+ config
+ The job configuration dictionary
+ """
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.job = job
- self.config = {}
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.job = job
+ self.config = {}
- def set(self, name, value):
- if name == "proxy":
- os.environ['http_proxy'] = value
- os.environ['ftp_proxy'] = value
+ def set(self, name, value):
+ if name == "proxy":
+ os.environ['http_proxy'] = value
+ os.environ['ftp_proxy'] = value
- self.config[name] = value
+ self.config[name] = value
- def get(self, name):
- if name in self.config:
- return self.config[name]
- else:
- return None
+ def get(self, name):
+ if name in self.config:
+ return self.config[name]
+ else:
+ return None
diff --git a/client/bin/cpuset.py b/client/bin/cpuset.py
index a870e76..fe583ac 100644
--- a/client/bin/cpuset.py
+++ b/client/bin/cpuset.py
@@ -9,265 +9,265 @@
# Convert '1-3,7,9-12' to [1,2,3,7,9,10,11,12]
def rangelist_to_list(rangelist):
- result = []
- if not rangelist:
- return result
- for x in rangelist.split(','):
- if re.match(r'^(\d+)$', x):
- result.append(int(x))
- continue
- m = re.match(r'^(\d+)-(\d+)$', x)
- if m:
- start = int(m.group(1))
- end = int(m.group(2))
- result += range(start, end+1)
- continue
- msg = 'Cannot understand data input: %s %s' % (x, rangelist)
- raise ValueError(msg)
- return result
+ result = []
+ if not rangelist:
+ return result
+ for x in rangelist.split(','):
+ if re.match(r'^(\d+)$', x):
+ result.append(int(x))
+ continue
+ m = re.match(r'^(\d+)-(\d+)$', x)
+ if m:
+ start = int(m.group(1))
+ end = int(m.group(2))
+ result += range(start, end+1)
+ continue
+ msg = 'Cannot understand data input: %s %s' % (x, rangelist)
+ raise ValueError(msg)
+ return result
def rounded_memtotal():
- # Get total of all physical mem, in Kbytes
- usable_Kbytes = autotest_utils.memtotal()
- # usable_Kbytes is system's usable DRAM in Kbytes,
- # as reported by memtotal() from device /proc/meminfo memtotal
- # after Linux deducts 1.5% to 5.1% for system table overhead
- # Undo the unknown actual deduction by rounding up
- # to next small multiple of a big power-of-two
- # eg 12GB - 5.1% gets rounded back up to 12GB
- mindeduct = 0.015 # 1.5 percent
- maxdeduct = 0.055 # 5.5 percent
- # deduction range 1.5% .. 5.5% supports physical mem sizes
- # 6GB .. 12GB in steps of .5GB
- # 12GB .. 24GB in steps of 1 GB
- # 24GB .. 48GB in steps of 2 GB ...
- # Finer granularity in physical mem sizes would require
- # tighter spread between min and max possible deductions
+ # Get total of all physical mem, in Kbytes
+ usable_Kbytes = autotest_utils.memtotal()
+ # usable_Kbytes is system's usable DRAM in Kbytes,
+ # as reported by memtotal() from device /proc/meminfo memtotal
+ # after Linux deducts 1.5% to 5.1% for system table overhead
+ # Undo the unknown actual deduction by rounding up
+ # to next small multiple of a big power-of-two
+ # eg 12GB - 5.1% gets rounded back up to 12GB
+ mindeduct = 0.015 # 1.5 percent
+ maxdeduct = 0.055 # 5.5 percent
+ # deduction range 1.5% .. 5.5% supports physical mem sizes
+ # 6GB .. 12GB in steps of .5GB
+ # 12GB .. 24GB in steps of 1 GB
+ # 24GB .. 48GB in steps of 2 GB ...
+ # Finer granularity in physical mem sizes would require
+ # tighter spread between min and max possible deductions
- # increase mem size by at least min deduction, without rounding
- min_Kbytes = int(usable_Kbytes / (1.0 - mindeduct))
- # increase mem size further by 2**n rounding, by 0..roundKb or more
- round_Kbytes = int(usable_Kbytes / (1.0 - maxdeduct)) - min_Kbytes
- # find least binary roundup 2**n that covers worst-cast roundKb
- mod2n = 1 << int(math.ceil(math.log(round_Kbytes, 2)))
- # have round_Kbytes <= mod2n < round_Kbytes*2
- # round min_Kbytes up to next multiple of mod2n
- phys_Kbytes = min_Kbytes + mod2n - 1
- phys_Kbytes = phys_Kbytes - (phys_Kbytes % mod2n) # clear low bits
- return phys_Kbytes
+ # increase mem size by at least min deduction, without rounding
+ min_Kbytes = int(usable_Kbytes / (1.0 - mindeduct))
+ # increase mem size further by 2**n rounding, by 0..roundKb or more
+ round_Kbytes = int(usable_Kbytes / (1.0 - maxdeduct)) - min_Kbytes
+ # find least binary roundup 2**n that covers worst-case roundKb
+ mod2n = 1 << int(math.ceil(math.log(round_Kbytes, 2)))
+ # have round_Kbytes <= mod2n < round_Kbytes*2
+ # round min_Kbytes up to next multiple of mod2n
+ phys_Kbytes = min_Kbytes + mod2n - 1
+ phys_Kbytes = phys_Kbytes - (phys_Kbytes % mod2n) # clear low bits
+ return phys_Kbytes
def my_container_name():
- # Get current process's inherited or self-built container name
- # within /dev/cpuset. Is '/' for root container, '/sys', etc.
- return utils.read_one_line('/proc/%i/cpuset' % os.getpid())
+ # Get current process's inherited or self-built container name
+ # within /dev/cpuset. Is '/' for root container, '/sys', etc.
+ return utils.read_one_line('/proc/%i/cpuset' % os.getpid())
def get_mem_nodes(container_full_name):
- file_name = os.path.join(container_full_name, "mems")
- if os.path.exists(file_name):
- return rangelist_to_list(utils.read_one_line(file_name))
- else:
- return []
+ file_name = os.path.join(container_full_name, "mems")
+ if os.path.exists(file_name):
+ return rangelist_to_list(utils.read_one_line(file_name))
+ else:
+ return []
def available_exclusive_mem_nodes(parent_container):
- # Get list of numa memory nodes of parent container which could
- # be allocated exclusively to new child containers.
- # This excludes any nodes now allocated (exclusively or not)
- # to existing children.
- available = set(get_mem_nodes(parent_container))
- for child_container in glob.glob('%s/*/mems' % parent_container):
- child_container = os.path.dirname(child_container)
- busy = set(get_mem_nodes(child_container))
- available -= busy
- return list(available)
+ # Get list of numa memory nodes of parent container which could
+ # be allocated exclusively to new child containers.
+ # This excludes any nodes now allocated (exclusively or not)
+ # to existing children.
+ available = set(get_mem_nodes(parent_container))
+ for child_container in glob.glob('%s/*/mems' % parent_container):
+ child_container = os.path.dirname(child_container)
+ busy = set(get_mem_nodes(child_container))
+ available -= busy
+ return list(available)
def my_mem_nodes():
- # Get list of numa memory nodes owned by current process's container.
- return get_mem_nodes('/dev/cpuset%s' % my_container_name())
+ # Get list of numa memory nodes owned by current process's container.
+ return get_mem_nodes('/dev/cpuset%s' % my_container_name())
def my_available_exclusive_mem_nodes():
- # Get list of numa memory nodes owned by current process's
- # container, which could be allocated exclusively to new child
- # containers. This excludes any nodes now allocated
- # (exclusively or not) to existing children.
- return available_exclusive_mem_nodes('/dev/cpuset%s' % my_container_name())
+ # Get list of numa memory nodes owned by current process's
+ # container, which could be allocated exclusively to new child
+ # containers. This excludes any nodes now allocated
+ # (exclusively or not) to existing children.
+ return available_exclusive_mem_nodes('/dev/cpuset%s' % my_container_name())
def mbytes_per_mem_node():
- # Get mbyte size of each numa mem node, as float
- # Replaces autotest_utils.node_size().
- # Based on guessed total physical mem size, not on kernel's
- # lesser 'available memory' after various system tables.
- # Can be non-integer when kernel sets up 15 nodes instead of 16.
- return rounded_memtotal() / (len(autotest_utils.numa_nodes()) * 1024.0)
+ # Get mbyte size of each numa mem node, as float
+ # Replaces autotest_utils.node_size().
+ # Based on guessed total physical mem size, not on kernel's
+ # lesser 'available memory' after various system tables.
+ # Can be non-integer when kernel sets up 15 nodes instead of 16.
+ return rounded_memtotal() / (len(autotest_utils.numa_nodes()) * 1024.0)
def get_cpus(container_full_name):
- file_name = os.path.join(container_full_name, "cpus")
- if os.path.exists(file_name):
- return rangelist_to_list(utils.read_one_line(file_name))
- else:
- return []
+ file_name = os.path.join(container_full_name, "cpus")
+ if os.path.exists(file_name):
+ return rangelist_to_list(utils.read_one_line(file_name))
+ else:
+ return []
def my_cpus():
- # Get list of cpu cores owned by current process's container.
- return get_cpus('/dev/cpuset%s' % my_container_name())
+ # Get list of cpu cores owned by current process's container.
+ return get_cpus('/dev/cpuset%s' % my_container_name())
def get_tasks(setname):
- return [x.rstrip() for x in open(setname+'/tasks').readlines()]
+ return [x.rstrip() for x in open(setname+'/tasks').readlines()]
def print_one_cpuset(name):
- dir = os.path.join('/dev/cpuset', name)
- cpus = utils.read_one_line(dir + '/cpus')
- mems = utils.read_one_line(dir + '/mems')
- node_size_ = int(mbytes_per_mem_node()) << 20
- memtotal = node_size_ * len(rangelist_to_list(mems))
- tasks = ','.join(get_tasks(dir))
- print "cpuset %s: size %s; tasks %s; cpus %s; mems %s" % \
- (name, autotest_utils.human_format(memtotal), tasks, cpus, mems)
+ dir = os.path.join('/dev/cpuset', name)
+ cpus = utils.read_one_line(dir + '/cpus')
+ mems = utils.read_one_line(dir + '/mems')
+ node_size_ = int(mbytes_per_mem_node()) << 20
+ memtotal = node_size_ * len(rangelist_to_list(mems))
+ tasks = ','.join(get_tasks(dir))
+ print "cpuset %s: size %s; tasks %s; cpus %s; mems %s" % \
+ (name, autotest_utils.human_format(memtotal), tasks, cpus, mems)
def print_all_cpusets():
- for cpuset in glob.glob('/dev/cpuset/*'):
- print_one_cpuset(re.sub(r'.*/', '', cpuset))
+ for cpuset in glob.glob('/dev/cpuset/*'):
+ print_one_cpuset(re.sub(r'.*/', '', cpuset))
def release_dead_containers(parent=super_root):
- # Delete temp subcontainers nested within parent container
- # that are now dead (having no tasks and no sub-containers)
- # and recover their cpu and mem resources.
- # Must not call when a parallel task may be allocating containers!
- # Limit to test* names to preserve permanent containers.
- for child in glob.glob('%s/test*' % parent):
- print 'releasing dead container', child
- release_dead_containers(child) # bottom-up tree walk
- # rmdir has no effect when container still
- # has tasks or sub-containers
- os.rmdir(child)
+ # Delete temp subcontainers nested within parent container
+ # that are now dead (having no tasks and no sub-containers)
+ # and recover their cpu and mem resources.
+ # Must not call when a parallel task may be allocating containers!
+ # Limit to test* names to preserve permanent containers.
+ for child in glob.glob('%s/test*' % parent):
+ print 'releasing dead container', child
+ release_dead_containers(child) # bottom-up tree walk
+ # rmdir has no effect when container still
+ # has tasks or sub-containers
+ os.rmdir(child)
class cpuset:
- def display(self):
- print_one_cpuset(os.path.join(self.root, self.name))
+ def display(self):
+ print_one_cpuset(os.path.join(self.root, self.name))
- def release(self):
- print "releasing ", self.cpudir
- parent_t = os.path.join(self.root, 'tasks')
- # Transfer survivors (and self) to parent
- for task in get_tasks(self.cpudir):
- utils.write_one_line(parent_t, task)
- os.rmdir(self.cpudir)
- if os.path.exists(self.cpudir):
- raise error.AutotestError('Could not delete container '
- + self.cpudir)
+ def release(self):
+ print "releasing ", self.cpudir
+ parent_t = os.path.join(self.root, 'tasks')
+ # Transfer survivors (and self) to parent
+ for task in get_tasks(self.cpudir):
+ utils.write_one_line(parent_t, task)
+ os.rmdir(self.cpudir)
+ if os.path.exists(self.cpudir):
+ raise error.AutotestError('Could not delete container '
+ + self.cpudir)
- def __init__(self, name, job_size=None, job_pid=None, cpus=None,
- root=None):
- """\
- Create a cpuset container and move job_pid into it
- Allocate the list "cpus" of cpus to that container
+ def __init__(self, name, job_size=None, job_pid=None, cpus=None,
+ root=None):
+ """\
+ Create a cpuset container and move job_pid into it
+ Allocate the list "cpus" of cpus to that container
- name = arbitrary string tag
- job_size = reqested memory for job in megabytes
- job_pid = pid of job we're putting into the container
- cpu = list of cpu indicies to associate with the cpuset
- root = the cpuset to create this new set in
- """
- if not os.path.exists(os.path.join(super_root, "cpus")):
- raise error.AutotestError('Root container /dev/cpuset '
- 'is empty; please reboot')
+ name = arbitrary string tag
+ job_size = requested memory for job in megabytes
+ job_pid = pid of job we're putting into the container
+ cpus = list of cpu indices to associate with the cpuset
+ root = the cpuset to create this new set in
+ """
+ if not os.path.exists(os.path.join(super_root, "cpus")):
+ raise error.AutotestError('Root container /dev/cpuset '
+ 'is empty; please reboot')
- self.name = name
+ self.name = name
- if root == None:
- # default to nested in process's current container
- root = my_container_name()[1:]
- self.root = os.path.join(super_root, root)
- if not os.path.exists(self.root):
- raise error.AutotestError(('Parent container %s'
- ' does not exist')
- % self.root)
+ if root == None:
+ # default to nested in process's current container
+ root = my_container_name()[1:]
+ self.root = os.path.join(super_root, root)
+ if not os.path.exists(self.root):
+ raise error.AutotestError(('Parent container %s'
+ ' does not exist')
+ % self.root)
- if job_size == None:
- # default to biggest container we can make under root
- job_size = int( mbytes_per_mem_node() *
- len(available_exclusive_mem_nodes(self.root)) )
- if not job_size:
- raise error.AutotestError('Creating container '
- 'with no mem')
- self.memory = job_size
+ if job_size == None:
+ # default to biggest container we can make under root
+ job_size = int( mbytes_per_mem_node() *
+ len(available_exclusive_mem_nodes(self.root)) )
+ if not job_size:
+ raise error.AutotestError('Creating container '
+ 'with no mem')
+ self.memory = job_size
- if cpus == None:
- # default to biggest container we can make under root
- cpus = get_cpus(self.root)
- if not cpus:
- raise error.AutotestError('Creating container '
- 'with no cpus')
- self.cpus = cpus
+ if cpus == None:
+ # default to biggest container we can make under root
+ cpus = get_cpus(self.root)
+ if not cpus:
+ raise error.AutotestError('Creating container '
+ 'with no cpus')
+ self.cpus = cpus
- # default to the current pid
- if not job_pid:
- job_pid = os.getpid()
+ # default to the current pid
+ if not job_pid:
+ job_pid = os.getpid()
- print "cpuset(name=%s, root=%s, job_size=%d, pid=%d)" % \
- (name, root, job_size, job_pid)
+ print "cpuset(name=%s, root=%s, job_size=%d, pid=%d)" % \
+ (name, root, job_size, job_pid)
- self.cpudir = os.path.join(self.root, name)
- if os.path.exists(self.cpudir):
- self.release() # destructively replace old
+ self.cpudir = os.path.join(self.root, name)
+ if os.path.exists(self.cpudir):
+ self.release() # destructively replace old
- nodes_needed = int(math.ceil( float(job_size) /
- math.ceil(mbytes_per_mem_node()) ))
+ nodes_needed = int(math.ceil( float(job_size) /
+ math.ceil(mbytes_per_mem_node()) ))
- if nodes_needed > len(get_mem_nodes(self.root)):
- raise error.AutotestError("Container's memory "
- "is bigger than parent's")
+ if nodes_needed > len(get_mem_nodes(self.root)):
+ raise error.AutotestError("Container's memory "
+ "is bigger than parent's")
- while True:
- # Pick specific free mem nodes for this cpuset
- mems = available_exclusive_mem_nodes(self.root)
- if len(mems) < nodes_needed:
- raise error.AutotestError(('Existing container'
- ' hold %d mem nodes'
- ' needed by new'
- 'container')
- % (nodes_needed
- - len(mems)))
- mems = mems[-nodes_needed:]
- mems_spec = ','.join(['%d' % x for x in mems])
- os.mkdir(self.cpudir)
- utils.write_one_line(os.path.join(self.cpudir,
- 'mem_exclusive'), '1')
- utils.write_one_line(os.path.join(self.cpudir,
- 'mems'),
- mems_spec)
- # Above sends err msg to client.log.0, but no exception,
- # if mems_spec contained any now-taken nodes
- # Confirm that siblings didn't grab our chosen mems:
- nodes_gotten = len(get_mem_nodes(self.cpudir))
- if nodes_gotten >= nodes_needed:
- break # success
- print "cpuset %s lost race for nodes" % name, mems_spec
- # Return any mem we did get, and try again
- os.rmdir(self.cpudir)
+ while True:
+ # Pick specific free mem nodes for this cpuset
+ mems = available_exclusive_mem_nodes(self.root)
+ if len(mems) < nodes_needed:
+ raise error.AutotestError(('Existing containers'
+ ' hold %d mem nodes'
+ ' needed by new'
+ ' container')
+ % (nodes_needed
+ - len(mems)))
+ mems = mems[-nodes_needed:]
+ mems_spec = ','.join(['%d' % x for x in mems])
+ os.mkdir(self.cpudir)
+ utils.write_one_line(os.path.join(self.cpudir,
+ 'mem_exclusive'), '1')
+ utils.write_one_line(os.path.join(self.cpudir,
+ 'mems'),
+ mems_spec)
+ # Above sends err msg to client.log.0, but no exception,
+ # if mems_spec contained any now-taken nodes
+ # Confirm that siblings didn't grab our chosen mems:
+ nodes_gotten = len(get_mem_nodes(self.cpudir))
+ if nodes_gotten >= nodes_needed:
+ break # success
+ print "cpuset %s lost race for nodes" % name, mems_spec
+ # Return any mem we did get, and try again
+ os.rmdir(self.cpudir)
- # add specified cpu cores and own task pid to container:
- cpu_spec = ','.join(['%d' % x for x in cpus])
- utils.write_one_line(os.path.join(self.cpudir,
- 'cpus'),
- cpu_spec)
- utils.write_one_line(os.path.join(self.cpudir,
- 'tasks'),
- "%d" % job_pid)
- self.display()
+ # add specified cpu cores and own task pid to container:
+ cpu_spec = ','.join(['%d' % x for x in cpus])
+ utils.write_one_line(os.path.join(self.cpudir,
+ 'cpus'),
+ cpu_spec)
+ utils.write_one_line(os.path.join(self.cpudir,
+ 'tasks'),
+ "%d" % job_pid)
+ self.display()
diff --git a/client/bin/fd_stack.py b/client/bin/fd_stack.py
index c377cf0..e0f3f4c 100755
--- a/client/bin/fd_stack.py
+++ b/client/bin/fd_stack.py
@@ -3,137 +3,137 @@
import sys, os
class fd_stack:
- """a stack of fd redirects
+ """a stack of fd redirects
- Redirects cause existing fd's to be pushed on the stack; restore()
- causes the current set of redirects to be popped, restoring the previous
- filehandle destinations.
+ Redirects cause existing fd's to be pushed on the stack; restore()
+ causes the current set of redirects to be popped, restoring the previous
+ filehandle destinations.
- Note that we need to redirect both the sys.stdout type descriptor
- (which print, etc use) and the low level OS numbered descriptor
- which os.system() etc use.
- """
+ Note that we need to redirect both the sys.stdout type descriptor
+ (which print, etc use) and the low level OS numbered descriptor
+ which os.system() etc use.
+ """
- def __init__(self, fd, filehandle):
- self.fd = fd # eg 1
- self.filehandle = filehandle # eg sys.stdout
- self.stack = [(fd, filehandle)]
+ def __init__(self, fd, filehandle):
+ self.fd = fd # eg 1
+ self.filehandle = filehandle # eg sys.stdout
+ self.stack = [(fd, filehandle)]
- def update_handle(self, new):
- if (self.filehandle == sys.stdout):
- sys.stdout = new
- if (self.filehandle == sys.stderr):
- sys.stderr = new
- self.filehandle = new
+ def update_handle(self, new):
+ if (self.filehandle == sys.stdout):
+ sys.stdout = new
+ if (self.filehandle == sys.stderr):
+ sys.stderr = new
+ self.filehandle = new
- def redirect(self, filename):
- """Redirect output to the specified file
+ def redirect(self, filename):
+ """Redirect output to the specified file
- Overwrites the previous contents, if any.
- """
- self.filehandle.flush()
- fdcopy = os.dup(self.fd)
- self.stack.append( (fdcopy, self.filehandle, 0) )
- # self.filehandle = file(filename, 'w')
- if (os.path.isfile(filename)):
- newfd = os.open(filename, os.O_WRONLY)
- else:
- newfd = os.open(filename, os.O_WRONLY | os.O_CREAT)
- os.dup2(newfd, self.fd)
- os.close(newfd)
- self.update_handle(os.fdopen(self.fd, 'w'))
+ Overwrites the previous contents, if any.
+ """
+ self.filehandle.flush()
+ fdcopy = os.dup(self.fd)
+ self.stack.append( (fdcopy, self.filehandle, 0) )
+ # self.filehandle = file(filename, 'w')
+ if (os.path.isfile(filename)):
+ newfd = os.open(filename, os.O_WRONLY)
+ else:
+ newfd = os.open(filename, os.O_WRONLY | os.O_CREAT)
+ os.dup2(newfd, self.fd)
+ os.close(newfd)
+ self.update_handle(os.fdopen(self.fd, 'w'))
- def tee_redirect(self, filename):
- """Tee output to the specified file
+ def tee_redirect(self, filename):
+ """Tee output to the specified file
- Overwrites the previous contents, if any.
- """
- self.filehandle.flush()
- #print_to_tty("tee_redirect to " + filename)
- #where_art_thy_filehandles()
- fdcopy = os.dup(self.fd)
- r, w = os.pipe()
- pid = os.fork()
- if pid: # parent
- os.close(r)
- os.dup2(w, self.fd)
- os.close(w)
- self.stack.append( (fdcopy, self.filehandle, pid) )
- self.update_handle(os.fdopen(self.fd, 'w', 0))
- #where_art_thy_filehandles()
- #print_to_tty("done tee_redirect to " + filename)
- else: # child
- os.close(w)
- os.dup2(r, 0)
- os.dup2(fdcopy, 1)
- os.close(r)
- os.close(fdcopy)
- os.execlp('tee', 'tee', '-a', filename)
+ Overwrites the previous contents, if any.
+ """
+ self.filehandle.flush()
+ #print_to_tty("tee_redirect to " + filename)
+ #where_art_thy_filehandles()
+ fdcopy = os.dup(self.fd)
+ r, w = os.pipe()
+ pid = os.fork()
+ if pid: # parent
+ os.close(r)
+ os.dup2(w, self.fd)
+ os.close(w)
+ self.stack.append( (fdcopy, self.filehandle, pid) )
+ self.update_handle(os.fdopen(self.fd, 'w', 0))
+ #where_art_thy_filehandles()
+ #print_to_tty("done tee_redirect to " + filename)
+ else: # child
+ os.close(w)
+ os.dup2(r, 0)
+ os.dup2(fdcopy, 1)
+ os.close(r)
+ os.close(fdcopy)
+ os.execlp('tee', 'tee', '-a', filename)
-
- def restore(self):
- """unredirect one level"""
- self.filehandle.flush()
- # print_to_tty("ENTERING RESTORE %d" % self.fd)
- # where_art_thy_filehandles()
- (old_fd, old_filehandle, pid) = self.stack.pop()
- # print_to_tty("old_fd %d" % old_fd)
- # print_to_tty("self.fd %d" % self.fd)
- self.filehandle.close() # seems to close old_fd as well.
- if pid:
- os.waitpid(pid, 0)
- # where_art_thy_filehandles()
- os.dup2(old_fd, self.fd)
- # print_to_tty("CLOSING FD %d" % old_fd)
- os.close(old_fd)
- # where_art_thy_filehandles()
- self.update_handle(old_filehandle)
- # where_art_thy_filehandles()
- # print_to_tty("EXIT RESTORE %d" % self.fd)
+
+ def restore(self):
+ """unredirect one level"""
+ self.filehandle.flush()
+ # print_to_tty("ENTERING RESTORE %d" % self.fd)
+ # where_art_thy_filehandles()
+ (old_fd, old_filehandle, pid) = self.stack.pop()
+ # print_to_tty("old_fd %d" % old_fd)
+ # print_to_tty("self.fd %d" % self.fd)
+ self.filehandle.close() # seems to close old_fd as well.
+ if pid:
+ os.waitpid(pid, 0)
+ # where_art_thy_filehandles()
+ os.dup2(old_fd, self.fd)
+ # print_to_tty("CLOSING FD %d" % old_fd)
+ os.close(old_fd)
+ # where_art_thy_filehandles()
+ self.update_handle(old_filehandle)
+ # where_art_thy_filehandles()
+ # print_to_tty("EXIT RESTORE %d" % self.fd)
def tee_output_logdir(fn):
- """\
-	Method decorator for a class to tee the output to the object's log_dir.
- """
- def tee_logdir_wrapper(self, *args, **dargs):
- self.job.stdout.tee_redirect(os.path.join(self.log_dir, 'stdout'))
- self.job.stderr.tee_redirect(os.path.join(self.log_dir, 'stderr'))
- try:
- result = fn(self, *args, **dargs)
- finally:
- self.job.stderr.restore()
- self.job.stdout.restore()
- return result
- return tee_logdir_wrapper
+ """\
+    Method decorator for a class to tee the output to the object's log_dir.
+ """
+ def tee_logdir_wrapper(self, *args, **dargs):
+ self.job.stdout.tee_redirect(os.path.join(self.log_dir, 'stdout'))
+ self.job.stderr.tee_redirect(os.path.join(self.log_dir, 'stderr'))
+ try:
+ result = fn(self, *args, **dargs)
+ finally:
+ self.job.stderr.restore()
+ self.job.stdout.restore()
+ return result
+ return tee_logdir_wrapper
def __mark(filename, msg):
- file = open(filename, 'a')
- file.write(msg)
- file.close()
+ file = open(filename, 'a')
+ file.write(msg)
+ file.close()
def tee_output_logdir_mark(fn):
- def tee_logdir_mark_wrapper(self, *args, **dargs):
- mark = self.__class__.__name__ + "." + fn.__name__
- outfile = os.path.join(self.log_dir, 'stdout')
- errfile = os.path.join(self.log_dir, 'stderr')
- __mark(outfile, "--- START " + mark + " ---\n")
- __mark(errfile, "--- START " + mark + " ---\n")
- self.job.stdout.tee_redirect(outfile)
- self.job.stderr.tee_redirect(errfile)
- try:
- result = fn(self, *args, **dargs)
- finally:
- self.job.stderr.restore()
- self.job.stdout.restore()
- __mark(outfile, "--- END " + mark + " ---\n")
- __mark(errfile, "--- END " + mark + " ---\n")
+ def tee_logdir_mark_wrapper(self, *args, **dargs):
+ mark = self.__class__.__name__ + "." + fn.__name__
+ outfile = os.path.join(self.log_dir, 'stdout')
+ errfile = os.path.join(self.log_dir, 'stderr')
+ __mark(outfile, "--- START " + mark + " ---\n")
+ __mark(errfile, "--- START " + mark + " ---\n")
+ self.job.stdout.tee_redirect(outfile)
+ self.job.stderr.tee_redirect(errfile)
+ try:
+ result = fn(self, *args, **dargs)
+ finally:
+ self.job.stderr.restore()
+ self.job.stdout.restore()
+ __mark(outfile, "--- END " + mark + " ---\n")
+ __mark(errfile, "--- END " + mark + " ---\n")
- return result
+ return result
- tee_logdir_mark_wrapper.__name__ = fn.__name__
- return tee_logdir_mark_wrapper
+ tee_logdir_mark_wrapper.__name__ = fn.__name__
+ return tee_logdir_mark_wrapper
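
For reference, a minimal usage sketch of the class above (assuming
fd_stack.py is importable; 'out.log' is an arbitrary example path):

    import sys
    import fd_stack

    stdout_stack = fd_stack.fd_stack(1, sys.stdout)
    stdout_stack.redirect('out.log')    # print output now lands in out.log
    print "captured in out.log"
    stdout_stack.restore()              # pop back to the original stdout
    print "back on the original stdout"
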
diff --git a/client/bin/filesystem.py b/client/bin/filesystem.py
index 2c24141..47c8c44 100755
--- a/client/bin/filesystem.py
+++ b/client/bin/filesystem.py
@@ -5,170 +5,170 @@
from autotest_lib.client.common_lib import error, utils
def list_mount_devices():
- devices = []
- # list mounted filesystems
- for line in utils.system_output('mount').splitlines():
- devices.append(line.split()[0])
- # list mounted swap devices
- for line in utils.system_output('swapon -s').splitlines():
- if line.startswith('/'): # skip header line
- devices.append(line.split()[0])
- return devices
+ devices = []
+ # list mounted filesystems
+ for line in utils.system_output('mount').splitlines():
+ devices.append(line.split()[0])
+ # list mounted swap devices
+ for line in utils.system_output('swapon -s').splitlines():
+ if line.startswith('/'): # skip header line
+ devices.append(line.split()[0])
+ return devices
def list_mount_points():
- mountpoints = []
- for line in utils.system_output('mount').splitlines():
- mountpoints.append(line.split()[2])
- return mountpoints
+ mountpoints = []
+ for line in utils.system_output('mount').splitlines():
+ mountpoints.append(line.split()[2])
+ return mountpoints
class filesystem:
- """
- Class for handling filesystems
- """
+ """
+ Class for handling filesystems
+ """
- def __init__(self, job, device, mountpoint, loop_size = 0):
- """
- device should be able to be a file as well
- which we mount as loopback
+ def __init__(self, job, device, mountpoint, loop_size = 0):
+ """
+ device should be able to be a file as well
+ which we mount as loopback
- device
- The device in question (eg "/dev/hda2")
- mountpoint
- Default mountpoint for the device.
- loop_size
- size of loopback device (in MB)
- """
+ device
+ The device in question (eg "/dev/hda2")
+ mountpoint
+ Default mountpoint for the device.
+ loop_size
+ size of loopback device (in MB)
+ """
- part = re.compile(r'^part(\d+)$')
- m = part.match(device)
- if m:
- number = int(m.groups()[0])
- partitions = job.config_get('filesystem.partitions')
- try:
- device = partitions[number]
- except:
- raise NameError("Partition '" + device + "' not available")
+ part = re.compile(r'^part(\d+)$')
+ m = part.match(device)
+ if m:
+ number = int(m.groups()[0])
+ partitions = job.config_get('filesystem.partitions')
+ try:
+ device = partitions[number]
+ except:
+ raise NameError("Partition '" + device + "' not available")
- self.device = device
- self.mountpoint = mountpoint
- self.job = job
- self.fstype = None
- self.loop = loop_size
- if self.loop:
- utils.system('dd if=/dev/zero of=%s bs=1M count=%d' % \
- (device, loop_size))
+ self.device = device
+ self.mountpoint = mountpoint
+ self.job = job
+ self.fstype = None
+ self.loop = loop_size
+ if self.loop:
+ utils.system('dd if=/dev/zero of=%s bs=1M count=%d' % \
+ (device, loop_size))
- def mkfs(self, fstype = 'ext2', args = ''):
- """
- Format a partition to fstype
- """
- if list_mount_devices().count(self.device):
- raise NameError('Attempted to format mounted device')
- if fstype == 'xfs':
- args += ' -f'
- if self.loop:
- # BAH. Inconsistent mkfs syntax SUCKS.
- if fstype == 'ext2' or fstype == 'ext3':
- args += ' -F'
- if fstype == 'reiserfs':
- args += ' -f'
- args = args.lstrip()
- mkfs_cmd = "mkfs -t %s %s %s" % (fstype, args, self.device)
- print mkfs_cmd
- sys.stdout.flush()
- try:
- utils.system("yes | " + mkfs_cmd)
- except:
- self.job.record('FAIL', None, mkfs_cmd, error.format_error())
- raise
- else:
- self.job.record('GOOD', None, mkfs_cmd)
- self.fstype = fstype
+ def mkfs(self, fstype = 'ext2', args = ''):
+ """
+ Format a partition to fstype
+ """
+ if list_mount_devices().count(self.device):
+ raise NameError('Attempted to format mounted device')
+ if fstype == 'xfs':
+ args += ' -f'
+ if self.loop:
+ # BAH. Inconsistent mkfs syntax SUCKS.
+ if fstype == 'ext2' or fstype == 'ext3':
+ args += ' -F'
+ if fstype == 'reiserfs':
+ args += ' -f'
+ args = args.lstrip()
+ mkfs_cmd = "mkfs -t %s %s %s" % (fstype, args, self.device)
+ print mkfs_cmd
+ sys.stdout.flush()
+ try:
+ utils.system("yes | " + mkfs_cmd)
+ except:
+ self.job.record('FAIL', None, mkfs_cmd, error.format_error())
+ raise
+ else:
+ self.job.record('GOOD', None, mkfs_cmd)
+ self.fstype = fstype
- def fsck(self, args = '-n'):
- # I hate reiserfstools.
-		# Requires an explicit Yes for some inane reason
- fsck_cmd = 'fsck %s %s' % (self.device, args)
- if self.fstype == 'reiserfs':
- fsck_cmd = 'yes "Yes" | ' + fsck_cmd
- print fsck_cmd
- sys.stdout.flush()
- try:
- utils.system("yes | " + fsck_cmd)
- except:
- self.job.record('FAIL', None, fsck_cmd, error.format_error())
- raise
- else:
- self.job.record('GOOD', None, fsck_cmd)
-
-
- def mount(self, mountpoint = None, args = ''):
- if self.fstype:
- args += ' -t ' + self.fstype
- if self.loop:
- args += ' -o loop'
- args = args.lstrip()
-
- if not mountpoint:
- mountpoint = self.mountpoint
- mount_cmd = "mount %s %s %s" % (args, self.device, mountpoint)
-
- if list_mount_devices().count(self.device):
- err = 'Attempted to mount mounted device'
- self.job.record('FAIL', None, mount_cmd, err)
- raise NameError(err)
- if list_mount_points().count(mountpoint):
- err = 'Attempted to mount busy mountpoint'
- self.job.record('FAIL', None, mount_cmd, err)
- raise NameError(err)
-
- print mount_cmd
- sys.stdout.flush()
- try:
- utils.system(mount_cmd)
- except:
- self.job.record('FAIL', None, mount_cmd, error.format_error())
- raise
- else:
- self.job.record('GOOD', None, mount_cmd)
+ def fsck(self, args = '-n'):
+ # I hate reiserfstools.
+        # Requires an explicit Yes for some inane reason
+ fsck_cmd = 'fsck %s %s' % (self.device, args)
+ if self.fstype == 'reiserfs':
+ fsck_cmd = 'yes "Yes" | ' + fsck_cmd
+ print fsck_cmd
+ sys.stdout.flush()
+ try:
+ utils.system("yes | " + fsck_cmd)
+ except:
+ self.job.record('FAIL', None, fsck_cmd, error.format_error())
+ raise
+ else:
+ self.job.record('GOOD', None, fsck_cmd)
- def unmount(self, handle=None):
- if not handle:
- handle = self.device
- umount_cmd = "umount " + handle
- print umount_cmd
- sys.stdout.flush()
- try:
- utils.system(umount_cmd)
- except:
- self.job.record('FAIL', None, umount_cmd, error.format_error())
- raise
- else:
- self.job.record('GOOD', None, umount_cmd)
+ def mount(self, mountpoint = None, args = ''):
+ if self.fstype:
+ args += ' -t ' + self.fstype
+ if self.loop:
+ args += ' -o loop'
+ args = args.lstrip()
+
+ if not mountpoint:
+ mountpoint = self.mountpoint
+ mount_cmd = "mount %s %s %s" % (args, self.device, mountpoint)
+
+ if list_mount_devices().count(self.device):
+ err = 'Attempted to mount mounted device'
+ self.job.record('FAIL', None, mount_cmd, err)
+ raise NameError(err)
+ if list_mount_points().count(mountpoint):
+ err = 'Attempted to mount busy mountpoint'
+ self.job.record('FAIL', None, mount_cmd, err)
+ raise NameError(err)
+
+ print mount_cmd
+ sys.stdout.flush()
+ try:
+ utils.system(mount_cmd)
+ except:
+ self.job.record('FAIL', None, mount_cmd, error.format_error())
+ raise
+ else:
+ self.job.record('GOOD', None, mount_cmd)
- def get_io_scheduler_list(self, device_name):
- names = open(self.__sched_path(device_name)).read()
- return names.translate(string.maketrans('[]', ' ')).split()
+ def unmount(self, handle=None):
+ if not handle:
+ handle = self.device
+ umount_cmd = "umount " + handle
+ print umount_cmd
+ sys.stdout.flush()
+ try:
+ utils.system(umount_cmd)
+ except:
+ self.job.record('FAIL', None, umount_cmd, error.format_error())
+ raise
+ else:
+ self.job.record('GOOD', None, umount_cmd)
- def get_io_scheduler(self, device_name):
- return re.split('[\[\]]',
- open(self.__sched_path(device_name)).read())[1]
+ def get_io_scheduler_list(self, device_name):
+ names = open(self.__sched_path(device_name)).read()
+ return names.translate(string.maketrans('[]', ' ')).split()
- def set_io_scheduler(self, device_name, name):
- if name not in self.get_io_scheduler_list(device_name):
- raise NameError('No such IO scheduler: %s' % name)
- f = open(self.__sched_path(device_name), 'w')
- print >> f, name
- f.close()
+ def get_io_scheduler(self, device_name):
+ return re.split('[\[\]]',
+ open(self.__sched_path(device_name)).read())[1]
- def __sched_path(self, device_name):
- return '/sys/block/%s/queue/scheduler' % device_name
+ def set_io_scheduler(self, device_name, name):
+ if name not in self.get_io_scheduler_list(device_name):
+ raise NameError('No such IO scheduler: %s' % name)
+ f = open(self.__sched_path(device_name), 'w')
+ print >> f, name
+ f.close()
+
+
+ def __sched_path(self, device_name):
+ return '/sys/block/%s/queue/scheduler' % device_name
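
For reference, a hypothetical use of the filesystem class above ('job'
stands in for a real autotest job object; the paths are made up):

    fs = filesystem(job, '/tmp/loop.img', '/mnt/test', loop_size=100)
                        # the constructor dd's the 100 MB backing file
    fs.mkfs('ext3')     # mkfs -t ext3 -F /tmp/loop.img (-F added for loopback)
    fs.mount()          # mount -t ext3 -o loop /tmp/loop.img /mnt/test
    # ... exercise the filesystem ...
    fs.unmount()
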
diff --git a/client/bin/grub.py b/client/bin/grub.py
index 68b9bc5..81ea3de 100755
--- a/client/bin/grub.py
+++ b/client/bin/grub.py
@@ -4,139 +4,139 @@
import shutil
import re
-import os
+import os
import os.path
import string
class grub:
- config_locations = ['/boot/grub/grub.conf', '/boot/grub/menu.lst',
- '/etc/grub.conf']
+ config_locations = ['/boot/grub/grub.conf', '/boot/grub/menu.lst',
+ '/etc/grub.conf']
- def __init__(self, config_file=None):
- if config_file:
- self.config = config_file
- else:
- self.config = self.detect()
- self.read()
+ def __init__(self, config_file=None):
+ if config_file:
+ self.config = config_file
+ else:
+ self.config = self.detect()
+ self.read()
- def read(self):
- conf_file = file(self.config, 'r')
- self.lines = conf_file.readlines()
- conf_file.close()
+ def read(self):
+ conf_file = file(self.config, 'r')
+ self.lines = conf_file.readlines()
+ conf_file.close()
- self.entries = [] # list of stanzas
- self.titles = {} # dictionary of titles
- entry = grub_entry(-1)
- count = 0
- for line in self.lines:
- if re.match(r'\s*title', line):
- self.entries.append(entry)
- entry = grub_entry(count)
- count = count + 1
- title = line.replace('title ', '')
- title = title.rstrip('\n')
- entry.set('title', title)
- self.titles[title] = entry
- # if line.startswith('initrd'):
- if re.match(r'\s*initrd', line):
- entry.set('initrd',
- re.sub(r'\s*initrd\s+', '', line))
- if re.match(r'\s*kernel', line):
- entry.set('kernel',
- re.sub(r'\s*kernel\s+', '', line))
- entry.lines.append(line)
- self.entries.append(entry)
- self.preamble = self.entries.pop(0) # separate preamble
+ self.entries = [] # list of stanzas
+ self.titles = {} # dictionary of titles
+ entry = grub_entry(-1)
+ count = 0
+ for line in self.lines:
+ if re.match(r'\s*title', line):
+ self.entries.append(entry)
+ entry = grub_entry(count)
+ count = count + 1
+ title = line.replace('title ', '')
+ title = title.rstrip('\n')
+ entry.set('title', title)
+ self.titles[title] = entry
+ # if line.startswith('initrd'):
+ if re.match(r'\s*initrd', line):
+ entry.set('initrd',
+ re.sub(r'\s*initrd\s+', '', line))
+ if re.match(r'\s*kernel', line):
+ entry.set('kernel',
+ re.sub(r'\s*kernel\s+', '', line))
+ entry.lines.append(line)
+ self.entries.append(entry)
+ self.preamble = self.entries.pop(0) # separate preamble
- def write(self):
- conf_file = file(self.config, 'w')
-		conf_file.writelines(self.preamble.lines)
-		for entry in self.entries:
-			conf_file.writelines(entry.lines)
- conf_file.close()
+ def write(self):
+ conf_file = file(self.config, 'w')
+        conf_file.writelines(self.preamble.lines)
+        for entry in self.entries:
+            conf_file.writelines(entry.lines)
+ conf_file.close()
- def dump(self):
- for line in self.preamble.lines:
- print line,
- for entry in self.entries:
- for line in entry.lines:
- print line,
+ def dump(self):
+ for line in self.preamble.lines:
+ print line,
+ for entry in self.entries:
+ for line in entry.lines:
+ print line,
- def backup(self):
- shutil.copyfile(self.config, self.config+'.bak')
-		restore = file(os.environ['AUTODIR'] + '/var/autotest.boot.restore', 'w')
- restore.write('cp ' + self.config+'.bak ' + self.config + '\n')
- restore.close()
+ def backup(self):
+ shutil.copyfile(self.config, self.config+'.bak')
+        restore = file(os.environ['AUTODIR'] + '/var/autotest.boot.restore', 'w')
+ restore.write('cp ' + self.config+'.bak ' + self.config + '\n')
+ restore.close()
- def bootloader(self):
- return 'grub'
+ def bootloader(self):
+ return 'grub'
- def detect(self):
- for config in grub.config_locations:
- if os.path.isfile(config) and not os.path.islink(config):
- return config
+ def detect(self):
+ for config in grub.config_locations:
+ if os.path.isfile(config) and not os.path.islink(config):
+ return config
- def list_titles(self):
- list = []
- for entry in self.entries:
- list.append(entry.get('title'))
- return list
+ def list_titles(self):
+ list = []
+ for entry in self.entries:
+ list.append(entry.get('title'))
+ return list
- def print_entry(self, index):
- entry = self.entries[index]
- entry.print_entry()
+ def print_entry(self, index):
+ entry = self.entries[index]
+ entry.print_entry()
- def renamed_entry(self, index, newname, args=False):
- "print a specified entry, renaming it as specified"
- entry = self.entries[index]
- entry.set('title', newname)
- if args:
- entry.set_autotest_kernel()
- entry.print_entry()
+ def renamed_entry(self, index, newname, args=False):
+ "print a specified entry, renaming it as specified"
+ entry = self.entries[index]
+ entry.set('title', newname)
+ if args:
+ entry.set_autotest_kernel()
+ entry.print_entry()
- def omit_markers(self, marker):
-		# print, omitting entries between specified markers
- print_state = True
-		for line in self.lines:
- if line.count(marker):
- print_state = not print_state
- else:
- if print_state:
- print line
+ def omit_markers(self, marker):
+        # print, omitting entries between specified markers
+ print_state = True
+        for line in self.lines:
+ if line.count(marker):
+ print_state = not print_state
+ else:
+ if print_state:
+ print line
- def select(self, title, boot_options=None):
- entry = self.titles[title]
- print "grub: will boot entry %d (0-based)" % entry.index
- self.set_default(entry.index)
- self.set_timeout()
+ def select(self, title, boot_options=None):
+ entry = self.titles[title]
+ print "grub: will boot entry %d (0-based)" % entry.index
+ self.set_default(entry.index)
+ self.set_timeout()
- def set_default(self, index):
- lines = (self.preamble).lines
- for i in range(len(lines)):
- default = 'default %d' % index
- lines[i] = re.sub(r'^\s*default.*',
- default, lines[i])
+ def set_default(self, index):
+ lines = (self.preamble).lines
+ for i in range(len(lines)):
+ default = 'default %d' % index
+ lines[i] = re.sub(r'^\s*default.*',
+ default, lines[i])
- def set_timeout(self):
- lines = (self.preamble).lines
- for i in range(len(lines)):
- lines[i] = re.sub(r'^timeout.*/',
- 'timeout 60', lines[i])
- lines[i] = re.sub(r'^(\s*terminal .*--timeout)=\d+',
- r'\1=30', lines[i])
-
+ def set_timeout(self):
+ lines = (self.preamble).lines
+ for i in range(len(lines)):
+ lines[i] = re.sub(r'^timeout.*/',
+ 'timeout 60', lines[i])
+ lines[i] = re.sub(r'^(\s*terminal .*--timeout)=\d+',
+ r'\1=30', lines[i])
+
# ----------------------------------------------------------------------
@@ -145,49 +145,49 @@
# and bits we don't understand.
class grub_entry:
- def __init__(self, count):
- self.lines = []
- self.fields = {} # title, initrd, kernel, etc
- self.index = count
+ def __init__(self, count):
+ self.lines = []
+ self.fields = {} # title, initrd, kernel, etc
+ self.index = count
- def set(self, field, value):
- print "setting '%s' to '%s'" % (field, value)
- self.fields[field] = value
- for i in range(len(self.lines)):
- m = re.match(r'\s*' + field + r'\s+', self.lines[i])
- if m:
- self.lines[i] = m.group() + value + '\n'
+ def set(self, field, value):
+ print "setting '%s' to '%s'" % (field, value)
+ self.fields[field] = value
+ for i in range(len(self.lines)):
+ m = re.match(r'\s*' + field + r'\s+', self.lines[i])
+ if m:
+ self.lines[i] = m.group() + value + '\n'
- def get(self, field):
- return self.fields[field]
+ def get(self, field):
+ return self.fields[field]
- def print_entry(self):
- print self.lines
+ def print_entry(self):
+ print self.lines
- def set_kernel_options(self, options):
- kernel = self.get('kernel')
- re.sub(r'(autotest_args:).*', r'\1'+options, kernel)
- self.set('kernel', kernel)
+ def set_kernel_options(self, options):
+ kernel = self.get('kernel')
+ re.sub(r'(autotest_args:).*', r'\1'+options, kernel)
+ self.set('kernel', kernel)
- def set_autotest_kernel(self):
- kernel_words = []
- found_path = False
- # Want to copy most of the entry, replacing the 'path'
-		# part of the entry with vmlinuz-autotest in the same
- # dir, and make sure autotest_args: is (uniquely) added
- for word in (self.get('kernel')).split():
- if word.startswith('--'):
- kernel_words.append(word)
- continue
- if not found_path:
- word = os.path.dirname(word)+'vmlinuz-autotest'
- found_path = True
- if re.match(r'auto(bench|test)_args:', word):
- break
- kernel_words.append(word)
- kernel_words.append('autotest_args: ')
- self.set('kernel', string.join(kernel_words))
+ def set_autotest_kernel(self):
+ kernel_words = []
+ found_path = False
+ # Want to copy most of the entry, replacing the 'path'
+        # part of the entry with vmlinuz-autotest in the same
+ # dir, and make sure autotest_args: is (uniquely) added
+ for word in (self.get('kernel')).split():
+ if word.startswith('--'):
+ kernel_words.append(word)
+ continue
+ if not found_path:
+ word = os.path.dirname(word)+'vmlinuz-autotest'
+ found_path = True
+ if re.match(r'auto(bench|test)_args:', word):
+ break
+ kernel_words.append(word)
+ kernel_words.append('autotest_args: ')
+ self.set('kernel', string.join(kernel_words))
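
For reference, a sketch of driving the two classes above (entry titles
are illustrative; write() is what persists the edits):

    g = grub.grub()                   # detect and parse the grub config
    for title in g.list_titles():
        print title
    g.select(g.list_titles()[0])      # point 'default' at that entry
    g.write()                         # write the modified config back out
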
diff --git a/client/bin/harness.py b/client/bin/harness.py
index 2a6378f..86441b8 100755
--- a/client/bin/harness.py
+++ b/client/bin/harness.py
@@ -8,77 +8,77 @@
import os, sys
class harness:
- """The NULL server harness
+ """The NULL server harness
- Properties:
- job
- The job object for this job
- """
+ Properties:
+ job
+ The job object for this job
+ """
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.setup(job)
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.setup(job)
- def setup(self, job):
- """
- job
- The job object for this job
- """
- self.job = job
+ def setup(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.job = job
- configd = os.path.join(os.environ['AUTODIR'], 'configs')
- if os.path.isdir(configd):
- (name, dirs, files) = os.walk(configd).next()
- job.config_set('kernel.default_config_set',
- [ configd + '/' ] + files)
+ configd = os.path.join(os.environ['AUTODIR'], 'configs')
+ if os.path.isdir(configd):
+ (name, dirs, files) = os.walk(configd).next()
+ job.config_set('kernel.default_config_set',
+ [ configd + '/' ] + files)
- def run_start(self):
- """A run within this job is starting"""
- pass
+ def run_start(self):
+ """A run within this job is starting"""
+ pass
- def run_pause(self):
- """A run within this job is completing (expect continue)"""
- pass
+ def run_pause(self):
+ """A run within this job is completing (expect continue)"""
+ pass
- def run_reboot(self):
- """A run within this job is performing a reboot
- (expect continue following reboot)
- """
- pass
+ def run_reboot(self):
+ """A run within this job is performing a reboot
+ (expect continue following reboot)
+ """
+ pass
- def run_abort(self):
- """A run within this job is aborting. It all went wrong"""
- pass
+ def run_abort(self):
+ """A run within this job is aborting. It all went wrong"""
+ pass
- def run_complete(self):
- """A run within this job is completing (all done)"""
- pass
+ def run_complete(self):
+ """A run within this job is completing (all done)"""
+ pass
- def test_status(self, status, tag):
- """A test within this job is completing"""
- pass
+ def test_status(self, status, tag):
+ """A test within this job is completing"""
+ pass
- def test_status_detail(self, code, subdir, operation, status, tag):
- """A test within this job is completing (detail)"""
- pass
+ def test_status_detail(self, code, subdir, operation, status, tag):
+ """A test within this job is completing (detail)"""
+ pass
def select(which, job):
- if not which:
- which = 'standalone'
-
- exec "import harness_%s" % (which)
- exec "myharness = harness_%s.harness_%s(job)" % (which, which)
+ if not which:
+ which = 'standalone'
- return myharness
+ exec "import harness_%s" % (which)
+ exec "myharness = harness_%s.harness_%s(job)" % (which, which)
+
+ return myharness
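
select() above builds the harness module and class names from the
requested type at runtime. The exec-based dispatch is equivalent to
this __import__ sketch (illustrative only; the module keeps exec):

    def select_sketch(which, job):
        if not which:
            which = 'standalone'
        module = __import__('harness_%s' % which)
        return getattr(module, 'harness_%s' % which)(job)
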
diff --git a/client/bin/harness_ABAT.py b/client/bin/harness_ABAT.py
index e5f2f63..8fadb2a 100755
--- a/client/bin/harness_ABAT.py
+++ b/client/bin/harness_ABAT.py
@@ -10,145 +10,145 @@
import os, harness, time, re
def autobench_load(fn):
- disks = re.compile(r'^\s*DATS_FREE_DISKS\s*=(.*\S)\s*$')
- parts = re.compile(r'^\s*DATS_FREE_PARTITIONS\s*=(.*\S)\s*$')
- modules = re.compile(r'^\s*INITRD_MODULES\s*=(.*\S)\s*$')
+ disks = re.compile(r'^\s*DATS_FREE_DISKS\s*=(.*\S)\s*$')
+ parts = re.compile(r'^\s*DATS_FREE_PARTITIONS\s*=(.*\S)\s*$')
+ modules = re.compile(r'^\s*INITRD_MODULES\s*=(.*\S)\s*$')
- conf = {}
+ conf = {}
- try:
- fd = file(fn, "r")
- except:
- return conf
- for ln in fd.readlines():
- m = disks.match(ln)
- if m:
- val = m.groups()[0]
- conf['disks'] = val.strip('"').split()
- m = parts.match(ln)
- if m:
- val = m.groups()[0]
- conf['partitions'] = val.strip('"').split()
- m = modules.match(ln)
- if m:
- val = m.groups()[0]
- conf['modules'] = val.strip('"').split()
- fd.close()
+ try:
+ fd = file(fn, "r")
+ except:
+ return conf
+ for ln in fd.readlines():
+ m = disks.match(ln)
+ if m:
+ val = m.groups()[0]
+ conf['disks'] = val.strip('"').split()
+ m = parts.match(ln)
+ if m:
+ val = m.groups()[0]
+ conf['partitions'] = val.strip('"').split()
+ m = modules.match(ln)
+ if m:
+ val = m.groups()[0]
+ conf['modules'] = val.strip('"').split()
+ fd.close()
- return conf
+ return conf
class harness_ABAT(harness.harness):
- """The ABAT server harness
+ """The ABAT server harness
- Properties:
- job
- The job object for this job
- """
+ Properties:
+ job
+ The job object for this job
+ """
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.setup(job)
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.setup(job)
- if 'ABAT_STATUS' in os.environ:
- self.status = file(os.environ['ABAT_STATUS'], "w")
- else:
- self.status = None
+ if 'ABAT_STATUS' in os.environ:
+ self.status = file(os.environ['ABAT_STATUS'], "w")
+ else:
+ self.status = None
- def __send(self, msg):
- if self.status:
- msg = msg.rstrip()
- self.status.write(msg + "\n")
- self.status.flush()
+ def __send(self, msg):
+ if self.status:
+ msg = msg.rstrip()
+ self.status.write(msg + "\n")
+ self.status.flush()
- def __send_status(self, code, subdir, operation, msg):
- self.__send("STATUS %s %s %s %s" % \
- (code, subdir, operation, msg))
+ def __send_status(self, code, subdir, operation, msg):
+ self.__send("STATUS %s %s %s %s" % \
+ (code, subdir, operation, msg))
- def __root_device(self):
- device = None
- root = re.compile(r'^\S*(/dev/\S+).*\s/\s*$')
-
- df = utils.system_output('df -lP')
- for line in df.split("\n"):
- m = root.match(line)
- if m:
- device = m.groups()[0]
+ def __root_device(self):
+ device = None
+ root = re.compile(r'^\S*(/dev/\S+).*\s/\s*$')
- return device
+ df = utils.system_output('df -lP')
+ for line in df.split("\n"):
+ m = root.match(line)
+ if m:
+ device = m.groups()[0]
+
+ return device
- def run_start(self):
- """A run within this job is starting"""
- self.__send_status('GOOD', '----', '----', 'run starting')
+ def run_start(self):
+ """A run within this job is starting"""
+ self.__send_status('GOOD', '----', '----', 'run starting')
- # Load up the autobench.conf if it exists.
- conf = autobench_load("/etc/autobench.conf")
- if 'partitions' in conf:
- self.job.config_set('filesystem.partitions',
- conf['partitions'])
+ # Load up the autobench.conf if it exists.
+ conf = autobench_load("/etc/autobench.conf")
+ if 'partitions' in conf:
+ self.job.config_set('filesystem.partitions',
+ conf['partitions'])
- # Search the boot loader configuration for the autobench entry,
- # and extract its args.
- entry_args = None
- args = None
- for line in self.job.bootloader.info('all').split('\n'):
- if line.startswith('args'):
- entry_args = line.split(None, 2)[2]
- if line.startswith('title'):
- title = line.split()[2]
- if title == 'autobench':
- args = entry_args
+ # Search the boot loader configuration for the autobench entry,
+ # and extract its args.
+ entry_args = None
+ args = None
+ for line in self.job.bootloader.info('all').split('\n'):
+ if line.startswith('args'):
+ entry_args = line.split(None, 2)[2]
+ if line.startswith('title'):
+ title = line.split()[2]
+ if title == 'autobench':
+ args = entry_args
- if args:
- args = re.sub(r'autobench_args:.*', '', args)
- args = re.sub(r'root=\S*', '', args)
- args += " root=" + self.__root_device()
+ if args:
+ args = re.sub(r'autobench_args:.*', '', args)
+ args = re.sub(r'root=\S*', '', args)
+ args += " root=" + self.__root_device()
- self.job.config_set('boot.default_args', args)
+ self.job.config_set('boot.default_args', args)
- # Turn off boot_once semantics.
- self.job.config_set('boot.set_default', True)
+ # Turn off boot_once semantics.
+ self.job.config_set('boot.set_default', True)
- # For RedHat installs we do not load up the module.conf
- # as they cannot be builtin. Pass them as arguments.
- vendor = autotest_utils.get_os_vendor()
- if vendor in ['Red Hat', 'Fedora Core'] and 'modules' in conf:
- args = '--allow-missing'
- for mod in conf['modules']:
- args += " --with " + mod
- self.job.config_set('kernel.mkinitrd_extra_args', args)
+ # For RedHat installs we do not load up the module.conf
+ # as they cannot be builtin. Pass them as arguments.
+ vendor = autotest_utils.get_os_vendor()
+ if vendor in ['Red Hat', 'Fedora Core'] and 'modules' in conf:
+ args = '--allow-missing'
+ for mod in conf['modules']:
+ args += " --with " + mod
+ self.job.config_set('kernel.mkinitrd_extra_args', args)
- def run_reboot(self):
- """A run within this job is performing a reboot
- (expect continue following reboot)
- """
- self.__send("REBOOT")
+ def run_reboot(self):
+ """A run within this job is performing a reboot
+ (expect continue following reboot)
+ """
+ self.__send("REBOOT")
- def run_complete(self):
- """A run within this job is completing (all done)"""
- self.__send("DONE")
+ def run_complete(self):
+ """A run within this job is completing (all done)"""
+ self.__send("DONE")
- def test_status_detail(self, code, subdir, operation, msg, tag):
- """A test within this job is completing (detail)"""
+ def test_status_detail(self, code, subdir, operation, msg, tag):
+ """A test within this job is completing (detail)"""
- # Send the first line with the status code as a STATUS message.
- lines = msg.split("\n")
- self.__send_status(code, subdir, operation, lines[0])
+ # Send the first line with the status code as a STATUS message.
+ lines = msg.split("\n")
+ self.__send_status(code, subdir, operation, lines[0])
- def test_status(self, msg, tag):
- lines = msg.split("\n")
+ def test_status(self, msg, tag):
+ lines = msg.split("\n")
- # Send each line as a SUMMARY message.
- for line in lines:
- self.__send("SUMMARY :" + line)
+ # Send each line as a SUMMARY message.
+ for line in lines:
+ self.__send("SUMMARY :" + line)
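
For reference, given an /etc/autobench.conf containing (made-up values):

    DATS_FREE_DISKS="/dev/sdb /dev/sdc"
    DATS_FREE_PARTITIONS="/dev/sdb1 /dev/sdc1"
    INITRD_MODULES="ext3 sd_mod"

autobench_load() above returns:

    {'disks': ['/dev/sdb', '/dev/sdc'],
     'partitions': ['/dev/sdb1', '/dev/sdc1'],
     'modules': ['ext3', 'sd_mod']}
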
diff --git a/client/bin/harness_simple.py b/client/bin/harness_simple.py
index 7f104da..5ff90d4 100755
--- a/client/bin/harness_simple.py
+++ b/client/bin/harness_simple.py
@@ -7,31 +7,31 @@
import os, harness, time
class harness_simple(harness.harness):
- """
- The simple server harness
+ """
+ The simple server harness
- Properties:
- job
- The job object for this job
- """
+ Properties:
+ job
+ The job object for this job
+ """
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.setup(job)
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.setup(job)
- self.status = os.fdopen(3, 'w')
+ self.status = os.fdopen(3, 'w')
- def test_status(self, status, tag):
- """A test within this job is completing"""
- if self.status:
- for line in status.split('\n'):
- # prepend status messages with
- # AUTOTEST_STATUS:tag: so that we can tell
- # which lines were sent by the autotest client
- pre = 'AUTOTEST_STATUS:%s:' % (tag,)
- self.status.write(pre + line + '\n')
- self.status.flush()
+ def test_status(self, status, tag):
+ """A test within this job is completing"""
+ if self.status:
+ for line in status.split('\n'):
+ # prepend status messages with
+ # AUTOTEST_STATUS:tag: so that we can tell
+ # which lines were sent by the autotest client
+ pre = 'AUTOTEST_STATUS:%s:' % (tag,)
+ self.status.write(pre + line + '\n')
+ self.status.flush()
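
For reference, the prefixing in test_status() above means a made-up
status message 'GOOD\tmytest\tcompleted successfully' sent with tag
'mytest' reaches fd 3 as:

    AUTOTEST_STATUS:mytest:GOOD	mytest	completed successfully

which lets the server side pick out client-originated lines by prefix.
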
diff --git a/client/bin/harness_standalone.py b/client/bin/harness_standalone.py
index dccdcab..d6a4687 100644
--- a/client/bin/harness_standalone.py
+++ b/client/bin/harness_standalone.py
@@ -9,40 +9,40 @@
import os, harness, shutil
class harness_standalone(harness.harness):
- """The standalone server harness
+ """The standalone server harness
- Properties:
- job
- The job object for this job
- """
+ Properties:
+ job
+ The job object for this job
+ """
- def __init__(self, job):
- """
- job
- The job object for this job
- """
- self.autodir = os.path.abspath(os.environ['AUTODIR'])
- self.setup(job)
+ def __init__(self, job):
+ """
+ job
+ The job object for this job
+ """
+ self.autodir = os.path.abspath(os.environ['AUTODIR'])
+ self.setup(job)
- src = job.control_get()
- dest = os.path.join(self.autodir, 'control')
- if os.path.abspath(src) != os.path.abspath(dest):
- shutil.copyfile(src, dest)
- job.control_set(dest)
+ src = job.control_get()
+ dest = os.path.join(self.autodir, 'control')
+ if os.path.abspath(src) != os.path.abspath(dest):
+ shutil.copyfile(src, dest)
+ job.control_set(dest)
- print 'Symlinking init scripts'
- rc = os.path.join(self.autodir, 'tools/autotest')
- # see if system supports event.d versus inittab
- if os.path.exists('/etc/event.d'):
- # NB: assuming current runlevel is default
- initdefault = utils.system_output('runlevel').split()[1]
- else:
- initdefault = utils.system_output('grep :initdefault: /etc/inittab')
- initdefault = initdefault.split(':')[1]
+ print 'Symlinking init scripts'
+ rc = os.path.join(self.autodir, 'tools/autotest')
+ # see if system supports event.d versus inittab
+ if os.path.exists('/etc/event.d'):
+ # NB: assuming current runlevel is default
+ initdefault = utils.system_output('runlevel').split()[1]
+ else:
+ initdefault = utils.system_output('grep :initdefault: /etc/inittab')
+ initdefault = initdefault.split(':')[1]
- try:
- utils.system('ln -sf %s /etc/init.d/autotest' % rc)
- utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % \
- (rc, initdefault))
- except:
- print "WARNING: linking init scripts failed"
+ try:
+ utils.system('ln -sf %s /etc/init.d/autotest' % rc)
+ utils.system('ln -sf %s /etc/rc%s.d/S99autotest' % \
+ (rc, initdefault))
+ except:
+ print "WARNING: linking init scripts failed"
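
For reference, a sketch of the inittab fallback above (assuming a
classic sysvinit /etc/inittab with a line like 'id:3:initdefault:'):

    line = 'id:3:initdefault:'
    initdefault = line.split(':')[1]    # -> '3'
    # which makes the second symlink /etc/rc3.d/S99autotest
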
diff --git a/client/bin/job.py b/client/bin/job.py
index ac2745b..870b63e 100755
--- a/client/bin/job.py
+++ b/client/bin/job.py
@@ -20,995 +20,995 @@
"""
class StepError(error.AutotestError):
- pass
+ pass
class base_job:
- """The actual job against which we do everything.
+ """The actual job against which we do everything.
- Properties:
- autodir
- The top level autotest directory (/usr/local/autotest).
- Comes from os.environ['AUTODIR'].
- bindir
- <autodir>/bin/
- libdir
- <autodir>/lib/
- testdir
- <autodir>/tests/
- site_testdir
- <autodir>/site_tests/
- profdir
- <autodir>/profilers/
- tmpdir
- <autodir>/tmp/
- resultdir
- <autodir>/results/<jobtag>
- stdout
- fd_stack object for stdout
- stderr
- fd_stack object for stderr
- profilers
- the profilers object for this job
- harness
- the server harness object for this job
- config
- the job configuration for this job
- """
+ Properties:
+ autodir
+ The top level autotest directory (/usr/local/autotest).
+ Comes from os.environ['AUTODIR'].
+ bindir
+ <autodir>/bin/
+ libdir
+ <autodir>/lib/
+ testdir
+ <autodir>/tests/
+ site_testdir
+ <autodir>/site_tests/
+ profdir
+ <autodir>/profilers/
+ tmpdir
+ <autodir>/tmp/
+ resultdir
+ <autodir>/results/<jobtag>
+ stdout
+ fd_stack object for stdout
+ stderr
+ fd_stack object for stderr
+ profilers
+ the profilers object for this job
+ harness
+ the server harness object for this job
+ config
+ the job configuration for this job
+ """
- DEFAULT_LOG_FILENAME = "status"
+ DEFAULT_LOG_FILENAME = "status"
- def __init__(self, control, jobtag, cont, harness_type=None,
- use_external_logging = False):
- """
- control
- The control file (pathname of)
- jobtag
- The job tag string (eg "default")
- cont
- If this is the continuation of this job
- harness_type
- An alternative server harness
- """
- self.autodir = os.environ['AUTODIR']
- self.bindir = os.path.join(self.autodir, 'bin')
- self.libdir = os.path.join(self.autodir, 'lib')
- self.testdir = os.path.join(self.autodir, 'tests')
- self.site_testdir = os.path.join(self.autodir, 'site_tests')
- self.profdir = os.path.join(self.autodir, 'profilers')
- self.tmpdir = os.path.join(self.autodir, 'tmp')
- self.resultdir = os.path.join(self.autodir, 'results', jobtag)
- self.sysinfodir = os.path.join(self.resultdir, 'sysinfo')
- self.control = os.path.abspath(control)
- self.state_file = self.control + '.state'
- self.current_step_ancestry = []
- self.next_step_index = 0
- self.__load_state()
+ def __init__(self, control, jobtag, cont, harness_type=None,
+ use_external_logging = False):
+ """
+ control
+ The control file (pathname of)
+ jobtag
+ The job tag string (eg "default")
+ cont
+ If this is the continuation of this job
+ harness_type
+ An alternative server harness
+ """
+ self.autodir = os.environ['AUTODIR']
+ self.bindir = os.path.join(self.autodir, 'bin')
+ self.libdir = os.path.join(self.autodir, 'lib')
+ self.testdir = os.path.join(self.autodir, 'tests')
+ self.site_testdir = os.path.join(self.autodir, 'site_tests')
+ self.profdir = os.path.join(self.autodir, 'profilers')
+ self.tmpdir = os.path.join(self.autodir, 'tmp')
+ self.resultdir = os.path.join(self.autodir, 'results', jobtag)
+ self.sysinfodir = os.path.join(self.resultdir, 'sysinfo')
+ self.control = os.path.abspath(control)
+ self.state_file = self.control + '.state'
+ self.current_step_ancestry = []
+ self.next_step_index = 0
+ self.__load_state()
- if not cont:
- """
- Don't cleanup the tmp dir (which contains the lockfile)
- in the constructor, this would be a problem for multiple
- jobs starting at the same time on the same client. Instead
- do the delete at the server side. We simply create the tmp
- directory here if it does not already exist.
- """
- if not os.path.exists(self.tmpdir):
- os.mkdir(self.tmpdir)
+ if not cont:
+ """
+ Don't cleanup the tmp dir (which contains the lockfile)
+ in the constructor, this would be a problem for multiple
+ jobs starting at the same time on the same client. Instead
+ do the delete at the server side. We simply create the tmp
+ directory here if it does not already exist.
+ """
+ if not os.path.exists(self.tmpdir):
+ os.mkdir(self.tmpdir)
- results = os.path.join(self.autodir, 'results')
- if not os.path.exists(results):
- os.mkdir(results)
-
- download = os.path.join(self.testdir, 'download')
- if not os.path.exists(download):
- os.mkdir(download)
+ results = os.path.join(self.autodir, 'results')
+ if not os.path.exists(results):
+ os.mkdir(results)
- if os.path.exists(self.resultdir):
- utils.system('rm -rf '
- + self.resultdir)
- os.mkdir(self.resultdir)
- os.mkdir(self.sysinfodir)
+ download = os.path.join(self.testdir, 'download')
+ if not os.path.exists(download):
+ os.mkdir(download)
- os.mkdir(os.path.join(self.resultdir, 'debug'))
- os.mkdir(os.path.join(self.resultdir, 'analysis'))
+ if os.path.exists(self.resultdir):
+ utils.system('rm -rf '
+ + self.resultdir)
+ os.mkdir(self.resultdir)
+ os.mkdir(self.sysinfodir)
- shutil.copyfile(self.control,
- os.path.join(self.resultdir, 'control'))
+ os.mkdir(os.path.join(self.resultdir, 'debug'))
+ os.mkdir(os.path.join(self.resultdir, 'analysis'))
+ shutil.copyfile(self.control,
+ os.path.join(self.resultdir, 'control'))
- self.control = control
- self.jobtag = jobtag
- self.log_filename = self.DEFAULT_LOG_FILENAME
- self.container = None
- self.stdout = fd_stack.fd_stack(1, sys.stdout)
- self.stderr = fd_stack.fd_stack(2, sys.stderr)
+ self.control = control
+ self.jobtag = jobtag
+ self.log_filename = self.DEFAULT_LOG_FILENAME
+ self.container = None
- self._init_group_level()
+ self.stdout = fd_stack.fd_stack(1, sys.stdout)
+ self.stderr = fd_stack.fd_stack(2, sys.stderr)
- self.config = config.config(self)
+ self._init_group_level()
- self.harness = harness.select(harness_type, self)
+ self.config = config.config(self)
- self.profilers = profilers.profilers(self)
+ self.harness = harness.select(harness_type, self)
- try:
- tool = self.config_get('boottool.executable')
- self.bootloader = boottool.boottool(tool)
- except:
- pass
+ self.profilers = profilers.profilers(self)
- sysinfo.log_per_reboot_data(self.sysinfodir)
+ try:
+ tool = self.config_get('boottool.executable')
+ self.bootloader = boottool.boottool(tool)
+ except:
+ pass
- if not cont:
- self.record('START', None, None)
- self._increment_group_level()
+ sysinfo.log_per_reboot_data(self.sysinfodir)
- self.harness.run_start()
-
- if use_external_logging:
- self.enable_external_logging()
+ if not cont:
+ self.record('START', None, None)
+ self._increment_group_level()
- # load the max disk usage rate - default to no monitoring
- self.max_disk_usage_rate = self.get_state('__monitor_disk',
- default=0.0)
+ self.harness.run_start()
+ if use_external_logging:
+ self.enable_external_logging()
- def monitor_disk_usage(self, max_rate):
- """\
- Signal that the job should monitor disk space usage on /
- and generate a warning if a test uses up disk space at a
- rate exceeding 'max_rate'.
+ # load the max disk usage rate - default to no monitoring
+ self.max_disk_usage_rate = self.get_state('__monitor_disk',
+ default=0.0)
- Parameters:
-			max_rate - the maximum allowed rate of disk consumption
- during a test, in MB/hour, or 0 to indicate
- no limit.
- """
- self.set_state('__monitor_disk', max_rate)
- self.max_disk_usage_rate = max_rate
+ def monitor_disk_usage(self, max_rate):
+ """\
+ Signal that the job should monitor disk space usage on /
+ and generate a warning if a test uses up disk space at a
+ rate exceeding 'max_rate'.
- def relative_path(self, path):
- """\
-		Return a path relative to the job results directory
- """
- head = len(self.resultdir) + 1 # remove the / inbetween
- return path[head:]
+ Parameters:
+            max_rate - the maximum allowed rate of disk consumption
+ during a test, in MB/hour, or 0 to indicate
+ no limit.
+ """
+ self.set_state('__monitor_disk', max_rate)
+ self.max_disk_usage_rate = max_rate
- def control_get(self):
- return self.control
+ def relative_path(self, path):
+ """\
+        Return a path relative to the job results directory
+ """
+ head = len(self.resultdir) + 1 # remove the / inbetween
+ return path[head:]
- def control_set(self, control):
- self.control = os.path.abspath(control)
+ def control_get(self):
+ return self.control
- def harness_select(self, which):
- self.harness = harness.select(which, self)
+ def control_set(self, control):
+ self.control = os.path.abspath(control)
- def config_set(self, name, value):
- self.config.set(name, value)
+ def harness_select(self, which):
+ self.harness = harness.select(which, self)
- def config_get(self, name):
- return self.config.get(name)
+ def config_set(self, name, value):
+ self.config.set(name, value)
- def setup_dirs(self, results_dir, tmp_dir):
- if not tmp_dir:
- tmp_dir = os.path.join(self.tmpdir, 'build')
- if not os.path.exists(tmp_dir):
- os.mkdir(tmp_dir)
- if not os.path.isdir(tmp_dir):
-			e_msg = "Temp dir (%s) is not a dir - args backwards?" % tmp_dir
- raise ValueError(e_msg)
- # We label the first build "build" and then subsequent ones
- # as "build.2", "build.3", etc. Whilst this is a little bit
- # inconsistent, 99.9% of jobs will only have one build
- # (that's not done as kernbench, sparse, or buildtest),
-		# so it works out much cleaner. One of life's compromises.
- if not results_dir:
- results_dir = os.path.join(self.resultdir, 'build')
- i = 2
- while os.path.exists(results_dir):
- results_dir = os.path.join(self.resultdir, 'build.%d' % i)
- i += 1
- if not os.path.exists(results_dir):
- os.mkdir(results_dir)
+ def config_get(self, name):
+ return self.config.get(name)
- return (results_dir, tmp_dir)
+ def setup_dirs(self, results_dir, tmp_dir):
+ if not tmp_dir:
+ tmp_dir = os.path.join(self.tmpdir, 'build')
+ if not os.path.exists(tmp_dir):
+ os.mkdir(tmp_dir)
+ if not os.path.isdir(tmp_dir):
+            e_msg = "Temp dir (%s) is not a dir - args backwards?" % tmp_dir
+ raise ValueError(e_msg)
+ # We label the first build "build" and then subsequent ones
+ # as "build.2", "build.3", etc. Whilst this is a little bit
+ # inconsistent, 99.9% of jobs will only have one build
+ # (that's not done as kernbench, sparse, or buildtest),
+        # so it works out much cleaner. One of life's compromises.
+ if not results_dir:
+ results_dir = os.path.join(self.resultdir, 'build')
+ i = 2
+ while os.path.exists(results_dir):
+ results_dir = os.path.join(self.resultdir, 'build.%d' % i)
+ i += 1
+ if not os.path.exists(results_dir):
+ os.mkdir(results_dir)
- def xen(self, base_tree, results_dir = '', tmp_dir = '', leave = False, \
- kjob = None ):
- """Summon a xen object"""
- (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
- build_dir = 'xen'
- return xen.xen(self, base_tree, results_dir, tmp_dir, build_dir, leave, kjob)
+ return (results_dir, tmp_dir)
- def kernel(self, base_tree, results_dir = '', tmp_dir = '', leave = False):
- """Summon a kernel object"""
- (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
- build_dir = 'linux'
- return kernel.auto_kernel(self, base_tree, results_dir,
- tmp_dir, build_dir, leave)
+ def xen(self, base_tree, results_dir = '', tmp_dir = '', leave = False, \
+ kjob = None ):
+ """Summon a xen object"""
+ (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
+ build_dir = 'xen'
+ return xen.xen(self, base_tree, results_dir, tmp_dir, build_dir, leave, kjob)
- def barrier(self, *args, **kwds):
- """Create a barrier object"""
- return barrier.barrier(*args, **kwds)
+ def kernel(self, base_tree, results_dir = '', tmp_dir = '', leave = False):
+ """Summon a kernel object"""
+ (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
+ build_dir = 'linux'
+ return kernel.auto_kernel(self, base_tree, results_dir,
+ tmp_dir, build_dir, leave)
- def setup_dep(self, deps):
- """Set up the dependencies for this test.
-
- deps is a list of libraries required for this test.
- """
- for dep in deps:
- try:
- os.chdir(os.path.join(self.autodir, 'deps', dep))
- utils.system('./' + dep + '.py')
- except:
- err = "setting up dependency " + dep + "\n"
- raise error.UnhandledError(err)
+ def barrier(self, *args, **kwds):
+ """Create a barrier object"""
+ return barrier.barrier(*args, **kwds)
- def __runtest(self, url, tag, args, dargs):
- try:
- l = lambda : test.runtest(self, url, tag, args, dargs)
- pid = parallel.fork_start(self.resultdir, l)
- parallel.fork_waitfor(self.resultdir, pid)
- except error.AutotestError:
- raise
- except Exception, e:
-			msg = "Unhandled %s error occurred during test\n"
- msg %= str(e.__class__.__name__)
- raise error.UnhandledError(msg)
+ def setup_dep(self, deps):
+ """Set up the dependencies for this test.
+ deps is a list of libraries required for this test.
+ """
+ for dep in deps:
+ try:
+ os.chdir(os.path.join(self.autodir, 'deps', dep))
+ utils.system('./' + dep + '.py')
+ except:
+ err = "setting up dependency " + dep + "\n"
+ raise error.UnhandledError(err)
- def run_test(self, url, *args, **dargs):
- """Summon a test object and run it.
-
- tag
- tag to add to testname
- url
- url of the test to run
- """
- if not url:
- raise TypeError("Test name is invalid. "
- "Switched arguments?")
- (group, testname) = test.testname(url)
- namelen = len(testname)
- dargs = dargs.copy()
- tntag = dargs.pop('tag', None)
- if tntag: # testname tag is included in reported test name
- testname += '.' + tntag
- subdir = testname
- sdtag = dargs.pop('subdir_tag', None)
- if sdtag: # subdir-only tag is not included in reports
- subdir = subdir + '.' + sdtag
- tag = subdir[namelen+1:] # '' if none
+ def __runtest(self, url, tag, args, dargs):
+ try:
+ l = lambda : test.runtest(self, url, tag, args, dargs)
+ pid = parallel.fork_start(self.resultdir, l)
+ parallel.fork_waitfor(self.resultdir, pid)
+ except error.AutotestError:
+ raise
+ except Exception, e:
+            msg = "Unhandled %s error occurred during test\n"
+ msg %= str(e.__class__.__name__)
+ raise error.UnhandledError(msg)
- outputdir = os.path.join(self.resultdir, subdir)
- if os.path.exists(outputdir):
- msg = ("%s already exists, test <%s> may have"
- " already run with tag <%s>"
- % (outputdir, testname, tag) )
- raise error.TestError(msg)
- os.mkdir(outputdir)
-
- container = dargs.pop('container', None)
- if container:
- cname = container.get('name', None)
- if not cname: # get old name
- cname = container.get('container_name', None)
- mbytes = container.get('mbytes', None)
- if not mbytes: # get old name
- mbytes = container.get('mem', None)
- cpus = container.get('cpus', None)
- if not cpus: # get old name
- cpus = container.get('cpu', None)
- root = container.get('root', None)
- self.new_container(mbytes=mbytes, cpus=cpus,
- root=root, name=cname)
- # We are running in a container now...
- def log_warning(reason):
- self.record("WARN", subdir, testname, reason)
- @disk_usage_monitor.watch(log_warning, "/",
- self.max_disk_usage_rate)
- def group_func():
- try:
- self.__runtest(url, tag, args, dargs)
- except error.TestNAError, detail:
- self.record('TEST_NA', subdir, testname,
- str(detail))
- raise
- except Exception, detail:
- self.record('FAIL', subdir, testname,
- str(detail))
- raise
- else:
- self.record('GOOD', subdir, testname,
- 'completed successfully')
+ def run_test(self, url, *args, **dargs):
+ """Summon a test object and run it.
- result, exc_info = self.__rungroup(subdir, testname, group_func)
- if container:
- self.release_container()
- if exc_info and isinstance(exc_info[1], error.TestError):
- return False
- elif exc_info:
- raise exc_info[0], exc_info[1], exc_info[2]
- else:
- return True
+ tag
+ tag to add to testname
+ url
+ url of the test to run
+ """
+ if not url:
+ raise TypeError("Test name is invalid. "
+ "Switched arguments?")
+ (group, testname) = test.testname(url)
+ namelen = len(testname)
+ dargs = dargs.copy()
+ tntag = dargs.pop('tag', None)
+ if tntag: # testname tag is included in reported test name
+ testname += '.' + tntag
+ subdir = testname
+ sdtag = dargs.pop('subdir_tag', None)
+ if sdtag: # subdir-only tag is not included in reports
+ subdir = subdir + '.' + sdtag
+ tag = subdir[namelen+1:] # '' if none
- def __rungroup(self, subdir, testname, function, *args, **dargs):
- """\
- subdir:
- name of the group
- testname:
- name of the test to run, or support step
- function:
- subroutine to run
- *args:
- arguments for the function
+ outputdir = os.path.join(self.resultdir, subdir)
+ if os.path.exists(outputdir):
+ msg = ("%s already exists, test <%s> may have"
+ " already run with tag <%s>"
+ % (outputdir, testname, tag) )
+ raise error.TestError(msg)
+ os.mkdir(outputdir)
- Returns a 2-tuple (result, exc_info) where result
- is the return value of function, and exc_info is
- the sys.exc_info() of the exception thrown by the
- function (which may be None).
- """
+ container = dargs.pop('container', None)
+ if container:
+ cname = container.get('name', None)
+ if not cname: # get old name
+ cname = container.get('container_name', None)
+ mbytes = container.get('mbytes', None)
+ if not mbytes: # get old name
+ mbytes = container.get('mem', None)
+ cpus = container.get('cpus', None)
+ if not cpus: # get old name
+ cpus = container.get('cpu', None)
+ root = container.get('root', None)
+ self.new_container(mbytes=mbytes, cpus=cpus,
+ root=root, name=cname)
+ # We are running in a container now...
- result, exc_info = None, None
- try:
- self.record('START', subdir, testname)
- self._increment_group_level()
- result = function(*args, **dargs)
- self._decrement_group_level()
- self.record('END GOOD', subdir, testname)
- except error.TestNAError, e:
- self._decrement_group_level()
- self.record('END TEST_NA', subdir, testname, str(e))
- except Exception, e:
- exc_info = sys.exc_info()
- self._decrement_group_level()
- err_msg = str(e) + '\n' + traceback.format_exc()
- self.record('END FAIL', subdir, testname, err_msg)
+ def log_warning(reason):
+ self.record("WARN", subdir, testname, reason)
+ @disk_usage_monitor.watch(log_warning, "/",
+ self.max_disk_usage_rate)
+ def group_func():
+ try:
+ self.__runtest(url, tag, args, dargs)
+ except error.TestNAError, detail:
+ self.record('TEST_NA', subdir, testname,
+ str(detail))
+ raise
+ except Exception, detail:
+ self.record('FAIL', subdir, testname,
+ str(detail))
+ raise
+ else:
+ self.record('GOOD', subdir, testname,
+ 'completed successfully')
- return result, exc_info
+ result, exc_info = self.__rungroup(subdir, testname, group_func)
+ if container:
+ self.release_container()
+ if exc_info and isinstance(exc_info[1], error.TestError):
+ return False
+ elif exc_info:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ else:
+ return True
- def run_group(self, function, *args, **dargs):
- """\
- function:
- subroutine to run
- *args:
- arguments for the function
- """
+ def __rungroup(self, subdir, testname, function, *args, **dargs):
+ """\
+ subdir:
+ name of the group
+ testname:
+ name of the test to run, or support step
+ function:
+ subroutine to run
+ *args:
+ arguments for the function
- # Allow the tag for the group to be specified
- name = function.__name__
- tag = dargs.pop('tag', None)
- if tag:
- name = tag
+ Returns a 2-tuple (result, exc_info) where result
+ is the return value of function, and exc_info is
+ the sys.exc_info() of the exception thrown by the
+ function (which may be None).
+ """
- outputdir = os.path.join(self.resultdir, name)
- if os.path.exists(outputdir):
- msg = ("%s already exists, test <%s> may have"
- " already run with tag <%s>"
- % (outputdir, name, name) )
- raise error.TestError(msg)
- os.mkdir(outputdir)
+ result, exc_info = None, None
+ try:
+ self.record('START', subdir, testname)
+ self._increment_group_level()
+ result = function(*args, **dargs)
+ self._decrement_group_level()
+ self.record('END GOOD', subdir, testname)
+ except error.TestNAError, e:
+ self._decrement_group_level()
+ self.record('END TEST_NA', subdir, testname, str(e))
+ except Exception, e:
+ exc_info = sys.exc_info()
+ self._decrement_group_level()
+ err_msg = str(e) + '\n' + traceback.format_exc()
+ self.record('END FAIL', subdir, testname, err_msg)
- result, exc_info = self.__rungroup(name, name, function,
- *args, **dargs)
+ return result, exc_info
- # if there was a non-TestError exception, raise it
- if exc_info and not isinstance(exc_info[1], error.TestError):
- err = ''.join(traceback.format_exception(*exc_info))
- raise error.TestError(name + ' failed\n' + err)
- # pass back the actual return value from the function
- return result
+ def run_group(self, function, *args, **dargs):
+ """\
+ function:
+ subroutine to run
+ *args:
+ arguments for the function
+ """
+ # Allow the tag for the group to be specified
+ name = function.__name__
+ tag = dargs.pop('tag', None)
+ if tag:
+ name = tag
- def new_container(self, mbytes=None, cpus=None, root=None, name=None):
- if not autotest_utils.grep('cpuset', '/proc/filesystems'):
- print "Containers not enabled by latest reboot"
- return # containers weren't enabled in this kernel boot
- pid = os.getpid()
- if not name:
- name = 'test%d' % pid # make arbitrary unique name
- self.container = cpuset.cpuset(name, job_size=mbytes,
- job_pid=pid, cpus=cpus, root=root)
- # This job's python shell is now running in the new container
- # and all forked test processes will inherit that container
+ outputdir = os.path.join(self.resultdir, name)
+ if os.path.exists(outputdir):
+ msg = ("%s already exists, test <%s> may have"
+ " already run with tag <%s>"
+ % (outputdir, name, name) )
+ raise error.TestError(msg)
+ os.mkdir(outputdir)
+
+ result, exc_info = self.__rungroup(name, name, function,
+ *args, **dargs)
+
+ # if there was a non-TestError exception, raise it
+ if exc_info and not isinstance(exc_info[1], error.TestError):
+ err = ''.join(traceback.format_exception(*exc_info))
+ raise error.TestError(name + ' failed\n' + err)
+
+ # pass back the actual return value from the function
+ return result
+
+
+ def new_container(self, mbytes=None, cpus=None, root=None, name=None):
+ if not autotest_utils.grep('cpuset', '/proc/filesystems'):
+ print "Containers not enabled by latest reboot"
+ return # containers weren't enabled in this kernel boot
+ pid = os.getpid()
+ if not name:
+ name = 'test%d' % pid # make arbitrary unique name
+ self.container = cpuset.cpuset(name, job_size=mbytes,
+ job_pid=pid, cpus=cpus, root=root)
+ # This job's python shell is now running in the new container
+ # and all forked test processes will inherit that container
+
+
+ def release_container(self):
+ if self.container:
+ self.container.release()
+ self.container = None
+
+ def cpu_count(self):
+ if self.container:
+ return len(self.container.cpus)
+ return autotest_utils.count_cpus() # use total system count
- def release_container(self):
- if self.container:
- self.container.release()
- self.container = None
+ # Check the passed kernel identifier against the command line
+ # and the running kernel, abort the job on mismatch.
+ def kernel_check_ident(self, expected_when, expected_id, subdir,
+ type = 'src', patches=[]):
+ print (("POST BOOT: checking booted kernel " +
+ "mark=%d identity='%s' type='%s'") %
+ (expected_when, expected_id, type))
- def cpu_count(self):
- if self.container:
- return len(self.container.cpus)
- return autotest_utils.count_cpus() # use total system count
+ running_id = autotest_utils.running_os_ident()
+ cmdline = utils.read_one_line("/proc/cmdline")
- # Check the passed kernel identifier against the command line
- # and the running kernel, abort the job on mismatch.
- def kernel_check_ident(self, expected_when, expected_id, subdir,
- type = 'src', patches=[]):
- print (("POST BOOT: checking booted kernel " +
- "mark=%d identity='%s' type='%s'") %
- (expected_when, expected_id, type))
+ find_sum = re.compile(r'.*IDENT=(\d+)')
+ m = find_sum.match(cmdline)
+ cmdline_when = -1
+ if m:
+ cmdline_when = int(m.groups()[0])
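+ # e.g. a cmdline of "root=/dev/sda1 IDENT=2" gives cmdline_when == 2;
+ # without an IDENT= marker it stays -1 and the mark check below
+ # will flag a mismatch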
- running_id = autotest_utils.running_os_ident()
+ # We have all the facts, see if they indicate we
+ # booted the requested kernel or not.
+ bad = False
+ if (type == 'src' and expected_id != running_id or
+ type == 'rpm' and
+ not running_id.startswith(expected_id + '::')):
+ print "check_kernel_ident: kernel identifier mismatch"
+ bad = True
+ if expected_when != cmdline_when:
+ print "check_kernel_ident: kernel command line mismatch"
+ bad = True
- cmdline = utils.read_one_line("/proc/cmdline")
+ if bad:
+ print " Expected Ident: " + expected_id
+ print " Running Ident: " + running_id
+ print " Expected Mark: %d" % (expected_when)
+ print "Command Line Mark: %d" % (cmdline_when)
+ print " Command Line: " + cmdline
- find_sum = re.compile(r'.*IDENT=(\d+)')
- m = find_sum.match(cmdline)
- cmdline_when = -1
- if m:
- cmdline_when = int(m.groups()[0])
+ raise error.JobError("boot failure", "reboot.verify")
- # We have all the facts, see if they indicate we
- # booted the requested kernel or not.
- bad = False
- if (type == 'src' and expected_id != running_id or
- type == 'rpm' and
- not running_id.startswith(expected_id + '::')):
- print "check_kernel_ident: kernel identifier mismatch"
- bad = True
- if expected_when != cmdline_when:
- print "check_kernel_ident: kernel command line mismatch"
- bad = True
+ kernel_info = {'kernel': expected_id}
+ for i, patch in enumerate(patches):
+ kernel_info["patch%d" % i] = patch
+ self.record('GOOD', subdir, 'reboot.verify', expected_id)
+ self._decrement_group_level()
+ self.record('END GOOD', subdir, 'reboot',
+ optional_fields=kernel_info)
- if bad:
- print " Expected Ident: " + expected_id
- print " Running Ident: " + running_id
- print " Expected Mark: %d" % (expected_when)
- print "Command Line Mark: %d" % (cmdline_when)
- print " Command Line: " + cmdline
- raise error.JobError("boot failure", "reboot.verify")
+ def filesystem(self, device, mountpoint = None, loop_size = 0):
+ if not mountpoint:
+ mountpoint = self.tmpdir
+ return filesystem.filesystem(self, device, mountpoint, loop_size)
- kernel_info = {'kernel': expected_id}
- for i, patch in enumerate(patches):
- kernel_info["patch%d" % i] = patch
- self.record('GOOD', subdir, 'reboot.verify', expected_id)
- self._decrement_group_level()
- self.record('END GOOD', subdir, 'reboot',
- optional_fields=kernel_info)
+ def enable_external_logging(self):
+ pass
- def filesystem(self, device, mountpoint = None, loop_size = 0):
- if not mountpoint:
- mountpoint = self.tmpdir
- return filesystem.filesystem(self, device, mountpoint, loop_size)
-
- def enable_external_logging(self):
- pass
+ def disable_external_logging(self):
+ pass
- def disable_external_logging(self):
- pass
-
+ def reboot_setup(self):
+ pass
- def reboot_setup(self):
- pass
+ def reboot(self, tag='autotest'):
+ self.reboot_setup()
+ self.record('START', None, 'reboot')
+ self._increment_group_level()
+ self.record('GOOD', None, 'reboot.start')
+ self.harness.run_reboot()
+ default = self.config_get('boot.set_default')
+ if default:
+ self.bootloader.set_default(tag)
+ else:
+ self.bootloader.boot_once(tag)
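+ # detach and delay the actual reboot so this process can still
+ # exit cleanly through self.quit() before the machine goes down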
+ cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
+ utils.system(cmd)
+ self.quit()
- def reboot(self, tag='autotest'):
- self.reboot_setup()
- self.record('START', None, 'reboot')
- self._increment_group_level()
- self.record('GOOD', None, 'reboot.start')
- self.harness.run_reboot()
- default = self.config_get('boot.set_default')
- if default:
- self.bootloader.set_default(tag)
- else:
- self.bootloader.boot_once(tag)
- cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
- utils.system(cmd)
- self.quit()
+ def noop(self, text):
+ print "job: noop: " + text
- def noop(self, text):
- print "job: noop: " + text
+ def parallel(self, *tasklist):
+ """Run tasks in parallel"""
- def parallel(self, *tasklist):
- """Run tasks in parallel"""
+ pids = []
+ old_log_filename = self.log_filename
+ for i, task in enumerate(tasklist):
+ self.log_filename = old_log_filename + (".%d" % i)
+ task_func = lambda: task[0](*task[1:])
+ pids.append(parallel.fork_start(self.resultdir,
+ task_func))
- pids = []
- old_log_filename = self.log_filename
- for i, task in enumerate(tasklist):
- self.log_filename = old_log_filename + (".%d" % i)
- task_func = lambda: task[0](*task[1:])
- pids.append(parallel.fork_start(self.resultdir,
- task_func))
+ old_log_path = os.path.join(self.resultdir, old_log_filename)
+ old_log = open(old_log_path, "a")
+ exceptions = []
+ for i, pid in enumerate(pids):
+ # wait for the task to finish
+ try:
+ parallel.fork_waitfor(self.resultdir, pid)
+ except Exception, e:
+ exceptions.append(e)
+ # copy the logs from the subtask into the main log
+ new_log_path = old_log_path + (".%d" % i)
+ if os.path.exists(new_log_path):
+ new_log = open(new_log_path)
+ old_log.write(new_log.read())
+ new_log.close()
+ old_log.flush()
+ os.remove(new_log_path)
+ old_log.close()
- old_log_path = os.path.join(self.resultdir, old_log_filename)
- old_log = open(old_log_path, "a")
- exceptions = []
- for i, pid in enumerate(pids):
- # wait for the task to finish
- try:
- parallel.fork_waitfor(self.resultdir, pid)
- except Exception, e:
- exceptions.append(e)
- # copy the logs from the subtask into the main log
- new_log_path = old_log_path + (".%d" % i)
- if os.path.exists(new_log_path):
- new_log = open(new_log_path)
- old_log.write(new_log.read())
- new_log.close()
- old_log.flush()
- os.remove(new_log_path)
- old_log.close()
+ self.log_filename = old_log_filename
- self.log_filename = old_log_filename
+ # handle any exceptions raised by the parallel tasks
+ if exceptions:
+ msg = "%d task(s) failed" % len(exceptions)
+ raise error.JobError(msg, str(exceptions), exceptions)
- # handle any exceptions raised by the parallel tasks
- if exceptions:
- msg = "%d task(s) failed" % len(exceptions)
- raise error.JobError(msg, str(exceptions), exceptions)
+ def quit(self):
+ # XXX: should have a better name.
+ self.harness.run_pause()
+ raise error.JobContinue("more to come")
- def quit(self):
- # XXX: should have a better name.
- self.harness.run_pause()
- raise error.JobContinue("more to come")
+ def complete(self, status):
+ """Clean up and exit"""
+ # We are about to exit 'complete' so clean up the control file.
+ try:
+ os.unlink(self.state_file)
+ except:
+ pass
- def complete(self, status):
- """Clean up and exit"""
- # We are about to exit 'complete' so clean up the control file.
- try:
- os.unlink(self.state_file)
- except:
- pass
+ self.harness.run_complete()
+ self.disable_external_logging()
+ sys.exit(status)
- self.harness.run_complete()
- self.disable_external_logging()
- sys.exit(status)
+ def set_state(self, var, val):
+ # Deep copies make sure that the state can't be altered
+ # without it being re-written. Performance-wise, deep copies
+ # are overshadowed by pickling/loading.
+ self.state[var] = copy.deepcopy(val)
+ pickle.dump(self.state, open(self.state_file, 'w'))
- def set_state(self, var, val):
- # Deep copies make sure that the state can't be altered
- # without it being re-written. Performance-wise, deep copies
- # are overshadowed by pickling/loading.
- self.state[var] = copy.deepcopy(val)
- pickle.dump(self.state, open(self.state_file, 'w'))
+ def __load_state(self):
+ assert not hasattr(self, "state")
+ try:
+ self.state = pickle.load(open(self.state_file, 'r'))
+ self.state_existed = True
+ except Exception:
+ print "Initializing the state engine."
+ self.state = {}
+ self.set_state('__steps', []) # writes pickle file
+ self.state_existed = False
- def __load_state(self):
- assert not hasattr(self, "state")
- try:
- self.state = pickle.load(open(self.state_file, 'r'))
- self.state_existed = True
- except Exception:
- print "Initializing the state engine."
- self.state = {}
- self.set_state('__steps', []) # writes pickle file
- self.state_existed = False
+ def get_state(self, var, default=None):
+ if var in self.state or default is None:
+ val = self.state[var]
+ else:
+ val = default
+ return copy.deepcopy(val)
- def get_state(self, var, default=None):
- if var in self.state or default == None:
- val = self.state[var]
- else:
- val = default
- return copy.deepcopy(val)
+ def __create_step_tuple(self, fn, args, dargs):
+ # Legacy code passes in an array where the first arg is
+ # the function or its name.
+ if isinstance(fn, list):
+ assert(len(args) == 0)
+ assert(len(dargs) == 0)
+ args = fn[1:]
+ fn = fn[0]
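+ # i.e. a legacy call looks like next_step([some_fn, arg1, arg2])
+ # rather than next_step(some_fn, arg1, arg2) -- some_fn illustrative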
+ # Pickling actual functions is hairy, thus we have to call
+ # them by name. Unfortunately, this means only functions
+ # defined globally can be used as a next step.
+ if callable(fn):
+ fn = fn.__name__
+ if not isinstance(fn, types.StringTypes):
+ raise StepError("Next steps must be functions or "
+ "strings containing the function name")
+ ancestry = copy.copy(self.current_step_ancestry)
+ return (ancestry, fn, args, dargs)
- def __create_step_tuple(self, fn, args, dargs):
- # Legacy code passes in an array where the first arg is
- # the function or its name.
- if isinstance(fn, list):
- assert(len(args) == 0)
- assert(len(dargs) == 0)
- args = fn[1:]
- fn = fn[0]
- # Pickling actual functions is hairy, thus we have to call
- # them by name. Unfortunately, this means only functions
- # defined globally can be used as a next step.
- if callable(fn):
- fn = fn.__name__
- if not isinstance(fn, types.StringTypes):
- raise StepError("Next steps must be functions or "
- "strings containing the function name")
- ancestry = copy.copy(self.current_step_ancestry)
- return (ancestry, fn, args, dargs)
+ def next_step_append(self, fn, *args, **dargs):
+ """Define the next step and place it at the end"""
+ steps = self.get_state('__steps')
+ steps.append(self.__create_step_tuple(fn, args, dargs))
+ self.set_state('__steps', steps)
- def next_step_append(self, fn, *args, **dargs):
- """Define the next step and place it at the end"""
- steps = self.get_state('__steps')
- steps.append(self.__create_step_tuple(fn, args, dargs))
- self.set_state('__steps', steps)
+ def next_step(self, fn, *args, **dargs):
+ """Create a new step and place it after any steps added
+ while running the current step but before any steps added in
+ previous steps"""
+ steps = self.get_state('__steps')
+ steps.insert(self.next_step_index,
+ self.__create_step_tuple(fn, args, dargs))
+ self.next_step_index += 1
+ self.set_state('__steps', steps)
- def next_step(self, fn, *args, **dargs):
- """Create a new step and place it after any steps added
- while running the current step but before any steps added in
- previous steps"""
- steps = self.get_state('__steps')
- steps.insert(self.next_step_index,
- self.__create_step_tuple(fn, args, dargs))
- self.next_step_index += 1
- self.set_state('__steps', steps)
+ def next_step_prepend(self, fn, *args, **dargs):
+ """Insert a new step, executing first"""
+ steps = self.get_state('__steps')
+ steps.insert(0, self.__create_step_tuple(fn, args, dargs))
+ self.next_step_index += 1
+ self.set_state('__steps', steps)
- def next_step_prepend(self, fn, *args, **dargs):
- """Insert a new step, executing first"""
- steps = self.get_state('__steps')
- steps.insert(0, self.__create_step_tuple(fn, args, dargs))
- self.next_step_index += 1
- self.set_state('__steps', steps)
+ def _run_step_fn(self, local_vars, fn, args, dargs):
+ """Run a (step) function within the given context"""
- def _run_step_fn(self, local_vars, fn, args, dargs):
- """Run a (step) function within the given context"""
+ local_vars['__args'] = args
+ local_vars['__dargs'] = dargs
+ exec('__ret = %s(*__args, **__dargs)' % fn,
+ local_vars, local_vars)
+ return local_vars['__ret']
- local_vars['__args'] = args
- local_vars['__dargs'] = dargs
- exec('__ret = %s(*__args, **__dargs)' % fn,
- local_vars, local_vars)
- return local_vars['__ret']
+ def _create_frame(self, global_vars, ancestry, fn_name):
+ """Set up the environment like it would have been when this
+ function was first defined.
- def _create_frame(self, global_vars, ancestry, fn_name):
- """Set up the environment like it would have been when this
- function was first defined.
+ Child step engine 'implementations' must have 'return locals()'
+ at the end of their steps. Because of this, we can call the
+ parent function and get back all child functions (i.e. those
+ defined within it).
- Child step engine 'implementations' must have 'return locals()'
- at the end of their steps. Because of this, we can call the
- parent function and get back all child functions (i.e. those
- defined within it).
+ Unfortunately, the call stack of the function calling
+ job.next_step might have been deeper than the function it
+ added. In order to make sure that the environment is what it
+ should be, we need to then pop off the frames we built until
+ we find the frame where the function was first defined."""
- Unfortunately, the call stack of the function calling
- job.next_step might have been deeper than the function it
- added. In order to make sure that the environment is what it
- should be, we need to then pop off the frames we built until
- we find the frame where the function was first defined."""
+ # The copies ensure that the parent frames are not modified
+ # while building child frames. This matters if we then
+ # pop some frames in the next part of this function.
+ current_frame = copy.copy(global_vars)
+ frames = [current_frame]
+ for steps_fn_name in ancestry:
+ ret = self._run_step_fn(current_frame,
+ steps_fn_name, [], {})
+ current_frame = copy.copy(ret)
+ frames.append(current_frame)
- # The copies ensure that the parent frames are not modified
- # while building child frames. This matters if we then
- # pop some frames in the next part of this function.
- current_frame = copy.copy(global_vars)
- frames = [current_frame]
- for steps_fn_name in ancestry:
- ret = self._run_step_fn(current_frame,
- steps_fn_name, [], {})
- current_frame = copy.copy(ret)
- frames.append(current_frame)
+ while len(frames) > 2:
+ if fn_name not in frames[-2]:
+ break
+ if frames[-2][fn_name] != frames[-1][fn_name]:
+ break
+ frames.pop()
+ ancestry.pop()
- while len(frames) > 2:
- if fn_name not in frames[-2]:
- break
- if frames[-2][fn_name] != frames[-1][fn_name]:
- break
- frames.pop()
- ancestry.pop()
+ return (frames[-1], ancestry)
- return (frames[-1], ancestry)
+ def _add_step_init(self, local_vars, current_function):
+ """If the function returned a dictionary that includes a
+ function named 'step_init', prepend it to our list of steps.
+ This will only get run the first time a function with a nested
+ use of the step engine is run."""
- def _add_step_init(self, local_vars, current_function):
- """If the function returned a dictionary that includes a
- function named 'step_init', prepend it to our list of steps.
- This will only get run the first time a function with a nested
- use of the step engine is run."""
+ if (isinstance(local_vars, dict) and
+ 'step_init' in local_vars and
+ callable(local_vars['step_init'])):
+ # The init step is a child of the function
+ # we were just running.
+ self.current_step_ancestry.append(current_function)
+ self.next_step_prepend('step_init')
- if (isinstance(local_vars, dict) and
- 'step_init' in local_vars and
- callable(local_vars['step_init'])):
- # The init step is a child of the function
- # we were just running.
- self.current_step_ancestry.append(current_function)
- self.next_step_prepend('step_init')
+ def step_engine(self):
+ """the stepping engine -- if the control file defines
+ step_init we will be using this engine to drive multiple runs.
+ """
+ """Do the next step"""
- def step_engine(self):
- """the stepping engine -- if the control file defines
- step_init we will be using this engine to drive multiple runs.
- """
- """Do the next step"""
+ # Set up the environment and then interpret the control file.
+ # Some control files will have code outside of functions,
+ # which means we need to have our state engine initialized
+ # before reading in the file.
+ global_control_vars = {'job': self}
+ exec(JOB_PREAMBLE, global_control_vars, global_control_vars)
+ execfile(self.control, global_control_vars, global_control_vars)
- # Set up the environment and then interpret the control file.
- # Some control files will have code outside of functions,
- # which means we need to have our state engine initialized
- # before reading in the file.
- global_control_vars = {'job': self}
- exec(JOB_PREAMBLE, global_control_vars, global_control_vars)
- execfile(self.control, global_control_vars, global_control_vars)
+ # If we loaded in a mid-job state file, then we presumably
+ # know what steps we have yet to run.
+ if not self.state_existed:
+ if global_control_vars.has_key('step_init'):
+ self.next_step(global_control_vars['step_init'])
- # If we loaded in a mid-job state file, then we presumably
- # know what steps we have yet to run.
- if not self.state_existed:
- if global_control_vars.has_key('step_init'):
- self.next_step(global_control_vars['step_init'])
+ # Iterate through the steps. If we reboot, we'll simply
+ # continue iterating on the next step.
+ while len(self.get_state('__steps')) > 0:
+ steps = self.get_state('__steps')
+ (ancestry, fn_name, args, dargs) = steps.pop(0)
+ self.set_state('__steps', steps)
- # Iterate through the steps. If we reboot, we'll simply
- # continue iterating on the next step.
- while len(self.get_state('__steps')) > 0:
- steps = self.get_state('__steps')
- (ancestry, fn_name, args, dargs) = steps.pop(0)
- self.set_state('__steps', steps)
+ self.next_step_index = 0
+ ret = self._create_frame(global_control_vars, ancestry,
+ fn_name)
+ local_vars, self.current_step_ancestry = ret
+ local_vars = self._run_step_fn(local_vars, fn_name,
+ args, dargs)
+ self._add_step_init(local_vars, fn_name)
- self.next_step_index = 0
- ret = self._create_frame(global_control_vars, ancestry,
- fn_name)
- local_vars, self.current_step_ancestry = ret
- local_vars = self._run_step_fn(local_vars, fn_name,
- args, dargs)
- self._add_step_init(local_vars, fn_name)
+ def _init_group_level(self):
+ self.group_level = self.get_state("__group_level", default=0)
- def _init_group_level(self):
- self.group_level = self.get_state("__group_level", default=0)
+ def _increment_group_level(self):
+ self.group_level += 1
+ self.set_state("__group_level", self.group_level)
- def _increment_group_level(self):
- self.group_level += 1
- self.set_state("__group_level", self.group_level)
+ def _decrement_group_level(self):
+ self.group_level -= 1
+ self.set_state("__group_level", self.group_level)
- def _decrement_group_level(self):
- self.group_level -= 1
- self.set_state("__group_level", self.group_level)
+ def record(self, status_code, subdir, operation, status = '',
+ optional_fields=None):
+ """
+ Record job-level status
- def record(self, status_code, subdir, operation, status = '',
- optional_fields=None):
- """
- Record job-level status
+ The intent is to make this file both machine parseable and
+ human readable. That involves a little more complexity, but
+ really isn't all that bad ;-)
- The intent is to make this file both machine parseable and
- human readable. That involves a little more complexity, but
- really isn't all that bad ;-)
+ Format is <status code>\t<subdir>\t<operation>\t<status>
- Format is <status code>\t<subdir>\t<operation>\t<status>
+ status code: (GOOD|WARN|FAIL|ABORT)
+ or START
+ or END (GOOD|WARN|FAIL|ABORT)
- status code: (GOOD|WARN|FAIL|ABORT)
- or START
- or END (GOOD|WARN|FAIL|ABORT)
+ subdir: MUST be a relevant subdirectory in the results,
+ or None, which will be represented as '----'
- subdir: MUST be a relevant subdirectory in the results,
- or None, which will be represented as '----'
+ operation: description of what you ran (e.g. "dbench", or
+ "mkfs -t foobar /dev/sda9")
- operation: description of what you ran (e.g. "dbench", or
- "mkfs -t foobar /dev/sda9")
+ status: error message or "completed sucessfully"
- status: error message or "completed sucessfully"
+ ------------------------------------------------------------
- ------------------------------------------------------------
+ Initial tabs indicate indent levels for grouping and are
+ governed by self.group_level
- Initial tabs indicate indent levels for grouping and are
- governed by self.group_level
+ multiline messages have secondary lines prefaced by a double
+ space (' ')
+ """
- multiline messages have secondary lines prefaced by a double
- space (' ')
- """
+ if subdir:
+ if re.search(r'[\n\t]', subdir):
+ raise ValueError("Invalid character in "
+ "subdir string")
+ substr = subdir
+ else:
+ substr = '----'
- if subdir:
- if re.match(r'[\n\t]', subdir):
- raise ValueError("Invalid character in "
- "subdir string")
- substr = subdir
- else:
- substr = '----'
-
- if not logging.is_valid_status(status_code):
- raise ValueError("Invalid status code supplied: %s" %
- status_code)
- if not operation:
- operation = '----'
+ if not logging.is_valid_status(status_code):
+ raise ValueError("Invalid status code supplied: %s" %
+ status_code)
+ if not operation:
+ operation = '----'
- if re.match(r'[\n\t]', operation):
- raise ValueError("Invalid character in "
- "operation string")
- operation = operation.rstrip()
+ if re.search(r'[\n\t]', operation):
+ raise ValueError("Invalid character in "
+ "operation string")
+ operation = operation.rstrip()
- if not optional_fields:
- optional_fields = {}
+ if not optional_fields:
+ optional_fields = {}
- status = status.rstrip()
- status = re.sub(r"\t", " ", status)
- # Ensure any continuation lines are marked so we can
- # detect them in the status file to ensure it is parsable.
- status = re.sub(r"\n", "\n" + "\t" * self.group_level + " ",
- status)
+ status = status.rstrip()
+ status = re.sub(r"\t", " ", status)
+ # Ensure any continuation lines are marked so we can
+ # detect them in the status file to ensure it is parsable.
+ status = re.sub(r"\n", "\n" + "\t" * self.group_level + " ",
+ status)
- # Generate timestamps for inclusion in the logs
- epoch_time = int(time.time()) # seconds since epoch, in UTC
- local_time = time.localtime(epoch_time)
- optional_fields["timestamp"] = str(epoch_time)
- optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
- local_time)
+ # Generate timestamps for inclusion in the logs
+ epoch_time = int(time.time()) # seconds since epoch, in UTC
+ local_time = time.localtime(epoch_time)
+ optional_fields["timestamp"] = str(epoch_time)
+ optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
+ local_time)
- fields = [status_code, substr, operation]
- fields += ["%s=%s" % x for x in optional_fields.iteritems()]
- fields.append(status)
+ fields = [status_code, substr, operation]
+ fields += ["%s=%s" % x for x in optional_fields.iteritems()]
+ fields.append(status)
- msg = '\t'.join(str(x) for x in fields)
- msg = '\t' * self.group_level + msg
+ msg = '\t'.join(str(x) for x in fields)
+ msg = '\t' * self.group_level + msg
- msg_tag = ""
- if "." in self.log_filename:
- msg_tag = self.log_filename.split(".", 1)[1]
+ msg_tag = ""
+ if "." in self.log_filename:
+ msg_tag = self.log_filename.split(".", 1)[1]
- self.harness.test_status_detail(status_code, substr,
- operation, status, msg_tag)
- self.harness.test_status(msg, msg_tag)
+ self.harness.test_status_detail(status_code, substr,
+ operation, status, msg_tag)
+ self.harness.test_status(msg, msg_tag)
- # log to stdout (if enabled)
- #if self.log_filename == self.DEFAULT_LOG_FILENAME:
- print msg
+ # log to stdout (if enabled)
+ #if self.log_filename == self.DEFAULT_LOG_FILENAME:
+ print msg
- # log to the "root" status log
- status_file = os.path.join(self.resultdir, self.log_filename)
- open(status_file, "a").write(msg + "\n")
+ # log to the "root" status log
+ status_file = os.path.join(self.resultdir, self.log_filename)
+ open(status_file, "a").write(msg + "\n")
- # log to the subdir status log (if subdir is set)
- if subdir:
- dir = os.path.join(self.resultdir, subdir)
- status_file = os.path.join(dir,
- self.DEFAULT_LOG_FILENAME)
- open(status_file, "a").write(msg + "\n")
+ # log to the subdir status log (if subdir is set)
+ if subdir:
+ dir = os.path.join(self.resultdir, subdir)
+ status_file = os.path.join(dir,
+ self.DEFAULT_LOG_FILENAME)
+ open(status_file, "a").write(msg + "\n")
class disk_usage_monitor:
- def __init__(self, logging_func, device, max_mb_per_hour):
- self.func = logging_func
- self.device = device
- self.max_mb_per_hour = max_mb_per_hour
+ def __init__(self, logging_func, device, max_mb_per_hour):
+ self.func = logging_func
+ self.device = device
+ self.max_mb_per_hour = max_mb_per_hour
- def start(self):
- self.initial_space = autotest_utils.freespace(self.device)
- self.start_time = time.time()
+ def start(self):
+ self.initial_space = autotest_utils.freespace(self.device)
+ self.start_time = time.time()
- def stop(self):
- # if no maximum usage rate was set, we don't need to
- # generate any warnings
- if not self.max_mb_per_hour:
- return
+ def stop(self):
+ # if no maximum usage rate was set, we don't need to
+ # generate any warnings
+ if not self.max_mb_per_hour:
+ return
- final_space = autotest_utils.freespace(self.device)
- used_space = self.initial_space - final_space
- stop_time = time.time()
- total_time = stop_time - self.start_time
- # round up the time to one minute, to keep extremely short
- # tests from generating false positives due to short, badly
- # timed bursts of activity
- total_time = max(total_time, 60.0)
+ final_space = autotest_utils.freespace(self.device)
+ used_space = self.initial_space - final_space
+ stop_time = time.time()
+ total_time = stop_time - self.start_time
+ # clamp the elapsed time to at least one minute, to keep extremely short
+ # tests from generating false positives due to short, badly
+ # timed bursts of activity
+ total_time = max(total_time, 60.0)
- # determine the usage rate
- bytes_per_sec = used_space / total_time
- mb_per_sec = bytes_per_sec / 1024**2
- mb_per_hour = mb_per_sec * 60 * 60
+ # determine the usage rate
+ bytes_per_sec = used_space / total_time
+ mb_per_sec = bytes_per_sec / 1024**2
+ mb_per_hour = mb_per_sec * 60 * 60
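+ # e.g. 300 MB consumed over a 10-minute run is 0.5 MB/s,
+ # i.e. 1800 MB/hour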
- if mb_per_hour > self.max_mb_per_hour:
- msg = ("disk space on %s was consumed at a rate of "
- "%.2f MB/hour")
- msg %= (self.device, mb_per_hour)
- self.func(msg)
+ if mb_per_hour > self.max_mb_per_hour:
+ msg = ("disk space on %s was consumed at a rate of "
+ "%.2f MB/hour")
+ msg %= (self.device, mb_per_hour)
+ self.func(msg)
- @classmethod
- def watch(cls, *monitor_args, **monitor_dargs):
- """ Generic decorator to wrap a function call with the
- standard create-monitor -> start -> call -> stop idiom."""
- def decorator(func):
- def watched_func(*args, **dargs):
- monitor = cls(*monitor_args, **monitor_dargs)
- monitor.start()
- try:
- func(*args, **dargs)
- finally:
- monitor.stop()
- return watched_func
- return decorator
+ @classmethod
+ def watch(cls, *monitor_args, **monitor_dargs):
+ """ Generic decorator to wrap a function call with the
+ standard create-monitor -> start -> call -> stop idiom."""
+ def decorator(func):
+ def watched_func(*args, **dargs):
+ monitor = cls(*monitor_args, **monitor_dargs)
+ monitor.start()
+ try:
+ func(*args, **dargs)
+ finally:
+ monitor.stop()
+ return watched_func
+ return decorator
def runjob(control, cont = False, tag = "default", harness_type = '',
- use_external_logging = False):
- """The main interface to this module
+ use_external_logging = False):
+ """The main interface to this module
- control
- The control file to use for this job.
- cont
- Whether this is the continuation of a previously started job
- """
- control = os.path.abspath(control)
- state = control + '.state'
+ control
+ The control file to use for this job.
+ cont
+ Whether this is the continuation of a previously started job
+ """
+ control = os.path.abspath(control)
+ state = control + '.state'
- # instantiate the job object ready for the control file.
- myjob = None
- try:
- # Check that the control file is valid
- if not os.path.exists(control):
- raise error.JobError(control +
- ": control file not found")
+ # instantiate the job object ready for the control file.
+ myjob = None
+ try:
+ # Check that the control file is valid
+ if not os.path.exists(control):
+ raise error.JobError(control +
+ ": control file not found")
- # When continuing, the job is complete when there is no
- # state file; ensure we don't try to continue.
- if cont and not os.path.exists(state):
- raise error.JobComplete("all done")
- if cont == False and os.path.exists(state):
- os.unlink(state)
+ # When continuing, the job is complete when there is no
+ # state file; ensure we don't try to continue.
+ if cont and not os.path.exists(state):
+ raise error.JobComplete("all done")
+ if cont == False and os.path.exists(state):
+ os.unlink(state)
- myjob = job(control, tag, cont, harness_type,
- use_external_logging)
+ myjob = job(control, tag, cont, harness_type,
+ use_external_logging)
- # Load in the user's control file, which may do any one of:
- # 1) execute in toto
- # 2) define steps, and select the first via next_step()
- myjob.step_engine()
+ # Load in the user's control file, which may do any one of:
+ # 1) execute in toto
+ # 2) define steps, and select the first via next_step()
+ myjob.step_engine()
- except error.JobContinue:
- sys.exit(5)
+ except error.JobContinue:
+ sys.exit(5)
- except error.JobComplete:
- sys.exit(1)
+ except error.JobComplete:
+ sys.exit(1)
- except error.JobError, instance:
- print "JOB ERROR: " + instance.args[0]
- if myjob:
- command = None
- if len(instance.args) > 1:
- command = instance.args[1]
- myjob.record('ABORT', None, command, instance.args[0])
- myjob._decrement_group_level()
- myjob.record('END ABORT', None, None)
- assert(myjob.group_level == 0)
- myjob.complete(1)
- else:
- sys.exit(1)
+ except error.JobError, instance:
+ print "JOB ERROR: " + instance.args[0]
+ if myjob:
+ command = None
+ if len(instance.args) > 1:
+ command = instance.args[1]
+ myjob.record('ABORT', None, command, instance.args[0])
+ myjob._decrement_group_level()
+ myjob.record('END ABORT', None, None)
+ assert(myjob.group_level == 0)
+ myjob.complete(1)
+ else:
+ sys.exit(1)
- except Exception, e:
- msg = str(e) + '\n' + traceback.format_exc()
- print "JOB ERROR: " + msg
- if myjob:
- myjob.record('ABORT', None, None, msg)
- myjob._decrement_group_level()
- myjob.record('END ABORT', None, None)
- assert(myjob.group_level == 0)
- myjob.complete(1)
- else:
- sys.exit(1)
+ except Exception, e:
+ msg = str(e) + '\n' + traceback.format_exc()
+ print "JOB ERROR: " + msg
+ if myjob:
+ myjob.record('ABORT', None, None, msg)
+ myjob._decrement_group_level()
+ myjob.record('END ABORT', None, None)
+ assert(myjob.group_level == 0)
+ myjob.complete(1)
+ else:
+ sys.exit(1)
- # If we get here, then we assume the job is complete and good.
- myjob._decrement_group_level()
- myjob.record('END GOOD', None, None)
- assert(myjob.group_level == 0)
+ # If we get here, then we assume the job is complete and good.
+ myjob._decrement_group_level()
+ myjob.record('END GOOD', None, None)
+ assert(myjob.group_level == 0)
- myjob.complete(0)
+ myjob.complete(0)
# site_job.py may be non-existent or empty; make sure that an appropriate
# site_job class is created nevertheless
try:
- from site_job import site_job
+ from site_job import site_job
except ImportError:
- class site_job(base_job):
- pass
+ class site_job(base_job):
+ pass
class job(site_job):
- pass
+ pass
diff --git a/client/bin/kernel.py b/client/bin/kernel.py
index fca8f7e..ad65ae9 100755
--- a/client/bin/kernel.py
+++ b/client/bin/kernel.py
@@ -8,727 +8,727 @@
class kernel:
- """ Class for compiling kernels.
+ """ Class for compiling kernels.
- Data for the object includes the src files
- used to create the kernel, patches applied, config (base + changes),
- the build directory itself, and logged output
+ Data for the object includes the src files
+ used to create the kernel, patches applied, config (base + changes),
+ the build directory itself, and logged output
- Properties:
- job
- Backpointer to the job object we're part of
- autodir
- Path to the top level autotest dir (/usr/local/autotest)
- src_dir
- <tmp_dir>/src/
- build_dir
- <tmp_dir>/linux/
- config_dir
- <results_dir>/config/
- log_dir
- <results_dir>/debug/
- results_dir
- <results_dir>/results/
- """
+ Properties:
+ job
+ Backpointer to the job object we're part of
+ autodir
+ Path to the top level autotest dir (/usr/local/autotest)
+ src_dir
+ <tmp_dir>/src/
+ build_dir
+ <tmp_dir>/linux/
+ config_dir
+ <results_dir>/config/
+ log_dir
+ <results_dir>/debug/
+ results_dir
+ <results_dir>/results/
+ """
- autodir = ''
+ autodir = ''
- def __init__(self, job, base_tree, subdir, tmp_dir, build_dir, leave = False):
- """Initialize the kernel build environment
+ def __init__(self, job, base_tree, subdir, tmp_dir, build_dir, leave = False):
+ """Initialize the kernel build environment
- job
- which job this build is part of
- base_tree
- base kernel tree. Can be one of the following:
- 1. A local tarball
- 2. A URL to a tarball
- 3. A local directory (will symlink it)
- 4. A shorthand expandable (eg '2.6.11-git3')
- subdir
- subdir in the results directory (eg "build")
- (holds config/, debug/, results/)
- tmp_dir
+ job
+ which job this build is part of
+ base_tree
+ base kernel tree. Can be one of the following:
+ 1. A local tarball
+ 2. A URL to a tarball
+ 3. A local directory (will symlink it)
+ 4. A shorthand expandable (eg '2.6.11-git3')
+ subdir
+ subdir in the results directory (eg "build")
+ (holds config/, debug/, results/)
+ tmp_dir
- leave
- Boolean, whether to leave existing tmpdir or not
- """
- self.job = job
- self.autodir = job.autodir
+ leave
+ Boolean, whether to leave existing tmpdir or not
+ """
+ self.job = job
+ self.autodir = job.autodir
- self.src_dir = os.path.join(tmp_dir, 'src')
- self.build_dir = os.path.join(tmp_dir, build_dir)
- # created by get_kernel_tree
- self.config_dir = os.path.join(subdir, 'config')
- self.log_dir = os.path.join(subdir, 'debug')
- self.results_dir = os.path.join(subdir, 'results')
- self.subdir = os.path.basename(subdir)
+ self.src_dir = os.path.join(tmp_dir, 'src')
+ self.build_dir = os.path.join(tmp_dir, build_dir)
+ # created by get_kernel_tree
+ self.config_dir = os.path.join(subdir, 'config')
+ self.log_dir = os.path.join(subdir, 'debug')
+ self.results_dir = os.path.join(subdir, 'results')
+ self.subdir = os.path.basename(subdir)
- self.installed_as = None
+ self.installed_as = None
- if not leave:
- if os.path.isdir(self.src_dir):
- utils.system('rm -rf ' + self.src_dir)
- if os.path.isdir(self.build_dir):
- utils.system('rm -rf ' + self.build_dir)
+ if not leave:
+ if os.path.isdir(self.src_dir):
+ utils.system('rm -rf ' + self.src_dir)
+ if os.path.isdir(self.build_dir):
+ utils.system('rm -rf ' + self.build_dir)
- if not os.path.exists(self.src_dir):
- os.mkdir(self.src_dir)
- for path in [self.config_dir, self.log_dir, self.results_dir]:
- if os.path.exists(path):
- utils.system('rm -rf ' + path)
- os.mkdir(path)
+ if not os.path.exists(self.src_dir):
+ os.mkdir(self.src_dir)
+ for path in [self.config_dir, self.log_dir, self.results_dir]:
+ if os.path.exists(path):
+ utils.system('rm -rf ' + path)
+ os.mkdir(path)
- logpath = os.path.join(self.log_dir, 'build_log')
- self.logfile = open(logpath, 'w+')
- self.applied_patches = []
+ logpath = os.path.join(self.log_dir, 'build_log')
+ self.logfile = open(logpath, 'w+')
+ self.applied_patches = []
- self.target_arch = None
- self.build_target = 'bzImage'
- self.build_image = None
+ self.target_arch = None
+ self.build_target = 'bzImage'
+ self.build_image = None
- arch = autotest_utils.get_current_kernel_arch()
- if arch == 's390' or arch == 's390x':
- self.build_target = 'image'
- elif arch == 'ia64':
- self.build_target = 'all'
- self.build_image = 'vmlinux.gz'
+ arch = autotest_utils.get_current_kernel_arch()
+ if arch == 's390' or arch == 's390x':
+ self.build_target = 'image'
+ elif arch == 'ia64':
+ self.build_target = 'all'
+ self.build_image = 'vmlinux.gz'
- if leave:
- return
+ if leave:
+ return
- self.logfile.write('BASE: %s\n' % base_tree)
+ self.logfile.write('BASE: %s\n' % base_tree)
- # Where we have direct version hint record that
- # for later configuration selection.
- shorthand = re.compile(r'^\d+\.\d+\.\d+')
- if shorthand.match(base_tree):
- self.base_tree_version = base_tree
- else:
- self.base_tree_version = None
-
- # Actually extract the tree. Make sure we know it occurred
- self.extract(base_tree)
+ # Where we have direct version hint record that
+ # for later configuration selection.
+ shorthand = re.compile(r'^\d+\.\d+\.\d+')
+ if shorthand.match(base_tree):
+ self.base_tree_version = base_tree
+ else:
+ self.base_tree_version = None
+ # Actually extract the tree. Make sure we know it occurred
+ self.extract(base_tree)
- def kernelexpand(self, kernel):
- # If we have something like a path, just use it as it is
- if '/' in kernel:
- return [kernel]
- # Find the configured mirror list.
- mirrors = self.job.config_get('mirror.mirrors')
- if not mirrors:
- # LEGACY: convert the kernel.org mirror
- mirror = self.job.config_get('mirror.ftp_kernel_org')
- if mirror:
- korg = 'http://www.kernel.org/pub/linux/kernel'
- mirrors = [
- [ korg + '/v2.6', mirror + '/v2.6' ],
- [ korg + '/people/akpm/patches/2.6',
- mirror + '/akpm' ],
- [ korg + '/people/mbligh',
- mirror + '/mbligh' ],
- ]
+ def kernelexpand(self, kernel):
+ # If we have something like a path, just use it as it is
+ if '/' in kernel:
+ return [kernel]
- patches = kernelexpand.expand_classic(kernel, mirrors)
- print patches
+ # Find the configured mirror list.
+ mirrors = self.job.config_get('mirror.mirrors')
+ if not mirrors:
+ # LEGACY: convert the kernel.org mirror
+ mirror = self.job.config_get('mirror.ftp_kernel_org')
+ if mirror:
+ korg = 'http://www.kernel.org/pub/linux/kernel'
+ mirrors = [
+ [ korg + '/v2.6', mirror + '/v2.6' ],
+ [ korg + '/people/akpm/patches/2.6',
+ mirror + '/akpm' ],
+ [ korg + '/people/mbligh',
+ mirror + '/mbligh' ],
+ ]
- return patches
+ patches = kernelexpand.expand_classic(kernel, mirrors)
+ print patches
+ return patches
- @logging.record
- @tee_output_logdir_mark
- def extract(self, base_tree):
- if os.path.exists(base_tree):
- self.get_kernel_tree(base_tree)
- else:
- base_components = self.kernelexpand(base_tree)
- print 'kernelexpand: '
- print base_components
- self.get_kernel_tree(base_components.pop(0))
- if base_components: # apply remaining patches
- self.patch(*base_components)
+ @logging.record
+ @tee_output_logdir_mark
+ def extract(self, base_tree):
+ if os.path.exists(base_tree):
+ self.get_kernel_tree(base_tree)
+ else:
+ base_components = self.kernelexpand(base_tree)
+ print 'kernelexpand: '
+ print base_components
+ self.get_kernel_tree(base_components.pop(0))
+ if base_components: # apply remaining patches
+ self.patch(*base_components)
- @logging.record
- @tee_output_logdir_mark
- def patch(self, *patches):
- """Apply a list of patches (in order)"""
- if not patches:
- return
- print 'Applying patches: ', patches
- self.apply_patches(self.get_patches(patches))
+ @logging.record
+ @tee_output_logdir_mark
+ def patch(self, *patches):
+ """Apply a list of patches (in order)"""
+ if not patches:
+ return
+ print 'Applying patches: ', patches
+ self.apply_patches(self.get_patches(patches))
- @logging.record
- @tee_output_logdir_mark
- def config(self, config_file = '', config_list = None, defconfig = False):
- self.set_cross_cc()
- config = kernel_config.kernel_config(self.job, self.build_dir,
- self.config_dir, config_file, config_list,
- defconfig, self.base_tree_version)
+ @logging.record
+ @tee_output_logdir_mark
+ def config(self, config_file = '', config_list = None, defconfig = False):
+ self.set_cross_cc()
+ config = kernel_config.kernel_config(self.job, self.build_dir,
+ self.config_dir, config_file, config_list,
+ defconfig, self.base_tree_version)
- def get_patches(self, patches):
- """fetch the patches to the local src_dir"""
- local_patches = []
- for patch in patches:
- dest = os.path.join(self.src_dir, basename(patch))
- # FIXME: this isn't unique. Append something to it
- # like wget does if it's not there?
- print "get_file %s %s %s %s" % (patch, dest, self.src_dir, basename(patch))
- utils.get_file(patch, dest)
- # probably safer to use the command, not python library
- md5sum = utils.system_output('md5sum ' + dest).split()[0]
- local_patches.append((patch, dest, md5sum))
- return local_patches
+ def get_patches(self, patches):
+ """fetch the patches to the local src_dir"""
+ local_patches = []
+ for patch in patches:
+ dest = os.path.join(self.src_dir, basename(patch))
+ # FIXME: this isn't unique. Append something to it
+ # like wget does if it's not there?
+ print "get_file %s %s %s %s" % (patch, dest, self.src_dir, basename(patch))
+ utils.get_file(patch, dest)
+ # probably safer to use the command, not python library
+ md5sum = utils.system_output('md5sum ' + dest).split()[0]
+ local_patches.append((patch, dest, md5sum))
+ return local_patches
- def apply_patches(self, local_patches):
- """apply the list of patches, in order"""
- builddir = self.build_dir
- os.chdir(builddir)
- if not local_patches:
- return None
- for (spec, local, md5sum) in local_patches:
- if local.endswith('.bz2') or local.endswith('.gz'):
- ref = spec
- else:
- ref = force_copy(local, self.results_dir)
- ref = self.job.relative_path(ref)
- patch_id = "%s %s %s" % (spec, ref, md5sum)
- log = "PATCH: " + patch_id + "\n"
- print log
- cat_file_to_cmd(local, 'patch -p1 > /dev/null')
- self.logfile.write(log)
- self.applied_patches.append(patch_id)
+ def apply_patches(self, local_patches):
+ """apply the list of patches, in order"""
+ builddir = self.build_dir
+ os.chdir(builddir)
+ if not local_patches:
+ return None
+ for (spec, local, md5sum) in local_patches:
+ if local.endswith('.bz2') or local.endswith('.gz'):
+ ref = spec
+ else:
+ ref = force_copy(local, self.results_dir)
+ ref = self.job.relative_path(ref)
+ patch_id = "%s %s %s" % (spec, ref, md5sum)
+ log = "PATCH: " + patch_id + "\n"
+ print log
+ cat_file_to_cmd(local, 'patch -p1 > /dev/null')
+ self.logfile.write(log)
+ self.applied_patches.append(patch_id)
- def get_kernel_tree(self, base_tree):
- """Extract/link base_tree to self.build_dir"""
-
- # if base_tree is a dir, assume uncompressed kernel
- if os.path.isdir(base_tree):
- print 'Symlinking existing kernel source'
- os.symlink(base_tree, self.build_dir)
- # otherwise, extract tarball
- else:
- os.chdir(os.path.dirname(self.src_dir))
- # Figure out local destination for tarball
- tarball = os.path.join(self.src_dir, os.path.basename(base_tree))
- utils.get_file(base_tree, tarball)
- print 'Extracting kernel tarball:', tarball, '...'
- autotest_utils.extract_tarball_to_dir(tarball,
- self.build_dir)
+ def get_kernel_tree(self, base_tree):
+ """Extract/link base_tree to self.build_dir"""
+ # if base_tree is a dir, assume uncompressed kernel
+ if os.path.isdir(base_tree):
+ print 'Symlinking existing kernel source'
+ os.symlink(base_tree, self.build_dir)
- def extraversion(self, tag, append=1):
- os.chdir(self.build_dir)
- extraversion_sub = r's/^EXTRAVERSION =\s*\(.*\)/EXTRAVERSION = '
- if append:
- p = extraversion_sub + '\\1-%s/' % tag
- else:
- p = extraversion_sub + '-%s/' % tag
- utils.system('mv Makefile Makefile.old')
- utils.system('sed "%s" < Makefile.old > Makefile' % p)
+ # otherwise, extract tarball
+ else:
+ os.chdir(os.path.dirname(self.src_dir))
+ # Figure out local destination for tarball
+ tarball = os.path.join(self.src_dir, os.path.basename(base_tree))
+ utils.get_file(base_tree, tarball)
+ print 'Extracting kernel tarball:', tarball, '...'
+ autotest_utils.extract_tarball_to_dir(tarball,
+ self.build_dir)
- @logging.record
- @tee_output_logdir_mark
- def build(self, make_opts = '', logfile = '', extraversion='autotest'):
- """build the kernel
+ def extraversion(self, tag, append=1):
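+ # e.g. extraversion('autotest') rewrites "EXTRAVERSION = -rc3" in
+ # the Makefile as "EXTRAVERSION = -rc3-autotest"; with append=0 the
+ # old value is replaced outright ("EXTRAVERSION = -autotest")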
+ os.chdir(self.build_dir)
+ extraversion_sub = r's/^EXTRAVERSION =\s*\(.*\)/EXTRAVERSION = '
+ if append:
+ p = extraversion_sub + '\\1-%s/' % tag
+ else:
+ p = extraversion_sub + '-%s/' % tag
+ utils.system('mv Makefile Makefile.old')
+ utils.system('sed "%s" < Makefile.old > Makefile' % p)
- make_opts
- additional options to make, if any
- """
- os_dep.commands('gcc', 'make')
- if logfile == '':
- logfile = os.path.join(self.log_dir, 'kernel_build')
- os.chdir(self.build_dir)
- if extraversion:
- self.extraversion(extraversion)
- self.set_cross_cc()
- # setup_config_file(config_file, config_overrides)
- # Not needed on 2.6, but hard to tell -- handle failure
- utils.system('make dep', ignore_status=True)
- threads = 2 * autotest_utils.count_cpus()
- build_string = 'make -j %d %s %s' % (threads, make_opts,
- self.build_target)
- # eg make bzImage, or make zImage
- print build_string
- system(build_string)
- if kernel_config.modules_needed('.config'):
- utils.system('make -j %d modules' % (threads))
+ @logging.record
+ @tee_output_logdir_mark
+ def build(self, make_opts = '', logfile = '', extraversion='autotest'):
+ """build the kernel
- kernel_version = self.get_kernel_build_ver()
- kernel_version = re.sub('-autotest', '', kernel_version)
- self.logfile.write('BUILD VERSION: %s\n' % kernel_version)
+ make_opts
+ additional options to make, if any
+ """
+ os_dep.commands('gcc', 'make')
+ if logfile == '':
+ logfile = os.path.join(self.log_dir, 'kernel_build')
+ os.chdir(self.build_dir)
+ if extraversion:
+ self.extraversion(extraversion)
+ self.set_cross_cc()
+ # setup_config_file(config_file, config_overrides)
- force_copy(self.build_dir+'/System.map', self.results_dir)
+ # Not needed on 2.6, but hard to tell -- handle failure
+ utils.system('make dep', ignore_status=True)
+ threads = 2 * autotest_utils.count_cpus()
+ build_string = 'make -j %d %s %s' % (threads, make_opts,
+ self.build_target)
+ # eg make bzImage, or make zImage
+ print build_string
+ system(build_string)
+ if kernel_config.modules_needed('.config'):
+ utils.system('make -j %d modules' % (threads))
+ kernel_version = self.get_kernel_build_ver()
+ kernel_version = re.sub('-autotest', '', kernel_version)
+ self.logfile.write('BUILD VERSION: %s\n' % kernel_version)
- def build_timed(self, threads, timefile = '/dev/null', make_opts = '',
- output = '/dev/null'):
- """time the bulding of the kernel"""
- os.chdir(self.build_dir)
- self.set_cross_cc()
+ force_copy(self.build_dir+'/System.map', self.results_dir)
- self.clean(logged=False)
- build_string = "/usr/bin/time -o %s make %s -j %s vmlinux" \
- % (timefile, make_opts, threads)
- build_string += ' > %s 2>&1' % output
- print build_string
- utils.system(build_string)
- if (not os.path.isfile('vmlinux')):
- errmsg = "no vmlinux found, kernel build failed"
- raise error.TestError(errmsg)
+ def build_timed(self, threads, timefile = '/dev/null', make_opts = '',
+ output = '/dev/null'):
+ """time the bulding of the kernel"""
+ os.chdir(self.build_dir)
+ self.set_cross_cc()
+
+ self.clean(logged=False)
+ build_string = "/usr/bin/time -o %s make %s -j %s vmlinux" \
+ % (timefile, make_opts, threads)
+ build_string += ' > %s 2>&1' % output
+ print build_string
+ utils.system(build_string)
+
+ if (not os.path.isfile('vmlinux')):
+ errmsg = "no vmlinux found, kernel build failed"
+ raise error.TestError(errmsg)
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def clean(self):
+ """make clean in the kernel tree"""
+ os.chdir(self.build_dir)
+ print "make clean"
+ utils.system('make clean > /dev/null 2> /dev/null')
+
+
+ @logging.record
+ @tee_output_logdir_mark
+ def mkinitrd(self, version, image, system_map, initrd):
+ """Build kernel initrd image.
+ Try to use distro specific way to build initrd image.
+ Parameters:
+ version
+ new kernel version
+ image
+ new kernel image file
+ system_map
+ System.map file
+ initrd
+ initrd image file to build
+ """
+ vendor = autotest_utils.get_os_vendor()
+ if os.path.isfile(initrd):
+ print "Existing %s file, will remove it." % initrd
+ os.remove(initrd)
- @logging.record
- @tee_output_logdir_mark
- def clean(self):
- """make clean in the kernel tree"""
- os.chdir(self.build_dir)
- print "make clean"
- utils.system('make clean > /dev/null 2> /dev/null')
+ args = self.job.config_get('kernel.mkinitrd_extra_args')
+ # don't leak 'None' into mkinitrd command
+ if not args:
+ args = ''
- @logging.record
- @tee_output_logdir_mark
- def mkinitrd(self, version, image, system_map, initrd):
- """Build kernel initrd image.
- Try to use distro specific way to build initrd image.
- Parameters:
- version
- new kernel version
- image
- new kernel image file
- system_map
- System.map file
- initrd
- initrd image file to build
- """
- vendor = autotest_utils.get_os_vendor()
-
- if os.path.isfile(initrd):
- print "Existing %s file, will remove it." % initrd
- os.remove(initrd)
+ if vendor in ['Red Hat', 'Fedora Core']:
+ utils.system('mkinitrd %s %s %s' % (args, initrd, version))
+ elif vendor in ['SUSE']:
+ utils.system('mkinitrd %s -k %s -i %s -M %s' % (args, image, initrd, system_map))
+ elif vendor in ['Debian', 'Ubuntu']:
+ if os.path.isfile('/usr/sbin/mkinitrd'):
+ cmd = '/usr/sbin/mkinitrd'
+ elif os.path.isfile('/usr/sbin/mkinitramfs'):
+ cmd = '/usr/sbin/mkinitramfs'
+ else:
+ raise error.TestError('No Debian initrd builder')
+ utils.system('%s %s -o %s %s' % (cmd, args, initrd, version))
+ else:
+ raise error.TestError('Unsupported vendor %s' % vendor)
- args = self.job.config_get('kernel.mkinitrd_extra_args')
- # don't leak 'None' into mkinitrd command
- if not args:
- args = ''
+ def set_build_image(self, image):
+ self.build_image = image
- if vendor in ['Red Hat', 'Fedora Core']:
- utils.system('mkinitrd %s %s %s' % (args, initrd, version))
- elif vendor in ['SUSE']:
- utils.system('mkinitrd %s -k %s -i %s -M %s' % (args, image, initrd, system_map))
- elif vendor in ['Debian', 'Ubuntu']:
- if os.path.isfile('/usr/sbin/mkinitrd'):
- cmd = '/usr/sbin/mkinitrd'
- elif os.path.isfile('/usr/sbin/mkinitramfs'):
- cmd = '/usr/sbin/mkinitramfs'
- else:
- raise error.TestError('No Debian initrd builder')
- utils.system('%s %s -o %s %s' % (cmd, args, initrd, version))
- else:
- raise error.TestError('Unsupported vendor %s' % vendor)
+ @logging.record
+ @tee_output_logdir_mark
+ def install(self, tag='autotest', prefix = '/'):
+ """make install in the kernel tree"""
- def set_build_image(self, image):
- self.build_image = image
+ # Record that we have installed the kernel, and
+ # the tag under which we installed it.
+ self.installed_as = tag
+ os.chdir(self.build_dir)
- @logging.record
- @tee_output_logdir_mark
- def install(self, tag='autotest', prefix = '/'):
- """make install in the kernel tree"""
+ if not os.path.isdir(prefix):
+ os.mkdir(prefix)
+ self.boot_dir = os.path.join(prefix, 'boot')
+ if not os.path.isdir(self.boot_dir):
+ os.mkdir(self.boot_dir)
- # Record that we have installed the kernel, and
- # the tag under which we installed it.
- self.installed_as = tag
+ if not self.build_image:
+ images = glob.glob('arch/*/boot/' + self.build_target)
+ if len(images):
+ self.build_image = images[0]
+ else:
+ self.build_image = self.build_target
- os.chdir(self.build_dir)
+ # remember installed files
+ self.vmlinux = self.boot_dir + '/vmlinux-' + tag
+ if (self.build_image != 'vmlinux'):
+ self.image = self.boot_dir + '/vmlinuz-' + tag
+ else:
+ self.image = self.vmlinux
+ self.system_map = self.boot_dir + '/System.map-' + tag
+ self.config = self.boot_dir + '/config-' + tag
+ self.initrd = ''
- if not os.path.isdir(prefix):
- os.mkdir(prefix)
- self.boot_dir = os.path.join(prefix, 'boot')
- if not os.path.isdir(self.boot_dir):
- os.mkdir(self.boot_dir)
+ # copy to boot dir
+ autotest_utils.force_copy('vmlinux', self.vmlinux)
+ if (self.build_image != 'vmlinux'):
+ autotest_utils.force_copy(self.build_image, self.image)
+ autotest_utils.force_copy('System.map', self.system_map)
+ autotest_utils.force_copy('.config', self.config)
- if not self.build_image:
- images = glob.glob('arch/*/boot/' + self.build_target)
- if len(images):
- self.build_image = images[0]
- else:
- self.build_image = self.build_target
+ if not kernel_config.modules_needed('.config'):
+ return
- # remember installed files
- self.vmlinux = self.boot_dir + '/vmlinux-' + tag
- if (self.build_image != 'vmlinux'):
- self.image = self.boot_dir + '/vmlinuz-' + tag
- else:
- self.image = self.vmlinux
- self.system_map = self.boot_dir + '/System.map-' + tag
- self.config = self.boot_dir + '/config-' + tag
- self.initrd = ''
+ utils.system('make modules_install INSTALL_MOD_PATH=%s' % prefix)
+ if prefix == '/':
+ self.initrd = self.boot_dir + '/initrd-' + tag
+ self.mkinitrd(self.get_kernel_build_ver(), self.image,
+ self.system_map, self.initrd)
- # copy to boot dir
- autotest_utils.force_copy('vmlinux', self.vmlinux)
- if (self.build_image != 'vmlinux'):
- autotest_utils.force_copy(self.build_image, self.image)
- autotest_utils.force_copy('System.map', self.system_map)
- autotest_utils.force_copy('.config', self.config)
- if not kernel_config.modules_needed('.config'):
- return
+ def add_to_bootloader(self, tag='autotest', args=''):
+ """ add this kernel to bootloader, taking an
+ optional parameter of space separated parameters
+ e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
+ """
- utils.system('make modules_install INSTALL_MOD_PATH=%s' % prefix)
- if prefix == '/':
- self.initrd = self.boot_dir + '/initrd-' + tag
- self.mkinitrd(self.get_kernel_build_ver(), self.image,
- self.system_map, self.initrd)
+ # remove existing entry if present
+ self.job.bootloader.remove_kernel(tag)
+ # pull the base argument set from the job config,
+ baseargs = self.job.config_get('boot.default_args')
+ if baseargs:
+ args = baseargs + " " + args
- def add_to_bootloader(self, tag='autotest', args=''):
- """ add this kernel to bootloader, taking an
- optional parameter of space separated parameters
- e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
- """
+ # otherwise populate from /proc/cmdline
+ # if not baseargs:
+ # baseargs = open('/proc/cmdline', 'r').readline().strip()
+ # NOTE: This is unnecessary, because boottool does it.
- # remove existing entry if present
- self.job.bootloader.remove_kernel(tag)
+ root = None
+ roots = [x for x in args.split() if x.startswith('root=')]
+ if roots:
+ root = re.sub('^root=', '', roots[0])
+ arglist = [x for x in args.split() if not x.startswith('root=')]
+ args = ' '.join(arglist)
- # pull the base argument set from the job config,
- baseargs = self.job.config_get('boot.default_args')
- if baseargs:
- args = baseargs + " " + args
-
- # otherwise populate from /proc/cmdline
- # if not baseargs:
- # baseargs = open('/proc/cmdline', 'r').readline().strip()
- # NOTE: This is unnecessary, because boottool does it.
+ # add the kernel entry
+ # add_kernel(image, title='autotest', initrd='')
+ self.job.bootloader.add_kernel(self.image, tag, self.initrd, \
+ args = args, root = root)
- root = None
- roots = [x for x in args.split() if x.startswith('root=')]
- if roots:
- root = re.sub('^root=', '', roots[0])
- arglist = [x for x in args.split() if not x.startswith('root=')]
- args = ' '.join(arglist)
- # add the kernel entry
- # add_kernel(image, title='autotest', initrd='')
- self.job.bootloader.add_kernel(self.image, tag, self.initrd, \
- args = args, root = root)
+ def get_kernel_build_arch(self, arch=None):
+ """
+ Work out the current kernel architecture (as a kernel arch)
+ """
+ if not arch:
+ arch = autotest_utils.get_current_kernel_arch()
+ if re.match('i.86', arch):
+ return 'i386'
+ elif re.match('sun4u', arch):
+ return 'sparc64'
+ elif re.match('arm.*', arch):
+ return 'arm'
+ elif re.match('sa110', arch):
+ return 'arm'
+ elif re.match('s390x', arch):
+ return 's390'
+ elif re.match('parisc64', arch):
+ return 'parisc'
+ elif re.match('ppc.*', arch):
+ return 'powerpc'
+ elif re.match('mips.*', arch):
+ return 'mips'
+ else:
+ return arch
- def get_kernel_build_arch(self, arch=None):
- """
- Work out the current kernel architecture (as a kernel arch)
- """
- if not arch:
- arch = autotest_utils.get_current_kernel_arch()
- if re.match('i.86', arch):
- return 'i386'
- elif re.match('sun4u', arch):
- return 'sparc64'
- elif re.match('arm.*', arch):
- return 'arm'
- elif re.match('sa110', arch):
- return 'arm'
- elif re.match('s390x', arch):
- return 's390'
- elif re.match('parisc64', arch):
- return 'parisc'
- elif re.match('ppc.*', arch):
- return 'powerpc'
- elif re.match('mips.*', arch):
- return 'mips'
- else:
- return arch
+ def get_kernel_build_release(self):
+ releasem = re.compile(r'.*UTS_RELEASE\s+"([^"]+)".*')
+ versionm = re.compile(r'.*UTS_VERSION\s+"([^"]+)".*')
+ release = None
+ version = None
- def get_kernel_build_release(self):
- releasem = re.compile(r'.*UTS_RELEASE\s+"([^"]+)".*')
- versionm = re.compile(r'.*UTS_VERSION\s+"([^"]+)".*')
+ for file in [ self.build_dir + "/include/linux/version.h",
+ self.build_dir + "/include/linux/utsrelease.h",
+ self.build_dir + "/include/linux/compile.h" ]:
+ if os.path.exists(file):
+ fd = open(file, 'r')
+ for line in fd.readlines():
+ m = releasem.match(line)
+ if m:
+ release = m.groups()[0]
+ m = versionm.match(line)
+ if m:
+ version = m.groups()[0]
+ fd.close()
- release = None
- version = None
+ return (release, version)
- for file in [ self.build_dir + "/include/linux/version.h",
- self.build_dir + "/include/linux/utsrelease.h",
- self.build_dir + "/include/linux/compile.h" ]:
- if os.path.exists(file):
- fd = open(file, 'r')
- for line in fd.readlines():
- m = releasem.match(line)
- if m:
- release = m.groups()[0]
- m = versionm.match(line)
- if m:
- version = m.groups()[0]
- fd.close()
- return (release, version)
+ def get_kernel_build_ident(self):
+ (release, version) = self.get_kernel_build_release()
-
- def get_kernel_build_ident(self):
- (release, version) = self.get_kernel_build_release()
+ if not release or not version:
+ raise error.JobError('kernel has no identity')
- if not release or not version:
- raise error.JobError('kernel has no identity')
+ return release + '::' + version
- return release + '::' + version
+ def boot(self, args='', ident=1):
+ """ install and boot this kernel, do not care how
+ just make it happen.
+ """
- def boot(self, args='', ident=1):
- """ install and boot this kernel, do not care how
- just make it happen.
- """
+ # If we can check the kernel identity do so.
+ if ident:
+ when = int(time.time())
+ ident = self.get_kernel_build_ident()
+ args += " IDENT=%d" % (when)
- # If we can check the kernel identity do so.
- if ident:
- when = int(time.time())
- ident = self.get_kernel_build_ident()
- args += " IDENT=%d" % (when)
+ self.job.next_step_prepend(["job.kernel_check_ident",
+ when, ident, self.subdir,
+ self.applied_patches])
- self.job.next_step_prepend(["job.kernel_check_ident",
- when, ident, self.subdir,
- self.applied_patches])
+ # Check if the kernel has been installed, if not install
+ # as the default tag and boot that.
+ if not self.installed_as:
+ self.install()
- # Check if the kernel has been installed, if not install
- # as the default tag and boot that.
- if not self.installed_as:
- self.install()
+ # Boot the selected tag.
+ self.add_to_bootloader(args=args, tag=self.installed_as)
- # Boot the selected tag.
- self.add_to_bootloader(args=args, tag=self.installed_as)
+ # Boot it.
+ self.job.reboot(tag=self.installed_as)
- # Boot it.
- self.job.reboot(tag=self.installed_as)
+ def get_kernel_build_ver(self):
+ """Check Makefile and .config to return kernel version"""
+ version = patchlevel = sublevel = extraversion = localversion = ''
- def get_kernel_build_ver(self):
- """Check Makefile and .config to return kernel version"""
- version = patchlevel = sublevel = extraversion = localversion = ''
+ for line in open(self.build_dir + '/Makefile', 'r').readlines():
+ if line.startswith('VERSION'):
+ version = line[line.index('=') + 1:].strip()
+ if line.startswith('PATCHLEVEL'):
+ patchlevel = line[line.index('=') + 1:].strip()
+ if line.startswith('SUBLEVEL'):
+ sublevel = line[line.index('=') + 1:].strip()
+ if line.startswith('EXTRAVERSION'):
+ extraversion = line[line.index('=') + 1:].strip()
- for line in open(self.build_dir + '/Makefile', 'r').readlines():
- if line.startswith('VERSION'):
- version = line[line.index('=') + 1:].strip()
- if line.startswith('PATCHLEVEL'):
- patchlevel = line[line.index('=') + 1:].strip()
- if line.startswith('SUBLEVEL'):
- sublevel = line[line.index('=') + 1:].strip()
- if line.startswith('EXTRAVERSION'):
- extraversion = line[line.index('=') + 1:].strip()
+ for line in open(self.build_dir + '/.config', 'r').readlines():
+ if line.startswith('CONFIG_LOCALVERSION='):
+ localversion = line.rstrip().split('"')[1]
- for line in open(self.build_dir + '/.config', 'r').readlines():
- if line.startswith('CONFIG_LOCALVERSION='):
- localversion = line.rstrip().split('"')[1]
+ return "%s.%s.%s%s%s" %(version, patchlevel, sublevel, extraversion, localversion)
- return "%s.%s.%s%s%s" %(version, patchlevel, sublevel, extraversion, localversion)
+ def set_build_target(self, build_target):
+ if build_target:
+ self.build_target = build_target
+ print 'BUILD TARGET: %s' % self.build_target
- def set_build_target(self, build_target):
- if build_target:
- self.build_target = build_target
- print 'BUILD TARGET: %s' % self.build_target
+ def set_cross_cc(self, target_arch=None, cross_compile=None,
+ build_target='bzImage'):
+ """Set up to cross-compile.
+ This is broken. We need to work out what the default
+ compile produces, and set the cross compiler only if
+ that is not what we want.
+ """
- def set_cross_cc(self, target_arch=None, cross_compile=None,
- build_target='bzImage'):
- """Set up to cross-compile.
- This is broken. We need to work out what the default
- compile produces, and set the cross compiler only if
- that is not what we want.
- """
+ if self.target_arch:
+ return
- if self.target_arch:
- return
+ # if someone has set build_target, don't clobber in set_cross_cc
+ # run set_build_target before calling set_cross_cc
+ if not self.build_target:
+ self.set_build_target(build_target)
- # if someone has set build_target, don't clobber in set_cross_cc
- # run set_build_target before calling set_cross_cc
- if not self.build_target:
- self.set_build_target(build_target)
+ # If no 'target_arch' given assume native compilation
+ if target_arch == None:
+ target_arch = autotest_utils.get_current_kernel_arch()
+ if target_arch == 'ppc64':
+ if self.build_target == 'bzImage':
+ self.build_target = 'vmlinux'
- # If no 'target_arch' given assume native compilation
- if target_arch == None:
- target_arch = autotest_utils.get_current_kernel_arch()
- if target_arch == 'ppc64':
- if self.build_target == 'bzImage':
- self.build_target = 'vmlinux'
+ if not cross_compile:
+ cross_compile = self.job.config_get('kernel.cross_cc')
- if not cross_compile:
- cross_compile = self.job.config_get('kernel.cross_cc')
+ if cross_compile:
+ os.environ['CROSS_COMPILE'] = cross_compile
+ else:
+ if os.environ.has_key('CROSS_COMPILE'):
+ del os.environ['CROSS_COMPILE']
- if cross_compile:
- os.environ['CROSS_COMPILE'] = cross_compile
- else:
- if os.environ.has_key('CROSS_COMPILE'):
- del os.environ['CROSS_COMPILE']
-
- return # HACK. Crap out for now.
+ return # HACK. Crap out for now.
- # At this point I know what arch I *want* to build for
- # but have no way of working out what arch the default
- # compiler DOES build for.
+ # At this point I know what arch I *want* to build for
+ # but have no way of working out what arch the default
+ # compiler DOES build for.
- # Oh, and BTW, install_package() doesn't exist yet.
+ # Oh, and BTW, install_package() doesn't exist yet.
- if target_arch == 'ppc64':
- install_package('ppc64-cross')
- cross_compile = os.path.join(self.autodir, 'sources/ppc64-cross/bin')
+ if target_arch == 'ppc64':
+ install_package('ppc64-cross')
+ cross_compile = os.path.join(self.autodir, 'sources/ppc64-cross/bin')
- elif target_arch == 'x86_64':
- install_package('x86_64-cross')
- cross_compile = os.path.join(self.autodir, 'sources/x86_64-cross/bin')
+ elif target_arch == 'x86_64':
+ install_package('x86_64-cross')
+ cross_compile = os.path.join(self.autodir, 'sources/x86_64-cross/bin')
- os.environ['ARCH'] = self.target_arch = target_arch
+ os.environ['ARCH'] = self.target_arch = target_arch
- self.cross_compile = cross_compile
- if self.cross_compile:
- os.environ['CROSS_COMPILE'] = self.cross_compile
+ self.cross_compile = cross_compile
+ if self.cross_compile:
+ os.environ['CROSS_COMPILE'] = self.cross_compile
- def pickle_dump(self, filename):
- """dump a pickle of ourself out to the specified filename
+ def pickle_dump(self, filename):
+ """dump a pickle of ourself out to the specified filename
- we can't pickle the backreference to job (it contains fd's),
- nor would we want to. Same for logfile (fd's).
- """
- temp = copy.copy(self)
- temp.job = None
- temp.logfile = None
- pickle.dump(temp, open(filename, 'w'))
+ we can't pickle the backreference to job (it contains fd's),
+ nor would we want to. Same for logfile (fd's).
+ """
+ temp = copy.copy(self)
+ temp.job = None
+ temp.logfile = None
+ pickle.dump(temp, open(filename, 'w'))
class rpm_kernel:
- """ Class for installing rpm kernel package
- """
+ """ Class for installing rpm kernel package
+ """
- def __init__(self, job, rpm_package, subdir):
- self.job = job
- self.rpm_package = rpm_package
- self.log_dir = os.path.join(subdir, 'debug')
- self.subdir = os.path.basename(subdir)
- if os.path.exists(self.log_dir):
- utils.system('rm -rf ' + self.log_dir)
- os.mkdir(self.log_dir)
- self.installed_as = None
+ def __init__(self, job, rpm_package, subdir):
+ self.job = job
+ self.rpm_package = rpm_package
+ self.log_dir = os.path.join(subdir, 'debug')
+ self.subdir = os.path.basename(subdir)
+ if os.path.exists(self.log_dir):
+ utils.system('rm -rf ' + self.log_dir)
+ os.mkdir(self.log_dir)
+ self.installed_as = None
- @logging.record
- @tee_output_logdir_mark
- def install(self, tag='autotest'):
- self.installed_as = tag
+ @logging.record
+ @tee_output_logdir_mark
+ def install(self, tag='autotest'):
+ self.installed_as = tag
- self.rpm_name = utils.system_output('rpm -qp ' + self.rpm_package)
+ self.rpm_name = utils.system_output('rpm -qp ' + self.rpm_package)
- # install
- utils.system('rpm -i --force ' + self.rpm_package)
+ # install
+ utils.system('rpm -i --force ' + self.rpm_package)
- # get file list
- files = utils.system_output('rpm -ql ' + self.rpm_name).splitlines()
+ # get file list
+ files = utils.system_output('rpm -ql ' + self.rpm_name).splitlines()
- # search for vmlinuz
- for file in files:
- if file.startswith('/boot/vmlinuz'):
- self.image = file
- break
- else:
- errmsg = "%s doesn't contain /boot/vmlinuz"
- errmsg %= self.rpm_package
- raise error.TestError(errmsg)
+ # search for vmlinuz
+ for file in files:
+ if file.startswith('/boot/vmlinuz'):
+ self.image = file
+ break
+ else:
+ errmsg = "%s doesn't contain /boot/vmlinuz"
+ errmsg %= self.rpm_package
+ raise error.TestError(errmsg)
- # search for initrd
- self.initrd = ''
- for file in files:
- if file.startswith('/boot/initrd'):
- self.initrd = file
- break
+ # search for initrd
+ self.initrd = ''
+ for file in files:
+ if file.startswith('/boot/initrd'):
+ self.initrd = file
+ break
- # get version and release number
- self.version, self.release = utils.system_output(
- 'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q ' + self.rpm_name).splitlines()[0:2]
+ # get version and release number
+ self.version, self.release = utils.system_output(
+ 'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q ' + self.rpm_name).splitlines()[0:2]
- def add_to_bootloader(self, tag='autotest', args=''):
- """ Add this kernel to bootloader
- """
+ def add_to_bootloader(self, tag='autotest', args=''):
+ """ Add this kernel to bootloader
+ """
- # remove existing entry if present
- self.job.bootloader.remove_kernel(tag)
+ # remove existing entry if present
+ self.job.bootloader.remove_kernel(tag)
- # pull the base argument set from the job config
- baseargs = self.job.config_get('boot.default_args')
- if baseargs:
- args = baseargs + ' ' + args
+ # pull the base argument set from the job config
+ baseargs = self.job.config_get('boot.default_args')
+ if baseargs:
+ args = baseargs + ' ' + args
- # otherwise populate from /proc/cmdline
- # if not baseargs:
- # baseargs = open('/proc/cmdline', 'r').readline().strip()
- # NOTE: This is unnecessary, because boottool does it.
+ # otherwise populate from /proc/cmdline
+ # if not baseargs:
+ # baseargs = open('/proc/cmdline', 'r').readline().strip()
+ # NOTE: This is unnecessary, because boottool does it.
- root = None
- roots = [x for x in args.split() if x.startswith('root=')]
- if roots:
- root = re.sub('^root=', '', roots[0])
- arglist = [x for x in args.split() if not x.startswith('root=')]
- args = ' '.join(arglist)
+ root = None
+ roots = [x for x in args.split() if x.startswith('root=')]
+ if roots:
+ root = re.sub('^root=', '', roots[0])
+ arglist = [x for x in args.split() if not x.startswith('root=')]
+ args = ' '.join(arglist)
- # add the kernel entry
- self.job.bootloader.add_kernel(self.image, tag, self.initrd, args = args, root = root)
+ # add the kernel entry
+ self.job.bootloader.add_kernel(self.image, tag, self.initrd, args = args, root = root)
- def boot(self, args='', ident=1):
- """ install and boot this kernel
- """
+ def boot(self, args='', ident=1):
+ """ install and boot this kernel
+ """
- # Check if the kernel has been installed, if not install
- # as the default tag and boot that.
- if not self.installed_as:
- self.install()
+ # Check if the kernel has been installed, if not install
+ # as the default tag and boot that.
+ if not self.installed_as:
+ self.install()
- # If we can check the kernel identity do so.
- if ident:
- when = int(time.time())
- ident = '-'.join([self.version,
- self.rpm_name.split('-')[1],
- self.release])
- args += " IDENT=%d" % (when)
+ # If we can check the kernel identity do so.
+ if ident:
+ when = int(time.time())
+ ident = '-'.join([self.version,
+ self.rpm_name.split('-')[1],
+ self.release])
+ args += " IDENT=%d" % (when)
- self.job.next_step_prepend(["job.kernel_check_ident",
- when, ident, self.subdir, 'rpm'])
+ self.job.next_step_prepend(["job.kernel_check_ident",
+ when, ident, self.subdir, 'rpm'])
- # Boot the selected tag.
- self.add_to_bootloader(args=args, tag=self.installed_as)
+ # Boot the selected tag.
+ self.add_to_bootloader(args=args, tag=self.installed_as)
- # Boot it.
- self.job.reboot(tag=self.installed_as)
+ # Boot it.
+ self.job.reboot(tag=self.installed_as)
# pull in some optional site-specific path pre-processing
try:
- import site_kernel
- preprocess_path = site_kernel.preprocess_path
- del site_kernel
+ import site_kernel
+ preprocess_path = site_kernel.preprocess_path
+ del site_kernel
except ImportError:
- # just make the preprocessor a nop
- def preprocess_path(path):
- return path
+ # just make the preprocessor a nop
+ def preprocess_path(path):
+ return path
def auto_kernel(job, path, subdir, tmp_dir, build_dir, leave=False):
- """\
- Create a kernel object, dynamically selecting the appropriate class to use
- based on the path provided.
- """
- path = preprocess_path(path)
- if path.endswith('.rpm'):
- return rpm_kernel(job, path, subdir)
- else:
- return kernel(job, path, subdir, tmp_dir, build_dir, leave)
+ """\
+ Create a kernel object, dynamically selecting the appropriate class to use
+ based on the path provided.
+ """
+ path = preprocess_path(path)
+ if path.endswith('.rpm'):
+ return rpm_kernel(job, path, subdir)
+ else:
+ return kernel(job, path, subdir, tmp_dir, build_dir, leave)
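
For reference, the classes above are normally driven from a job control
file rather than instantiated directly; a minimal sketch (the tarball
path and config URL are purely illustrative, and 'job' is supplied by
the autotest harness at run time):

    def step_init():
        # auto_kernel() returns rpm_kernel for *.rpm paths, kernel otherwise
        k = job.kernel('/usr/src/linux-2.6.23.tar.bz2')  # illustrative path
        k.config('http://example.com/config-2.6.23')     # hypothetical URL
        k.build()
        k.boot()   # installs under the default 'autotest' tag, then reboots
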
diff --git a/client/bin/kernel_config.py b/client/bin/kernel_config.py
index ed6321a..9b471b5 100755
--- a/client/bin/kernel_config.py
+++ b/client/bin/kernel_config.py
@@ -5,122 +5,122 @@
from autotest_lib.client.common_lib import error, utils
def apply_overrides(orig_file, changes_file, output_file):
- override = dict()
+ override = dict()
- # First suck all the changes into a dictionary.
- input = file(changes_file, 'r')
- for line in input.readlines():
- if line.startswith('CONFIG_'):
- key = line.split('=')[0]
- override[key] = line
- elif line.startswith('# CONFIG_'):
- key = line.split(' ')[1]
- override[key] = line
- input.close()
+ # First suck all the changes into a dictionary.
+ input = file(changes_file, 'r')
+ for line in input.readlines():
+ if line.startswith('CONFIG_'):
+ key = line.split('=')[0]
+ override[key] = line
+ elif line.startswith('# CONFIG_'):
+ key = line.split(' ')[1]
+ override[key] = line
+ input.close()
- # Now go through the input file, overriding lines where need be
- input = file(orig_file, 'r')
- output = file(output_file, 'w')
- for line in input.readlines():
- if line.startswith('CONFIG_'):
- key = line.split('=')[0]
- elif line.startswith('# CONFIG_'):
- key = line.split(' ')[1]
- else:
- key = None
- if key and key in override:
- output.write(override[key])
- else:
- output.write(line)
- input.close()
- output.close()
+ # Now go through the input file, overriding lines where need be
+ input = file(orig_file, 'r')
+ output = file(output_file, 'w')
+ for line in input.readlines():
+ if line.startswith('CONFIG_'):
+ key = line.split('=')[0]
+ elif line.startswith('# CONFIG_'):
+ key = line.split(' ')[1]
+ else:
+ key = None
+ if key and key in override:
+ output.write(override[key])
+ else:
+ output.write(line)
+ input.close()
+ output.close()
def diff_configs(old, new):
- utils.system('diff -u %s %s > %s' % (old, new, new + '.diff'),
- ignore_status=True)
+ utils.system('diff -u %s %s > %s' % (old, new, new + '.diff'),
+ ignore_status=True)
def modules_needed(config):
- return (autotest_utils.grep('CONFIG_MODULES=y', config)
- and autotest_utils.grep('=m', config))
+ return (autotest_utils.grep('CONFIG_MODULES=y', config)
+ and autotest_utils.grep('=m', config))
def config_by_name(name, set):
- version = kernel_versions.version_choose_config(name, set[1:])
- if version:
- return set[0] + version
- return None
+ version = kernel_versions.version_choose_config(name, set[1:])
+ if version:
+ return set[0] + version
+ return None
class kernel_config:
- # Build directory must be ready before init'ing config.
- #
- # Stages:
- # 1. Get original config file
- # 2. Apply overrides
- # 3. Do 'make oldconfig' to update it to current source code
- # (gets done implicitly during the process)
- #
- # You may specify a defconfig within the tree to build,
- # or a custom config file you want, or None, to get the
- # machine's default config file from the repo.
+ # Build directory must be ready before init'ing config.
+ #
+ # Stages:
+ # 1. Get original config file
+ # 2. Apply overrides
+ # 3. Do 'make oldconfig' to update it to current source code
+ # (gets done implicitly during the process)
+ #
+ # You may specify a defconfig within the tree to build,
+ # or a custom config file you want, or None, to get the
+ # machine's default config file from the repo.
- build_dir = '' # the directory we're building in
- config_dir = '' # local repository for config_file data
+ build_dir = '' # the directory we're building in
+ config_dir = '' # local repository for config_file data
- build_config = '' # the config file in the build directory
- orig_config = '' # the original config file
- over_config = '' # config file + overrides
+ build_config = '' # the config file in the build directory
+ orig_config = '' # the original config file
+ over_config = '' # config file + overrides
- def __init__(self, job, build_dir, config_dir, orig_file,
- overrides, defconfig = False, name = None):
- self.build_dir = build_dir
- self.config_dir = config_dir
+ def __init__(self, job, build_dir, config_dir, orig_file,
+ overrides, defconfig = False, name = None):
+ self.build_dir = build_dir
+ self.config_dir = config_dir
- # 1. Get original config file
- self.build_config = build_dir + '/.config'
- if (orig_file == '' and not defconfig): # use user default
- set = job.config_get("kernel.default_config_set")
- defconf = None
- if set and name:
- defconf = config_by_name(name, set)
- if not defconf:
- defconf = job.config_get("kernel.default_config")
- if defconf:
- orig_file = defconf
- if (orig_file == '' or defconfig): # use defconfig
- print "kernel_config: using defconfig to configure kernel"
- os.chdir(build_dir)
- utils.system('make defconfig')
- else:
- print "kernel_config: using " + orig_file + \
- " to configure kernel"
- self.orig_config = config_dir + '/config.orig'
- utils.get_file(orig_file, self.orig_config)
- self.update_config(self.orig_config, self.orig_config+'.new')
- diff_configs(self.orig_config, self.orig_config+'.new')
+ # 1. Get original config file
+ self.build_config = build_dir + '/.config'
+ if (orig_file == '' and not defconfig): # use user default
+ set = job.config_get("kernel.default_config_set")
+ defconf = None
+ if set and name:
+ defconf = config_by_name(name, set)
+ if not defconf:
+ defconf = job.config_get("kernel.default_config")
+ if defconf:
+ orig_file = defconf
+ if (orig_file == '' or defconfig): # use defconfig
+ print "kernel_config: using defconfig to configure kernel"
+ os.chdir(build_dir)
+ utils.system('make defconfig')
+ else:
+ print "kernel_config: using " + orig_file + \
+ " to configure kernel"
+ self.orig_config = config_dir + '/config.orig'
+ utils.get_file(orig_file, self.orig_config)
+ self.update_config(self.orig_config, self.orig_config+'.new')
+ diff_configs(self.orig_config, self.orig_config+'.new')
- # 2. Apply overrides
- if overrides:
- print "kernel_config: using " + overrides + \
- " to re-configure kernel"
- self.over_config = config_dir + '/config.over'
- overrides_local = self.over_config + '.changes'
- utils.get_file(overrides, overrides_local)
- apply_overrides(self.build_config, overrides_local, self.over_config)
- self.update_config(self.over_config, self.over_config+'.new')
- diff_configs(self.over_config, self.over_config+'.new')
- else:
- self.over_config = self.orig_config
+ # 2. Apply overrides
+ if overrides:
+ print "kernel_config: using " + overrides + \
+ " to re-configure kernel"
+ self.over_config = config_dir + '/config.over'
+ overrides_local = self.over_config + '.changes'
+ utils.get_file(overrides, overrides_local)
+ apply_overrides(self.build_config, overrides_local, self.over_config)
+ self.update_config(self.over_config, self.over_config+'.new')
+ diff_configs(self.over_config, self.over_config+'.new')
+ else:
+ self.over_config = self.orig_config
- def update_config(self, old_config, new_config = 'None'):
- os.chdir(self.build_dir)
- shutil.copyfile(old_config, self.build_config)
- utils.system('yes "" | make oldconfig > /dev/null')
- if new_config:
- shutil.copyfile(self.build_config, new_config)
+ def update_config(self, old_config, new_config = 'None'):
+ os.chdir(self.build_dir)
+ shutil.copyfile(old_config, self.build_config)
+ utils.system('yes "" | make oldconfig > /dev/null')
+ if new_config:
+ shutil.copyfile(self.build_config, new_config)
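
To make the override semantics concrete: apply_overrides() only rewrites
lines whose CONFIG_ key already appears in the original file; override
keys with no matching line are dropped, not appended. A small sketch
with hypothetical file names:

    # config.orig contains:           config.changes contains:
    #   CONFIG_SMP=y                    # CONFIG_SMP is not set
    #   CONFIG_NUMA=y                   CONFIG_DEBUG_KERNEL=y
    apply_overrides('config.orig', 'config.changes', 'config.out')
    # config.out keeps CONFIG_NUMA=y unchanged, replaces the CONFIG_SMP
    # line with the override, and gains no CONFIG_DEBUG_KERNEL line.
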
diff --git a/client/bin/kernel_versions.py b/client/bin/kernel_versions.py
index 3af08fe..9b04beb 100644
--- a/client/bin/kernel_versions.py
+++ b/client/bin/kernel_versions.py
@@ -5,111 +5,109 @@
import sys,re
-#
+#
# Sort key for ordering versions chronologically. The key ordering
# problem is the one introduced by -rcN releases: these come _before_
# their accompanying version.
-#
+#
# 2.6.0 -> 2.6.1-rc1 -> 2.6.1
-#
+#
# In order to sort them we convert all non-rc releases to a pseudo
# -rc99 release. We also convert all numbers to two digits. The
# result is then sortable textually.
-#
+#
# 02.06.00-rc99 -> 02.06.01-rc01 -> 02.06.01-rc99
-#
+#
encode_sep = re.compile(r'(\D+)')
def version_encode(version):
- bits = encode_sep.split(version)
- n = 9
- if len(bits[0]) == 0:
- n += 2
- if len(bits) == n or (len(bits) > n and bits[n] != '_rc'):
- # Insert missing _rc99 after 2 . 6 . 18 -smp- 220 . 0
- bits.insert(n, '_rc')
- bits.insert(n+1, '99')
- n = 5
- if len(bits[0]) == 0:
- n += 2
- if len(bits) <= n or bits[n] != '-rc':
- bits.insert(n, '-rc')
- bits.insert(n+1, '99')
- for n in range(0, len(bits), 2):
- if len(bits[n]) == 1:
- bits[n] = '0' + bits[n]
+ bits = encode_sep.split(version)
+ n = 9
+ if len(bits[0]) == 0:
+ n += 2
+ if len(bits) == n or (len(bits) > n and bits[n] != '_rc'):
+ # Insert missing _rc99 after 2 . 6 . 18 -smp- 220 . 0
+ bits.insert(n, '_rc')
+ bits.insert(n+1, '99')
+ n = 5
+ if len(bits[0]) == 0:
+ n += 2
+ if len(bits) <= n or bits[n] != '-rc':
+ bits.insert(n, '-rc')
+ bits.insert(n+1, '99')
+ for n in range(0, len(bits), 2):
+ if len(bits[n]) == 1:
+ bits[n] = '0' + bits[n]
- return ''.join(bits)
+ return ''.join(bits)
def version_limit(version, n):
- bits = encode_sep.split(version)
- return ''.join(bits[0:n])
+ bits = encode_sep.split(version)
+ return ''.join(bits[0:n])
def version_len(version):
- return len(encode_sep.split(version))
+ return len(encode_sep.split(version))
#
# Given a list of versions find the nearest version which is deemed
# less than or equal to the target. Versions are in linux order
# as follows:
-#
+#
# 2.6.0 -> 2.6.1 -> 2.6.2-rc1 -> 2.6.2-rc2 -> 2.6.2 -> 2.6.3-rc1
# | |\
# | | 2.6.2-rc1-mm1 -> 2.6.2-rc1-mm2
# | \
# | 2.6.2-rc1-ac1 -> 2.6.2-rc1-ac2
-# \
+# \
# 2.6.1-mm1 -> 2.6.1-mm2
-#
+#
# Note that a 2.6.1-mm1 is not a predecessor of 2.6.2-rc1-mm1.
#
def version_choose_config(version, candidates):
- # Check if we have an exact match ... if so magic
- if version in candidates:
- return version
+ # Check if we have an exact match ... if so magic
+ if version in candidates:
+ return version
- # Sort the search key into the list ordered by 'age'
- deco = [ (version_encode(v), i, v) for i, v in
- enumerate(candidates + [ version ]) ]
- deco.sort()
- versions = [ v for _, _, v in deco ]
+ # Sort the search key into the list ordered by 'age'
+ deco = [ (version_encode(v), i, v) for i, v in
+ enumerate(candidates + [ version ]) ]
+ deco.sort()
+ versions = [ v for _, _, v in deco ]
- # Everything sorted below us is of interest.
- for n in range(len(versions) - 1, -1, -1):
- if versions[n] == version:
- break
- n -= 1
+ # Everything sorted below us is of interest.
+ for n in range(len(versions) - 1, -1, -1):
+ if versions[n] == version:
+ break
+ n -= 1
- # Try ever shorter 'prefixes' 2.6.20-rc3-mm, 2.6.20-rc, 2.6. etc
- # to match against the ordered list newest to oldest.
- length = version_len(version) - 1
- version = version_limit(version, length)
- while length > 1:
- for o in range(n, -1, -1):
- if version_len(versions[o]) == (length + 1) and \
- version_limit(versions[o], length) == version:
- return versions[o]
- length -= 2
- version = version_limit(version, length)
+ # Try ever shorter 'prefixes' 2.6.20-rc3-mm, 2.6.20-rc, 2.6. etc
+ # to match against the ordered list newest to oldest.
+ length = version_len(version) - 1
+ version = version_limit(version, length)
+ while length > 1:
+ for o in range(n, -1, -1):
+ if version_len(versions[o]) == (length + 1) and \
+ version_limit(versions[o], length) == version:
+ return versions[o]
+ length -= 2
+ version = version_limit(version, length)
- return None
+ return None
def is_released_kernel(version):
- # True if version name suggests a released kernel,
- # not some release candidate or experimental kernel name
- # e.g. 2.6.18-smp-200.0 includes no other text, underscores, etc
- version = version.strip('0123456789.-')
- return version in ['', 'smp', 'smpx', 'pae']
+ # True if version name suggests a released kernel,
+ # not some release candidate or experimental kernel name
+ # e.g. 2.6.18-smp-200.0 includes no other text, underscores, etc
+ version = version.strip('0123456789.-')
+ return version in ['', 'smp', 'smpx', 'pae']
def is_release_candidate(version):
- # True if version names a released kernel or release candidate,
- # not some experimental name containing arbitrary text
- # e.g. 2.6.18-smp-220.0_rc3 but not 2.6.18_patched
- version = re.sub(r'[_-]rc\d+', '', version)
- return is_released_kernel(version)
-
-
+ # True if version names a released kernel or release candidate,
+ # not some experimental name containing arbitrary text
+ # e.g. 2.6.18-smp-220.0_rc3 but not 2.6.18_patched
+ version = re.sub(r'[_-]rc\d+', '', version)
+ return is_released_kernel(version)
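
Worked examples of the encoding scheme described in the header comment
above (return values follow directly from the padding rules):

    version_encode('2.6.0')        # -> '02.06.00-rc99'
    version_encode('2.6.1-rc1')    # -> '02.06.01-rc01'
    version_encode('2.6.1')        # -> '02.06.01-rc99'
    # plain string comparison now matches release order:
    # '02.06.00-rc99' < '02.06.01-rc01' < '02.06.01-rc99'
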
diff --git a/client/bin/kernel_versions_unittest.py b/client/bin/kernel_versions_unittest.py
index fc21a65..b63ecc8 100755
--- a/client/bin/kernel_versions_unittest.py
+++ b/client/bin/kernel_versions_unittest.py
@@ -5,83 +5,83 @@
class kernel_versions_test(unittest.TestCase):
- def increases(self, kernels):
- for i in xrange(len(kernels)-1):
- k1 = kernels[i]
- k2 = kernels[i+1]
- ek1 = version_encode(k1)
- ek2 = version_encode(k2)
- self.assert_(ek1 < ek2,
- '%s (-> %s) should sort < %s (-> %s)'
- % (k1, ek1, k2, ek2) )
+ def increases(self, kernels):
+ for i in xrange(len(kernels)-1):
+ k1 = kernels[i]
+ k2 = kernels[i+1]
+ ek1 = version_encode(k1)
+ ek2 = version_encode(k2)
+ self.assert_(ek1 < ek2,
+ '%s (-> %s) should sort < %s (-> %s)'
+ % (k1, ek1, k2, ek2) )
- def test_version_encode(self):
- series1 = [
- '2.6',
- '2.6.0',
- '2.6.1-rc1',
- '2.6.1-rc1_fix',
- '2.6.1-rc1_patch',
- '2.6.1-rc9',
- '2.6.1-rc9-mm1',
- '2.6.1-rc9-mm2',
- '2.6.1-rc10',
- '2.6.1-rc98',
- '2.6.1',
- '2.6.1_patch',
- '2.6.9',
- '2.6.10',
- '2.6.99',
- '2.7',
- '2.9.99',
- '2.10.0',
- '99.99.99',
- 'UNKNOWN',
- ]
- self.increases(series1)
- self.increases(['pathX'+k for k in series1])
- series2 = [
- '2.6.18-smp-220',
- '2.6.18-smp-220.0',
- '2.6.18-smp-220.1_rc1',
- '2.6.18-smp-220.1_rc1_fix',
- '2.6.18-smp-220.1_rc1_patch',
- '2.6.18-smp-220.1_rc9',
- '2.6.18-smp-220.1_rc9_mm1',
- '2.6.18-smp-220.1_rc9_mm2',
- '2.6.18-smp-220.1_rc10',
- '2.6.18-smp-220.1_rc98',
- '2.6.18-smp-220.1',
- '2.6.18-smp-220.1_patch',
- '2.6.18-smp-220.9',
- '2.6.18-smp-220.10',
- '2.6.18-smp-220.99',
- '2.6.18-smp-221',
- 'UNKNOWN',
- ]
- self.increases(series2)
- self.increases(['pathX'+k for k in series2])
+ def test_version_encode(self):
+ series1 = [
+ '2.6',
+ '2.6.0',
+ '2.6.1-rc1',
+ '2.6.1-rc1_fix',
+ '2.6.1-rc1_patch',
+ '2.6.1-rc9',
+ '2.6.1-rc9-mm1',
+ '2.6.1-rc9-mm2',
+ '2.6.1-rc10',
+ '2.6.1-rc98',
+ '2.6.1',
+ '2.6.1_patch',
+ '2.6.9',
+ '2.6.10',
+ '2.6.99',
+ '2.7',
+ '2.9.99',
+ '2.10.0',
+ '99.99.99',
+ 'UNKNOWN',
+ ]
+ self.increases(series1)
+ self.increases(['pathX'+k for k in series1])
+ series2 = [
+ '2.6.18-smp-220',
+ '2.6.18-smp-220.0',
+ '2.6.18-smp-220.1_rc1',
+ '2.6.18-smp-220.1_rc1_fix',
+ '2.6.18-smp-220.1_rc1_patch',
+ '2.6.18-smp-220.1_rc9',
+ '2.6.18-smp-220.1_rc9_mm1',
+ '2.6.18-smp-220.1_rc9_mm2',
+ '2.6.18-smp-220.1_rc10',
+ '2.6.18-smp-220.1_rc98',
+ '2.6.18-smp-220.1',
+ '2.6.18-smp-220.1_patch',
+ '2.6.18-smp-220.9',
+ '2.6.18-smp-220.10',
+ '2.6.18-smp-220.99',
+ '2.6.18-smp-221',
+ 'UNKNOWN',
+ ]
+ self.increases(series2)
+ self.increases(['pathX'+k for k in series2])
- releases = ['2.6.1' , '2.6.18-smp-220.0' ]
- candidates = ['2.6.1-rc1' , '2.6.18-smp-220.0_rc1']
- experiments = ['2.6.1-patch', '2.6.1-rc1_patch',
- '2.6.18-smp-220.0_patch', 'UNKNOWN']
+ releases = ['2.6.1' , '2.6.18-smp-220.0' ]
+ candidates = ['2.6.1-rc1' , '2.6.18-smp-220.0_rc1']
+ experiments = ['2.6.1-patch', '2.6.1-rc1_patch',
+ '2.6.18-smp-220.0_patch', 'UNKNOWN']
- def test_is_released_kernel(self):
- for v in self.releases:
- self.assert_( is_released_kernel(v))
- for v in self.candidates + self.experiments:
- self.assert_(not is_released_kernel(v))
+ def test_is_released_kernel(self):
+ for v in self.releases:
+ self.assert_( is_released_kernel(v))
+ for v in self.candidates + self.experiments:
+ self.assert_(not is_released_kernel(v))
- def test_is_release_candidate(self):
- for v in self.releases + self.candidates:
- self.assert_( is_release_candidate(v))
- for v in self.experiments:
- self.assert_(not is_release_candidate(v))
+ def test_is_release_candidate(self):
+ for v in self.releases + self.candidates:
+ self.assert_( is_release_candidate(v))
+ for v in self.experiments:
+ self.assert_(not is_release_candidate(v))
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
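
The suite runs standalone through the unittest.main() entry point above;
to run a single case instead, the standard-library loader works, e.g.:

    import unittest
    import kernel_versions_unittest
    suite = unittest.TestLoader().loadTestsFromName(
            'kernel_versions_test.test_version_encode',
            kernel_versions_unittest)
    unittest.TextTestRunner(verbosity=2).run(suite)
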
diff --git a/client/bin/kernelexpand-test.py b/client/bin/kernelexpand-test.py
index 3c34010..24b4ce0 100755
--- a/client/bin/kernelexpand-test.py
+++ b/client/bin/kernelexpand-test.py
@@ -11,136 +11,136 @@
akpml = 'http://www.example.com/mirror/akpm/'
mirrorA = [
- [ akpm, akpml ],
- [ km, kml ],
+ [ akpm, akpml ],
+ [ km, kml ],
]
class kernelexpandTest(unittest.TestCase):
- def test_decompose_simple(self):
- correct = [
- [ km + 'v2.6/linux-2.6.23.tar.bz2' ]
- ]
- sample = decompose_kernel('2.6.23')
- self.assertEqual(sample, correct)
+ def test_decompose_simple(self):
+ correct = [
+ [ km + 'v2.6/linux-2.6.23.tar.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23')
+ self.assertEqual(sample, correct)
- def test_decompose_fail(self):
- success = False
- try:
- sample = decompose_kernel('1.0.0.0.0')
- success = True
- except NameError:
- pass
- except Exception, e:
- self.fail('expected NameError, got something else')
+ def test_decompose_fail(self):
+ success = False
+ try:
+ sample = decompose_kernel('1.0.0.0.0')
+ success = True
+ except NameError:
+ pass
+ except Exception, e:
+ self.fail('expected NameError, got something else')
- if success:
- self.fail('expected NameError, was successful')
+ if success:
+ self.fail('expected NameError, was successful')
- def test_decompose_rcN(self):
- correct = [
- [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2']
- ]
- sample = decompose_kernel('2.6.23-rc1')
- self.assertEqual(sample, correct)
-
-
- def test_decompose_mmN(self):
- correct = [
- [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
- [ akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
- ]
- sample = decompose_kernel('2.6.23-mm1')
- self.assertEqual(sample, correct)
+ def test_decompose_rcN(self):
+ correct = [
+ [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2']
+ ]
+ sample = decompose_kernel('2.6.23-rc1')
+ self.assertEqual(sample, correct)
- def test_decompose_gitN(self):
- correct = [
- [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
- [ km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
- km + 'v2.6/snapshots/patch-2.6.23-git1.bz2']
- ]
- sample = decompose_kernel('2.6.23-git1')
- self.assertEqual(sample, correct)
+ def test_decompose_mmN(self):
+ correct = [
+ [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
+ [ akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23-mm1')
+ self.assertEqual(sample, correct)
- def test_decompose_rcN_mmN(self):
- correct = [
- [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ],
- [ akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2']
- ]
- sample = decompose_kernel('2.6.23-rc1-mm1')
- self.assertEqual(sample, correct)
+ def test_decompose_gitN(self):
+ correct = [
+ [ km + 'v2.6/linux-2.6.23.tar.bz2' ],
+ [ km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+ km + 'v2.6/snapshots/patch-2.6.23-git1.bz2']
+ ]
+ sample = decompose_kernel('2.6.23-git1')
+ self.assertEqual(sample, correct)
- def test_mirrorA_simple(self):
- correct = [
- [ kml + 'v2.6/linux-2.6.23.tar.bz2',
- km + 'v2.6/linux-2.6.23.tar.bz2' ]
- ]
- sample = decompose_kernel('2.6.23')
- sample = mirror_kernel_components(mirrorA, sample)
-
- self.assertEqual(sample, correct)
+ def test_decompose_rcN_mmN(self):
+ correct = [
+ [ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ],
+ [ akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2']
+ ]
+ sample = decompose_kernel('2.6.23-rc1-mm1')
+ self.assertEqual(sample, correct)
- def test_mirrorA_rcN(self):
- correct = [
- [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ]
- ]
- sample = decompose_kernel('2.6.23-rc1')
- sample = mirror_kernel_components(mirrorA, sample)
- self.assertEqual(sample, correct)
+ def test_mirrorA_simple(self):
+ correct = [
+ [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+ km + 'v2.6/linux-2.6.23.tar.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23')
+ sample = mirror_kernel_components(mirrorA, sample)
-
- def test_mirrorA_mmN(self):
- correct = [
- [ kml + 'v2.6/linux-2.6.23.tar.bz2',
- km + 'v2.6/linux-2.6.23.tar.bz2'],
- [ akpml + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
- kml + 'people/akpm/patches/2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
- akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
- ]
-
- sample = decompose_kernel('2.6.23-mm1')
- sample = mirror_kernel_components(mirrorA, sample)
- self.assertEqual(sample, correct)
+ self.assertEqual(sample, correct)
- def test_mirrorA_gitN(self):
- correct = [
- [ kml + 'v2.6/linux-2.6.23.tar.bz2',
- km + 'v2.6/linux-2.6.23.tar.bz2'],
- [ kml + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
- kml + 'v2.6/snapshots/patch-2.6.23-git1.bz2',
- km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
- km + 'v2.6/snapshots/patch-2.6.23-git1.bz2' ]
- ]
- sample = decompose_kernel('2.6.23-git1')
- sample = mirror_kernel_components(mirrorA, sample)
- self.assertEqual(sample, correct)
+ def test_mirrorA_rcN(self):
+ correct = [
+ [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23-rc1')
+ sample = mirror_kernel_components(mirrorA, sample)
+ self.assertEqual(sample, correct)
- def test_mirrorA_rcN_mmN(self):
- correct = [
- [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
- km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2'],
- [ akpml + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
- kml + 'people/akpm/patches/2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
- akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2' ]
- ]
- sample = decompose_kernel('2.6.23-rc1-mm1')
- sample = mirror_kernel_components(mirrorA, sample)
- self.assertEqual(sample, correct)
+ def test_mirrorA_mmN(self):
+ correct = [
+ [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+ km + 'v2.6/linux-2.6.23.tar.bz2'],
+ [ akpml + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
+ kml + 'people/akpm/patches/2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2',
+ akpm + '2.6/2.6.23/2.6.23-mm1/2.6.23-mm1.bz2' ]
+ ]
+
+ sample = decompose_kernel('2.6.23-mm1')
+ sample = mirror_kernel_components(mirrorA, sample)
+ self.assertEqual(sample, correct)
+
+
+ def test_mirrorA_gitN(self):
+ correct = [
+ [ kml + 'v2.6/linux-2.6.23.tar.bz2',
+ km + 'v2.6/linux-2.6.23.tar.bz2'],
+ [ kml + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+ kml + 'v2.6/snapshots/patch-2.6.23-git1.bz2',
+ km + 'v2.6/snapshots/old/patch-2.6.23-git1.bz2',
+ km + 'v2.6/snapshots/patch-2.6.23-git1.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23-git1')
+ sample = mirror_kernel_components(mirrorA, sample)
+ self.assertEqual(sample, correct)
+
+
+ def test_mirrorA_rcN_mmN(self):
+ correct = [
+ [ kml + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ kml + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/v2.6.23/linux-2.6.23-rc1.tar.bz2',
+ km + 'v2.6/testing/linux-2.6.23-rc1.tar.bz2'],
+ [ akpml + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
+ kml + 'people/akpm/patches/2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2',
+ akpm + '2.6/2.6.23-rc1/2.6.23-rc1-mm1/2.6.23-rc1-mm1.bz2' ]
+ ]
+ sample = decompose_kernel('2.6.23-rc1-mm1')
+ sample = mirror_kernel_components(mirrorA, sample)
+ self.assertEqual(sample, correct)
if __name__ == '__main__':
- unittest.main()
+ unittest.main()
diff --git a/client/bin/kernelexpand.py b/client/bin/kernelexpand.py
index 73028fa..e23b865 100755
--- a/client/bin/kernelexpand.py
+++ b/client/bin/kernelexpand.py
@@ -12,180 +12,180 @@
kernel = 'http://www.kernel.org/pub/linux/kernel/'
mappings = [
- [ r'^\d+\.\d+\.\d+$', '', True, [
- kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
- ]],
- [ r'^\d+\.\d+\.\d+\.\d+$', '', True, [
- kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
- ]],
- [ r'-rc\d+$', '%(minor-prev)s', True, [
- kernel + 'v%(major)s/testing/v%(minor)s/linux-%(full)s.tar.bz2',
- kernel + 'v%(major)s/testing/linux-%(full)s.tar.bz2',
- ]],
- [ r'-(git|bk)\d+$', '%(base)s', False, [
- kernel + 'v%(major)s/snapshots/old/patch-%(full)s.bz2',
- kernel + 'v%(major)s/snapshots/patch-%(full)s.bz2',
- ]],
- [ r'-mm\d+$', '%(base)s', False, [
- kernel + 'people/akpm/patches/' +
- '%(major)s/%(base)s/%(full)s/%(full)s.bz2'
- ]],
- [ r'-mjb\d+$', '%(base)s', False, [
- kernel + 'people/mbligh/%(base)s/patch-%(full)s.bz2'
- ]]
+ [ r'^\d+\.\d+\.\d+$', '', True, [
+ kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
+ ]],
+ [ r'^\d+\.\d+\.\d+\.\d+$', '', True, [
+ kernel + 'v%(major)s/linux-%(full)s.tar.bz2'
+ ]],
+ [ r'-rc\d+$', '%(minor-prev)s', True, [
+ kernel + 'v%(major)s/testing/v%(minor)s/linux-%(full)s.tar.bz2',
+ kernel + 'v%(major)s/testing/linux-%(full)s.tar.bz2',
+ ]],
+ [ r'-(git|bk)\d+$', '%(base)s', False, [
+ kernel + 'v%(major)s/snapshots/old/patch-%(full)s.bz2',
+ kernel + 'v%(major)s/snapshots/patch-%(full)s.bz2',
+ ]],
+ [ r'-mm\d+$', '%(base)s', False, [
+ kernel + 'people/akpm/patches/' +
+ '%(major)s/%(base)s/%(full)s/%(full)s.bz2'
+ ]],
+ [ r'-mjb\d+$', '%(base)s', False, [
+ kernel + 'people/mbligh/%(base)s/patch-%(full)s.bz2'
+ ]]
]
def decompose_kernel_once(kernel):
- ##print "S<" + kernel + ">"
- for mapping in mappings:
- (suffix, becomes, is_full, patch_templates) = mapping
+ ##print "S<" + kernel + ">"
+ for mapping in mappings:
+ (suffix, becomes, is_full, patch_templates) = mapping
- params = {}
+ params = {}
- match = re.search(r'^(.*)' + suffix, kernel)
- if not match:
- continue
+ match = re.search(r'^(.*)' + suffix, kernel)
+ if not match:
+ continue
- # Generate the parameters for the patches:
- # full => full kernel name
- # base => all but the matched suffix
- # minor => 2.n.m
- # major => 2.n
- # minor-prev => 2.n.m-1
- params['full'] = kernel
- params['base'] = match.group(1)
+ # Generate the parameters for the patches:
+ # full => full kernel name
+ # base => all but the matches suffix
+ # minor => 2.n.m
+ # major => 2.n
+ # minor-prev => 2.n.m-1
+ params['full'] = kernel
+ params['base'] = match.group(1)
- match = re.search(r'^((\d+\.\d+)\.(\d+))', kernel)
- if not match:
- raise "unable to determine major/minor version"
- params['minor'] = match.group(1)
- params['major'] = match.group(2)
- params['minor-prev'] = match.group(2) + \
- '.%d' % (int(match.group(3)) - 1)
+ match = re.search(r'^((\d+\.\d+)\.(\d+))', kernel)
+ if not match:
+ raise "unable to determine major/minor version"
+ params['minor'] = match.group(1)
+ params['major'] = match.group(2)
+ params['minor-prev'] = match.group(2) + \
+ '.%d' % (int(match.group(3)) - 1)
- # Build the new kernel and patch list.
- new_kernel = becomes % params
- patch_list = []
- for template in patch_templates:
- patch_list.append(template % params)
-
- return (is_full, new_kernel, patch_list)
+ # Build the new kernel and patch list.
+ new_kernel = becomes % params
+ patch_list = []
+ for template in patch_templates:
+ patch_list.append(template % params)
- return (True, kernel, None)
+ return (is_full, new_kernel, patch_list)
+
+ return (True, kernel, None)
def decompose_kernel(kernel):
- kernel_patches = []
+ kernel_patches = []
- done = False
- while not done:
- (done, kernel, patch_list) = decompose_kernel_once(kernel)
- if patch_list:
- kernel_patches.insert(0, patch_list)
- if not len(kernel_patches):
- raise NameError('kernelexpand: %s: unknown kernel' % (kernel))
+ done = False
+ while not done:
+ (done, kernel, patch_list) = decompose_kernel_once(kernel)
+ if patch_list:
+ kernel_patches.insert(0, patch_list)
+ if not len(kernel_patches):
+ raise NameError('kernelexpand: %s: unknown kernel' % (kernel))
- return kernel_patches
+ return kernel_patches
# Look for and add potential mirrors.
def mirror_kernel_components(mirrors, components):
- new_components = []
- for component in components:
- new_patches = []
- for mirror in mirrors:
- (prefix, local) = mirror
- for patch in component:
- if patch.startswith(prefix):
- new_patch = local + \
- patch[len(prefix):]
- new_patches.append(new_patch)
- for patch in component:
- new_patches.append(patch)
- new_components.append(new_patches)
-
- return new_components
+ new_components = []
+ for component in components:
+ new_patches = []
+ for mirror in mirrors:
+ (prefix, local) = mirror
+ for patch in component:
+ if patch.startswith(prefix):
+ new_patch = local + \
+ patch[len(prefix):]
+ new_patches.append(new_patch)
+ for patch in component:
+ new_patches.append(patch)
+ new_components.append(new_patches)
+
+ return new_components
def url_accessible(url):
- status = os.system("wget --spider -q '%s'" % (url))
- #print url + ": status=%d" % (status)
-
- return status == 0
+ status = os.system("wget --spider -q '%s'" % (url))
+ #print url + ": status=%d" % (status)
+
+ return status == 0
def select_kernel_components(components):
- new_components = []
- for component in components:
- new_patches = []
- for patch in component:
- if url_accessible(patch):
- new_patches.append(patch)
- break
- if not len(new_patches):
- new_patches.append(component[-1])
- new_components.append(new_patches)
- return new_components
+ new_components = []
+ for component in components:
+ new_patches = []
+ for patch in component:
+ if url_accessible(patch):
+ new_patches.append(patch)
+ break
+ if not len(new_patches):
+ new_patches.append(component[-1])
+ new_components.append(new_patches)
+ return new_components
def expand_classic(kernel, mirrors):
- components = decompose_kernel(kernel)
- if mirrors:
- components = mirror_kernel_components(mirrors, components)
- components = select_kernel_components(components)
+ components = decompose_kernel(kernel)
+ if mirrors:
+ components = mirror_kernel_components(mirrors, components)
+ components = select_kernel_components(components)
- patches = []
- for component in components:
- patches.append(component[0])
+ patches = []
+ for component in components:
+ patches.append(component[0])
- return patches
+ return patches
if __name__ == '__main__':
- from optparse import OptionParser
+ from optparse import OptionParser
- parser = OptionParser()
+ parser = OptionParser()
- parser.add_option("-m", "--mirror",
- type="string", dest="mirror", action="append", nargs=2,
- help="mirror prefix")
- parser.add_option("-v", "--no-validate", dest="validate",
- action="store_false", default=True,
- help="prune invalid entries")
+ parser.add_option("-m", "--mirror",
+ type="string", dest="mirror", action="append", nargs=2,
+ help="mirror prefix")
+ parser.add_option("-v", "--no-validate", dest="validate",
+ action="store_false", default=True,
+ help="prune invalid entries")
- def usage():
- parser.print_help()
- sys.exit(1)
+ def usage():
+ parser.print_help()
+ sys.exit(1)
- options, args = parser.parse_args()
+ options, args = parser.parse_args()
- # Check for a kernel version
- if len(args) != 1:
- usage()
- kernel = args[0]
+ # Check for a kernel version
+ if len(args) != 1:
+ usage()
+ kernel = args[0]
- #mirrors = [
- # [ 'http://www.kernel.org/pub/linux/kernel/v2.4',
- # 'http://kernel.beaverton.ibm.com/mirror/v2.4' ],
- # [ 'http://www.kernel.org/pub/linux/kernel/v2.6',
- # 'http://kernel.beaverton.ibm.com/mirror/v2.6' ],
- # [ 'http://www.kernel.org/pub/linux/kernel/people/akpm/patches',
- # 'http://kernel.beaverton.ibm.com/mirror/akpm' ],
- #]
- mirrors = options.mirror
+ #mirrors = [
+ # [ 'http://www.kernel.org/pub/linux/kernel/v2.4',
+ # 'http://kernel.beaverton.ibm.com/mirror/v2.4' ],
+ # [ 'http://www.kernel.org/pub/linux/kernel/v2.6',
+ # 'http://kernel.beaverton.ibm.com/mirror/v2.6' ],
+ # [ 'http://www.kernel.org/pub/linux/kernel/people/akpm/patches',
+ # 'http://kernel.beaverton.ibm.com/mirror/akpm' ],
+ #]
+ mirrors = options.mirror
- try:
- components = decompose_kernel(kernel)
- except NameError, e:
- sys.stderr.write(e.args[0] + "\n")
- sys.exit(1)
+ try:
+ components = decompose_kernel(kernel)
+ except NameError, e:
+ sys.stderr.write(e.args[0] + "\n")
+ sys.exit(1)
- if mirrors:
- components = mirror_kernel_components(mirrors, components)
+ if mirrors:
+ components = mirror_kernel_components(mirrors, components)
- if options.validate:
- components = select_kernel_components(components)
+ if options.validate:
+ components = select_kernel_components(components)
- # Dump them out.
- for component in components:
- print " ".join(component)
+ # Dump them out.
+ for component in components:
+ print " ".join(component)
diff --git a/client/bin/os_dep.py b/client/bin/os_dep.py
index f61d46d..9022913 100644
--- a/client/bin/os_dep.py
+++ b/client/bin/os_dep.py
@@ -8,30 +8,30 @@
"""
def command(cmd):
- # this could use '/usr/bin/which', I suppose. But this seems simpler
- for dir in os.environ['PATH'].split(':'):
- file = os.path.join(dir, cmd)
- if os.path.exists(file):
- return file
- raise ValueError('Missing command: %s' % cmd)
+ # this could use '/usr/bin/which', I suppose. But this seems simpler
+ for dir in os.environ['PATH'].split(':'):
+ file = os.path.join(dir, cmd)
+ if os.path.exists(file):
+ return file
+ raise ValueError('Missing command: %s' % cmd)
def commands(*cmds):
- results = []
- for cmd in cmds:
- results.append(command(cmd))
- return results
+ results = []
+ for cmd in cmds:
+ results.append(command(cmd))
+ return results
def library(lib):
- lddirs = [x.rstrip() for x in open('/etc/ld.so.conf', 'r').readlines()]
- for dir in ['/lib', '/usr/lib'] + lddirs:
- file = os.path.join(dir, lib)
- if os.path.exists(file):
- return file
- raise ValueError('Missing library: %s' % lib)
+ lddirs = [x.rstrip() for x in open('/etc/ld.so.conf', 'r').readlines()]
+ for dir in ['/lib', '/usr/lib'] + lddirs:
+ file = os.path.join(dir, lib)
+ if os.path.exists(file):
+ return file
+ raise ValueError('Missing library: %s' % lib)
def libraries(*libs):
- results = []
- for lib in libs:
- results.append(library(lib))
- return results
+ results = []
+ for lib in libs:
+ results.append(library(lib))
+ return results
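
Typical use of these helpers is to fail a test early when a required
tool or library is absent, for example:

    gcc = command('gcc')           # -> e.g. '/usr/bin/gcc'; ValueError if missing
    libc = library('libc.so.6')    # searches /lib, /usr/lib and ld.so.conf dirs
    commands('make', 'patch')      # raises on the first missing command
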
diff --git a/client/bin/package.py b/client/bin/package.py
index c889a9c..42a8604 100644
--- a/client/bin/package.py
+++ b/client/bin/package.py
@@ -1,5 +1,5 @@
"""
-Functions to handle software packages. The functions covered here aim to be
+Functions to handle software packages. The functions covered here aim to be
generic, with implementations that deal with different package managers, such
as dpkg and rpm.
"""
@@ -15,277 +15,277 @@
def __rpm_info(rpm_package):
- """\
- Private function that returns a dictionary with information about an
- RPM package file
- - type: Package management program that handles the file
- - system_support: If the package management program is installed on the
- system or not
- - source: If it is a source (True) or binary (False) package
- - version: The package version (or name), that is used to check against the
- package manager if the package is installed
- - arch: The architecture for which a binary package was built
- - installed: Whether the package is installed (True) on the system or not
- (False)
- """
- # We will make good use of what the file command has to tell us about the
- # package :)
- file_result = utils.system_output('file ' + rpm_package)
- package_info = {}
- package_info['type'] = 'rpm'
- try:
- os_dep.command('rpm')
- # Build the command strings that will be used to get package info
- # s_cmd - Command to determine if package is a source package
- # a_cmd - Command to determine package architecture
- # v_cmd - Command to determine package version
- # i_cmd - Command to determine if package is installed
- s_cmd = 'rpm -qp --qf %{SOURCE} ' + rpm_package + ' 2>/dev/null'
- a_cmd = 'rpm -qp --qf %{ARCH} ' + rpm_package + ' 2>/dev/null'
- v_cmd = 'rpm -qp ' + rpm_package + ' 2>/dev/null'
- i_cmd = 'rpm -q ' + utils.system_output(v_cmd) + ' > /dev/null 2>&1'
+ """\
+ Private function that returns a dictionary with information about an
+ RPM package file
+ - type: Package management program that handles the file
+ - system_support: If the package management program is installed on the
+ system or not
+ - source: If it is a source (True) or binary (False) package
+ - version: The package version (or name), that is used to check against the
+ package manager if the package is installed
+ - arch: The architecture for which a binary package was built
+ - installed: Whether the package is installed (True) on the system or not
+ (False)
+ """
+ # We will make good use of what the file command has to tell us about the
+ # package :)
+ file_result = utils.system_output('file ' + rpm_package)
+ package_info = {}
+ package_info['type'] = 'rpm'
+ try:
+ os_dep.command('rpm')
+ # Build the command strings that will be used to get package info
+ # s_cmd - Command to determine if package is a source package
+ # a_cmd - Command to determine package architecture
+ # v_cmd - Command to determine package version
+        # i_cmd - Command to determine if package is installed
+ s_cmd = 'rpm -qp --qf %{SOURCE} ' + rpm_package + ' 2>/dev/null'
+ a_cmd = 'rpm -qp --qf %{ARCH} ' + rpm_package + ' 2>/dev/null'
+ v_cmd = 'rpm -qp ' + rpm_package + ' 2>/dev/null'
+ i_cmd = 'rpm -q ' + utils.system_output(v_cmd) + ' 2>&1 >/dev/null'
- package_info['system_support'] = True
- # Checking whether this is a source or src package
- source = utils.system_output(s_cmd)
- if source == '(none)':
- package_info['source'] = False
- else:
- package_info['source'] = True
- package_info['version'] = utils.system_output(v_cmd)
- package_info['arch'] = utils.system_output(a_cmd)
- # Checking if package is installed
- try:
- utils.system(i_cmd)
- package_info['installed'] = True
- except:
- package_info['installed'] = False
+ package_info['system_support'] = True
+        # Check whether this is a source (src) package
+ source = utils.system_output(s_cmd)
+ if source == '(none)':
+ package_info['source'] = False
+ else:
+ package_info['source'] = True
+ package_info['version'] = utils.system_output(v_cmd)
+ package_info['arch'] = utils.system_output(a_cmd)
+ # Checking if package is installed
+ try:
+ utils.system(i_cmd)
+ package_info['installed'] = True
+ except:
+ package_info['installed'] = False
- except:
- package_info['system_support'] = False
- package_info['installed'] = False
- # File gives a wealth of information about rpm packages.
- # However, we can't trust all this info, as incorrectly
- # packaged rpms can report some wrong values.
- # It's better than nothing though :)
- if len(file_result.split(' ')) == 6:
- # Figure if package is a source package
- if file_result.split(' ')[3] == 'src':
- package_info['source'] = True
- elif file_result.split(' ')[3] == 'bin':
- package_info['source'] = False
- else:
- package_info['source'] = False
- # Get architecture
- package_info['arch'] = file_result.split(' ')[4]
- # Get version
- package_info['version'] = file_result.split(' ')[5]
- elif len(file_result.split(' ')) == 5:
- # Figure if package is a source package
- if file_result.split(' ')[3] == 'src':
- package_info['source'] = True
- elif file_result.split(' ')[3] == 'bin':
- package_info['source'] = False
- else:
- package_info['source'] = False
- # When the arch param is missing on file, we assume noarch
- package_info['arch'] = 'noarch'
- # Get version
- package_info['version'] = file_result.split(' ')[4]
- else:
- # If everything else fails...
- package_info['source'] = False
- package_info['arch'] = 'Not Available'
- package_info['version'] = 'Not Available'
- return package_info
+ except:
+ package_info['system_support'] = False
+ package_info['installed'] = False
+ # File gives a wealth of information about rpm packages.
+ # However, we can't trust all this info, as incorrectly
+ # packaged rpms can report some wrong values.
+ # It's better than nothing though :)
+ if len(file_result.split(' ')) == 6:
+ # Figure if package is a source package
+ if file_result.split(' ')[3] == 'src':
+ package_info['source'] = True
+ elif file_result.split(' ')[3] == 'bin':
+ package_info['source'] = False
+ else:
+ package_info['source'] = False
+ # Get architecture
+ package_info['arch'] = file_result.split(' ')[4]
+ # Get version
+ package_info['version'] = file_result.split(' ')[5]
+ elif len(file_result.split(' ')) == 5:
+ # Figure if package is a source package
+ if file_result.split(' ')[3] == 'src':
+ package_info['source'] = True
+ elif file_result.split(' ')[3] == 'bin':
+ package_info['source'] = False
+ else:
+ package_info['source'] = False
+ # When the arch param is missing on file, we assume noarch
+ package_info['arch'] = 'noarch'
+ # Get version
+ package_info['version'] = file_result.split(' ')[4]
+ else:
+ # If everything else fails...
+ package_info['source'] = False
+ package_info['arch'] = 'Not Available'
+ package_info['version'] = 'Not Available'
+ return package_info
def __dpkg_info(dpkg_package):
- """\
- Private function that returns a dictionary with information about a
- dpkg package file
- - type: Package management program that handles the file
- - system_support: If the package management program is installed on the
- system or not
- - source: If it is a source (True) our binary (False) package
- - version: The package version (or name), that is used to check against the
- package manager if the package is installed
- - arch: The architecture for which a binary package was built
- - installed: Whether the package is installed (True) on the system or not
- (False)
- """
- # We will make good use of what the file command has to tell us about the
- # package :)
- file_result = utils.system_output('file ' + dpkg_package)
- package_info = {}
- package_info['type'] = 'dpkg'
- # There's no single debian source package as is the case
- # with RPM
- package_info['source'] = False
- try:
- os_dep.command('dpkg')
- # Build the command strings that will be used to get package info
- # a_cmd - Command to determine package architecture
- # v_cmd - Command to determine package version
- # i_cmd - Command to determiine if package is installed
- a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
- v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
- i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'
+ """\
+ Private function that returns a dictionary with information about a
+ dpkg package file
+ - type: Package management program that handles the file
+ - system_support: If the package management program is installed on the
+ system or not
+    - source: If it is a source (True) or binary (False) package
+ - version: The package version (or name), that is used to check against the
+ package manager if the package is installed
+ - arch: The architecture for which a binary package was built
+ - installed: Whether the package is installed (True) on the system or not
+ (False)
+ """
+ # We will make good use of what the file command has to tell us about the
+ # package :)
+ file_result = utils.system_output('file ' + dpkg_package)
+ package_info = {}
+ package_info['type'] = 'dpkg'
+    # Unlike RPM, a Debian source package is not a single file,
+    # so dpkg packages are always treated as binary here
+ package_info['source'] = False
+ try:
+ os_dep.command('dpkg')
+ # Build the command strings that will be used to get package info
+ # a_cmd - Command to determine package architecture
+ # v_cmd - Command to determine package version
+        # i_cmd - Command to determine if package is installed
+ a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
+ v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
+ i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'
- package_info['system_support'] = True
- package_info['version'] = utils.system_output(v_cmd)
- package_info['arch'] = utils.system_output(a_cmd)
- # Checking if package is installed
- package_status = utils.system_output(i_cmd, ignore_status=True)
- not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
- dpkg_not_installed = re.search(not_inst_pattern, package_status)
- if dpkg_not_installed:
- package_info['installed'] = False
- else:
- package_info['installed'] = True
+ package_info['system_support'] = True
+ package_info['version'] = utils.system_output(v_cmd)
+ package_info['arch'] = utils.system_output(a_cmd)
+ # Checking if package is installed
+ package_status = utils.system_output(i_cmd, ignore_status=True)
+ not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
+ dpkg_not_installed = re.search(not_inst_pattern, package_status)
+ if dpkg_not_installed:
+ package_info['installed'] = False
+ else:
+ package_info['installed'] = True
- except:
- package_info['system_support'] = False
- package_info['installed'] = False
- # The output of file is not as generous for dpkg files as
- # it is with rpm files
- package_info['arch'] = 'Not Available'
- package_info['version'] = 'Not Available'
+ except:
+ package_info['system_support'] = False
+ package_info['installed'] = False
+ # The output of file is not as generous for dpkg files as
+ # it is with rpm files
+ package_info['arch'] = 'Not Available'
+ package_info['version'] = 'Not Available'
- return package_info
+ return package_info
def info(package):
- """\
- Returns a dictionary with package information about a given package file:
- - type: Package management program that handles the file
- - system_support: If the package management program is installed on the
- system or not
- - source: If it is a source (True) our binary (False) package
- - version: The package version (or name), that is used to check against the
- package manager if the package is installed
- - arch: The architecture for which a binary package was built
- - installed: Whether the package is installed (True) on the system or not
- (False)
+ """\
+ Returns a dictionary with package information about a given package file:
+ - type: Package management program that handles the file
+ - system_support: If the package management program is installed on the
+ system or not
+    - source: If it is a source (True) or binary (False) package
+ - version: The package version (or name), that is used to check against the
+ package manager if the package is installed
+ - arch: The architecture for which a binary package was built
+ - installed: Whether the package is installed (True) on the system or not
+ (False)
- Implemented package types:
- - 'dpkg' - dpkg (debian, ubuntu) package files
- - 'rpm' - rpm (red hat, suse) package files
- Raises an exception if the package type is not one of the implemented
- package types.
- """
- if not os.path.isfile(package):
- raise ValueError('invalid file %s to verify' % package)
- # Use file and libmagic to determine the actual package file type.
- file_result = utils.system_output('file ' + package)
- for package_manager in KNOWN_PACKAGE_MANAGERS:
- if package_manager == 'rpm':
- package_pattern = re.compile('RPM', re.IGNORECASE)
- elif package_manager == 'dpkg':
- package_pattern = re.compile('Debian', re.IGNORECASE)
+ Implemented package types:
+ - 'dpkg' - dpkg (debian, ubuntu) package files
+ - 'rpm' - rpm (red hat, suse) package files
+ Raises an exception if the package type is not one of the implemented
+ package types.
+ """
+ if not os.path.isfile(package):
+ raise ValueError('invalid file %s to verify' % package)
+ # Use file and libmagic to determine the actual package file type.
+ file_result = utils.system_output('file ' + package)
+ for package_manager in KNOWN_PACKAGE_MANAGERS:
+ if package_manager == 'rpm':
+ package_pattern = re.compile('RPM', re.IGNORECASE)
+ elif package_manager == 'dpkg':
+ package_pattern = re.compile('Debian', re.IGNORECASE)
- result = re.search(package_pattern, file_result)
+ result = re.search(package_pattern, file_result)
- if result and package_manager == 'rpm':
- return __rpm_info(package)
- elif result and package_manager == 'dpkg':
- return __dpkg_info(package)
+ if result and package_manager == 'rpm':
+ return __rpm_info(package)
+ elif result and package_manager == 'dpkg':
+ return __dpkg_info(package)
- # If it's not one of the implemented package manager methods, there's
- # not much that can be done, hence we throw an exception.
- raise error.PackageError('Unknown package type %s' % file_result)
+ # If it's not one of the implemented package manager methods, there's
+ # not much that can be done, hence we throw an exception.
+ raise error.PackageError('Unknown package type %s' % file_result)
def install(package, nodeps = False):
- """\
- Tries to install a package file. If the package is already installed,
- it prints a message to the user and ends gracefully. If nodeps is set to
- true, it will ignore package dependencies.
- """
- my_package_info = info(package)
- type = my_package_info['type']
- system_support = my_package_info['system_support']
- source = my_package_info['source']
- installed = my_package_info['installed']
+ """\
+ Tries to install a package file. If the package is already installed,
+ it prints a message to the user and ends gracefully. If nodeps is set to
+ true, it will ignore package dependencies.
+ """
+ my_package_info = info(package)
+ type = my_package_info['type']
+ system_support = my_package_info['system_support']
+ source = my_package_info['source']
+ installed = my_package_info['installed']
- if not system_support:
- e_msg = 'Client does not have package manager %s to handle %s install' \
- % (type, package)
- raise error.PackageError(e_msg)
+ if not system_support:
+ e_msg = 'Client does not have package manager %s to handle %s install' \
+ % (type, package)
+ raise error.PackageError(e_msg)
- opt_args = ''
- if type == 'rpm':
- if nodeps:
- opt_args = opt_args + '--nodeps'
- install_command = 'rpm %s -U %s' % (opt_args, package)
- if type == 'dpkg':
- if nodeps:
- opt_args = opt_args + '--force-depends'
- install_command = 'dpkg %s -i %s' % (opt_args, package)
+ opt_args = ''
+ if type == 'rpm':
+ if nodeps:
+ opt_args = opt_args + '--nodeps'
+ install_command = 'rpm %s -U %s' % (opt_args, package)
+ if type == 'dpkg':
+ if nodeps:
+ opt_args = opt_args + '--force-depends'
+ install_command = 'dpkg %s -i %s' % (opt_args, package)
- # RPM source packages can be installed along with the binary versions
- # with this check
- if installed and not source:
- return 'Package %s is already installed' % package
+ # RPM source packages can be installed along with the binary versions
+ # with this check
+ if installed and not source:
+ return 'Package %s is already installed' % package
- # At this point, the most likely thing to go wrong is that there are
- # unmet dependencies for the package. We won't cover this case, at
- # least for now.
- utils.system(install_command)
- return 'Package %s was installed successfuly' % package
+ # At this point, the most likely thing to go wrong is that there are
+ # unmet dependencies for the package. We won't cover this case, at
+ # least for now.
+ utils.system(install_command)
+    return 'Package %s was installed successfully' % package
def convert(package, destination_format):
- """\
- Convert packages with the 'alien' utility. If alien is not installed, it
- throws a NotImplementedError exception.
- returns: filename of the package generated.
- """
- try:
- os_dep.command('alien')
- except:
- e_msg = 'Cannot convert to %s, alien not installed' % destination_format
- raise error.TestError(e_msg)
+ """\
+    Convert packages with the 'alien' utility. If alien is not installed,
+    this raises error.TestError.
+ returns: filename of the package generated.
+ """
+ try:
+ os_dep.command('alien')
+ except:
+ e_msg = 'Cannot convert to %s, alien not installed' % destination_format
+ raise error.TestError(e_msg)
- # alien supports converting to many formats, but its interesting to map
- # convertions only for the implemented package types.
- if destination_format == 'dpkg':
- deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
- conv_output = utils.system_output('alien --to-deb %s 2>/dev/null' % package)
- converted_package = re.findall(deb_pattern, conv_output)[0]
- elif destination_format == 'rpm':
- rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
- conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null' % package)
- converted_package = re.findall(rpm_pattern, conv_output)[0]
- else:
- e_msg = 'Convertion to format %s not implemented' % destination_format
- raise NotImplementedError(e_msg)
+    # alien supports converting to many formats, but it only makes sense to
+    # map conversions for the implemented package types.
+ if destination_format == 'dpkg':
+ deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
+ conv_output = utils.system_output('alien --to-deb %s 2>/dev/null' % package)
+ converted_package = re.findall(deb_pattern, conv_output)[0]
+ elif destination_format == 'rpm':
+ rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
+ conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null' % package)
+ converted_package = re.findall(rpm_pattern, conv_output)[0]
+ else:
+        e_msg = 'Conversion to format %s not implemented' % destination_format
+ raise NotImplementedError(e_msg)
- print 'Package %s successfuly converted to %s' % \
- (os.path.basename(package), os.path.basename(converted_package))
- return os.path.abspath(converted_package)
+    print 'Package %s successfully converted to %s' % \
+ (os.path.basename(package), os.path.basename(converted_package))
+ return os.path.abspath(converted_package)
def os_support():
- """\
- Returns a dictionary with host os package support info:
- - rpm: True if system supports rpm packages, False otherwise
- - dpkg: True if system supports dpkg packages, False otherwise
- - conversion: True if the system can convert packages (alien installed),
- or False otherwise
- """
- support_info = {}
- for package_manager in KNOWN_PACKAGE_MANAGERS:
- try:
- os_dep.command(package_manager)
- support_info[package_manager] = True
- except:
- support_info[package_manager] = False
+ """\
+ Returns a dictionary with host os package support info:
+ - rpm: True if system supports rpm packages, False otherwise
+ - dpkg: True if system supports dpkg packages, False otherwise
+ - conversion: True if the system can convert packages (alien installed),
+ or False otherwise
+ """
+ support_info = {}
+ for package_manager in KNOWN_PACKAGE_MANAGERS:
+ try:
+ os_dep.command(package_manager)
+ support_info[package_manager] = True
+ except:
+ support_info[package_manager] = False
- try:
- os_dep.command('alien')
- support_info['conversion'] = True
- except:
- support_info['conversion'] = False
+ try:
+ os_dep.command('alien')
+ support_info['conversion'] = True
+ except:
+ support_info['conversion'] = False
- return support_info
+ return support_info
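
A hedged end-to-end sketch of the helpers above; the package path is
hypothetical and the snippet assumes a dpkg-based host:

    import package

    deb = '/tmp/foo_1.0_all.deb'               # hypothetical package file
    if package.os_support()['dpkg']:
        details = package.info(deb)            # e.g. {'type': 'dpkg', ...}
        if not details['installed']:
            print package.install(deb, nodeps=True)
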
diff --git a/client/bin/parallel.py b/client/bin/parallel.py
index a95b643..13a8b51 100644
--- a/client/bin/parallel.py
+++ b/client/bin/parallel.py
@@ -6,42 +6,42 @@
from autotest_lib.client.common_lib import error
def fork_start(tmp, l):
- sys.stdout.flush()
- sys.stderr.flush()
- pid = os.fork()
- if pid:
- # Parent
- return pid
+ sys.stdout.flush()
+ sys.stderr.flush()
+ pid = os.fork()
+ if pid:
+ # Parent
+ return pid
- try:
- try:
- l()
+ try:
+ try:
+ l()
- except error.AutotestError:
- raise
+ except error.AutotestError:
+ raise
- except:
- raise error.UnhandledError("test failed and threw:\n")
+ except:
+ raise error.UnhandledError("test failed and threw:\n")
- except Exception, detail:
- ename = tmp + "/debug/error-%d" % (os.getpid())
- pickle.dump(detail, open(ename, "w"))
+ except Exception, detail:
+ ename = tmp + "/debug/error-%d" % (os.getpid())
+ pickle.dump(detail, open(ename, "w"))
- sys.stdout.flush()
- sys.stderr.flush()
- os._exit(1)
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(1)
- sys.stdout.flush()
- sys.stderr.flush()
- os._exit(0)
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(0)
def fork_waitfor(tmp, pid):
- (pid, status) = os.waitpid(pid, 0)
+ (pid, status) = os.waitpid(pid, 0)
- ename = tmp + "/debug/error-%d" % pid
- if (os.path.exists(ename)):
- raise pickle.load(file(ename, 'r'))
+ ename = tmp + "/debug/error-%d" % pid
+ if (os.path.exists(ename)):
+ raise pickle.load(file(ename, 'r'))
- if (status != 0):
- raise error.TestError("test failed rc=%d" % (status))
+ if (status != 0):
+ raise error.TestError("test failed rc=%d" % (status))
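
Illustrative only: fork_start() runs a callable in a child process and
fork_waitfor() re-raises any failure the child pickled. The scratch directory
is invented; it just needs the debug/ subdirectory the pair relies on:

    import os
    import parallel

    tmp = '/tmp/parallel_demo'                 # hypothetical scratch dir
    if not os.path.isdir(tmp + '/debug'):
        os.makedirs(tmp + '/debug')

    def work():
        print 'running in child pid %d' % os.getpid()

    pid = parallel.fork_start(tmp, work)
    parallel.fork_waitfor(tmp, pid)            # raises if the child failed
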
diff --git a/client/bin/profiler.py b/client/bin/profiler.py
index b919542..ff821f9 100755
--- a/client/bin/profiler.py
+++ b/client/bin/profiler.py
@@ -1,25 +1,24 @@
class profiler:
- preserve_srcdir = False
+ preserve_srcdir = False
- def __init__(self, job):
- self.job = job
+ def __init__(self, job):
+ self.job = job
- def setup(self, *args):
- return
+ def setup(self, *args):
+ return
- def initialize(self, *args):
- return
+ def initialize(self, *args):
+ return
- def start(self, test):
- return
+ def start(self, test):
+ return
- def stop(self, test):
- return
+ def stop(self, test):
+ return
- def report(self, test):
- return
-
+ def report(self, test):
+ return
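
The class above is deliberately a no-op shell; concrete profilers override the
hooks. A minimal hypothetical subclass (the version attribute is an
assumption, mirroring what profilers.add() below passes to update_version()):

    import time
    from profiler import profiler

    class wall_clock(profiler):
        version = 1                            # assumed; read by profilers.add()

        def start(self, test):
            self.t0 = time.time()

        def stop(self, test):
            self.elapsed = time.time() - self.t0

        def report(self, test):
            print 'test ran for %.2f seconds' % self.elapsed
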
diff --git a/client/bin/profilers.py b/client/bin/profilers.py
index c080021..298cba9 100755
--- a/client/bin/profilers.py
+++ b/client/bin/profilers.py
@@ -4,71 +4,71 @@
class profilers:
- def __init__(self, job):
- self.job = job
- self.list = []
- self.profdir = job.autodir + '/profilers'
- self.tmpdir = job.tmpdir
- self.profile_run_only = False
+ def __init__(self, job):
+ self.job = job
+ self.list = []
+ self.profdir = job.autodir + '/profilers'
+ self.tmpdir = job.tmpdir
+ self.profile_run_only = False
- # add a profiler
- def add(self, profiler, *args, **dargs):
- try:
- sys.path.insert(0, self.job.profdir + '/' + profiler)
- exec 'import ' + profiler
- exec 'newprofiler = %s.%s(self)' % (profiler, profiler)
- finally:
- sys.path.pop(0)
- newprofiler.name = profiler
- newprofiler.bindir = self.profdir + '/' + profiler
- newprofiler.srcdir = newprofiler.bindir + '/src'
- newprofiler.tmpdir = self.tmpdir + '/' + profiler
- utils.update_version(newprofiler.srcdir, newprofiler.preserve_srcdir,
- newprofiler.version, newprofiler.setup,
- *args, **dargs)
- newprofiler.initialize(*args, **dargs)
- self.list.append(newprofiler)
+ # add a profiler
+ def add(self, profiler, *args, **dargs):
+ try:
+ sys.path.insert(0, self.job.profdir + '/' + profiler)
+ exec 'import ' + profiler
+ exec 'newprofiler = %s.%s(self)' % (profiler, profiler)
+ finally:
+ sys.path.pop(0)
+ newprofiler.name = profiler
+ newprofiler.bindir = self.profdir + '/' + profiler
+ newprofiler.srcdir = newprofiler.bindir + '/src'
+ newprofiler.tmpdir = self.tmpdir + '/' + profiler
+ utils.update_version(newprofiler.srcdir, newprofiler.preserve_srcdir,
+ newprofiler.version, newprofiler.setup,
+ *args, **dargs)
+ newprofiler.initialize(*args, **dargs)
+ self.list.append(newprofiler)
- # remove a profiler
- def delete(self, profiler):
- nukeme = None
- for p in self.list:
- if (p.name == profiler):
- nukeme = p
- self.list.remove(p)
+    # remove a profiler
+    def delete(self, profiler):
+        # iterate over a copy; removing from a list while iterating
+        # over it would skip elements
+        for p in self.list[:]:
+            if p.name == profiler:
+                self.list.remove(p)
- # are any profilers enabled ?
- def present(self):
- if self.list:
- return 1
- else:
- return 0
+    # are any profilers enabled?
+ def present(self):
+ if self.list:
+ return 1
+ else:
+ return 0
- # Returns True if job is supposed to be run only with profiling turned
- # on, False otherwise
- def only(self):
- return self.profile_run_only
+ # Returns True if job is supposed to be run only with profiling turned
+ # on, False otherwise
+ def only(self):
+ return self.profile_run_only
- # Changes the flag which determines whether or not the job is to be
- # run without profilers at all
- def set_only(self, value):
- self.profile_run_only = value
+ # Changes the flag which determines whether or not the job is to be
+ # run without profilers at all
+ def set_only(self, value):
+ self.profile_run_only = value
- # Start all enabled profilers
- def start(self, test):
- for p in self.list:
- p.start(test)
+ # Start all enabled profilers
+ def start(self, test):
+ for p in self.list:
+ p.start(test)
- # Stop all enabled profilers
- def stop(self, test):
- for p in self.list:
- p.stop(test)
+ # Stop all enabled profilers
+ def stop(self, test):
+ for p in self.list:
+ p.stop(test)
- # Report on all enabled profilers
- def report(self, test):
- for p in self.list:
- p.report(test)
+ # Report on all enabled profilers
+ def report(self, test):
+ for p in self.list:
+ p.report(test)
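
A self-contained sketch of the container above; fake_job is a stand-in for
the real job object, supplying only the two attributes __init__ reads:

    import profilers

    class fake_job:
        autodir = '/usr/local/autotest'        # hypothetical install dir
        tmpdir = '/tmp'

    profs = profilers.profilers(fake_job())
    print profs.present()                      # 0: no profilers enabled yet
    profs.set_only(True)
    print profs.only()                         # True
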
diff --git a/client/bin/sysinfo.py b/client/bin/sysinfo.py
index 2f605ea..107229c 100755
--- a/client/bin/sysinfo.py
+++ b/client/bin/sysinfo.py
@@ -6,14 +6,14 @@
from autotest_lib.client.common_lib import utils
try:
- from autotest_lib.client.bin import site_sysinfo
- local = True
+ from autotest_lib.client.bin import site_sysinfo
+ local = True
except ImportError:
- local = False
+ local = False
# stuff to log per reboot
-files = ['/proc/pci', '/proc/meminfo', '/proc/slabinfo', '/proc/version',
- '/proc/cpuinfo', '/proc/cmdline', '/proc/modules']
+files = ['/proc/pci', '/proc/meminfo', '/proc/slabinfo', '/proc/version',
+ '/proc/cpuinfo', '/proc/cmdline', '/proc/modules']
# commands = ['lshw'] # this causes problems triggering CDROM drives
commands = ['uname -a', 'lspci -vvn', 'gcc --version', 'ld --version',
'mount', 'hostname']
@@ -21,84 +21,84 @@
def run_command(command, output):
- parts = command.split(None, 1)
- cmd = parts[0]
- if len(parts) > 1:
- args = parts[1]
- else:
- args = ''
- for dir in path:
- pathname = dir + '/' + cmd
- if not os.path.exists(pathname):
- continue
- tmp_cmd = "%s %s > %s 2> /dev/null" % (pathname, args, output)
- utils.system(tmp_cmd)
+ parts = command.split(None, 1)
+ cmd = parts[0]
+ if len(parts) > 1:
+ args = parts[1]
+ else:
+ args = ''
+ for dir in path:
+ pathname = dir + '/' + cmd
+ if not os.path.exists(pathname):
+ continue
+ tmp_cmd = "%s %s > %s 2> /dev/null" % (pathname, args, output)
+ utils.system(tmp_cmd)
def reboot_count():
- if not glob.glob('*'):
- return -1 # No reboots, initial data not logged
- else:
- return len(glob.glob('reboot*'))
-
-
+ if not glob.glob('*'):
+ return -1 # No reboots, initial data not logged
+ else:
+ return len(glob.glob('reboot*'))
+
+
def boot_subdir(reboot_count):
- """subdir of job sysinfo"""
- if reboot_count == 0:
- return '.'
- else:
- return 'reboot%d' % reboot_count
+ """subdir of job sysinfo"""
+ if reboot_count == 0:
+ return '.'
+ else:
+ return 'reboot%d' % reboot_count
def log_per_reboot_data(sysinfo_dir):
- """we log this data when the job starts, and again after any reboot"""
- pwd = os.getcwd()
- try:
- os.chdir(sysinfo_dir)
- subdir = boot_subdir(reboot_count() + 1)
- if not os.path.exists(subdir):
- os.mkdir(subdir)
- os.chdir(os.path.join(sysinfo_dir, subdir))
- _log_per_reboot_data()
- finally:
- os.chdir(pwd)
+ """we log this data when the job starts, and again after any reboot"""
+ pwd = os.getcwd()
+ try:
+ os.chdir(sysinfo_dir)
+ subdir = boot_subdir(reboot_count() + 1)
+ if not os.path.exists(subdir):
+ os.mkdir(subdir)
+ os.chdir(os.path.join(sysinfo_dir, subdir))
+ _log_per_reboot_data()
+ finally:
+ os.chdir(pwd)
def _log_per_reboot_data():
- """system info to log before each step of the job"""
- for command in commands:
- run_command(command, re.sub(r'\s', '_', command))
+ """system info to log before each step of the job"""
+ for command in commands:
+ run_command(command, re.sub(r'\s', '_', command))
- for file in files:
- if (os.path.exists(file)):
- shutil.copyfile(file, os.path.basename(file))
+ for file in files:
+ if (os.path.exists(file)):
+ shutil.copyfile(file, os.path.basename(file))
- utils.system('dmesg -c > dmesg', ignore_status=True)
- utils.system('df -mP > df', ignore_status=True)
- if local:
- site_sysinfo.log_per_reboot_data()
+ utils.system('dmesg -c > dmesg', ignore_status=True)
+ utils.system('df -mP > df', ignore_status=True)
+ if local:
+ site_sysinfo.log_per_reboot_data()
def log_after_each_test(test_sysinfo_dir, job_sysinfo_dir):
- """log things that change after each test (called from test.py)"""
- pwd = os.getcwd()
- try:
- os.chdir(job_sysinfo_dir)
- reboot_subdir = boot_subdir(reboot_count())
- reboot_dir = os.path.join(job_sysinfo_dir, reboot_subdir)
- assert os.path.exists(reboot_dir)
+ """log things that change after each test (called from test.py)"""
+ pwd = os.getcwd()
+ try:
+ os.chdir(job_sysinfo_dir)
+ reboot_subdir = boot_subdir(reboot_count())
+ reboot_dir = os.path.join(job_sysinfo_dir, reboot_subdir)
+ assert os.path.exists(reboot_dir)
- os.makedirs(test_sysinfo_dir)
- os.chdir(test_sysinfo_dir)
- utils.system('ln -s %s reboot_current' % reboot_dir)
+ os.makedirs(test_sysinfo_dir)
+ os.chdir(test_sysinfo_dir)
+ utils.system('ln -s %s reboot_current' % reboot_dir)
- utils.system('dmesg -c > dmesg', ignore_status=True)
- utils.system('df -mP > df', ignore_status=True)
- if local:
- site_sysinfo.log_after_each_test()
- finally:
- os.chdir(pwd)
-
-
+ utils.system('dmesg -c > dmesg', ignore_status=True)
+ utils.system('df -mP > df', ignore_status=True)
+ if local:
+ site_sysinfo.log_after_each_test()
+ finally:
+ os.chdir(pwd)
+
+
if __name__ == '__main__':
- log_per_reboot_data()
+    log_per_reboot_data(os.getcwd())    # sysinfo_dir argument is required
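
A small sketch of driving the logger above by hand; the directory is invented
and must exist before the call:

    import os
    import sysinfo

    d = '/tmp/sysinfo_demo'                    # hypothetical sysinfo dir
    if not os.path.isdir(d):
        os.mkdir(d)
    sysinfo.log_per_reboot_data(d)             # first run logs into d/ itself
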
diff --git a/client/bin/test.py b/client/bin/test.py
index 78dbdd1..5d5d4f9 100755
--- a/client/bin/test.py
+++ b/client/bin/test.py
@@ -3,20 +3,20 @@
# Shell class for a test, inherited by all individual tests
#
# Methods:
-# __init__ initialise
-# initialize run once for each job
-# setup run once for each new version of the test installed
-# run run the test (wrapped by job.run_test())
+# __init__ initialise
+# initialize run once for each job
+# setup run once for each new version of the test installed
+# run run the test (wrapped by job.run_test())
#
# Data:
-# job backreference to the job this test instance is part of
-# outputdir eg. results/<job>/<testname.tag>
-# resultsdir eg. results/<job>/<testname.tag>/results
-# profdir eg. results/<job>/<testname.tag>/profiling
-# debugdir eg. results/<job>/<testname.tag>/debug
-# bindir eg. tests/<test>
-# src eg. tests/<test>/src
-# tmpdir eg. tmp/<testname.tag>
+# job backreference to the job this test instance is part of
+# outputdir eg. results/<job>/<testname.tag>
+# resultsdir eg. results/<job>/<testname.tag>/results
+# profdir eg. results/<job>/<testname.tag>/profiling
+# debugdir eg. results/<job>/<testname.tag>/debug
+# bindir eg. tests/<test>
+# src eg. tests/<test>/src
+# tmpdir eg. tmp/<testname.tag>
import os, sys, traceback
@@ -26,23 +26,23 @@
class test(common_test.base_test):
- pass
+ pass
testname = common_test.testname
def _grab_sysinfo(mytest):
- try:
- sysinfo_dir = os.path.join(mytest.outputdir, 'sysinfo')
- sysinfo.log_after_each_test(sysinfo_dir, mytest.job.sysinfodir)
- if os.path.exists(mytest.tmpdir):
- utils.system('rm -rf ' + mytest.tmpdir)
- except:
- print 'after-test error:'
- traceback.print_exc(file=sys.stdout)
+ try:
+ sysinfo_dir = os.path.join(mytest.outputdir, 'sysinfo')
+ sysinfo.log_after_each_test(sysinfo_dir, mytest.job.sysinfodir)
+ if os.path.exists(mytest.tmpdir):
+ utils.system('rm -rf ' + mytest.tmpdir)
+ except:
+ print 'after-test error:'
+ traceback.print_exc(file=sys.stdout)
def runtest(job, url, tag, args, dargs):
- common_test.runtest(job, url, tag, args, dargs,
- locals(), globals(), _grab_sysinfo)
+ common_test.runtest(job, url, tag, args, dargs,
+ locals(), globals(), _grab_sysinfo)
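
A hypothetical client test built on the shell class above; the name and body
are illustrative, assuming the usual execute() entry point:

    import time
    from test import test

    class sleeptest(test):
        version = 1

        def execute(self, seconds=1):          # assumed entry point
            time.sleep(seconds)
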
diff --git a/client/bin/test_config.py b/client/bin/test_config.py
index e926813..d8a5337 100644
--- a/client/bin/test_config.py
+++ b/client/bin/test_config.py
@@ -13,79 +13,79 @@
__all__ = ['config_loader']
class config_loader:
- """Base class of the configuration parser"""
- def __init__(self, cfg, tmpdir = '/tmp'):
- """\
- Instantiate ConfigParser and provide the file like object that we'll
- use to read configuration data from.
- Args:
- * cfg: Where we'll get configuration data. It can be either:
- * A URL containing the file
- * A valid file path inside the filesystem
- * A string containing configuration data
- * tmpdir: Where we'll dump the temporary conf files. The default
- is the /tmp directory.
- """
- # Base Parser
- self.parser = ConfigParser()
- # File is already a file like object
- if hasattr(cfg, 'read'):
- self.cfg = cfg
- self.parser.readfp(self.cfg)
- elif isinstance(cfg, types.StringTypes):
- # Config file is a URL. Download it to a temp dir
- if cfg.startswith('http') or cfg.startswith('ftp'):
- self.cfg = path.join(tmpdir, path.basename(cfg))
- utils.urlretrieve(cfg, self.cfg)
- self.parser.read(self.cfg)
- # Config is a valid filesystem path to a file.
- elif path.exists(path.abspath(cfg)):
- if path.isfile(cfg):
- self.cfg = path.abspath(cfg)
- self.parser.read(self.cfg)
- else:
- e_msg = 'Invalid config file path: %s' % cfg
- raise IOError(e_msg)
- # Config file is just a string, convert it to a python file like
- # object using StringIO
- else:
- self.cfg = StringIO(cfg)
- self.parser.readfp(self.cfg)
+ """Base class of the configuration parser"""
+ def __init__(self, cfg, tmpdir = '/tmp'):
+ """\
+ Instantiate ConfigParser and provide the file like object that we'll
+ use to read configuration data from.
+ Args:
+ * cfg: Where we'll get configuration data. It can be either:
+ * A URL containing the file
+ * A valid file path inside the filesystem
+ * A string containing configuration data
+ * tmpdir: Where we'll dump the temporary conf files. The default
+ is the /tmp directory.
+ """
+ # Base Parser
+ self.parser = ConfigParser()
+ # File is already a file like object
+ if hasattr(cfg, 'read'):
+ self.cfg = cfg
+ self.parser.readfp(self.cfg)
+ elif isinstance(cfg, types.StringTypes):
+ # Config file is a URL. Download it to a temp dir
+ if cfg.startswith('http') or cfg.startswith('ftp'):
+ self.cfg = path.join(tmpdir, path.basename(cfg))
+ utils.urlretrieve(cfg, self.cfg)
+ self.parser.read(self.cfg)
+ # Config is a valid filesystem path to a file.
+ elif path.exists(path.abspath(cfg)):
+ if path.isfile(cfg):
+ self.cfg = path.abspath(cfg)
+ self.parser.read(self.cfg)
+ else:
+ e_msg = 'Invalid config file path: %s' % cfg
+ raise IOError(e_msg)
+ # Config file is just a string, convert it to a python file like
+ # object using StringIO
+ else:
+ self.cfg = StringIO(cfg)
+ self.parser.readfp(self.cfg)
- def get(self, section, name, default=None):
- """Get the value of a option.
+ def get(self, section, name, default=None):
+        """Get the value of an option.
- Section of the config file and the option name.
- You can pass a default value if the option doesn't exist.
- """
- if not self.parser.has_option(section, name):
- return default
- return self.parser.get(section, name)
+ Section of the config file and the option name.
+ You can pass a default value if the option doesn't exist.
+ """
+ if not self.parser.has_option(section, name):
+ return default
+ return self.parser.get(section, name)
- def set(self, section, option, value):
- """Set an option.
+ def set(self, section, option, value):
+ """Set an option.
- This change is not persistent unless saved with 'save()'.
- """
- if not self.parser.has_section(section):
- self.parser.add_section(section)
- return self.parser.set(section, name, value)
+ This change is not persistent unless saved with 'save()'.
+ """
+ if not self.parser.has_section(section):
+ self.parser.add_section(section)
+        return self.parser.set(section, option, value)
- def remove(self, section, name):
- """Remove an option."""
- if self.parser.has_section(section):
- self.parser.remove_option(section, name)
+ def remove(self, section, name):
+ """Remove an option."""
+ if self.parser.has_section(section):
+ self.parser.remove_option(section, name)
- def save(self):
- """Save the configuration file with all modifications"""
- if not self.filename:
- return
- fileobj = file(self.filename, 'w')
- try:
- self.parser.write(fileobj)
- finally:
- fileobj.close()
+ def save(self):
+ """Save the configuration file with all modifications"""
+        # 'filename' is never set in __init__; bail out safely when absent
+        if not getattr(self, 'filename', None):
+            return
+ fileobj = file(self.filename, 'w')
+ try:
+ self.parser.write(fileobj)
+ finally:
+ fileobj.close()
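
A short sketch of config_loader fed an inline string; the section and key
names are made up:

    from test_config import config_loader

    cfg = config_loader('[kernel]\nversion = 2.6.18\n')
    print cfg.get('kernel', 'version')         # -> 2.6.18
    print cfg.get('kernel', 'flavor', 'none')  # falls back to the default
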
diff --git a/client/bin/xen.py b/client/bin/xen.py
index d69b2c5..a792427 100644
--- a/client/bin/xen.py
+++ b/client/bin/xen.py
@@ -8,201 +8,201 @@
class xen(kernel.kernel):
- def log(self, msg):
- print msg
- self.logfile.write('%s\n' % msg)
+ def log(self, msg):
+ print msg
+ self.logfile.write('%s\n' % msg)
- def __init__(self, job, base_tree, results_dir, tmp_dir, build_dir, \
- leave = False, kjob = None):
- # call base-class
- kernel.kernel.__init__(self, job, base_tree, results_dir, \
- tmp_dir, build_dir, leave)
- self.kjob = kjob
+ def __init__(self, job, base_tree, results_dir, tmp_dir, build_dir, \
+ leave = False, kjob = None):
+ # call base-class
+ kernel.kernel.__init__(self, job, base_tree, results_dir, \
+ tmp_dir, build_dir, leave)
+ self.kjob = kjob
- def config(self, config_file, config_list = None):
- raise NotImplementedError('config() not implemented for xen')
+ def config(self, config_file, config_list = None):
+ raise NotImplementedError('config() not implemented for xen')
- def build(self, make_opts = '', logfile = '', extraversion='autotest'):
- """build xen
+ def build(self, make_opts = '', logfile = '', extraversion='autotest'):
+ """build xen
- make_opts
- additional options to make, if any
- """
- self.log('running build')
- os_dep.commands('gcc', 'make')
- # build xen with extraversion flag
- os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
- if logfile == '':
- logfile = os.path.join(self.log_dir, 'xen_build')
- os.chdir(self.build_dir)
- self.log('log_dir: %s ' % os.path.join(self.log_dir, 'stdout'))
- self.job.stdout.tee_redirect(logfile + '.stdout')
- self.job.stderr.tee_redirect(logfile + '.stderr')
+ make_opts
+ additional options to make, if any
+ """
+ self.log('running build')
+ os_dep.commands('gcc', 'make')
+ # build xen with extraversion flag
+        os.environ['XEN_EXTRAVERSION'] = '-unstable-%s' % extraversion
+ if logfile == '':
+ logfile = os.path.join(self.log_dir, 'xen_build')
+ os.chdir(self.build_dir)
+ self.log('log_dir: %s ' % os.path.join(self.log_dir, 'stdout'))
+ self.job.stdout.tee_redirect(logfile + '.stdout')
+ self.job.stderr.tee_redirect(logfile + '.stderr')
- # build xen hypervisor and user-space tools
- targets = ['xen', 'tools']
- threads = 2 * autotest_utils.count_cpus()
- for t in targets:
- build_string = 'make -j %d %s %s' % (threads, make_opts, t)
- self.log('build_string: %s' % build_string)
- system(build_string)
+ # build xen hypervisor and user-space tools
+ targets = ['xen', 'tools']
+ threads = 2 * autotest_utils.count_cpus()
+ for t in targets:
+ build_string = 'make -j %d %s %s' % (threads, make_opts, t)
+ self.log('build_string: %s' % build_string)
+ system(build_string)
- # make a kernel job out of the kernel from the xen src if one isn't provided
- if self.kjob == None:
- # get xen kernel tree ready
- self.log("prep-ing xen'ified kernel source tree")
- system('make prep-kernels')
+ # make a kernel job out of the kernel from the xen src if one isn't provided
+ if self.kjob == None:
+ # get xen kernel tree ready
+ self.log("prep-ing xen'ified kernel source tree")
+ system('make prep-kernels')
- v = self.get_xen_kernel_build_ver()
- self.log('building xen kernel version: %s' % v)
+ v = self.get_xen_kernel_build_ver()
+ self.log('building xen kernel version: %s' % v)
- # build xen-ified kernel in xen tree
- kernel_base_tree = os.path.join(self.build_dir, \
- 'linux-%s' % self.get_xen_kernel_build_ver())
+ # build xen-ified kernel in xen tree
+ kernel_base_tree = os.path.join(self.build_dir, \
+ 'linux-%s' % self.get_xen_kernel_build_ver())
- self.log('kernel_base_tree = %s' % kernel_base_tree)
- # fix up XENGUEST value in EXTRAVERSION; we can't have
- # files with '$(XENGEUST)' in the name, =(
- self.fix_up_xen_kernel_makefile(kernel_base_tree)
+ self.log('kernel_base_tree = %s' % kernel_base_tree)
+ # fix up XENGUEST value in EXTRAVERSION; we can't have
+ # files with '$(XENGEUST)' in the name, =(
+            # files with '$(XENGUEST)' in the name, =(
- # make the kernel job
- self.kjob = self.job.kernel(kernel_base_tree)
+ # make the kernel job
+ self.kjob = self.job.kernel(kernel_base_tree)
- # hardcoding dom0 config (no modules for testing, yay!)
- # FIXME: probe host to determine which config to pick
- c = self.build_dir + '/buildconfigs/linux-defconfig_xen0_x86_32'
- self.log('using kernel config: %s ' % c)
- self.kjob.config(c)
+ # hardcoding dom0 config (no modules for testing, yay!)
+ # FIXME: probe host to determine which config to pick
+ c = self.build_dir + '/buildconfigs/linux-defconfig_xen0_x86_32'
+ self.log('using kernel config: %s ' % c)
+ self.kjob.config(c)
- # Xen's kernel tree sucks; doesn't use bzImage, but vmlinux
- self.kjob.set_build_target('vmlinuz')
+ # Xen's kernel tree sucks; doesn't use bzImage, but vmlinux
+ self.kjob.set_build_target('vmlinuz')
- # also, the vmlinuz is not out in arch/*/boot, ARGH! more hackery
- self.kjob.set_build_image(self.job.tmpdir + '/build/linux/vmlinuz')
+ # also, the vmlinuz is not out in arch/*/boot, ARGH! more hackery
+ self.kjob.set_build_image(self.job.tmpdir + '/build/linux/vmlinuz')
- self.kjob.build()
+ self.kjob.build()
- self.job.stdout.restore()
- self.job.stderr.restore()
+ self.job.stdout.restore()
+ self.job.stderr.restore()
- xen_version = self.get_xen_build_ver()
- self.log('BUILD VERSION: Xen: %s Kernel:%s' % \
- (xen_version, self.kjob.get_kernel_build_ver()))
+ xen_version = self.get_xen_build_ver()
+ self.log('BUILD VERSION: Xen: %s Kernel:%s' % \
+ (xen_version, self.kjob.get_kernel_build_ver()))
- def build_timed(self, *args, **kwds):
- raise NotImplementedError('build_timed() not implemented')
+ def build_timed(self, *args, **kwds):
+ raise NotImplementedError('build_timed() not implemented')
- def install(self, tag='', prefix = '/', extraversion='autotest'):
- """make install in the kernel tree"""
- self.log('Installing ...')
+ def install(self, tag='', prefix = '/', extraversion='autotest'):
+ """make install in the kernel tree"""
+ self.log('Installing ...')
- os.chdir(self.build_dir)
+ os.chdir(self.build_dir)
- if not os.path.isdir(prefix):
- os.mkdir(prefix)
- self.boot_dir = os.path.join(prefix, 'boot')
- if not os.path.isdir(self.boot_dir):
- os.mkdir(self.boot_dir)
+ if not os.path.isdir(prefix):
+ os.mkdir(prefix)
+ self.boot_dir = os.path.join(prefix, 'boot')
+ if not os.path.isdir(self.boot_dir):
+ os.mkdir(self.boot_dir)
- # remember what we are going to install
- xen_version = '%s-%s' % (self.get_xen_build_ver(), extraversion)
- self.xen_image = self.boot_dir + '/xen-' + xen_version + '.gz'
- self.xen_syms = self.boot_dir + '/xen-syms-' + xen_version
+ # remember what we are going to install
+ xen_version = '%s-%s' % (self.get_xen_build_ver(), extraversion)
+ self.xen_image = self.boot_dir + '/xen-' + xen_version + '.gz'
+ self.xen_syms = self.boot_dir + '/xen-syms-' + xen_version
- self.log('Installing Xen ...')
- os.environ['XEN_EXTRAVERSION'] = '-unstable-%s'% extraversion
+ self.log('Installing Xen ...')
+        os.environ['XEN_EXTRAVERSION'] = '-unstable-%s' % extraversion
- # install xen
- system('make DESTDIR=%s -C xen install' % prefix)
+ # install xen
+ system('make DESTDIR=%s -C xen install' % prefix)
- # install tools
- system('make DESTDIR=%s -C tools install' % prefix)
+ # install tools
+ system('make DESTDIR=%s -C tools install' % prefix)
- # install kernel
- ktag = self.kjob.get_kernel_build_ver()
- kprefix = prefix
- self.kjob.install(tag=ktag, prefix=kprefix)
+ # install kernel
+ ktag = self.kjob.get_kernel_build_ver()
+ kprefix = prefix
+ self.kjob.install(tag=ktag, prefix=kprefix)
- def add_to_bootloader(self, tag='autotest', args=''):
- """ add this kernel to bootloader, taking an
- optional parameter of space separated parameters
- e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
- """
+ def add_to_bootloader(self, tag='autotest', args=''):
+ """ add this kernel to bootloader, taking an
+ optional parameter of space separated parameters
+ e.g.: kernel.add_to_bootloader('mykernel', 'ro acpi=off')
+ """
- # turn on xen mode
- self.job.bootloader.enable_xen_mode()
+ # turn on xen mode
+ self.job.bootloader.enable_xen_mode()
- # remove existing entry if present
- self.job.bootloader.remove_kernel(tag)
+ # remove existing entry if present
+ self.job.bootloader.remove_kernel(tag)
- # add xen and xen kernel
- self.job.bootloader.add_kernel(self.kjob.image, tag, \
- self.kjob.initrd, self.xen_image)
+ # add xen and xen kernel
+ self.job.bootloader.add_kernel(self.kjob.image, tag, \
+ self.kjob.initrd, self.xen_image)
- # if no args passed, populate from /proc/cmdline
- if not args:
- args = open('/proc/cmdline', 'r').readline().strip()
+ # if no args passed, populate from /proc/cmdline
+ if not args:
+ args = open('/proc/cmdline', 'r').readline().strip()
- # add args to entry one at a time
- for a in args.split(' '):
- self.job.bootloader.add_args(tag, a)
+ # add args to entry one at a time
+ for a in args.split(' '):
+ self.job.bootloader.add_args(tag, a)
- # turn off xen mode
- self.job.bootloader.disable_xen_mode()
+ # turn off xen mode
+ self.job.bootloader.disable_xen_mode()
- def get_xen_kernel_build_ver(self):
- """Check xen buildconfig for current kernel version"""
- version = patchlevel = sublevel = ''
- extraversion = localversion = ''
+ def get_xen_kernel_build_ver(self):
+ """Check xen buildconfig for current kernel version"""
+ version = patchlevel = sublevel = ''
+ extraversion = localversion = ''
- version_file = self.build_dir + '/buildconfigs/mk.linux-2.6-xen'
+ version_file = self.build_dir + '/buildconfigs/mk.linux-2.6-xen'
- for line in open(version_file, 'r').readlines():
- if line.startswith('LINUX_VER'):
- start = line.index('=') + 1
- version = line[start:].strip() + "-xen"
- break
+ for line in open(version_file, 'r').readlines():
+ if line.startswith('LINUX_VER'):
+ start = line.index('=') + 1
+ version = line[start:].strip() + "-xen"
+ break
- return version
+ return version
- def fix_up_xen_kernel_makefile(self, kernel_dir):
- """Fix up broken EXTRAVERSION in xen-ified Linux kernel Makefile"""
- xenguest = ''
- makefile = kernel_dir + '/Makefile'
+ def fix_up_xen_kernel_makefile(self, kernel_dir):
+ """Fix up broken EXTRAVERSION in xen-ified Linux kernel Makefile"""
+ xenguest = ''
+ makefile = kernel_dir + '/Makefile'
- for line in open(makefile, 'r').readlines():
- if line.startswith('XENGUEST'):
- start = line.index('=') + 1
- xenguest = line[start:].strip()
- break;
+ for line in open(makefile, 'r').readlines():
+ if line.startswith('XENGUEST'):
+ start = line.index('=') + 1
+ xenguest = line[start:].strip()
+                break
- # change out $XENGUEST in EXTRAVERSION line
- system('sed -i.old "s,\$(XENGUEST),%s," %s' % \
- (xenguest, makefile))
+ # change out $XENGUEST in EXTRAVERSION line
+ system('sed -i.old "s,\$(XENGUEST),%s," %s' % \
+ (xenguest, makefile))
- def get_xen_build_ver(self):
- """Check Makefile and .config to return kernel version"""
- version = patchlevel = sublevel = ''
- extraversion = localversion = ''
+ def get_xen_build_ver(self):
+ """Check Makefile and .config to return kernel version"""
+ version = patchlevel = sublevel = ''
+ extraversion = localversion = ''
- for line in open(self.build_dir + '/xen/Makefile', 'r').readlines():
- if line.startswith('export XEN_VERSION'):
- start = line.index('=') + 1
- version = line[start:].strip()
- if line.startswith('export XEN_SUBVERSION'):
- start = line.index('=') + 1
- sublevel = line[start:].strip()
- if line.startswith('export XEN_EXTRAVERSION'):
- start = line.index('=') + 1
- extraversion = line[start:].strip()
+ for line in open(self.build_dir + '/xen/Makefile', 'r').readlines():
+ if line.startswith('export XEN_VERSION'):
+ start = line.index('=') + 1
+ version = line[start:].strip()
+ if line.startswith('export XEN_SUBVERSION'):
+ start = line.index('=') + 1
+ sublevel = line[start:].strip()
+ if line.startswith('export XEN_EXTRAVERSION'):
+ start = line.index('=') + 1
+ extraversion = line[start:].strip()
- return "%s.%s%s" % (version, sublevel, extraversion)
+ return "%s.%s%s" % (version, sublevel, extraversion)
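
The version parsing in get_xen_build_ver() can be exercised on its own; the
Makefile fragment below is invented for illustration:

    fragment = ['export XEN_VERSION = 3\n',
                'export XEN_SUBVERSION = 1\n',
                'export XEN_EXTRAVERSION = -unstable-autotest\n']
    version = sublevel = extraversion = ''
    for line in fragment:
        if line.startswith('export XEN_VERSION'):
            version = line[line.index('=') + 1:].strip()
        if line.startswith('export XEN_SUBVERSION'):
            sublevel = line[line.index('=') + 1:].strip()
        if line.startswith('export XEN_EXTRAVERSION'):
            extraversion = line[line.index('=') + 1:].strip()
    print '%s.%s%s' % (version, sublevel, extraversion)   # -> 3.1-unstable-autotest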