Add documentation patches
From: Paul Jimenez <[email protected]>
Here are more doc patches for kernel.py, autotest_utils.py, and error.py.
This is a great way for me to learn the codebase :)
git-svn-id: http://test.kernel.org/svn/autotest/trunk@159 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/bin/autotest_utils.py b/bin/autotest_utils.py
index 3a28404..0c2e344 100755
--- a/bin/autotest_utils.py
+++ b/bin/autotest_utils.py
@@ -1,21 +1,26 @@
+"""Convenience functions for use by tests or whomever.
+"""
+
import os,os.path,shutil,urllib,sys,signal,commands,pickle
from error import *
import re
def grep(pattern, file):
-# This is mainly to fix the return code inversion from grep
+ """This is mainly to fix the return code inversion from grep"""
return not system('grep "' + pattern + '" "' + file + '"')
-def difflist(list1, list2):
-# returns items in list 2 that are not in list 1
+def difflist(list1, list2):
+ """returns items in list2 that are not in list1"""
diff = [];
for x in list2:
if x not in list1:
diff.append(x)
return diff
+
def cat_file_to_cmd(file, command):
+ """equivalent to 'cat file | command' but knows to use zcat or bzcat if appropriate"""
if not os.path.isfile(file):
raise NameError, 'invalid file %s to cat to command %s' % file, command
if file.endswith('.bz2'):
@@ -25,10 +30,12 @@
else:
system('cat ' + file + ' | ' + command)
-
-# Extract a tarball to a specified directory name instead of whatever
-# the top level of a tarball is - useful for versioned directory names, etc
+
def extract_tarball_to_dir(tarball, dir):
+ """
+ Extract a tarball to a specified directory name instead of whatever
+ the top level of a tarball is - useful for versioned directory names, etc
+ """
if os.path.exists(dir):
raise NameError, 'target %s already exists' % dir
pwd = os.getcwd()
@@ -38,8 +45,8 @@
os.chdir(pwd)
-# Returns the first found newly created directory by the tarball extraction
def extract_tarball(tarball):
+ """Returns the first found newly created directory by the tarball extraction"""
oldlist = os.listdir('.')
cat_file_to_cmd(tarball, 'tar xf -')
newlist = os.listdir('.')
@@ -52,6 +59,10 @@
def update_version(srcdir, new_version, install):
+ """Make sure srcdir is version new_version
+
+ If not, delete it and install() the new version
+ """
versionfile = srcdir + '/.version'
if os.path.exists(srcdir):
if os.path.exists(versionfile):
@@ -67,16 +78,18 @@
def is_url(path):
+ """true if path is a url
+ """
+ # should cope with other url types here, but we only handle http and ftp
if (path.startswith('http://')) or (path.startswith('ftp://')):
- # should cope with other url types here, but we don't handle them yet
return 1
return 0
def get_file(src, dest):
+ """get a file, either from url or local"""
if (src == dest): # no-op here allows clean overrides in tests
return
- # get a file, either from url or local
if (is_url(src)):
print 'PWD: ' + os.getcwd()
print 'Fetching \n\t', src, '\n\t->', dest
@@ -105,18 +118,21 @@
def force_copy(src, dest):
+ """Replace dest with a new copy of src, even if it exists"""
if os.path.isfile(dest):
os.remove(dest)
return shutil.copyfile(src, dest)
def file_contains_pattern(file, pattern):
+ """Return true if file contains the specified egrep pattern"""
if not os.path.isfile(file):
raise NameError, 'file %s does not exist' % file
return not system('egrep -q ' + pattern + ' ' + file, ignorestatus = 1)
def list_grep(list, pattern):
+ """True if any item in list matches the specified pattern."""
compiled = re.compile(pattern)
for line in list:
match = compiled.search(line)
@@ -126,7 +142,10 @@
def get_vmlinux():
- # Ahem. This is crap. Pray harder. Bad Martin.
+ """Return the full path to vmlinux
+
+ Ahem. This is crap. Pray harder. Bad Martin.
+ """
vmlinux = '/boot/vmlinux'
if not os.path.isfile(vmlinux):
raise NameError, 'Cannot find vmlinux'
@@ -134,7 +153,10 @@
def get_systemmap():
- # Ahem. This is crap. Pray harder. Bad Martin.
+ """Return the full path to System.map
+
+ Ahem. This is crap. Pray harder. Bad Martin.
+ """
map = '/boot/System.map'
if not os.path.isfile(map):
raise NameError, 'Cannot find System.map'
@@ -142,12 +164,13 @@
def get_modules_dir():
+ """Return the modules dir for the running kernel version"""
kernel_version = system_output('uname -r')
return '/lib/modules/%s/kernel' % kernel_version
def get_arch():
-# Work out which CPU architecture we're running on
+ """Work out which CPU architecture we're running on"""
f = open('/proc/cpuinfo', 'r')
cpuinfo = f.readlines()
f.close()
@@ -170,6 +193,7 @@
def get_target_arch():
+ """Work out the target architecture."""
arch = get_arch()
if arch.startswith('power'):
return 'ppc64'
@@ -189,6 +213,7 @@
def count_cpus():
+ """number of CPUs in the local machine according to /proc/cpuinfo"""
f = file('/proc/cpuinfo', 'r')
cpus = 0
for line in f.readlines():
@@ -196,14 +221,16 @@
cpus += 1
return cpus
-
-# We have our own definition of system here, as the stock os.system doesn't
-# correctly handle sigpipe
-# (ie things like "yes | head" will hang because yes doesn't get the SIGPIPE).
-#
-# Also the stock os.system didn't raise errors based on exit status, this
-# version does unless you explicitly specify ignorestatus=1
def system(cmd, ignorestatus = 0):
+ """os.system replacement
+
+ We have our own definition of system here, as the stock os.system doesn't
+ correctly handle sigpipe
+ (ie things like "yes | head" will hang because yes doesn't get the SIGPIPE).
+
+ Also the stock os.system didn't raise errors based on exit status, this
+ version does unless you explicitly specify ignorestatus=1
+ """
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
try:
status = os.system(cmd)
@@ -216,6 +243,11 @@
def system_output(command, ignorestatus = 0):
+ """Run command and return its output
+
+ ignorestatus
+ whether to raise a CmdError if command has a nonzero exit status
+ """
(result, data) = commands.getstatusoutput(command)
if ((result != 0) and not ignorestatus):
raise CmdError, 'command failed: ' + command
@@ -223,14 +255,20 @@
def where_art_thy_filehandles():
+ """Dump the current list of filehandles"""
os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())
def print_to_tty(string):
+ """Output string straight to the tty"""
os.system("echo " + string + " >> /dev/tty")
def dump_object(object):
+ """Dump an object's attributes and methods
+
+ kind of like dir()
+ """
for item in object.__dict__.iteritems():
print item
try:
@@ -241,6 +279,7 @@
def environ(env_key):
+ """return the requested environment variable, or '' if unset"""
if (os.environ.has_key(env_key)):
return os.environ(env_key)
else:
@@ -248,6 +287,7 @@
def prepend_path(newpath, oldpath):
+ """prepend newpath to oldpath"""
if (oldpath):
return newpath + ':' + oldpath
else:
@@ -255,6 +295,7 @@
def append_path(oldpath, newpath):
+ """append newpath to oldpath"""
if (oldpath):
return oldpath + ':' + newpath
else:
@@ -262,9 +303,16 @@
class fd_stack:
- # Note that we need to redirect both the sys.stdout type descriptor
- # (which print, etc use) and the low level OS numbered descriptor
- # which os.system() etc use.
+ """a stack of fd redirects
+
+ Redirects cause existing fd's to be pushed on the stack; restore()
+ causes the current set of redirects to be popped, restoring the previous
+ filehandle destinations.
+
+ Note that we need to redirect both the sys.stdout type descriptor
+ (which print, etc use) and the low level OS numbered descriptor
+ which os.system() etc use.
+ """
def __init__(self, fd, filehandle):
self.fd = fd # eg 1
@@ -273,6 +321,10 @@
def redirect(self, filename):
+ """Redirect output to the specified file
+
+ Overwrites the previous contents, if any.
+ """
fdcopy = os.dup(self.fd)
self.stack.append( (fdcopy, self.filehandle) )
# self.filehandle = file(filename, 'w')
@@ -286,6 +338,10 @@
def tee_redirect(self, filename):
+ """Tee output to the specified file
+
+ Overwrites the previous contents, if any.
+ """
print_to_tty("tee_redirect to " + filename)
where_art_thy_filehandles()
fdcopy = os.dup(self.fd)
@@ -307,6 +363,7 @@
def restore(self):
+ """unredirect one level"""
# print_to_tty("ENTERING RESTORE %d" % self.fd)
# where_art_thy_filehandles()
(old_fd, old_filehandle) = self.stack.pop()
diff --git a/bin/error.py b/bin/error.py
index 8a11d79..6825ae1 100644
--- a/bin/error.py
+++ b/bin/error.py
@@ -1,31 +1,33 @@
+"""
+Internal global error types
+"""
+
import sys
from traceback import format_exception
-# Allow us to bail out requesting continuance.
class JobContinue(SystemExit):
+ """Allow us to bail out requesting continuance."""
pass
-# AutotestError: the parent of all errors deliberatly thrown
-# within the client code.
class AutotestError(Exception):
+	"""The parent of all errors deliberately thrown within the client code."""
pass
-# JobError: indicates an error which terminates and fails the whole job.
class JobError(AutotestError):
+ """Indicates an error which terminates and fails the whole job."""
pass
-# TestError: indicates an error which terminates and fails the test.
class TestError(AutotestError):
+ """Indicates an error which terminates and fails the test."""
pass
-# CmdError: indicates that a command failed, is fatal to the test
-# unless caught.
class CmdError(TestError):
+	"""Indicates that a command failed and is fatal to the test unless caught."""
def __str__(self):
return "Command <" + self.args[0] + "> failed, rc=%d" % (self.args[1])
-# UnhandledError: indicates an unhandled exception in a test.
class UnhandledError(TestError):
+ """Indicates an unhandled exception in a test."""
def __init__(self, prefix):
t, o, tb = sys.exc_info()
trace = format_exception(t, o, tb)
diff --git a/bin/job.py b/bin/job.py
index e682714..6106b8c 100755
--- a/bin/job.py
+++ b/bin/job.py
@@ -1,36 +1,15 @@
-# Copyright Andy Whitcroft, Martin J. Bligh 2006
+"""The main job wrapper
-# The class describing a job
-#
-# Methods:
-# __init__ Initialize the job object
-# kernel Summon a kernel object
-# runtest Summon a test object and run it
-# parallel Run tasks in parallel
-# complete Clean up and exit
-# next_step Define the next step
-# step_engine Do the next step
-# record Record job-level status
-#
-# Data:
-# autodir The top level autotest directory (/usr/local/autotest)
-# bindir bin/
-# testdir tests/
-# profdir profilers/
-# tmpdir tmp/
-# resultdir results/<jobtag>
-# control The control file (pathname of)
-# jobtag The job tag string (eg "default")
-#
-# stdout fd_stack object for stdout
-# stderr fd_stack object for stderr
-# profilers the profilers object for this job
+This is the core infrastructure.
+"""
+
+__author__ = """Copyright Andy Whitcroft, Martin J. Bligh 2006"""
from autotest_utils import *
import os, sys, kernel, test, pickle, threading, profilers
-# Parallel run interface.
class AsyncRun(threading.Thread):
+ """Parallel run interface."""
def __init__(self, cmd):
threading.Thread.__init__(self)
self.cmd = cmd
@@ -38,9 +17,39 @@
x = self.cmd.pop(0)
x(*self.cmd)
-# JOB: the actual job against which we do everything.
+
class job:
+ """The actual job against which we do everything.
+
+ Properties:
+ autodir
+ The top level autotest directory (/usr/local/autotest).
+ Comes from os.environ['AUTODIR'].
+ bindir
+ <autodir>/bin/
+ testdir
+ <autodir>/tests/
+ profdir
+ <autodir>/profilers/
+ tmpdir
+ <autodir>/tmp/
+ resultdir
+ <autodir>/results/<jobtag>
+ stdout
+ fd_stack object for stdout
+ stderr
+ fd_stack object for stderr
+ profilers
+ the profilers object for this job
+ """
+
def __init__(self, control, jobtag='default'):
+ """
+ control
+ The control file (pathname of)
+ jobtag
+ The job tag string (eg "default")
+ """
self.autodir = os.environ['AUTODIR']
self.bindir = self.autodir + '/bin'
self.testdir = self.autodir + '/tests'
@@ -74,11 +83,14 @@
os.chdir(pwd)
def kernel(self, topdir, base_tree):
+ """Summon a kernel object"""
return kernel.kernel(self, topdir, base_tree)
-
def setup_dep(self, deps):
- # deps is an array of libraries required for this test.
+ """Set up the dependencies for this test.
+
+ deps is a list of libraries required for this test.
+ """
for dep in deps:
try:
os.chdir(self.autodir + '/deps/' + dep)
@@ -98,6 +110,13 @@
self.__class__.__name__ + "\n")
def runtest(self, tag, testname, *test_args):
+ """Summon a test object and run it.
+
+ tag
+ tag to add to testname
+ testname
+ name of the test to run
+ """
name = testname
if (tag):
name += '.' + tag
@@ -123,20 +142,23 @@
print "job: noop: " + text
# Job control primatives.
- def parallel(self, *l):
+
+ def parallel(self, *tasklist):
+ """Run tasks in parallel"""
tasks = []
- for t in l:
+ for t in tasklist:
task = AsyncRun(t)
tasks.append(task)
task.start()
for t in tasks:
t.join()
- # XXX: should have a better name.
def quit(self):
+ # XXX: should have a better name.
raise JobContinue("more to come")
def complete(self, status):
+ """Clean up and exit"""
# We are about to exit 'complete' so clean up the control file.
try:
os.unlink(self.control + '.state')
@@ -144,16 +166,18 @@
pass
sys.exit(status)
- # STEPS: the stepping engine -- if the control file defines
- # step_init we will be using this engine to drive multiple
- # runs.
steps = []
def next_step(self, step):
+ """Define the next step"""
step[0] = step[0].__name__
self.steps.append(step)
pickle.dump(self.steps, open(self.control + '.state', 'w'))
def step_engine(self):
+ """the stepping engine -- if the control file defines
+ step_init we will be using this engine to drive multiple runs.
+ """
+		# Do the next step
lcl = dict({'job': self})
str = """
@@ -189,6 +213,7 @@
self.complete(0)
def record(self, msg):
+ """Record job-level status"""
print msg
status = self.resultdir + "/status"
fd = file(status, "a")
@@ -197,6 +222,13 @@
def runjob(control, cont = 0):
+ """The main interface to this module
+
+ control
+ The control file to use for this job.
+ cont
+ Whether this is the continuation of a previously started job
+ """
state = control + '.state'
# instantiate the job object ready for the control file.
diff --git a/bin/kernel.py b/bin/kernel.py
index fdadbda..ae6878d 100755
--- a/bin/kernel.py
+++ b/bin/kernel.py
@@ -1,28 +1,5 @@
-# Copyright Martin J. Bligh, 2006
-#
-# Class for compiling kernels. Data for the object includes the src files
-# used to create the kernel, patches applied, config (base + changes),
-# the build directory itself, and logged output
-#
-# Methods:
-# __init__ Initialize kernel object
-# patch Apply a list of patches (in order)
-# config Summon a kernel_config object and set it up
-# build Build the kernel
-# build_timed Build the kernel, and time it
-# clean Do a "make clean"
-# install Do a "make install"
-# set_cross_cc Set the cross compiler to the h/w platform
-# pickle_dump Pickle this object, sans job backreference.
-#
-# Data:
-# job Backpointer to the job object we're part of
-# autodir Path to the top level autotest dir (/usr/local/autotest)
-# top_dir Path to the top level dir of this kernel object
-# src_dir <kernel>/src/
-# build_dir <kernel>/patches/
-# config_dir <kernel>/config
-# log_dir <kernel>/log
+
+__author__ = """Copyright Martin J. Bligh, 2006"""
import os,os.path,shutil,urllib,copy,pickle
from autotest_utils import *
@@ -30,9 +7,41 @@
import test
class kernel:
+ """ Class for compiling kernels.
+
+ Data for the object includes the src files
+ used to create the kernel, patches applied, config (base + changes),
+ the build directory itself, and logged output
+
+ Properties:
+ job
+ Backpointer to the job object we're part of
+ autodir
+ Path to the top level autotest dir (/usr/local/autotest)
+ top_dir
+ Path to the top level dir of this kernel object
+ src_dir
+ <top_dir>/src/
+ build_dir
+ <top_dir>/patches/
+ config_dir
+ <top_dir>/config
+ log_dir
+ <top_dir>/log
+ """
+
autodir = ''
def __init__(self, job, top_directory, base_tree):
+ """Initialize the kernel build environment
+
+ job
+ which job this build is part of
+ top_directory
+ top of the build environment
+ base_tree
+ ???
+ """
self.job = job
autodir = job.autodir
self.top_dir = top_directory
@@ -58,6 +67,7 @@
def patch(self, *patches):
+ """Apply a list of patches (in order)"""
self.job.stdout.redirect(self.log_dir+'/stdout')
local_patches = self.get_patches(patches)
self.apply_patches(local_patches)
@@ -71,6 +81,7 @@
def get_patches(self, patches):
+ """fetch the patches to the local patch_dir"""
local_patches = []
for patch in patches:
dest = self.patch_dir + basename(patch)
@@ -79,6 +90,7 @@
def apply_patches(self, patches):
+ """apply the list of patches, in order"""
builddir = self.build_dir
os.chdir(builddir)
@@ -92,7 +104,7 @@
def get_kernel_tree(self, base_tree):
- # Extract base_tree into self.top_dir/build
+ """Extract base_tree into self.top_dir/build"""
os.chdir(self.top_dir)
tarball = 'src/' + basename(base_tree)
get_file(base_tree, tarball)
@@ -102,7 +114,11 @@
def build(self, make_opts = ''):
- # build the kernel
+ """build the kernel
+
+ make_opts
+ additional options to make, if any
+ """
os.chdir(self.build_dir)
print self.log_dir+'stdout'
self.job.stdout.redirect(self.log_dir+'/stdout')
@@ -126,6 +142,7 @@
def build_timed(self, threads, timefile = '/dev/null', make_opts = ''):
+		"""time the building of the kernel"""
os.chdir(self.build_dir)
print "make clean"
system('make clean')
@@ -137,13 +154,14 @@
def clean(self):
+ """make clean in the kernel tree"""
os.chdir(self.build_dir)
print "make clean"
system('make clean')
def install(self, dir):
- # install the kernel
+ """make install in the kernel tree"""
os.chdir(self.build_dir)
image = 'arch/' + get_target_arch() + '/boot/' + target
force_copy(image, '/boot/vmlinuz-autotest')
@@ -155,6 +173,10 @@
def set_cross_cc(self):
+ """Set up to cross-compile.
+
+ Currently this can cross-compile to ppc64 and x86_64
+ """
target_arch = get_target_arch()
global target
target = 'bzImage'
@@ -169,9 +191,12 @@
os.environ['CROSS_COMPILE']=autodir+'sources/x86_64-cross/bin'
- # we can't pickle the backreference to job (it contains fd's),
- # nor would we want to
def pickle_dump(self, filename):
+ """dump a pickle of ourself out to the specified filename
+
+ we can't pickle the backreference to job (it contains fd's),
+ nor would we want to
+ """
temp = copy.copy(self)
temp.job = None
pickle.dump(temp, open(filename, 'w'))