Convert all Python code to use four-space indents instead of eight-space tabs.
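
For reference, a conversion like this can be scripted rather than done by
hand. The sketch below is a minimal illustration of the idea only, not the
actual tool used for this change; it assumes indentation consists purely of
leading tabs, maps each tab to one four-space level, and makes no attempt to
handle tabs inside string literals:

    import sys

    def retab_line(line):
        # Replace each leading tab with one four-space indent level.
        body = line.lstrip('\t')
        depth = len(line) - len(body)
        return ' ' * (4 * depth) + body

    def retab_file(path):
        # Rewrite the file in place, one line at a time.
        with open(path) as src:
            lines = src.readlines()
        with open(path, 'w') as dst:
            dst.writelines(retab_line(l) for l in lines)

    if __name__ == '__main__':
        # Usage (hypothetical script name): python retab.py file1.py file2.py ...
        for path in sys.argv[1:]:
            retab_file(path)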
Signed-off-by: John Admanski <[email protected]>
git-svn-id: http://test.kernel.org/svn/autotest/trunk@1658 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/tko/db.py b/tko/db.py
index 235b485..175869b 100644
--- a/tko/db.py
+++ b/tko/db.py
@@ -5,488 +5,488 @@
class MySQLTooManyRows(Exception):
- pass
+ pass
class db_sql:
- def __init__(self, debug=False, autocommit=True, host=None,
- database=None, user=None, password=None):
- self.debug = debug
- self.autocommit = autocommit
- self._load_config(host, database, user, password)
+ def __init__(self, debug=False, autocommit=True, host=None,
+ database=None, user=None, password=None):
+ self.debug = debug
+ self.autocommit = autocommit
+ self._load_config(host, database, user, password)
- self.con = None
- self._init_db()
+ self.con = None
+ self._init_db()
- # if not present, insert statuses
- self.status_idx = {}
- self.status_word = {}
- status_rows = self.select('status_idx, word', 'status', None)
- for s in status_rows:
- self.status_idx[s[1]] = s[0]
- self.status_word[s[0]] = s[1]
+ # if not present, insert statuses
+ self.status_idx = {}
+ self.status_word = {}
+ status_rows = self.select('status_idx, word', 'status', None)
+ for s in status_rows:
+ self.status_idx[s[1]] = s[0]
+ self.status_word[s[0]] = s[1]
- machine_map = os.path.join(os.path.dirname(__file__),
- 'machines')
- if os.path.exists(machine_map):
- self.machine_map = machine_map
- else:
- self.machine_map = None
- self.machine_group = {}
+ machine_map = os.path.join(os.path.dirname(__file__),
+ 'machines')
+ if os.path.exists(machine_map):
+ self.machine_map = machine_map
+ else:
+ self.machine_map = None
+ self.machine_group = {}
- def _load_config(self, host, database, user, password):
- # grab the global config
- get_value = global_config.global_config.get_config_value
+ def _load_config(self, host, database, user, password):
+ # grab the global config
+ get_value = global_config.global_config.get_config_value
- # grab the host, database
- if host:
- self.host = host
- else:
- self.host = get_value("TKO", "host")
- if database:
- self.database = database
- else:
- self.database = get_value("TKO", "database")
+ # grab the host, database
+ if host:
+ self.host = host
+ else:
+ self.host = get_value("TKO", "host")
+ if database:
+ self.database = database
+ else:
+ self.database = get_value("TKO", "database")
- # grab the user and password
- if user:
- self.user = user
- else:
- self.user = get_value("TKO", "user")
- if password:
- self.password = password
- else:
- self.password = get_value("TKO", "password")
+ # grab the user and password
+ if user:
+ self.user = user
+ else:
+ self.user = get_value("TKO", "user")
+ if password:
+ self.password = password
+ else:
+ self.password = get_value("TKO", "password")
- # grab the timeout configuration
- self.query_timeout = get_value("TKO", "query_timeout",
- type=int, default=3600)
- self.min_delay = get_value("TKO", "min_retry_delay", type=int,
- default=20)
- self.max_delay = get_value("TKO", "max_retry_delay", type=int,
- default=60)
+ # grab the timeout configuration
+ self.query_timeout = get_value("TKO", "query_timeout",
+ type=int, default=3600)
+ self.min_delay = get_value("TKO", "min_retry_delay", type=int,
+ default=20)
+ self.max_delay = get_value("TKO", "max_retry_delay", type=int,
+ default=60)
- def _init_db(self):
- # make sure we clean up any existing connection
- if self.con:
- self.con.close()
- self.con = None
+ def _init_db(self):
+ # make sure we clean up any existing connection
+ if self.con:
+ self.con.close()
+ self.con = None
- # create the db connection and cursor
- self.con = self.connect(self.host, self.database,
- self.user, self.password)
- self.cur = self.con.cursor()
+ # create the db connection and cursor
+ self.con = self.connect(self.host, self.database,
+ self.user, self.password)
+ self.cur = self.con.cursor()
- def _random_delay(self):
- delay = random.randint(self.min_delay, self.max_delay)
- time.sleep(delay)
+ def _random_delay(self):
+ delay = random.randint(self.min_delay, self.max_delay)
+ time.sleep(delay)
- def run_with_retry(self, function, *args, **dargs):
- """Call function(*args, **dargs) until either it passes
- without an operational error, or a timeout is reached.
- This will re-connect to the database, so it is NOT safe
- to use this inside of a database transaction.
+ def run_with_retry(self, function, *args, **dargs):
+ """Call function(*args, **dargs) until either it passes
+ without an operational error, or a timeout is reached.
+ This will re-connect to the database, so it is NOT safe
+ to use this inside of a database transaction.
- It can be safely used with transactions, but the
- transaction start & end must be completely contained
- within the call to 'function'."""
- OperationalError = _get_error_class("OperationalError")
+ It can be safely used with transactions, but the
+ transaction start & end must be completely contained
+ within the call to 'function'."""
+ OperationalError = _get_error_class("OperationalError")
- success = False
- start_time = time.time()
- while not success:
- try:
- result = function(*args, **dargs)
- except OperationalError, e:
- self._log_operational_error(e)
- stop_time = time.time()
- elapsed_time = stop_time - start_time
- if elapsed_time > self.query_timeout:
- raise
- else:
- try:
- self._random_delay()
- self._init_db()
- except OperationalError, e:
- self._log_operational_error(e)
- else:
- success = True
- return result
+ success = False
+ start_time = time.time()
+ while not success:
+ try:
+ result = function(*args, **dargs)
+ except OperationalError, e:
+ self._log_operational_error(e)
+ stop_time = time.time()
+ elapsed_time = stop_time - start_time
+ if elapsed_time > self.query_timeout:
+ raise
+ else:
+ try:
+ self._random_delay()
+ self._init_db()
+ except OperationalError, e:
+ self._log_operational_error(e)
+ else:
+ success = True
+ return result
- def _log_operational_error(self, e):
- msg = ("An operational error occured during a database "
- "operation: %s" % str(e))
- print >> sys.stderr, msg
- sys.stderr.flush() # we want these msgs to show up immediately
+ def _log_operational_error(self, e):
+ msg = ("An operational error occured during a database "
+ "operation: %s" % str(e))
+ print >> sys.stderr, msg
+ sys.stderr.flush() # we want these msgs to show up immediately
- def dprint(self, value):
- if self.debug:
- sys.stdout.write('SQL: ' + str(value) + '\n')
+ def dprint(self, value):
+ if self.debug:
+ sys.stdout.write('SQL: ' + str(value) + '\n')
- def commit(self):
- self.con.commit()
+ def commit(self):
+ self.con.commit()
- def get_last_autonumber_value(self):
- self.cur.execute('SELECT LAST_INSERT_ID()', [])
- return self.cur.fetchall()[0][0]
+ def get_last_autonumber_value(self):
+ self.cur.execute('SELECT LAST_INSERT_ID()', [])
+ return self.cur.fetchall()[0][0]
- def select(self, fields, table, where, wherein={},
- distinct = False, group_by = None, max_rows = None):
- """\
- This selects all the fields requested from a
- specific table with a particular where clause.
- The where clause can either be a dictionary of
- field=value pairs, a string, or a tuple of (string,
- a list of values). The last option is what you
- should use when accepting user input as it'll
- protect you against sql injection attacks (if
- all user data is placed in the array rather than
- the raw SQL).
+ def select(self, fields, table, where, wherein={},
+ distinct = False, group_by = None, max_rows = None):
+ """\
+ This selects all the fields requested from a
+ specific table with a particular where clause.
+ The where clause can either be a dictionary of
+ field=value pairs, a string, or a tuple of (string,
+ a list of values). The last option is what you
+ should use when accepting user input as it'll
+ protect you against sql injection attacks (if
+ all user data is placed in the array rather than
+ the raw SQL).
- For example:
- where = ("a = %s AND b = %s", ['val', 'val'])
- is better than
- where = "a = 'val' AND b = 'val'"
- """
- cmd = ['select']
- if distinct:
- cmd.append('distinct')
- cmd += [fields, 'from', table]
+ For example:
+ where = ("a = %s AND b = %s", ['val', 'val'])
+ is better than
+ where = "a = 'val' AND b = 'val'"
+ """
+ cmd = ['select']
+ if distinct:
+ cmd.append('distinct')
+ cmd += [fields, 'from', table]
- values = []
- if where and isinstance(where, types.DictionaryType):
- # key/value pairs (which should be equal)
- keys = [field + '=%s' for field in where.keys()]
- values = [where[field] for field in where.keys()]
+ values = []
+ if where and isinstance(where, types.DictionaryType):
+ # key/value pairs (which should be equal)
+ keys = [field + '=%s' for field in where.keys()]
+ values = [where[field] for field in where.keys()]
- cmd.append(' where ' + ' and '.join(keys))
- elif where and isinstance(where, types.StringTypes):
- # the exact string
- cmd.append(' where ' + where)
- elif where and isinstance(where, types.TupleType):
- # preformatted where clause + values
- (sql, vals) = where
- values = vals
- cmd.append(' where (%s) ' % sql)
+ cmd.append(' where ' + ' and '.join(keys))
+ elif where and isinstance(where, types.StringTypes):
+ # the exact string
+ cmd.append(' where ' + where)
+ elif where and isinstance(where, types.TupleType):
+ # preformatted where clause + values
+ (sql, vals) = where
+ values = vals
+ cmd.append(' where (%s) ' % sql)
- # TODO: this assumes there's a where clause...bad
- if wherein and isinstance(wherein, types.DictionaryType):
- keys_in = ["%s in (%s) " % (field, ','.join(where))
- for field, where in wherein.iteritems()]
- cmd.append(' and '+' and '.join(keys_in))
+ # TODO: this assumes there's a where clause...bad
+ if wherein and isinstance(wherein, types.DictionaryType):
+ keys_in = ["%s in (%s) " % (field, ','.join(where))
+ for field, where in wherein.iteritems()]
+ cmd.append(' and '+' and '.join(keys_in))
- if group_by:
- cmd.append(' GROUP BY ' + group_by)
+ if group_by:
+ cmd.append(' GROUP BY ' + group_by)
- self.dprint('%s %s' % (' '.join(cmd), values))
+ self.dprint('%s %s' % (' '.join(cmd), values))
- # create a re-runnable function for executing the query
- def exec_sql():
- sql = ' '.join(cmd)
- numRec = self.cur.execute(sql, values)
- if max_rows != None and numRec > max_rows:
- msg = 'Exceeded allowed number of records'
- raise MySQLTooManyRows(msg)
- return self.cur.fetchall()
+ # create a re-runnable function for executing the query
+ def exec_sql():
+ sql = ' '.join(cmd)
+ numRec = self.cur.execute(sql, values)
+ if max_rows != None and numRec > max_rows:
+ msg = 'Exceeded allowed number of records'
+ raise MySQLTooManyRows(msg)
+ return self.cur.fetchall()
- # run the query, re-trying after operational errors
- if self.autocommit:
- return self.run_with_retry(exec_sql)
- else:
- return exec_sql()
+ # run the query, re-trying after operational errors
+ if self.autocommit:
+ return self.run_with_retry(exec_sql)
+ else:
+ return exec_sql()
- def select_sql(self, fields, table, sql, values):
- """\
- select fields from table "sql"
- """
- cmd = 'select %s from %s %s' % (fields, table, sql)
- self.dprint(cmd)
-
- # create a re-runnable function for executing the query
- def exec_sql():
- self.cur.execute(cmd, values)
- return self.cur.fetchall()
-
- # run the query, re-trying after operational errors
- if self.autocommit:
- return self.run_with_retry(exec_sql)
- else:
- return exec_sql()
-
-
- def _exec_sql_with_commit(self, sql, values, commit):
- if self.autocommit:
- # re-run the query until it succeeds
- def exec_sql():
- self.cur.execute(sql, values)
- self.con.commit()
- self.run_with_retry(exec_sql)
- else:
- # take one shot at running the query
- self.cur.execute(sql, values)
- if commit:
- self.con.commit()
-
-
- def insert(self, table, data, commit = None):
- """\
- 'insert into table (keys) values (%s ... %s)', values
-
- data:
- dictionary of fields and data
- """
- fields = data.keys()
- refs = ['%s' for field in fields]
- values = [data[field] for field in fields]
- cmd = 'insert into %s (%s) values (%s)' % \
- (table, ','.join(fields), ','.join(refs))
- self.dprint('%s %s' % (cmd, values))
-
- self._exec_sql_with_commit(cmd, values, commit)
-
-
- def delete(self, table, where, commit = None):
- cmd = ['delete from', table]
- if commit == None:
- commit = self.autocommit
- if where and isinstance(where, types.DictionaryType):
- keys = [field + '=%s' for field in where.keys()]
- values = [where[field] for field in where.keys()]
- cmd += ['where', ' and '.join(keys)]
- sql = ' '.join(cmd)
- self.dprint('%s %s' % (sql, values))
-
- self._exec_sql_with_commit(sql, values, commit)
-
-
- def update(self, table, data, where, commit = None):
- """\
- 'update table set data values (%s ... %s) where ...'
-
- data:
- dictionary of fields and data
- """
- if commit == None:
- commit = self.autocommit
- cmd = 'update %s ' % table
- fields = data.keys()
- data_refs = [field + '=%s' for field in fields]
- data_values = [data[field] for field in fields]
- cmd += ' set ' + ' and '.join(data_refs)
-
- where_keys = [field + '=%s' for field in where.keys()]
- where_values = [where[field] for field in where.keys()]
- cmd += ' where ' + ' and '.join(where_keys)
-
- values = data_values + where_values
- print '%s %s' % (cmd, values)
-
- self._exec_sql_with_commit(cmd, values, commit)
-
+ def select_sql(self, fields, table, sql, values):
+ """\
+ select fields from table "sql"
+ """
+ cmd = 'select %s from %s %s' % (fields, table, sql)
+ self.dprint(cmd)
- def delete_job(self, tag, commit = None):
- job_idx = self.find_job(tag)
- for test_idx in self.find_tests(job_idx):
- where = {'test_idx' : test_idx}
- self.delete('iteration_result', where)
- self.delete('test_attributes', where)
- where = {'job_idx' : job_idx}
- self.delete('tests', where)
- self.delete('jobs', where)
+ # create a re-runnable function for executing the query
+ def exec_sql():
+ self.cur.execute(cmd, values)
+ return self.cur.fetchall()
+ # run the query, re-trying after operational errors
+ if self.autocommit:
+ return self.run_with_retry(exec_sql)
+ else:
+ return exec_sql()
- def insert_job(self, tag, job, commit = None):
- job.machine_idx = self.lookup_machine(job.machine)
- if not job.machine_idx:
- job.machine_idx = self.insert_machine(job,
- commit=commit)
- self.insert('jobs', {'tag':tag,
- 'label': job.label,
- 'username': job.user,
- 'machine_idx': job.machine_idx,
- 'queued_time': job.queued_time,
- 'started_time': job.started_time,
- 'finished_time': job.finished_time},
- commit=commit)
- job.index = self.get_last_autonumber_value()
- for test in job.tests:
- self.insert_test(job, test, commit=commit)
+ def _exec_sql_with_commit(self, sql, values, commit):
+ if self.autocommit:
+ # re-run the query until it succeeds
+ def exec_sql():
+ self.cur.execute(sql, values)
+ self.con.commit()
+ self.run_with_retry(exec_sql)
+ else:
+ # take one shot at running the query
+ self.cur.execute(sql, values)
+ if commit:
+ self.con.commit()
- def insert_test(self, job, test, commit = None):
- kver = self.insert_kernel(test.kernel, commit=commit)
- data = {'job_idx':job.index, 'test':test.testname,
- 'subdir':test.subdir, 'kernel_idx':kver,
- 'status':self.status_idx[test.status],
- 'reason':test.reason, 'machine_idx':job.machine_idx,
- 'started_time': test.started_time,
- 'finished_time':test.finished_time}
- self.insert('tests', data, commit=commit)
- test_idx = self.get_last_autonumber_value()
- data = { 'test_idx':test_idx }
- for i in test.iterations:
- data['iteration'] = i.index
- for key, value in i.attr_keyval.iteritems():
- data['attribute'] = key
- data['value'] = value
- self.insert('iteration_attributes', data,
- commit=commit)
- for key, value in i.perf_keyval.iteritems():
- data['attribute'] = key
- data['value'] = value
- self.insert('iteration_result', data,
- commit=commit)
+ def insert(self, table, data, commit = None):
+ """\
+ 'insert into table (keys) values (%s ... %s)', values
- for key, value in test.attributes.iteritems():
- data = {'test_idx': test_idx, 'attribute': key,
- 'value': value}
- self.insert('test_attributes', data, commit=commit)
+ data:
+ dictionary of fields and data
+ """
+ fields = data.keys()
+ refs = ['%s' for field in fields]
+ values = [data[field] for field in fields]
+ cmd = 'insert into %s (%s) values (%s)' % \
+ (table, ','.join(fields), ','.join(refs))
+ self.dprint('%s %s' % (cmd, values))
+ self._exec_sql_with_commit(cmd, values, commit)
- def read_machine_map(self):
- self.machine_group = {}
- for line in open(self.machine_map, 'r').readlines():
- (machine, group) = line.split()
- self.machine_group[machine] = group
+ def delete(self, table, where, commit = None):
+ cmd = ['delete from', table]
+ if commit == None:
+ commit = self.autocommit
+ if where and isinstance(where, types.DictionaryType):
+ keys = [field + '=%s' for field in where.keys()]
+ values = [where[field] for field in where.keys()]
+ cmd += ['where', ' and '.join(keys)]
+ sql = ' '.join(cmd)
+ self.dprint('%s %s' % (sql, values))
- def insert_machine(self, job, group = None, commit = None):
- hostname = job.machine
- if self.machine_map and not self.machine_group:
- self.read_machine_map()
+ self._exec_sql_with_commit(sql, values, commit)
- if not group:
- group = self.machine_group.get(hostname, hostname)
- if group == hostname and job.machine_owner:
- group = job.machine_owner + '/' + hostname
- self.insert('machines',
- { 'hostname' : hostname ,
- 'machine_group' : group ,
- 'owner' : job.machine_owner },
- commit=commit)
- return self.get_last_autonumber_value()
+ def update(self, table, data, where, commit = None):
+ """\
+ 'update table set data values (%s ... %s) where ...'
+ data:
+ dictionary of fields and data
+ """
+ if commit == None:
+ commit = self.autocommit
+ cmd = 'update %s ' % table
+ fields = data.keys()
+ data_refs = [field + '=%s' for field in fields]
+ data_values = [data[field] for field in fields]
+ cmd += ' set ' + ' and '.join(data_refs)
- def lookup_machine(self, hostname):
- where = { 'hostname' : hostname }
- rows = self.select('machine_idx', 'machines', where)
- if rows:
- return rows[0][0]
- else:
- return None
+ where_keys = [field + '=%s' for field in where.keys()]
+ where_values = [where[field] for field in where.keys()]
+ cmd += ' where ' + ' and '.join(where_keys)
+ values = data_values + where_values
+ print '%s %s' % (cmd, values)
- def lookup_kernel(self, kernel):
- rows = self.select('kernel_idx', 'kernels',
- {'kernel_hash':kernel.kernel_hash})
- if rows:
- return rows[0][0]
- else:
- return None
+ self._exec_sql_with_commit(cmd, values, commit)
- def insert_kernel(self, kernel, commit = None):
- kver = self.lookup_kernel(kernel)
- if kver:
- return kver
-
- # If this kernel has any significant patches, append their hash
- # as a differentiator.
- printable = kernel.base
- patch_count = 0
- for patch in kernel.patches:
- match = re.match(r'.*(-mm[0-9]+|-git[0-9]+)\.(bz2|gz)$',
- patch.reference)
- if not match:
- patch_count += 1
+ def delete_job(self, tag, commit = None):
+ job_idx = self.find_job(tag)
+ for test_idx in self.find_tests(job_idx):
+ where = {'test_idx' : test_idx}
+ self.delete('iteration_result', where)
+ self.delete('test_attributes', where)
+ where = {'job_idx' : job_idx}
+ self.delete('tests', where)
+ self.delete('jobs', where)
- self.insert('kernels',
- {'base':kernel.base,
- 'kernel_hash':kernel.kernel_hash,
- 'printable':printable},
- commit=commit)
- kver = self.get_last_autonumber_value()
- if patch_count > 0:
- printable += ' p%d' % (kver)
- self.update('kernels',
- {'printable':printable},
- {'kernel_idx':kver})
+ def insert_job(self, tag, job, commit = None):
+ job.machine_idx = self.lookup_machine(job.machine)
+ if not job.machine_idx:
+ job.machine_idx = self.insert_machine(job,
+ commit=commit)
+ self.insert('jobs', {'tag':tag,
+ 'label': job.label,
+ 'username': job.user,
+ 'machine_idx': job.machine_idx,
+ 'queued_time': job.queued_time,
+ 'started_time': job.started_time,
+ 'finished_time': job.finished_time},
+ commit=commit)
+ job.index = self.get_last_autonumber_value()
+ for test in job.tests:
+ self.insert_test(job, test, commit=commit)
- for patch in kernel.patches:
- self.insert_patch(kver, patch, commit=commit)
- return kver
+ def insert_test(self, job, test, commit = None):
+ kver = self.insert_kernel(test.kernel, commit=commit)
+ data = {'job_idx':job.index, 'test':test.testname,
+ 'subdir':test.subdir, 'kernel_idx':kver,
+ 'status':self.status_idx[test.status],
+ 'reason':test.reason, 'machine_idx':job.machine_idx,
+ 'started_time': test.started_time,
+ 'finished_time':test.finished_time}
+ self.insert('tests', data, commit=commit)
+ test_idx = self.get_last_autonumber_value()
+ data = { 'test_idx':test_idx }
- def insert_patch(self, kver, patch, commit = None):
- print patch.reference
- name = os.path.basename(patch.reference)[:80]
- self.insert('patches',
- {'kernel_idx': kver,
- 'name':name,
- 'url':patch.reference,
- 'hash':patch.hash},
+ for i in test.iterations:
+ data['iteration'] = i.index
+ for key, value in i.attr_keyval.iteritems():
+ data['attribute'] = key
+ data['value'] = value
+ self.insert('iteration_attributes', data,
+ commit=commit)
+ for key, value in i.perf_keyval.iteritems():
+ data['attribute'] = key
+ data['value'] = value
+ self.insert('iteration_result', data,
commit=commit)
-
- def find_test(self, job_idx, subdir):
- where = { 'job_idx':job_idx , 'subdir':subdir }
- rows = self.select('test_idx', 'tests', where)
- if rows:
- return rows[0][0]
- else:
- return None
+ for key, value in test.attributes.iteritems():
+ data = {'test_idx': test_idx, 'attribute': key,
+ 'value': value}
+ self.insert('test_attributes', data, commit=commit)
- def find_tests(self, job_idx):
- where = { 'job_idx':job_idx }
- rows = self.select('test_idx', 'tests', where)
- if rows:
- return [row[0] for row in rows]
- else:
- return []
+ def read_machine_map(self):
+ self.machine_group = {}
+ for line in open(self.machine_map, 'r').readlines():
+ (machine, group) = line.split()
+ self.machine_group[machine] = group
- def find_job(self, tag):
- rows = self.select('job_idx', 'jobs', {'tag': tag})
- if rows:
- return rows[0][0]
- else:
- return None
+ def insert_machine(self, job, group = None, commit = None):
+ hostname = job.machine
+ if self.machine_map and not self.machine_group:
+ self.read_machine_map()
+
+ if not group:
+ group = self.machine_group.get(hostname, hostname)
+ if group == hostname and job.machine_owner:
+ group = job.machine_owner + '/' + hostname
+
+ self.insert('machines',
+ { 'hostname' : hostname ,
+ 'machine_group' : group ,
+ 'owner' : job.machine_owner },
+ commit=commit)
+ return self.get_last_autonumber_value()
+
+
+ def lookup_machine(self, hostname):
+ where = { 'hostname' : hostname }
+ rows = self.select('machine_idx', 'machines', where)
+ if rows:
+ return rows[0][0]
+ else:
+ return None
+
+
+ def lookup_kernel(self, kernel):
+ rows = self.select('kernel_idx', 'kernels',
+ {'kernel_hash':kernel.kernel_hash})
+ if rows:
+ return rows[0][0]
+ else:
+ return None
+
+
+ def insert_kernel(self, kernel, commit = None):
+ kver = self.lookup_kernel(kernel)
+ if kver:
+ return kver
+
+ # If this kernel has any significant patches, append their hash
+ # as a differentiator.
+ printable = kernel.base
+ patch_count = 0
+ for patch in kernel.patches:
+ match = re.match(r'.*(-mm[0-9]+|-git[0-9]+)\.(bz2|gz)$',
+ patch.reference)
+ if not match:
+ patch_count += 1
+
+ self.insert('kernels',
+ {'base':kernel.base,
+ 'kernel_hash':kernel.kernel_hash,
+ 'printable':printable},
+ commit=commit)
+ kver = self.get_last_autonumber_value()
+
+ if patch_count > 0:
+ printable += ' p%d' % (kver)
+ self.update('kernels',
+ {'printable':printable},
+ {'kernel_idx':kver})
+
+ for patch in kernel.patches:
+ self.insert_patch(kver, patch, commit=commit)
+ return kver
+
+
+ def insert_patch(self, kver, patch, commit = None):
+ print patch.reference
+ name = os.path.basename(patch.reference)[:80]
+ self.insert('patches',
+ {'kernel_idx': kver,
+ 'name':name,
+ 'url':patch.reference,
+ 'hash':patch.hash},
+ commit=commit)
+
+
+ def find_test(self, job_idx, subdir):
+ where = { 'job_idx':job_idx , 'subdir':subdir }
+ rows = self.select('test_idx', 'tests', where)
+ if rows:
+ return rows[0][0]
+ else:
+ return None
+
+
+ def find_tests(self, job_idx):
+ where = { 'job_idx':job_idx }
+ rows = self.select('test_idx', 'tests', where)
+ if rows:
+ return [row[0] for row in rows]
+ else:
+ return []
+
+
+ def find_job(self, tag):
+ rows = self.select('job_idx', 'jobs', {'tag': tag})
+ if rows:
+ return rows[0][0]
+ else:
+ return None
def _get_db_type():
- """Get the database type name to use from the global config."""
- get_value = global_config.global_config.get_config_value
- return "db_" + get_value("TKO", "db_type", default="mysql")
+ """Get the database type name to use from the global config."""
+ get_value = global_config.global_config.get_config_value
+ return "db_" + get_value("TKO", "db_type", default="mysql")
def _get_error_class(class_name):
- """Retrieves the appropriate error class by name from the database
- module."""
- db_module = __import__("autotest_lib.tko." + _get_db_type(),
- globals(), locals(), ["driver"])
- return getattr(db_module.driver, class_name)
+ """Retrieves the appropriate error class by name from the database
+ module."""
+ db_module = __import__("autotest_lib.tko." + _get_db_type(),
+ globals(), locals(), ["driver"])
+ return getattr(db_module.driver, class_name)
def db(*args, **dargs):
- """Creates an instance of the database class with the arguments
- provided in args and dargs, using the database type specified by
- the global configuration (defaulting to mysql)."""
- db_type = _get_db_type()
- db_module = __import__("autotest_lib.tko." + db_type, globals(),
- locals(), [db_type])
- db = getattr(db_module, db_type)(*args, **dargs)
- return db
+ """Creates an instance of the database class with the arguments
+ provided in args and dargs, using the database type specified by
+ the global configuration (defaulting to mysql)."""
+ db_type = _get_db_type()
+ db_module = __import__("autotest_lib.tko." + db_type, globals(),
+ locals(), [db_type])
+ db = getattr(db_module, db_type)(*args, **dargs)
+ return db
diff --git a/tko/db_mysql.py b/tko/db_mysql.py
index da59ecd..13c2037 100644
--- a/tko/db_mysql.py
+++ b/tko/db_mysql.py
@@ -2,6 +2,6 @@
import db
class db_mysql(db.db_sql):
- def connect(self, host, database, user, password):
- return driver.connect(host=host, user=user,
- passwd=password, db=database)
+ def connect(self, host, database, user, password):
+ return driver.connect(host=host, user=user,
+ passwd=password, db=database)
diff --git a/tko/db_postgres.py b/tko/db_postgres.py
index 166ee02..31834fa 100644
--- a/tko/db_postgres.py
+++ b/tko/db_postgres.py
@@ -2,6 +2,6 @@
import db
class db_postgres(db.db_sql):
- def connect(self, host, database, user, password):
- return driver.connect("dbname=%s user=%s password=%s" % \
- (database, user, password))
+ def connect(self, host, database, user, password):
+ return driver.connect("dbname=%s user=%s password=%s" % \
+ (database, user, password))
diff --git a/tko/delete_job_results b/tko/delete_job_results
index 4977988..f747510 100644
--- a/tko/delete_job_results
+++ b/tko/delete_job_results
@@ -7,15 +7,14 @@
usage = "usage: delete_job_results <job tag>"
if len(sys.argv) < 2:
- print usage
- sys.exit(2)
+ print usage
+ sys.exit(2)
tag = sys.argv[1]
resultsdir = os.path.abspath(os.path.join(thisdir, '../results', tag))
db = db.db()
if not db.find_job(tag):
- raise "Job tag %s does not exist in database" % tag
+ raise "Job tag %s does not exist in database" % tag
db.delete_job(tag)
shutil.rmtree(resultsdir)
-
diff --git a/tko/display.py b/tko/display.py
index ccf1923..f233fb8 100755
--- a/tko/display.py
+++ b/tko/display.py
@@ -2,23 +2,23 @@
import frontend, reason_qualifier
color_map = {
- 'header' : '#e5e5c0', # greyish yellow
- 'blank' : '#ffffff', # white
- 'plain_text' : '#e5e5c0', # greyish yellow
- 'borders' : '#bbbbbb', # grey
- 'white' : '#ffffff', # white
- 'green' : '#66ff66', # green
- 'yellow' : '#fffc00', # yellow
- 'red' : '#ff6666', # red
+ 'header' : '#e5e5c0', # greyish yellow
+ 'blank' : '#ffffff', # white
+ 'plain_text' : '#e5e5c0', # greyish yellow
+ 'borders' : '#bbbbbb', # grey
+ 'white' : '#ffffff', # white
+ 'green' : '#66ff66', # green
+ 'yellow' : '#fffc00', # yellow
+ 'red' : '#ff6666', # red
- #### additional keys for shaded color of a box
- #### depending on stats of GOOD/FAIL
- '100pct' : '#32CD32', # green, 94% to 100% of success
- '95pct' : '#c0ff80', # step towards yellow, 88% to 94% of success
- '90pct' : '#ffff00', # yellow, 82% to 88%
- '85pct' : '#ffc040', # 76% to 82%
- '75pct' : '#ff4040', # red, 1% to 76%
- '0pct' : '#d080d0', # violet, <1% of success
+ #### additional keys for shaded color of a box
+ #### depending on stats of GOOD/FAIL
+ '100pct' : '#32CD32', # green, 94% to 100% of success
+ '95pct' : '#c0ff80', # step towards yellow, 88% to 94% of success
+ '90pct' : '#ffff00', # yellow, 82% to 88%
+ '85pct' : '#ffc040', # 76% to 82%
+ '75pct' : '#ff4040', # red, 1% to 76%
+ '0pct' : '#d080d0', # violet, <1% of success
}
@@ -26,316 +26,315 @@
def set_brief_mode():
- global _brief_mode
- _brief_mode = True
+ global _brief_mode
+ _brief_mode = True
def is_brief_mode():
- return _brief_mode
+ return _brief_mode
def color_keys_row():
- """ Returns one row table with samples of 'NNpct' colors
- defined in the color_map
- and numbers of corresponding %%
- """
- ### This function does not require maintenance in case of
- ### color_map augmenting - as long as
- ### color keys for box shading have names that end with 'pct'
- keys = filter(lambda key: key.endswith('pct'), color_map.keys())
- def num_pct(key):
- return int(key.replace('pct',''))
- keys.sort(key=num_pct)
- html = ''
- for key in keys:
- html+= "\t\t\t<td bgcolor =%s> </td>\n"\
- % color_map[key]
- hint = key.replace('pct',' %')
- if hint[0]<>'0': ## anything but 0 %
- hint = 'to ' + hint
- html+= "\t\t\t<td> %s </td>\n" % hint
+ """ Returns one row table with samples of 'NNpct' colors
+ defined in the color_map
+ and numbers of corresponding %%
+ """
+ ### This function does not require maintenance in case of
+ ### color_map augmenting - as long as
+ ### color keys for box shading have names that end with 'pct'
+ keys = filter(lambda key: key.endswith('pct'), color_map.keys())
+ def num_pct(key):
+ return int(key.replace('pct',''))
+ keys.sort(key=num_pct)
+ html = ''
+ for key in keys:
+ html+= "\t\t\t<td bgcolor =%s> </td>\n"\
+ % color_map[key]
+ hint = key.replace('pct',' %')
+ if hint[0]<>'0': ## anything but 0 %
+ hint = 'to ' + hint
+ html+= "\t\t\t<td> %s </td>\n" % hint
- html = """
+ html = """
<table width = "500" border="0" cellpadding="2" cellspacing="2">\n
- <tbody>\n
- <tr>\n
+ <tbody>\n
+ <tr>\n
%s
- </tr>\n
- </tbody>
+ </tr>\n
+ </tbody>
</table><br>
""" % html
- return html
+ return html
def calculate_html(link, data, tooltip=None, row_label=None, column_label=None):
- if not is_brief_mode():
- hover_text = '%s:%s' % (row_label, column_label)
- if data: ## cell is not empty
- hover_text += '<br>%s' % tooltip
- else:
- ## avoid "None" printed in empty cells
- data = ' '
- html = ('<center><a class="info" href="%s">'
- '%s<span>%s</span></a></center>' %
- (link, data, hover_text))
- return html
- # no hover if embedded into AFE but links shall redirect to new window
- if data: ## cell is non-empty
- html = '<a href="%s" target=NEW>%s</a>' % (link, data)
- return html
- else: ## cell is empty
- return ' '
+ if not is_brief_mode():
+ hover_text = '%s:%s' % (row_label, column_label)
+ if data: ## cell is not empty
+ hover_text += '<br>%s' % tooltip
+ else:
+ ## avoid "None" printed in empty cells
+ data = ' '
+ html = ('<center><a class="info" href="%s">'
+ '%s<span>%s</span></a></center>' %
+ (link, data, hover_text))
+ return html
+ # no hover if embedded into AFE but links shall redirect to new window
+ if data: ## cell is non-empty
+ html = '<a href="%s" target=NEW>%s</a>' % (link, data)
+ return html
+ else: ## cell is empty
+ return ' '
class box:
- def __init__(self, data, color_key = None, header = False, link = None,
- tooltip = None, row_label = None, column_label = None):
-
- ## in brief mode we display grid table only and nothing more
- ## - mouse hovering feature is stubbed in brief mode
- ## - any link opens new window or tab
+ def __init__(self, data, color_key = None, header = False, link = None,
+ tooltip = None, row_label = None, column_label = None):
- redirect = ""
- if is_brief_mode():
- ## we are acting under AFE
- ## any link shall open new window
- redirect = " target=NEW"
-
- if data:
- data = "<tt>%s</tt>" % data
-
- if link and not tooltip:
- ## FlipAxis corner, column and row headers
- self.data = ('<a href="%s"%s>%s</a>' %
- (link, redirect, data))
- else:
- self.data = calculate_html(link, data, tooltip,
- row_label, column_label)
+ ## in brief mode we display grid table only and nothing more
+ ## - mouse hovering feature is stubbed in brief mode
+ ## - any link opens new window or tab
- if color_map.has_key(color_key):
- self.color = color_map[color_key]
- elif header:
- self.color = color_map['header']
- elif data:
- self.color = color_map['plain_text']
- else:
- self.color = color_map['blank']
- self.header = header
+ redirect = ""
+ if is_brief_mode():
+ ## we are acting under AFE
+ ## any link shall open new window
+ redirect = " target=NEW"
+
+ if data:
+ data = "<tt>%s</tt>" % data
+
+ if link and not tooltip:
+ ## FlipAxis corner, column and row headers
+ self.data = ('<a href="%s"%s>%s</a>' %
+ (link, redirect, data))
+ else:
+ self.data = calculate_html(link, data, tooltip,
+ row_label, column_label)
+
+ if color_map.has_key(color_key):
+ self.color = color_map[color_key]
+ elif header:
+ self.color = color_map['header']
+ elif data:
+ self.color = color_map['plain_text']
+ else:
+ self.color = color_map['blank']
+ self.header = header
- def html(self):
- if self.data:
- data = self.data
- else:
- data = ' '
+ def html(self):
+ if self.data:
+ data = self.data
+ else:
+ data = ' '
- if self.header:
- box_html = 'th'
- else:
- box_html = 'td'
+ if self.header:
+ box_html = 'th'
+ else:
+ box_html = 'td'
- return "<%s bgcolor=%s>%s</%s>" % \
- (box_html, self.color, data, box_html)
+ return "<%s bgcolor=%s>%s</%s>" % \
+ (box_html, self.color, data, box_html)
def grade_from_status(status):
- # % of goodness
- # GOOD (6) -> 1
- # TEST_NA (8) is not counted
- # ## If the test doesn't PASS, it FAILS
- # else -> 0
+ # % of goodness
+ # GOOD (6) -> 1
+ # TEST_NA (8) is not counted
+ # ## If the test doesn't PASS, it FAILS
+ # else -> 0
- if status == 6:
- return 1.0
- else:
- return 0.0
+ if status == 6:
+ return 1.0
+ else:
+ return 0.0
def average_grade_from_status_count(status_count):
- average_grade = 0
- total_count = 0
- for key in status_count.keys():
- if key != 8: # TEST_NA status
- average_grade += (grade_from_status(key)
- * status_count[key])
- total_count += status_count[key]
- if total_count != 0:
- average_grade = average_grade / total_count
- else:
- average_grade = 0.0
- return average_grade
+ average_grade = 0
+ total_count = 0
+ for key in status_count.keys():
+ if key != 8: # TEST_NA status
+ average_grade += (grade_from_status(key)
+ * status_count[key])
+ total_count += status_count[key]
+ if total_count != 0:
+ average_grade = average_grade / total_count
+ else:
+ average_grade = 0.0
+ return average_grade
def shade_from_status_count(status_count):
- if not status_count:
- return None
-
- ## average_grade defines a shade of the box
- ## 0 -> violet
- ## 0.76 -> red
- ## 0.88-> yellow
- ## 1.0 -> green
- average_grade = average_grade_from_status_count(status_count)
-
- ## find the appropriate keyword from color_map
- if average_grade<0.01:
- shade = '0pct'
- elif average_grade<0.75:
- shade = '75pct'
- elif average_grade<0.85:
- shade = '85pct'
- elif average_grade<0.90:
- shade = '90pct'
- elif average_grade<0.95:
- shade = '95pct'
- else:
- shade = '100pct'
-
- return shade
+ if not status_count:
+ return None
+
+ ## average_grade defines a shade of the box
+ ## 0 -> violet
+ ## 0.76 -> red
+ ## 0.88-> yellow
+ ## 1.0 -> green
+ average_grade = average_grade_from_status_count(status_count)
+
+ ## find the appropriate keyword from color_map
+ if average_grade<0.01:
+ shade = '0pct'
+ elif average_grade<0.75:
+ shade = '75pct'
+ elif average_grade<0.85:
+ shade = '85pct'
+ elif average_grade<0.90:
+ shade = '90pct'
+ elif average_grade<0.95:
+ shade = '95pct'
+ else:
+ shade = '100pct'
+
+ return shade
def status_html(db, box_data, shade):
- """
- status_count: dict mapping from status (integer key) to count
- e.g. { 'GOOD' : 4, 'FAIL' : 1 }
- """
- status_count = box_data.status_count
- if 6 in status_count.keys():
- html = "%d / %d " \
- %(status_count[6],sum(status_count.values()))
- else:
- html = "%d / %d " % \
- (0, sum(status_count.values()))
+ """
+ status_count: dict mapping from status (integer key) to count
+ e.g. { 'GOOD' : 4, 'FAIL' : 1 }
+ """
+ status_count = box_data.status_count
+ if 6 in status_count.keys():
+ html = "%d / %d " \
+ %(status_count[6],sum(status_count.values()))
+ else:
+ html = "%d / %d " % \
+ (0, sum(status_count.values()))
- if box_data.reasons_list:
- reasons_list = box_data.reasons_list
- aggregated_reasons_list = \
- reason_qualifier.aggregate_reason_fields(reasons_list)
- for reason in aggregated_reasons_list:
- ## a bit more postprocessing
- ## to make it look nicer in a cell
- ## in future: render a subtable within the cell
- reason = reason.replace('<br>','\n')
- reason = reason.replace('<','[').replace('>',']')
- reason = reason.replace('|','\n').replace('&',' AND ')
- reason = reason.replace('\n','<br>')
- html += '<br>' + reason
+ if box_data.reasons_list:
+ reasons_list = box_data.reasons_list
+ aggregated_reasons_list = \
+ reason_qualifier.aggregate_reason_fields(reasons_list)
+ for reason in aggregated_reasons_list:
+ ## a bit more postprocessing
+ ## to make it look nicer in a cell
+ ## in future: render a subtable within the cell
+ reason = reason.replace('<br>','\n')
+ reason = reason.replace('<','[').replace('>',']')
+ reason = reason.replace('|','\n').replace('&',' AND ')
+ reason = reason.replace('\n','<br>')
+ html += '<br>' + reason
- tooltip = ""
- for status in sorted(status_count.keys(), reverse = True):
- status_word = db.status_word[status]
- tooltip += "%d %s " % (status_count[status], status_word)
- return (html,tooltip)
+ tooltip = ""
+ for status in sorted(status_count.keys(), reverse = True):
+ status_word = db.status_word[status]
+ tooltip += "%d %s " % (status_count[status], status_word)
+ return (html,tooltip)
def status_count_box(db, tests, link = None):
- """
- Display a ratio of total number of GOOD tests
- to total number of all tests in the group of tests.
- More info (e.g. 10 GOOD, 2 WARN, 3 FAIL) is in tooltips
- """
- if not tests:
- return box(None, None)
+ """
+ Display a ratio of total number of GOOD tests
+ to total number of all tests in the group of tests.
+ More info (e.g. 10 GOOD, 2 WARN, 3 FAIL) is in tooltips
+ """
+ if not tests:
+ return box(None, None)
- status_count = {}
- for test in tests:
- count = status_count.get(test.status_num, 0)
- status_count[test.status_num] = count + 1
- return status_precounted_box(db, status_count, link)
+ status_count = {}
+ for test in tests:
+ count = status_count.get(test.status_num, 0)
+ status_count[test.status_num] = count + 1
+ return status_precounted_box(db, status_count, link)
def status_precounted_box(db, box_data, link = None,
- x_label = None, y_label = None):
- """
- Display a ratio of total number of GOOD tests
- to total number of all tests in the group of tests.
- More info (e.g. 10 GOOD, 2 WARN, 3 FAIL) is in tooltips
- """
- status_count = box_data.status_count
- if not status_count:
- return box(None, None)
-
- shade = shade_from_status_count(status_count)
- html,tooltip = status_html(db, box_data, shade)
- precounted_box = box(html, shade, False, link, tooltip,
- x_label, y_label)
- return precounted_box
+ x_label = None, y_label = None):
+ """
+ Display a ratio of total number of GOOD tests
+ to total number of all tests in the group of tests.
+ More info (e.g. 10 GOOD, 2 WARN, 3 FAIL) is in tooltips
+ """
+ status_count = box_data.status_count
+ if not status_count:
+ return box(None, None)
+
+ shade = shade_from_status_count(status_count)
+ html,tooltip = status_html(db, box_data, shade)
+ precounted_box = box(html, shade, False, link, tooltip,
+ x_label, y_label)
+ return precounted_box
def print_table(matrix):
- """
- matrix: list of lists of boxes, giving a matrix of data
- Each of the inner lists is a row, not a column.
+ """
+ matrix: list of lists of boxes, giving a matrix of data
+ Each of the inner lists is a row, not a column.
- Display the given matrix of data as a table.
- """
+ Display the given matrix of data as a table.
+ """
- print ('<table bgcolor="%s" cellspacing="1" cellpadding="5" '
- 'style="margin-right: 200px;">') % (
- color_map['borders'])
- for row in matrix:
- print '<tr>'
- for element in row:
- print element.html()
- print '</tr>'
- print '</table>'
+ print ('<table bgcolor="%s" cellspacing="1" cellpadding="5" '
+ 'style="margin-right: 200px;">') % (
+ color_map['borders'])
+ for row in matrix:
+ print '<tr>'
+ for element in row:
+ print element.html()
+ print '</tr>'
+ print '</table>'
def sort_tests(tests):
- kernel_order = ['patch', 'config', 'build', 'mkinitrd', 'install']
+ kernel_order = ['patch', 'config', 'build', 'mkinitrd', 'install']
- results = []
- for kernel_op in kernel_order:
- test = 'kernel.' + kernel_op
- if tests.count(test):
- results.append(test)
- tests.remove(test)
- if tests.count('boot'):
- results.append('boot')
- tests.remove('boot')
- return results + sorted(tests)
+ results = []
+ for kernel_op in kernel_order:
+ test = 'kernel.' + kernel_op
+ if tests.count(test):
+ results.append(test)
+ tests.remove(test)
+ if tests.count('boot'):
+ results.append('boot')
+ tests.remove('boot')
+ return results + sorted(tests)
def print_main_header():
- hover_css="""\
+ hover_css="""\
a.info{
- position:relative; /*this is the key*/
- z-index:1
- color:#000;
- text-decoration:none}
+position:relative; /*this is the key*/
+z-index:1
+color:#000;
+text-decoration:none}
a.info:hover{z-index:25;}
a.info span{display: none}
a.info:hover span{ /*the span will display just on :hover state*/
- display:block;
- position:absolute;
- top:1em; left:1em;
- min-width: 100px;
- overflow: visible;
- border:1px solid #036;
- background-color:#fff; color:#000;
- text-align: left
+display:block;
+position:absolute;
+top:1em; left:1em;
+min-width: 100px;
+overflow: visible;
+border:1px solid #036;
+background-color:#fff; color:#000;
+text-align: left
}
"""
- print '<head><style type="text/css">'
- print 'a { text-decoration: none }'
- print hover_css
- print '</style></head>'
- print '<h2>'
- print '<a href="compose_query.cgi">Functional</a>'
- print '   '
- print '<a href="machine_benchmark.cgi">Performance</a>'
- print '   '
- print '<a href="http://test.kernel.org/autotest">[about Autotest]</a>'
- print '</h2><p>'
+ print '<head><style type="text/css">'
+ print 'a { text-decoration: none }'
+ print hover_css
+ print '</style></head>'
+ print '<h2>'
+ print '<a href="compose_query.cgi">Functional</a>'
+ print '   '
+ print '<a href="machine_benchmark.cgi">Performance</a>'
+ print '   '
+ print '<a href="http://test.kernel.org/autotest">[about Autotest]</a>'
+ print '</h2><p>'
def group_name(group):
- name = re.sub('_', '<br>', group.name)
- if re.search('/', name):
- (owner, machine) = name.split('/', 1)
- name = owner + '<br>' + machine
- return name
-
+ name = re.sub('_', '<br>', group.name)
+ if re.search('/', name):
+ (owner, machine) = name.split('/', 1)
+ name = owner + '<br>' + machine
+ return name
diff --git a/tko/frontend.py b/tko/frontend.py
index 00e7fed..45de298 100755
--- a/tko/frontend.py
+++ b/tko/frontend.py
@@ -10,299 +10,299 @@
root_url_file = os.path.join(tko, '.root_url')
if os.path.exists(root_url_file):
- html_root = open(root_url_file, 'r').readline().rstrip()
+ html_root = open(root_url_file, 'r').readline().rstrip()
else:
- html_root = '/results/'
+ html_root = '/results/'
class status_cell:
- # One cell in the matrix of status data.
- def __init__(self):
- # Count is a dictionary: status -> count of tests with status
- self.status_count = {}
- self.reasons_list = []
- self.job_tag = None
- self.job_tag_count = 0
+ # One cell in the matrix of status data.
+ def __init__(self):
+ # Count is a dictionary: status -> count of tests with status
+ self.status_count = {}
+ self.reasons_list = []
+ self.job_tag = None
+ self.job_tag_count = 0
- def add(self, status, count, job_tags, reasons = None):
- assert count > 0
+ def add(self, status, count, job_tags, reasons = None):
+ assert count > 0
- self.job_tag = job_tags
- self.job_tag_count += count
- if self.job_tag_count > 1:
- self.job_tag = None
-
- self.status_count[status] = count
- ### status == 6 means 'GOOD'
- if status != 6:
- ## None implies sorting problems and extra CRs in a cell
- if reasons:
- self.reasons_list.append(reasons)
+ self.job_tag = job_tags
+ self.job_tag_count += count
+ if self.job_tag_count > 1:
+ self.job_tag = None
+
+ self.status_count[status] = count
+ ### status == 6 means 'GOOD'
+ if status != 6:
+ ## None implies sorting problems and extra CRs in a cell
+ if reasons:
+ self.reasons_list.append(reasons)
class status_data:
- def __init__(self, sql_rows, x_field, y_field, query_reasons = False):
- data = {}
- y_values = set()
+ def __init__(self, sql_rows, x_field, y_field, query_reasons = False):
+ data = {}
+ y_values = set()
- # Walk through the query, filing all results by x, y info
- for row in sql_rows:
- if query_reasons:
- (x,y, status, count, job_tags, reasons) = row
- else:
- (x,y, status, count, job_tags) = row
- reasons = None
- if not data.has_key(x):
- data[x] = {}
- if not data[x].has_key(y):
- y_values.add(y)
- data[x][y] = status_cell()
- data[x][y].add(status, count, job_tags, reasons)
+ # Walk through the query, filing all results by x, y info
+ for row in sql_rows:
+ if query_reasons:
+ (x,y, status, count, job_tags, reasons) = row
+ else:
+ (x,y, status, count, job_tags) = row
+ reasons = None
+ if not data.has_key(x):
+ data[x] = {}
+ if not data[x].has_key(y):
+ y_values.add(y)
+ data[x][y] = status_cell()
+ data[x][y].add(status, count, job_tags, reasons)
- # 2-d hash of data - [x-value][y-value]
- self.data = data
- # List of possible columns (x-values)
- self.x_values = smart_sort(data.keys(), x_field)
- # List of possible rows (y-values)
- self.y_values = smart_sort(list(y_values), y_field)
- nCells = len(self.y_values)*len(self.x_values)
- if nCells > MAX_CELLS:
- msg = 'Exceeded allowed number of cells in a table'
- raise db.MySQLTooManyRows(msg)
-
+ # 2-d hash of data - [x-value][y-value]
+ self.data = data
+ # List of possible columns (x-values)
+ self.x_values = smart_sort(data.keys(), x_field)
+ # List of possible rows (y-values)
+ self.y_values = smart_sort(list(y_values), y_field)
+ nCells = len(self.y_values)*len(self.x_values)
+ if nCells > MAX_CELLS:
+ msg = 'Exceeded allowed number of cells in a table'
+ raise db.MySQLTooManyRows(msg)
+
def get_matrix_data(db_obj, x_axis, y_axis, where = None,
- query_reasons = False):
- # Searches on the test_view table - x_axis and y_axis must both be
- # column names in that table.
- x_field = test_view_field_dict[x_axis]
- y_field = test_view_field_dict[y_axis]
- query_fields_list = [x_field, y_field, 'status','COUNT(status)']
- query_fields_list.append("LEFT(GROUP_CONCAT(job_tag),100)")
- if query_reasons:
- query_fields_list.append(
- "LEFT(GROUP_CONCAT(DISTINCT reason SEPARATOR '|'),500)"
- )
- fields = ','.join(query_fields_list)
+ query_reasons = False):
+ # Searches on the test_view table - x_axis and y_axis must both be
+ # column names in that table.
+ x_field = test_view_field_dict[x_axis]
+ y_field = test_view_field_dict[y_axis]
+ query_fields_list = [x_field, y_field, 'status','COUNT(status)']
+ query_fields_list.append("LEFT(GROUP_CONCAT(job_tag),100)")
+ if query_reasons:
+ query_fields_list.append(
+ "LEFT(GROUP_CONCAT(DISTINCT reason SEPARATOR '|'),500)"
+ )
+ fields = ','.join(query_fields_list)
- group_by = '%s, %s, status' % (x_field, y_field)
- rows = db_obj.select(fields, 'test_view',
- where=where, group_by=group_by, max_rows = MAX_RECORDS)
- return status_data(rows, x_field, y_field, query_reasons)
+ group_by = '%s, %s, status' % (x_field, y_field)
+ rows = db_obj.select(fields, 'test_view',
+ where=where, group_by=group_by, max_rows = MAX_RECORDS)
+ return status_data(rows, x_field, y_field, query_reasons)
# Dictionary mapping the short reference names exposed to users to
# field names in test_view, used for fast lookups
test_view_field_dict = {
- 'kernel' : 'kernel_printable',
- 'hostname' : 'machine_hostname',
- 'test' : 'test',
- 'label' : 'job_label',
- 'machine_group' : 'machine_group',
- 'reason' : 'reason',
- 'tag' : 'job_tag',
- 'user' : 'job_username',
- 'status' : 'status_word',
- 'time' : 'test_finished_time',
- 'time_daily' : 'DATE(test_finished_time)'
+ 'kernel' : 'kernel_printable',
+ 'hostname' : 'machine_hostname',
+ 'test' : 'test',
+ 'label' : 'job_label',
+ 'machine_group' : 'machine_group',
+ 'reason' : 'reason',
+ 'tag' : 'job_tag',
+ 'user' : 'job_username',
+ 'status' : 'status_word',
+ 'time' : 'test_finished_time',
+ 'time_daily' : 'DATE(test_finished_time)'
}
def smart_sort(list, field):
- if field == 'kernel_printable':
- def kernel_encode(kernel):
- return kernel_versions.version_encode(kernel)
- list.sort(key = kernel_encode, reverse = True)
- return list
- ## old records may contain time=None
- ## make None comparable with timestamp datetime or date
- elif field == 'test_finished_time':
- def convert_None_to_datetime(date_time):
- if not date_time:
- return datetime.datetime(1970, 1, 1, 0, 0, 0)
- else:
- return date_time
- list = map(convert_None_to_datetime, list)
- elif field == 'DATE(test_finished_time)':
- def convert_None_to_date(date):
- if not date:
- return datetime.date(1970, 1, 1)
- else:
- return date
- list = map(convert_None_to_date, list)
- list.sort()
- return list
+ if field == 'kernel_printable':
+ def kernel_encode(kernel):
+ return kernel_versions.version_encode(kernel)
+ list.sort(key = kernel_encode, reverse = True)
+ return list
+ ## old records may contain time=None
+ ## make None comparable with timestamp datetime or date
+ elif field == 'test_finished_time':
+ def convert_None_to_datetime(date_time):
+ if not date_time:
+ return datetime.datetime(1970, 1, 1, 0, 0, 0)
+ else:
+ return date_time
+ list = map(convert_None_to_datetime, list)
+ elif field == 'DATE(test_finished_time)':
+ def convert_None_to_date(date):
+ if not date:
+ return datetime.date(1970, 1, 1)
+ else:
+ return date
+ list = map(convert_None_to_date, list)
+ list.sort()
+ return list
class group:
- @classmethod
- def select(klass, db):
- """Return all possible machine groups"""
- rows = db.select('distinct machine_group', 'machines',
- 'machine_group is not null')
- groupnames = sorted([row[0] for row in rows])
- return [klass(db, groupname) for groupname in groupnames]
+ @classmethod
+ def select(klass, db):
+ """Return all possible machine groups"""
+ rows = db.select('distinct machine_group', 'machines',
+ 'machine_group is not null')
+ groupnames = sorted([row[0] for row in rows])
+ return [klass(db, groupname) for groupname in groupnames]
- def __init__(self, db, name):
- self.name = name
- self.db = db
+ def __init__(self, db, name):
+ self.name = name
+ self.db = db
- def machines(self):
- return machine.select(self.db, { 'machine_group' : self.name })
+ def machines(self):
+ return machine.select(self.db, { 'machine_group' : self.name })
- def tests(self, where = {}):
- values = [self.name]
- sql = 't inner join machines m on m.machine_idx=t.machine_idx'
- sql += ' where m.machine_group=%s'
- for key in where.keys():
- sql += ' and %s=%%s' % key
- values.append(where[key])
- return test.select_sql(self.db, sql, values)
+ def tests(self, where = {}):
+ values = [self.name]
+ sql = 't inner join machines m on m.machine_idx=t.machine_idx'
+ sql += ' where m.machine_group=%s'
+ for key in where.keys():
+ sql += ' and %s=%%s' % key
+ values.append(where[key])
+ return test.select_sql(self.db, sql, values)
class machine:
- @classmethod
- def select(klass, db, where = {}):
- fields = ['machine_idx', 'hostname', 'machine_group', 'owner']
- machines = []
- for row in db.select(','.join(fields), 'machines', where):
- machines.append(klass(db, *row))
- return machines
+ @classmethod
+ def select(klass, db, where = {}):
+ fields = ['machine_idx', 'hostname', 'machine_group', 'owner']
+ machines = []
+ for row in db.select(','.join(fields), 'machines', where):
+ machines.append(klass(db, *row))
+ return machines
- def __init__(self, db, idx, hostname, group, owner):
- self.db = db
- self.idx = idx
- self.hostname = hostname
- self.group = group
- self.owner = owner
+ def __init__(self, db, idx, hostname, group, owner):
+ self.db = db
+ self.idx = idx
+ self.hostname = hostname
+ self.group = group
+ self.owner = owner
class kernel:
- @classmethod
- def select(klass, db, where = {}):
- fields = ['kernel_idx', 'kernel_hash', 'base', 'printable']
- rows = db.select(','.join(fields), 'kernels', where)
- return [klass(db, *row) for row in rows]
+ @classmethod
+ def select(klass, db, where = {}):
+ fields = ['kernel_idx', 'kernel_hash', 'base', 'printable']
+ rows = db.select(','.join(fields), 'kernels', where)
+ return [klass(db, *row) for row in rows]
- def __init__(self, db, idx, hash, base, printable):
- self.db = db
- self.idx = idx
- self.hash = hash
- self.base = base
- self.printable = printable
- self.patches = [] # THIS SHOULD PULL IN PATCHES!
+ def __init__(self, db, idx, hash, base, printable):
+ self.db = db
+ self.idx = idx
+ self.hash = hash
+ self.base = base
+ self.printable = printable
+ self.patches = [] # THIS SHOULD PULL IN PATCHES!
class test:
- @classmethod
- def select(klass, db, where = {}, wherein = {}, distinct = False):
- fields = ['test_idx', 'job_idx', 'test', 'subdir',
- 'kernel_idx', 'status', 'reason', 'machine_idx']
- tests = []
- for row in db.select(','.join(fields), 'tests', where,
- wherein,distinct):
- tests.append(klass(db, *row))
- return tests
+ @classmethod
+ def select(klass, db, where = {}, wherein = {}, distinct = False):
+ fields = ['test_idx', 'job_idx', 'test', 'subdir',
+ 'kernel_idx', 'status', 'reason', 'machine_idx']
+ tests = []
+ for row in db.select(','.join(fields), 'tests', where,
+ wherein,distinct):
+ tests.append(klass(db, *row))
+ return tests
- @classmethod
- def select_sql(klass, db, sql, values):
- fields = ['test_idx', 'job_idx', 'test', 'subdir',
- 'kernel_idx', 'status', 'reason', 'machine_idx']
- fields = ['t.'+field for field in fields]
- rows = db.select_sql(','.join(fields), 'tests', sql, values)
- return [klass(db, *row) for row in rows]
-
-
- def __init__(self, db, test_idx, job_idx, testname, subdir, kernel_idx,
- status_num, reason, machine_idx):
- self.idx = test_idx
- self.job = job(db, job_idx)
- self.testname = testname
- self.subdir = subdir
- self.kernel_idx = kernel_idx
- self.__kernel = None
- self.__iterations = None
- self.machine_idx = machine_idx
- self.__machine = None
- self.status_num = status_num
- self.status_word = db.status_word[status_num]
- self.reason = reason
- self.db = db
- if self.subdir:
- self.url = html_root + self.job.tag + '/' + self.subdir
- else:
- self.url = None
+ @classmethod
+ def select_sql(klass, db, sql, values):
+ fields = ['test_idx', 'job_idx', 'test', 'subdir',
+ 'kernel_idx', 'status', 'reason', 'machine_idx']
+ fields = ['t.'+field for field in fields]
+ rows = db.select_sql(','.join(fields), 'tests', sql, values)
+ return [klass(db, *row) for row in rows]
- def iterations(self):
- """
- Caching function for iterations
- """
- if not self.__iterations:
- self.__iterations = {}
- # A dictionary - dict{key} = [value1, value2, ....]
- where = {'test_idx' : self.idx}
- for i in iteration.select(self.db, where):
- if self.__iterations.has_key(i.key):
- self.__iterations[i.key].append(i.value)
- else:
- self.__iterations[i.key] = [i.value]
- return self.__iterations
-
-
- def kernel(self):
- """
- Caching function for kernels
- """
- if not self.__kernel:
- where = {'kernel_idx' : self.kernel_idx}
- self.__kernel = kernel.select(self.db, where)[0]
- return self.__kernel
+ def __init__(self, db, test_idx, job_idx, testname, subdir, kernel_idx,
+ status_num, reason, machine_idx):
+ self.idx = test_idx
+ self.job = job(db, job_idx)
+ self.testname = testname
+ self.subdir = subdir
+ self.kernel_idx = kernel_idx
+ self.__kernel = None
+ self.__iterations = None
+ self.machine_idx = machine_idx
+ self.__machine = None
+ self.status_num = status_num
+ self.status_word = db.status_word[status_num]
+ self.reason = reason
+ self.db = db
+ if self.subdir:
+ self.url = html_root + self.job.tag + '/' + self.subdir
+ else:
+ self.url = None
- def machine(self):
- """
- Caching function for kernels
- """
- if not self.__machine:
- where = {'machine_idx' : self.machine_idx}
- self.__machine = machine.select(self.db, where)[0]
- return self.__machine
+ def iterations(self):
+ """
+ Caching function for iterations
+ """
+ if not self.__iterations:
+ self.__iterations = {}
+ # maps each attribute key to its list of values: {key: [v1, v2, ...]}
+ where = {'test_idx' : self.idx}
+ for i in iteration.select(self.db, where):
+ if i.key in self.__iterations:
+ self.__iterations[i.key].append(i.value)
+ else:
+ self.__iterations[i.key] = [i.value]
+ return self.__iterations
+
+
+ def kernel(self):
+ """
+ Caching function for kernels
+ """
+ if not self.__kernel:
+ where = {'kernel_idx' : self.kernel_idx}
+ self.__kernel = kernel.select(self.db, where)[0]
+ return self.__kernel
+
+
+ def machine(self):
+ """
+ Caching function for machines
+ """
+ if not self.__machine:
+ where = {'machine_idx' : self.machine_idx}
+ self.__machine = machine.select(self.db, where)[0]
+ return self.__machine
class job:
- def __init__(self, db, job_idx):
- where = {'job_idx' : job_idx}
- rows = db.select('tag, machine_idx', 'jobs', where)
- if not rows:
- return None
- (self.tag, self.machine_idx) = rows[0]
- self.job_idx = job_idx
+ def __init__(self, db, job_idx):
+ where = {'job_idx' : job_idx}
+ rows = db.select('tag, machine_idx', 'jobs', where)
+ if not rows:
+ return None
+ (self.tag, self.machine_idx) = rows[0]
+ self.job_idx = job_idx
-
+
class iteration:
- @classmethod
- def select(klass, db, where):
- fields = ['iteration', 'attribute', 'value']
- iterations = []
- rows = db.select(','.join(fields), 'iteration_result', where)
- for row in rows:
- iterations.append(klass(*row))
- return iterations
+ @classmethod
+ def select(klass, db, where):
+ fields = ['iteration', 'attribute', 'value']
+ iterations = []
+ rows = db.select(','.join(fields), 'iteration_result', where)
+ for row in rows:
+ iterations.append(klass(*row))
+ return iterations
- def __init__(self, iteration, key, value):
- self.iteration = iteration
- self.key = key
- self.value = value
+ def __init__(self, iteration, key, value):
+ self.iteration = iteration
+ self.key = key
+ self.value = value
# class patch:
-# def __init__(self):
-# self.spec = None
+# def __init__(self):
+# self.spec = None
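
Taken together, the classes above form a thin read-only layer over the tko
tables. A minimal usage sketch follows; tko_db stands for an open tko database
handle (db.db(), as used elsewhere in tko), and the FAIL filter and the output
are purely illustrative:

import db

tko_db = db.db()
# walk every FAILed test along with its lazily cached kernel/machine data
for t in test.select(tko_db, {'status': tko_db.status_idx['FAIL']}):
    print t.testname, t.status_word, t.reason
    print '    kernel: ', t.kernel().printable
    print '    machine:', t.machine().hostname
    # iterations() builds {attribute: [value, ...]} on first use
    for key, values in t.iterations().iteritems():
        print '    %s: %s' % (key, values)
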
diff --git a/tko/machine_load b/tko/machine_load
index 98b0fa9..b8c0472 100755
--- a/tko/machine_load
+++ b/tko/machine_load
@@ -4,8 +4,8 @@
db = db.db()
for line in open('machines', 'r').readlines():
- (machine, group) = line.rstrip().split()
- print 'X %s Y %s' % (machine, group)
- set = { 'machine_group' : group }
- where = { 'hostname' : machine }
- db.update('machines', set, where)
+ (machine, group) = line.rstrip().split()
+ print 'machine %s -> group %s' % (machine, group)
+ set = { 'machine_group' : group }
+ where = { 'hostname' : machine }
+ db.update('machines', set, where)
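
The script expects a whitespace-separated 'machines' file in the working
directory, one "hostname group" pair per line. A sketch of the split it
performs (the hostname and group are made up):

# a line of the 'machines' file looks like:
#     host1.example.com regression
line = 'host1.example.com regression\n'
(machine, group) = line.rstrip().split()
assert (machine, group) == ('host1.example.com', 'regression')
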
diff --git a/tko/migrations/001_initial_db.py b/tko/migrations/001_initial_db.py
index 2c19abc..065e5c5 100755
--- a/tko/migrations/001_initial_db.py
+++ b/tko/migrations/001_initial_db.py
@@ -1,31 +1,31 @@
import os
required_tables = ('machines', 'jobs', 'patches', 'tests', 'test_attributes',
- 'iteration_result')
+ 'iteration_result')
def migrate_up(manager):
- manager.execute("SHOW TABLES")
- tables = [row[0] for row in manager.cursor.fetchall()]
- db_initialized = True
- for table in required_tables:
- if table not in tables:
- db_initialized = False
- break
- if not db_initialized:
- response = raw_input(
- 'Your tko database does not appear to be initialized. Do '
- 'you want to recreate it (this will result in loss of any '
- 'existing data) (yes/No)? ')
- if response != 'yes':
- raise Exception('User has chosen to abort migration')
+ manager.execute("SHOW TABLES")
+ tables = [row[0] for row in manager.cursor.fetchall()]
+ db_initialized = True
+ for table in required_tables:
+ if table not in tables:
+ db_initialized = False
+ break
+ if not db_initialized:
+ response = raw_input(
+ 'Your tko database does not appear to be initialized. Do '
+ 'you want to recreate it (this will result in loss of any '
+ 'existing data) (yes/No)? ')
+ if response != 'yes':
+ raise Exception('User has chosen to abort migration')
- manager.execute_script(CREATE_DB_SQL)
+ manager.execute_script(CREATE_DB_SQL)
- manager.create_migrate_table()
+ manager.create_migrate_table()
def migrate_down(manager):
- manager.execute_script(DROP_DB_SQL)
+ manager.execute_script(DROP_DB_SQL)
DROP_DB_SQL = """\
@@ -48,44 +48,44 @@
CREATE_DB_SQL = DROP_DB_SQL + """\
-- status key
CREATE TABLE status (
-status_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY , -- numerical status
-word VARCHAR(10) -- status word
+status_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY , -- numerical status
+word VARCHAR(10) -- status word
) TYPE=InnoDB;
-- kernel versions
CREATE TABLE kernels (
kernel_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY,
-kernel_hash VARCHAR(35), -- Hash of base + all patches
-base VARCHAR(30), -- Base version without patches
-printable VARCHAR(100) -- Full version with patches
+kernel_hash VARCHAR(35), -- Hash of base + all patches
+base VARCHAR(30), -- Base version without patches
+printable VARCHAR(100) -- Full version with patches
) TYPE=InnoDB;
-- machines/hosts table
CREATE TABLE machines (
machine_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY,
-hostname VARCHAR(100) unique KEY, -- hostname
-machine_group VARCHAR(80), -- group name
-owner VARCHAR(80) -- owner name
+hostname VARCHAR(100) unique KEY, -- hostname
+machine_group VARCHAR(80), -- group name
+owner VARCHAR(80) -- owner name
) TYPE=InnoDB;
-- main jobs table
CREATE TABLE jobs (
-job_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY, -- index number
-tag VARCHAR(100) unique KEY, -- job key
+job_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY, -- index number
+tag VARCHAR(100) unique KEY, -- job key
label VARCHAR(100), -- job label assigned by user
KEY (label),
-username VARCHAR(80), -- user name
+username VARCHAR(80), -- user name
KEY (username),
-machine_idx INT(10) unsigned NOT NULL, -- reference to machine table
+machine_idx INT(10) unsigned NOT NULL, -- reference to machine table
KEY (machine_idx),
FOREIGN KEY (machine_idx) REFERENCES machines(machine_idx) ON DELETE CASCADE
) TYPE=InnoDB;
-- One entry per patch used, anywhere
CREATE TABLE patches (
-kernel_idx INT(10) unsigned NOT NULL, -- index number
-name VARCHAR(80), -- short name
-url VARCHAR(300), -- full URL
+kernel_idx INT(10) unsigned NOT NULL, -- index number
+name VARCHAR(80), -- short name
+url VARCHAR(300), -- full URL
hash VARCHAR(35),
KEY (kernel_idx),
FOREIGN KEY (kernel_idx) REFERENCES kernels(kernel_idx) ON DELETE CASCADE
@@ -93,18 +93,18 @@
-- test functional results
CREATE TABLE tests (
-test_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY, -- index number
-job_idx INTEGER, -- ref to job table
-test VARCHAR(30), -- name of test
-subdir VARCHAR(60), -- subdirectory name
-kernel_idx INT(10) unsigned NOT NULL, -- kernel test was AGAINST
+test_idx int(10) unsigned NOT NULL auto_increment PRIMARY KEY, -- index number
+job_idx INTEGER, -- ref to job table
+test VARCHAR(30), -- name of test
+subdir VARCHAR(60), -- subdirectory name
+kernel_idx INT(10) unsigned NOT NULL, -- kernel test was AGAINST
KEY (kernel_idx),
FOREIGN KEY (kernel_idx) REFERENCES kernels(kernel_idx) ON DELETE CASCADE,
-status int(10) unsigned NOT NULL, -- test status
+status int(10) unsigned NOT NULL, -- test status
KEY (status),
FOREIGN KEY (status) REFERENCES status(status_idx) ON DELETE CASCADE,
-reason VARCHAR(100), -- reason for test status
-machine_idx INT(10) unsigned NOT NULL, -- reference to machine table
+reason VARCHAR(100), -- reason for test status
+machine_idx INT(10) unsigned NOT NULL, -- reference to machine table
KEY (machine_idx),
FOREIGN KEY (machine_idx) REFERENCES machines(machine_idx) ON DELETE CASCADE,
invalid BOOL NOT NULL
@@ -112,49 +112,49 @@
-- test attributes (key value pairs at a test level)
CREATE TABLE test_attributes (
-test_idx int(10) unsigned NOT NULL, -- ref to test table
+test_idx int(10) unsigned NOT NULL, -- ref to test table
FOREIGN KEY (test_idx) REFERENCES tests(test_idx) ON DELETE CASCADE,
-attribute VARCHAR(30), -- attribute name (e.g. 'version')
-value VARCHAR(100), -- attribute value
+attribute VARCHAR(30), -- attribute name (e.g. 'version')
+value VARCHAR(100), -- attribute value
KEY `test_idx` (`test_idx`)
) TYPE=InnoDB;
-- test performance results
CREATE TABLE iteration_result(
-test_idx int(10) unsigned NOT NULL, -- ref to test table
+test_idx int(10) unsigned NOT NULL, -- ref to test table
FOREIGN KEY (test_idx) REFERENCES tests(test_idx) ON DELETE CASCADE,
-iteration INTEGER, -- integer
-attribute VARCHAR(30), -- attribute name (e.g. 'throughput')
-value FLOAT, -- attribute value (eg 700.1)
+iteration INTEGER, -- integer
+attribute VARCHAR(30), -- attribute name (e.g. 'throughput')
+value FLOAT, -- attribute value (eg 700.1)
KEY `test_idx` (`test_idx`)
) TYPE=InnoDB;
-- BRRD synchronization
CREATE TABLE brrd_sync (
-test_idx int(10) unsigned NOT NULL, -- ref to test table
+test_idx int(10) unsigned NOT NULL, -- ref to test table
FOREIGN KEY (test_idx) REFERENCES tests(test_idx) ON DELETE CASCADE
) TYPE=InnoDB;
-- test_view (to make life easier for people trying to mine data)
CREATE VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -163,27 +163,27 @@
-- perf_view (to make life easier for people trying to mine performance data)
CREATE VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
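
These migration scripts assume only a small manager contract: execute(), a
DB-API cursor, execute_script(), and create_migrate_table(). A rough sketch of
such a manager, assuming MySQLdb; the connection wiring and the migrate_info
DDL here are illustrative, not the real implementation:

import MySQLdb

class migration_manager(object):
    """Illustrative stand-in for the real migration manager."""
    def __init__(self, **connect_args):
        self.connection = MySQLdb.connect(**connect_args)
        self.cursor = self.connection.cursor()

    def execute(self, sql):
        return self.cursor.execute(sql)

    def execute_script(self, script):
        # naive ';' splitting is enough for the DDL scripts above
        for statement in script.split(';'):
            if statement.strip():
                self.cursor.execute(statement)

    def create_migrate_table(self):
        self.execute('CREATE TABLE IF NOT EXISTS migrate_info '
                     '(migration_version INTEGER)')
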
diff --git a/tko/migrations/002_add_job_timestamps.py b/tko/migrations/002_add_job_timestamps.py
index 8bbb83a..7cd5004 100644
--- a/tko/migrations/002_add_job_timestamps.py
+++ b/tko/migrations/002_add_job_timestamps.py
@@ -1,11 +1,11 @@
def migrate_up(manager):
- manager.execute_script(ADD_COLUMNS_SQL)
- manager.execute_script(ALTER_VIEWS_UP_SQL)
+ manager.execute_script(ADD_COLUMNS_SQL)
+ manager.execute_script(ALTER_VIEWS_UP_SQL)
def migrate_down(manager):
- manager.execute_script(DROP_COLUMNS_SQL)
- manager.execute_script(ALTER_VIEWS_DOWN_SQL)
+ manager.execute_script(DROP_COLUMNS_SQL)
+ manager.execute_script(ALTER_VIEWS_DOWN_SQL)
ADD_COLUMNS_SQL = """\
@@ -22,27 +22,27 @@
ALTER_VIEWS_UP_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -51,30 +51,30 @@
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -86,24 +86,24 @@
ALTER_VIEWS_DOWN_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -112,27 +112,27 @@
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
diff --git a/tko/migrations/003_add_test_timestamps.py b/tko/migrations/003_add_test_timestamps.py
index e9148cb..fee4cab 100644
--- a/tko/migrations/003_add_test_timestamps.py
+++ b/tko/migrations/003_add_test_timestamps.py
@@ -1,11 +1,11 @@
def migrate_up(manager):
- manager.execute_script(ADD_COLUMN_SQL)
- manager.execute_script(ALTER_VIEWS_UP_SQL)
+ manager.execute_script(ADD_COLUMN_SQL)
+ manager.execute_script(ALTER_VIEWS_UP_SQL)
def migrate_down(manager):
- manager.execute_script(DROP_COLUMN_SQL)
- manager.execute_script(ALTER_VIEWS_DOWN_SQL)
+ manager.execute_script(DROP_COLUMN_SQL)
+ manager.execute_script(ALTER_VIEWS_DOWN_SQL)
ADD_COLUMN_SQL = """\
@@ -18,28 +18,28 @@
ALTER_VIEWS_UP_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -48,31 +48,31 @@
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -84,27 +84,27 @@
ALTER_VIEWS_DOWN_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -113,30 +113,30 @@
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
diff --git a/tko/migrations/004_add_test_started.py b/tko/migrations/004_add_test_started.py
index f3d9f3d..cf91064 100644
--- a/tko/migrations/004_add_test_started.py
+++ b/tko/migrations/004_add_test_started.py
@@ -1,11 +1,11 @@
def migrate_up(manager):
- manager.execute_script(ADD_COLUMN_SQL)
- manager.execute_script(ALTER_VIEWS_UP_SQL)
+ manager.execute_script(ADD_COLUMN_SQL)
+ manager.execute_script(ALTER_VIEWS_UP_SQL)
def migrate_down(manager):
- manager.execute_script(DROP_COLUMN_SQL)
- manager.execute_script(ALTER_VIEWS_DOWN_SQL)
+ manager.execute_script(DROP_COLUMN_SQL)
+ manager.execute_script(ALTER_VIEWS_DOWN_SQL)
ADD_COLUMN_SQL = """\
@@ -18,29 +18,29 @@
ALTER_VIEWS_UP_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
tests.started_time AS test_started_time,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -49,32 +49,32 @@
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
tests.started_time AS test_started_time,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -85,28 +85,28 @@
ALTER_VIEWS_DOWN_SQL = """\
ALTER VIEW test_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
@@ -115,31 +115,31 @@
-- perf_view (to make life easier for people trying to mine performance data)
ALTER VIEW perf_view AS
-SELECT tests.test_idx,
- tests.job_idx,
- tests.test,
- tests.subdir,
- tests.kernel_idx,
- tests.status,
- tests.reason,
- tests.machine_idx,
- tests.finished_time AS test_finished_time,
- jobs.tag AS job_tag,
- jobs.label AS job_label,
- jobs.username AS job_username,
- jobs.queued_time AS job_queued_time,
- jobs.started_time AS job_started_time,
- jobs.finished_time AS job_finished_time,
- machines.hostname AS machine_hostname,
- machines.machine_group,
- machines.owner AS machine_owner,
- kernels.kernel_hash,
- kernels.base AS kernel_base,
- kernels.printable AS kernel_printable,
- status.word AS status_word,
- iteration_result.iteration,
- iteration_result.attribute AS iteration_key,
- iteration_result.value AS iteration_value
+SELECT tests.test_idx,
+ tests.job_idx,
+ tests.test,
+ tests.subdir,
+ tests.kernel_idx,
+ tests.status,
+ tests.reason,
+ tests.machine_idx,
+ tests.finished_time AS test_finished_time,
+ jobs.tag AS job_tag,
+ jobs.label AS job_label,
+ jobs.username AS job_username,
+ jobs.queued_time AS job_queued_time,
+ jobs.started_time AS job_started_time,
+ jobs.finished_time AS job_finished_time,
+ machines.hostname AS machine_hostname,
+ machines.machine_group,
+ machines.owner AS machine_owner,
+ kernels.kernel_hash,
+ kernels.base AS kernel_base,
+ kernels.printable AS kernel_printable,
+ status.word AS status_word,
+ iteration_result.iteration,
+ iteration_result.attribute AS iteration_key,
+ iteration_result.value AS iteration_value
FROM tests
INNER JOIN jobs ON jobs.job_idx = tests.job_idx
INNER JOIN machines ON machines.machine_idx = jobs.machine_idx
diff --git a/tko/migrations/005_add_testna_status.py b/tko/migrations/005_add_testna_status.py
index e22abe5..4e4e480 100644
--- a/tko/migrations/005_add_testna_status.py
+++ b/tko/migrations/005_add_testna_status.py
@@ -1,6 +1,6 @@
def migrate_up(monger):
- monger.execute("INSERT INTO status (word) values ('TEST_NA')")
+ monger.execute("INSERT INTO status (word) values ('TEST_NA')")
def migrate_down(monger):
- monger.execute("DELETE FROM status where word = 'TEST_NA'")
+ monger.execute("DELETE FROM status where word = 'TEST_NA'")
diff --git a/tko/migrations/006_add_table_query_history.py b/tko/migrations/006_add_table_query_history.py
index 41c5e29..891836f 100644
--- a/tko/migrations/006_add_table_query_history.py
+++ b/tko/migrations/006_add_table_query_history.py
@@ -1,9 +1,9 @@
def migrate_up(manager):
- manager.execute_script(ADD_TABLE_QUERY_HISTORY)
+ manager.execute_script(ADD_TABLE_QUERY_HISTORY)
def migrate_down(manager):
- manager.execute_script(DROP_TABLE_QUERY_HISTORY)
+ manager.execute_script(DROP_TABLE_QUERY_HISTORY)
ADD_TABLE_QUERY_HISTORY = """
@@ -15,4 +15,3 @@
DROP_TABLE_QUERY_HISTORY = """
DROP TABLE query_history;
"""
-
diff --git a/tko/migrations/007_widen_reason_field.py b/tko/migrations/007_widen_reason_field.py
index 7df6bc8..aae2a44 100644
--- a/tko/migrations/007_widen_reason_field.py
+++ b/tko/migrations/007_widen_reason_field.py
@@ -1,5 +1,5 @@
def migrate_up(mgr):
- mgr.execute("alter table tests modify column reason varchar(1024);")
+ mgr.execute("alter table tests modify column reason varchar(1024);")
def migrate_down(mgr):
- mgr.execute("alter table tests modify column reason varchar(100);")
+ mgr.execute("alter table tests modify column reason varchar(100);")
diff --git a/tko/migrations/008_add_iteration_attributes.py b/tko/migrations/008_add_iteration_attributes.py
index 4ef176e..87cd56b 100644
--- a/tko/migrations/008_add_iteration_attributes.py
+++ b/tko/migrations/008_add_iteration_attributes.py
@@ -1,18 +1,18 @@
def migrate_up(manager):
- manager.execute_script(CREATE_TABLE_SQL)
+ manager.execute_script(CREATE_TABLE_SQL)
def migrate_down(manager):
- manager.execute_script(DROP_TABLE_SQL)
+ manager.execute_script(DROP_TABLE_SQL)
CREATE_TABLE_SQL = """
-- test iteration attributes (key value pairs at an iteration level)
CREATE TABLE iteration_attributes (
-test_idx int(10) unsigned NOT NULL, -- ref to test table
+test_idx int(10) unsigned NOT NULL, -- ref to test table
FOREIGN KEY (test_idx) REFERENCES tests(test_idx) ON DELETE CASCADE,
-iteration INTEGER, -- integer
-attribute VARCHAR(30), -- attribute name (e.g. 'run_id')
-value VARCHAR(100), -- attribute value
+iteration INTEGER, -- integer
+attribute VARCHAR(30), -- attribute name (e.g. 'run_id')
+value VARCHAR(100), -- attribute value
KEY `test_idx` (`test_idx`)
) TYPE=InnoDB;
"""
diff --git a/tko/models.py b/tko/models.py
index b1651f3..a5e230b 100644
--- a/tko/models.py
+++ b/tko/models.py
@@ -5,136 +5,136 @@
class job(object):
- def __init__(self, dir, user, label, machine, queued_time,
- started_time, finished_time, machine_owner):
- self.dir = dir
- self.tests = []
- self.user = user
- self.label = label
- self.machine = machine
- self.queued_time = queued_time
- self.started_time = started_time
- self.finished_time = finished_time
- self.machine_owner = machine_owner
+ def __init__(self, dir, user, label, machine, queued_time,
+ started_time, finished_time, machine_owner):
+ self.dir = dir
+ self.tests = []
+ self.user = user
+ self.label = label
+ self.machine = machine
+ self.queued_time = queued_time
+ self.started_time = started_time
+ self.finished_time = finished_time
+ self.machine_owner = machine_owner
class kernel(object):
- def __init__(self, base, patches, kernel_hash):
- self.base = base
- self.patches = patches
- self.kernel_hash = kernel_hash
+ def __init__(self, base, patches, kernel_hash):
+ self.base = base
+ self.patches = patches
+ self.kernel_hash = kernel_hash
- @staticmethod
- def compute_hash(base, hashes):
- key_string = ','.join([base] + hashes)
- return md5.new(key_string).hexdigest()
+ @staticmethod
+ def compute_hash(base, hashes):
+ key_string = ','.join([base] + hashes)
+ return md5.new(key_string).hexdigest()
class test(object):
- def __init__(self, subdir, testname, status, reason, test_kernel,
- machine, started_time, finished_time, iterations,
- attributes):
- self.subdir = subdir
- self.testname = testname
- self.status = status
- self.reason = reason
- self.kernel = test_kernel
- self.machine = machine
- self.started_time = started_time
- self.finished_time = finished_time
- self.iterations = iterations
- self.attributes = attributes
+ def __init__(self, subdir, testname, status, reason, test_kernel,
+ machine, started_time, finished_time, iterations,
+ attributes):
+ self.subdir = subdir
+ self.testname = testname
+ self.status = status
+ self.reason = reason
+ self.kernel = test_kernel
+ self.machine = machine
+ self.started_time = started_time
+ self.finished_time = finished_time
+ self.iterations = iterations
+ self.attributes = attributes
- @staticmethod
- def load_iterations(keyval_path):
- """Abstract method to load a list of iterations from a keyval
- file."""
- raise NotImplementedError
+ @staticmethod
+ def load_iterations(keyval_path):
+ """Abstract method to load a list of iterations from a keyval
+ file."""
+ raise NotImplementedError
- @classmethod
- def parse_test(cls, job, subdir, testname, status, reason, test_kernel,
- started_time, finished_time):
- """Given a job and the basic metadata about the test that
- can be extracted from the status logs, parse the test
- keyval files and use it to construct a complete test
- instance."""
- tko_utils.dprint("parsing test %s %s" % (subdir, testname))
+ @classmethod
+ def parse_test(cls, job, subdir, testname, status, reason, test_kernel,
+ started_time, finished_time):
+ """Given a job and the basic metadata about the test that
+ can be extracted from the status logs, parse the test
+ keyval files and use it to construct a complete test
+ instance."""
+ tko_utils.dprint("parsing test %s %s" % (subdir, testname))
- if subdir:
- # grab iterations from the results keyval
- iteration_keyval = os.path.join(job.dir, subdir,
- "results", "keyval")
- iterations = cls.load_iterations(iteration_keyval)
+ if subdir:
+ # grab iterations from the results keyval
+ iteration_keyval = os.path.join(job.dir, subdir,
+ "results", "keyval")
+ iterations = cls.load_iterations(iteration_keyval)
- # grab test attributes from the subdir keyval
- test_keyval = os.path.join(job.dir, subdir, "keyval")
- attributes = test.load_attributes(test_keyval)
- else:
- iterations = []
- attributes = {}
+ # grab test attributes from the subdir keyval
+ test_keyval = os.path.join(job.dir, subdir, "keyval")
+ attributes = test.load_attributes(test_keyval)
+ else:
+ iterations = []
+ attributes = {}
- return cls(subdir, testname, status, reason, test_kernel,
- job.machine, started_time, finished_time,
- iterations, attributes)
+ return cls(subdir, testname, status, reason, test_kernel,
+ job.machine, started_time, finished_time,
+ iterations, attributes)
- @staticmethod
- def load_attributes(keyval_path):
- """Load the test attributes into a dictionary from a test
- keyval path. Does not assume that the path actually exists."""
- if not os.path.exists(keyval_path):
- return {}
- return utils.read_keyval(keyval_path)
+ @staticmethod
+ def load_attributes(keyval_path):
+ """Load the test attributes into a dictionary from a test
+ keyval path. Does not assume that the path actually exists."""
+ if not os.path.exists(keyval_path):
+ return {}
+ return utils.read_keyval(keyval_path)
class patch(object):
- def __init__(self, spec, reference, hash):
- self.spec = spec
- self.reference = reference
- self.hash = hash
+ def __init__(self, spec, reference, hash):
+ self.spec = spec
+ self.reference = reference
+ self.hash = hash
class iteration(object):
- def __init__(self, index, attr_keyval, perf_keyval):
- self.index = index
- self.attr_keyval = attr_keyval
- self.perf_keyval = perf_keyval
+ def __init__(self, index, attr_keyval, perf_keyval):
+ self.index = index
+ self.attr_keyval = attr_keyval
+ self.perf_keyval = perf_keyval
- @staticmethod
- def parse_line_into_dicts(line, attr_dict, perf_dict):
- """Abstract method to parse a keyval line and insert it into
- the appropriate dictionary.
- attr_dict: generic iteration attributes
- perf_dict: iteration performance results
- """
- raise NotImplementedError
+ @staticmethod
+ def parse_line_into_dicts(line, attr_dict, perf_dict):
+ """Abstract method to parse a keyval line and insert it into
+ the appropriate dictionary.
+ attr_dict: generic iteration attributes
+ perf_dict: iteration performance results
+ """
+ raise NotImplementedError
- @classmethod
- def load_from_keyval(cls, keyval_path):
- """Load a list of iterations from an iteration keyval file.
- Keyval data from separate iterations is separated by blank
- lines. Makes use of the parse_line_into_dicts method to
- actually parse the individual lines."""
- if not os.path.exists(keyval_path):
- return []
+ @classmethod
+ def load_from_keyval(cls, keyval_path):
+ """Load a list of iterations from an iteration keyval file.
+ Keyval data from separate iterations is separated by blank
+ lines. Makes use of the parse_line_into_dicts method to
+ actually parse the individual lines."""
+ if not os.path.exists(keyval_path):
+ return []
- iterations = []
- index = 1
- attr, perf = {}, {}
- for line in file(keyval_path):
- line = line.strip()
- if line:
- cls.parse_line_into_dicts(line, attr, perf)
- else:
- iterations.append(cls(index, attr, perf))
- index += 1
- attr, perf = {}, {}
- if attr or perf:
- iterations.append(cls(index, attr, perf))
- return iterations
+ iterations = []
+ index = 1
+ attr, perf = {}, {}
+ for line in file(keyval_path):
+ line = line.strip()
+ if line:
+ cls.parse_line_into_dicts(line, attr, perf)
+ else:
+ iterations.append(cls(index, attr, perf))
+ index += 1
+ attr, perf = {}, {}
+ if attr or perf:
+ iterations.append(cls(index, attr, perf))
+ return iterations
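
The iteration keyval format separates iterations with blank lines, and
parse_line_into_dicts is deliberately abstract. A sketch of a concrete
subclass; the float-versus-string split rule is invented for illustration,
and the real rules live in the version-specific parsers:

import models  # assumed importable from within the tko package

class sample_iteration(models.iteration):
    @staticmethod
    def parse_line_into_dicts(line, attr_dict, perf_dict):
        # invented rule: values that parse as floats are performance
        # results, everything else is a generic attribute
        key, value = line.split('=', 1)
        try:
            perf_dict[key.strip()] = float(value)
        except ValueError:
            attr_dict[key.strip()] = value.strip()

# Given a keyval file containing:
#     throughput=700.1
#     run_id=alpha
#
#     throughput=712.9
#     run_id=beta
# load_from_keyval() returns two iterations, indexed 1 and 2, each holding
# one perf result and one attribute.
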
diff --git a/tko/parse.py b/tko/parse.py
index 6d88d83..bdccdda 100755
--- a/tko/parse.py
+++ b/tko/parse.py
@@ -8,194 +8,194 @@
def parse_args():
- # build up our options parser and parse sys.argv
- parser = optparse.OptionParser()
- parser.add_option("-m", help="Send mail for FAILED tests",
- dest="mailit", action="store_true")
- parser.add_option("-r", help="Reparse the results of a job",
- dest="reparse", action="store_true")
- parser.add_option("-o", help="Parse a single results directory",
- dest="singledir", action="store_true")
- parser.add_option("-l", help=("Levels of subdirectories to include "
- "in the job name"),
- type="int", dest="level", default=1)
- parser.add_option("-n", help="No blocking on an existing parse",
- dest="noblock", action="store_true")
- parser.add_option("-s", help="Database server hostname",
- dest="db_host", action="store")
- parser.add_option("-u", help="Database username", dest="db_user",
- action="store")
- parser.add_option("-p", help="Database password", dest="db_pass",
- action="store")
- parser.add_option("-d", help="Database name", dest="db_name",
- action="store")
- options, args = parser.parse_args()
+ # build up our options parser and parse sys.argv
+ parser = optparse.OptionParser()
+ parser.add_option("-m", help="Send mail for FAILED tests",
+ dest="mailit", action="store_true")
+ parser.add_option("-r", help="Reparse the results of a job",
+ dest="reparse", action="store_true")
+ parser.add_option("-o", help="Parse a single results directory",
+ dest="singledir", action="store_true")
+ parser.add_option("-l", help=("Levels of subdirectories to include "
+ "in the job name"),
+ type="int", dest="level", default=1)
+ parser.add_option("-n", help="No blocking on an existing parse",
+ dest="noblock", action="store_true")
+ parser.add_option("-s", help="Database server hostname",
+ dest="db_host", action="store")
+ parser.add_option("-u", help="Database username", dest="db_user",
+ action="store")
+ parser.add_option("-p", help="Database password", dest="db_pass",
+ action="store")
+ parser.add_option("-d", help="Database name", dest="db_name",
+ action="store")
+ options, args = parser.parse_args()
- # we need a results directory
- if len(args) == 0:
- tko_utils.dprint("ERROR: at least one results directory must "
- "be provided")
- parser.print_help()
- sys.exit(1)
+ # we need a results directory
+ if len(args) == 0:
+ tko_utils.dprint("ERROR: at least one results directory must "
+ "be provided")
+ parser.print_help()
+ sys.exit(1)
- # pass the options back
- return options, args
+ # pass the options back
+ return options, args
def format_failure_message(jobname, kernel, testname, status, reason):
- format_string = "%-12s %-20s %-12s %-10s %s"
- return format_string % (jobname, kernel, testname, status, reason)
+ format_string = "%-12s %-20s %-12s %-10s %s"
+ return format_string % (jobname, kernel, testname, status, reason)
def mailfailure(jobname, job, message):
- message_lines = [""]
- message_lines.append("The following tests FAILED for this job")
- message_lines.append("http://%s/results/%s" %
- (socket.gethostname(), jobname))
- message_lines.append("")
- message_lines.append(format_failure_message("Job name", "Kernel",
- "Test name", "FAIL/WARN",
- "Failure reason"))
- message_lines.append(format_failure_message("=" * 8, "=" * 6, "=" * 8,
- "=" * 8, "=" * 14))
- message_header = "\n".join(message_lines)
+ message_lines = [""]
+ message_lines.append("The following tests FAILED for this job")
+ message_lines.append("http://%s/results/%s" %
+ (socket.gethostname(), jobname))
+ message_lines.append("")
+ message_lines.append(format_failure_message("Job name", "Kernel",
+ "Test name", "FAIL/WARN",
+ "Failure reason"))
+ message_lines.append(format_failure_message("=" * 8, "=" * 6, "=" * 8,
+ "=" * 8, "=" * 14))
+ message_header = "\n".join(message_lines)
- subject = "AUTOTEST: FAILED tests from job %s" % jobname
- mail.send("", job.user, "", subject, message_header + message)
+ subject = "AUTOTEST: FAILED tests from job %s" % jobname
+ mail.send("", job.user, "", subject, message_header + message)
def parse_one(db, jobname, path, reparse, mail_on_failure):
- """
- Parse a single job. Optionally send email on failure.
- """
- tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
- if reparse and db.find_job(jobname):
- tko_utils.dprint("! Deleting old copy of job results to "
- "reparse it")
- db.delete_job(jobname)
- if db.find_job(jobname):
- tko_utils.dprint("! Job is already parsed, done")
- return
+ """
+ Parse a single job. Optionally send email on failure.
+ """
+ tko_utils.dprint("\nScanning %s (%s)" % (jobname, path))
+ if reparse and db.find_job(jobname):
+ tko_utils.dprint("! Deleting old copy of job results to "
+ "reparse it")
+ db.delete_job(jobname)
+ if db.find_job(jobname):
+ tko_utils.dprint("! Job is already parsed, done")
+ return
- # look up the status version
- try:
- job_keyval = utils.read_keyval(path)
- except IOError, e:
- if e.errno == errno.ENOENT:
- status_version = 0
- else:
- raise
- else:
- status_version = job_keyval.get("status_version", 0)
+ # look up the status version
+ try:
+ job_keyval = utils.read_keyval(path)
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ status_version = 0
+ else:
+ raise
+ else:
+ status_version = job_keyval.get("status_version", 0)
- # parse out the job
- parser = status_lib.parser(status_version)
- job = parser.make_job(path)
- status_log = os.path.join(path, "status.log")
- if not os.path.exists(status_log):
- status_log = os.path.join(path, "status")
- if not os.path.exists(status_log):
- tko_utils.dprint("! Unable to parse job, no status file")
- return
+ # parse out the job
+ parser = status_lib.parser(status_version)
+ job = parser.make_job(path)
+ status_log = os.path.join(path, "status.log")
+ if not os.path.exists(status_log):
+ status_log = os.path.join(path, "status")
+ if not os.path.exists(status_log):
+ tko_utils.dprint("! Unable to parse job, no status file")
+ return
- # parse the status logs
- tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
- status_lines = open(status_log).readlines()
- parser.start(job)
- tests = parser.end(status_lines)
- job.tests = tests
+ # parse the status logs
+ tko_utils.dprint("+ Parsing dir=%s, jobname=%s" % (path, jobname))
+ status_lines = open(status_log).readlines()
+ parser.start(job)
+ tests = parser.end(status_lines)
+ job.tests = tests
- # check for failures
- message_lines = [""]
- for test in job.tests:
- if not test.subdir:
- continue
- tko_utils.dprint("* testname, status, reason: %s %s %s"
- % (test.subdir, test.status, test.reason))
- if test.status in ("FAIL", "WARN"):
- message_lines.append(format_failure_message(
- jobname, test.kernel.base, test.subdir,
- test.status, test.reason))
- message = "\n".join(message_lines)
+ # check for failures
+ message_lines = [""]
+ for test in job.tests:
+ if not test.subdir:
+ continue
+ tko_utils.dprint("* testname, status, reason: %s %s %s"
+ % (test.subdir, test.status, test.reason))
+ if test.status in ("FAIL", "WARN"):
+ message_lines.append(format_failure_message(
+ jobname, test.kernel.base, test.subdir,
+ test.status, test.reason))
+ message = "\n".join(message_lines)
- # send out a email report of failure
- if len(message) > 2 and mail_on_failure:
- tko_utils.dprint("Sending email report of failure on %s to %s"
- % (jobname, job.user))
- mailfailure(jobname, job, message)
+ # send out an email report of failure
+ if len(message) > 2 and mail_on_failure:
+ tko_utils.dprint("Sending email report of failure on %s to %s"
+ % (jobname, job.user))
+ mailfailure(jobname, job, message)
- # write the job into the database
- db.insert_job(jobname, job)
- db.commit()
+ # write the job into the database
+ db.insert_job(jobname, job)
+ db.commit()
def parse_path(db, path, level, reparse, mail_on_failure):
- machine_list = os.path.join(path, ".machines")
- if os.path.exists(machine_list):
- # multi-machine job
- for m in file(machine_list):
- machine = m.rstrip()
- if not machine:
- continue
- jobpath = os.path.join(path, machine)
- jobname = "%s/%s" % (os.path.basename(path), machine)
- try:
- db.run_with_retry(parse_one, db, jobname,
- path, reparse,
- mail_on_failure)
- except Exception:
- traceback.print_exc()
- continue
- else:
- # single machine job
- job_elements = path.split("/")[-level:]
- jobname = "/".join(job_elements)
- try:
- db.run_with_retry(parse_one, db, jobname, path,
- reparse, mail_on_failure)
- except Exception:
- traceback.print_exc()
+ machine_list = os.path.join(path, ".machines")
+ if os.path.exists(machine_list):
+ # multi-machine job
+ for m in file(machine_list):
+ machine = m.rstrip()
+ if not machine:
+ continue
+ jobpath = os.path.join(path, machine)
+ jobname = "%s/%s" % (os.path.basename(path), machine)
+ try:
+ db.run_with_retry(parse_one, db, jobname,
+ jobpath, reparse,
+ mail_on_failure)
+ except Exception:
+ traceback.print_exc()
+ continue
+ else:
+ # single machine job
+ job_elements = path.split("/")[-level:]
+ jobname = "/".join(job_elements)
+ try:
+ db.run_with_retry(parse_one, db, jobname, path,
+ reparse, mail_on_failure)
+ except Exception:
+ traceback.print_exc()
def main():
- options, args = parse_args()
- results_dir = os.path.abspath(args[0])
- assert os.path.exists(results_dir)
+ options, args = parse_args()
+ results_dir = os.path.abspath(args[0])
+ assert os.path.exists(results_dir)
- # build up the list of job dirs to parse
- if options.singledir:
- jobs_list = [results_dir]
- else:
- jobs_list = [os.path.join(results_dir, subdir)
- for subdir in os.listdir(results_dir)]
+ # build up the list of job dirs to parse
+ if options.singledir:
+ jobs_list = [results_dir]
+ else:
+ jobs_list = [os.path.join(results_dir, subdir)
+ for subdir in os.listdir(results_dir)]
- # build up the database
- db = tko_db.db(autocommit=False, host=options.db_host,
- user=options.db_user, password=options.db_pass,
- database=options.db_name)
+ # build up the database
+ db = tko_db.db(autocommit=False, host=options.db_host,
+ user=options.db_user, password=options.db_pass,
+ database=options.db_name)
- # parse all the jobs
- for path in jobs_list:
- lockfile = open(os.path.join(path, ".parse.lock"), "w")
- flags = fcntl.LOCK_EX
- if options.noblock:
- flags != fcntl.LOCK_NB
- try:
- fcntl.flock(lockfile, flags)
- except IOError, e:
- # was this because the lock is unavailable?
- if e.errno == errno.EWOULDBLOCK:
- lockfile.close()
- continue
- else:
- raise # something unexpected happened
- try:
- parse_path(db, path, options.level, options.reparse,
- options.mailit)
- finally:
- fcntl.flock(lockfile, fcntl.LOCK_UN)
- lockfile.close()
+ # parse all the jobs
+ for path in jobs_list:
+ lockfile = open(os.path.join(path, ".parse.lock"), "w")
+ flags = fcntl.LOCK_EX
+ if options.noblock:
+ flags |= fcntl.LOCK_NB
+ try:
+ fcntl.flock(lockfile, flags)
+ except IOError, e:
+ # was this because the lock is unavailable?
+ if e.errno == errno.EWOULDBLOCK:
+ lockfile.close()
+ continue
+ else:
+ raise # something unexpected happened
+ try:
+ parse_path(db, path, options.level, options.reparse,
+ options.mailit)
+ finally:
+ fcntl.flock(lockfile, fcntl.LOCK_UN)
+ lockfile.close()
if __name__ == "__main__":
- main()
+ main()
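
The per-job locking in main() deserves a note: each results directory is
guarded by an exclusive flock, taken non-blocking when -n is passed. The same
idiom in isolation (the function name is illustrative):

import errno
import fcntl

def try_parse_lock(path):
    """Take an exclusive, non-blocking lock on path; return the open
    file object, or None if another parser already holds the lock."""
    lockfile = open(path, 'w')
    try:
        fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError, e:
        if e.errno == errno.EWOULDBLOCK:
            lockfile.close()
            return None
        raise
    return lockfile
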
diff --git a/tko/parsers/base.py b/tko/parsers/base.py
index c0517a8..37429b6 100644
--- a/tko/parsers/base.py
+++ b/tko/parsers/base.py
@@ -4,67 +4,67 @@
class parser(object):
- """
- Abstract parser base class. Provides a generic implementation of the
- standard parser interfaction functions. The derived classes must
- implement a state_iterator method for this class to be useful.
- """
- def start(self, job):
- """ Initialize the parser for processing the results of
- 'job'."""
- # initialize all the basic parser parameters
- self.job = job
- self.finished = False
- self.line_buffer = status_lib.line_buffer()
- # create and prime the parser state machine
- self.state = self.state_iterator(self.line_buffer)
- self.state.next()
+ """
+ Abstract parser base class. Provides a generic implementation of the
+ standard parser interaction functions. The derived classes must
+ implement a state_iterator method for this class to be useful.
+ """
+ def start(self, job):
+ """ Initialize the parser for processing the results of
+ 'job'."""
+ # initialize all the basic parser parameters
+ self.job = job
+ self.finished = False
+ self.line_buffer = status_lib.line_buffer()
+ # create and prime the parser state machine
+ self.state = self.state_iterator(self.line_buffer)
+ self.state.next()
- def process_lines(self, lines):
- """ Feed 'lines' into the parser state machine, and return
- a list of all the new test results produced."""
- self.line_buffer.put_multiple(lines)
- try:
- return self.state.next()
- except StopIteration:
- msg = ("WARNING: parser was called to process status "
- "lines after it was end()ed\n"
- "Current traceback:\n" +
- traceback.format_exc() +
- "\nCurrent stack:\n" +
- "".join(traceback.format_stack()))
- tko_utils.dprint(msg)
- return []
+ def process_lines(self, lines):
+ """ Feed 'lines' into the parser state machine, and return
+ a list of all the new test results produced."""
+ self.line_buffer.put_multiple(lines)
+ try:
+ return self.state.next()
+ except StopIteration:
+ msg = ("WARNING: parser was called to process status "
+ "lines after it was end()ed\n"
+ "Current traceback:\n" +
+ traceback.format_exc() +
+ "\nCurrent stack:\n" +
+ "".join(traceback.format_stack()))
+ tko_utils.dprint(msg)
+ return []
- def end(self, lines=[]):
- """ Feed 'lines' into the parser state machine, signal to the
- state machine that no more lines are forthcoming, and then
- return a list of all the new test results produced."""
- self.line_buffer.put_multiple(lines)
- # run the state machine to clear out the buffer
- self.finished = True
- try:
- return self.state.next()
- except StopIteration:
- msg = ("WARNING: parser was end()ed multiple times\n"
- "Current traceback:\n" +
- traceback.format_exc() +
- "\nCurrent stack:\n" +
- "".join(traceback.format_stack()))
- tko_utils.dprint(msg)
- return []
+ def end(self, lines=[]):
+ """ Feed 'lines' into the parser state machine, signal to the
+ state machine that no more lines are forthcoming, and then
+ return a list of all the new test results produced."""
+ self.line_buffer.put_multiple(lines)
+ # run the state machine to clear out the buffer
+ self.finished = True
+ try:
+ return self.state.next()
+ except StopIteration:
+ msg = ("WARNING: parser was end()ed multiple times\n"
+ "Current traceback:\n" +
+ traceback.format_exc() +
+ "\nCurrent stack:\n" +
+ "".join(traceback.format_stack()))
+ tko_utils.dprint(msg)
+ return []
- @staticmethod
- def make_job(dir):
- """ Create a new instance of the job model used by the
- parser, given a results directory."""
- raise NotImplemented
+ @staticmethod
+ def make_job(dir):
+ """ Create a new instance of the job model used by the
+ parser, given a results directory."""
+ raise NotImplementedError
- def state_iterator(self, buffer):
- """ A generator method that implements the actual parser
- state machine. """
- raise NotImplemented
+ def state_iterator(self, buffer):
+ """ A generator method that implements the actual parser
+ state machine. """
+ raise NotImplementedError
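Note: to make the contract above concrete, a minimal hypothetical subclass
could look like the sketch below; state_iterator is a generator that drains
self.line_buffer and yields a list of new results each time process_lines()
or end() resumes it.

    class trivial_parser(parser):
        # toy parser: every line fed in becomes a "result" verbatim
        def state_iterator(self, buffer):
            results = []
            while not self.finished or buffer.size():
                if buffer.size() == 0:
                    # buffer drained: hand back what we have, then wait
                    # for more lines (or for end() to set finished)
                    yield results
                    results = []
                    continue
                results.append(buffer.get().strip())
            yield results    # final batch, returned by end()

start() primes the generator to its first yield, so each later resumption
sees whatever lines were buffered in the meantime.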
diff --git a/tko/parsers/version_0.py b/tko/parsers/version_0.py
index 13d3859..91d0ad7 100644
--- a/tko/parsers/version_0.py
+++ b/tko/parsers/version_0.py
@@ -6,392 +6,392 @@
class job(models.job):
- def __init__(self, dir):
- job_dict = job.load_from_dir(dir)
- super(job, self).__init__(dir, **job_dict)
+ def __init__(self, dir):
+ job_dict = job.load_from_dir(dir)
+ super(job, self).__init__(dir, **job_dict)
- @staticmethod
- def load_from_dir(dir):
- try:
- keyval = common_utils.read_keyval(dir)
- tko_utils.dprint(str(keyval))
- except Exception:
- keyval = {}
+ @staticmethod
+ def load_from_dir(dir):
+ try:
+ keyval = common_utils.read_keyval(dir)
+ tko_utils.dprint(str(keyval))
+ except Exception:
+ keyval = {}
- user = keyval.get("user", None)
- label = keyval.get("label", None)
- machine = keyval.get("hostname", None)
- if machine:
- assert "," not in machine
- queued_time = tko_utils.get_timestamp(keyval, "job_queued")
- started_time = tko_utils.get_timestamp(keyval, "job_started")
- finished_time = tko_utils.get_timestamp(keyval, "job_finished")
- machine_owner = keyval.get("owner", None)
+ user = keyval.get("user", None)
+ label = keyval.get("label", None)
+ machine = keyval.get("hostname", None)
+ if machine:
+ assert "," not in machine
+ queued_time = tko_utils.get_timestamp(keyval, "job_queued")
+ started_time = tko_utils.get_timestamp(keyval, "job_started")
+ finished_time = tko_utils.get_timestamp(keyval, "job_finished")
+ machine_owner = keyval.get("owner", None)
- if not machine:
- machine = job.find_hostname(dir)
- tko_utils.dprint("MACHINE NAME: %s" % machine)
+ if not machine:
+ machine = job.find_hostname(dir)
+ tko_utils.dprint("MACHINE NAME: %s" % machine)
- return {"user": user, "label": label, "machine": machine,
- "queued_time": queued_time,
- "started_time": started_time,
- "finished_time": finished_time,
- "machine_owner": machine_owner}
+ return {"user": user, "label": label, "machine": machine,
+ "queued_time": queued_time,
+ "started_time": started_time,
+ "finished_time": finished_time,
+ "machine_owner": machine_owner}
- @staticmethod
- def find_hostname(path):
- hostname = os.path.join(path, "sysinfo", "hostname")
- try:
- machine = open(hostname).readline().rstrip()
- return machine
- except Exception:
- tko_utils.dprint("Could not read a hostname from "
- "sysinfo/hostname")
+ @staticmethod
+ def find_hostname(path):
+ hostname = os.path.join(path, "sysinfo", "hostname")
+ try:
+ machine = open(hostname).readline().rstrip()
+ return machine
+ except Exception:
+ tko_utils.dprint("Could not read a hostname from "
+ "sysinfo/hostname")
- uname = os.path.join(path, "sysinfo", "uname_-a")
- try:
- machine = open(uname).readline().split()[1]
- return
- except Exception:
- tko_utils.dprint("Could not read a hostname from "
- "sysinfo/uname_-a")
+ uname = os.path.join(path, "sysinfo", "uname_-a")
+ try:
+ machine = open(uname).readline().split()[1]
+ return machine
+ except Exception:
+ tko_utils.dprint("Could not read a hostname from "
+ "sysinfo/uname_-a")
- raise Exception("Unable to find a machine name")
+ raise Exception("Unable to find a machine name")
class kernel(models.kernel):
- def __init__(self, job, verify_ident=None):
- kernel_dict = kernel.load_from_dir(job.dir, verify_ident)
- super(kernel, self).__init__(**kernel_dict)
+ def __init__(self, job, verify_ident=None):
+ kernel_dict = kernel.load_from_dir(job.dir, verify_ident)
+ super(kernel, self).__init__(**kernel_dict)
- @staticmethod
- def load_from_dir(dir, verify_ident=None):
- # try and load the booted kernel version
- build_log = os.path.join(dir, "build", "debug", "build_log")
- attributes = kernel.load_from_build_log(build_log)
- if not attributes:
- if verify_ident:
- base = verify_ident
- else:
- base = kernel.load_from_sysinfo(dir)
- patches = []
- hashes = []
- else:
- base, patches, hashes = attributes
- tko_utils.dprint("kernel.__init__() found kernel version %s"
- % base)
+ @staticmethod
+ def load_from_dir(dir, verify_ident=None):
+ # try and load the booted kernel version
+ build_log = os.path.join(dir, "build", "debug", "build_log")
+ attributes = kernel.load_from_build_log(build_log)
+ if not attributes:
+ if verify_ident:
+ base = verify_ident
+ else:
+ base = kernel.load_from_sysinfo(dir)
+ patches = []
+ hashes = []
+ else:
+ base, patches, hashes = attributes
+ tko_utils.dprint("kernel.__init__() found kernel version %s"
+ % base)
- # compute the kernel hash
- if base == "UNKNOWN":
- kernel_hash = "UNKNOWN"
- else:
- kernel_hash = kernel.compute_hash(base, hashes)
+ # compute the kernel hash
+ if base == "UNKNOWN":
+ kernel_hash = "UNKNOWN"
+ else:
+ kernel_hash = kernel.compute_hash(base, hashes)
- return {"base": base, "patches": patches,
- "kernel_hash": kernel_hash}
+ return {"base": base, "patches": patches,
+ "kernel_hash": kernel_hash}
- @staticmethod
- def load_from_sysinfo(path):
- for subdir in ("reboot1", ""):
- uname_path = os.path.join(path, "sysinfo", subdir,
- "uname_-a")
- if not os.path.exists(uname_path):
- continue
- uname = open(uname_path).readline().split()
- return re.sub("-autotest$", "", uname[2])
- return "UNKNOWN"
+ @staticmethod
+ def load_from_sysinfo(path):
+ for subdir in ("reboot1", ""):
+ uname_path = os.path.join(path, "sysinfo", subdir,
+ "uname_-a")
+ if not os.path.exists(uname_path):
+ continue
+ uname = open(uname_path).readline().split()
+ return re.sub("-autotest$", "", uname[2])
+ return "UNKNOWN"
- @staticmethod
- def load_from_build_log(path):
- if not os.path.exists(path):
- return None
+ @staticmethod
+ def load_from_build_log(path):
+ if not os.path.exists(path):
+ return None
- base, patches, hashes = "UNKNOWN", [], []
- for line in file(path):
- head, rest = line.split(": ", 1)
- rest = rest.split()
- if head == "BASE":
- base = rest[0]
- elif head == "PATCH":
- patches.append(patch(*rest))
- hashes.append(rest[2])
- return base, patches, hashes
+ base, patches, hashes = "UNKNOWN", [], []
+ for line in open(path):
+ head, rest = line.split(": ", 1)
+ rest = rest.split()
+ if head == "BASE":
+ base = rest[0]
+ elif head == "PATCH":
+ patches.append(patch(*rest))
+ hashes.append(rest[2])
+ return base, patches, hashes
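Note: the build_log format accepted here is one record per line, either
"BASE: <version>" or "PATCH: <spec> <reference> <hash>". A hypothetical log
containing

    BASE: 2.6.24-rc4
    PATCH: my_fix http://example.com/my_fix.patch 1234abcd

parses to base "2.6.24-rc4", a single patch with spec "my_fix", and
hashes == ["1234abcd"].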
class test(models.test):
- def __init__(self, subdir, testname, status, reason, test_kernel,
- machine, started_time, finished_time, iterations,
- attributes):
- # for backwards compatibility with the original parser
- # implementation, if there is no test version we need a NULL
- # value to be used; also, if there is a version it should
- # be terminated by a newline
- if "version" in attributes:
- attributes["version"] = str(attributes["version"])
- else:
- attributes["version"] = None
+ def __init__(self, subdir, testname, status, reason, test_kernel,
+ machine, started_time, finished_time, iterations,
+ attributes):
+ # for backwards compatibility with the original parser
+ # implementation, if there is no test version we need a NULL
+ # value to be used; also, if there is a version it should
+ # be terminated by a newline
+ if "version" in attributes:
+ attributes["version"] = str(attributes["version"])
+ else:
+ attributes["version"] = None
- super(test, self).__init__(subdir, testname, status, reason,
- test_kernel, machine, started_time,
- finished_time, iterations,
- attributes)
+ super(test, self).__init__(subdir, testname, status, reason,
+ test_kernel, machine, started_time,
+ finished_time, iterations,
+ attributes)
- @staticmethod
- def load_iterations(keyval_path):
- return iteration.load_from_keyval(keyval_path)
+ @staticmethod
+ def load_iterations(keyval_path):
+ return iteration.load_from_keyval(keyval_path)
class patch(models.patch):
- def __init__(self, spec, reference, hash):
- tko_utils.dprint("PATCH::%s %s %s" % (spec, reference, hash))
- super(patch, self).__init__(spec, reference, hash)
- self.spec = spec
- self.reference = reference
- self.hash = hash
+ def __init__(self, spec, reference, hash):
+ tko_utils.dprint("PATCH::%s %s %s" % (spec, reference, hash))
+ super(patch, self).__init__(spec, reference, hash)
+ self.spec = spec
+ self.reference = reference
+ self.hash = hash
class iteration(models.iteration):
- @staticmethod
- def parse_line_into_dicts(line, attr_dict, perf_dict):
- key, value = line.split("=", 1)
- perf_dict[key] = value
+ @staticmethod
+ def parse_line_into_dicts(line, attr_dict, perf_dict):
+ key, value = line.split("=", 1)
+ perf_dict[key] = value
class status_line(object):
- def __init__(self, indent, status, subdir, testname, reason,
- optional_fields):
- # pull out the type & status of the line
- if status == "START":
- self.type = "START"
- self.status = None
- elif status.startswith("END "):
- self.type = "END"
- self.status = status[4:]
- else:
- self.type = "STATUS"
- self.status = status
- assert (self.status is None or
- self.status in status_lib.status_stack.statuses)
+ def __init__(self, indent, status, subdir, testname, reason,
+ optional_fields):
+ # pull out the type & status of the line
+ if status == "START":
+ self.type = "START"
+ self.status = None
+ elif status.startswith("END "):
+ self.type = "END"
+ self.status = status[4:]
+ else:
+ self.type = "STATUS"
+ self.status = status
+ assert (self.status is None or
+ self.status in status_lib.status_stack.statuses)
- # save all the other parameters
- self.indent = indent
- self.subdir = self.parse_name(subdir)
- self.testname = self.parse_name(testname)
- self.reason = reason
- self.optional_fields = optional_fields
+ # save all the other parameters
+ self.indent = indent
+ self.subdir = self.parse_name(subdir)
+ self.testname = self.parse_name(testname)
+ self.reason = reason
+ self.optional_fields = optional_fields
- @staticmethod
- def parse_name(name):
- if name == "----":
- return None
- return name
+ @staticmethod
+ def parse_name(name):
+ if name == "----":
+ return None
+ return name
- @staticmethod
- def is_status_line(line):
- return re.search(r"^\t*\S", line) is not None
+ @staticmethod
+ def is_status_line(line):
+ return re.search(r"^\t*\S", line) is not None
- @classmethod
- def parse_line(cls, line):
- if not status_line.is_status_line(line):
- return None
- indent, line = re.search(r"^(\t*)(.*)$", line).groups()
- indent = len(indent)
+ @classmethod
+ def parse_line(cls, line):
+ if not status_line.is_status_line(line):
+ return None
+ indent, line = re.search(r"^(\t*)(.*)$", line).groups()
+ indent = len(indent)
- # split the line into the fixed and optional fields
- parts = line.split("\t")
- status, subdir, testname = parts[0:3]
- reason = parts[-1]
- optional_parts = parts[3:-1]
+ # split the line into the fixed and optional fields
+ parts = line.split("\t")
+ status, subdir, testname = parts[0:3]
+ reason = parts[-1]
+ optional_parts = parts[3:-1]
- # all the optional parts should be of the form "key=value"
- assert sum('=' not in part for part in optional_parts) == 0
- optional_fields = dict(part.split("=", 1)
- for part in optional_parts)
+ # all the optional parts should be of the form "key=value"
+ assert all('=' in part for part in optional_parts)
+ optional_fields = dict(part.split("=", 1)
+ for part in optional_parts)
- # build up a new status_line and return it
- return cls(indent, status, subdir, testname, reason,
- optional_fields)
+ # build up a new status_line and return it
+ return cls(indent, status, subdir, testname, reason,
+ optional_fields)
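Note: a worked example of parse_line() on a hypothetical status line
(tabs written as \t):

    line = status_line.parse_line(
        '\t\tEND GOOD\tmytest\tmytest\ttimestamp=16200\tall done')
    # -> line.indent == 2, line.type == 'END', line.status == 'GOOD',
    #    line.subdir == line.testname == 'mytest',
    #    line.optional_fields == {'timestamp': '16200'},
    #    line.reason == 'all done'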
class parser(base.parser):
- @staticmethod
- def make_job(dir):
- return job(dir)
+ @staticmethod
+ def make_job(dir):
+ return job(dir)
- def state_iterator(self, buffer):
- new_tests = []
- boot_count = 0
- group_subdir = None
- sought_level = 0
- stack = status_lib.status_stack()
- current_kernel = kernel(self.job)
- boot_in_progress = False
- alert_pending = None
- started_time = None
+ def state_iterator(self, buffer):
+ new_tests = []
+ boot_count = 0
+ group_subdir = None
+ sought_level = 0
+ stack = status_lib.status_stack()
+ current_kernel = kernel(self.job)
+ boot_in_progress = False
+ alert_pending = None
+ started_time = None
- while not self.finished or buffer.size():
- # stop processing once the buffer is empty
- if buffer.size() == 0:
- yield new_tests
- new_tests = []
- continue
+ while not self.finished or buffer.size():
+ # stop processing once the buffer is empty
+ if buffer.size() == 0:
+ yield new_tests
+ new_tests = []
+ continue
- # parse the next line
- line = buffer.get()
- tko_utils.dprint('\nSTATUS: ' + line.strip())
- line = status_line.parse_line(line)
- if line is None:
- tko_utils.dprint('non-status line, ignoring')
- continue # ignore non-status lines
+ # parse the next line
+ line = buffer.get()
+ tko_utils.dprint('\nSTATUS: ' + line.strip())
+ line = status_line.parse_line(line)
+ if line is None:
+ tko_utils.dprint('non-status line, ignoring')
+ continue # ignore non-status lines
- # have we hit the job start line?
- if (line.type == "START" and not line.subdir and
- not line.testname):
- sought_level = 1
- tko_utils.dprint("found job level start "
- "marker, looking for level "
- "1 groups now")
- continue
+ # have we hit the job start line?
+ if (line.type == "START" and not line.subdir and
+ not line.testname):
+ sought_level = 1
+ tko_utils.dprint("found job level start "
+ "marker, looking for level "
+ "1 groups now")
+ continue
- # have we hit the job end line?
- if (line.type == "END" and not line.subdir and
- not line.testname):
- tko_utils.dprint("found job level end "
- "marker, looking for level "
- "0 lines now")
- sought_level = 0
+ # have we hit the job end line?
+ if (line.type == "END" and not line.subdir and
+ not line.testname):
+ tko_utils.dprint("found job level end "
+ "marker, looking for level "
+ "0 lines now")
+ sought_level = 0
- # START line, just push another layer on to the stack
- # and grab the start time if this is at the job level
- # we're currently seeking
- if line.type == "START":
- group_subdir = None
- stack.start()
- if line.indent == sought_level:
- started_time = \
- tko_utils.get_timestamp(
- line.optional_fields, "timestamp")
- tko_utils.dprint("start line, ignoring")
- continue
- # otherwise, update the status on the stack
- else:
- tko_utils.dprint("GROPE_STATUS: %s" %
- [stack.current_status(),
- line.status, line.subdir,
- line.testname, line.reason])
- stack.update(line.status)
+ # START line, just push another layer on to the stack
+ # and grab the start time if this is at the job level
+ # we're currently seeking
+ if line.type == "START":
+ group_subdir = None
+ stack.start()
+ if line.indent == sought_level:
+ started_time = \
+ tko_utils.get_timestamp(
+ line.optional_fields, "timestamp")
+ tko_utils.dprint("start line, ignoring")
+ continue
+ # otherwise, update the status on the stack
+ else:
+ tko_utils.dprint("GROPE_STATUS: %s" %
+ [stack.current_status(),
+ line.status, line.subdir,
+ line.testname, line.reason])
+ stack.update(line.status)
- if line.status == "ALERT":
- tko_utils.dprint("job level alert, recording")
- alert_pending = line.reason
- continue
+ if line.status == "ALERT":
+ tko_utils.dprint("job level alert, recording")
+ alert_pending = line.reason
+ continue
- # ignore Autotest.install => GOOD lines
- if (line.testname == "Autotest.install" and
- line.status == "GOOD"):
- tko_utils.dprint("Successful Autotest "
- "install, ignoring")
- continue
+ # ignore Autotest.install => GOOD lines
+ if (line.testname == "Autotest.install" and
+ line.status == "GOOD"):
+ tko_utils.dprint("Successful Autotest "
+ "install, ignoring")
+ continue
- # ignore END lines for a reboot group
- if (line.testname == "reboot" and line.type == "END"):
- tko_utils.dprint("reboot group, ignoring")
- continue
+ # ignore END lines for a reboot group
+ if (line.testname == "reboot" and line.type == "END"):
+ tko_utils.dprint("reboot group, ignoring")
+ continue
- # convert job-level ABORTs into a 'JOB' test, and
- # ignore other job-level events
- if line.testname is None:
- if (line.status == "ABORT" and
- line.type != "END"):
- line.testname = "JOB"
- else:
- tko_utils.dprint("job level event, "
- "ignoring")
- continue
+ # convert job-level ABORTs into a 'JOB' test, and
+ # ignore other job-level events
+ if line.testname is None:
+ if (line.status == "ABORT" and
+ line.type != "END"):
+ line.testname = "JOB"
+ else:
+ tko_utils.dprint("job level event, "
+ "ignoring")
+ continue
- # use the group subdir for END lines
- if line.type == "END":
- line.subdir = group_subdir
+ # use the group subdir for END lines
+ if line.type == "END":
+ line.subdir = group_subdir
- # are we inside a block group?
- if (line.indent != sought_level and
- line.status != "ABORT" and
- not line.testname.startswith('reboot.')):
- if line.subdir:
- tko_utils.dprint("set group_subdir: "
- + line.subdir)
- group_subdir = line.subdir
- tko_utils.dprint("ignoring incorrect indent "
- "level %d != %d," %
- (line.indent, sought_level))
- continue
+ # are we inside a block group?
+ if (line.indent != sought_level and
+ line.status != "ABORT" and
+ not line.testname.startswith('reboot.')):
+ if line.subdir:
+ tko_utils.dprint("set group_subdir: "
+ + line.subdir)
+ group_subdir = line.subdir
+ tko_utils.dprint("ignoring incorrect indent "
+ "level %d != %d," %
+ (line.indent, sought_level))
+ continue
- # use the subdir as the testname, except for
- # boot.* and kernel.* tests
- if (line.testname is None or
- not re.search(r"^(boot(\.\d+)?$|kernel\.)",
- line.testname)):
- if line.subdir and '.' in line.subdir:
- line.testname = line.subdir
+ # use the subdir as the testname, except for
+ # boot.* and kernel.* tests
+ if (line.testname is None or
+ not re.search(r"^(boot(\.\d+)?$|kernel\.)",
+ line.testname)):
+ if line.subdir and '.' in line.subdir:
+ line.testname = line.subdir
- # has a reboot started?
- if line.testname == "reboot.start":
- started_time = tko_utils.get_timestamp(
- line.optional_fields, "timestamp")
- tko_utils.dprint("reboot start event, "
- "ignoring")
- boot_in_progress = True
- continue
+ # has a reboot started?
+ if line.testname == "reboot.start":
+ started_time = tko_utils.get_timestamp(
+ line.optional_fields, "timestamp")
+ tko_utils.dprint("reboot start event, "
+ "ignoring")
+ boot_in_progress = True
+ continue
- # has a reboot finished?
- if line.testname == "reboot.verify":
- line.testname = "boot.%d" % boot_count
- tko_utils.dprint("reboot verified")
- boot_in_progress = False
- verify_ident = line.reason.strip()
- current_kernel = kernel(self.job, verify_ident)
- boot_count += 1
+ # has a reboot finished?
+ if line.testname == "reboot.verify":
+ line.testname = "boot.%d" % boot_count
+ tko_utils.dprint("reboot verified")
+ boot_in_progress = False
+ verify_ident = line.reason.strip()
+ current_kernel = kernel(self.job, verify_ident)
+ boot_count += 1
- if alert_pending:
- line.status = "ALERT"
- line.reason = alert_pending
- alert_pending = None
+ if alert_pending:
+ line.status = "ALERT"
+ line.reason = alert_pending
+ alert_pending = None
- # create the actual test object
- finished_time = tko_utils.get_timestamp(
- line.optional_fields, "timestamp")
- final_status = stack.end()
- tko_utils.dprint("Adding: "
- "%s\nSubdir:%s\nTestname:%s\n%s" %
- (final_status, line.subdir,
- line.testname, line.reason))
- new_test = test.parse_test(self.job, line.subdir,
- line.testname,
- final_status, line.reason,
- current_kernel,
- started_time,
- finished_time)
- started_time = None
- new_tests.append(new_test)
+ # create the actual test object
+ finished_time = tko_utils.get_timestamp(
+ line.optional_fields, "timestamp")
+ final_status = stack.end()
+ tko_utils.dprint("Adding: "
+ "%s\nSubdir:%s\nTestname:%s\n%s" %
+ (final_status, line.subdir,
+ line.testname, line.reason))
+ new_test = test.parse_test(self.job, line.subdir,
+ line.testname,
+ final_status, line.reason,
+ current_kernel,
+ started_time,
+ finished_time)
+ started_time = None
+ new_tests.append(new_test)
- # the job is finished, but we never came back from reboot
- if boot_in_progress:
- testname = "boot.%d" % boot_count
- reason = "machine did not return from reboot"
- tko_utils.dprint(("Adding: ABORT\nSubdir:----\n"
- "Testname:%s\n%s")
- % (testname, reason))
- new_test = test.parse_test(self.job, None, testname,
- "ABORT", reason,
- current_kernel, None, None)
- new_tests.append(new_test)
- yield new_tests
+ # the job is finished, but we never came back from reboot
+ if boot_in_progress:
+ testname = "boot.%d" % boot_count
+ reason = "machine did not return from reboot"
+ tko_utils.dprint(("Adding: ABORT\nSubdir:----\n"
+ "Testname:%s\n%s")
+ % (testname, reason))
+ new_test = test.parse_test(self.job, None, testname,
+ "ABORT", reason,
+ current_kernel, None, None)
+ new_tests.append(new_test)
+ yield new_tests
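Note: end to end, a results directory goes through this parser roughly as
in the sketch below (hypothetical driver, with results_dir and
status_log_path assumed; the real loop lives in tko/parse.py).

    parser = version_0.parser()
    job = parser.make_job(results_dir)   # loads the keyval, finds the machine
    parser.start(job)                    # primes the state machine
    for test in parser.end(open(status_log_path).readlines()):
        print test.testname, test.status, test.reason

process_lines() can be used instead of end() to stream a partial log while
the job is still running.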
diff --git a/tko/parsers/version_0_unittest.py b/tko/parsers/version_0_unittest.py
index 947199b..3b879ae 100644
--- a/tko/parsers/version_0_unittest.py
+++ b/tko/parsers/version_0_unittest.py
@@ -7,118 +7,118 @@
class test_status_line(unittest.TestCase):
- statuses = ["GOOD", "WARN", "FAIL", "ABORT"]
+ statuses = ["GOOD", "WARN", "FAIL", "ABORT"]
- def test_handles_start(self):
- line = version_0.status_line(0, "START", "----", "test",
- "", {})
- self.assertEquals(line.type, "START")
- self.assertEquals(line.status, None)
+ def test_handles_start(self):
+ line = version_0.status_line(0, "START", "----", "test",
+ "", {})
+ self.assertEquals(line.type, "START")
+ self.assertEquals(line.status, None)
- def test_handles_status(self):
- for stat in self.statuses:
- line = version_0.status_line(0, stat, "----", "test",
- "", {})
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, stat)
+ def test_handles_status(self):
+ for stat in self.statuses:
+ line = version_0.status_line(0, stat, "----", "test",
+ "", {})
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, stat)
- def test_handles_endstatus(self):
- for stat in self.statuses:
- line = version_0.status_line(0, "END " + stat, "----",
- "test", "", {})
- self.assertEquals(line.type, "END")
- self.assertEquals(line.status, stat)
+ def test_handles_endstatus(self):
+ for stat in self.statuses:
+ line = version_0.status_line(0, "END " + stat, "----",
+ "test", "", {})
+ self.assertEquals(line.type, "END")
+ self.assertEquals(line.status, stat)
- def test_fails_on_bad_status(self):
- for stat in self.statuses:
- self.assertRaises(AssertionError,
- version_0.status_line, 0,
- "BAD " + stat, "----", "test",
- "", {})
+ def test_fails_on_bad_status(self):
+ for stat in self.statuses:
+ self.assertRaises(AssertionError,
+ version_0.status_line, 0,
+ "BAD " + stat, "----", "test",
+ "", {})
- def test_saves_all_fields(self):
- line = version_0.status_line(5, "GOOD", "subdir_name",
- "test_name", "my reason here",
- {"key1": "value",
- "key2": "another value",
- "key3": "value3"})
- self.assertEquals(line.indent, 5)
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, "subdir_name")
- self.assertEquals(line.testname, "test_name")
- self.assertEquals(line.reason, "my reason here")
- self.assertEquals(line.optional_fields,
- {"key1": "value", "key2": "another value",
- "key3": "value3"})
+ def test_saves_all_fields(self):
+ line = version_0.status_line(5, "GOOD", "subdir_name",
+ "test_name", "my reason here",
+ {"key1": "value",
+ "key2": "another value",
+ "key3": "value3"})
+ self.assertEquals(line.indent, 5)
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, "subdir_name")
+ self.assertEquals(line.testname, "test_name")
+ self.assertEquals(line.reason, "my reason here")
+ self.assertEquals(line.optional_fields,
+ {"key1": "value", "key2": "another value",
+ "key3": "value3"})
- def test_parses_blank_subdir(self):
- line = version_0.status_line(0, "GOOD", "----", "test",
- "", {})
- self.assertEquals(line.subdir, None)
+ def test_parses_blank_subdir(self):
+ line = version_0.status_line(0, "GOOD", "----", "test",
+ "", {})
+ self.assertEquals(line.subdir, None)
- def test_parses_blank_testname(self):
- line = version_0.status_line(0, "GOOD", "subdir", "----",
- "", {})
- self.assertEquals(line.testname, None)
+ def test_parses_blank_testname(self):
+ line = version_0.status_line(0, "GOOD", "subdir", "----",
+ "", {})
+ self.assertEquals(line.testname, None)
- def test_parse_line_smoketest(self):
- input_data = ("\t\t\tGOOD\t----\t----\t"
- "field1=val1\tfield2=val2\tTest Passed")
- line = version_0.status_line.parse_line(input_data)
- self.assertEquals(line.indent, 3)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, None)
- self.assertEquals(line.testname, None)
- self.assertEquals(line.reason, "Test Passed")
- self.assertEquals(line.optional_fields,
- {"field1": "val1", "field2": "val2"})
+ def test_parse_line_smoketest(self):
+ input_data = ("\t\t\tGOOD\t----\t----\t"
+ "field1=val1\tfield2=val2\tTest Passed")
+ line = version_0.status_line.parse_line(input_data)
+ self.assertEquals(line.indent, 3)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, None)
+ self.assertEquals(line.testname, None)
+ self.assertEquals(line.reason, "Test Passed")
+ self.assertEquals(line.optional_fields,
+ {"field1": "val1", "field2": "val2"})
- def test_parse_line_handles_newline(self):
- input_data = ("\t\tGOOD\t----\t----\t"
- "field1=val1\tfield2=val2\tNo newline here!")
- for suffix in ("", "\n"):
- line = version_0.status_line.parse_line(input_data +
- suffix)
- self.assertEquals(line.indent, 2)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, None)
- self.assertEquals(line.testname, None)
- self.assertEquals(line.reason, "No newline here!")
- self.assertEquals(line.optional_fields,
- {"field1": "val1",
- "field2": "val2"})
+ def test_parse_line_handles_newline(self):
+ input_data = ("\t\tGOOD\t----\t----\t"
+ "field1=val1\tfield2=val2\tNo newline here!")
+ for suffix in ("", "\n"):
+ line = version_0.status_line.parse_line(input_data +
+ suffix)
+ self.assertEquals(line.indent, 2)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, None)
+ self.assertEquals(line.testname, None)
+ self.assertEquals(line.reason, "No newline here!")
+ self.assertEquals(line.optional_fields,
+ {"field1": "val1",
+ "field2": "val2"})
- def test_parse_line_fails_on_untabbed_lines(self):
- input_data = " GOOD\trandom\tfields\tof text"
- line = version_0.status_line.parse_line(input_data)
- self.assertEquals(line, None)
- line = version_0.status_line.parse_line(input_data.lstrip())
- self.assertEquals(line.indent, 0)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, "random")
- self.assertEquals(line.testname, "fields")
- self.assertEquals(line.reason, "of text")
- self.assertEquals(line.optional_fields, {})
+ def test_parse_line_fails_on_untabbed_lines(self):
+ input_data = " GOOD\trandom\tfields\tof text"
+ line = version_0.status_line.parse_line(input_data)
+ self.assertEquals(line, None)
+ line = version_0.status_line.parse_line(input_data.lstrip())
+ self.assertEquals(line.indent, 0)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, "random")
+ self.assertEquals(line.testname, "fields")
+ self.assertEquals(line.reason, "of text")
+ self.assertEquals(line.optional_fields, {})
- def test_parse_line_fails_on_bad_optional_fields(self):
- input_data = "GOOD\tfield1\tfield2\tfield3\tfield4"
- self.assertRaises(AssertionError,
- version_0.status_line.parse_line,
- input_data)
+ def test_parse_line_fails_on_bad_optional_fields(self):
+ input_data = "GOOD\tfield1\tfield2\tfield3\tfield4"
+ self.assertRaises(AssertionError,
+ version_0.status_line.parse_line,
+ input_data)
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/tko/parsers/version_1.py b/tko/parsers/version_1.py
index b13e272..77c60c2 100644
--- a/tko/parsers/version_1.py
+++ b/tko/parsers/version_1.py
@@ -5,85 +5,85 @@
class kernel(models.kernel):
- def __init__(self, base, patches):
- if base:
- patches = [patch(*p.split()) for p in patches]
- hashes = [p.hash for p in patches]
- kernel_hash = self.compute_hash(base, hashes)
- else:
- base = "UNKNOWN"
- patches = []
- kernel_hash = "UNKNOWN"
- super(kernel, self).__init__(base, patches, kernel_hash)
+ def __init__(self, base, patches):
+ if base:
+ patches = [patch(*p.split()) for p in patches]
+ hashes = [p.hash for p in patches]
+ kernel_hash = self.compute_hash(base, hashes)
+ else:
+ base = "UNKNOWN"
+ patches = []
+ kernel_hash = "UNKNOWN"
+ super(kernel, self).__init__(base, patches, kernel_hash)
class test(models.test):
- @staticmethod
- def load_iterations(keyval_path):
- return iteration.load_from_keyval(keyval_path)
+ @staticmethod
+ def load_iterations(keyval_path):
+ return iteration.load_from_keyval(keyval_path)
class iteration(models.iteration):
- @staticmethod
- def parse_line_into_dicts(line, attr_dict, perf_dict):
- typed_match = re.search("^([^=]*)\{(\w*)\}=(.*)$", line)
- if typed_match:
- key, val_type, value = typed_match.groups()
- if val_type == "attr":
- attr_dict[key] = value
- elif val_type == "perf":
- perf_dict[key] = value
- else:
- msg = ("WARNING: line '%s' found in test "
- "iteration keyval could not be parsed")
- msg %= line
- tko_utils.dprint(msg)
- return # skip the line
- else:
- # old-fashioned untyped match, assume perf
- untyped_match = re.search("^([^=]*)=(.*)$", line)
- if not untyped_match:
- msg = ("WARNING: line '%s' found in test "
- "iteration keyval could not be parsed")
- msg %= line
- tko_utils.dprint(msg)
- return # skip this line
- key, value = untyped_match.groups()
- perf_dict[key] = value
+ @staticmethod
+ def parse_line_into_dicts(line, attr_dict, perf_dict):
+ typed_match = re.search(r"^([^=]*)\{(\w*)\}=(.*)$", line)
+ if typed_match:
+ key, val_type, value = typed_match.groups()
+ if val_type == "attr":
+ attr_dict[key] = value
+ elif val_type == "perf":
+ perf_dict[key] = value
+ else:
+ msg = ("WARNING: line '%s' found in test "
+ "iteration keyval could not be parsed")
+ msg %= line
+ tko_utils.dprint(msg)
+ return # skip the line
+ else:
+ # old-fashioned untyped match, assume perf
+ untyped_match = re.search("^([^=]*)=(.*)$", line)
+ if not untyped_match:
+ msg = ("WARNING: line '%s' found in test "
+ "iteration keyval could not be parsed")
+ msg %= line
+ tko_utils.dprint(msg)
+ return # skip this line
+ key, value = untyped_match.groups()
+ perf_dict[key] = value
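Note: the typed keyval format handled here tags each key with {attr} or
{perf}, and untyped lines fall back to perf. With hypothetical values:

    attr, perf = {}, {}
    iteration.parse_line_into_dicts('kernel_ver{attr}=2.6.24', attr, perf)
    iteration.parse_line_into_dicts('throughput{perf}=102.4', attr, perf)
    iteration.parse_line_into_dicts('latency=13', attr, perf)
    # -> attr == {'kernel_ver': '2.6.24'}
    # -> perf == {'throughput': '102.4', 'latency': '13'}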
class status_line(version_0.status_line):
- def is_successful_reboot(self, current_status):
- # make sure this is a reboot line
- if self.testname != "reboot":
- return False
+ def is_successful_reboot(self, current_status):
+ # make sure this is a reboot line
+ if self.testname != "reboot":
+ return False
- # make sure this was not a failure
- get_index = status_lib.status_stack.statuses.index
- if get_index(current_status) <= get_index("FAIL"):
- return False
+ # make sure this was not a failure
+ get_index = status_lib.status_stack.statuses.index
+ if get_index(current_status) <= get_index("FAIL"):
+ return False
- # it must have been a successful reboot
- return True
+ # it must have been a successful reboot
+ return True
- def get_kernel(self):
- # get the base kernel version
- fields = self.optional_fields
- base = fields.get("kernel", "")
- # get a list of patches
- patches = []
- patch_index = 0
- while ("patch%d" % patch_index) in fields:
- patches.append(fields["patch%d" % patch_index])
- patch_index += 1
- # create a new kernel instance
- return kernel(base, patches)
+ def get_kernel(self):
+ # get the base kernel version
+ fields = self.optional_fields
+ base = fields.get("kernel", "")
+ # get a list of patches
+ patches = []
+ patch_index = 0
+ while ("patch%d" % patch_index) in fields:
+ patches.append(fields["patch%d" % patch_index])
+ patch_index += 1
+ # create a new kernel instance
+ return kernel(base, patches)
- def get_timestamp(self):
- return tko_utils.get_timestamp(self.optional_fields,
- "timestamp")
+ def get_timestamp(self):
+ return tko_utils.get_timestamp(self.optional_fields,
+ "timestamp")
# the default implementations from version 0 will do for now
@@ -92,127 +92,127 @@
class parser(base.parser):
- @staticmethod
- def make_job(dir):
- return job(dir)
+ @staticmethod
+ def make_job(dir):
+ return job(dir)
- @staticmethod
- def make_dummy_abort(indent):
- indent = "\t" * indent
- return indent + "END ABORT\t----\t----\tUnexpected ABORT"
+ @staticmethod
+ def make_dummy_abort(indent):
+ indent = "\t" * indent
+ return indent + "END ABORT\t----\t----\tUnexpected ABORT"
- def state_iterator(self, buffer):
- new_tests = []
- boot_count = 0
- min_stack_size = 0
- stack = status_lib.status_stack()
- current_kernel = kernel("", []) # UNKNOWN
- started_time_stack = [None]
- subdir_stack = [None]
+ def state_iterator(self, buffer):
+ new_tests = []
+ boot_count = 0
+ min_stack_size = 0
+ stack = status_lib.status_stack()
+ current_kernel = kernel("", []) # UNKNOWN
+ started_time_stack = [None]
+ subdir_stack = [None]
- while True:
- # are we finished with parsing?
- if buffer.size() == 0 and self.finished:
- if stack.size() == 0:
- break
- # we have status lines left on the stack,
- # we need to implicitly abort them first
- for i in reversed(xrange(stack.size())):
- buffer.put(self.make_dummy_abort(i))
+ while True:
+ # are we finished with parsing?
+ if buffer.size() == 0 and self.finished:
+ if stack.size() == 0:
+ break
+ # we have status lines left on the stack,
+ # we need to implicitly abort them first
+ for i in reversed(xrange(stack.size())):
+ buffer.put(self.make_dummy_abort(i))
- # stop processing once the buffer is empty
- if buffer.size() == 0:
- yield new_tests
- new_tests = []
- continue
+ # stop processing once the buffer is empty
+ if buffer.size() == 0:
+ yield new_tests
+ new_tests = []
+ continue
- # reinitialize the per-iteration state
- started_time = None
- finished_time = None
+ # reinitialize the per-iteration state
+ started_time = None
+ finished_time = None
- # get the next line
- raw_line = buffer.get()
- tko_utils.dprint('\nSTATUS: ' + raw_line.strip())
- line = status_line.parse_line(raw_line)
- if line is None:
- tko_utils.dprint('non-status line, ignoring')
- continue
+ # get the next line
+ raw_line = buffer.get()
+ tko_utils.dprint('\nSTATUS: ' + raw_line.strip())
+ line = status_line.parse_line(raw_line)
+ if line is None:
+ tko_utils.dprint('non-status line, ignoring')
+ continue
- # initial line processing
- if line.type == "START":
- stack.start()
- if (line.testname, line.subdir) == (None,) * 2:
- min_stack_size = stack.size()
- started_time_stack.append(line.get_timestamp())
- subdir_stack.append(line.subdir)
- continue
- elif line.type == "STATUS":
- stack.update(line.status)
- indent = line.indent
- started_time = None
- finished_time = line.get_timestamp()
- if line.subdir:
- subdir_stack[-1] = line.subdir
- elif line.type == "END":
- if (line.testname, line.subdir) == (None,) * 2:
- min_stack_size = stack.size() - 1
- subdir_stack.pop()
- else:
- line.subdir = subdir_stack.pop()
- stack.update(line.status)
- indent = line.indent + 1
- started_time = started_time_stack.pop()
- finished_time = line.get_timestamp()
- else:
- assert False
+ # initial line processing
+ if line.type == "START":
+ stack.start()
+ if (line.testname, line.subdir) == (None,) * 2:
+ min_stack_size = stack.size()
+ started_time_stack.append(line.get_timestamp())
+ subdir_stack.append(line.subdir)
+ continue
+ elif line.type == "STATUS":
+ stack.update(line.status)
+ indent = line.indent
+ started_time = None
+ finished_time = line.get_timestamp()
+ if line.subdir:
+ subdir_stack[-1] = line.subdir
+ elif line.type == "END":
+ if (line.testname, line.subdir) == (None,) * 2:
+ min_stack_size = stack.size() - 1
+ subdir_stack.pop()
+ else:
+ line.subdir = subdir_stack.pop()
+ stack.update(line.status)
+ indent = line.indent + 1
+ started_time = started_time_stack.pop()
+ finished_time = line.get_timestamp()
+ else:
+ assert False
- # have we unexpectedly exited a group?
- if indent < stack.size():
- # yes, implicitly ABORT
- buffer.put_back(raw_line)
- abort = self.make_dummy_abort(stack.size() - 1)
- buffer.put_back(abort)
- continue
- else:
- # no, just update the group status
- current_status = line.status
- stack.update(current_status)
+ # have we unexpectedly exited a group?
+ if indent < stack.size():
+ # yes, implicitly ABORT
+ buffer.put_back(raw_line)
+ abort = self.make_dummy_abort(stack.size() - 1)
+ buffer.put_back(abort)
+ continue
+ else:
+ # no, just update the group status
+ current_status = line.status
+ stack.update(current_status)
- # do we need to pop the stack?
- if line.type == "END":
- current_status = stack.end()
- stack.update(current_status)
- if line.is_successful_reboot(current_status):
- current_kernel = line.get_kernel()
- # rename the reboot testname
- if line.testname == "reboot":
- line.testname = "boot.%d" % boot_count
- boot_count += 1
+ # do we need to pop the stack?
+ if line.type == "END":
+ current_status = stack.end()
+ stack.update(current_status)
+ if line.is_successful_reboot(current_status):
+ current_kernel = line.get_kernel()
+ # rename the reboot testname
+ if line.testname == "reboot":
+ line.testname = "boot.%d" % boot_count
+ boot_count += 1
- # have we just finished a test?
- if stack.size() <= min_stack_size:
- # if there was no testname, just use the subdir
- if line.testname is None:
- line.testname = line.subdir
- # if there was no testname or subdir, use 'JOB'
- if line.testname is None:
- line.testname = "JOB"
+ # have we just finished a test?
+ if stack.size() <= min_stack_size:
+ # if there was no testname, just use the subdir
+ if line.testname is None:
+ line.testname = line.subdir
+ # if there was no testname or subdir, use 'JOB'
+ if line.testname is None:
+ line.testname = "JOB"
- new_test = test.parse_test(self.job,
- line.subdir,
- line.testname,
- current_status,
- line.reason,
- current_kernel,
- started_time,
- finished_time)
- msg = "ADD: %s\nSubdir: %s\nTestname: %s\n%s"
- msg %= (new_test.status, new_test.subdir,
- new_test.testname, new_test.reason)
- tko_utils.dprint(msg)
- new_tests.append(new_test)
+ new_test = test.parse_test(self.job,
+ line.subdir,
+ line.testname,
+ current_status,
+ line.reason,
+ current_kernel,
+ started_time,
+ finished_time)
+ msg = "ADD: %s\nSubdir: %s\nTestname: %s\n%s"
+ msg %= (new_test.status, new_test.subdir,
+ new_test.testname, new_test.reason)
+ tko_utils.dprint(msg)
+ new_tests.append(new_test)
- # the job is finished, nothing to do here but exit
- yield new_tests
+ # the job is finished, nothing to do here but exit
+ yield new_tests
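Note: the implicit-abort machinery is the main difference from version 0.
Whenever a line arrives at a shallower indent than the group stack, it is
pushed back behind a dummy END ABORT line (and at end of input one dummy is
queued per unclosed group). The dummies are ordinary status lines, e.g.

    print repr(parser.make_dummy_abort(2))
    # -> '\t\tEND ABORT\t----\t----\tUnexpected ABORT'

so they flow through exactly the same parsing path as real END lines.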
diff --git a/tko/parsers/version_1_unittest.py b/tko/parsers/version_1_unittest.py
index eb3ad37..33077c6 100644
--- a/tko/parsers/version_1_unittest.py
+++ b/tko/parsers/version_1_unittest.py
@@ -7,187 +7,187 @@
class test_status_line(unittest.TestCase):
- statuses = ["GOOD", "WARN", "FAIL", "ABORT"]
+ statuses = ["GOOD", "WARN", "FAIL", "ABORT"]
- def test_handles_start(self):
- line = version_1.status_line(0, "START", "----", "test",
- "", {})
- self.assertEquals(line.type, "START")
- self.assertEquals(line.status, None)
+ def test_handles_start(self):
+ line = version_1.status_line(0, "START", "----", "test",
+ "", {})
+ self.assertEquals(line.type, "START")
+ self.assertEquals(line.status, None)
- def test_handles_status(self):
- for stat in self.statuses:
- line = version_1.status_line(0, stat, "----", "test",
- "", {})
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, stat)
+ def test_handles_status(self):
+ for stat in self.statuses:
+ line = version_1.status_line(0, stat, "----", "test",
+ "", {})
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, stat)
- def test_handles_endstatus(self):
- for stat in self.statuses:
- line = version_1.status_line(0, "END " + stat, "----",
- "test", "", {})
- self.assertEquals(line.type, "END")
- self.assertEquals(line.status, stat)
+ def test_handles_endstatus(self):
+ for stat in self.statuses:
+ line = version_1.status_line(0, "END " + stat, "----",
+ "test", "", {})
+ self.assertEquals(line.type, "END")
+ self.assertEquals(line.status, stat)
- def test_fails_on_bad_status(self):
- for stat in self.statuses:
- self.assertRaises(AssertionError,
- version_1.status_line, 0,
- "BAD " + stat, "----", "test",
- "", {})
+ def test_fails_on_bad_status(self):
+ for stat in self.statuses:
+ self.assertRaises(AssertionError,
+ version_1.status_line, 0,
+ "BAD " + stat, "----", "test",
+ "", {})
- def test_saves_all_fields(self):
- line = version_1.status_line(5, "GOOD", "subdir_name",
- "test_name", "my reason here",
- {"key1": "value",
- "key2": "another value",
- "key3": "value3"})
- self.assertEquals(line.indent, 5)
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, "subdir_name")
- self.assertEquals(line.testname, "test_name")
- self.assertEquals(line.reason, "my reason here")
- self.assertEquals(line.optional_fields,
- {"key1": "value", "key2": "another value",
- "key3": "value3"})
+ def test_saves_all_fields(self):
+ line = version_1.status_line(5, "GOOD", "subdir_name",
+ "test_name", "my reason here",
+ {"key1": "value",
+ "key2": "another value",
+ "key3": "value3"})
+ self.assertEquals(line.indent, 5)
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, "subdir_name")
+ self.assertEquals(line.testname, "test_name")
+ self.assertEquals(line.reason, "my reason here")
+ self.assertEquals(line.optional_fields,
+ {"key1": "value", "key2": "another value",
+ "key3": "value3"})
- def test_parses_blank_subdir(self):
- line = version_1.status_line(0, "GOOD", "----", "test",
- "", {})
- self.assertEquals(line.subdir, None)
+ def test_parses_blank_subdir(self):
+ line = version_1.status_line(0, "GOOD", "----", "test",
+ "", {})
+ self.assertEquals(line.subdir, None)
- def test_parses_blank_testname(self):
- line = version_1.status_line(0, "GOOD", "subdir", "----",
- "", {})
- self.assertEquals(line.testname, None)
+ def test_parses_blank_testname(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "----",
+ "", {})
+ self.assertEquals(line.testname, None)
- def test_parse_line_smoketest(self):
- input_data = ("\t\t\tGOOD\t----\t----\t"
- "field1=val1\tfield2=val2\tTest Passed")
- line = version_1.status_line.parse_line(input_data)
- self.assertEquals(line.indent, 3)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, None)
- self.assertEquals(line.testname, None)
- self.assertEquals(line.reason, "Test Passed")
- self.assertEquals(line.optional_fields,
- {"field1": "val1", "field2": "val2"})
+ def test_parse_line_smoketest(self):
+ input_data = ("\t\t\tGOOD\t----\t----\t"
+ "field1=val1\tfield2=val2\tTest Passed")
+ line = version_1.status_line.parse_line(input_data)
+ self.assertEquals(line.indent, 3)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, None)
+ self.assertEquals(line.testname, None)
+ self.assertEquals(line.reason, "Test Passed")
+ self.assertEquals(line.optional_fields,
+ {"field1": "val1", "field2": "val2"})
- def test_parse_line_handles_newline(self):
- input_data = ("\t\tGOOD\t----\t----\t"
- "field1=val1\tfield2=val2\tNo newline here!")
- for suffix in ("", "\n"):
- line = version_1.status_line.parse_line(input_data +
- suffix)
- self.assertEquals(line.indent, 2)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, None)
- self.assertEquals(line.testname, None)
- self.assertEquals(line.reason, "No newline here!")
- self.assertEquals(line.optional_fields,
- {"field1": "val1",
- "field2": "val2"})
+ def test_parse_line_handles_newline(self):
+ input_data = ("\t\tGOOD\t----\t----\t"
+ "field1=val1\tfield2=val2\tNo newline here!")
+ for suffix in ("", "\n"):
+ line = version_1.status_line.parse_line(input_data +
+ suffix)
+ self.assertEquals(line.indent, 2)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, None)
+ self.assertEquals(line.testname, None)
+ self.assertEquals(line.reason, "No newline here!")
+ self.assertEquals(line.optional_fields,
+ {"field1": "val1",
+ "field2": "val2"})
- def test_parse_line_fails_on_untabbed_lines(self):
- input_data = " GOOD\trandom\tfields\tof text"
- line = version_1.status_line.parse_line(input_data)
- self.assertEquals(line, None)
- line = version_1.status_line.parse_line(input_data.lstrip())
- self.assertEquals(line.indent, 0)
- self.assertEquals(line.type, "STATUS")
- self.assertEquals(line.status, "GOOD")
- self.assertEquals(line.subdir, "random")
- self.assertEquals(line.testname, "fields")
- self.assertEquals(line.reason, "of text")
- self.assertEquals(line.optional_fields, {})
+ def test_parse_line_fails_on_untabbed_lines(self):
+ input_data = " GOOD\trandom\tfields\tof text"
+ line = version_1.status_line.parse_line(input_data)
+ self.assertEquals(line, None)
+ line = version_1.status_line.parse_line(input_data.lstrip())
+ self.assertEquals(line.indent, 0)
+ self.assertEquals(line.type, "STATUS")
+ self.assertEquals(line.status, "GOOD")
+ self.assertEquals(line.subdir, "random")
+ self.assertEquals(line.testname, "fields")
+ self.assertEquals(line.reason, "of text")
+ self.assertEquals(line.optional_fields, {})
- def test_parse_line_fails_on_bad_optional_fields(self):
- input_data = "GOOD\tfield1\tfield2\tfield3\tfield4"
- self.assertRaises(AssertionError,
- version_1.status_line.parse_line,
- input_data)
+ def test_parse_line_fails_on_bad_optional_fields(self):
+ input_data = "GOOD\tfield1\tfield2\tfield3\tfield4"
+ self.assertRaises(AssertionError,
+ version_1.status_line.parse_line,
+ input_data)
- def test_good_reboot_passes_success_test(self):
- line = version_1.status_line(0, "NOSTATUS", None, "reboot",
- "reboot success", {})
- self.assertEquals(line.is_successful_reboot("GOOD"), True)
- self.assertEquals(line.is_successful_reboot("WARN"), True)
+ def test_good_reboot_passes_success_test(self):
+ line = version_1.status_line(0, "NOSTATUS", None, "reboot",
+ "reboot success", {})
+ self.assertEquals(line.is_successful_reboot("GOOD"), True)
+ self.assertEquals(line.is_successful_reboot("WARN"), True)
- def test_bad_reboot_passes_success_test(self):
- line = version_1.status_line(0, "NOSTATUS", None, "reboot",
- "reboot success", {})
- self.assertEquals(line.is_successful_reboot("FAIL"), False)
- self.assertEquals(line.is_successful_reboot("ABORT"), False)
+ def test_bad_reboot_fails_success_test(self):
+ line = version_1.status_line(0, "NOSTATUS", None, "reboot",
+ "reboot success", {})
+ self.assertEquals(line.is_successful_reboot("FAIL"), False)
+ self.assertEquals(line.is_successful_reboot("ABORT"), False)
- def test_get_kernel_returns_kernel_plus_patches(self):
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text",
- {"kernel": "2.6.24-rc40",
- "patch0": "first_patch 0 0",
- "patch1": "another_patch 0 0"})
- kern = line.get_kernel()
- kernel_hash = md5.new("2.6.24-rc40,0,0").hexdigest()
- self.assertEquals(kern.base, "2.6.24-rc40")
- self.assertEquals(kern.patches[0].spec, "first_patch")
- self.assertEquals(kern.patches[1].spec, "another_patch")
- self.assertEquals(len(kern.patches), 2)
- self.assertEquals(kern.kernel_hash, kernel_hash)
+ def test_get_kernel_returns_kernel_plus_patches(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text",
+ {"kernel": "2.6.24-rc40",
+ "patch0": "first_patch 0 0",
+ "patch1": "another_patch 0 0"})
+ kern = line.get_kernel()
+ kernel_hash = md5.new("2.6.24-rc40,0,0").hexdigest()
+ self.assertEquals(kern.base, "2.6.24-rc40")
+ self.assertEquals(kern.patches[0].spec, "first_patch")
+ self.assertEquals(kern.patches[1].spec, "another_patch")
+ self.assertEquals(len(kern.patches), 2)
+ self.assertEquals(kern.kernel_hash, kernel_hash)
- def test_get_kernel_ignores_out_of_sequence_patches(self):
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text",
- {"kernel": "2.6.24-rc40",
- "patch0": "first_patch 0 0",
- "patch2": "another_patch 0 0"})
- kern = line.get_kernel()
- kernel_hash = md5.new("2.6.24-rc40,0").hexdigest()
- self.assertEquals(kern.base, "2.6.24-rc40")
- self.assertEquals(kern.patches[0].spec, "first_patch")
- self.assertEquals(len(kern.patches), 1)
- self.assertEquals(kern.kernel_hash, kernel_hash)
+ def test_get_kernel_ignores_out_of_sequence_patches(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text",
+ {"kernel": "2.6.24-rc40",
+ "patch0": "first_patch 0 0",
+ "patch2": "another_patch 0 0"})
+ kern = line.get_kernel()
+ kernel_hash = md5.new("2.6.24-rc40,0").hexdigest()
+ self.assertEquals(kern.base, "2.6.24-rc40")
+ self.assertEquals(kern.patches[0].spec, "first_patch")
+ self.assertEquals(len(kern.patches), 1)
+ self.assertEquals(kern.kernel_hash, kernel_hash)
- def test_get_kernel_returns_unknown_with_no_kernel(self):
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text",
- {"patch0": "first_patch 0 0",
- "patch2": "another_patch 0 0"})
- kern = line.get_kernel()
- self.assertEquals(kern.base, "UNKNOWN")
- self.assertEquals(kern.patches, [])
- self.assertEquals(kern.kernel_hash, "UNKNOWN")
+ def test_get_kernel_returns_unknown_with_no_kernel(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text",
+ {"patch0": "first_patch 0 0",
+ "patch2": "another_patch 0 0"})
+ kern = line.get_kernel()
+ self.assertEquals(kern.base, "UNKNOWN")
+ self.assertEquals(kern.patches, [])
+ self.assertEquals(kern.kernel_hash, "UNKNOWN")
- def test_get_timestamp_returns_timestamp_field(self):
- timestamp = datetime.datetime(1970, 1, 1, 4, 30)
- timestamp -= datetime.timedelta(seconds=time.timezone)
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text",
- {"timestamp": "16200"})
- self.assertEquals(timestamp, line.get_timestamp())
+ def test_get_timestamp_returns_timestamp_field(self):
+ timestamp = datetime.datetime(1970, 1, 1, 4, 30)
+ timestamp -= datetime.timedelta(seconds=time.timezone)
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text",
+ {"timestamp": "16200"})
+ self.assertEquals(timestamp, line.get_timestamp())
- def test_get_timestamp_returns_none_on_missing_field(self):
- line = version_1.status_line(0, "GOOD", "subdir", "testname",
- "reason text", {})
- self.assertEquals(None, line.get_timestamp())
+ def test_get_timestamp_returns_none_on_missing_field(self):
+ line = version_1.status_line(0, "GOOD", "subdir", "testname",
+ "reason text", {})
+ self.assertEquals(None, line.get_timestamp())
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/tko/plotgraph.py b/tko/plotgraph.py
index 8b12cbe..c7af2a3 100755
--- a/tko/plotgraph.py
+++ b/tko/plotgraph.py
@@ -11,97 +11,96 @@
Popen = subprocess.Popen
def avg_dev(values):
- if len(values) == 0:
- return (0,0)
- average = float(sum(values)) / len(values)
- sum_sq_dev = sum( [(x - average) ** 2 for x in values] )
- std_dev = sqrt(sum_sq_dev / float(len(values)));
- return (average, std_dev);
+ if len(values) == 0:
+ return (0, 0)
+ average = float(sum(values)) / len(values)
+ sum_sq_dev = sum((x - average) ** 2 for x in values)
+ std_dev = sqrt(sum_sq_dev / float(len(values)))
+ return (average, std_dev)
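Note: avg_dev() divides by N, i.e. it returns the population standard
deviation rather than the sample (N-1) one:

    print avg_dev([2.0, 4.0, 6.0])
    # -> (4.0, 1.6329...), since sqrt(((2-4)**2 + 0 + (6-4)**2) / 3)
    #    equals sqrt(8/3)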
class gnuplot:
- def __init__(self, title, xlabel, ylabel, xsort = sorted, size = "1180,900", keytitle = None):
- self.title = title
- self.xlabel = xlabel
- self.ylabel = ylabel
- self.data_titles = []
- self.datasets = []
- self.xsort = xsort
- self.xvalues = set([])
- self.size = size
- self.keytitle = keytitle
+ def __init__(self, title, xlabel, ylabel, xsort=sorted, size="1180,900", keytitle=None):
+ self.title = title
+ self.xlabel = xlabel
+ self.ylabel = ylabel
+ self.data_titles = []
+ self.datasets = []
+ self.xsort = xsort
+ self.xvalues = set([])
+ self.size = size
+ self.keytitle = keytitle
- def xtics(self):
- count = 1
- tics = []
- for label in self.xsort(self.xlabels):
- tics.append('"%s" %d' % (label, count))
- count += 1
- return tics
+ def xtics(self):
+ count = 1
+ tics = []
+ for label in self.xsort(self.xlabels):
+ tics.append('"%s" %d' % (label, count))
+ count += 1
+ return tics
- def add_dataset(self, title, labeled_values):
- """
- Add a data line
+ def add_dataset(self, title, labeled_values):
+ """
+ Add a data line
- title: title of the dataset
- labeled_values: dictionary of lists
- { label : [value1, value2, ... ] , ... }
- """
- if not labeled_values:
- raise "plotgraph:add_dataset - dataset was empty! %s" %\
- title
- self.data_titles.append(title)
- data_points = {}
- for label in labeled_values:
- point = "%s %s" % avg_dev(labeled_values[label])
- data_points[label] = point
- self.xvalues.add(label)
- self.datasets.append(data_points)
+ title: title of the dataset
+ labeled_values: dictionary of lists
+ { label : [value1, value2, ... ] , ... }
+ """
+ if not labeled_values:
+ raise "plotgraph:add_dataset - dataset was empty! %s" %\
+ title
+ self.data_titles.append(title)
+ data_points = {}
+ for label in labeled_values:
+ point = "%s %s" % avg_dev(labeled_values[label])
+ data_points[label] = point
+ self.xvalues.add(label)
+ self.datasets.append(data_points)
- def plot(self, cgi_header = False, output = None, test = None):
- if cgi_header:
- print "Content-type: image/png\n"
- sys.stdout.flush()
- if test:
- g = open(test, 'w')
- else:
- p = Popen("/usr/bin/gnuplot", stdin = subprocess.PIPE)
- g = p.stdin
- g.write('set terminal png size %s\n' % self.size)
- if self.keytitle:
- g.write('set key title "%s"\n' % self.keytitle)
- g.write('set key outside\n') # outside right
- else:
- g.write('set key below\n')
- g.write('set title "%s"\n' % self.title)
- g.write('set xlabel "%s"\n' % self.xlabel)
- g.write('set ylabel "%s"\n' % self.ylabel)
- if output:
- g.write('set output "%s"\n' % output)
- g.write('set style data yerrorlines\n')
- g.write('set grid\n')
+ def plot(self, cgi_header=False, output=None, test=None):
+ if cgi_header:
+ print "Content-type: image/png\n"
+ sys.stdout.flush()
+ if test:
+ g = open(test, 'w')
+ else:
+ p = Popen("/usr/bin/gnuplot", stdin=subprocess.PIPE)
+ g = p.stdin
+ g.write('set terminal png size %s\n' % self.size)
+ if self.keytitle:
+ g.write('set key title "%s"\n' % self.keytitle)
+ g.write('set key outside\n') # outside right
+ else:
+ g.write('set key below\n')
+ g.write('set title "%s"\n' % self.title)
+ g.write('set xlabel "%s"\n' % self.xlabel)
+ g.write('set ylabel "%s"\n' % self.ylabel)
+ if output:
+ g.write('set output "%s"\n' % output)
+ g.write('set style data yerrorlines\n')
+ g.write('set grid\n')
- self.xlabels = self.xsort(list(self.xvalues))
-
- g.write('set xrange [0.5:%f]\n' % (len(self.xvalues)+0.5))
- g.write('set xtics rotate (%s)\n' % ','.join(self.xtics()))
+ self.xlabels = self.xsort(list(self.xvalues))
- plot_lines = ['"-" title "%s"' % t for t in self.data_titles]
- g.write('plot ' + ', '.join(plot_lines) + '\n')
+ g.write('set xrange [0.5:%f]\n' % (len(self.xvalues)+0.5))
+ g.write('set xtics rotate (%s)\n' % ','.join(self.xtics()))
- for dataset in self.datasets:
- count = 1
- for label in self.xlabels:
- if label in dataset:
- data = dataset[label]
- g.write("%d %s\n" % (count, str(data)))
- count += 1
- sys.stdout.flush()
- g.write('e\n')
+ plot_lines = ['"-" title "%s"' % t for t in self.data_titles]
+ g.write('plot ' + ', '.join(plot_lines) + '\n')
- g.close()
- if not test:
- sts = os.waitpid(p.pid, 0)
+ for dataset in self.datasets:
+ count = 1
+ for label in self.xlabels:
+ if label in dataset:
+ data = dataset[label]
+ g.write("%d %s\n" % (count, str(data)))
+ count += 1
+ sys.stdout.flush()
+ g.write('e\n')
+ g.close()
+ if not test:
+ sts = os.waitpid(p.pid, 0)
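
The plot() method above is essentially a small driver for a gnuplot child process: configuration commands go down the pipe, each dataset is streamed inline after the plot command, and a lone 'e' terminates it. A minimal standalone sketch of the same pattern, using hypothetical data and the same /usr/bin/gnuplot path as the code above:

    import os, subprocess

    p = subprocess.Popen("/usr/bin/gnuplot", stdin=subprocess.PIPE)
    g = p.stdin
    g.write('set terminal png size 640,480\n')
    g.write('set output "demo.png"\n')
    g.write('set style data yerrorlines\n')   # each point is x, y, ydelta
    g.write('plot "-" title "demo"\n')
    for x, y, dev in [(1, 10.0, 0.5), (2, 12.5, 0.7), (3, 11.0, 0.3)]:
        g.write('%d %f %f\n' % (x, y, dev))
    g.write('e\n')                            # terminates the inline dataset
    g.close()
    os.waitpid(p.pid, 0)                      # let gnuplot finish the PNG
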
diff --git a/tko/query_lib.py b/tko/query_lib.py
index 09b7635..35e7280 100644
--- a/tko/query_lib.py
+++ b/tko/query_lib.py
@@ -15,76 +15,75 @@
db = db.db()
def dprint(str):
- pass
- #print "! %s<br>" % str
+ pass
+ #print "! %s<br>" % str
def parse_scrub_and_gen_condition(condition, valid_field_dict):
- me = parse_scrub_and_gen_condition # shorten the name
- compare_ops = {'=':'=', '<>':'<>', '==':'=', '!=':'<>', '>':'>',
- '<':'<', '>=':'>=', '<=':'<=', '~':'LIKE', '#':'REGEXP'}
+ me = parse_scrub_and_gen_condition # shorten the name
+ compare_ops = {'=':'=', '<>':'<>', '==':'=', '!=':'<>', '>':'>',
+ '<':'<', '>=':'>=', '<=':'<=', '~':'LIKE', '#':'REGEXP'}
- # strip white space
- condition = condition.strip()
+ # strip white space
+ condition = condition.strip()
- # ()'s
- #match = re.match(r'^[(](.+)[)]$', condition)
- #if match:
- # dprint("Matched () on %s" % condition)
- # depth = 0
- # for c in match.group(1):
- # if c == '(': depth += 1
- # if c == ')': depth -= 1
- # if depth < 0: break
- # dprint("Depth is %d" % depth)
- # if depth == 0:
- # dprint("Match...stripping ()'s")
- # return me(match.group(1), valid_field_dict)
+ # ()'s
+ #match = re.match(r'^[(](.+)[)]$', condition)
+ #if match:
+ # dprint("Matched () on %s" % condition)
+ # depth = 0
+ # for c in match.group(1):
+ # if c == '(': depth += 1
+ # if c == ')': depth -= 1
+ # if depth < 0: break
+ # dprint("Depth is %d" % depth)
+ # if depth == 0:
+ # dprint("Match...stripping ()'s")
+ # return me(match.group(1), valid_field_dict)
- # OR
- match = re.match(r'^(.+)[|](.+)$', condition)
- if match:
- dprint("Matched | on %s" % condition)
- (a_sql, a_values) = me(match.group(1), valid_field_dict)
- (b_sql, b_values) = me(match.group(2), valid_field_dict)
- return (" (%s) OR (%s) " % (a_sql, b_sql),
- a_values + b_values)
+ # OR
+ match = re.match(r'^(.+)[|](.+)$', condition)
+ if match:
+ dprint("Matched | on %s" % condition)
+ (a_sql, a_values) = me(match.group(1), valid_field_dict)
+ (b_sql, b_values) = me(match.group(2), valid_field_dict)
+ return (" (%s) OR (%s) " % (a_sql, b_sql),
+ a_values + b_values)
- # AND
- match = re.match(r'^(.+)[&](.+)$', condition)
- if match:
- dprint("Matched & on %s" % condition)
- (a_sql, a_values) = me(match.group(1), valid_field_dict)
- (b_sql, b_values) = me(match.group(2), valid_field_dict)
- return (" (%s) AND (%s) " % (a_sql, b_sql),
- a_values + b_values)
+ # AND
+ match = re.match(r'^(.+)[&](.+)$', condition)
+ if match:
+ dprint("Matched & on %s" % condition)
+ (a_sql, a_values) = me(match.group(1), valid_field_dict)
+ (b_sql, b_values) = me(match.group(2), valid_field_dict)
+ return (" (%s) AND (%s) " % (a_sql, b_sql),
+ a_values + b_values)
- # NOT
- #match = re.match(r'^[!](.+)$', condition)
- #if match:
- # dprint("Matched ! on %s" % condition)
- # (sql, values) = me(match.group(1), valid_field_dict)
- # return (" NOT (%s) " % (sql,), values)
+ # NOT
+ #match = re.match(r'^[!](.+)$', condition)
+ #if match:
+ # dprint("Matched ! on %s" % condition)
+ # (sql, values) = me(match.group(1), valid_field_dict)
+ # return (" NOT (%s) " % (sql,), values)
- # '<field> <op> <value>' where value can be quoted
- # double quotes are escaped....i.e. '''' is the same as "'"
- regex = r'^(%s)[ \t]*(%s)[ \t]*' + \
- r'(\'((\'\'|[^\'])*)\'|"((""|[^"])*)"|([^\'"].*))$'
- regex = regex % ('|'.join(valid_field_dict.keys()),
- '|'.join(compare_ops.keys()))
- match = re.match(regex, condition)
- if match:
- field = valid_field_dict[match.group(1)]
- op = compare_ops[match.group(2)]
- if match.group(5):
- val = match.group(4).replace("''", "'")
- elif match.group(7):
- val = match.group(6).replace('""', '"')
- elif match.group(8):
- val = match.group(8)
- else:
- raise "Internal error"
- return ("%s %s %%s" % (field, op), [val])
-
+    # '<field> <op> <value>' where the value may be quoted;
+    # quotes are escaped by doubling, i.e. '''' denotes the string "'"
+ regex = r'^(%s)[ \t]*(%s)[ \t]*' + \
+ r'(\'((\'\'|[^\'])*)\'|"((""|[^"])*)"|([^\'"].*))$'
+ regex = regex % ('|'.join(valid_field_dict.keys()),
+ '|'.join(compare_ops.keys()))
+ match = re.match(regex, condition)
+ if match:
+ field = valid_field_dict[match.group(1)]
+ op = compare_ops[match.group(2)]
+ if match.group(5):
+ val = match.group(4).replace("''", "'")
+ elif match.group(7):
+ val = match.group(6).replace('""', '"')
+ elif match.group(8):
+ val = match.group(8)
+ else:
+ raise "Internal error"
+ return ("%s %s %%s" % (field, op), [val])
- raise "Could not parse '%s' (%s)" % (condition, regex)
+ raise "Could not parse '%s' (%s)" % (condition, regex)
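
For reference, parse_scrub_and_gen_condition accepts '<field> <op> <value>' terms joined by | (OR) and & (AND), and returns the values separately so the caller can bind them as SQL parameters rather than interpolating user input. A hypothetical call against the function above, with an invented field map:

    valid_fields = {'machine': 'machine.hostname', 'status': 'status.word'}
    sql, values = parse_scrub_and_gen_condition(
            'machine=myhost & status!=GOOD', valid_fields)
    # sql    ==  " (machine.hostname = %s) AND (status.word <> %s) "
    # values ==  ['myhost', 'GOOD']
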
diff --git a/tko/reason_qualifier.py b/tko/reason_qualifier.py
index 29e50d7..6347a4f 100755
--- a/tko/reason_qualifier.py
+++ b/tko/reason_qualifier.py
@@ -2,63 +2,60 @@
class reason_counter:
- def __init__(self, wording):
- self.wording = wording
- self.num = 1
-
- def update(self, new_wording):
- self.num += 1
- self.wording = new_wording
+ def __init__(self, wording):
+ self.wording = wording
+ self.num = 1
- def html(self):
- if self.num == 1:
- return self.wording
- else:
- return "%s (%d+)" % (self.wording, self.num)
+ def update(self, new_wording):
+ self.num += 1
+ self.wording = new_wording
+
+ def html(self):
+ if self.num == 1:
+ return self.wording
+ else:
+ return "%s (%d+)" % (self.wording, self.num)
def numbers_are_irrelevant(txt):
- ## ? when do we replace numbers with NN ?
- ## By default is always, but
- ## if/when some categories of reasons choose to keep their numbers,
- ## then the function shall return False for such categories
- return True
+    ## When do we replace numbers with NN?
+    ## By default, always; but if some categories of reasons
+    ## later choose to keep their numbers, this function shall
+    ## return False for those categories.
+    return True
def aggregate_reason_fields(reasons_list):
- # each reason in the list may be a combination
- # of | - separated reasons.
- # expand into list
- reasons_txt = '|'.join(reasons_list)
- reasons = reasons_txt.split('|')
- reason_htable = {}
- for reason in reasons:
- reason_reduced = reason.strip()
- ## reduce whitespaces
- reason_reduced = re.sub(r"\s+"," ", reason_reduced)
+    # each reason in the list may itself be a combination
+    # of |-separated reasons;
+    # expand into a flat list
+ reasons_txt = '|'.join(reasons_list)
+ reasons = reasons_txt.split('|')
+ reason_htable = {}
+ for reason in reasons:
+ reason_reduced = reason.strip()
+        ## collapse runs of whitespace into single spaces
+ reason_reduced = re.sub(r"\s+"," ", reason_reduced)
- if reason_reduced == '':
- continue # ignore empty reasons
+ if reason_reduced == '':
+ continue # ignore empty reasons
- if numbers_are_irrelevant(reason_reduced):
- # reduce numbers included into reason descriptor
- # by replacing them with generic NN
- reason_reduced = re.sub(r"\d+","NN", reason_reduced)
+ if numbers_are_irrelevant(reason_reduced):
+ # reduce numbers included into reason descriptor
+ # by replacing them with generic NN
+ reason_reduced = re.sub(r"\d+","NN", reason_reduced)
- if not reason_reduced in reason_htable:
- reason_htable[reason_reduced] = reason_counter(reason)
- else:
- ## reason_counter keeps original ( non reduced )
- ## reason if it occured once
- ## if reason occured more then once, reason_counter
- ## will keep it in reduced/generalized form
- reason_htable[reason_reduced].update(reason_reduced)
+ if not reason_reduced in reason_htable:
+ reason_htable[reason_reduced] = reason_counter(reason)
+ else:
+            ## reason_counter keeps the original (non-reduced)
+            ## reason if it occurred only once; if a reason
+            ## occurred more than once, reason_counter keeps it
+            ## in the reduced/generalized form
+ reason_htable[reason_reduced].update(reason_reduced)
- generic_reasons = reason_htable.keys()
- generic_reasons.sort(key = (lambda k: reason_htable[k].num),
- reverse = True)
- return map(lambda generic_reason: reason_htable[generic_reason].html(),
- generic_reasons)
-
-
-
+ generic_reasons = reason_htable.keys()
+ generic_reasons.sort(key = (lambda k: reason_htable[k].num),
+ reverse = True)
+ return map(lambda generic_reason: reason_htable[generic_reason].html(),
+ generic_reasons)
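
In effect, aggregate_reason_fields buckets reasons that differ only in whitespace or embedded numbers and reports how many raw reasons landed in each bucket, most frequent first. A small hypothetical run against the function above:

    reasons = ['timed out after 300 seconds',
               'timed out after 600 seconds',
               'oops in foo()']
    print aggregate_reason_fields(reasons)
    # -> ['timed out after NN seconds (2+)', 'oops in foo()']
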
diff --git a/tko/retrieve_jobs b/tko/retrieve_jobs
index 196a9d0..58985c6 100755
--- a/tko/retrieve_jobs
+++ b/tko/retrieve_jobs
@@ -1,11 +1,11 @@
#!/usr/bin/python
import sys, db
-try:
- arg = sys.argv[1]
+try:
+ arg = sys.argv[1]
except:
- arg = ''
+ arg = ''
db = db.db()
for record in db.select('* from jobs ' + arg):
- print record
+ print record
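
The optional first argument is appended verbatim to the query, so any SQL tail clause passed as a single shell word narrows the dump. The loop above is equivalent to this sketch (the where clause is hypothetical):

    import db

    conn = db.db()
    # same as running: ./retrieve_jobs 'where machine_idx = 5'
    for record in conn.select('* from jobs where machine_idx = 5'):
        print record
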
diff --git a/tko/status_lib.py b/tko/status_lib.py
index 4ddfa5a..ab5c644 100644
--- a/tko/status_lib.py
+++ b/tko/status_lib.py
@@ -4,67 +4,67 @@
class status_stack(object):
- statuses = logging.job_statuses
+ statuses = logging.job_statuses
- def __init__(self):
- self.status_stack = [self.statuses[-1]]
+ def __init__(self):
+ self.status_stack = [self.statuses[-1]]
- def current_status(self):
- return self.status_stack[-1]
+ def current_status(self):
+ return self.status_stack[-1]
- def update(self, new_status):
- if new_status not in self.statuses:
- return
- old = self.statuses.index(self.current_status())
- new = self.statuses.index(new_status)
- if new < old:
- self.status_stack[-1] = new_status
+ def update(self, new_status):
+ if new_status not in self.statuses:
+ return
+ old = self.statuses.index(self.current_status())
+ new = self.statuses.index(new_status)
+ if new < old:
+ self.status_stack[-1] = new_status
- def start(self):
- self.status_stack.append(self.statuses[-1])
+ def start(self):
+ self.status_stack.append(self.statuses[-1])
- def end(self):
- result = self.status_stack.pop()
- if len(self.status_stack) == 0:
- self.status_stack.append(self.statuses[-1])
- return result
+ def end(self):
+ result = self.status_stack.pop()
+ if len(self.status_stack) == 0:
+ self.status_stack.append(self.statuses[-1])
+ return result
- def size(self):
- return len(self.status_stack) - 1
+ def size(self):
+ return len(self.status_stack) - 1
class line_buffer(object):
- def __init__(self):
- self.buffer = collections.deque()
+ def __init__(self):
+ self.buffer = collections.deque()
- def get(self):
- return self.buffer.pop()
+ def get(self):
+ return self.buffer.pop()
- def put(self, line):
- self.buffer.appendleft(line)
+ def put(self, line):
+ self.buffer.appendleft(line)
- def put_multiple(self, lines):
- self.buffer.extendleft(lines)
+ def put_multiple(self, lines):
+ self.buffer.extendleft(lines)
- def put_back(self, line):
- self.buffer.append(line)
+ def put_back(self, line):
+ self.buffer.append(line)
- def size(self):
- return len(self.buffer)
+ def size(self):
+ return len(self.buffer)
def parser(version):
- library = "autotest_lib.tko.parsers.version_%d" % version
- module = __import__(library, globals(), locals(), ["parser"])
- return module.parser()
+ library = "autotest_lib.tko.parsers.version_%d" % version
+ module = __import__(library, globals(), locals(), ["parser"])
+ return module.parser()
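
Two behaviors worth noting, both pinned down by the unit tests that follow: status_stack keeps the worst status seen at each nesting level (job_statuses is ordered worst-first, with NOSTATUS last as the default), and line_buffer is a FIFO with a put_back hook for one-line lookahead. A short sketch:

    import status_lib

    stack = status_lib.status_stack()
    stack.update("GOOD")
    stack.start()                  # enter a nested group
    stack.update("FAIL")
    stack.update("GOOD")           # ignored: GOOD is better than FAIL
    print stack.end()              # -> FAIL (pops the nested level)
    print stack.end()              # -> GOOD (pops the base level)

    buf = status_lib.line_buffer()
    buf.put_multiple(["line 1", "line 2"])
    line = buf.get()               # "line 1" (FIFO order)
    buf.put_back(line)             # push it back for re-reading
    print buf.get()                # -> "line 1" again
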
diff --git a/tko/status_lib_unittest.py b/tko/status_lib_unittest.py
index e8c15f2..2378f97 100644
--- a/tko/status_lib_unittest.py
+++ b/tko/status_lib_unittest.py
@@ -7,168 +7,168 @@
class line_buffer_test(unittest.TestCase):
- def test_get_empty(self):
- buf = status_lib.line_buffer()
- self.assertRaises(IndexError, buf.get)
+ def test_get_empty(self):
+ buf = status_lib.line_buffer()
+ self.assertRaises(IndexError, buf.get)
- def test_get_single(self):
- buf = status_lib.line_buffer()
- buf.put("single line")
- self.assertEquals(buf.get(), "single line")
- self.assertRaises(IndexError, buf.get)
+ def test_get_single(self):
+ buf = status_lib.line_buffer()
+ buf.put("single line")
+ self.assertEquals(buf.get(), "single line")
+ self.assertRaises(IndexError, buf.get)
- def test_is_fifo(self):
- buf = status_lib.line_buffer()
- lines = ["line #%d" for x in xrange(10)]
- for line in lines:
- buf.put(line)
- results = []
- while buf.size():
- results.append(buf.get())
- self.assertEquals(lines, results)
+ def test_is_fifo(self):
+ buf = status_lib.line_buffer()
+ lines = ["line #%d" for x in xrange(10)]
+ for line in lines:
+ buf.put(line)
+ results = []
+ while buf.size():
+ results.append(buf.get())
+ self.assertEquals(lines, results)
- def test_put_multiple_same_as_multiple_puts(self):
- buf_put, buf_multi = [status_lib.line_buffer()
- for x in xrange(2)]
- lines = ["line #%d" % x for x in xrange(10)]
- for line in lines:
- buf_put.put(line)
- buf_multi.put_multiple(lines)
- counter = 0
- while buf_put.size():
- self.assertEquals(buf_put.size(), buf_multi.size())
- line = "line #%d" % counter
- self.assertEquals(buf_put.get(), line)
- self.assertEquals(buf_multi.get(), line)
- counter += 1
+ def test_put_multiple_same_as_multiple_puts(self):
+ buf_put, buf_multi = [status_lib.line_buffer()
+ for x in xrange(2)]
+ lines = ["line #%d" % x for x in xrange(10)]
+ for line in lines:
+ buf_put.put(line)
+ buf_multi.put_multiple(lines)
+ counter = 0
+ while buf_put.size():
+ self.assertEquals(buf_put.size(), buf_multi.size())
+ line = "line #%d" % counter
+ self.assertEquals(buf_put.get(), line)
+ self.assertEquals(buf_multi.get(), line)
+ counter += 1
- def test_put_back_is_lifo(self):
- buf = status_lib.line_buffer()
- lines = ["1", "2", "3"]
- for line in lines:
- buf.put(line)
- results = []
- results.append(buf.get())
- buf.put_back("1")
- buf.put_back("0")
- while buf.size():
- results.append(buf.get())
- self.assertEquals(results, ["1", "0", "1", "2", "3"])
+ def test_put_back_is_lifo(self):
+ buf = status_lib.line_buffer()
+ lines = ["1", "2", "3"]
+ for line in lines:
+ buf.put(line)
+ results = []
+ results.append(buf.get())
+ buf.put_back("1")
+ buf.put_back("0")
+ while buf.size():
+ results.append(buf.get())
+ self.assertEquals(results, ["1", "0", "1", "2", "3"])
- def test_size_increased_by_put(self):
- buf = status_lib.line_buffer()
- self.assertEquals(buf.size(), 0)
- buf.put("1")
- buf.put("2")
- self.assertEquals(buf.size(), 2)
- buf.put("3")
- self.assertEquals(buf.size(), 3)
+ def test_size_increased_by_put(self):
+ buf = status_lib.line_buffer()
+ self.assertEquals(buf.size(), 0)
+ buf.put("1")
+ buf.put("2")
+ self.assertEquals(buf.size(), 2)
+ buf.put("3")
+ self.assertEquals(buf.size(), 3)
- def test_size_decreased_by_get(self):
- buf = status_lib.line_buffer()
- buf.put("1")
- buf.put("2")
- buf.put("3")
- self.assertEquals(buf.size(), 3)
- buf.get()
- self.assertEquals(buf.size(), 2)
- buf.get()
- buf.get()
- self.assertEquals(buf.size(), 0)
+ def test_size_decreased_by_get(self):
+ buf = status_lib.line_buffer()
+ buf.put("1")
+ buf.put("2")
+ buf.put("3")
+ self.assertEquals(buf.size(), 3)
+ buf.get()
+ self.assertEquals(buf.size(), 2)
+ buf.get()
+ buf.get()
+ self.assertEquals(buf.size(), 0)
class status_stack_test(unittest.TestCase):
- statuses = logging.job_statuses
+ statuses = logging.job_statuses
- def test_default_to_nostatus(self):
- stack = status_lib.status_stack()
- self.assertEquals(stack.current_status(), "NOSTATUS")
+ def test_default_to_nostatus(self):
+ stack = status_lib.status_stack()
+ self.assertEquals(stack.current_status(), "NOSTATUS")
- def test_default_on_start_to_nostatus(self):
- stack = status_lib.status_stack()
- stack.update("FAIL")
- stack.start()
- self.assertEquals(stack.current_status(), "NOSTATUS")
+ def test_default_on_start_to_nostatus(self):
+ stack = status_lib.status_stack()
+ stack.update("FAIL")
+ stack.start()
+ self.assertEquals(stack.current_status(), "NOSTATUS")
- def test_size_always_at_least_zero(self):
- stack = status_lib.status_stack()
- self.assertEquals(stack.size(), 0)
- stack.start()
- stack.end()
- self.assertEquals(stack.size(), 0)
- stack.end()
- self.assertEquals(stack.size(), 0)
+ def test_size_always_at_least_zero(self):
+ stack = status_lib.status_stack()
+ self.assertEquals(stack.size(), 0)
+ stack.start()
+ stack.end()
+ self.assertEquals(stack.size(), 0)
+ stack.end()
+ self.assertEquals(stack.size(), 0)
- def test_anything_overrides_nostatus(self):
- for status in self.statuses:
- stack = status_lib.status_stack()
- stack.update(status)
- self.assertEquals(stack.current_status(), status)
+ def test_anything_overrides_nostatus(self):
+ for status in self.statuses:
+ stack = status_lib.status_stack()
+ stack.update(status)
+ self.assertEquals(stack.current_status(), status)
- def test_worse_overrides_better(self):
- for i in xrange(len(self.statuses)):
- worse_status = self.statuses[i]
- for j in xrange(i + 1, len(self.statuses)):
- stack = status_lib.status_stack()
- better_status = self.statuses[j]
- stack.update(better_status)
- stack.update(worse_status)
- self.assertEquals(stack.current_status(),
- worse_status)
+ def test_worse_overrides_better(self):
+ for i in xrange(len(self.statuses)):
+ worse_status = self.statuses[i]
+ for j in xrange(i + 1, len(self.statuses)):
+ stack = status_lib.status_stack()
+ better_status = self.statuses[j]
+ stack.update(better_status)
+ stack.update(worse_status)
+ self.assertEquals(stack.current_status(),
+ worse_status)
- def test_better_never_overrides_better(self):
- for i in xrange(len(self.statuses)):
- better_status = self.statuses[i]
- for j in xrange(i):
- stack = status_lib.status_stack()
- worse_status = self.statuses[j]
- stack.update(worse_status)
- stack.update(better_status)
- self.assertEquals(stack.current_status(),
- worse_status)
+    def test_better_never_overrides_worse(self):
+ for i in xrange(len(self.statuses)):
+ better_status = self.statuses[i]
+ for j in xrange(i):
+ stack = status_lib.status_stack()
+ worse_status = self.statuses[j]
+ stack.update(worse_status)
+ stack.update(better_status)
+ self.assertEquals(stack.current_status(),
+ worse_status)
- def test_stack_is_lifo(self):
- stack = status_lib.status_stack()
- stack.update("GOOD")
- stack.start()
- stack.update("FAIL")
- stack.start()
- stack.update("WARN")
- self.assertEquals(stack.end(), "WARN")
- self.assertEquals(stack.end(), "FAIL")
- self.assertEquals(stack.end(), "GOOD")
- self.assertEquals(stack.end(), "NOSTATUS")
+ def test_stack_is_lifo(self):
+ stack = status_lib.status_stack()
+ stack.update("GOOD")
+ stack.start()
+ stack.update("FAIL")
+ stack.start()
+ stack.update("WARN")
+ self.assertEquals(stack.end(), "WARN")
+ self.assertEquals(stack.end(), "FAIL")
+ self.assertEquals(stack.end(), "GOOD")
+ self.assertEquals(stack.end(), "NOSTATUS")
class parser_test(unittest.TestCase):
- available_versions = [0, 1]
- def test_can_import_available_versions(self):
- for version in self.available_versions:
- p = status_lib.parser(0)
- self.assertNotEqual(p, None)
+ available_versions = [0, 1]
+ def test_can_import_available_versions(self):
+ for version in self.available_versions:
+            p = status_lib.parser(version)
+ self.assertNotEqual(p, None)
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/tko/unique_cookie.py b/tko/unique_cookie.py
index 6aed57d..292d12a 100644
--- a/tko/unique_cookie.py
+++ b/tko/unique_cookie.py
@@ -4,32 +4,29 @@
def unique_id(cookie_key):
- """
- Find out if remote caller has cookie set on the key.
- If not, set cookie on client side: evaluate this key by a random string.
- ( unique user identifier )
- In both scenarios return value of the cookie, be it old or newly set one
- """
- uid = ''
- ## try to retrieve uid from Cookie
- if 'HTTP_COOKIE' in os.environ:
- ## parse os.environ['HTTP_COOKIE']
- cookies = os.environ['HTTP_COOKIE'].split(';')
- key = '%s=' % cookie_key
- uid_cookies = [c for c in cookies if c.strip().startswith(key)]
+    """
+    Find out whether the remote caller has a cookie set for the given key.
+    If not, set a cookie on the client side, binding the key to a random
+    string (a unique user identifier).
+    In both cases return the value of the cookie, whether old or newly set.
+    """
+ uid = ''
+ ## try to retrieve uid from Cookie
+ if 'HTTP_COOKIE' in os.environ:
+ ## parse os.environ['HTTP_COOKIE']
+ cookies = os.environ['HTTP_COOKIE'].split(';')
+ key = '%s=' % cookie_key
+ uid_cookies = [c for c in cookies if c.strip().startswith(key)]
- if uid_cookies:
- assert(len(uid_cookies) == 1)
- uid_cookie = uid_cookies[0]
- uid = uid_cookie.replace(key, '')
+ if uid_cookies:
+ assert(len(uid_cookies) == 1)
+ uid_cookie = uid_cookies[0]
+ uid = uid_cookie.replace(key, '')
- if not uid:
- uid = str(random.random())[2:16] # random string of 14 digits
- set_cookie_statement = 'Set-Cookie:%s=%s;' % (cookie_key, uid)
- set_cookie_statement += 'expires=Thu, 26-Dec-2013 22:03:25 GMT;'
- print set_cookie_statement
+ if not uid:
+ uid = str(random.random())[2:16] # random string of 14 digits
+ set_cookie_statement = 'Set-Cookie:%s=%s;' % (cookie_key, uid)
+ set_cookie_statement += 'expires=Thu, 26-Dec-2013 22:03:25 GMT;'
+ print set_cookie_statement
- return uid
-
-
-
+ return uid
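
Because a cache miss makes unique_id print a Set-Cookie header itself, a CGI caller has to invoke it before emitting the response body. A hypothetical round trip against the function above, with an invented cookie key:

    import os

    # cookie already present: the value is parsed out of HTTP_COOKIE
    os.environ['HTTP_COOKIE'] = 'tko_uid=12345678901234; other=1'
    print unique_id('tko_uid')     # -> '12345678901234', no header printed

    # cookie absent: a Set-Cookie: header is printed, a new uid returned
    del os.environ['HTTP_COOKIE']
    uid = unique_id('tko_uid')     # uid is a random 14-digit string
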
diff --git a/tko/utils.py b/tko/utils.py
index 42815b9..82d9216 100644
--- a/tko/utils.py
+++ b/tko/utils.py
@@ -3,16 +3,16 @@
_debug_logger = sys.stderr
def dprint(msg):
- print >> _debug_logger, msg
+ print >> _debug_logger, msg
def redirect_parser_debugging(ostream):
- global _debug_logger
- _debug_logger = ostream
+ global _debug_logger
+ _debug_logger = ostream
def get_timestamp(mapping, field):
- val = mapping.get(field, None)
- if val is not None:
- val = datetime.datetime.fromtimestamp(int(val))
- return val
+ val = mapping.get(field, None)
+ if val is not None:
+ val = datetime.datetime.fromtimestamp(int(val))
+ return val
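
get_timestamp reads the field as integer seconds since the epoch and converts it with datetime.datetime.fromtimestamp(), i.e. into local time, returning None when the field is absent. A short sketch, assuming the module is imported as utils:

    import utils                   # tko/utils.py

    print utils.get_timestamp({'finished': '1200000000'}, 'finished')
    # -> local-time datetime for 1200000000 seconds after the epoch (Jan 2008)
    print utils.get_timestamp({}, 'finished')
    # -> None
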
diff --git a/tko/utils_unittest.py b/tko/utils_unittest.py
index 9368f95..8e9d8e6 100644
--- a/tko/utils_unittest.py
+++ b/tko/utils_unittest.py
@@ -7,38 +7,38 @@
class get_timestamp_test(unittest.TestCase):
- def testZeroTime(self):
- date = utils.get_timestamp({"key": "0"}, "key")
- timezone = datetime.timedelta(seconds=time.timezone)
- utc_date = date + timezone
- # should be equal to epoch, i.e. Jan 1, 1970
- self.assertEquals(utc_date.year, 1970)
- self.assertEquals(utc_date.month, 1)
- self.assertEquals(utc_date.day, 1)
- self.assertEquals(utc_date.hour, 0)
- self.assertEquals(utc_date.minute, 0)
- self.assertEquals(utc_date.second, 0)
- self.assertEquals(utc_date.microsecond, 0)
+ def testZeroTime(self):
+ date = utils.get_timestamp({"key": "0"}, "key")
+ timezone = datetime.timedelta(seconds=time.timezone)
+ utc_date = date + timezone
+ # should be equal to epoch, i.e. Jan 1, 1970
+ self.assertEquals(utc_date.year, 1970)
+ self.assertEquals(utc_date.month, 1)
+ self.assertEquals(utc_date.day, 1)
+ self.assertEquals(utc_date.hour, 0)
+ self.assertEquals(utc_date.minute, 0)
+ self.assertEquals(utc_date.second, 0)
+ self.assertEquals(utc_date.microsecond, 0)
- def test_returns_none_on_missing_value(self):
- date = utils.get_timestamp({}, "missing_key")
- self.assertEquals(date, None)
+ def test_returns_none_on_missing_value(self):
+ date = utils.get_timestamp({}, "missing_key")
+ self.assertEquals(date, None)
- def test_fails_on_non_integer_values(self):
- self.assertRaises(ValueError, utils.get_timestamp,
- {"key": "zero"}, "key")
+ def test_fails_on_non_integer_values(self):
+ self.assertRaises(ValueError, utils.get_timestamp,
+ {"key": "zero"}, "key")
- def test_date_can_be_string_or_integer(self):
- int_times = [1, 12, 123, 1234, 12345, 123456]
- str_times = [str(t) for t in int_times]
- for int_t, str_t in itertools.izip(int_times, str_times):
- date_int = utils.get_timestamp({"key": int_t}, "key")
- date_str = utils.get_timestamp({"key": str_t}, "key")
- self.assertEquals(date_int, date_str)
+ def test_date_can_be_string_or_integer(self):
+ int_times = [1, 12, 123, 1234, 12345, 123456]
+ str_times = [str(t) for t in int_times]
+ for int_t, str_t in itertools.izip(int_times, str_times):
+ date_int = utils.get_timestamp({"key": int_t}, "key")
+ date_str = utils.get_timestamp({"key": str_t}, "key")
+ self.assertEquals(date_int, date_str)
if __name__ == "__main__":
- unittest.main()
+ unittest.main()
diff --git a/tko/vertical_text.py b/tko/vertical_text.py
index 4173411..371de1c 100755
--- a/tko/vertical_text.py
+++ b/tko/vertical_text.py
@@ -39,4 +39,3 @@
f.close()
simple()
-