Add a job option controlling whether failed repair results are parsed as part of the job, with a default value in global_config. Since the number of options associated with a job is getting out of hand, I packaged them into a dict in the RPC entry point and passed them around that way from then on.
Signed-off-by: Steve Howard <[email protected]>
git-svn-id: http://test.kernel.org/svn/autotest/trunk@3110 592f7852-d20e-0410-864c-8624ca9c26a4
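
A minimal sketch of the options-dict pattern described above, with invented values; the real entry point is create_job() in rpc_interface.py below, and the stubs here only show the shape of the hand-off:

    # Sketch: the RPC entry point folds its many keyword arguments into one
    # dict and passes that dict inward instead of a long parameter list.
    def create_new_job(owner, options, host_objects, metahost_objects,
                       atomic_group=None):
        # downstream code reads optional fields with options.get(...)
        return options.get('parse_failed_repair')

    def create_job(name, priority, control_file, control_type,
                   parse_failed_repair=None, **other_options):
        options = dict(name=name, priority=priority,
                       control_file=control_file, control_type=control_type,
                       parse_failed_repair=parse_failed_repair,
                       **other_options)
        return create_new_job(owner='debug_user', options=options,
                              host_objects=[], metahost_objects=[])

    assert create_job('sleeptest', 'Medium', '...', 'Client',
                      parse_failed_repair=True) is True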
diff --git a/cli/job.py b/cli/job.py
index ca6aaf5..d0ef9b7 100755
--- a/cli/job.py
+++ b/cli/job.py
@@ -194,7 +194,8 @@
else:
keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status',
'owner', 'control_type', 'synch_count', 'created_on',
- 'run_verify', 'reboot_before', 'reboot_after']
+ 'run_verify', 'reboot_before', 'reboot_after',
+ 'parse_failed_repair']
if self.show_control_file:
keys.append('control_file')
@@ -211,7 +212,7 @@
[--reboot_before <option>] [--reboot_after <option>]
[--noverify] [--timeout <timeout>] [--one-time-hosts <hosts>]
[--email <email>] [--dependencies <labels this job is dependent on>]
- [--atomic_group <atomic group name>]
+ [--atomic_group <atomic group name>] [--parse-failed-repair <option>]
job_name
Creating a job is rather different from the other create operations,
@@ -271,6 +272,11 @@
type='choice',
choices=('never', 'if all tests passed',
'always'))
+ self.parser.add_option('--parse-failed-repair',
+ help='Whether or not to parse failed repair '
+ 'results as part of the job',
+ type='choice',
+ choices=('true', 'false'))
self.parser.add_option('-l', '--clone', help='Clone an existing job. '
'This will discard all other options except '
'--reuse-hosts.', default=False,
@@ -362,6 +368,9 @@
self.data['reboot_before'] = options.reboot_before.capitalize()
if options.reboot_after:
self.data['reboot_after'] = options.reboot_after.capitalize()
+ if options.parse_failed_repair:
+ self.data['parse_failed_repair'] = (
+ options.parse_failed_repair == 'true')
if options.noverify:
self.data['run_verify'] = False
if options.timeout:
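
A standalone sketch of how the new CLI choice becomes the boolean the RPC expects, mirroring the two cli/job.py hunks above (optparse is the parser the atest CLI builds on; the parser instance here is illustrative, not the real job create class):

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option('--parse-failed-repair', type='choice',
                      choices=('true', 'false'),
                      help='Whether or not to parse failed repair '
                           'results as part of the job')
    options, _ = parser.parse_args(['--parse-failed-repair', 'true'])

    data = {}
    if options.parse_failed_repair:
        data['parse_failed_repair'] = (options.parse_failed_repair == 'true')
    assert data == {'parse_failed_repair': True}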
diff --git a/cli/topic_common.py b/cli/topic_common.py
index bf47ce3..6d2129a 100755
--- a/cli/topic_common.py
+++ b/cli/topic_common.py
@@ -99,6 +99,7 @@
'experimental': 'Experimental',
'synch_count': 'Sync Count',
'max_number_of_machines': 'Max. hosts to use',
+ 'parse_failed_repair': 'Include failed repair results',
}
# In the failure, tag that will replace the item.
diff --git a/frontend/afe/doctests/001_rpc_test.txt b/frontend/afe/doctests/001_rpc_test.txt
index 08c1c6f..92959a8 100644
--- a/frontend/afe/doctests/001_rpc_test.txt
+++ b/frontend/afe/doctests/001_rpc_test.txt
@@ -473,7 +473,8 @@
... 'run_verify': 1,
... 'email_list': '',
... 'reboot_before': 'If dirty',
-... 'reboot_after': 'Always'}
+... 'reboot_after': 'Always',
+... 'parse_failed_repair': True}
True
# get_host_queue_entries returns a lot of data, so let's only check a couple
diff --git a/frontend/afe/model_logic.py b/frontend/afe/model_logic.py
index 5610e7a..10cf1a7 100644
--- a/frontend/afe/model_logic.py
+++ b/frontend/afe/model_logic.py
@@ -234,11 +234,11 @@
return cursor.fetchall()
- def _is_relation_to(self, field, model_class):
- return field.rel and field.rel.to is model_class
+ def _is_relation_to(self, field, ModelClass):
+ return field.rel and field.rel.to is ModelClass
- def _determine_pivot_table(self, related_model):
+ def _determine_pivot_table(self, RelatedModel):
"""
Determine the pivot table for this relationship and return a tuple
(pivot_table, pivot_from_field, pivot_to_field). See
@@ -246,14 +246,14 @@
Note -- this depends on Django model internals and will likely need to
be updated when we move to Django 1.x.
"""
- # look for a field on related_model relating to this model
- for field in related_model._meta.fields:
+ # look for a field on RelatedModel relating to this model
+ for field in RelatedModel._meta.fields:
if self._is_relation_to(field, self.model):
# many-to-one -- the related table itself is the pivot table
- return (related_model._meta.db_table, field.column,
- related_model.objects.get_key_on_this_table())
+ return (RelatedModel._meta.db_table, field.column,
+ RelatedModel.objects.get_key_on_this_table())
- for field in related_model._meta.many_to_many:
+ for field in RelatedModel._meta.many_to_many:
if self._is_relation_to(field, self.model):
# many-to-many
return (field.m2m_db_table(), field.m2m_reverse_name(),
@@ -261,12 +261,12 @@
# maybe this model has the many-to-many field
for field in self.model._meta.many_to_many:
- if self._is_relation_to(field, related_model):
+ if self._is_relation_to(field, RelatedModel):
return (field.m2m_db_table(), field.m2m_column_name(),
field.m2m_reverse_name())
raise ValueError('%s has no relation to %s' %
- (related_model, self.model))
+ (RelatedModel, self.model))
def _query_pivot_table(self, id_list, pivot_table, pivot_from_field,
@@ -298,11 +298,11 @@
return related_ids
- def populate_relationships(self, model_objects, related_model,
+ def populate_relationships(self, model_objects, RelatedModel,
related_list_name):
"""
- For each instance in model_objects, add a field named related_list_name
- listing all the related objects of type related_model. related_model
+ For each instance in model_objects, add a field named related_list_name
+ listing all the related objects of type RelatedModel. RelatedModel
must be in a many-to-one or many-to-many relationship with this model.
"""
if not model_objects:
@@ -310,12 +310,12 @@
return
id_list = (item._get_pk_val() for item in model_objects)
pivot_table, pivot_from_field, pivot_to_field = (
- self._determine_pivot_table(related_model))
+ self._determine_pivot_table(RelatedModel))
related_ids = self._query_pivot_table(id_list, pivot_table,
pivot_from_field, pivot_to_field)
all_related_ids = list(set(itertools.chain(*related_ids.itervalues())))
- related_objects_by_id = related_model.objects.in_bulk(all_related_ids)
+ related_objects_by_id = RelatedModel.objects.in_bulk(all_related_ids)
for item in model_objects:
related_ids_for_item = related_ids.get(item._get_pk_val(), [])
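
The model_logic.py changes are a pure rename (related_model becomes RelatedModel, flagging that a class rather than an instance is passed). For reference, the bulk-population idea the renamed code implements can be shown in plain Python, with literal data standing in for the pivot-table query and the in_bulk() fetch:

    import itertools

    # object pk -> related pks, as _query_pivot_table() would return them
    related_ids = {1: [10, 11], 2: [11]}
    # one bulk fetch of every related object, instead of one query per object
    all_related_ids = set(itertools.chain(*related_ids.values()))
    related_objects_by_id = dict((pk, 'label-%d' % pk)
                                 for pk in all_related_ids)

    for obj_pk in (1, 2, 3):
        pks = related_ids.get(obj_pk, [])
        print('%d -> %r' % (obj_pk, [related_objects_by_id[pk] for pk in pks]))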
diff --git a/frontend/afe/models.py b/frontend/afe/models.py
index a4373f8..e832c33 100644
--- a/frontend/afe/models.py
+++ b/frontend/afe/models.py
@@ -633,9 +633,14 @@
job dependencies
reboot_before: Never, If dirty, or Always
reboot_after: Never, If all tests passed, or Always
+ parse_failed_repair: if True, a failed repair launched by this job will have
+ its results parsed as part of the job.
"""
DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
'AUTOTEST_WEB', 'job_timeout_default', default=240)
+ DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
+ 'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
+ default=False)
Priority = enum.Enum('Low', 'Medium', 'High', 'Urgent')
ControlType = enum.Enum('Server', 'Client', start_value=1)
@@ -662,6 +667,8 @@
reboot_after = dbmodels.SmallIntegerField(choices=RebootAfter.choices(),
blank=True,
default=DEFAULT_REBOOT_AFTER)
+ parse_failed_repair = dbmodels.BooleanField(
+ default=DEFAULT_PARSE_FAILED_REPAIR)
# custom manager
@@ -673,23 +680,28 @@
@classmethod
- def create(cls, owner, name, priority, control_file, control_type,
- hosts, synch_count, timeout, run_verify, email_list,
- dependencies, reboot_before, reboot_after):
+ def create(cls, owner, options, hosts):
"""\
Creates a job by taking some information (the listed args)
and filling in the rest of the necessary information.
"""
AclGroup.check_for_acl_violation_hosts(hosts)
job = cls.add_object(
- owner=owner, name=name, priority=priority,
- control_file=control_file, control_type=control_type,
- synch_count=synch_count, timeout=timeout,
- run_verify=run_verify, email_list=email_list,
- reboot_before=reboot_before, reboot_after=reboot_after,
+ owner=owner,
+ name=options['name'],
+ priority=options['priority'],
+ control_file=options['control_file'],
+ control_type=options['control_type'],
+ synch_count=options.get('synch_count'),
+ timeout=options.get('timeout'),
+ run_verify=options.get('run_verify'),
+ email_list=options.get('email_list'),
+ reboot_before=options.get('reboot_before'),
+ reboot_after=options.get('reboot_after'),
+ parse_failed_repair=options.get('parse_failed_repair'),
created_on=datetime.now())
- job.dependency_labels = dependencies
+ job.dependency_labels = options['dependencies']
return job
@@ -819,7 +831,7 @@
def on_attribute_changed(self, attribute, old_value):
assert attribute == 'status'
logger.info('%s/%d (%d) -> %s' % (self.host, self.job.id, self.id,
- self.status))
+ self.status))
def is_meta_host_entry(self):
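
The mix of options['name'] and options.get('synch_count') in the create() hunk is deliberate: required fields should raise KeyError when missing, while optional fields quietly come back as None. A tiny runnable illustration (field names as in the patch, values invented):

    options = dict(name='sleeptest', priority=1,
                   control_file='job.run_test("sleeptest")', control_type=2)

    assert options['name'] == 'sleeptest'              # required field present
    assert options.get('parse_failed_repair') is None  # optional, not supplied
    try:
        options['dependencies']                        # required, not supplied
    except KeyError:
        print('missing required field fails loudly')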
diff --git a/frontend/afe/rpc_interface.py b/frontend/afe/rpc_interface.py
index 36cea00..09f88ed 100644
--- a/frontend/afe/rpc_interface.py
+++ b/frontend/afe/rpc_interface.py
@@ -353,37 +353,36 @@
timeout=None, synch_count=None, hosts=(), meta_hosts=(),
run_verify=True, one_time_hosts=(), email_list='',
dependencies=(), reboot_before=None, reboot_after=None,
- atomic_group_name=None):
+ parse_failed_repair=None, atomic_group_name=None):
"""\
Create and enqueue a job.
- priority: Low, Medium, High, Urgent
- control_file: String contents of the control file.
- control_type: Type of control file, Client or Server.
- is_template: If true then create a template job.
- timeout: Hours after this call returns until the job times out.
- synch_count: How many machines the job uses per autoserv execution.
- synch_count == 1 means the job is asynchronous. If an
- atomic group is given this value is treated as a minimum.
- hosts: List of hosts to run job on.
- meta_hosts: List where each entry is a label name, and for each entry
- one host will be chosen from that label to run the job
- on.
- run_verify: Should the host be verified before running the test?
- one_time_hosts: List of hosts not in the database to run the job on.
- email_list: String containing emails to mail when the job is done
- dependencies: List of label names on which this job depends
- reboot_before: Never, If dirty, or Always
- reboot_after: Never, If all tests passed, or Always
- atomic_group_name: The name of an atomic group to schedule the job on.
+ @param name name of this job
+ @param priority Low, Medium, High, Urgent
+ @param control_file String contents of the control file.
+ @param control_type Type of control file, Client or Server.
+ @param synch_count How many machines the job uses per autoserv execution.
+ synch_count == 1 means the job is asynchronous. If an atomic group is
+ given this value is treated as a minimum.
+ @param is_template If true then create a template job.
+ @param timeout Hours after this call returns until the job times out.
+ @param run_verify Should the host be verified before running the test?
+ @param email_list String containing emails to mail when the job is done
+ @param dependencies List of label names on which this job depends
+ @param reboot_before Never, If dirty, or Always
+ @param reboot_after Never, If all tests passed, or Always
+ @param parse_failed_repair if true, results of failed repairs launched by
+ this job will be parsed as part of the job.
+
+ @param hosts List of hosts to run job on.
+ @param meta_hosts List where each entry is a label name, and for each entry
+ one host will be chosen from that label to run the job on.
+ @param one_time_hosts List of hosts not in the database to run the job on.
+ @param atomic_group_name The name of an atomic group to schedule the job on.
+
@returns The created Job id number.
"""
-
- if timeout is None:
- timeout=global_config.global_config.get_config_value(
- 'AUTOTEST_WEB', 'job_timeout_default')
-
owner = thread_local.get_user().login
# input validation
if not (hosts or meta_hosts or one_time_hosts or atomic_group_name):
@@ -425,21 +424,23 @@
this_host = models.Host.create_one_time_host(host)
host_objects.append(this_host)
+ options = dict(name=name,
+ priority=priority,
+ control_file=control_file,
+ control_type=control_type,
+ is_template=is_template,
+ timeout=timeout,
+ synch_count=synch_count,
+ run_verify=run_verify,
+ email_list=email_list,
+ dependencies=dependencies,
+ reboot_before=reboot_before,
+ reboot_after=reboot_after,
+ parse_failed_repair=parse_failed_repair)
return rpc_utils.create_new_job(owner=owner,
+ options=options,
host_objects=host_objects,
metahost_objects=metahost_objects,
- name=name,
- priority=priority,
- control_file=control_file,
- control_type=control_type,
- is_template=is_template,
- synch_count=synch_count,
- timeout=timeout,
- run_verify=run_verify,
- email_list=email_list,
- dependencies=dependencies,
- reboot_before=reboot_before,
- reboot_after=reboot_after,
atomic_group=atomic_group)
@@ -639,6 +640,8 @@
host_statuses: Sorted list of possible Host statuses.
job_statuses: Sorted list of possible HostQueueEntry statuses.
job_timeout_default: The default job timeout length in hours.
+ parse_failed_repair_default: Default value for the parse_failed_repair job
+ option.
reboot_before_options: A list of valid RebootBefore string enums.
reboot_after_options: A list of valid RebootAfter string enums.
motd: Server's message of the day.
@@ -663,6 +666,8 @@
result['host_statuses'] = sorted(models.Host.Status.names)
result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
result['job_timeout_default'] = models.Job.DEFAULT_TIMEOUT
+ result['parse_failed_repair_default'] = bool(
+ models.Job.DEFAULT_PARSE_FAILED_REPAIR)
result['reboot_before_options'] = models.RebootBefore.names
result['reboot_after_options'] = models.RebootAfter.names
result['motd'] = rpc_utils.get_motd()
@@ -679,7 +684,7 @@
"Parsing": "Awaiting parse of final results",
"Gathering": "Gathering log files",
"Template": "Template job for recurring run"}
-
+
return result
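
The deleted timeout fallback at the top of create_job() is meant to be covered by the model-level DEFAULT_TIMEOUT (and now DEFAULT_PARSE_FAILED_REPAIR). A toy, Django-free version of that defaulting pattern, with an invented CONFIG dict standing in for global_config:

    CONFIG = {'job_timeout_default': 72, 'parse_failed_repair_default': 0}

    class Job(object):
        DEFAULT_TIMEOUT = CONFIG['job_timeout_default']
        DEFAULT_PARSE_FAILED_REPAIR = bool(CONFIG['parse_failed_repair_default'])

        def __init__(self, timeout=None, parse_failed_repair=None):
            # None means "caller did not say"; fall back to the class default
            if timeout is None:
                timeout = self.DEFAULT_TIMEOUT
            if parse_failed_repair is None:
                parse_failed_repair = self.DEFAULT_PARSE_FAILED_REPAIR
            self.timeout = timeout
            self.parse_failed_repair = parse_failed_repair

    job = Job()
    assert (job.timeout, job.parse_failed_repair) == (72, False)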
diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
index e04d372..e6af277 100644
--- a/frontend/afe/rpc_utils.py
+++ b/frontend/afe/rpc_utils.py
@@ -372,15 +372,14 @@
return info
-def create_new_job(owner, host_objects, metahost_objects,
- name, priority, control_file, control_type,
- is_template=False, timeout=None, synch_count=None,
- run_verify=True, email_list='', dependencies=[],
- reboot_before=None, reboot_after=None, atomic_group=None):
+def create_new_job(owner, options, host_objects, metahost_objects,
+ atomic_group=None):
labels_by_name = dict((label.name, label)
- for label in models.Label.objects.all())
+ for label in models.Label.objects.all())
all_host_objects = host_objects + metahost_objects
metahost_counts = _get_metahost_counts(metahost_objects)
+ dependencies = options.get('dependencies', [])
+ synch_count = options.get('synch_count')
# check that each metahost request has enough hosts under the label
for label, requested_count in metahost_counts.iteritems():
@@ -413,10 +412,10 @@
check_job_dependencies(host_objects, dependencies)
- dependency_labels = [labels_by_name[label_name]
- for label_name in dependencies]
+ options['dependencies'] = [labels_by_name[label_name]
+ for label_name in dependencies]
- for label in metahost_objects + dependency_labels:
+ for label in metahost_objects + options['dependencies']:
if label.atomic_group and not atomic_group:
raise model_logic.ValidationError(
{'atomic_group_name':
@@ -429,19 +428,10 @@
'Some meta_hosts or dependencies require an atomic group '
'other than the one requested for this job.'})
- job = models.Job.create(owner=owner, name=name, priority=priority,
- control_file=control_file,
- control_type=control_type,
- synch_count=synch_count,
- hosts=all_host_objects,
- timeout=timeout,
- run_verify=run_verify,
- email_list=email_list.strip(),
- dependencies=dependency_labels,
- reboot_before=reboot_before,
- reboot_after=reboot_after)
+ job = models.Job.create(owner=owner, options=options,
+ hosts=all_host_objects)
job.queue(all_host_objects, atomic_group=atomic_group,
- is_template=is_template)
+ is_template=options.get('is_template', False))
return job.id
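
One subtlety in create_new_job() above: options arrives holding dependency label names and leaves holding Label objects, because the list is resolved in place before Job.create() runs. In plain Python, with strings standing in for Label model instances:

    labels_by_name = {'x86_64': '<Label: x86_64>',
                      'needs-gpu': '<Label: needs-gpu>'}
    options = {'dependencies': ['needs-gpu']}
    options['dependencies'] = [labels_by_name[label_name]
                               for label_name in options['dependencies']]
    assert options['dependencies'] == ['<Label: needs-gpu>']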
diff --git a/frontend/client/src/autotest/afe/CreateJobView.java b/frontend/client/src/autotest/afe/CreateJobView.java
index 23b0b9b..521a920 100644
--- a/frontend/client/src/autotest/afe/CreateJobView.java
+++ b/frontend/client/src/autotest/afe/CreateJobView.java
@@ -162,6 +162,7 @@
protected CheckBox skipVerify = new CheckBox();
private RadioChooser rebootBefore = new RadioChooser();
private RadioChooser rebootAfter = new RadioChooser();
+ private CheckBox parseFailedRepair = new CheckBox();
protected TestSelector testSelector;
protected CheckBoxPanel<CheckBox> profilersPanel =
new CheckBoxPanel<CheckBox>(TEST_COLUMNS);
@@ -218,6 +219,8 @@
skipVerify.setChecked(!jobObject.get("run_verify").isBoolean().booleanValue());
rebootBefore.setSelectedChoice(Utils.jsonToString(jobObject.get("reboot_before")));
rebootAfter.setSelectedChoice(Utils.jsonToString(jobObject.get("reboot_after")));
+ parseFailedRepair.setChecked(
+ jobObject.get("parse_failed_repair").isBoolean().booleanValue());
controlTypeSelect.setControlType(
jobObject.get("control_type").isString().stringValue());
@@ -528,6 +531,7 @@
RootPanel.get("create_skip_verify").add(skipVerify);
RootPanel.get("create_reboot_before").add(rebootBefore);
RootPanel.get("create_reboot_after").add(rebootAfter);
+ RootPanel.get("create_parse_failed_repair").add(parseFailedRepair);
RootPanel.get("create_tests").add(testSelector);
RootPanel.get("create_profilers").add(profilersPanel);
RootPanel.get("create_edit_control").add(controlFilePanel);
@@ -539,13 +543,16 @@
}
public void reset() {
+ StaticDataRepository repository = StaticDataRepository.getRepository();
+
jobName.setText("");
resetPriorityToDefault();
rebootBefore.reset();
rebootAfter.reset();
- kernel.setText("");
- timeout.setText(StaticDataRepository.getRepository().
- getData("job_timeout_default").isString().stringValue());
+ parseFailedRepair.setChecked(
+ repository.getData("parse_failed_repair_default").isBoolean().booleanValue());
+ kernel.setText("");
+ timeout.setText(repository.getData("job_timeout_default").isString().stringValue());
emailList.setText("");
testSelector.reset();
skipVerify.setChecked(false);
@@ -581,8 +588,7 @@
public void doCallback(Object source) {
JSONObject args = new JSONObject();
args.put("name", new JSONString(jobName.getText()));
- String priority = priorityList.getItemText(
- priorityList.getSelectedIndex());
+ String priority = priorityList.getItemText(priorityList.getSelectedIndex());
args.put("priority", new JSONString(priority));
args.put("control_file", new JSONString(controlFile.getText()));
args.put("control_type",
@@ -592,15 +598,18 @@
args.put("email_list", new JSONString(emailList.getText()));
args.put("run_verify", JSONBoolean.getInstance(!skipVerify.isChecked()));
args.put("is_template", JSONBoolean.getInstance(isTemplate));
+ args.put("dependencies", getSelectedDependencies());
args.put("reboot_before", new JSONString(rebootBefore.getSelectedChoice()));
args.put("reboot_after", new JSONString(rebootAfter.getSelectedChoice()));
+ args.put("parse_failed_repair",
+ JSONBoolean.getInstance(parseFailedRepair.isChecked()));
+
HostSelector.HostSelection hosts = hostSelector.getSelectedHosts();
args.put("hosts", Utils.stringsToJSON(hosts.hosts));
args.put("meta_hosts", Utils.stringsToJSON(hosts.metaHosts));
args.put("one_time_hosts",
Utils.stringsToJSON(hosts.oneTimeHosts));
- args.put("dependencies", getSelectedDependencies());
-
+
rpcProxy.rpcCall("create_job", args, new JsonRpcCallback() {
@Override
public void onSuccess(JSONValue result) {
diff --git a/frontend/client/src/autotest/afe/JobDetailView.java b/frontend/client/src/autotest/afe/JobDetailView.java
index 760317e..d001d47 100644
--- a/frontend/client/src/autotest/afe/JobDetailView.java
+++ b/frontend/client/src/autotest/afe/JobDetailView.java
@@ -105,6 +105,7 @@
showText(runVerify, "view_run_verify");
showField(jobObject, "reboot_before", "view_reboot_before");
showField(jobObject, "reboot_after", "view_reboot_after");
+ showField(jobObject, "parse_failed_repair", "view_parse_failed_repair");
showField(jobObject, "synch_count", "view_synch_count");
showField(jobObject, "control_type", "view_control_type");
showField(jobObject, "control_file", "view_control_file");
diff --git a/frontend/client/src/autotest/public/AfeClient.html b/frontend/client/src/autotest/public/AfeClient.html
index 1476373..4d2756c 100644
--- a/frontend/client/src/autotest/public/AfeClient.html
+++ b/frontend/client/src/autotest/public/AfeClient.html
@@ -69,6 +69,8 @@
<span class="field-name">Reboot options:</span>
<span id="view_reboot_before"></span> before job,
<span id="view_reboot_after"></span> after job<br>
+ <span class="field-name">Include failed repair results:</span>
+ <span id="view_parse_failed_repair"></span><br>
<span class="field-name">Dependencies:</span>
<span id="view_dependencies"></span><br>
<span class="field-name">Synchronization count:</span>
@@ -117,6 +119,8 @@
<td id="create_reboot_before"></td><td></td></tr>
<tr><td class="field-name">Reboot after:</td>
<td id="create_reboot_after"></td><td></td></tr>
+ <tr><td class="field-name">Include failed repair results:</td>
+ <td id="create_parse_failed_repair"></td><td></td></tr>
<tr><td class="field-name">Tests:</td>
<td id="create_tests" colspan="2"></td></tr>
<tr><td class="field-name">Profilers:</td>
diff --git a/frontend/migrations/034_add_parse_failed_repair_option.py b/frontend/migrations/034_add_parse_failed_repair_option.py
new file mode 100644
index 0000000..94019c5
--- /dev/null
+++ b/frontend/migrations/034_add_parse_failed_repair_option.py
@@ -0,0 +1,7 @@
+UP_SQL = """
+ALTER TABLE jobs ADD COLUMN parse_failed_repair bool NOT NULL DEFAULT TRUE;
+"""
+
+DOWN_SQL = """
+ALTER TABLE jobs DROP COLUMN parse_failed_repair;
+"""
diff --git a/global_config.ini b/global_config.ini
index 968020a..75235a3 100644
--- a/global_config.ini
+++ b/global_config.ini
@@ -19,6 +19,7 @@
user: autotest
password: please_set_this_password
job_timeout_default: 72
+parse_failed_repair_default: 0
# Only set this if your server is not 'http://[SERVER] hostname/afe/'
#base_url: http://your_autotest_server/afe/
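
The consuming side of this setting is the DEFAULT_PARSE_FAILED_REPAIR lookup in models.py above; for reference, the call looks like this (the import path is the usual autotest one and is assumed here):

    from autotest_lib.client.common_lib import global_config

    default = global_config.global_config.get_config_value(
            'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
            default=False)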
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index 678aec1..d6466bf 100755
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -1362,7 +1362,7 @@
return first_execution_tag
- def _copy_and_parse_results(self, queue_entries, use_monitor=None):
+ def _copy_results(self, queue_entries, use_monitor=None):
assert len(queue_entries) > 0
if use_monitor is None:
assert self.monitor
@@ -1373,10 +1373,17 @@
_drone_manager.copy_to_results_repository(use_monitor.get_process(),
results_path)
+
+ def _parse_results(self, queue_entries):
reparse_task = FinalReparseTask(queue_entries)
self.agent.dispatcher.add_agent(Agent([reparse_task], num_processes=0))
+ def _copy_and_parse_results(self, queue_entries, use_monitor=None):
+ self._copy_results(queue_entries, use_monitor)
+ self._parse_results(queue_entries)
+
+
def run(self, pidfile_name=_AUTOSERV_PID_FILE, paired_with_pidfile=None):
if self.cmd:
self.monitor = PidfileRunMonitor()
@@ -1472,7 +1479,9 @@
source_path=self.temp_results_dir + '/',
destination_path=self.queue_entry_to_fail.execution_tag() + '/')
- self._copy_and_parse_results([self.queue_entry_to_fail])
+ self._copy_results([self.queue_entry_to_fail])
+ if self.queue_entry_to_fail.job.parse_failed_repair:
+ self._parse_results([self.queue_entry_to_fail])
self.queue_entry_to_fail.handle_host_failure()
@@ -2510,7 +2519,8 @@
_table_name = 'jobs'
_fields = ('id', 'owner', 'name', 'priority', 'control_file',
'control_type', 'created_on', 'synch_count', 'timeout',
- 'run_verify', 'email_list', 'reboot_before', 'reboot_after')
+ 'run_verify', 'email_list', 'reboot_before', 'reboot_after',
+ 'parse_failed_repair')
def __init__(self, id=None, row=None, **kwargs):
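
The scheduler change splits _copy_and_parse_results() so the repair-failure path can copy results unconditionally but parse them only when the job opts in. A stub-based sketch of that control flow (class and method names invented for the demo; the gating logic matches the RepairTask hunk above):

    class StubTask(object):
        def __init__(self):
            self.calls = []
        def _copy_results(self, queue_entries):
            self.calls.append('copy')
        def _parse_results(self, queue_entries):
            self.calls.append('parse')
        def finish_failed_repair(self, queue_entry):
            # always copy; parse only if the job asked for it
            self._copy_results([queue_entry])
            if queue_entry.job.parse_failed_repair:
                self._parse_results([queue_entry])

    class StubJob(object):
        parse_failed_repair = False

    class StubEntry(object):
        job = StubJob()

    task = StubTask()
    task.finish_failed_repair(StubEntry())
    assert task.calls == ['copy']   # no parse when the option is off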
diff --git a/scheduler/monitor_db_unittest.py b/scheduler/monitor_db_unittest.py
index f217634..bc27cbe 100644
--- a/scheduler/monitor_db_unittest.py
+++ b/scheduler/monitor_db_unittest.py
@@ -1458,7 +1458,7 @@
self._test_repair_task_helper(False)
- def test_repair_task_with_queue_entry(self):
+ def _test_repair_task_with_queue_entry_helper(self, parse_failed_repair):
self.god.stub_class(monitor_db, 'FinalReparseTask')
self.god.stub_class(monitor_db, 'Agent')
self.god.stub_class_method(monitor_db.TaskWithJobKeyvals,
@@ -1479,11 +1479,13 @@
self._setup_move_logfile(copy_on_drone=True)
self.queue_entry.execution_tag.expect_call().and_return('tag')
self._setup_move_logfile()
- reparse_task = monitor_db.FinalReparseTask.expect_new(
- [self.queue_entry])
- reparse_agent = monitor_db.Agent.expect_new([reparse_task],
- num_processes=0)
- self._dispatcher.add_agent.expect_call(reparse_agent)
+ self.job.parse_failed_repair = parse_failed_repair
+ if parse_failed_repair:
+ reparse_task = monitor_db.FinalReparseTask.expect_new(
+ [self.queue_entry])
+ reparse_agent = monitor_db.Agent.expect_new([reparse_task],
+ num_processes=0)
+ self._dispatcher.add_agent.expect_call(reparse_agent)
self.queue_entry.handle_host_failure.expect_call()
task = monitor_db.RepairTask(self.host, self.queue_entry)
@@ -1495,6 +1497,11 @@
self.god.check_playback()
+ def test_repair_task_with_queue_entry(self):
+ self._test_repair_task_with_queue_entry_helper(True)
+ self._test_repair_task_with_queue_entry_helper(False)
+
+
def setup_verify_expects(self, success, use_queue_entry):
if use_queue_entry:
self.queue_entry.set_status.expect_call('Verifying')