add option max_reflections_per_experiment to combine_experiments (#1369)
* add option max_reflections_per_experiment to combine_experiments

* test min_ and max_reflections_per_experiment
dwpaley committed Aug 6, 2020
1 parent 5dea370 commit 3ceefdf
Showing 4 changed files with 60 additions and 4 deletions.
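
The new option is used alongside the existing output.min_reflections_per_experiment. A command-line sketch (the input file names are illustrative, not part of this commit):

    dials.combine_experiments input.experiments=combined.expt \
        input.reflections=combined.refl \
        output.min_reflections_per_experiment=100 \
        output.max_reflections_per_experiment=150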
1 change: 1 addition & 0 deletions AUTHORS
@@ -4,6 +4,7 @@ Aaron Brewster
 Asmit Bhowmick
 Benjamin Williams
 Billy Poon
+Daniel Paley
 David Waterman
 Derek Mendez
 Graeme Winter
30 changes: 26 additions & 4 deletions command_line/combine_experiments.py
@@ -178,6 +178,12 @@
     .help = "If not None, throw out any experiment with fewer than this"
             "many reflections"
+  max_reflections_per_experiment = None
+    .type = int
+    .expert_level = 2
+    .help = "If not None, throw out any experiment with more than this"
+            "many reflections"
   include scope dials.algorithms.integration.stills_significance_filter.phil_scope
 }
 """,
@@ -501,7 +507,8 @@ def average_detectors(target, panelgroups, depth):
         # set up global experiments and reflections lists
         reflections = flex.reflection_table()
         global_id = 0
-        skipped_expts = 0
+        skipped_expts_min_refl = 0
+        skipped_expts_max_refl = 0
         experiments = ExperimentList()

         # loop through the input, building up the global lists
@@ -523,7 +530,13 @@ def average_detectors(target, panelgroups, depth):
                 params.output.min_reflections_per_experiment is not None
                 and n_sub_ref < params.output.min_reflections_per_experiment
             ):
-                skipped_expts += 1
+                skipped_expts_min_refl += 1
                 continue
+            if (
+                params.output.max_reflections_per_experiment is not None
+                and n_sub_ref > params.output.max_reflections_per_experiment
+            ):
+                skipped_expts_max_refl += 1
+                continue

             nrefs_per_exp.append(n_sub_ref)
@@ -550,11 +563,20 @@ def average_detectors(target, panelgroups, depth):
         if (
             params.output.min_reflections_per_experiment is not None
-            and skipped_expts > 0
+            and skipped_expts_min_refl > 0
         ):
             print(
                 "Removed {0} experiments with fewer than {1} reflections".format(
-                    skipped_expts, params.output.min_reflections_per_experiment
+                    skipped_expts_min_refl, params.output.min_reflections_per_experiment
                 )
             )
+        if (
+            params.output.max_reflections_per_experiment is not None
+            and skipped_expts_max_refl > 0
+        ):
+            print(
+                "Removed {0} experiments with more than {1} reflections".format(
+                    skipped_expts_max_refl, params.output.max_reflections_per_experiment
+                )
+            )

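Read in isolation, the change above reduces to a per-experiment bounds check on the reflection count. A minimal standalone sketch (plain tuples stand in for DIALS experiment objects, and filter_by_reflection_count is a hypothetical helper, not part of this commit):

    # Sketch of the filtering logic added above; min_refl/max_refl mirror
    # output.min_reflections_per_experiment / output.max_reflections_per_experiment.
    def filter_by_reflection_count(experiments, min_refl=None, max_refl=None):
        kept = []
        skipped_min = skipped_max = 0
        for expt, n_refl in experiments:
            # Too few reflections: skip, as in the existing min_reflections branch.
            if min_refl is not None and n_refl < min_refl:
                skipped_min += 1
                continue
            # Too many reflections: skip, as in the new max_reflections branch.
            if max_refl is not None and n_refl > max_refl:
                skipped_max += 1
                continue
            kept.append(expt)
        if min_refl is not None and skipped_min:
            print("Removed {0} experiments with fewer than {1} reflections".format(skipped_min, min_refl))
        if max_refl is not None and skipped_max:
            print("Removed {0} experiments with more than {1} reflections".format(skipped_max, max_refl))
        return kept

    # Both bounds are inclusive: experiments with exactly min_refl or
    # max_refl reflections are kept.
    assert filter_by_reflection_count(
        [("a", 80), ("b", 100), ("c", 150), ("d", 200)], min_refl=100, max_refl=150
    ) == ["b", "c"]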
1 change: 1 addition & 0 deletions newsfragments/1369.feature
@@ -0,0 +1 @@
+dials.combine_experiments: add an option output.max_reflections_per_experiment
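
As a usage sketch, the same option can be set from a phil file, nesting under the output scope as the parameter definition above does (values illustrative):

    output {
      min_reflections_per_experiment = 100
      max_reflections_per_experiment = 150
    }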
32 changes: 32 additions & 0 deletions test/command_line/test_combine_experiments.py
@@ -299,6 +299,38 @@ def narrow_wedge_input_with_identifiers(dials_regression, tmpdir):
     return phil_input


+@pytest.mark.parametrize("min_refl", ["None", "100"])
+@pytest.mark.parametrize("max_refl", ["None", "150"])
+def test_min_max_reflections_per_experiment(
+    dials_regression, run_in_tmpdir, min_refl, max_refl
+):
+
+    expected_results = {
+        ("None", "None"): 10,
+        ("None", "150"): 9,
+        ("100", "None"): 6,
+        ("100", "150"): 5,
+    }
+
+    data_dir = os.path.join(dials_regression, "refinement_test_data", "multi_stills")
+    input_phil = (
+        " input.experiments={0}/combined_experiments.json\n"
+        + " input.reflections={0}/combined_reflections.pickle\n"
+        + " output.min_reflections_per_experiment={1}\n"
+        + " output.max_reflections_per_experiment={2}\n"
+    ).format(data_dir, min_refl, max_refl)
+    with open("input.phil", "w") as phil_file:
+        phil_file.writelines(input_phil)
+
+    result = procrunner.run(["dials.combine_experiments", "input.phil"])
+    assert not result.returncode and not result.stderr
+
+    # load results
+    exp = ExperimentListFactory.from_json_file("combined.expt", check_format=False)
+
+    assert len(exp) == expected_results[(min_refl, max_refl)]
+
+
 @pytest.mark.parametrize("with_identifiers", ["True", "False"])
 @pytest.mark.parametrize("method", ["random", "n_refl", "significance_filter"])
 def test_combine_nsubset(
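The new parametrized test covers all four min/max combinations. Assuming a development install with the dials_regression data configured, it can be run in isolation with:

    pytest test/command_line/test_combine_experiments.py -k test_min_max_reflections_per_experiment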
