"""
Tests for the constraints system used in refinement
"""
from __future__ import annotations
import shutil
import subprocess
from copy import deepcopy
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx.test_utils import approx_equal
from scitbx import sparse
from dials.algorithms.refinement.constraints import (
ConstraintManager,
EqualShiftConstraint,
SparseConstraintManager,
)
from dials.algorithms.refinement.engine import Journal
from dials.array_family import flex


def test_constraints_manager_simple_test():
x = flex.random_double(10)
    # constrain the parameters with indices 1 and 3, and those with indices
    # 5, 6 and 7
c1 = EqualShiftConstraint([1, 3], x)
c2 = EqualShiftConstraint([5, 6, 7], x)
cm = ConstraintManager([c1, c2], len(x))
constrained_x = cm.constrain_parameters(x)
# check the constrained parameters are as expected
assert len(constrained_x) == 7
    assert constrained_x[5] == flex.mean(x.select(flex.size_t([1, 3])))
assert constrained_x[6] == flex.mean(x[5:8])
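    # A sketch of the expected layout (an assumption consistent with the
    # checks above, not something asserted by the original test): the
    # unconstrained parameters come first, in their original order, followed
    # by one value per constraint
    assert list(constrained_x[0:5]) == [x[i] for i in (0, 2, 4, 8, 9)]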
# minimiser would modify the constrained parameters
mod_constrained_x = constrained_x + 10.0
# check the expanded parameters are as expected
expanded = cm.expand_parameters(mod_constrained_x)
    assert (x + 10.0 == expanded).all_eq(True)
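    # A sketch of how a non-uniform shift propagates, assuming the change in
    # a constrained value is applied equally to every member of its set
    shifted = constrained_x.deep_copy()
    shifted[5] += 2.0  # move only the first constrained (mean) parameter
    expanded2 = cm.expand_parameters(shifted)
    assert approx_equal(expanded2[1] - x[1], 2.0)
    assert approx_equal(expanded2[3] - x[3], 2.0)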
# make a matrix to exercise jacobian compaction
j = flex.random_double(20 * 10)
j.reshape(flex.grid(20, 10))
    # For columns within a constrained set, elements that are non-zero in one
    # column are zero in the other columns. Enforce that in this example
    # column 1 keeps rows 10-19 non-zero; column 3 keeps rows 0-9
    mask2 = flex.bool([True] * 10 + [False] * 10)
    mask4 = ~mask2
col2 = j.matrix_copy_column(1)
col2.set_selected(mask2, 0)
j.matrix_paste_column_in_place(col2, 1)
col4 = j.matrix_copy_column(3)
col4.set_selected(mask4, 0)
j.matrix_paste_column_in_place(col4, 3)
    # column 5 keeps rows 0-6 non-zero, column 6 keeps rows 13-19 and
    # column 7 keeps rows 7-12
    mask6 = flex.bool([False] * 7 + [True] * 13)
    mask7 = mask6.reversed()
    mask8 = ~(mask6 & mask7)
col6 = j.matrix_copy_column(5)
col6.set_selected(mask6, 0)
j.matrix_paste_column_in_place(col6, 5)
col7 = j.matrix_copy_column(6)
col7.set_selected(mask7, 0)
j.matrix_paste_column_in_place(col7, 6)
col8 = j.matrix_copy_column(7)
col8.set_selected(mask8, 0)
j.matrix_paste_column_in_place(col8, 7)
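    # An additional sanity check, beyond the original assertions: the
    # non-zero rows of columns within each constrained set are now disjoint,
    # so their element-wise products vanish
    for a, b in [(1, 3), (5, 6), (5, 7), (6, 7)]:
        prod = j.matrix_copy_column(a) * j.matrix_copy_column(b)
        assert prod.all_eq(0.0)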
cj = cm.constrain_jacobian(j)
# check expected dimensions
assert cj.all() == (20, 7)
# check that the constrained columns are equal to sums of the relevant
# columns in the original Jacobian
tmp = j.matrix_copy_column(1) + j.matrix_copy_column(3)
assert (cj.matrix_copy_column(5) == tmp).all_eq(True)
tmp = j.matrix_copy_column(5) + j.matrix_copy_column(6) + j.matrix_copy_column(7)
assert (cj.matrix_copy_column(6) == tmp).all_eq(True)
# convert to a sparse matrix to exercise the sparse Jacobian compaction
j2 = sparse.matrix(20, 10)
mask = flex.bool(20, True)
for i, c in enumerate(j2.cols()):
c.set_selected(mask, j.matrix_copy_column(i))
assert (j2.as_dense_matrix() == j).all_eq(True)
cm2 = SparseConstraintManager([c1, c2], len(x))
cj2 = cm2.constrain_jacobian(j2)
# ensure dense and sparse calculations give the same result
assert (cj2.as_dense_matrix() == cj).all_eq(True)
# construct derivatives of the objective dL/dp from the Jacobian to test
# constrain_gradient_vector. Here assume unit weights
dL_dp = [sum(col.as_dense_vector()) for col in j2.cols()]
constr_dL_dp = cm.constrain_gradient_vector(dL_dp)
# check constrained values are equal to sums of relevant elements in the
# original gradient vector
assert constr_dL_dp[5] == dL_dp[1] + dL_dp[3]
assert constr_dL_dp[6] == dL_dp[5] + dL_dp[6] + dL_dp[7]
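    # The sparse manager should produce the same constrained gradient (a
    # sketch, assuming SparseConstraintManager shares constrain_gradient_vector
    # with ConstraintManager)
    constr_dL_dp2 = cm2.constrain_gradient_vector(dL_dp)
    assert approx_equal(constr_dL_dp, constr_dL_dp2)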


def test_constrained_refinement(dials_data, tmp_path):
"""Test joint refinement where two detectors are constrained to enforce a
differential distance (along the shared initial normal vector) of 1 mm.
This test can be constructed on the fly from data already in
dials_data"""
    # Use the "from-xds" refinement test data here. These regularized
    # experiments are useful because the detector has fast and slow axes
    # exactly aligned with X and -Y, so the distance lies exactly along the
    # normal vector and can be altered directly by changing the Z component
    # of the origin vector
data_dir = dials_data("refinement_test_data", pathlib=True)
experiments_path = data_dir / "from-xds.json"
pickle_path = data_dir / "from-xds-1000.pickle"
# load the experiments and spots
el = ExperimentListFactory.from_json_file(experiments_path, check_format=False)
rt = flex.reflection_table.from_file(pickle_path)
# adjust the detector distance by -0.5 mm
detector = el[0].detector
panel = detector[0]
fast = panel.get_fast_axis()
slow = panel.get_slow_axis()
origin = panel.get_origin()
panel.set_frame(fast, slow, origin[0:2] + (origin[2] + 0.5,))
# duplicate the experiment and adjust distance by +1 mm
e2 = deepcopy(el[0])
detector = e2.detector
panel = detector[0]
fast = panel.get_fast_axis()
slow = panel.get_slow_axis()
origin = panel.get_origin()
panel.set_frame(fast, slow, origin[0:2] + (origin[2] - 1.0,))
# append to the experiment list and write out
el.append(e2)
el.as_json(tmp_path / "foo.expt")
# duplicate the reflections and increment the experiment id
rt2 = deepcopy(rt)
rt2["id"] = rt2["id"] + 1
# concatenate reflections and write out
rt.extend(rt2)
rt.as_file(tmp_path / "foo.refl")
    # Set up refinement, constraining the "Dist" parameter. We must also fix
    # the tilt and twist parameters (Tau2, Tau3) to ensure this actually
    # constrains the distance between the models.
result = subprocess.run(
(
shutil.which("dials.refine"),
"foo.expt",
"foo.refl",
"history=history.json",
"refinement.parameterisation.detector.constraints.parameter=Dist",
"refinement.parameterisation.detector.fix_list=Tau2,Tau3",
"refinement.reflections.outlier.algorithm=null",
"scan_varying=False",
),
cwd=tmp_path,
capture_output=True,
)
assert result.returncode == 0 and not result.stderr
# load refinement history
history = Journal.from_json_file(tmp_path / "history.json")
ref_exp = ExperimentListFactory.from_json_file(
tmp_path / "refined.expt", check_format=False
)
# get parameter vector from the final step
pvec = history["parameter_vector"][-1]
# the constrained parameters have indices 0 and 4 in this case. Check they
# are still exactly 1 mm apart
assert pvec[0] == pvec[4] - 1.0
# check also the refined distances remain 1 mm different
det1, det2 = ref_exp.detectors()
p1 = det1[0]
p2 = det2[0]
assert approx_equal(p2.get_distance() - p1.get_distance(), 1.0, eps=1e-6)
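    # A final sketch, assuming the Journal records one parameter vector per
    # refinement step: the constrained pair should stay 1 mm apart at every
    # step, not only the last
    for vec in history["parameter_vector"]:
        assert approx_equal(vec[0], vec[4] - 1.0)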