1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199
|
/** @file
* @brief Xapian::PL2PlusWeight class - the PL2+ weighting scheme of the DFR framework.
*/
/* Copyright (C) 2013 Aarsh Shah
* Copyright (C) 2013,2014,2016,2017,2024 Olly Betts
* Copyright (C) 2016 Vivek Pal
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <config.h>
#include "xapian/weight.h"
#include "common/log2.h"
#include "serialise-double.h"
#include "xapian/error.h"
#include <algorithm>
using namespace std;
namespace Xapian {
PL2PlusWeight::PL2PlusWeight(double c, double delta)
    : param_c(c), param_delta(delta)
{
    // Both tuning parameters must be strictly positive (NaN also rejected
    // by these comparisons falling through to "not <= 0" being false is
    // avoided: keep the original <= 0 tests exactly).
    if (param_c <= 0)
	throw Xapian::InvalidArgumentError("Parameter c is invalid");
    if (param_delta <= 0)
	throw Xapian::InvalidArgumentError("Parameter delta is invalid");
    // Register every statistic the weight equations consume; init() and
    // get_sumpart() may only read stats requested here.
    for (auto stat : { AVERAGE_LENGTH, DOC_LENGTH, DOC_LENGTH_MIN,
		       DOC_LENGTH_MAX, COLLECTION_SIZE, COLLECTION_FREQ,
		       WDF, WDF_MAX, WQF }) {
	need_stat(stat);
    }
}
PL2PlusWeight *
PL2PlusWeight::clone() const
{
    // Fresh instance carrying the same tuning parameters; per-query state
    // is recomputed by init() on the copy.
    PL2PlusWeight * copy = new PL2PlusWeight(param_c, param_delta);
    return copy;
}
void
PL2PlusWeight::init(double factor_)
{
    if (factor_ == 0.0) {
	// This object is for the term-independent contribution, and that's
	// always zero for this scheme.
	return;
    }

    factor = factor_ * get_wqf();

    auto wdf_upper_bound = get_wdf_upper_bound();

    // Mean of the term's assumed Poisson distribution over documents.
    mean = double(get_collection_freq()) / get_collection_size();
    if (rare(wdf_upper_bound == 0 || mean > 1)) {
	// PL2+ is based on a modified PL2 which "essentially ignores
	// non-discriminative query terms".
	upper_bound = 0;
	return;
    }

    double base_change(1.0 / log(2.0));
    P1 = mean * base_change + 0.5 * log2(2.0 * M_PI);
    P2 = log2(mean) + base_change;

    cl = param_c * get_average_length();

    double wdfn_lower = log2(1 + cl / get_doclength_upper_bound());
    double divisor = max(get_wdf_upper_bound(), get_doclength_lower_bound());
    double wdfn_upper = get_wdf_upper_bound() * log2(1 + cl / divisor);

    // Term-frequency-normalisation lower-bound term ("+ delta" part of
    // PL2+), evaluated at wdfn = delta.
    double P_delta = P1 + (param_delta + 0.5) * log2(param_delta) - P2 * param_delta;
    dw = P_delta / (param_delta + 1.0);

    // Calculate an upper bound on the weights which get_sumpart() can return.
    //
    // We consider the equation for P as the sum of two parts which we
    // maximise individually:
    //
    // (a) (wdfn + 0.5) / (wdfn + 1) * log2(wdfn)
    // (b) (P1 - P2 * wdfn) / (wdfn + 1)
    //
    // To maximise (a), the fractional part is always positive (since wdfn>0)
    // and is maximised by maximising wdfn - clearer when rewritten as:
    // (1 - 0.5 / (wdfn + 1))
    //
    // The log part of (a) is clearly also maximised by maximising wdfn,
    // so we want to evaluate (a) at wdfn=wdfn_upper.
    double P_max2a = (wdfn_upper + 0.5) * log2(wdfn_upper) / (wdfn_upper + 1.0);
    // To maximise (b) substitute x=wdfn+1 (so x>1) and we get:
    //
    // (P1 + P2)/x - P2
    //
    // Differentiating wrt x gives:
    //
    // -(P1 + P2)/x²
    //
    // So there are no local minima or maxima, and the function is continuous
    // in the range of interest, so the sign of this differential tells us
    // whether we want to maximise or minimise wdfn, and the denominator is
    // always positive so we can just consider the sign of: (P1 + P2)
    //
    // Commonly P1 + P2 > 0, in which case the differential is negative so
    // (b) is a decreasing function of wdfn, and its maximum is at
    // wdfn = wdfn_lower.  (The previous code evaluated (b) at wdfn_upper
    // here, which gives a value that weights can exceed, so this wasn't a
    // valid upper bound.)
    double wdfn_optb = P1 + P2 > 0 ? wdfn_lower : wdfn_upper;
    double P_max2b = (P1 - P2 * wdfn_optb) / (wdfn_optb + 1.0);
    upper_bound = factor * (P_max2a + P_max2b + dw);
    if (rare(upper_bound < 0)) upper_bound = 0;
}
string
PL2PlusWeight::name() const
{
    // Identifier used for registration and serialisation of this scheme.
    return string("Xapian::PL2PlusWeight");
}
string
PL2PlusWeight::serialise() const
{
    // Encode c followed by delta; unserialise() decodes in the same order.
    return serialise_double(param_c) + serialise_double(param_delta);
}
PL2PlusWeight *
PL2PlusWeight::unserialise(const string & s) const
{
    // Decode the two parameters in the order serialise() wrote them.
    const char * p = s.data();
    const char * p_end = p + s.size();
    double c = unserialise_double(&p, p_end);
    double delta = unserialise_double(&p, p_end);
    // The encoding must be consumed exactly - trailing bytes mean a
    // corrupt or mismatched serialisation.
    if (rare(p != p_end))
	throw Xapian::SerialisationError("Extra data in PL2PlusWeight::unserialise()");
    return new PL2PlusWeight(c, delta);
}
double
PL2PlusWeight::get_sumpart(Xapian::termcount wdf, Xapian::termcount len,
			   Xapian::termcount) const
{
    // Per-document weight contribution for this term, using the P1/P2/cl/dw
    // values precomputed by init().
    // Note: lambda_t in the paper is 1/mean.
    if (wdf == 0 || mean > 1) {
	// PL2+ is based on a modified PL2 which "essentially ignores
	// non-discriminative query terms".
	return 0.0;
    }
    // Normalisation 2: scale wdf by document length relative to cl.
    double wdfn = wdf * log2(1 + cl / len);
    double P = P1 + (wdfn + 0.5) * log2(wdfn) - P2 * wdfn;
    double wt = (P / (wdfn + 1.0)) + dw;
    // FIXME: Is a negative wt possible here? It is with vanilla PL2, but for
    // PL2+ we've added on dw, and bailed out early if mean > 1.
    if (rare(wt <= 0)) return 0.0;
    return factor * wt;
}
double
PL2PlusWeight::get_maxpart() const
{
    // Upper bound on get_sumpart(), precomputed by init().
    return upper_bound;
}
double
PL2PlusWeight::get_sumextra(Xapian::termcount, Xapian::termcount) const
{
    // PL2+ has no term-independent per-document contribution.
    return 0;
}
double
PL2PlusWeight::get_maxextra() const
{
    // Upper bound on get_sumextra(), which is always zero for this scheme.
    return 0;
}
}
|