How to disable default scoring/boosting in Hibernate Search/Lucene?
I want to serve my users the most relevant and best results. For example, I reward records that have a long title, a description, attached photos, and so on. For context: the records are cycling routes, with route points (coordinates) and metadata such as photos and reviews.

Right now I index these records with Hibernate and then search the index with Lucene through Hibernate Search. To score my results, I build queries on document properties and boost them in the should clauses of a BooleanJunction (using boostedTo()):
bj.should(qb.range().onField("descriptionLength").above(3000).createQuery()).boostedTo(3.0f);
bj.should(qb.range().onField("views.views").above(5000).createQuery()).boostedTo(3.0f);
bj.should(qb.range().onField("nameLength").above(20).createQuery()).boostedTo(1.0f);
bj.should(qb.range().onField("picturesLength").above(0).createQuery()).boostedTo(5.0f);
bj.should(qb.keyword().onField("routePoints.poi.participant").matching("true").createQuery()).boostedTo(10.0f);
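For context, here is a rough sketch of how these should clauses could be assembled and executed through the Hibernate Search query DSL; the Route entity, the open Session, and the surrounding method are assumptions for illustration, not my exact code:

import java.util.List;

import org.hibernate.Session;
import org.hibernate.search.FullTextQuery;
import org.hibernate.search.FullTextSession;
import org.hibernate.search.Search;
import org.hibernate.search.query.dsl.BooleanJunction;
import org.hibernate.search.query.dsl.QueryBuilder;

// Illustration only: Route and the surrounding session handling are assumptions.
public class RouteSearch {

    @SuppressWarnings("unchecked")
    public List<Route> findBestRoutes(Session session) {
        FullTextSession fullTextSession = Search.getFullTextSession(session);
        QueryBuilder qb = fullTextSession.getSearchFactory()
                .buildQueryBuilder().forEntity(Route.class).get();

        BooleanJunction<?> bj = qb.bool();
        // the boosted should clauses from above, e.g.:
        bj.should(qb.range().onField("descriptionLength").above(3000).createQuery()).boostedTo(3.0f);
        bj.should(qb.range().onField("picturesLength").above(0).createQuery()).boostedTo(5.0f);

        org.apache.lucene.search.Query luceneQuery = bj.createQuery();
        FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(luceneQuery, Route.class);
        return fullTextQuery.list();
    }
}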
To try to disable Lucene's scoring, I overrode the DefaultSimilarity class so that every scoring factor returns a flat 1.0f, and enabled it through the Hibernate configuration:
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.search.similarities.DefaultSimilarity;

// Returns 1.0f for every scoring factor so that term frequency, inverse document
// frequency, coordination and length norms no longer influence the score.
public class IgnoreScoringSimilarity extends DefaultSimilarity {

    @Override
    public float idf(long docFreq, long numDocs) {
        return 1.0f;
    }

    @Override
    public float tf(float freq) {
        return 1.0f;
    }

    @Override
    public float coord(int overlap, int maxOverlap) {
        return 1.0f;
    }

    @Override
    public float lengthNorm(FieldInvertState state) {
        return 1.0f;
    }

    @Override
    public float queryNorm(float sumOfSquaredWeights) {
        return 1.0f;
    }
}
Hibernate configuration:
<property name="hibernate.search.default.similarity" value="com.search.IgnoreScoringSimilarity"/>
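To double-check whether the custom similarity is actually in effect, I can inspect Lucene's score explanation per hit. A minimal sketch, assuming a FullTextQuery built from the BooleanJunction above (the printExplanations helper is hypothetical):

import java.util.List;

import org.hibernate.search.FullTextQuery;

// Hypothetical debugging helper, not part of my actual code: prints Lucene's score
// explanation for the first hits so that any remaining tf/idf/norm contributions show up.
public class ScoreDebug {

    @SuppressWarnings("unchecked")
    public static void printExplanations(FullTextQuery fullTextQuery, int maxHits) {
        fullTextQuery.setMaxResults(maxHits);
        // Project the Lucene document id and the Explanation alongside the entity itself.
        fullTextQuery.setProjection(FullTextQuery.DOCUMENT_ID, FullTextQuery.EXPLANATION, FullTextQuery.THIS);
        List<Object[]> rows = fullTextQuery.list();
        for (Object[] row : rows) {
            System.out.println("doc " + row[0] + ":\n" + row[1]);
        }
    }
}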
This approach works in about 90% of the cases; however, I still see some odd results that seem out of place. The pattern I recognize is that these routes (documents) are very large: an ordinary route has around 20-30 route points, but the out-of-place results have 100-150. This leads me to believe that the default Lucene scoring is still happening (and scoring them higher because of the document size).

Am I doing something wrong in disabling Lucene's scoring? Could there be another explanation?
I can suggest another approach based on custom sorting of the results. You can read about it in this answer. That answer is somewhat outdated, so I adapted the example to the Lucene 4.10.1 API.

The comparator:
import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldComparator;

// Orders hits by a score computed from several numeric fields instead of Lucene's relevance score.
public abstract class CustomComparator extends FieldComparator<Double> {
    double[] scoring;            // computed score per slot in the hit queue
    double bottom;
    double topValue;
    private FieldCache.Ints[] currentReaderValues;
    private String[] fields;

    // Computes the custom score from the per-document field values.
    protected abstract double getScore(int[] value);

    public CustomComparator(int hitNum, String[] fields) {
        this.fields = fields;
        scoring = new double[hitNum];
    }

    // Reads the configured fields for one document from the field cache.
    int[] fromReaders(int doc) {
        int[] result = new int[currentReaderValues.length];
        for (int i = 0; i < result.length; i++) {
            result[i] = currentReaderValues[i].get(doc);
        }
        return result;
    }

    @Override
    public int compare(int slot1, int slot2) {
        return Double.compare(scoring[slot1], scoring[slot2]);
    }

    @Override
    public void setBottom(int slot) {
        this.bottom = scoring[slot];
    }

    @Override
    public void setTopValue(Double top) {
        topValue = top;
    }

    @Override
    public int compareBottom(int doc) throws IOException {
        double v2 = getScore(fromReaders(doc));
        return Double.compare(bottom, v2);
    }

    @Override
    public int compareTop(int doc) throws IOException {
        double docValue = getScore(fromReaders(doc));
        return Double.compare(topValue, docValue);
    }

    @Override
    public void copy(int slot, int doc) throws IOException {
        scoring[slot] = getScore(fromReaders(doc));
    }

    @Override
    public FieldComparator<Double> setNextReader(AtomicReaderContext atomicReaderContext) throws IOException {
        currentReaderValues = new FieldCache.Ints[fields.length];
        for (int i = 0; i < fields.length; i++) {
            currentReaderValues[i] = FieldCache.DEFAULT.getInts(atomicReaderContext.reader(), fields[i], null, false);
        }
        return this;
    }

    @Override
    public Double value(int slot) {
        return scoring[slot];
    }
}
The search example:
import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class SortExample {

    public static void main(String[] args) throws IOException {
        final String[] fields = new String[]{"descriptionLength", "views.views", "nameLength"};
        // getScore() negates the sum, so documents with the highest custom score come first.
        Sort sort = new Sort(
                new SortField(
                        "",
                        new FieldComparatorSource() {
                            public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
                                return new CustomComparator(numHits, fields) {
                                    @Override
                                    protected double getScore(int[] value) {
                                        int descriptionLength = value[0];
                                        int views = value[1];
                                        int nameLength = value[2];
                                        return -((descriptionLength > 2000.0 ? 5.0 : 0.0) +
                                                (views > 5000.0 ? 3.0 : 0.0) +
                                                (nameLength > 20.0 ? 1.0 : 0.0));
                                    }
                                };
                            }
                        }
                )
        );

        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_4_10_4, new StandardAnalyzer());
        Directory directory = new RAMDirectory();
        IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig);
        addDoc(indexWriter, "score 0", 1000, 1000, 10);
        addDoc(indexWriter, "score 5", 3000, 1000, 10);
        addDoc(indexWriter, "score 3", 1000, 6000, 10);
        addDoc(indexWriter, "score 1", 1000, 1000, 30);
        addDoc(indexWriter, "score 4", 1000, 6000, 30);
        addDoc(indexWriter, "score 6", 5000, 1000, 30);
        addDoc(indexWriter, "score 9", 5000, 6000, 30);

        final IndexReader indexReader = DirectoryReader.open(indexWriter, false);
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        Query query = new TermQuery(new Term("all", "all"));
        int nDocs = 100;
        final TopDocs search = indexSearcher.search(query, null, nDocs, sort);
        System.out.println("Max " + search.scoreDocs.length + " " + search.getMaxScore());
        for (ScoreDoc sd : search.scoreDocs) {
            Document document = indexReader.document(sd.doc);
            System.out.println(document.getField("name").stringValue());
        }
    }

    private static void addDoc(IndexWriter indexWriter, String name, int descriptionLength, int views, int nameLength) throws IOException {
        Document doc = new Document();
        doc.add(new TextField("name", name, Field.Store.YES));
        doc.add(new TextField("all", "all", Field.Store.YES));
        doc.add(new IntField("descriptionLength", descriptionLength, Field.Store.YES));
        doc.add(new IntField("views.views", views, Field.Store.YES));
        doc.add(new IntField("nameLength", nameLength, Field.Store.YES));
        indexWriter.addDocument(doc);
    }
}
The code will output:
score 9
score 6
score 5
score 4
score 3
score 1
score 0
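If you run your search through Hibernate Search rather than a raw IndexSearcher, the same Sort can be passed along via FullTextQuery.setSort(). A minimal sketch, assuming your fullTextSession, the Lucene query you already build, and your Route entity:

import java.util.List;

import org.apache.lucene.search.Sort;
import org.hibernate.search.FullTextQuery;
import org.hibernate.search.FullTextSession;

// Sketch only: Route and the way the Lucene query and Sort are obtained are assumptions.
public class SortedRouteSearch {

    @SuppressWarnings("unchecked")
    public static List<Route> search(FullTextSession fullTextSession,
                                     org.apache.lucene.search.Query luceneQuery,
                                     Sort customSort) {
        FullTextQuery fullTextQuery = fullTextSession.createFullTextQuery(luceneQuery, Route.class);
        // Hibernate Search hands the Sort to the underlying Lucene search,
        // so the CustomComparator-based Sort from the example above can be reused here.
        fullTextQuery.setSort(customSort);
        return fullTextQuery.list();
    }
}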