Package org.apache.lucene.search

Examples of org.apache.lucene.search.Collector


    // Tests that the collector delegates calls to input collectors properly.

    // Tests that the collector handles some null collectors well. If it
    // doesn't, an NPE would be thrown.
    DummyCollector[] dcs = new DummyCollector[] { new DummyCollector(), new DummyCollector() };
    Collector c = MultiCollector.wrap(dcs);
    // Exercise every Collector callback on the wrapper; each call below must
    // be forwarded to both DummyCollector delegates (verified afterwards via
    // the *Called flags on each DummyCollector).
    assertTrue(c.acceptsDocsOutOfOrder());
    c.collect(1);
    c.setNextReader(null);
    c.setScorer(null);

    for (DummyCollector dc : dcs) {
      assertTrue(dc.acceptsDocsOutOfOrderCalled);
      assertTrue(dc.collectCalled);
      assertTrue(dc.setNextReaderCalled);
View Full Code Here


      final IndexSearcher indexSearcher = newSearcher(indexReader);
      final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
          new SpanTermQuery(new Term(FIELD, "fox")),
          new SpanTermQuery(new Term(FIELD, "jumped")) }, 0, true);
      final FixedBitSet bitset = new FixedBitSet(indexReader.maxDoc());
      indexSearcher.search(phraseQuery, new Collector() {
        private int baseDoc;

        @Override
        public boolean acceptsDocsOutOfOrder() {
          return true;
View Full Code Here

    // Collect the score of every matching doc into scores[], indexed by
    // absolute (index-wide) document id.
    IndexSearcher searcher = new IndexSearcher(store);
    final float[] scores = new float[NUM_DOCS];
    float lastScore = 0.0f;
   
    // default similarity should put docs with shorter length first
    searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
      private int docBase = 0;  // doc-id offset of the current reader segment
      private Scorer scorer;    // scorer for the current segment (set per segment)
     
      public final void collect(int doc) throws IOException {
        // doc is segment-relative; add docBase to get the absolute doc id
        scores[doc + docBase] = scorer.score();
      }
      public void setNextReader(IndexReader reader, int docBase) {
        this.docBase = docBase;
      }
      public void setScorer(Scorer scorer) throws IOException {
        this.scorer = scorer;
      }
      public boolean acceptsDocsOutOfOrder() {
        // order is irrelevant: each score is written to its absolute slot
        return true;
      }
    });
    searcher.close();
   
    // Verify scores are non-increasing by doc id: with the default
    // similarity, shorter documents (indexed earlier here) score higher.
    lastScore = Float.MAX_VALUE;
    for (int i = 0; i < NUM_DOCS; i++) {
      String msg = "i=" + i + ", " + scores[i] + " <= " + lastScore;
      assertTrue(msg, scores[i] <= lastScore);
      //System.out.println(msg);
      lastScore = scores[i];
    }

    FieldNormModifier fnm = new FieldNormModifier(store, s);
    fnm.reSetNorms("field");
   
    // new norm (with default similarity) should put longer docs first
    searcher = new IndexSearcher(store);
    searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
      private int docBase = 0;
      private Scorer scorer;
      public final void collect(int doc) throws IOException {
        scores[doc + docBase] = scorer.score();
      }
View Full Code Here

    IndexSearcher searcher = new IndexSearcher(store);
    final float[] scores = new float[NUM_DOCS];
    float lastScore = 0.0f;
   
    // default similarity should return the same score for all documents for this query
    searcher.search(new TermQuery(new Term("untokfield", "20061212")), new Collector() {
      private int docBase = 0;
      private Scorer scorer;
      public final void collect(int doc) throws IOException {
        scores[doc + docBase] = scorer.score();
      }
View Full Code Here

  // Collect per-document scores, indexed by absolute (index-wide) doc id.
  final float[] scores = new float[NUM_DOCS];
  float lastScore = 0.0f;
 
  // default similarity should put docs with shorter length first
  searcher = new IndexSearcher(store);
  searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
    private int docBase = 0;  // doc-id offset of the current reader segment
    private Scorer scorer;    // scorer for the current segment
    public final void collect(int doc) throws IOException {
      // doc is segment-relative; add docBase for the absolute doc id
      scores[doc + docBase] = scorer.score();
    }
    public void setNextReader(IndexReader reader, int docBase) {
      this.docBase = docBase;
    }
    public void setScorer(Scorer scorer) throws IOException {
      this.scorer = scorer;
    }
    public boolean acceptsDocsOutOfOrder() {
      // out-of-order collection is fine: writes are keyed by absolute doc id
      return true;
    }
  });
  searcher.close();
 
  // Verify scores are non-increasing by doc id (shorter docs score higher
  // under the default similarity).
  lastScore = Float.MAX_VALUE;
  for (int i = 0; i < NUM_DOCS; i++) {
      String msg = "i=" + i + ", "+scores[i]+" <= "+lastScore;
      assertTrue(msg, scores[i] <= lastScore);
      //System.out.println(msg);
      lastScore = scores[i];
  }

  // override the norms to be inverted
  Similarity s = new DefaultSimilarity() {
    public float lengthNorm(String fieldName, int numTokens) {
        // inverted length norm: more tokens -> larger norm, so longer
        // documents now score higher than shorter ones
        return numTokens;
    }
      };
  // Rewrite the stored norms for "field" using the inverted similarity,
  // so a subsequent search should rank longer docs first.
  FieldNormModifier fnm = new FieldNormModifier(store, s);
  fnm.reSetNorms("field");

  // new norm (with default similarity) should put longer docs first
  searcher = new IndexSearcher(store);
  searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
      private int docBase = 0;
      private Scorer scorer;
      public final void collect(int doc) throws IOException {
        scores[doc + docBase] = scorer.score();
      }
View Full Code Here

   *
   *  This simulates the streaming search use case, where all hits are supposed to
   *  be processed, regardless of their relevance.
   */
  public static void doStreamingSearch(final Searcher searcher, Query query) throws IOException {
    Collector streamingHitCollector = new Collector() {
      private Scorer scorer;
      private int docBase;
     
      // simply print docId and score of every matching document
      public void collect(int doc) throws IOException {
View Full Code Here

    return true;
  }

  @Override
  protected Collector createCollector() throws Exception {
    Collector collector = null;
    if (clnName.equalsIgnoreCase("topScoreDocOrdered") == true) {
      collector = TopScoreDocCollector.create(numHits(), true);
    } else if (clnName.equalsIgnoreCase("topScoreDocUnOrdered") == true) {
      collector = TopScoreDocCollector.create(numHits(), false);
    } else if (clnName.length() > 0){
View Full Code Here

            hits = collector.topDocs();
          } else {
            hits = searcher.search(q, numHits);
          }
        } else {
          Collector collector = createCollector();
          searcher.search(q, null, collector);
          //hits = collector.topDocs();
        }

        final String printHitsField = getRunData().getConfig().get("print.hits.field", null);
View Full Code Here

    BooleanQuery bq = new BooleanQuery();
    bq.add(joinQuery, BooleanClause.Occur.SHOULD);
    bq.add(new TermQuery(new Term("id", "3")), BooleanClause.Occur.SHOULD);

    indexSearcher.search(bq, new Collector() {
        boolean sawFive;
        @Override
        public void setNextReader(AtomicReaderContext context) {
        }
        @Override
View Full Code Here

        }

        // Need to know all documents that have matches. TopDocs doesn't give me that and then I'd be also testing TopDocsCollector...
        final FixedBitSet actualResult = new FixedBitSet(indexSearcher.getIndexReader().maxDoc());
        final TopScoreDocCollector topScoreDocCollector = TopScoreDocCollector.create(10, false);
        indexSearcher.search(joinQuery, new Collector() {

          int docBase;

          @Override
          public void collect(int doc) throws IOException {
View Full Code Here

TOP

Related Classes of org.apache.lucene.search.Collector

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.