Package org.apache.lucene.search

Examples of org.apache.lucene.search.Collector
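These excerpts use the Lucene 4.x Collector API, whose callbacks are collect(int), setNextReader(AtomicReaderContext), setScorer(Scorer) and acceptsDocsOutOfOrder(). As a minimal sketch (searcher, query and bits are illustrative names, not taken from the excerpts), a collector that only records which documents matched could look like this:

    final FixedBitSet bits = new FixedBitSet(searcher.getIndexReader().maxDoc());
    searcher.search(query, new Collector() {

      private int docBase;

      @Override
      public void collect(int doc) throws IOException {
        // doc is segment-relative; add docBase to get the top-level doc id
        bits.set(docBase + doc);
      }

      @Override
      public void setNextReader(AtomicReaderContext context) throws IOException {
        docBase = context.docBase;
      }

      @Override
      public void setScorer(Scorer scorer) throws IOException {
        // scores are not needed when only marking matches
      }

      @Override
      public boolean acceptsDocsOutOfOrder() {
        return true; // order does not matter when just setting bits
      }
    });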



        // We need to know all documents that have matches; TopDocs doesn't expose that, and relying on it would also mean testing TopDocsCollector...
        final FixedBitSet actualResult = new FixedBitSet(indexSearcher.getIndexReader().maxDoc());
        final TopScoreDocCollector topScoreDocCollector = TopScoreDocCollector.create(10, false);
        indexSearcher.search(joinQuery, new Collector() {

          int docBase;

          @Override
          public void collect(int doc) throws IOException {
View Full Code Here


        toField = "from";
        queryVals = context.toHitsToJoinScore;
      }
      final Map<BytesRef, JoinScore> joinValueToJoinScores = new HashMap<BytesRef, JoinScore>();
      if (multipleValuesPerDocument) {
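        // Multi-valued "from" field: record each matching document's join value(s) and accumulate its score per value.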
        fromSearcher.search(new TermQuery(new Term("value", uniqueRandomValue)), new Collector() {

          private Scorer scorer;
          private SortedSetDocValues docTermOrds;
          final BytesRef joinValue = new BytesRef();

          @Override
          public void collect(int doc) throws IOException {
            docTermOrds.setDocument(doc);
            long ord;
            while ((ord = docTermOrds.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
              docTermOrds.lookupOrd(ord, joinValue);
              JoinScore joinScore = joinValueToJoinScores.get(joinValue);
              if (joinScore == null) {
                joinValueToJoinScores.put(BytesRef.deepCopyOf(joinValue), joinScore = new JoinScore());
              }
              joinScore.addScore(scorer.score());
            }
          }

          @Override
          public void setNextReader(AtomicReaderContext context) throws IOException {
            docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), fromField);
          }

          @Override
          public void setScorer(Scorer scorer) {
            this.scorer = scorer;
          }

          @Override
          public boolean acceptsDocsOutOfOrder() {
            return false;
          }
        });
      } else {
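        // Single-valued "from" field: read each matching document's join value from BinaryDocValues, skipping docs without a value.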
        fromSearcher.search(new TermQuery(new Term("value", uniqueRandomValue)), new Collector() {

          private Scorer scorer;
          private BinaryDocValues terms;
          private final BytesRef spare = new BytesRef();

          @Override
          public void collect(int doc) throws IOException {
            terms.get(doc, spare);
            BytesRef joinValue = spare;
            if (joinValue.bytes == BinaryDocValues.MISSING) {
              return;
            }

            JoinScore joinScore = joinValueToJoinScores.get(joinValue);
            if (joinScore == null) {
              joinValueToJoinScores.put(BytesRef.deepCopyOf(joinValue), joinScore = new JoinScore());
            }
            joinScore.addScore(scorer.score());
          }

          @Override
          public void setNextReader(AtomicReaderContext context) throws IOException {
            terms = FieldCache.DEFAULT.getTerms(context.reader(), fromField);
          }

          @Override
          public void setScorer(Scorer scorer) {
            this.scorer = scorer;
          }

          @Override
          public boolean acceptsDocsOutOfOrder() {
            return false;
          }
        });
      }

      final Map<Integer, JoinScore> docToJoinScore = new HashMap<Integer, JoinScore>();
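      // Second pass: map each "to"-side document to the JoinScore of its join value (the first value encountered wins).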
      if (multipleValuesPerDocument) {
        if (scoreDocsInOrder) {
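          // In-order case: walk the "to" field's term dictionary and, for each collected join value, map its postings' docs to that value's JoinScore.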
          AtomicReader slowCompositeReader = SlowCompositeReaderWrapper.wrap(toSearcher.getIndexReader());
          Terms terms = slowCompositeReader.terms(toField);
          if (terms != null) {
            DocsEnum docsEnum = null;
            TermsEnum termsEnum = null;
            SortedSet<BytesRef> joinValues = new TreeSet<BytesRef>(BytesRef.getUTF8SortedAsUnicodeComparator());
            joinValues.addAll(joinValueToJoinScores.keySet());
            for (BytesRef joinValue : joinValues) {
              termsEnum = terms.iterator(termsEnum);
              if (termsEnum.seekExact(joinValue, true)) {
                docsEnum = termsEnum.docs(slowCompositeReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
                JoinScore joinScore = joinValueToJoinScores.get(joinValue);

                for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
                  // First encountered join value determines the score.
                  // Something to keep in mind for many-to-many relations.
                  if (!docToJoinScore.containsKey(doc)) {
                    docToJoinScore.put(doc, joinScore);
                  }
                }
              }
            }
          }
        } else {
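          // Out-of-order case: scan every "to" document with MatchAllDocsQuery and resolve its join value(s) through SortedSetDocValues.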
          toSearcher.search(new MatchAllDocsQuery(), new Collector() {

            private SortedSetDocValues docTermOrds;
            private final BytesRef scratch = new BytesRef();
            private int docBase;

            @Override
            public void collect(int doc) throws IOException {
              docTermOrds.setDocument(doc);
              long ord;
              while ((ord = docTermOrds.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
                docTermOrds.lookupOrd(ord, scratch);
                JoinScore joinScore = joinValueToJoinScores.get(scratch);
                if (joinScore == null) {
                  continue;
                }
                Integer basedDoc = docBase + doc;
                // First encountered join value determines the score.
                // Something to keep in mind for many-to-many relations.
                if (!docToJoinScore.containsKey(basedDoc)) {
                  docToJoinScore.put(basedDoc, joinScore);
                }
              }
            }

            @Override
            public void setNextReader(AtomicReaderContext context) throws IOException {
              docBase = context.docBase;
              docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), toField);
            }

            @Override
            public boolean acceptsDocsOutOfOrder() {
              return false;
            }

            @Override
            public void setScorer(Scorer scorer) {
            }
          });
        }
      } else {
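        // Single-valued "to" field: resolve each document's join value through BinaryDocValues.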
        toSearcher.search(new MatchAllDocsQuery(), new Collector() {

          private BinaryDocValues terms;
          private int docBase;
          private final BytesRef spare = new BytesRef();
View Full Code Here

    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        final OpenBitSet bits = new OpenBitSet(context.reader().maxDoc());

        final Set<String> matches = loadMatches();

        new IndexSearcher(context.reader()).search(_query, new Collector() {
            private int docBase;
            private AtomicReader reader;

            // ignore scorer
            public void setScorer(Scorer scorer) {
View Full Code Here

        final Set<FeatureId> matches = new HashSet<FeatureId>();
        final Multimap<FeatureId,Integer> docIndexLookup = HashMultimap.create();
       
        if(unrefinedSpatialMatches.isEmpty() || _hits >= _numHits) return bits;

        new IndexSearcher(context.reader()).search(_query, new Collector() {
            private int docBase;
            private Document document;
            private AtomicReader reader;

            // ignore scorer
View Full Code Here

    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    final BitSet bits = new BitSet(context.reader().maxDoc());

    new IndexSearcher(context.reader()).search(_query, new Collector() {

            private int docBase;
            private IndexReader reader;

            @Override
View Full Code Here

      throw new IllegalArgumentException("query must not be null");
   
    IndexSearcher searcher = createSearcher();
    try {
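      // A one-element array lets the anonymous Collector write the hit's score back to the enclosing method.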
      final float[] scores = new float[1]; // inits to 0.0f (no match)
      searcher.search(query, new Collector() {
        private Scorer scorer;

        @Override
        public void collect(int doc) throws IOException {
          scores[0] = scorer.score();
View Full Code Here

    if (n == 0) {
      throw new IllegalArgumentException("At least 1 collector must not be null");
    } else if (n == 1) {
      // only 1 Collector - return it.
      Collector col = null;
      for (Collector c : collectors) {
        if (c != null) {
          col = c;
          break;
        }
View Full Code Here

    //  System.out.println("  doQueryFirstScoring");
    //}
    int docID = baseScorer.docID();

    nextDoc: while (docID != DocsEnum.NO_MORE_DOCS) {
      Collector failedCollector = null;
      for (int i=0;i<disis.length;i++) {
        // TODO: should we sort this 2nd dimension of
        // docsEnums from most frequent to least?
        DocIdSetIterator disi = disis[i];
        if (disi != null && disi.docID() < docID) {
View Full Code Here

      // Verify docs are always collected in order.  If we
      // had an AssertingScorer it could catch cases where
      // Weight.scoresDocsOutOfOrder lies:
      new DrillSideways(s, config, tr).search(ddq,
                           new Collector() {
                             int lastDocID;

                             @Override
                             public void setScorer(Scorer s) {
                             }
View Full Code Here

      // expected
    }

    // Tests that the collector handles some null collectors well. If it
    // doesn't, an NPE would be thrown.
    Collector c = MultiCollector.wrap(new DummyCollector(), null, new DummyCollector());
    assertTrue(c instanceof MultiCollector);
    assertTrue(c.acceptsDocsOutOfOrder());
    c.collect(1);
    c.setNextReader(null);
    c.setScorer(null);
  }
View Full Code Here
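
For reference, MultiCollector.wrap (exercised above) is normally used to drive several collectors from a single search pass. A minimal sketch, assuming an existing searcher and query; TopScoreDocCollector.create(int, boolean) appears in the first excerpt, while TotalHitCountCollector is another stock Collector from the same package:

    TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, true);
    TotalHitCountCollector counter = new TotalHitCountCollector();
    searcher.search(query, MultiCollector.wrap(topDocs, counter));
    TopDocs hits = topDocs.topDocs();        // the 10 best-scoring hits
    int totalHits = counter.getTotalHits();  // total number of matching docs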
