Package org.apache.lucene.index

Examples of org.apache.lucene.index.IndexReaderContext
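
IndexReaderContext is the node type of the tree Lucene builds over an IndexReader: a CompositeReaderContext for a reader with child readers, and an AtomicReaderContext for each segment. Most callers only need the flattened leaves() view plus each leaf's docBase offset. A minimal sketch of walking that tree (Lucene 4.x API; the index path is a placeholder):

    import java.io.File;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReaderContext;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class LeafWalk {
      public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(new File("/path/to/index")); // placeholder path
        DirectoryReader reader = DirectoryReader.open(dir);
        try {
          IndexReaderContext top = reader.getContext();   // root of the context tree
          for (AtomicReaderContext leaf : top.leaves()) { // one entry per segment
            // docBase converts leaf-local doc ids to top-level doc ids
            System.out.println("docBase=" + leaf.docBase + " maxDoc=" + leaf.reader().maxDoc());
          }
        } finally {
          reader.close();
        }
      }
    }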


    private transient TermContext states[];

    public PhraseWeight(IndexSearcher searcher)
      throws IOException {
      this.similarity = searcher.getSimilarity();
      final IndexReaderContext context = searcher.getTopReaderContext();
      states = new TermContext[terms.size()];
      TermStatistics termStats[] = new TermStatistics[terms.size()];
      for (int i = 0; i < terms.size(); i++) {
        final Term term = terms.get(i);
        states[i] = TermContext.build(context, term);
        // …
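
TermContext.build walks every segment of the given context once and records where the term occurs, so it must be handed the top-level context rather than a single leaf. A minimal sketch of the same statistics-gathering step, assuming an open IndexSearcher and placeholder field/term values:

    IndexReaderContext top = searcher.getTopReaderContext();
    Term term = new Term("body", "lucene");           // placeholder field/term
    TermContext state = TermContext.build(top, term); // one pass over all leaves
    TermStatistics stats = searcher.termStatistics(term, state);
    System.out.println(stats.docFreq() + " docs, " + stats.totalTermFreq() + " occurrences");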


    final IndexReader reader = searcher.getIndexReader();
    Query rewritten = rewrite(query);
    SortedSet<Term> queryTerms = new TreeSet<>();
    rewritten.extractTerms(queryTerms);

    IndexReaderContext readerContext = reader.getContext();
    List<AtomicReaderContext> leaves = readerContext.leaves();

    // Make our own copies because we sort in-place:
    int[] docids = new int[docidsIn.length];
    System.arraycopy(docidsIn, 0, docids, 0, docidsIn.length);
    final String fields[] = new String[fieldsIn.length];
    // …
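
With the leaves in hand, a top-level doc id from docids can be mapped back to its segment and its segment-local id; a minimal sketch using ReaderUtil, assuming docID is one of the top-level ids above:

    int subIndex = ReaderUtil.subIndex(docID, leaves); // binary-search the docBase list
    AtomicReaderContext leaf = leaves.get(subIndex);
    int localDocID = docID - leaf.docBase;             // id within that segment
    Document doc = leaf.reader().document(localDocID);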

    }

    // NOTE: sometimes reader has just one segment, which is
    // important to test
    final IndexSearcher searcher = newSearcher(reader);
    final IndexReaderContext ctx = searcher.getTopReaderContext();

    final ShardSearcher[] subSearchers;
    final int[] docStarts;

    if (ctx instanceof AtomicReaderContext) {
      // …
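
A single-segment reader hands back its AtomicReaderContext directly from getTopReaderContext(), so code that wants one searcher per segment has to branch on the runtime type, as the test does. A hedged sketch of both branches (ShardSearcher is the test's own helper and stands in for any per-segment worker):

    if (ctx instanceof AtomicReaderContext) {
      subSearchers = new ShardSearcher[1];             // one segment, one searcher
      docStarts = new int[] {0};
    } else {
      CompositeReaderContext compCtx = (CompositeReaderContext) ctx;
      subSearchers = new ShardSearcher[compCtx.leaves().size()];
      docStarts = new int[compCtx.leaves().size()];
    }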

  }

  public void testSpanScorerZeroSloppyFreq() throws Exception {
    boolean ordered = true;
    int slop = 1;
    IndexReaderContext topReaderContext = searcher.getTopReaderContext();
    List<AtomicReaderContext> leaves = topReaderContext.leaves();
    int subIndex = ReaderUtil.subIndex(11, leaves);
    for (int i = 0, c = leaves.size(); i < c; i++) {
      final AtomicReaderContext ctx = leaves.get(i);
    
      final Similarity sim = new DefaultSimilarity() {
        // …
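
The anonymous DefaultSimilarity above presumably overrides sloppyFreq so that a sloppy phrase match contributes nothing to the score, matching the test name; a sketch of that kind of override:

    final Similarity sim = new DefaultSimilarity() {
      @Override
      public float sloppyFreq(int distance) {
        return 0.0f; // zero out every sloppy-phrase contribution
      }
    };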

  /**
   * not a direct test of NearSpans, but a demonstration of
   * how/when this causes problems
   */
  public void testSpanNearScorerSkipTo1() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = searcher.createNormalizedWeight(q);
    IndexReaderContext topReaderContext = searcher.getTopReaderContext();
    AtomicReaderContext leave = topReaderContext.leaves().get(0);
    Scorer s = w.scorer(leave, leave.reader().getLiveDocs());
    assertEquals(1, s.advance(1));
  }
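Scorers are always created per leaf, so scoring a whole index means repeating this for every leaf context; a minimal sketch using the same two-argument scorer signature as the test above:

    Weight w = searcher.createNormalizedWeight(q);
    for (AtomicReaderContext leaf : searcher.getTopReaderContext().leaves()) {
      Scorer s = w.scorer(leaf, leaf.reader().getLiveDocs());
      if (s == null) continue;                     // no matches in this segment
      int doc;
      while ((doc = s.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
        System.out.println((leaf.docBase + doc) + " score=" + s.score());
      }
    }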

    public final ShardSearcher[] subSearchers;
    public final int[] docStarts;

    public ShardState(IndexSearcher s) {
      final IndexReaderContext ctx = s.getTopReaderContext();
      final List<AtomicReaderContext> leaves = ctx.leaves();
      subSearchers = new ShardSearcher[leaves.size()];
      for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
        subSearchers[searcherIDX] = new ShardSearcher(leaves.get(searcherIDX), ctx);
      }
      // …
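
The truncated constructor presumably fills docStarts the same way it fills subSearchers; each leaf's docBase is its offset into the top-level doc id space. A sketch under that assumption:

      docStarts = new int[leaves.size()];
      for (int i = 0; i < leaves.size(); i++) {
        docStarts[i] = leaves.get(i).docBase;      // top-level offset of leaf i
      }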

      BoboMapFunctionWrapper mapReduceWrapper) throws IOException {
    final FacetValidator validator = createFacetValidator();
    int target = 0;

    IndexReader reader = getIndexReader();
    IndexReaderContext indexReaderContext = reader.getContext();
    if (filter == null) {
      for (int i = 0; i < _subReaders.length; i++) {
        AtomicReaderContext atomicContext = indexReaderContext.children() == null ? (AtomicReaderContext) indexReaderContext
            : (AtomicReaderContext) (indexReaderContext.children().get(i));
        int docStart = start;
       
        atomicContext = AtomicReaderContextUtil.updateDocBase(atomicContext, docStart);
       
        if (reader instanceof BoboMultiReader) {
          docStart = start + ((BoboMultiReader) reader).subReaderBase(i);
        }
        collector.setNextReader(atomicContext);
        validator.setNextReader(_subReaders[i], docStart);

        Scorer scorer = weight.scorer(atomicContext, true, true, _subReaders[i].getLiveDocs());
        if (scorer != null) {
          collector.setScorer(scorer);
          target = scorer.nextDoc();
          while (target != DocIdSetIterator.NO_MORE_DOCS) {
            if (validator.validate(target)) {
              collector.collect(target);
              target = scorer.nextDoc();
            } else {
              target = validator._nextTarget;
              target = scorer.advance(target);
            }
          }
        }
        if (mapReduceWrapper != null) {
          mapReduceWrapper.mapFullIndexReader(_subReaders[i], validator.getCountCollectors());
        }
      }
      return;
    }

    for (int i = 0; i < _subReaders.length; i++) {
      AtomicReaderContext atomicContext = indexReaderContext.children() == null ? (AtomicReaderContext) indexReaderContext
          : (AtomicReaderContext) (indexReaderContext.children().get(i));

      DocIdSet filterDocIdSet = filter.getDocIdSet(atomicContext, _subReaders[i].getLiveDocs());
      if (filterDocIdSet == null) return; // shall we use return or continue here ??
      int docStart = start;
      if (reader instanceof BoboMultiReader) {
      // …
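
children() is the tree-shaped counterpart of leaves(): it returns a composite context's direct children and null on an atomic context, which is exactly what the null check above relies on. A hedged sketch of the same dispatch, assuming the composite's direct children are themselves leaves:

    IndexReaderContext top = reader.getContext();
    AtomicReaderContext atomicContext = (top.children() == null)
        ? (AtomicReaderContext) top                    // single-segment reader
        : (AtomicReaderContext) top.children().get(i); // i-th direct child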


    /*
    Our own api to highlight a single document field, passing in the query terms, and get back our own Snippet object
     */
    public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexSearcher searcher, int docId, int maxPassages) throws IOException {
        IndexReader reader = searcher.getIndexReader();
        IndexReaderContext readerContext = reader.getContext();
        List<LeafReaderContext> leaves = readerContext.leaves();

        String[] contents = new String[]{loadCurrentFieldValue()};
        Map<Integer, Object> snippetsMap = highlightField(field, contents, getBreakIterator(field), terms, new int[]{docId}, leaves, maxPassages);

        //increment the current value index so that next time we'll highlight the next value if available
        // …
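
Note: this snippet comes from a Lucene 5.x codebase, where AtomicReaderContext was renamed LeafReaderContext; the getContext() and leaves() calls are otherwise identical to the Lucene 4.x examples above.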

                rescoreContext.rescorer().extractTerms(context, rescoreContext, new DelegateSet(termsSet));
            }

            Term[] terms = termsSet.toArray(Term.class);
            TermStatistics[] termStatistics = new TermStatistics[terms.length];
            IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext();
            for (int i = 0; i < terms.length; i++) {
                // LUCENE 4 UPGRADE: cache TermContext?
                TermContext termContext = TermContext.build(indexReaderContext, terms[i]);
                termStatistics[i] = context.searcher().termStatistics(terms[i], termContext);
            }
            // …
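
Query weights typically pair these per-term statistics with field-level statistics from the same searcher, as the PhraseWeight example at the top does; a hedged sketch (the field name is a placeholder):

    IndexReaderContext top = searcher.getTopReaderContext();
    TermStatistics[] termStats = new TermStatistics[terms.length];
    for (int i = 0; i < terms.length; i++) {
      termStats[i] = searcher.termStatistics(terms[i], TermContext.build(top, terms[i]));
    }
    CollectionStatistics fieldStats = searcher.collectionStatistics("body"); // placeholder field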
