Package org.apache.lucene.index

Examples of org.apache.lucene.index.Fields


      }
    } else {
      for (int docNum = 0; docNum < maxDoc; docNum++) {
        // NOTE: it's very important to first assign to vectors then pass it to
        // termVectorsWriter.addAllDocVectors; see LUCENE-1282
        Fields vectors = reader.getTermVectors(docNum);
        addAllDocVectors(vectors, mergeState);
        mergeState.checkAbort.work(300);
      }
    }
    return maxDoc;


  }

  @Override
  public Fields get(int docID) throws IOException {
    if (tvx != null) {
      Fields fields = new TVFields(docID);
      if (fields.size() == 0) {
        // TODO: we could improve the writer here, e.g. write 0 into the
        // tvx file, so that on the first read from tvx we already know
        // this doc has no term vectors
        return null;
      } else {
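The get(docID) method above returns null when a document has no term vectors, so callers need a null check before touching the returned Fields. A minimal caller-side sketch, assuming the Lucene 4.x API used throughout these snippets; the class name and the reader/docID/field arguments are placeholders, not part of the original code:

import java.io.IOException;

import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;

public class TermVectorNullCheck {
  // Returns true only if the document stored a term vector for the given field.
  static boolean hasTermVector(IndexReader reader, int docID, String field) throws IOException {
    Fields vectors = reader.getTermVectors(docID); // null when the doc has no term vectors at all
    if (vectors == null) {
      return false;
    }
    Terms terms = vectors.terms(field);            // null when this particular field has no vector
    return terms != null;
  }
}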

    TermContext termContext = termContexts.get(term);
    final TermState state;
    if (termContext == null) {
      // this happens with a span-not query, as it doesn't include the NOT side in extractTerms(),
      // so we seek to the term now in this segment; ugly, but unavoidable here
      final Fields fields = context.reader().fields();
      if (fields != null) {
        final Terms terms = fields.terms(term.field());
        if (terms != null) {
          final TermsEnum termsEnum = terms.iterator(null);
          if (termsEnum.seekExact(term.bytes(), true)) {
            state = termsEnum.termState();
          } else {

  public void collectTermContext(IndexReader reader,
      List<AtomicReaderContext> leaves, TermContext[] contextArray,
      Term[] queryTerms) throws IOException {
    TermsEnum termsEnum = null;
    for (AtomicReaderContext context : leaves) {
      final Fields fields = context.reader().fields();
      if (fields == null) {
        // reader has no fields
        continue;
      }
      for (int i = 0; i < queryTerms.length; i++) {
        Term term = queryTerms[i];
        TermContext termContext = contextArray[i];
        final Terms terms = fields.terms(term.field());
        if (terms == null) {
          // field does not exist
          continue;
        }
        termsEnum = terms.iterator(termsEnum);
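Both snippets above resolve a Term against each segment by walking reader.fields() -> terms(field) -> iterator(). A condensed, self-contained variant of that per-leaf lookup, assuming Lucene 4.x; the class and method names (LeafTermLookup, countDocFreq) are illustrative only:

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

public class LeafTermLookup {
  // Sums the document frequency of one term across all leaves of the reader.
  static int countDocFreq(IndexReader reader, Term term) throws IOException {
    int df = 0;
    TermsEnum termsEnum = null;                      // reused across leaves, as in collectTermContext above
    for (AtomicReaderContext context : reader.leaves()) {
      Fields fields = context.reader().fields();
      if (fields == null) {
        continue;                                    // leaf has no postings at all
      }
      Terms terms = fields.terms(term.field());
      if (terms == null) {
        continue;                                    // field does not exist in this leaf
      }
      termsEnum = terms.iterator(termsEnum);
      if (termsEnum.seekExact(term.bytes(), true)) { // same seekExact(BytesRef, useCache) call as above
        df += termsEnum.docFreq();
      }
    }
    return df;
  }
}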

    Query query = new TermQuery(new Term("field", "one"));
    ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
    assertEquals(1, hits.length);

    Fields vectors = searcher.reader.getTermVectors(hits[0].doc);
    assertNotNull(vectors);
    assertEquals(1, vectors.size());
    Terms vector = vectors.terms("field");
    assertNotNull(vector);
    assertEquals(1, vector.size());
    TermsEnum termsEnum = vector.iterator(null);
    assertNotNull(termsEnum.next());
    assertEquals("one", termsEnum.term().utf8ToString());
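The test above stops at the term text; the same Fields/Terms/TermsEnum chain can also read per-term frequency and positions out of a term vector through DocsAndPositionsEnum. A sketch, assuming Lucene 4.x and a field indexed with term vectors and positions (DumpTermVector and its arguments are placeholders):

import java.io.IOException;

import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

public class DumpTermVector {
  // Prints every term of one document's term vector with its in-document frequency and positions.
  static void dump(IndexReader reader, int docID, String field) throws IOException {
    Fields vectors = reader.getTermVectors(docID);
    if (vectors == null) {
      return;                    // no term vectors stored for this document
    }
    Terms vector = vectors.terms(field);
    if (vector == null) {
      return;                    // no vector for this field
    }
    TermsEnum termsEnum = vector.iterator(null);
    DocsAndPositionsEnum positions = null;
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
      StringBuilder sb = new StringBuilder(term.utf8ToString());
      positions = termsEnum.docsAndPositions(null, positions);
      if (positions != null) {   // null when positions were not stored with the vector
        positions.nextDoc();     // a term vector enum holds exactly one document
        int freq = positions.freq();
        sb.append(" freq=").append(freq);
        for (int i = 0; i < freq; i++) {
          sb.append(" pos=").append(positions.nextPosition());
        }
      }
      System.out.println(sb);
    }
  }
}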

    // check:
    int numDocs = reader.numDocs();
    long start = 0L;
    for (int docId = 0; docId < numDocs; docId++) {
      start = System.currentTimeMillis();
      Fields vectors = reader.getTermVectors(docId);
      timeElapsed += System.currentTimeMillis()-start;
     
      // verify vectors result
      verifyVectors(vectors, docId);
     

    }
  }

  private static List<Term> sample(Random random, IndexReader reader, int size) throws IOException {
    List<Term> sample = new ArrayList<Term>();
    Fields fields = MultiFields.getFields(reader);
    for (String field : fields) {
      Terms terms = fields.terms(field);
      assertNotNull(terms);
      TermsEnum termsEnum = terms.iterator(null);
      while (termsEnum.next() != null) {
        if (sample.size() >= size) {
          int pos = random.nextInt(size);
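MultiFields.getFields gives a merged, index-wide view of all fields, which the sampling code above iterates. A short sketch of the same traversal that only counts unique terms per field, assuming Lucene 4.x (FieldTermCounts is an illustrative name):

import java.io.IOException;

import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

public class FieldTermCounts {
  // Prints the number of unique terms in every indexed field of the reader.
  static void print(IndexReader reader) throws IOException {
    Fields fields = MultiFields.getFields(reader); // null if the reader has no fields
    if (fields == null) {
      return;
    }
    for (String field : fields) {                  // Fields iterates over its field names
      Terms terms = fields.terms(field);
      if (terms == null) {
        continue;
      }
      long count = 0;
      TermsEnum termsEnum = terms.iterator(null);
      while (termsEnum.next() != null) {
        count++;
      }
      System.out.println(field + ": " + count + " unique terms");
    }
  }
}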

        hit.setStoredFields(reader.document(fdoc.doc));
      }
      if (termVectorsToFetch != null && termVectorsToFetch.size() > 0) {
        Map<String, List<BoboTerm>> tvMap = new HashMap<String, List<BoboTerm>>();
        hit.setTermVectorMap(tvMap);
        Fields fds = reader.getTermVectors(fdoc.doc);
        for (String field : termVectorsToFetch) {
          Terms terms = fds.terms(field);
          if (terms == null) {
            continue;
          }
          TermsEnum termsEnum = terms.iterator(null);
          BytesRef text;
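The snippet above pulls the whole per-document Fields and then picks fields out of it; when only a few fields are wanted, IndexReader also offers the per-field shortcut getTermVector(docID, field). A sketch of the same field-to-terms map built that way, assuming Lucene 4.x (plain strings stand in for BoboTerm, and SelectedTermVectors is an illustrative name):

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

public class SelectedTermVectors {
  // Collects the term vector terms of the requested fields for one document.
  static Map<String, List<String>> fetch(IndexReader reader, int docID, Set<String> fieldsToFetch)
      throws IOException {
    Map<String, List<String>> tvMap = new HashMap<String, List<String>>();
    for (String field : fieldsToFetch) {
      Terms terms = reader.getTermVector(docID, field); // per-field accessor; null if no vector
      if (terms == null) {
        continue;
      }
      List<String> termTexts = new ArrayList<String>();
      TermsEnum termsEnum = terms.iterator(null);
      BytesRef text;
      while ((text = termsEnum.next()) != null) {
        termTexts.add(text.utf8ToString());
      }
      tvMap.put(field, termTexts);
    }
    return tvMap;
  }
}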

                String startuid = Util.path2uid(dir, "");
                IndexReader reader = DirectoryReader.open(indexDirectory); // open existing index
                Terms terms = null;
                int numDocs = reader.numDocs();
                if (numDocs > 0) {
                    Fields uFields = MultiFields.getFields(reader); // reader.getTermVectors(0);
                    terms = uFields.terms(QueryBuilder.U);
                }
               
                try {
                    if (numDocs > 0) {
                        uidIter = terms.iterator(uidIter);                       

        try {
            ireader = DirectoryReader.open(indexDirectory); // open existing index
            int numDocs = ireader.numDocs();
            if (numDocs > 0) {
                Fields uFields = MultiFields.getFields(ireader); // reader.getTermVectors(0);
                terms = uFields.terms(QueryBuilder.U);
            }
            iter = terms.iterator(iter); // init uid iterator
            while (iter != null && iter.term() != null) {
                log.fine(Util.uid2url(iter.term().utf8ToString()));
                BytesRef next=iter.next();