Package: org.apache.lucene.index

Examples of org.apache.lucene.index.Term.text()


      TermEnum termEnum = reader.terms (new Term (field));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          double termval = parser.parseDouble(term.text());
          if (retArray == null) // late init
            retArray = new double[reader.maxDoc()];
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
View Full Code Here


    try {
      int count = 0;
      do {
        final Term t = termEnum.term();
        if (t != null) {
          final int val = NumericUtils.prefixCodedToInt(t.text());
          assertTrue("value not in bounds", val >= lower && val <= upper);
          count++;
        } else break;
      } while (termEnum.next());
      assertFalse(termEnum.next());
View Full Code Here

          query.append(" ");
        }

        Term term = terms.get(i);

        query.append(term.text());
      }
      query.append('\"');

      Explanation idfExpl =
        new Explanation(idf, "idf(" + field + ":" + docFreqs + ")");
View Full Code Here

      }
      int df = te.docFreq();
      if (df < minDf || df > percent){
        continue;
      }
      TermEntry entry = new TermEntry(term.text(), count++, df);
      termEntries.put(entry.term, entry);
    } while (te.next());
    te.close();
  }
 
View Full Code Here

      // AND the term's bitset with cluster doc bitset to get the term's in-cluster frequency.
      // This modifies the termBitset, but that's fine as we are not using it anywhere else.
      termBitset.and(clusterDocBitset);
      int inclusterDF = (int) termBitset.cardinality();
     
      TermEntry entry = new TermEntry(term.text(), count++, inclusterDF);
      termEntryMap.put(entry.term, entry);
    } while (te.next());
    te.close();
   
    List<TermInfoClusterInOut> clusteredTermInfo = new LinkedList<TermInfoClusterInOut>();
View Full Code Here

                for (TermEnum terms : rangeScans) {
                    do {
                        Term t = terms.term();
                        if (t != null) {
                            currentTerm.setBase(t.text());
                            int compare = currentTerm.compareTo(termText);
                            if (compare == 0) {
                                orderedTerms.put(t, terms.docFreq());
                            } else if (compare < 0) {
                                // try next one
View Full Code Here

                           {
                              num = idx.removeDocument(idTerm);
                              if (num > 0)
                              {
                                 if (log.isDebugEnabled())
                                    log.debug(idTerm.text() + " has been found in the persisted index " + i);
                                 break;
                              }
                           }
                        }
                     }
View Full Code Here

                           }
                        }
                     }
                     else if (log.isDebugEnabled())
                     {
                        log.debug(idTerm.text() + " has been found in the volatile index");
                     }
                  }

                  // try to avoid getting index reader for each doc
                  IndexReader indexReader = null;
View Full Code Here

            // there are always two range scans: one starting with an initial
            // lower-case character and another one starting with an initial
            // upper-case character
            List rangeScans = new ArrayList(2);
            nameLength = FieldNames.getNameLength(term.text());
            String propName = term.text().substring(0, nameLength);
            this.termText = new OffsetCharSequence(nameLength, term.text());
            this.currentTerm = new OffsetCharSequence(nameLength, term.text(), transform);

            try {
View Full Code Here

            // there are always two range scans: one starting with an initial
            // lower-case character and another one starting with an initial
            // upper-case character
            List rangeScans = new ArrayList(2);
            nameLength = FieldNames.getNameLength(term.text());
            String propName = term.text().substring(0, nameLength);
            this.termText = new OffsetCharSequence(nameLength, term.text());
            this.currentTerm = new OffsetCharSequence(nameLength, term.text(), transform);

            try {
                // start with a term using the lower case character for the first
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.