Package org.apache.lucene.analysis

Examples of org.apache.lucene.analysis.Token.termText()
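
Token.termText() returns the term text of a Token produced by an analysis chain. The fragments below all use the same basic pattern: pull Token objects out of a TokenStream and read their text with termText(). As a self-contained reference point, here is a minimal sketch of that pattern; it assumes a pre-3.0 Lucene release (Token.termText() and TokenStream.next() were both removed in Lucene 3.0), and the field name "contents" and the sample input are arbitrary.

import java.io.StringReader;

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;

// Minimal sketch: tokenize a string and print each token's text and offsets.
public class TermTextExample {
    public static void main(String[] args) throws Exception {
        TokenStream ts = new StandardAnalyzer()
                .tokenStream("contents", new StringReader("Lucene Token termText example"));
        Token t;
        while ((t = ts.next()) != null) {
            System.out.println(t.termText()
                    + " [" + t.startOffset() + ", " + t.endOffset() + ")");
        }
        ts.close();
    }
}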


            // assertEquals(message, expected, actual): "1251" is just the assertion message
            assertEquals(
                "1251",
                token.termText(),
                sampleToken == null
                ? null
                : sampleToken.termText());

        }

        inWords1251.close();
        sample1251.close();


    StandardTokenizer tokenStream = new StandardTokenizer(new StringReader(input));
    GermanStemFilter filter = new GermanStemFilter(tokenStream);
    Token t = filter.next();
    if (t == null)
      fail();
    assertEquals(expected, t.termText());
    filter.close();
  }

}

        Token t = input.next();

        if (t == null)
            return null;

        String txt = t.termText();

        char[] chArray = txt.toCharArray();
        for (int i = 0; i < chArray.length; i++)
        {
            chArray[i] = RussianCharsets.toLowerCase(chArray[i], charset);
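
The fragment above is the body of a filter's next() method: it reads the term text and lowercases it character by character with a charset-aware helper (RussianCharsets.toLowerCase). A generic version of the same pattern, shown below as a hypothetical sketch, substitutes Character.toLowerCase() for the charset-specific helper and again assumes the pre-3.0 Token API.

import java.io.IOException;

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;

// Hypothetical filter illustrating the pattern in the fragment above:
// read the term text, transform it, and emit a new Token that keeps the
// original offsets and type.
public final class SimpleLowerCaseFilter extends TokenFilter {

    public SimpleLowerCaseFilter(TokenStream in) {
        super(in);
    }

    public final Token next() throws IOException {
        Token t = input.next();
        if (t == null)
            return null;

        char[] chars = t.termText().toCharArray();
        for (int i = 0; i < chars.length; i++) {
            chars[i] = Character.toLowerCase(chars[i]);
        }
        return new Token(new String(chars), t.startOffset(), t.endOffset(), t.type());
    }
}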

            StringBuffer sb = new StringBuffer(statement);
            for (int i = suggestions.length - 1; i >= 0; i--)
            {
               Token t = tokens.get(i);
               // only replace if the word actually changed
               if (!t.termText().equalsIgnoreCase(suggestions[i]))
               {
                  sb.replace(t.startOffset(), t.endOffset(), suggestions[i]);
               }
            }
            // if the suggestion is the same as the statement, return null

            while ((t = ts.next()) != null)
            {
               String origWord = statement.substring(t.startOffset(), t.endOffset());
               if (t.getPositionIncrement() > 0)
               {
                  words.add(t.termText());
                  tokens.add(t);
               }
               else
               {
                  // very simple implementation: use termText with length

               else
               {
                  // very simple implementation: use termText with length
                  // closer to original word
                  Token current = tokens.get(tokens.size() - 1);
                  if (Math.abs(origWord.length() - current.termText().length()) > Math.abs(origWord.length()
                     - t.termText().length()))
                  {
                     // replace current token and word
                     words.set(words.size() - 1, t.termText());
                     tokens.set(tokens.size() - 1, t);
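
The three fragments above appear to belong to one suggestion-building routine. The token loop records each token's termText() (and, for tokens emitted at the same position, keeps the alternative whose length is closest to the original word), and the StringBuffer loop then splices the suggested words back into the original statement by offset, walking backwards so that the offsets of earlier tokens stay valid. A condensed, hypothetical sketch of the splicing step:

import java.util.List;

import org.apache.lucene.analysis.Token;

public class SuggestionSplicer {

    // Replace each token's span in the original statement with the matching
    // suggestion. Iterating from the last token to the first keeps the
    // remaining start/end offsets valid while the buffer changes length.
    public static String splice(String statement, List<Token> tokens, String[] suggestions) {
        StringBuffer sb = new StringBuffer(statement);
        for (int i = suggestions.length - 1; i >= 0; i--) {
            Token t = tokens.get(i);
            // only replace if the word actually changed
            if (!t.termText().equalsIgnoreCase(suggestions[i])) {
                sb.replace(t.startOffset(), t.endOffset(), suggestions[i]);
            }
        }
        return sb.toString();
    }
}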

        TokenStream ts = index.getTextAnalyzer().tokenStream("", r);
        Token t;
        try {
            while ((t = ts.next()) != null) {
                TermVectorOffsetInfo[] info =
                        (TermVectorOffsetInfo[]) termMap.get(t.termText());
                if (info == null) {
                    // first occurrence of this term
                    info = new TermVectorOffsetInfo[1];
                } else {
                    // grow the array by one slot for the new occurrence
                    TermVectorOffsetInfo[] tmp = info;
                    info = new TermVectorOffsetInfo[tmp.length + 1];
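
The last fragment maps each term's termText() to an array of TermVectorOffsetInfo entries, growing the array by one slot for every new occurrence. A complete, hypothetical version of that loop is sketched below; it keeps a List per term instead of copying the array on every append, and assumes the same pre-3.0 TokenStream API as the fragment.

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.TermVectorOffsetInfo;

public class TermOffsetCollector {

    // Collect the start/end offsets of every occurrence of every term,
    // keyed by the term's text.
    public static Map<String, List<TermVectorOffsetInfo>> collect(TokenStream ts)
            throws IOException {
        Map<String, List<TermVectorOffsetInfo>> termMap =
                new HashMap<String, List<TermVectorOffsetInfo>>();
        try {
            Token t;
            while ((t = ts.next()) != null) {
                List<TermVectorOffsetInfo> offsets = termMap.get(t.termText());
                if (offsets == null) {
                    offsets = new ArrayList<TermVectorOffsetInfo>();
                    termMap.put(t.termText(), offsets);
                }
                offsets.add(new TermVectorOffsetInfo(t.startOffset(), t.endOffset()));
            }
        } finally {
            ts.close();
        }
        return termMap;
    }
}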