Package org.apache.lucene.analysis.tokenattributes

Examples of org.apache.lucene.analysis.tokenattributes.OffsetAttribute
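
OffsetAttribute reports the character offsets of the current token: startOffset() is the index of the token's first character in the original input, and endOffset() is one past its last character. A consumer obtains the attribute from a TokenStream via addAttribute (or getAttribute) and reads it inside the incrementToken() loop.

A minimal consumption loop, as a sketch (it assumes an existing Analyzer instance named analyzer; the field name and input text are placeholders):

  // Assumes: org.apache.lucene.analysis.*, org.apache.lucene.analysis.tokenattributes.*,
  // java.io.StringReader; "analyzer" is any Analyzer instance.
  TokenStream ts = analyzer.tokenStream("field", new StringReader("to be or not to be"));
  CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
  OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
  try {
    ts.reset();                 // required before the first incrementToken()
    while (ts.incrementToken()) {
      // startOffset() is inclusive, endOffset() is exclusive
      System.out.println(termAtt.toString() + " ["
          + offsetAtt.startOffset() + "," + offsetAtt.endOffset() + ")");
    }
    ts.end();                   // records the final offset state
  } finally {
    ts.close();
  }

The first example appears to override an infix suggester's highlight method, using the token offsets to slice the original text into matched and unmatched fragments: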


        @Override
        protected Object highlight(String text, Set<String> matchedTokens, String prefixToken) throws IOException {
          TokenStream ts = queryAnalyzer.tokenStream("text", new StringReader(text));
          try {
            CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
            OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
            ts.reset();
            List<LookupHighlightFragment> fragments = new ArrayList<LookupHighlightFragment>();
            int upto = 0;
            while (ts.incrementToken()) {
              String token = termAtt.toString();
              int startOffset = offsetAtt.startOffset();
              int endOffset = offsetAtt.endOffset();
              if (upto < startOffset) {
                fragments.add(new LookupHighlightFragment(text.substring(upto, startOffset), false));
                upto = startOffset;
              } else if (upto > startOffset) {
                // Current token overlaps text we already emitted
                // (e.g. stacked tokens at the same position); skip it:
                continue;
              }
             
              if (matchedTokens.contains(token)) {
                // Token matches.
                fragments.add(new LookupHighlightFragment(text.substring(startOffset, endOffset), true));
                upto = endOffset;
              } else if (prefixToken != null && token.startsWith(prefixToken)) {
                fragments.add(new LookupHighlightFragment(text.substring(startOffset, startOffset+prefixToken.length()), true));
                if (prefixToken.length() < token.length()) {
                  fragments.add(new LookupHighlightFragment(text.substring(startOffset+prefixToken.length(), startOffset+token.length()), false));
                }
                upto = endOffset;
              }
            }
            ts.end();
            int endOffset = offsetAtt.endOffset();
            if (upto < endOffset) {
              fragments.add(new LookupHighlightFragment(text.substring(upto), false));
            }
           
            return fragments;
          } finally {
            // Always release the TokenStream, even on exception:
            IOUtils.closeWhileHandlingException(ts);
          }
        }
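
A related lookup implementation analyzes the query key into a BooleanQuery. Here OffsetAttribute detects whether the final token ran flush to the end of the input (no trailing characters were discarded), in which case that token is treated as a prefix: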


    try {
      ts = queryAnalyzer.tokenStream("", new StringReader(key.toString()));
      //long t0 = System.currentTimeMillis();
      ts.reset();
      final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
      final OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
      String lastToken = null;
      BooleanQuery query = new BooleanQuery();
      int maxEndOffset = -1;
      final Set<String> matchedTokens = new HashSet<String>();
      while (ts.incrementToken()) {
        if (lastToken != null) { 
          matchedTokens.add(lastToken);
          query.add(new TermQuery(new Term(TEXT_FIELD_NAME, lastToken)), occur);
        }
        lastToken = termAtt.toString();
        if (lastToken != null) {
          maxEndOffset = Math.max(maxEndOffset, offsetAtt.endOffset());
        }
      }
      ts.end();

      String prefixToken = null;
      if (lastToken != null) {
        Query lastQuery;
        if (maxEndOffset == offsetAtt.endOffset()) {
          // Use PrefixQuery (or the ngram equivalent) when
          // there was no trailing discarded chars in the
          // string (e.g. whitespace), so that if query does
          // not end with a space we show prefix matches for
          // that token:
          // Sketch of the likely continuation (the original may build the
          // prefix query through a helper, e.g. an ngram equivalent):
          lastQuery = new PrefixQuery(new Term(TEXT_FIELD_NAME, lastToken));
          prefixToken = lastToken;
        }
        // ... remainder of the lookup method omitted ...
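
A second variant of the highlight method renders directly into a StringBuilder through small append hooks instead of building a fragment list: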

  /** Override this method to customize the Object
   *  representing a single highlighted suggestion; the
   *  result is set on each {@link
   *  LookupResult#highlightKey} member. */
  protected Object highlight(String text, Set<String> matchedTokens, String prefixToken) throws IOException {
    TokenStream ts = queryAnalyzer.tokenStream("text", new StringReader(text));
    try {
      CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
      OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
      ts.reset();
      StringBuilder sb = new StringBuilder();
      int upto = 0;
      while (ts.incrementToken()) {
        String token = termAtt.toString();
        int startOffset = offsetAtt.startOffset();
        int endOffset = offsetAtt.endOffset();
        if (upto < startOffset) {
          addNonMatch(sb, text.substring(upto, startOffset));
          upto = startOffset;
        } else if (upto > startOffset) {
          continue;
        }
       
        if (matchedTokens.contains(token)) {
          // Token matches.
          addWholeMatch(sb, text.substring(startOffset, endOffset), token);
          upto = endOffset;
        } else if (prefixToken != null && token.startsWith(prefixToken)) {
          addPrefixMatch(sb, text.substring(startOffset, endOffset), token, prefixToken);
          upto = endOffset;
        }
      }
      ts.end();
      int endOffset = offsetAtt.endOffset();
      if (upto < endOffset) {
        addNonMatch(sb, text.substring(upto));
      }
      return sb.toString();
    } finally {
      // Always release the TokenStream, even on exception:
      IOUtils.closeWhileHandlingException(ts);
    }
  }
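
The addNonMatch, addWholeMatch and addPrefixMatch hooks used above are append helpers. A plausible minimal implementation, as a sketch (the real methods may differ, e.g. by escaping HTML):

  protected void addNonMatch(StringBuilder sb, String text) {
    sb.append(text);
  }

  protected void addWholeMatch(StringBuilder sb, String surface, String analyzedToken) {
    sb.append("<b>").append(surface).append("</b>");
  }

  protected void addPrefixMatch(StringBuilder sb, String surface, String analyzedToken, String prefixToken) {
    if (prefixToken.length() >= surface.length()) {
      addWholeMatch(sb, surface, analyzedToken);
      return;
    }
    // Bold only the prefix of the surface form that the user typed:
    sb.append("<b>").append(surface.substring(0, prefixToken.length())).append("</b>");
    sb.append(surface.substring(prefixToken.length()));
  }

The next fragment appears to come from a shingle filter (note maxShingleSize): when the input stream skips positions, filler tokens must be inserted, and the filler token's offsets are pinned to the end offset of the previous token: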

        endState = captureState();
        numFillerTokensToInsert = Math.min(posIncrAtt.getPositionIncrement(), maxShingleSize - 1);
        if (numFillerTokensToInsert > 0) {
          nextInputStreamToken = new AttributeSource(getAttributeFactory());
          nextInputStreamToken.addAttribute(CharTermAttribute.class);
          OffsetAttribute newOffsetAtt = nextInputStreamToken.addAttribute(OffsetAttribute.class);
          newOffsetAtt.setOffset(offsetAtt.endOffset(), offsetAtt.endOffset());
          // Recurse/loop just once:
          return getNextToken(target);
        } else {
          newTarget = null;
        }
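
The following helper, used internally by PostingsHighlighter, wraps a TokenStream in a DocsAndPositionsEnum and reports offsets for tokens accepted by one of the given automata: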

  /**
   * Returns a "fake" DocsAndPositionsEnum over the given TokenStream, reporting offsets for
   * tokens accepted by one of the given automata.
   * <p>
   * This is solely used internally by PostingsHighlighter: <b>DO NOT USE THIS METHOD!</b>
   */
  static DocsAndPositionsEnum getDocsEnum(final TokenStream ts, final CharacterRunAutomaton[] matchers) throws IOException {
    final CharTermAttribute charTermAtt = ts.addAttribute(CharTermAttribute.class);
    final OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
    ts.reset();
   
    // TODO: we could use CachingWrapperFilter, (or consume twice) to allow us to have a true freq()
    // but this would have a performance cost for likely little gain in the user experience, it
    // would only serve to make this method less bogus.
    // instead, we always return freq() = Integer.MAX_VALUE and let PH terminate based on offset...
   
    return new DocsAndPositionsEnum() {
      int currentDoc = -1;
      int currentMatch = -1;
      int currentStartOffset = -1;
      int currentEndOffset = -1;
      TokenStream stream = ts;
     
      final BytesRef matchDescriptions[] = new BytesRef[matchers.length];
     
      @Override
      public int nextPosition() throws IOException {
        if (stream != null) {
          while (stream.incrementToken()) {
            for (int i = 0; i < matchers.length; i++) {
              if (matchers[i].run(charTermAtt.buffer(), 0, charTermAtt.length())) {
                currentStartOffset = offsetAtt.startOffset();
                currentEndOffset = offsetAtt.endOffset();
                currentMatch = i;
                return 0;
              }
            }
          }
          // Stream exhausted: close it and signal no more positions.
          stream.end();
          stream.close();
          stream = null;
        }
        currentStartOffset = currentEndOffset = Integer.MAX_VALUE;
        return Integer.MAX_VALUE;
      }
      // ... (remaining DocsAndPositionsEnum methods omitted) ...
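
The same attribute shows up throughout unit tests. A typical helper advances the stream one token and asserts its term text and offsets (this older test uses TermAttribute, the predecessor of CharTermAttribute):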



  private void assertNext(TokenStream ts, String text, int startOffset, int endOffset) throws IOException {
    TermAttribute termAtt = (TermAttribute) ts.addAttribute(TermAttribute.class);
    OffsetAttribute offsetAtt = (OffsetAttribute) ts.addAttribute(OffsetAttribute.class);

    assertTrue(ts.incrementToken());
    assertEquals(text, termAtt.term());
    assertEquals(startOffset, offsetAtt.startOffset());
    assertEquals(endOffset, offsetAtt.endOffset());
  }
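
A CJKTokenizer test compares each emitted token against a table of expected term text, offsets, and type: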

  public void checkCJKToken(final String str, final TestToken[] out_tokens) throws IOException {
    CJKTokenizer tokenizer = new CJKTokenizer(new StringReader(str));
    TermAttribute termAtt = (TermAttribute) tokenizer.getAttribute(TermAttribute.class);
    OffsetAttribute offsetAtt = (OffsetAttribute) tokenizer.getAttribute(OffsetAttribute.class);
    TypeAttribute typeAtt = (TypeAttribute) tokenizer.getAttribute(TypeAttribute.class);
    for (int i = 0; i < out_tokens.length; i++) {
      assertTrue(tokenizer.incrementToken());
      // JUnit convention: expected value first, actual value second.
      assertEquals(out_tokens[i].termText, termAtt.term());
      assertEquals(out_tokens[i].start, offsetAtt.startOffset());
      assertEquals(out_tokens[i].end, offsetAtt.endOffset());
      assertEquals(out_tokens[i].type, typeAtt.type());
    }
    assertFalse(tokenizer.incrementToken());
  }

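A Snowball stemmer test checks that stemming rewrites the term while all other attributes, including offsets, type, flags, position increment, and payload, pass through the filter unchanged: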
 
  public void testFilterTokens() throws Exception {
    SnowballFilter filter = new SnowballFilter(new TestTokenStream(), "English");
    TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
    OffsetAttribute offsetAtt = filter.getAttribute(OffsetAttribute.class);
    TypeAttribute typeAtt = filter.getAttribute(TypeAttribute.class);
    PayloadAttribute payloadAtt = filter.getAttribute(PayloadAttribute.class);
    PositionIncrementAttribute posIncAtt = filter.getAttribute(PositionIncrementAttribute.class);
    FlagsAttribute flagsAtt = filter.getAttribute(FlagsAttribute.class);
   
    assertTrue(filter.incrementToken());

    assertEquals("accent", termAtt.term());
    assertEquals(2, offsetAtt.startOffset());
    assertEquals(7, offsetAtt.endOffset());
    assertEquals("wrd", typeAtt.type());
    assertEquals(3, posIncAtt.getPositionIncrement());
    assertEquals(77, flagsAtt.getFlags());
    assertEquals(new Payload(new byte[]{0,1,2,3}), payloadAtt.getPayload());
  }
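
Finally, offsets can be asserted incrementally. ChineseTokenizer emits one single-character token at a time for the input "a天b", so the start and end offsets each advance by one per token: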

        String s = "a天b";
        ChineseTokenizer tokenizer = new ChineseTokenizer(new StringReader(s));

        int correctStartOffset = 0;
        int correctEndOffset = 1;
        OffsetAttribute offsetAtt = tokenizer.getAttribute(OffsetAttribute.class);
        while (tokenizer.incrementToken()) {
          assertEquals(correctStartOffset, offsetAtt.startOffset());
          assertEquals(correctEndOffset, offsetAtt.endOffset());
          correctStartOffset++;
          correctEndOffset++;
        }
    }