Package org.apache.lucene.analysis.core

Examples of org.apache.lucene.analysis.core.StopFilter
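StopFilter removes tokens that appear in a supplied stop word set (a CharArraySet) from a TokenStream. The excerpts below all follow the same pattern: construct a Tokenizer, wrap it in a chain of TokenFilters that includes a StopFilter, and return the pair as TokenStreamComponents from Analyzer.createComponents. For orientation, here is a minimal self-contained sketch against the Lucene 4.x API; the version constant and the stop word list are illustrative, not taken from the excerpts:

  import java.io.Reader;
  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.analysis.Tokenizer;
  import org.apache.lucene.analysis.core.LowerCaseFilter;
  import org.apache.lucene.analysis.core.StopFilter;
  import org.apache.lucene.analysis.core.WhitespaceTokenizer;
  import org.apache.lucene.util.Version;

  public class SimpleStopAnalyzer extends Analyzer {
    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
      Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_43, reader);
      TokenStream result = new LowerCaseFilter(Version.LUCENE_43, tokenizer);
      // drop any token found in the stop set (illustrative word list)
      result = new StopFilter(Version.LUCENE_43, result,
          StopFilter.makeStopSet(Version.LUCENE_43, "the", "a", "an"));
      return new TokenStreamComponents(tokenizer, result);
    }
  }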


  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    Tokenizer tokenizer = new WikipediaTokenizer(reader);
    TokenStream result = new StandardFilter(Version.LUCENE_43, tokenizer);
    result = new LowerCaseFilter(Version.LUCENE_43, result);
    // lowercasing comes first so tokens match the (lowercase) stop word set
    result = new StopFilter(Version.LUCENE_43, result, getStopwordSet());
    return new TokenStreamComponents(tokenizer, result);
  }


  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    Tokenizer tokenizer = new StandardTokenizer(LUCENE_VERSION, reader);
    TokenStream result = new StandardFilter(LUCENE_VERSION, tokenizer);
    result = new LowerCaseFilter(LUCENE_VERSION, result);
    result = new ASCIIFoldingFilter(result);
    result = new AlphaNumericMaxLengthFilter(result); // project-specific filter
    // stop words are removed after lowercasing/folding, before stemming
    result = new StopFilter(LUCENE_VERSION, result, STOP_SET);
    result = new PorterStemFilter(result);
    return new TokenStreamComponents(tokenizer, result);
  }

  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    if (pattern == NON_WORD_PATTERN) { // fast path (method head restored for context)
      return new TokenStreamComponents(new FastStringTokenizer(reader, true, toLowerCase, stopWords));
    } else if (pattern == WHITESPACE_PATTERN) { // fast path
      return new TokenStreamComponents(new FastStringTokenizer(reader, false, toLowerCase, stopWords));
    }

    Tokenizer tokenizer = new PatternTokenizer(reader, pattern, toLowerCase);
    // a StopFilter is added only when a stop word set was configured
    TokenStream result = (stopWords != null) ? new StopFilter(matchVersion, tokenizer, stopWords) : tokenizer;
    return new TokenStreamComponents(tokenizer, result);
  }

      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        TokenFilter filter = new FakeStandardTokenizer(tokenizer);
        // an empty stop set makes this StopFilter a pass-through; it is here to exercise the chain
        filter = new StopFilter(TEST_VERSION_CURRENT, filter, CharArraySet.EMPTY_SET);
        filter = new CJKBigramFilter(filter);
        return new TokenStreamComponents(tokenizer, filter);
      }
    };
   

    Analyzer indexAnalyzer = new Analyzer() {
        @Override
        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
          MockTokenizer tokens = new MockTokenizer(reader);
          return new TokenStreamComponents(tokens,
                                           new StopFilter(TEST_VERSION_CURRENT, tokens, stopWords));
        }
      };

    Analyzer queryAnalyzer = new Analyzer() {
        @Override
        // ... (rest of the query-side analyzer truncated in this excerpt)

  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    // restored context: src is the StandardTokenizer the excerpt configures below
    final StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
    src.setMaxTokenLength(maxTokenLength);
//  src.setReplaceInvalidAcronym(replaceInvalidAcronym);
    TokenStream tok = new StandardFilter(matchVersion, src);
    tok = new ASCIIFoldingFilter(tok);
    tok = new LowerCaseFilter(matchVersion, tok);
    tok = new StopFilter(matchVersion, tok, stopwords);
    return new TokenStreamComponents(src, tok);
    // (a commented-out TokenStreamComponents subclass variant follows in the full source)
  }

  @Override
  protected TokenStreamComponents createComponents(String fieldName,
      Reader reader) {
    final Tokenizer source = new StandardTokenizer(matchVersion, reader);
    TokenStream result = new StandardFilter(matchVersion, source);
    // strip hyphenation prefixes; position increments are disabled so no gaps are left
    StopFilter s = new StopFilter(matchVersion, result, HYPHENATIONS);
    s.setEnablePositionIncrements(false);
    result = s;
    result = new ElisionFilter(result, DEFAULT_ARTICLES);
    result = new IrishLowerCaseFilter(result);
    result = new StopFilter(matchVersion, result, stopwords);
    if (!stemExclusionSet.isEmpty())
      result = new SetKeywordMarkerFilter(result, stemExclusionSet);
    result = new SnowballFilter(result, new IrishStemmer());
    return new TokenStreamComponents(source, result);
  }
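By default StopFilter leaves a position gap for every removed token, which is why the Irish analyzer excerpt above explicitly disables position increments for its hyphenation set. The default behavior is easy to observe by consuming a stream directly; a minimal sketch, assuming the Lucene 4.x API (the tokenizer, text, and stop list are illustrative):

  import java.io.StringReader;
  import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.analysis.core.StopFilter;
  import org.apache.lucene.analysis.core.WhitespaceTokenizer;
  import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
  import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
  import org.apache.lucene.util.Version;

  public class StopFilterPositions {
    public static void main(String[] args) throws Exception {
      TokenStream ts = new StopFilter(Version.LUCENE_43,
          new WhitespaceTokenizer(Version.LUCENE_43, new StringReader("fast and furious")),
          StopFilter.makeStopSet(Version.LUCENE_43, "and"));
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      PositionIncrementAttribute posIncr = ts.addAttribute(PositionIncrementAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        // prints "fast +1" then "furious +2": the removed stop word leaves a gap
        System.out.println(term + " +" + posIncr.getPositionIncrement());
      }
      ts.end();
      ts.close();
    }
  }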

    Analyzer a3 = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String field, Reader reader) {
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        StopFilter filter = new StopFilter(TEST_VERSION_CURRENT,
            tokenizer, StandardAnalyzer.STOP_WORDS_SET);
        // keep position gaps where stop words were removed (the 4.x default)
        filter.setEnablePositionIncrements(true);
        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(filter, flags, protWords));
      }
    };

    assertAnalyzesTo(a3, "lucene.solr",
        // ... (expected tokens truncated in this excerpt)

  @Override
  protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
    Set<String> stopWords = stopWordsPerField.get(fieldName);
    if (stopWords == null) {
      return components; // no stop words configured for this field
    }
    // append a StopFilter to the delegate analyzer's chain for this field only
    StopFilter stopFilter = new StopFilter(matchVersion, components.getTokenStream(),
        new CharArraySet(matchVersion, stopWords, false));
    return new TokenStreamComponents(components.getTokenizer(), stopFilter);
  }
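The wrapComponents override above only has meaning inside an AnalyzerWrapper subclass, which delegates analysis to another Analyzer and then post-processes its chain. A sketch of what the surrounding class might look like, assuming Lucene 4.x (the class name, delegate, and field map are illustrative, not from the excerpt):

  import java.util.Map;
  import java.util.Set;
  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.analysis.AnalyzerWrapper;
  import org.apache.lucene.analysis.core.StopFilter;
  import org.apache.lucene.analysis.util.CharArraySet;
  import org.apache.lucene.util.Version;

  public class PerFieldStopAnalyzer extends AnalyzerWrapper {
    private final Version matchVersion;
    private final Analyzer delegate;
    private final Map<String, Set<String>> stopWordsPerField;

    public PerFieldStopAnalyzer(Version matchVersion, Analyzer delegate,
        Map<String, Set<String>> stopWordsPerField) {
      this.matchVersion = matchVersion;
      this.delegate = delegate;
      this.stopWordsPerField = stopWordsPerField;
    }

    @Override
    protected Analyzer getWrappedAnalyzer(String fieldName) {
      return delegate; // same delegate for every field
    }

    @Override
    protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
      Set<String> stopWords = stopWordsPerField.get(fieldName);
      if (stopWords == null) {
        return components;
      }
      StopFilter stopFilter = new StopFilter(matchVersion, components.getTokenStream(),
          new CharArraySet(matchVersion, stopWords, false));
      return new TokenStreamComponents(components.getTokenizer(), stopFilter);
    }
  }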

