Package org.elasticsearch.index.analysis

Examples of org.elasticsearch.index.analysis.NamedAnalyzer


        if (sub == null) {
            throw new ElasticsearchIllegalArgumentException("Analyzer ["+name+"] analyzer of type ["+NAME+"], must have a \"sub_analyzers\" list property");
        }

        for (String subname : sub) {
            NamedAnalyzer analyzer = analysisService.analyzer(subname);
            if (analyzer == null) {
                logger.debug("Sub-analyzer \""+subname+"\" not found!");
            } else {
                subAnalyzers.add(analyzer);
            }
View Full Code Here


    }

    @Test
    public void testCanUseFromNamedAnalyzer() throws IOException {
        ComboAnalyzer cb = new ComboAnalyzer(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
        NamedAnalyzer namedAnalyzer = new NamedAnalyzer("name", AnalyzerScope.INDEX, cb);
        for (int i = 0 ; i < 3 ; i++)
            assertTokenStreamContents(namedAnalyzer.tokenStream("field", new StringReader("just a little test " + i)),
                    new String[]{"just", "a", "little", "test", Integer.toString(i)},
                    new int[]{ 057, 14, 19},
                    new int[]{ 46, 13, 18, 20},
                    new int[]{ 11111});
    }
View Full Code Here

    @Test
    public void testReuseSequentialMultithreading() throws IOException, InterruptedException {
        // Create the analyzer
        final ComboAnalyzer cb = new ComboAnalyzer(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
        final NamedAnalyzer namedAnalyzer = new NamedAnalyzer("name", AnalyzerScope.INDEX, cb);
        // Use N threads, each running M times
        Thread[] threads = new Thread[4];
        final int runs = 4;
        // The lock ensures only one thread is running at a given time
        final Lock lock = new ReentrantLock();
        // This integer ensures each thread runs with a different input
        // Inputs must not be exchanged from one thread to another during object reuse
        final AtomicInteger sequence = new AtomicInteger(0);
        final AtomicBoolean abort = new AtomicBoolean(false);
        // The barrier ensures that each thread gets a chance to execute, for each run
        // We must use extra care so that all threads can exit as soon as one fails
        final CyclicBarrier latch = new CyclicBarrier(threads.length);
        // Code executed on each thread
        Runnable code = new Runnable() {
            @Override
            public void run() {
                // Run multiple times before quitting
                for (int run = 0 ; run < runs ; ++run) {
                    try {
                        // Serialize runs
                        lock.lock();
                        // Get unique sequence number
                        int i = sequence.getAndIncrement();
                        // Check the analysis went well, including the unique sequence number
                        assertTokenStreamContents(namedAnalyzer.tokenStream("field", new StringReader("just a little test " + i)),
                                new String[]{"just", "a", "little", "test", Integer.toString(i)},
                                new int[]{0, 5, 7, 14, 19},
                                new int[]{4, 6, 13, 18, 19 + ("" + i).length()},
                                new int[]{1, 1, 1, 1, 1});
                    } catch (Exception e) {
View Full Code Here

    @Test
    public void testReuseConcurrentMultithreading() throws IOException, InterruptedException {
        // Create the analyzer
        final ComboAnalyzer cb = new ComboAnalyzer(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
        final NamedAnalyzer namedAnalyzer = new NamedAnalyzer("name", AnalyzerScope.INDEX, cb);
        // Use N threads, each running M times
        Thread[] threads = new Thread[4];
        final int runs = 4000; // leave time for threads to run concurrently
        // This integer ensures each thread runs with a different input
        // Inputs must not be exchanged from one thread to another during object reuse
        final AtomicInteger sequence = new AtomicInteger(0);
        // The barrier ensures that each thread gets a chance to execute, for each run
        final CyclicBarrier latch = new CyclicBarrier(threads.length);
        // Code executed on each thread
        Runnable code = new Runnable() {
            @Override
            public void run() {
                try {
                    latch.await();
                    // Run multiple times before quitting
                    for (int run = 0 ; run < runs ; ++run) {
                        // Get unique sequence number
                        int i = sequence.getAndIncrement();
                        // Check the analysis went well, including the unique sequence number
                        assertTokenStreamContents(namedAnalyzer.tokenStream("field", new StringReader("just a little test " + i)),
                                new String[]{"just", "a", "little", "test", Integer.toString(i)},
                                new int[]{0, 5, 7, 14, 19},
                                new int[]{4, 6, 13, 18, 19 + ("" + i).length()},
                                new int[]{1, 1, 1, 1, 1});
                    }
View Full Code Here

    @Test
    public void testMorphologyAnalysis() throws Exception {
        AnalysisService analysisService = getAnalysisService();

        NamedAnalyzer russianAnalyzer = analysisService.analyzer("russian_morphology");
        MatcherAssert.assertThat(russianAnalyzer.analyzer(), instanceOf(RussianAnalyzer.class));
        assertSimpleTSOutput(russianAnalyzer.tokenStream("test", new StringReader("тест")), new String[] {"тест", "тесто"});

        NamedAnalyzer englishAnalyzer = analysisService.analyzer("english_morphology");
        MatcherAssert.assertThat(englishAnalyzer.analyzer(), instanceOf(EnglishAnalyzer.class));
        assertSimpleTSOutput(englishAnalyzer.tokenStream("test", new StringReader("gone")), new String[]{"gone", "go"});
    }
View Full Code Here

        if (fieldMappers != null) {
            // Resolve the mapper for the given field/types so we can fall back to
            // its search analyzer when no explicit analyzer name is configured.
            FieldMapper fieldMapper = mapperService.smartName(fieldType.field(), fieldType.types()).mapper();

            queryAnalyzer = fieldMapper.searchAnalyzer();
            // NOTE(review): this condition checks fieldType.indexAnalyzer() but the
            // branch resolves fieldType.queryAnalyzer() — almost certainly a
            // copy-paste bug from the index-analyzer branch below; it should
            // probably be Strings.hasLength(fieldType.queryAnalyzer()). Confirm
            // against the original source before changing.
            if (Strings.hasLength(fieldType.indexAnalyzer())) {
                NamedAnalyzer namedAnalyzer = analysisService.analyzer(fieldType.queryAnalyzer());
                if (namedAnalyzer == null) {
                    throw new ElasticsearchException("Query analyzer[" + fieldType.queryAnalyzer() + "] does not exist.");
                }
                queryAnalyzer = namedAnalyzer.analyzer();
            }

            // Same fallback pattern for the index analyzer: default to the
            // mapper's search analyzer, then override from an explicit name.
            indexAnalyzer = fieldMapper.searchAnalyzer();
            if (Strings.hasLength(fieldType.indexAnalyzer())) {
                NamedAnalyzer namedAnalyzer = analysisService.analyzer(fieldType.indexAnalyzer());
                if (namedAnalyzer == null) {
                    throw new ElasticsearchException("Index analyzer[" + fieldType.indexAnalyzer() + "] does not exist.");
                }
                indexAnalyzer = namedAnalyzer.analyzer();
            }
        }

        if (queryAnalyzer == null) {
            queryAnalyzer = new StandardAnalyzer(org.elasticsearch.Version.CURRENT.luceneVersion);
View Full Code Here

    return missing.size();
  }

   @Test
   public void ResolvingHashTest(){
      NamedAnalyzer analyzer = Lucene.STANDARD_ANALYZER;
      String value = "Some text with spaces";
      String term = HashedStringFieldData.analyzeStringForTerm(value,
              HashedStringFieldType.hashCode("spaces"), "field", analyzer);
      assertThat(term,equalTo("spaces"));
      value = "Some other text with spaces and more";
View Full Code Here

                Object fieldNode = entry.getValue();
                if (fieldName.equals("type")) {
                    continue;
                }
                if (fieldName.equals("analyzer")) {
                    NamedAnalyzer analyzer = getNamedAnalyzer(parserContext, fieldNode.toString());
                    builder.indexAnalyzer(analyzer);
                    builder.searchAnalyzer(analyzer);
                } else if (Fields.INDEX_ANALYZER.match(fieldName)) {
                    builder.indexAnalyzer(getNamedAnalyzer(parserContext, fieldNode.toString()));
                } else if (Fields.SEARCH_ANALYZER.match(fieldName)) {
View Full Code Here

            builder.postingsFormat(parserContext.postingFormatService().get("default"));
            return builder;
        }

        private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) {
            NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name);
            if (analyzer == null) {
                throw new ElasticsearchIllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]");
            }
            return analyzer;
        }
View Full Code Here

                // deprecated option for BW compat
                builder.indexOptions(op);
            } else if (propName.equals("index_options")) {
                builder.indexOptions(nodeIndexOptionValue(propNode));
            } else if (propName.equals("analyzer")) {
                NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
                if (analyzer == null) {
                    throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
                }
                builder.indexAnalyzer(analyzer);
                builder.searchAnalyzer(analyzer);
            } else if (propName.equals("index_analyzer")) {
                NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
                if (analyzer == null) {
                    throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
                }
                builder.indexAnalyzer(analyzer);
            } else if (propName.equals("search_analyzer")) {
                NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
                if (analyzer == null) {
                    throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
                }
                builder.searchAnalyzer(analyzer);
            } else if (propName.equals("include_in_all")) {
View Full Code Here

TOP

Related Classes of org.elasticsearch.index.analysis.NamedAnalyzer

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.