Package org.apache.solr.common.util

Examples of org.apache.solr.common.util.SimpleOrderedMap
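
SimpleOrderedMap extends NamedList: it holds an ordered list of name/value pairs but signals to response writers that the section may be rendered as a map/object rather than as a flat list. Before the excerpts below, here is a minimal, self-contained usage sketch (illustrative only, not taken from any snippet on this page; the class and variable names are placeholders):

    import org.apache.solr.common.util.NamedList;
    import org.apache.solr.common.util.SimpleOrderedMap;

    public class SimpleOrderedMapExample {
      public static void main(String[] args) {
        // Entries keep insertion order and can be read back by name or by position.
        SimpleOrderedMap<Object> header = new SimpleOrderedMap<Object>();
        header.add("status", 0);
        header.add("QTime", 15);

        System.out.println(header.get("status"));   // lookup by name   -> 0
        System.out.println(header.getName(0));      // name at index 0  -> status
        System.out.println(header.getVal(1));       // value at index 1 -> 15

        // Plain NamedList shares the same API; SimpleOrderedMap only adds a
        // rendering hint for response writers. Duplicate names are permitted in both.
        NamedList<Object> list = new NamedList<Object>();
        list.add("x", 1);
        list.add("x", 2);
        System.out.println(list);                   // {x=1,x=2}
      }
    }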


    // so that "result" is already stored in the response (for aesthetics)


    FacetInfo fi = rb._facetInfo;

    NamedList facet_counts = new SimpleOrderedMap();

    if (fi.exceptionList != null) {
      facet_counts.add("exception",fi.exceptionList);
    }

    NamedList facet_queries = new SimpleOrderedMap();
    facet_counts.add("facet_queries",facet_queries);
    for (QueryFacet qf : fi.queryFacets.values()) {
      facet_queries.add(qf.getKey(), num(qf.count));
    }

    NamedList facet_fields = new SimpleOrderedMap();
    facet_counts.add("facet_fields", facet_fields);

    for (DistribFieldFacet dff : fi.facets.values()) {
      NamedList fieldCounts = new NamedList(); // order is more important for facets
      facet_fields.add(dff.getKey(), fieldCounts);

      ShardFacetCount[] counts;
      boolean countSorted = dff.sort.equals(FacetParams.FACET_SORT_COUNT);
      if (countSorted) {
        counts = dff.countSorted;
        if (counts == null || dff.needRefinements) {
          counts = dff.getCountSorted();
        }
      } else if (dff.sort.equals(FacetParams.FACET_SORT_INDEX)) {
          counts = dff.getLexSorted();
      } else { // TODO: log error or throw exception?
          counts = dff.getLexSorted();
      }

      if (countSorted) {
        int end = dff.limit < 0 ? counts.length : Math.min(dff.offset + dff.limit, counts.length);
        for (int i=dff.offset; i<end; i++) {
          if (counts[i].count < dff.minCount) {
            break;
          }
          fieldCounts.add(counts[i].name, num(counts[i].count));
        }
      } else {
        int off = dff.offset;
        int lim = dff.limit >= 0 ? dff.limit : Integer.MAX_VALUE;

        // index order...
        for (int i=0; i<counts.length; i++) {
          long count = counts[i].count;
          if (count < dff.minCount) continue;
          if (off > 0) {
            off--;
            continue;
          }
          if (lim <= 0) {
            break;
          }
          lim--;
          fieldCounts.add(counts[i].name, num(count));
        }
      }

      if (dff.missing) {
        fieldCounts.add(null, num(dff.missingCount));
      }
    }

    // TODO: facet dates & numbers
    facet_counts.add("facet_dates", new SimpleOrderedMap());
    facet_counts.add("facet_ranges", new SimpleOrderedMap());

    rb.rsp.add("facet_counts", facet_counts);

    rb._facetInfo = null;  // could be big, so release asap
  }
View Full Code Here
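
Rendered by the JSON response writer, the facet_counts section assembled above comes out roughly as sketched below (a schematic only; <facet.query>, <field>, <term>, and <count> are placeholders). The SimpleOrderedMap sections become JSON objects, while the plain NamedList of per-field term counts is, under the default json.nl handling, written as a flat name/value array:

    "facet_counts": {
      "facet_queries": { "<facet.query>": <count>, ... },
      "facet_fields":  { "<field>": ["<term1>", <count1>, "<term2>", <count2>, ...], ... },
      "facet_dates":   {},
      "facet_ranges":  {}
    }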


    protected Integer getNumberOfGroups() {
      return null;
    }

    protected NamedList commonResponse() {
      NamedList groupResult = new SimpleOrderedMap();
      grouped.add(key, groupResult);  // grouped={ key={

      int matches = getMatches();
      groupResult.add("matches", matches);
      if (totalCount == TotalCount.grouped) {
        Integer totalNrOfGroups = getNumberOfGroups();
        groupResult.add("ngroups", totalNrOfGroups == null ? 0 : totalNrOfGroups);
      }
      maxMatches = Math.max(maxMatches, matches);
      return groupResult;
    }
View Full Code Here

    rsp.add("solr-mbeans", cats);
   
    String[] requestedCats = req.getParams().getParams("cat");
    if (null == requestedCats || 0 == requestedCats.length) {
      for (SolrInfoMBean.Category cat : SolrInfoMBean.Category.values()) {
        cats.add(cat.name(), new SimpleOrderedMap());
      }
    } else {
      for (String catName : requestedCats) {
        cats.add(catName,new SimpleOrderedMap());
      }
    }
        
    Set<String> requestedKeys = arrayToSet(req.getParams().getParams("key"));
   
    Map<String, SolrInfoMBean> reg = core.getInfoRegistry();
    for (Map.Entry<String, SolrInfoMBean> entry : reg.entrySet()) {
      String key = entry.getKey();
      SolrInfoMBean m = entry.getValue();

      if ( ! ( requestedKeys.isEmpty() || requestedKeys.contains(key) ) ) continue;

      NamedList catInfo = (NamedList) cats.get(m.getCategory().name());
      if ( null == catInfo ) continue;

      NamedList mBeanInfo = new SimpleOrderedMap();
      mBeanInfo.add("class", m.getName());
      mBeanInfo.add("version", m.getVersion());
      mBeanInfo.add("description", m.getDescription());
      mBeanInfo.add("srcId", m.getSourceId());
      mBeanInfo.add("src", m.getSource());
      mBeanInfo.add("docs", m.getDocs());
     
      if (req.getParams().getFieldBool(key, "stats", false))
        mBeanInfo.add("stats", m.getStatistics());
     
      catInfo.add(key, mBeanInfo);
    }
    rsp.setHttpCaching(false); // never cache, no matter what init config looks like
  }
View Full Code Here

    SolrParams params = rb.req.getParams();
    if (params.getBool(TermsParams.TERMS, false)) {
      String lowerStr = params.get(TermsParams.TERMS_LOWER, null);
      String[] fields = params.getParams(TermsParams.TERMS_FIELD);
      if (fields != null && fields.length > 0) {
        NamedList terms = new SimpleOrderedMap();
        rb.rsp.add("terms", terms);
        int limit = params.getInt(TermsParams.TERMS_LIMIT, 10);
        if (limit < 0) {
          limit = Integer.MAX_VALUE;
        }
        String upperStr = params.get(TermsParams.TERMS_UPPER);
        boolean upperIncl = params.getBool(TermsParams.TERMS_UPPER_INCLUSIVE, false);
        boolean lowerIncl = params.getBool(TermsParams.TERMS_LOWER_INCLUSIVE, true);
        boolean sort = !TermsParams.TERMS_SORT_INDEX.equals(
                          params.get(TermsParams.TERMS_SORT, TermsParams.TERMS_SORT_COUNT));
        int freqmin = params.getInt(TermsParams.TERMS_MINCOUNT, 1); // initialize freqmin
        int freqmax = params.getInt(TermsParams.TERMS_MAXCOUNT, UNLIMITED_MAX_COUNT); // initialize freqmax
        if (freqmax<0) {
          freqmax = Integer.MAX_VALUE;
        }
        String prefix = params.get(TermsParams.TERMS_PREFIX_STR);
        String regexp = params.get(TermsParams.TERMS_REGEXP_STR);
        Pattern pattern = regexp != null ? Pattern.compile(regexp, resolveRegexpFlags(params)) : null;

        boolean raw = params.getBool(TermsParams.TERMS_RAW, false);
        for (int j = 0; j < fields.length; j++) {
          String field = StringHelper.intern(fields[j]);
          FieldType ft = raw ? null : rb.req.getSchema().getFieldTypeNoEx(field);
          if (ft==null) ft = new StrField();

          // If no lower bound was specified, use the prefix
          String lower = lowerStr==null ? prefix : (raw ? lowerStr : ft.toInternal(lowerStr));
          if (lower == null) lower="";
          String upper = upperStr==null ? null : (raw ? upperStr : ft.toInternal(upperStr));

          Term lowerTerm = new Term(field, lower);
          Term upperTerm = upper==null ? null : new Term(field, upper);
         
          TermEnum termEnum = rb.req.getSearcher().getReader().terms(lowerTerm); //this will be positioned ready to go
          int i = 0;
          BoundedTreeSet<CountPair<String, Integer>> queue = (sort ? new BoundedTreeSet<CountPair<String, Integer>>(limit) : null);
          NamedList fieldTerms = new NamedList();
          terms.add(field, fieldTerms);
          Term lowerTestTerm = termEnum.term();

          //Only advance the enum if we are excluding the lower bound and the lower Term actually matches
          if (lowerTestTerm!=null && lowerIncl == false && lowerTestTerm.field() == field  // intern'd comparison
                  && lowerTestTerm.text().equals(lower)) {
View Full Code Here

        }
      }
    }

    public NamedList buildResponse() {
      NamedList response = new SimpleOrderedMap();

      // determine if we are going index or count sort
      boolean sort = !TermsParams.TERMS_SORT_INDEX.equals(params.get(
          TermsParams.TERMS_SORT, TermsParams.TERMS_SORT_COUNT));

      // init minimum frequency
      long freqmin = 1;
      String s = params.get(TermsParams.TERMS_MINCOUNT);
      if (s != null) freqmin = Long.parseLong(s);

      // init maximum frequency, default to max int
      long freqmax = -1;
      s = params.get(TermsParams.TERMS_MAXCOUNT);
      if (s != null) freqmax = Long.parseLong(s);
      if (freqmax < 0) {
        freqmax = Long.MAX_VALUE;
      }

      // init limit, default to max int
      long limit = 10;
      s = params.get(TermsParams.TERMS_LIMIT);
      if (s != null) limit = Long.parseLong(s);
      if (limit < 0) {
        limit = Long.MAX_VALUE;
      }

      // loop though each field we want terms from
      for (String key : fieldmap.keySet()) {
        NamedList fieldterms = new SimpleOrderedMap();
        TermsResponse.Term[] data = null;
        if (sort) {
          data = getCountSorted(fieldmap.get(key));
        } else {
          data = getLexSorted(fieldmap.get(key));
        }

        // loop though each term until we hit limit
        int cnt = 0;
        for (TermsResponse.Term tc : data) {
          if (tc.getFrequency() >= freqmin && tc.getFrequency() <= freqmax) {
            fieldterms.add(tc.getTerm(), num(tc.getFrequency()));
            cnt++;
          }

          if (cnt >= limit) {
            break;
View Full Code Here

        params.add(SpellCheckComponent.SPELLCHECK_EXTENDED_RESULTS,"true");
        params.add(CommonParams.Q, "anotheq");
       
        SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH");
        SolrQueryResponse rsp = new SolrQueryResponse();
        rsp.add("responseHeader", new SimpleOrderedMap());
        SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
        handler.handleRequest(req, rsp);
        req.close();
        NamedList values = rsp.getValues();
        NamedList spellCheck = (NamedList) values.get("spellcheck");
View Full Code Here

        int count = params.getInt(SPELLCHECK_COUNT, 1);
        boolean onlyMorePopular = params.getBool(SPELLCHECK_ONLY_MORE_POPULAR,
            DEFAULT_ONLY_MORE_POPULAR);
        boolean extendedResults = params.getBool(SPELLCHECK_EXTENDED_RESULTS,
            false);
        NamedList response = new SimpleOrderedMap();
        IndexReader reader = rb.req.getSearcher().getReader();
        boolean collate = params.getBool(SPELLCHECK_COLLATE, false);
        float accuracy = params.getFloat(SPELLCHECK_ACCURACY, Float.MIN_VALUE);
        SolrParams customParams = getCustomParams(getDictionaryName(params), params, shardRequest);
        SpellingOptions options = new SpellingOptions(tokens, reader, count, onlyMorePopular, extendedResults,
                accuracy, customParams);                      
        SpellingResult spellingResult = spellChecker.getSuggestions(options);
        if (spellingResult != null) {
          NamedList suggestions = toNamedList(shardRequest, spellingResult, q, extendedResults, collate);         
          if (collate) {           
            addCollationsToResponse(params, spellingResult, rb, q, suggestions);
          }
          response.add("suggestions", suggestions);
          rb.rsp.add("spellcheck", response);
        }

      } else {
        throw new SolrException(SolrException.ErrorCode.NOT_FOUND,
View Full Code Here

        for (SuggestWord word : suggestions) words.add(word.string);
        result.add(token, words);
      }
    }
   
    NamedList response = new SimpleOrderedMap();
    NamedList suggestions = toNamedList(false, result, origQuery, extendedResults, collate);
    if (collate) {
      SpellCheckCollation[] sortedCollations = collations.values().toArray(new SpellCheckCollation[collations.size()]);
      Arrays.sort(sortedCollations);
      int i = 0;
      while (i < maxCollations && i < sortedCollations.length) {
        SpellCheckCollation collation = sortedCollations[i];
        i++;
        if (collationExtendedResults) {
          NamedList extendedResult = new NamedList();
          extendedResult.add("collationQuery", collation.getCollationQuery());
          extendedResult.add("hits", collation.getHits());
          extendedResult.add("misspellingsAndCorrections", collation
              .getMisspellingsAndCorrections());
          suggestions.add("collation", extendedResult);
        } else {
          suggestions.add("collation", collation.getCollationQuery());
        }
      }
    }
   
    response.add("suggestions", suggestions);
    rb.rsp.add("spellcheck", response);
  }
View Full Code Here

   
    for (Map.Entry<Token, LinkedHashMap<String, Integer>> entry : suggestions.entrySet()) {
      Token inputToken = entry.getKey();
      Map<String, Integer> theSuggestions = entry.getValue();
      if (theSuggestions != null && (theSuggestions.size()>0 || shardRequest)) {
        SimpleOrderedMap suggestionList = new SimpleOrderedMap();
        suggestionList.add("numFound", theSuggestions.size());
        suggestionList.add("startOffset", inputToken.startOffset());
        suggestionList.add("endOffset", inputToken.endOffset());

        // Logical structure of normal (non-extended) results:
        // "suggestion":["alt1","alt2"]
        //
        // Logical structure of the extended results:
        // "suggestion":[
        //     {"word":"alt1","freq":7},
        //     {"word":"alt2","freq":4}
        // ]
        if (extendedResults && hasFreqInfo) {
          suggestionList.add("origFreq", spellingResult.getTokenFrequency(inputToken));

          ArrayList<SimpleOrderedMap> sugs = new ArrayList<SimpleOrderedMap>();
          suggestionList.add("suggestion", sugs);
          for (Map.Entry<String, Integer> suggEntry : theSuggestions.entrySet()) {
            SimpleOrderedMap sugEntry = new SimpleOrderedMap();
            sugEntry.add("word",suggEntry.getKey());
            sugEntry.add("freq",suggEntry.getValue());
            sugs.add(sugEntry);
          }
        } else {
          suggestionList.add("suggestion", theSuggestions.keySet());
        }
View Full Code Here

   
    //Because a FilterQuery is applied which removes doc id#1 from possible hits, we would
    //not want the collations to return us "lowerfilt:(+faith +hope +loaves)" as this only matches doc id#1.
    SolrRequestHandler handler = core.getRequestHandler("spellCheckCompRH");
    SolrQueryResponse rsp = new SolrQueryResponse();
    rsp.add("responseHeader", new SimpleOrderedMap());
    SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
    handler.handleRequest(req, rsp);
    req.close();
    NamedList values = rsp.getValues();
    NamedList spellCheck = (NamedList) values.get("spellcheck");
View Full Code Here
