Package opennlp.tools.namefind

Examples of opennlp.tools.namefind.NameFinderME


   
    List<TokenNameFinder> nameFinders = new ArrayList<TokenNameFinder>();
    List<String> tags = new ArrayList<String>();
   
    for (Map.Entry<String, File> entry : modelFileTagMap.entrySet()) {
      nameFinders.add(new NameFinderME(
          new TokenNameFinderModelLoader().load(entry.getValue())));
      tags.add(entry.getKey());
    }
   
    return new MucMentionInserterStream(new NameFinderCorefEnhancerStream(nameFinders.toArray(
View Full Code Here


      try{
      name = path.substring(path.lastIndexOf("/") + 1, path.indexOf(".", path.lastIndexOf("/") + 1));
      }catch (Exception e){
        name = path;
      }
      model = new NameFinderME(OpenNLPNameFin.getModel(url));
    }
View Full Code Here

     * @throws IOException on any error while reading the model data
     */
    public TokenNameFinder getNameFinder(String type, String language) throws IOException {
        TokenNameFinderModel model = getNameModel(type, language);
        if(model != null){
            return new NameFinderME(model);
        } else {
            log.debug("TokenNameFinder model for type {} and langauge {} not present",type,language);
            return null;
        }
    }
View Full Code Here

        int[] sentencePositions = new int[sentences.length + 1];
        for (int k=0; k<sentenceEndings.length; k++) {
            sentencePositions[k] = sentenceEndings[k].getStart();
        }

        NameFinderME finder = new NameFinderME((TokenNameFinderModel)nameFinderModel);

        List<SurfaceFormOccurrence> sfOccurrences = new ArrayList<SurfaceFormOccurrence>();
        Tokenizer tokenizer = new SimpleTokenizer();
        for (int i = 0; i < sentences.length; i++) {
            String sentence = sentences[i];
            //LOG.debug("Sentence: " + sentence);

            // extract the names in the current sentence
            String[] tokens = tokenizer.tokenize(sentence);
            Span[] tokenspan = tokenizer.tokenizePos(sentence);
            Span[] nameSpans = finder.find(tokens);
            double[] probs = finder.probs();

            if (nameSpans != null && nameSpans.length > 0) {
                //System.out.println("Tokens: " +(new ArrayList(Arrays.asList(tokens))).toString());
                //System.out.println("NameSpans: " +(new ArrayList(Arrays.asList(nameSpans))).toString());
                for (Span span : nameSpans) {
                    StringBuilder buf = new StringBuilder();
                    //System.out.println("StartSpan: " + span.getStart() + " EndSpan: " + span.getEnd());
                    for (int j = span.getStart(); j < span.getEnd(); j++) {
                        //System.out.println(tokens[i] + " appended to " + buf.toString());
                        buf.append(tokens[j]);
                        if(j<span.getEnd()-1) buf.append(" ");
                    }
                    String surfaceFormStr = buf.toString().trim();
                    if (surfaceFormStr.contains(".")) {
                      surfaceFormStr = correctPhrase(surfaceFormStr, sentence);
                    }
                   
                    int entStart = sentencePositions[i] + tokenspan[span.getStart()].getStart();
                    int entEnd = sentencePositions[i] +tokenspan[span.getEnd()-1].getEnd();

                    /*
                    System.out.println("\n\nRR-NE Found = " + buf.toString());
                    System.out.println("Start = " + entStart);
                    System.out.println("End = " + entEnd);
                    System.out.println("Sentence = " + sentence);
                    System.out.println("Text = " + text);
                    */

                    SurfaceForm surfaceForm = new SurfaceForm(surfaceFormStr);
                    SurfaceFormOccurrence sfocc =  new SurfaceFormOccurrence(surfaceForm, text, entStart);
                    sfocc.features().put("type", new Feature("type",oType.toString()));
                    sfOccurrences.add(sfocc);
                }
            }

        }
        finder.clearAdaptiveData();

        if (LOG.isDebugEnabled()) {
            LOG.debug("Occurrences found: "   +StringUtils.join(sfOccurrences, ", "));
        }
        return sfOccurrences;
View Full Code Here

     * Name Finder and Tokenizer.
     *
     * @throws IOException
     */
    public ApacheExtractor() throws IOException {
        nameFinder = new NameFinderME(new TokenNameFinderModel(ApacheExtractor.class.getResourceAsStream(pathToNERModel)));
        tokenizer = new TokenizerME(new TokenizerModel(ApacheExtractor.class.getResourceAsStream(pathToTokenizerModel)));
        sentenceDetector = new SentenceDetectorME(new SentenceModel(ApacheExtractor.class.getResourceAsStream(pathToSentenceDetectorModel)));
    }
View Full Code Here

    protected Map<String,List<NameOccurrence>> extractNameOccurrences(TokenNameFinderModel nameFinderModel,
        AnalysedText at, String language) {
        // version with explicit sentence endings to reflect heading / paragraph
        // structure of an HTML or PDF document converted to text

        NameFinderME finder = new NameFinderME(nameFinderModel);
        Map<String,List<NameOccurrence>> nameOccurrences = new LinkedHashMap<String,List<NameOccurrence>>();
        List<Section> sentences = new ArrayList<Section>();
        //Holds the tokens of the previouse (pos 0) current (pos 1) and next (pos 2) sentence
        AnalysedTextUtils.appandToList(at.getSentences(), sentences);
        if(sentences.isEmpty()){ //no sentence annotations
            sentences.add(at); //process as a single section
        }
        for (int i=0;i<sentences.size();i++) {
            String sentence = sentences.get(i).getSpan();
           
            // build a context by concatenating three sentences to be used for
            // similarity ranking / disambiguation + contextual snippet in the
            // extraction structure
            List<String> contextElements = new ArrayList<String>();
            contextElements.add(sentence);
            //three sentences as context
            String context = at.getSpan().substring(
                sentences.get(Math.max(0, i-1)).getStart(),
                sentences.get(Math.min(sentences.size()-1, i+1)).getEnd());

            // get the tokens, words of the current sentence
            List<Token> tokens = new ArrayList<Token>(32);
            List<String> words = new ArrayList<String>(32);
            for(Iterator<Token> it =sentences.get(i).getTokens();it.hasNext();){
                Token t = it.next();
                tokens.add(t);
                words.add(t.getSpan());
            }
            Span[] nameSpans = finder.find(words.toArray(new String[words.size()]));
            double[] probs = finder.probs();
            //int lastStartPosition = 0;
            for (int j = 0; j < nameSpans.length; j++) {
                String name = at.getSpan().substring(tokens.get(nameSpans[j].getStart()).getStart(),
                    tokens.get(nameSpans[j].getEnd()-1).getEnd());
                Double confidence = 1.0;
                for (int k = nameSpans[j].getStart(); k < nameSpans[j].getEnd(); k++) {
                    confidence *= probs[k];
                }
                int start = tokens.get(nameSpans[j].getStart()).getStart();
                int end = start + name.length();
                NerTag nerTag = config.getNerTag(nameSpans[j].getType());
                //create the occurrence for writing fise:TextAnnotations
                NameOccurrence occurrence = new NameOccurrence(name, start, end, nerTag.getType(),
                    context, confidence);
                List<NameOccurrence> occurrences = nameOccurrences.get(name);
                if (occurrences == null) {
                    occurrences = new ArrayList<NameOccurrence>();
                }
                occurrences.add(occurrence);
                nameOccurrences.put(name, occurrences);
                //add also the NerAnnotation to the AnalysedText
                Chunk chunk = at.addChunk(start, end);
                //TODO: build AnnotationModel based on the configured Mappings
                chunk.addAnnotation(NER_ANNOTATION, Value.value(nerTag, confidence));
            }
        }
        finder.clearAdaptiveData();
        log.debug("{} name occurrences found: {}", nameOccurrences.size(), nameOccurrences);
        return nameOccurrences;
    }   
View Full Code Here

        SentenceDetectorME sentenceDetector = new SentenceDetectorME(getSentenceModel("en"));

        Span[] sentenceSpans = sentenceDetector.sentPosDetect(textWithDots);

        NameFinderME finder = new NameFinderME(nameFinderModel);
        Tokenizer tokenizer = openNLP.getTokenizer(language);
        Map<String,List<NameOccurrence>> nameOccurrences = new LinkedHashMap<String,List<NameOccurrence>>();
        for (int i = 0; i < sentenceSpans.length; i++) {
            String sentence = sentenceSpans[i].getCoveredText(text).toString().trim();

            // build a context by concatenating three sentences to be used for
            // similarity ranking / disambiguation + contextual snippet in the
            // extraction structure
            List<String> contextElements = new ArrayList<String>();
            if (i > 0) {
                CharSequence previousSentence = sentenceSpans[i - 1].getCoveredText(text);
                contextElements.add(previousSentence.toString().trim());
            }
            contextElements.add(sentence.toString().trim());
            if (i + 1 < sentenceSpans.length) {
                CharSequence nextSentence = sentenceSpans[i + 1].getCoveredText(text);
                contextElements.add(nextSentence.toString().trim());
            }
            String context = StringUtils.join(contextElements, " ");

            // extract the names in the current sentence and
            // keep them store them with the current context
            Span[] tokenSpans = tokenizer.tokenizePos(sentence);
            String[] tokens = Span.spansToStrings(tokenSpans, sentence);
            Span[] nameSpans = finder.find(tokens);
            double[] probs = finder.probs();
            //int lastStartPosition = 0;
            for (int j = 0; j < nameSpans.length; j++) {
                String name = sentence.substring(tokenSpans[nameSpans[j].getStart()].getStart(),
                    tokenSpans[nameSpans[j].getEnd()-1].getEnd());
                Double confidence = 1.0;
                for (int k = nameSpans[j].getStart(); k < nameSpans[j].getEnd(); k++) {
                    confidence *= probs[k];
                }
                int start = tokenSpans[nameSpans[j].getStart()].getStart();
                int absoluteStart = sentenceSpans[i].getStart() + start;
                int absoluteEnd = absoluteStart + name.length();
                NerTag nerTag = config.getNerTag(nameSpans[j].getType());
                NameOccurrence occurrence = new NameOccurrence(name, absoluteStart, absoluteEnd,
                    nerTag.getType(),context, confidence);

                List<NameOccurrence> occurrences = nameOccurrences.get(name);
                if (occurrences == null) {
                    occurrences = new ArrayList<NameOccurrence>();
                }
                occurrences.add(occurrence);
                nameOccurrences.put(name, occurrences);
            }
        }
        finder.clearAdaptiveData();
        log.debug("{} name occurrences found: {}", nameOccurrences.size(), nameOccurrences);
        return nameOccurrences;
    }
View Full Code Here

      String nameTypes[] = params.getNameTypes().split(",");
      sampleStream = new NameSampleTypeFilter(nameTypes, sampleStream);
    }

    TokenNameFinderEvaluator evaluator = new TokenNameFinderEvaluator(
        new NameFinderME(model),
        listeners.toArray(new TokenNameFinderEvaluationMonitor[listeners.size()]));

    final PerformanceMonitor monitor = new PerformanceMonitor("sent");

    ObjectStream<NameSample> measuredSampleStream = new ObjectStream<NameSample>() {
View Full Code Here

    if (args.length == 0) {
      System.out.println(getHelp());
    } else {

      NameFinderME nameFinders[] = new NameFinderME[args.length];

      for (int i = 0; i < nameFinders.length; i++) {
        TokenNameFinderModel model = new TokenNameFinderModelLoader().load(new File(args[i]));
        nameFinders[i] = new NameFinderME(model);
      }

//      ObjectStream<String> untokenizedLineStream =
//          new PlainTextByLineStream(new InputStreamReader(System.in));
      ObjectStream<String> untokenizedLineStream;
View Full Code Here

        UimaUtil.BEAM_SIZE_PARAMETER);

    if (beamSize == null)
      beamSize = NameFinderME.DEFAULT_BEAM_SIZE;

    mNameFinder = new NameFinderME(model, beamSize);
  }
View Full Code Here

TOP

Related Classes of opennlp.tools.namefind.NameFinderME

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.