
Examples of org.apache.lucene.store.RAMDirectory
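
RAMDirectory is a Directory implementation that keeps the entire index in memory, which makes it a common choice for unit tests and small, short-lived indexes. The snippets below are excerpts from Lucene's own test suite and use the Lucene 2.9/3.0-era API. As a starting point, here is a minimal, self-contained sketch of indexing and searching through a RAMDirectory; the class name, field name, and analyzer are arbitrary choices for illustration.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.RAMDirectory;

public class RAMDirectoryExample {
  public static void main(String[] args) throws Exception {
    // the whole index lives in JVM heap memory; nothing touches the file system
    RAMDirectory dir = new RAMDirectory();

    // create a new index (third argument = true) and add a single document
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
        IndexWriter.MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    doc.add(new Field("body", "blueberry pie", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    // search the in-memory index and print the hit count
    IndexSearcher searcher = new IndexSearcher(dir, true); // true = read-only
    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("body", "pie")), 10).scoreDocs;
    System.out.println("hits: " + hits.length);
    searcher.close();
    dir.close();
  }
}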


      // this tests against bug 33161 (now fixed)
      // In order to cause the bug, the outer query must have more than one term
      // and all terms required.
      // The contained MultiPhraseQuery must contain exactly one term array.

      RAMDirectory indexStore = new RAMDirectory();
      IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
      add("blueberry pie", writer);
      add("blueberry chewing gum", writer);
      add("blue raspberry pie", writer);
      writer.optimize();
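The excerpt above stops before the query is constructed. Based on the comment, a hedged sketch of the missing part could look like the following; the "body" field name comes from the test's add() helper, which is not shown here, so treat it and the searcher setup as assumptions. It produces the hits array that the next fragment checks.

      writer.close();

      IndexSearcher searcher = new IndexSearcher(indexStore, true);

      // outer BooleanQuery: more than one clause, all required
      BooleanQuery q = new BooleanQuery();
      q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);

      // inner MultiPhraseQuery containing exactly one term array
      MultiPhraseQuery trouble = new MultiPhraseQuery();
      trouble.add(new Term[] { new Term("body", "blueberry"), new Term("body", "blue") });
      q.add(trouble, BooleanClause.Occur.MUST);

      // without the fix for bug 33161, this search threw an exception
      ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;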


      assertEquals("Wrong number of hits", 2, hits.length);
      searcher.close();
  }
   
  public void testPhrasePrefixWithBooleanQuery() throws IOException {
    RAMDirectory indexStore = new RAMDirectory();
    IndexWriter writer = new IndexWriter(indexStore, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.LIMITED);
    add("This is a test", "object", writer);
    add("a note", "note", writer);
    writer.close();
   

  }

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    dir = new RAMDirectory();
  }

  }

  @Override
  protected void setUp() throws Exception {
    RAMDirectory rd = new RAMDirectory();
    IndexWriter w = new IndexWriter(rd, analyzer, MaxFieldLength.UNLIMITED);
    for (int i = 0; i < docsContent.length; i++) {
      Document doc = new Document();
      doc.add(new Field("name", docsContent[i].name, Field.Store.YES,
          Field.Index.ANALYZED));
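The setUp above is cut off inside the loop. A hedged guess at how it finishes; the searcher field is an assumption and does not appear in the excerpt.

      // presumably each document is added and the writer closed afterwards
      w.addDocument(doc);
    }
    w.close();
    searcher = new IndexSearcher(rd, true); // assumed field holding the searcher under test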

    public TestIndexReader(String name) {
        super(name);
    }
   
    public void testCommitUserData() throws Exception {
      RAMDirectory d = new MockRAMDirectory(); // test-only RAMDirectory subclass with extra consistency checks

      Map<String,String> commitUserData = new HashMap<String,String>();
      commitUserData.put("foo", "fighters");
     
      // set up writer
      IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
      writer.setMaxBufferedDocs(2);
      for(int i=0;i<27;i++)
        addDocumentWithFields(writer);
      writer.close();
     
      IndexReader r = IndexReader.open(d, false);
      r.deleteDocument(5);
      r.flush(commitUserData);
      r.close();
     
      SegmentInfos sis = new SegmentInfos();
      sis.read(d);
      IndexReader r2 = IndexReader.open(d, false);
      IndexCommit c = r.getIndexCommit();
      assertEquals(c.getUserData(), commitUserData);

      assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());

      assertTrue(c.equals(r.getIndexCommit()));

      // Change the index
      writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
      writer.setMaxBufferedDocs(2);
      for(int i=0;i<7;i++)
        addDocumentWithFields(writer);
      writer.close();

      IndexReader r3 = r2.reopen();
      assertFalse(c.equals(r3.getIndexCommit()));
      assertFalse(r2.getIndexCommit().isOptimized());
      r3.close();

      writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
      writer.optimize();
      writer.close();

      r3 = r2.reopen();
      assertTrue(r3.getIndexCommit().isOptimized());
      r2.close();
      r3.close();
      d.close();
    }
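As a possible follow-up to the commit-user-data test: right after the r.flush(commitUserData) call, the same map can be read back without keeping a reader open. This is a hedged sketch that assumes the static IndexReader.getCommitUserData(Directory) method from this API era.

      // hedged sketch: read the latest commit's user data straight from the directory
      Map<String, String> latest = IndexReader.getCommitUserData(d);
      assertEquals("fighters", latest.get("foo"));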

      d.close();
    }
   
    public void testIsCurrent() throws Exception
    {
      RAMDirectory d = new MockRAMDirectory();
      IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
      addDocumentWithFields(writer);
      writer.close();
      // set up reader:
      IndexReader reader = IndexReader.open(d, false);
      assertTrue(reader.isCurrent());
      // modify index by adding another document:
      writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
      addDocumentWithFields(writer);
      writer.close();
      assertFalse(reader.isCurrent());
      // re-create index:
      writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
      addDocumentWithFields(writer);
      writer.close();
      assertFalse(reader.isCurrent());
      reader.close();
      d.close();
    }

    /**
     * Tests the IndexReader.getFieldNames implementation
     * @throws Exception on error
     */
    public void testGetFieldNames() throws Exception
    {
        RAMDirectory d = new MockRAMDirectory();
        // set up writer
        IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
        addDocumentWithFields(writer);
        writer.close();
        // set up reader
        IndexReader reader = IndexReader.open(d, false);
        Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        reader.close();
        // add more documents
        writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
        // want to get some more segments here
        for (int i = 0; i < 5*writer.getMergeFactor(); i++)
        {
            addDocumentWithFields(writer);
        }
        // new fields are in some different segments (we hope)
        for (int i = 0; i < 5*writer.getMergeFactor(); i++)
        {
            addDocumentWithDifferentFields(writer);
        }
        // new termvector fields
        for (int i = 0; i < 5*writer.getMergeFactor(); i++)
        {
          addDocumentWithTermVectorFields(writer);
        }
       
        writer.close();
        // verify fields again
        reader = IndexReader.open(d, false);
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        assertEquals(13, fieldNames.size());    // the following fields
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unindexed2"));
        assertTrue(fieldNames.contains("unstored2"));
        assertTrue(fieldNames.contains("tvnot"));
        assertTrue(fieldNames.contains("termvector"));
        assertTrue(fieldNames.contains("tvposition"));
        assertTrue(fieldNames.contains("tvoffset"));
        assertTrue(fieldNames.contains("tvpositionoffset"));
       
        // verify that only indexed fields were returned
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
        assertEquals(11, fieldNames.size());    // 6 original + the 5 termvector fields
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unstored2"));
        assertTrue(fieldNames.contains("tvnot"));
        assertTrue(fieldNames.contains("termvector"));
        assertTrue(fieldNames.contains("tvposition"));
        assertTrue(fieldNames.contains("tvoffset"));
        assertTrue(fieldNames.contains("tvpositionoffset"));
       
        // verify that only unindexed fields were returned
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.UNINDEXED);
        assertEquals(2, fieldNames.size());    // the following fields
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unindexed2"));
               
        // verify the indexed term vector fields
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR);
        assertEquals(1, fieldNames.size());    // 1 field has term vector only
        assertTrue(fieldNames.contains("termvector"));
       
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION);
        assertEquals(1, fieldNames.size());    // only "tvposition" stores term vectors with positions (and no offsets)
        assertTrue(fieldNames.contains("tvposition"));
       
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET);
        assertEquals(1, fieldNames.size());    // only "tvoffset" stores term vectors with offsets (and no positions)
        assertTrue(fieldNames.contains("tvoffset"));
               
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET);
        assertEquals(1, fieldNames.size());    // only "tvpositionoffset" stores term vectors with both positions and offsets
        assertTrue(fieldNames.contains("tvpositionoffset"));
        reader.close();
        d.close();
    }

        reader.close();
        d.close();
    }

  public void testTermVectors() throws Exception {
    RAMDirectory d = new MockRAMDirectory();
    // set up writer
    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
    // want to get some more segments here
    // new termvector fields
    for (int i = 0; i < 5 * writer.getMergeFactor(); i++) {
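The loop body is truncated. A hedged sketch of the kind of document it presumably adds, reusing the term vector field name asserted in the previous test; the text content is made up.

      Document doc = new Document();
      doc.add(new Field("termvector", "one two two three three three",
          Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
      writer.addDocument(doc);
    }
    writer.close();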

        dir.close();
    }
   
    public void testBinaryFields() throws IOException
    {
        Directory dir = new RAMDirectory();
        byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
       
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       
        for (int i = 0; i < 10; i++) {
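The loop is truncated; storing the byte array as a binary field would look roughly like this. The field names are illustrative, and in this API era binary fields can only be stored, not indexed.

            Document doc = new Document();
            doc.add(new Field("bin1", bin, Field.Store.YES));  // binary fields can only be stored
            doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
            writer.addDocument(doc);
        }
        writer.close();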

    // Make sure attempts to make changes after the reader is
    // closed throw an exception:
    public void testChangesAfterClose() throws IOException
    {
        Directory dir = new RAMDirectory();

        IndexWriter writer = null;
        IndexReader reader = null;
        Term searchTerm = new Term("content", "aaa");
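The test body is cut off. The pattern it exercises is roughly the following hedged sketch; note that in this API era the failure actually surfaces as an AlreadyClosedException (a RuntimeException thrown by ensureOpen()) rather than an IOException. The full test would first create and populate an index through writer.

        reader = IndexReader.open(dir, false);
        reader.close();
        try {
            reader.deleteDocuments(searchTerm);   // any modification attempt after close()
            fail("expected an exception from a closed reader");
        } catch (org.apache.lucene.store.AlreadyClosedException e) {
            // expected: ensureOpen() rejects the call
        }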
