Package org.apache.lucene.store

Examples of org.apache.lucene.store.RAMDirectory
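RAMDirectory is Lucene's in-memory Directory implementation; the test classes excerpted below use it so that each test can build and discard an index without touching the filesystem. All of the snippets target the pre-4.0 API (IndexWriter constructors taking an Analyzer and a MaxFieldLength, plus the Field.Store/Field.Index flags). The following is a minimal, self-contained sketch of that pattern at the same API level; the class name, field name, and query text are illustrative and not taken from any of the snippets.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class RAMDirectoryExample {
  public static void main(String[] args) throws Exception {
    // The whole index lives on the heap; nothing is written to disk.
    Directory dir = new RAMDirectory();

    // create=true starts a fresh index in the (empty) directory.
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
                                         true, IndexWriter.MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    doc.add(new Field("content", "hello ram directory",
                      Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    // Read-only searcher over the in-memory index.
    IndexSearcher searcher = new IndexSearcher(dir, true);
    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "hello")),
                                      null, 10).scoreDocs;
    System.out.println("hits: " + hits.length);   // expected: 1

    searcher.close();
    dir.close();
  }
}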


public class TestSerialization extends TestCase {

  public void test() throws Exception {

    Directory dir = new RAMDirectory();

    IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    doc.add(new Field("foo", "bar rab abr bra rba", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    doc.add(new Field("moo", "bar rab abr bra rba", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
View Full Code Here


    @Override
    public void setUp() throws Exception {
        super.setUp();

        index = new RAMDirectory();
        IndexWriter writer = new IndexWriter(index,
                                             new WhitespaceAnalyzer(),
                                             true, IndexWriter.MaxFieldLength.LIMITED);
        writer.setSimilarity(sim);
View Full Code Here

    /**
     * @throws Exception on error
     */
    public void testGetValuesForIndexedDocument() throws Exception
    {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
        writer.addDocument(makeDocumentWithFields());
        writer.close();

        Searcher searcher = new IndexSearcher(dir, true);
View Full Code Here

      Field field = new Field("id", "id1", Field.Store.YES, Field.Index.NOT_ANALYZED);
      Document doc = new Document();
      doc.add(field);
      doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));

      RAMDirectory dir = new RAMDirectory();
      IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
      writer.addDocument(doc);
      field.setValue("id2");
      writer.addDocument(doc);
      field.setValue("id3");
      writer.addDocument(doc);
      writer.close();

      Searcher searcher = new IndexSearcher(dir, true);

      Query query = new TermQuery(new Term("keyword", "test"));

      // sanity check: the keyword query should match all three documents
      ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
      assertEquals(3, hits.length);
      int result = 0;
      for(int i=0;i<3;i++) {
        Document doc2 = searcher.doc(hits[i].doc);
        Field f = doc2.getField("id");
        if (f.stringValue().equals("id1"))
          result |= 1;
        else if (f.stringValue().equals("id2"))
          result |= 2;
        else if (f.stringValue().equals("id3"))
          result |= 4;
        else
          fail("unexpected id field");
      }
      searcher.close();
      dir.close();
      assertEquals("did not see all IDs", 7, result);
    }
View Full Code Here

  private IndexSearcher searcher;

  @Override
  public void setUp() throws Exception {
    super.setUp();
    directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory,
                                         new SimpleAnalyzer(),
                                         true, IndexWriter.MaxFieldLength.LIMITED);

    Document doc = new Document();
View Full Code Here

              "+partnum:Q36 +space", query.toString("description"));
    assertEquals("doc found!", 1, hits.length);
  }

  public void testMutipleDocument() throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    doc = new Document();
View Full Code Here

public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
  private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
 
  public void testCaching() throws IOException {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    TokenStream stream = new TokenStream() {
      private int index = 0;
      private TermAttribute termAtt = addAttribute(TermAttribute.class);
View Full Code Here

    // Tests whether the DocumentWriter and SegmentMerger correctly enable the
    // payload bit in the FieldInfo
    public void testPayloadFieldBit() throws Exception {
        rnd = newRandom();
        Directory ram = new RAMDirectory();
        PayloadAnalyzer analyzer = new PayloadAnalyzer();
        IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
        Document d = new Document();
        // this field won't have any payloads
        d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
View Full Code Here

    // Tests whether payloads are correctly stored and loaded using both RAMDirectory and FSDirectory
    public void testPayloadsEncoding() throws Exception {
        rnd = newRandom();
        // first perform the test using a RAMDirectory
        Directory dir = new RAMDirectory();
        performTest(dir);
       
        // now use a FSDirectory and repeat same test
        File dirName = _TestUtil.getTempDir("test_payloads");
        dir = FSDirectory.open(dirName);
View Full Code Here
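
The test above runs one test body, performTest, first against a RAMDirectory and then against an FSDirectory. Because both classes implement Directory, indexing code written against the abstraction does not care where the bytes live. Below is a small sketch of that pattern under the same pre-4.0 API; the helper indexOneDoc and the temporary path are illustrative and are not the performTest helper used by the snippet.

import java.io.File;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;

public class DirectorySwapSketch {

  // The indexing code only sees the Directory interface.
  static void indexOneDoc(Directory dir) throws Exception {
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
                                         true, IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("f", "same code, different storage",
                      Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();
  }

  public static void main(String[] args) throws Exception {
    // First entirely in memory ...
    Directory ram = new RAMDirectory();
    indexOneDoc(ram);
    ram.close();

    // ... then on disk, with the identical indexing code.
    File path = new File(System.getProperty("java.io.tmpdir"), "ramdirectory_example");
    Directory fs = FSDirectory.open(path);
    indexOneDoc(fs);
    fs.close();
  }
}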

        rnd = newRandom();
        final int numThreads = 5;
        final int numDocs = 50;
        final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
       
        Directory dir = new RAMDirectory();
        final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
        final String field = "test";
       
        Thread[] ingesters = new Thread[numThreads];
        for (int i = 0; i < numThreads; i++) {
View Full Code Here
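
The snippet above shares a single IndexWriter among several ingester threads, which is allowed: IndexWriter is documented as thread-safe, so concurrent addDocument calls on one instance are fine. Below is a stripped-down sketch of that shape at the same API level; the thread count, document count, and field contents are illustrative.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class ConcurrentIngestSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    // One writer, shared by all ingester threads.
    final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
                                               IndexWriter.MaxFieldLength.LIMITED);
    Thread[] ingesters = new Thread[4];
    for (int i = 0; i < ingesters.length; i++) {
      ingesters[i] = new Thread() {
        @Override
        public void run() {
          try {
            for (int d = 0; d < 10; d++) {
              Document doc = new Document();
              doc.add(new Field("test", "doc " + d,
                                Field.Store.NO, Field.Index.ANALYZED));
              writer.addDocument(doc);   // safe to call concurrently
            }
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      };
      ingesters[i].start();
    }
    for (Thread t : ingesters) {
      t.join();
    }
    writer.close();
    dir.close();
  }
}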
