Package org.apache.lucene.store

Examples of org.apache.lucene.store.MockDirectoryWrapper
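MockDirectoryWrapper is the test-framework Directory used throughout the snippets below: it wraps a real Directory (often a RAMDirectory) and lets tests inject faults, track disk usage, and verify that files are closed and referenced correctly. The tests use helpers such as newDirectory() from LuceneTestCase; a minimal sketch of constructing the wrapper directly (the seed and class name are illustrative):

import java.util.Random;

import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;

public class MockDirectoryWrapperSketch {
  public static void main(String[] args) throws Exception {
    // Wrap an in-memory directory so the test framework can inject
    // faults and track resource usage during a test.
    MockDirectoryWrapper dir =
        new MockDirectoryWrapper(new Random(42), new RAMDirectory());
    dir.setTrackDiskUsage(true); // record peak bytes used on "disk"
    // ... run indexing code against dir here ...
    dir.close(); // fails the test if files were left open
  }
}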


  /**
   * Make sure optimize doesn't use more than 4X the
   * starting index size as its temporary free space
   * required.
   */
  public void testOptimizeTempSpaceUsage() throws IOException {

    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy()));
    if (VERBOSE) {
      System.out.println("TEST: config1=" + writer.getConfig());
    }

    for(int j=0;j<500;j++) {
      TestIndexWriter.addDocWithIndex(writer, j);
    }
    final int termIndexInterval = writer.getConfig().getTermIndexInterval();
    // force one extra segment w/ different doc store so
    // we see the doc stores get merged
    writer.commit();
    TestIndexWriter.addDocWithIndex(writer, 500);
    writer.close();

    if (VERBOSE) {
      System.out.println("TEST: start disk usage");
    }
    long startDiskUsage = 0;
    String[] files = dir.listAll();
    for(int i=0;i<files.length;i++) {
      startDiskUsage += dir.fileLength(files[i]);
      if (VERBOSE) {
        System.out.println(files[i] + ": " + dir.fileLength(files[i]));
      }
    }

    dir.resetMaxUsedSizeInBytes();
    dir.setTrackDiskUsage(true);

    // Important to use the same term index interval, else a
    // smaller one here could increase the disk usage and
    // cause a false failure:
    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy()));
    writer.setInfoStream(VERBOSE ? System.out : null);
    writer.optimize();
    writer.close();
    long maxDiskUsage = dir.getMaxUsedSizeInBytes();
    assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
               maxDiskUsage <= 4*startDiskUsage);
    dir.close();
  }
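
The disk-usage assertion above follows a reusable pattern: reset the wrapper's high-water mark, enable tracking, run the operation, then read back the peak. A hypothetical helper capturing that pattern (PeakUsage, IOAction, and peakUsageOf are illustrative names, not part of the test framework):

import java.io.IOException;

import org.apache.lucene.store.MockDirectoryWrapper;

public class PeakUsage {
  // Hypothetical action type so the measured body can throw IOException:
  interface IOAction { void run() throws IOException; }

  // Hypothetical helper: measure the peak disk usage of an action
  // run against a MockDirectoryWrapper, as the test above does for
  // optimize().
  static long peakUsageOf(MockDirectoryWrapper dir, IOAction action)
      throws IOException {
    dir.resetMaxUsedSizeInBytes(); // forget any earlier high-water mark
    dir.setTrackDiskUsage(true);   // start recording peak usage
    action.run();                  // e.g. open a writer and optimize()
    return dir.getMaxUsedSizeInBytes();
  }
}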


    dir1.close();
  }

  // Stress test reopen during addIndexes
  public void testDuringAddIndexes() throws Exception {
    MockDirectoryWrapper dir1 = newDirectory();
    final IndexWriter writer = new IndexWriter(
        dir1,
        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMergePolicy(newLogMergePolicy(2))
    );
    writer.setInfoStream(infoStream);
    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2);

    // create the index
    createIndexNoClose(false, "test", writer);
    writer.commit();

    final Directory[] dirs = new Directory[10];
    for (int i=0;i<10;i++) {
      dirs[i] = new MockDirectoryWrapper(random, new RAMDirectory(dir1));
    }

    IndexReader r = writer.getReader();

    final int NUM_THREAD = 5;
    final float SECONDS = 0.5f;

    final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
    final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());

    final Thread[] threads = new Thread[NUM_THREAD];
    for(int i=0;i<NUM_THREAD;i++) {
      threads[i] = new Thread() {
          @Override
          public void run() {
            do {
              try {
                writer.addIndexes(dirs);
                writer.maybeMerge();
              } catch (Throwable t) {
                excs.add(t);
                throw new RuntimeException(t);
              }
            } while(System.currentTimeMillis() < endTime);
          }
        };
      threads[i].setDaemon(true);
      threads[i].start();
    }

    int lastCount = 0;
    while(System.currentTimeMillis() < endTime) {
      IndexReader r2 = r.reopen();
      if (r2 != r) {
        r.close();
        r = r2;
      }
      Query q = new TermQuery(new Term("indexname", "test"));
      IndexSearcher searcher = newSearcher(r);
      final int count = searcher.search(q, 10).totalHits;
      searcher.close();
      assertTrue(count >= lastCount);
      lastCount = count;
    }

    for(int i=0;i<NUM_THREAD;i++) {
      threads[i].join();
    }
    // final check
    IndexReader r2 = r.reopen();
    if (r2 != r) {
      r.close();
      r = r2;
    }
    Query q = new TermQuery(new Term("indexname", "test"));
    IndexSearcher searcher = newSearcher(r);
    final int count = searcher.search(q, 10).totalHits;
    searcher.close();
    assertTrue(count >= lastCount);

    assertEquals(0, excs.size());
    r.close();
    assertEquals(0, dir1.getOpenDeletedFiles().size());

    writer.close();

    dir1.close();
  }
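
The test above leans on the reopen idiom twice: reopen() returns the same instance when the index is unchanged, so the old reader may only be closed when a different one comes back. A hypothetical helper wrapping that idiom:

import java.io.IOException;

import org.apache.lucene.index.IndexReader;

public class ReaderRefresh {
  // Hypothetical helper for the reopen idiom used twice above.
  static IndexReader refresh(IndexReader r) throws IOException {
    IndexReader r2 = r.reopen();
    if (r2 != r) {
      r.close(); // a new instance came back; release the old reader
      return r2;
    }
    return r; // index unchanged; keep using the same reader
  }
}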

          @Override
          public void run() {
            try {
              final Directory[] dirs = new Directory[numDirs];
              for (int k = 0; k < numDirs; k++)
                dirs[k] = new MockDirectoryWrapper(random, new RAMDirectory(addDir));
              //int j = 0;
              //while (true) {
                // System.out.println(Thread.currentThread().getName() + ": iter j=" + j);
                for (int x=0; x < numIter; x++) {

    return out;
  }

  private static Directory makeEmptyIndex(Random random, final int numDeletedDocs)
    throws IOException {
    Directory d = new MockDirectoryWrapper(random, new RAMDirectory());
    IndexWriter w = new IndexWriter(d, new IndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));

    for (int i = 0; i < numDeletedDocs; i++) {
      w.addDocument(new Document());

  }

  // Make sure running BG merges still work fine even when
  // we are hitting exceptions during flushing.
  public void testFlushExceptions() throws IOException {
    MockDirectoryWrapper directory = newDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.failOn(failure);

    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
    writer.setInfoStream(VERBOSE ? System.out : null);
    Document doc = new Document();
    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);
    int extraCount = 0;

    for(int i=0;i<10;i++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + i);
      }

      for(int j=0;j<20;j++) {
        idField.setValue(Integer.toString(i*20+j));
        writer.addDocument(doc);
      }

      // must cycle here because sometimes the merge flushes
      // the doc we just added and so there's nothing to
      // flush, and we don't hit the exception
      while(true) {
        writer.addDocument(doc);
        failure.setDoFail();
        try {
          writer.flush(true, true);
          if (failure.hitExc) {
            fail("failed to hit IOException");
          }
          extraCount++;
        } catch (IOException ioe) {
          if (VERBOSE) {
            ioe.printStackTrace(System.out);
          }
          failure.clearDoFail();
          break;
        }
      }
      assertEquals(20*(i+1)+extraCount, writer.numDocs());
    }

    writer.close();
    IndexReader reader = IndexReader.open(directory, true);
    assertEquals(200+extraCount, reader.numDocs());
    reader.close();
    directory.close();
  }
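
FailOnlyOnFlush itself is not shown on this page. It is a MockDirectoryWrapper.Failure, the hook that failOn() installs; the wrapper calls eval() during directory operations, and the failure may throw. A rough sketch of such a failure, assuming the real class decides by inspecting the call stack (the class body here is illustrative, not the actual test class):

import java.io.IOException;

import org.apache.lucene.store.MockDirectoryWrapper;

// Sketch of a flush-only failure: armed via the inherited
// setDoFail()/clearDoFail(), it throws the first time it sees a
// flush frame on the call stack.
class FailOnlyOnFlushSketch extends MockDirectoryWrapper.Failure {
  volatile boolean hitExc;

  @Override
  public void eval(MockDirectoryWrapper dir) throws IOException {
    if (doFail) { // protected field toggled by setDoFail()/clearDoFail()
      for (StackTraceElement frame : new Exception().getStackTrace()) {
        if ("flush".equals(frame.getMethodName())) {
          hitExc = true;
          throw new IOException("injected failure during flush");
        }
      }
    }
  }
}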

  }

  // Test that deletes committed after a merge started and
  // before it finishes, are correctly merged back:
  public void testDeleteMerging() throws IOException {
    MockDirectoryWrapper directory = newDirectory();

    LogDocMergePolicy mp = new LogDocMergePolicy();
    // Force degenerate merging so we can get a mix of
    // merging of segments with and without deletes at the
    // start:
    mp.setMinMergeDocs(1000);
    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMergePolicy(mp));
    writer.setInfoStream(VERBOSE ? System.out : null);

    Document doc = new Document();
    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);
    for(int i=0;i<10;i++) {
      if (VERBOSE) {
        System.out.println("\nTEST: cycle");
      }
      for(int j=0;j<100;j++) {
        idField.setValue(Integer.toString(i*100+j));
        writer.addDocument(doc);
      }

      int delID = i;
      while(delID < 100*(1+i)) {
        if (VERBOSE) {
          System.out.println("TEST: del " + delID);
        }
        writer.deleteDocuments(new Term("id", ""+delID));
        delID += 10;
      }

      writer.commit();
    }

    writer.close();
    IndexReader reader = IndexReader.open(directory, true);
    // Verify that we did not lose any deletes: cycle i deletes the
    // 10*(i+1) ids congruent to i (mod 10) below 100*(i+1), so 550 of
    // the 1000 added docs are gone:
    assertEquals(450, reader.numDocs());
    reader.close();
    directory.close();
  }

    reader.close();
    directory.close();
  }

  public void testNoExtraFiles() throws IOException {
    MockDirectoryWrapper directory = newDirectory();
    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .setMaxBufferedDocs(2));
    writer.setInfoStream(VERBOSE ? System.out : null);

    for(int iter=0;iter<7;iter++) {
      if (VERBOSE) {
        System.out.println("TEST: iter=" + iter);
      }

      for(int j=0;j<21;j++) {
        Document doc = new Document();
        doc.add(newField("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
      }
       
      writer.close();
      TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles");

      // Reopen
      writer = new IndexWriter(directory, newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
          .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(2));
      writer.setInfoStream(VERBOSE ? System.out : null);
    }

    writer.close();

    directory.close();
  }
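
assertNoUnreferencedFiles comes from TestIndexWriter and is not shown here. A rough sketch of what such a check can do, assuming it runs inside a LuceneTestCase subclass (so TEST_VERSION_CURRENT and MockAnalyzer are in scope); the real helper differs in detail:

import java.io.IOException;
import java.util.Arrays;
import java.util.Random;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;

// Sketch: snapshot the listing, let a writer open and roll back
// (opening deletes any unreferenced leftovers), then require the
// listing to be unchanged.
static void assertNoUnreferencedFilesSketch(Directory dir) throws IOException {
  String[] before = dir.listAll();
  new IndexWriter(dir, new IndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer(new Random()))).rollback();
  String[] after = dir.listAll();
  Arrays.sort(before);
  Arrays.sort(after);
  if (!Arrays.equals(before, after)) {
    throw new AssertionError("unreferenced files deleted: "
        + Arrays.toString(before) + " -> " + Arrays.toString(after));
  }
}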

    directory.close();
  }

  public void testNoWaitClose() throws IOException {
    MockDirectoryWrapper directory = newDirectory();
    Document doc = new Document();
    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
    doc.add(idField);

    IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMaxBufferedDocs(2).
            setMergePolicy(newLogMergePolicy(100))
    );

    for(int iter=0;iter<10;iter++) {

      for(int j=0;j<201;j++) {
        idField.setValue(Integer.toString(iter*201+j));
        writer.addDocument(doc);
      }

      int delID = iter*201;
      for(int j=0;j<20;j++) {
        writer.deleteDocuments(new Term("id", Integer.toString(delID)));
        delID += 5;
      }

      // Force a bunch of merge threads to kick off so we
      // stress out aborting them on close:
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
      writer.addDocument(doc);
      writer.commit();

      writer.close(false);

      IndexReader reader = IndexReader.open(directory, true);
      // Each cycle adds 201+1 docs and deletes 20, leaving 182:
      assertEquals((1+iter)*182, reader.numDocs());
      reader.close();

      // Reopen
      writer = new IndexWriter(
          directory,
          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
              setOpenMode(OpenMode.APPEND).
              setMergePolicy(newLogMergePolicy(100))
      );
    }
    writer.close();

    directory.close();
  }

  /**
   * Recursively looks for indexes underneath <code>file</code>,
   * and runs CheckIndex on them. Returns true if it found any indexes.
   */
  public boolean checkIndexes(File file) throws IOException {
    if (file.isDirectory()) {
      MockDirectoryWrapper dir = newFSDirectory(file);
      dir.setCheckIndexOnClose(false); // don't double-checkindex
      if (IndexReader.indexExists(dir)) {
        if (VERBOSE) {
          System.err.println("Checking index: " + file);
        }
        _TestUtil.checkIndex(dir);
        dir.close();
        return true;
      }
      dir.close();
      for (File f : file.listFiles())
        if (checkIndexes(f))
          return true;
    }
    return false;
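
A hypothetical call site for the recursive check above, failing the test when no index is found under the given root (the path is illustrative):

File root = new File("build/test-tmp"); // illustrative temp-dir path
assertTrue("no index found under " + root, checkIndexes(root));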

  }

  // LUCENE-1072: make sure an errant exception on flushing
  // one segment only takes out those docs in that one flush
  public void testDocumentsWriterAbort() throws IOException {
    MockDirectoryWrapper dir = newMockDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    failure.setDoFail();
    dir.failOn(failure);

    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2));
    Document doc = new Document();
    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
    doc.add(newTextField("content", contents, Field.Store.NO));
    boolean hitError = false;
    for(int i=0;i<200;i++) {
      try {
        writer.addDocument(doc);
      } catch (IOException ioe) {
        // only one flush should fail:
        assertFalse(hitError);
        hitError = true;
      }
    }
    assertTrue(hitError);
    writer.close();
    IndexReader reader = DirectoryReader.open(dir);
    // 200 adds minus the 2 buffered docs lost in the one aborted flush:
    assertEquals(198, reader.docFreq(new Term("content", "aa")));
    reader.close();
    dir.close();
  }
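
Targeted Failure hooks are one option; MockDirectoryWrapper can also inject IOExceptions at random, which suits broader crash-recovery testing. A minimal sketch (seed, rate, and class name are illustrative):

import java.util.Random;

import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;

public class RandomFaults {
  public static void main(String[] args) throws Exception {
    MockDirectoryWrapper dir =
        new MockDirectoryWrapper(new Random(17), new RAMDirectory());
    // Roughly 10% of eligible operations will throw IOException:
    dir.setRandomIOExceptionRate(0.1);
    // ... exercise an IndexWriter against dir and assert it recovers ...
    dir.setRandomIOExceptionRate(0.0); // disarm before closing
    dir.close();
  }
}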
