Package org.apache.kahadb.page

Examples of org.apache.kahadb.page.PageFile
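
The excerpts below come from the KahaDB message store and its tests. They share one pattern: construct a PageFile against a directory and a base file name, tune its write and cache settings, call load(), and then perform all page reads and writes through a Transaction obtained from tx(), flushing afterwards. As a rough orientation, here is a minimal sketch of that lifecycle; the directory, the "example" base name, the Long payload and the final unload() call are illustrative choices rather than anything taken from the excerpts.

    import java.io.File;

    import org.apache.kahadb.page.Page;
    import org.apache.kahadb.page.PageFile;
    import org.apache.kahadb.page.Transaction;
    import org.apache.kahadb.util.LongMarshaller;

    public class PageFileLifecycleSketch {
        public static void main(String[] args) throws Exception {
            File dir = new File("target/pagefile-sketch");    // illustrative location
            dir.mkdirs();

            // Backing data files are created in dir under the given base name.
            PageFile pageFile = new PageFile(dir, "example");
            pageFile.load();

            // Every page read or write happens inside a transaction.
            Transaction tx = pageFile.tx();
            Page<Long> page = tx.allocate();                   // claim a free page
            page.set(42L);
            tx.store(page, LongMarshaller.INSTANCE, true);     // marshal the value into the page
            tx.commit();

            pageFile.flush();                                  // force batched writes out to disk
            pageFile.unload();                                 // close the page file
        }
    }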


    // /////////////////////////////////////////////////////////////////
    // Initialization related implementation methods.
    // /////////////////////////////////////////////////////////////////

    private PageFile createPageFile() {
        PageFile index = new PageFile(directory, "temp-db");
        index.setEnableWriteThread(isEnableIndexWriteAsync());
        index.setWriteBatchSize(getIndexWriteBatchSize());
        index.setEnableDiskSyncs(false);
        index.setEnableRecoveryFile(false);
        return index;
    }


    private void loadPageFile() throws IOException {
        synchronized (indexMutex) {
            final PageFile pageFile = getPageFile();
            pageFile.load();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                public void execute(Transaction tx) throws IOException {
                    if (pageFile.getPageCount() == 0) {
                        // First time this is created.. Initialize the metadata
                        Page<Metadata> page = tx.allocate();
                        assert page.getPageId() == 0;
                        page.set(metadata);
                        metadata.page = page;
                        metadata.state = CLOSED_STATE;
                        metadata.destinations = new BTreeIndex<String, StoredDestination>(pageFile, tx.allocate().getPageId());

                        tx.store(metadata.page, metadataMarshaller, true);
                    } else {
                        Page<Metadata> page = tx.load(0, metadataMarshaller);
                        metadata = page.get();
                        metadata.page = page;
                    }
                    metadata.destinations.setKeyMarshaller(StringMarshaller.INSTANCE);
                    metadata.destinations.setValueMarshaller(new StoredDestinationMarshaller());
                    metadata.destinations.load(tx);
                }
            });
            pageFile.flush();
           
            // Load up all the destinations since we need to scan all the indexes to figure out which journal files can be deleted.
            // Perhaps we should just keep an index of file
            storedDestinations.clear();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                public void execute(Transaction tx) throws IOException {
                    for (Iterator<Entry<String, StoredDestination>> iterator = metadata.destinations.iterator(tx); iterator.hasNext();) {
                        Entry<String, StoredDestination> entry = iterator.next();
                        StoredDestination sd = loadStoredDestination(tx, entry.getKey(), entry.getValue().subscriptions!=null);
                        storedDestinations.put(entry.getKey(), sd);
                        // ...

    // /////////////////////////////////////////////////////////////////
    // Initialization related implementation methods.
    // /////////////////////////////////////////////////////////////////

    private PageFile createPageFile() {
        PageFile index = new PageFile(directory, "db");
        index.setEnableWriteThread(isEnableIndexWriteAsync());
        index.setWriteBatchSize(getIndexWriteBatchSize());
        index.setPageCacheSize(indexCacheSize);
        return index;
    }

    public void setUp() throws Exception {
        ROOT_DIR = new File(IOHelper.getDefaultDataDirectory());
        IOHelper.mkdirs(ROOT_DIR);
        IOHelper.deleteChildren(ROOT_DIR);
       
        pf = new PageFile(ROOT_DIR, getClass().getName());
        pf.load();
    }
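
The matching tearDown is not part of this excerpt. A minimal counterpart, assuming the same pf field, would unload the page file and then remove its backing files, much as the pf.delete() call in the next fragment does:

    public void tearDown() throws Exception {
        if (pf != null) {
            pf.unload();   // close the page file opened in setUp
            pf.delete();   // remove the files it created under ROOT_DIR
        }
    }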

            pf.delete();
        }
    }
   
    protected void createPageFileAndIndex(int pageSize) throws Exception {
        pf = new PageFile(directory, getClass().getName());
        pf.setPageSize(pageSize);
        pf.load();
        tx = pf.tx();
        this.index = createIndex();
    }
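
createIndex() itself is not shown here. A plausible shape for it, assuming a BTreeIndex<String, Long> built on the freshly loaded page file (the key and value types, the LongMarshaller, and the return type are assumptions; it uses org.apache.kahadb.index.BTreeIndex and the marshallers from org.apache.kahadb.util), would be:

    protected BTreeIndex<String, Long> createIndex() throws Exception {
        // Reserve a page to hold the root of the B-tree.
        long rootPageId = tx.allocate().getPageId();
        BTreeIndex<String, Long> index = new BTreeIndex<String, Long>(pf, rootPageId);
        index.setKeyMarshaller(StringMarshaller.INSTANCE);
        index.setValueMarshaller(LongMarshaller.INSTANCE);
        index.load(tx);
        tx.commit();
        return index;
    }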

    private void loadPageFile() throws IOException {
        this.indexLock.writeLock().lock();
        try {
            final PageFile pageFile = getPageFile();
            pageFile.load();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                public void execute(Transaction tx) throws IOException {
                    if (pageFile.getPageCount() == 0) {
                        // First time this is created.. Initialize the metadata
                        Page<Metadata> page = tx.allocate();
                        assert page.getPageId() == 0;
                        page.set(metadata);
                        metadata.page = page;
                        metadata.state = CLOSED_STATE;
                        metadata.destinations = new BTreeIndex<String, StoredDestination>(pageFile, tx.allocate().getPageId());

                        tx.store(metadata.page, metadataMarshaller, true);
                    } else {
                        Page<Metadata> page = tx.load(0, metadataMarshaller);
                        metadata = page.get();
                        metadata.page = page;
                    }
                    metadata.destinations.setKeyMarshaller(StringMarshaller.INSTANCE);
                    metadata.destinations.setValueMarshaller(new StoredDestinationMarshaller());
                    metadata.destinations.load(tx);
                }
            });
            pageFile.flush();
           
            // Load up all the destinations since we need to scan all the indexes to figure out which journal files can be deleted.
            // Perhaps we should just keep an index of file
            storedDestinations.clear();
            pageFile.tx().execute(new Transaction.Closure<IOException>() {
                public void execute(Transaction tx) throws IOException {
                    for (Iterator<Entry<String, StoredDestination>> iterator = metadata.destinations.iterator(tx); iterator.hasNext();) {
                        Entry<String, StoredDestination> entry = iterator.next();
                        StoredDestination sd = loadStoredDestination(tx, entry.getKey(), entry.getValue().subscriptions!=null);
                        storedDestinations.put(entry.getKey(), sd);
                        // ...

                this.journal = new Journal();
                this.journal.setDirectory(directory);
                this.journal.setMaxFileLength(getJournalMaxFileLength());
                this.journal.setWriteBatchSize(getJournalMaxWriteBatchSize());
                this.journal.start();
                this.pageFile = new PageFile(directory, "tmpDB");
                this.pageFile.load();

                this.pageFile.tx().execute(new Transaction.Closure<IOException>() {
                    public void execute(Transaction tx) throws IOException {
                        if (pageFile.getPageCount() == 0) {
                            // ...

        final int numMessages = 2750;

        KahaDBPersistenceAdapter kahaDBPersistenceAdapter = (KahaDBPersistenceAdapter)broker.getPersistenceAdapter();
        PageFile pageFile = kahaDBPersistenceAdapter.getStore().getPageFile();
        LOG.info("PageCount " + pageFile.getPageCount() + " f:" + pageFile.getFreePageCount() + ", fileSize:" + pageFile.getFile().length());

        long lastDiff = 0;
        for (int repeats=0; repeats<2; repeats++) {

            LOG.info("Iteration: "+ repeats  + " Count:" + pageFile.getPageCount() + " f:" + pageFile.getFreePageCount());

            Connection con = createConnection("cliId1" + "-" + repeats);
            Session session = con.createSession(false, Session.AUTO_ACKNOWLEDGE);
            session.createDurableSubscriber(topic, "SubsId", "filter = 'true'", true);
            session.close();
            con.close();

            // send messages
            con = createConnection();
            session = con.createSession(false, Session.AUTO_ACKNOWLEDGE);
            MessageProducer producer = session.createProducer(null);

            for (int i = 0; i < numMessages; i++) {
                Message message = session.createMessage();
                message.setStringProperty("filter", "true");
                producer.send(topic, message);
            }
            con.close();

            Connection con2 = createConnection("cliId1" + "-" + repeats);
            Session session2 = con2.createSession(false, Session.AUTO_ACKNOWLEDGE);
            session2.unsubscribe("SubsId");
            session2.close();
            con2.close();

            LOG.info("PageCount " + pageFile.getPageCount() + " f:" + pageFile.getFreePageCount() " diff: " + (pageFile.getPageCount() - pageFile.getFreePageCount()) + " fileSize:" + pageFile.getFile().length());

            if (lastDiff != 0) {
                assertEquals("Only use X pages per iteration: " + repeats, lastDiff, pageFile.getPageCount() - pageFile.getFreePageCount());
            }
            lastDiff = pageFile.getPageCount() - pageFile.getFreePageCount();
        }
    }

                this.journal = new Journal();
                this.journal.setDirectory(directory);
                this.journal.setMaxFileLength(getJournalMaxFileLength());
                this.journal.setWriteBatchSize(getJournalMaxWriteBatchSize());
                this.journal.start();
                this.pageFile = new PageFile(directory, "tmpDB");
                this.pageFile.setEnablePageCaching(getIndexEnablePageCaching());
                this.pageFile.setPageSize(getIndexPageSize());
                this.pageFile.setWriteBatchSize(getIndexWriteBatchSize());
                this.pageFile.setPageCacheSize(getIndexCacheSize());
                this.pageFile.load();
