Package org.h2.store

Examples of org.h2.store.Data
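
The snippets below are taken from the H2 database engine. They all follow the same pattern: org.h2.store.Data is a growable byte buffer with typed read/write methods, typically filled with a length-prefixed record, written out through a FileStore, and later read back. A minimal sketch of that round-trip, assuming a DataHandler named handler (the Database and tool objects passed to Data.create in the excerpts play this role), an open FileStore named file, and a hypothetical payload value v:

    // write side: reserve a length prefix, append the payload, patch the length in
    Data buff = Data.create(handler, SysProperties.PAGE_SIZE);
    buff.writeInt(0);                        // placeholder for the record length
    buff.checkCapacity(buff.getValueLen(v)); // grow the buffer if needed
    buff.writeValue(v);
    int len = buff.length();
    buff.setInt(0, len);                     // patch the real length into the prefix
    file.write(buff.getBytes(), 0, len);

    // read side (after seeking back to the start of the record):
    buff.reset();
    file.readFully(buff.getBytes(), 0, len);
    int storedLen = buff.readInt();          // the length written above
    Value value = buff.readValue();          // the payload value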


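// Refilling an in-memory row list from a temp file (apparently org.h2.result.RowList):
// the first FILE_BLOCK_SIZE bytes carry a length prefix, stored in FILE_BLOCK_SIZE
// units, telling how many more bytes to read before deserializing rows until
// readRow returns null.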
            r = list.get(index++);
        } else {
            if (listIndex >= list.size()) {
                list.clear();
                listIndex = 0;
                Data buff = rowBuff;
                buff.reset();
                int min = Constants.FILE_BLOCK_SIZE;
                file.readFully(buff.getBytes(), 0, min);
                int len = buff.readInt() * Constants.FILE_BLOCK_SIZE;
                buff.checkCapacity(len);
                if (len - min > 0) {
                    file.readFully(buff.getBytes(), min, len - min);
                }
                while (true) {
                    r = readRow(buff);
                    if (r == null) {
                        break;
                        // ... (excerpt truncated)


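// Loading spilled undo log records back into memory (apparently org.h2.engine.UndoLog):
// seek to the most recently stored block and deserialize records until the whole
// buffer has been consumed.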
                int last = storedEntriesPos.size() - 1;
                long pos = storedEntriesPos.get(last);
                storedEntriesPos.remove(last);
                long end = file.length();
                int bufferLength = (int) (end - pos);
                Data buff = Data.create(database, bufferLength);
                file.seek(pos);
                file.readFully(buff.getBytes(), 0, bufferLength);
                while (buff.length() < bufferLength) {
                    UndoLogRecord e = UndoLogRecord.loadFromBuffer(buff, this);
                    records.add(e);
                    memoryUndo++;
                }
                storedEntries -= records.size();
                // ... (excerpt truncated)

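// Spilling undo log records to a temp file once the in-memory limit is reached:
// records are appended to a page-sized Data buffer, which is flushed whenever it
// grows past UNDO_BLOCK_SIZE (or the last record has been written).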
                if (file == null) {
                    String fileName = database.createTempFile();
                    file = database.openFile(fileName, "rw", false);
                    file.setLength(FileStore.HEADER_LENGTH);
                }
                Data buff = Data.create(database, SysProperties.PAGE_SIZE);
                for (int i = 0; i < records.size(); i++) {
                    UndoLogRecord r = records.get(i);
                    buff.checkCapacity(SysProperties.PAGE_SIZE);
                    r.append(buff, this);
                    if (i == records.size() - 1 || buff.length() > Constants.UNDO_BLOCK_SIZE) {
                        storedEntriesPos.add(file.getFilePointer());
                        file.write(buff.getBytes(), 0, buff.length());
                        buff.reset();
                    }
                }
                storedEntries += records.size();
                memoryUndo = 0;
                records.clear();
                file.autoDelete();
                return;
            }
        } else {
            if (!entry.isStored()) {
                memoryUndo++;
            }
            if (memoryUndo > database.getMaxMemoryUndo() && database.isPersistent() && !database.isMultiVersion()) {
                if (file == null) {
                    String fileName = database.createTempFile();
                    file = database.openFile(fileName, "rw", false);
                    file.seek(FileStore.HEADER_LENGTH);
                    rowBuff = Data.create(database, SysProperties.PAGE_SIZE);
                    Data buff = rowBuff;
                    for (int i = 0; i < records.size(); i++) {
                        UndoLogRecord r = records.get(i);
                        saveIfPossible(r, buff);
                    }
                } else {
                    // ... (excerpt truncated)

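// Writing buffered rows to a temp file (again apparently org.h2.result.RowList):
// the shared rowBuff is flushed and re-initialized whenever it exceeds IO_BUFFER_SIZE.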
            file = db.openFile(fileName, "rw", false);
            file.seek(FileStore.HEADER_LENGTH);
            rowBuff = Data.create(db, SysProperties.PAGE_SIZE);
        }
        Data buff = rowBuff;
        initBuffer(buff);
        for (int i = 0; i < list.size(); i++) {
            if (i > 0 && buff.length() > Constants.IO_BUFFER_SIZE) {
                flushBuffer(buff);
                initBuffer(buff);
            }
            Row r = list.get(i);
            writeRow(buff, r);
            // ... (excerpt truncated)

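// Reading a PageStore file header in the recovery tool (apparently org.h2.tools.Recover):
// the page size and format versions sit at fixed offsets in the first 128 bytes; the
// loop over pages 1 and 2 then picks the transaction log position whose CRC checks out.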
            try {
                store.init();
            } catch (Exception e) {
                writeError(writer, e);
            }
            Data s = Data.create(this, 128);
            store.seek(0);
            store.readFully(s.getBytes(), 0, 128);
            s.setPos(48);
            pageSize = s.readInt();
            int writeVersion = s.readByte();
            int readVersion = s.readByte();
            writer.println("-- pageSize: " + pageSize +
                    " writeVersion: " + writeVersion +
                    " readVersion: " + readVersion);
            if (pageSize < PageStore.PAGE_SIZE_MIN || pageSize > PageStore.PAGE_SIZE_MAX) {
                pageSize = SysProperties.PAGE_SIZE;
                writer.println("-- ERROR: page size; using " + pageSize);
            }
            int pageCount = (int) (length / pageSize);
            parents = new int[pageCount];
            s = Data.create(this, pageSize);
            for (int i = 3; i < pageCount; i++) {
                s.reset();
                store.seek((long) i * pageSize); // long arithmetic: i * pageSize overflows int for files > 2 GB
                store.readFully(s.getBytes(), 0, 32);
                s.readByte();
                s.readShortInt();
                parents[i] = s.readInt();
            }
            int logKey = 0, logFirstTrunkPage = 0, logFirstDataPage = 0;
            s = Data.create(this, pageSize);
            for (int i = 1; i < 3; i++) {
                s.reset();
                store.seek((long) i * pageSize);
                store.readFully(s.getBytes(), 0, pageSize);
                CRC32 crc = new CRC32();
                crc.update(s.getBytes(), 4, pageSize - 4);
                int expected = (int) crc.getValue();
                int got = s.readInt();
                long writeCounter = s.readLong();
                int key = s.readInt();
                int firstTrunkPage = s.readInt();
                int firstDataPage = s.readInt();
                if (expected == got) {
                    logKey = key;
                    logFirstTrunkPage = firstTrunkPage;
                    logFirstDataPage = firstDataPage;
                }
                // ... (excerpt truncated)

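// Serializing result rows to disk (apparently org.h2.result.ResultDiskBuffer): each row
// becomes a length-prefixed record, optionally batched through a ByteArrayOutputStream;
// the matching read side is the readRow(ResultDiskTape) method further down.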
    public void addRows(ArrayList<Value[]> rows) {
        if (sort != null) {
            sort.sort(rows);
        }
        Data buff = rowBuff;
        long start = file.getFilePointer();
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        int bufferLen = 0;
        int maxBufferSize = SysProperties.LARGE_RESULT_BUFFER_SIZE;
        for (Value[] row : rows) {
            buff.reset();
            buff.writeInt(0);
            for (int j = 0; j < columnCount; j++) {
                Value v = row[j];
                buff.checkCapacity(buff.getValueLen(v));
                buff.writeValue(v);
            }
            buff.fillAligned();
            int len = buff.length();
            buff.setInt(0, len);
            if (maxBufferSize > 0) {
                buffer.write(buff.getBytes(), 0, len);
                bufferLen += len;
                if (bufferLen > maxBufferSize) {
                    byte[] data = buffer.toByteArray();
                    buffer.reset();
                    file.write(data, 0, data.length);
                    bufferLen = 0;
                }
            } else {
                file.write(buff.getBytes(), 0, len);
            }
        }
        if (bufferLen > 0) {
            byte[] data = buffer.toByteArray();
            file.write(data, 0, data.length);
            // ... (excerpt truncated)

            closeSilently(store);
        }
    }

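// Dumping every page of a PageStore file for diagnostics: the page type byte selects
// how the remainder of the page is decoded and printed.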
    private void dumpPageStore(PrintWriter writer, int pageCount) {
        Data s = Data.create(this, pageSize);
        for (int page = 3; page < pageCount; page++) {
            s.reset(); // reuse the page buffer rather than allocating a new Data per page
            store.seek((long) page * pageSize); // avoid int overflow for files > 2 GB
            store.readFully(s.getBytes(), 0, pageSize);
            int type = s.readByte();
            switch (type) {
            case Page.TYPE_EMPTY:
                stat.pageTypeCount[type]++;
                continue;
            }
            boolean last = (type & Page.FLAG_LAST) != 0;
            type &= ~Page.FLAG_LAST;
            if (!PageStore.checksumTest(s.getBytes(), page, pageSize)) {
                writer.println("-- ERROR: page " + page + " checksum mismatch type: " + type);
            }
            s.readShortInt();
            switch (type) {
            // type 1
            case Page.TYPE_DATA_LEAF: {
                stat.pageTypeCount[type]++;
                int parentPageId = s.readInt();
                setStorage(s.readVarInt());
                int columnCount = s.readVarInt();
                int entries = s.readShortInt();
                writer.println("-- page " + page + ": data leaf " + (last ? "(last) " : "") + "parent: " + parentPageId +
                        " table: " + storageId + " entries: " + entries + " columns: " + columnCount);
                dumpPageDataLeaf(writer, s, last, page, columnCount, entries);
                break;
            }
            // type 2
            case Page.TYPE_DATA_NODE: {
                stat.pageTypeCount[type]++;
                int parentPageId = s.readInt();
                setStorage(s.readVarInt());
                int rowCount = s.readInt();
                int entries = s.readShortInt();
                writer.println("-- page " + page + ": data node " + (last ? "(last) " : "") + "parent: " + parentPageId +
                        " table: " + storageId + " entries: " + entries + " rowCount: " + rowCount);
                dumpPageDataNode(writer, s, page, entries);
                break;
            }
            // type 3
            case Page.TYPE_DATA_OVERFLOW:
                stat.pageTypeCount[type]++;
                writer.println("-- page " + page + ": data overflow " + (last ? "(last) " : ""));
                break;
            // type 4
            case Page.TYPE_BTREE_LEAF: {
                stat.pageTypeCount[type]++;
                int parentPageId = s.readInt();
                setStorage(s.readVarInt());
                int entries = s.readShortInt();
                writer.println("-- page " + page + ": b-tree leaf " + (last ? "(last) " : "") + "parent: " + parentPageId +
                        " index: " + storageId + " entries: " + entries);
                if (trace) {
                    dumpPageBtreeLeaf(writer, s, entries, !last);
                }
                break;
            }
            // type 5
            case Page.TYPE_BTREE_NODE: {
                stat.pageTypeCount[type]++;
                int parentPageId = s.readInt();
                setStorage(s.readVarInt());
                writer.println("-- page " + page + ": b-tree node " + (last ? "(last) " : "") + "parent: " + parentPageId +
                        " index: " + storageId);
                dumpPageBtreeNode(writer, s, page, !last);
                break;
            }
            // type 6
            // ... (excerpt truncated)

        }
    }

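// The read-side counterpart of addRows above: fetch one length-prefixed record from
// the temp file and decode its column values into a Value[] row.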
    private void readRow(ResultDiskTape tape) {
        int min = Constants.FILE_BLOCK_SIZE;
        Data buff = rowBuff;
        buff.reset();
        file.readFully(buff.getBytes(), 0, min);
        int len = buff.readInt();
        buff.checkCapacity(len);
        if (len - min > 0) {
            file.readFully(buff.getBytes(), min, len - min);
        }
        tape.pos += len;
        Value[] row = new Value[columnCount];
        for (int k = 0; k < columnCount; k++) {
            row[k] = buff.readValue();
        }
        tape.buffer.add(row);
    }

            }
        }
    }

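// Walking the PageStore transaction log: a DataReader wraps a PageInputStream that
// follows the chain of trunk and data pages; the CompressLZF instance suggests log
// records may be stored compressed.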
    private void dumpPageLogStream(PrintWriter writer, int logKey, int logFirstTrunkPage, int logFirstDataPage) throws IOException {
        Data s = Data.create(this, pageSize);
        DataReader in = new DataReader(
                new PageInputStream(writer, this, store, logKey, logFirstTrunkPage, logFirstDataPage, pageSize)
        );
        writer.println("---- Transaction log ----------");
        CompressLZF compress = new CompressLZF();
        // ... (excerpt truncated)
