Package org.apache.cassandra.db

Examples of org.apache.cassandra.db.ColumnFamilyStore$Flush
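
The fragments below are drawn from older Apache Cassandra source, in which a keyspace is represented by the Table class, and show the operations that surround a ColumnFamilyStore and its flushed sstables: validation compaction, sstable streaming, schema loading, keyspace and column family drops, and commit-log-driven flushes. They all share one lookup idiom: Table.open(keyspace).getColumnFamilyStore(columnFamily). Here is a minimal sketch of that idiom followed by a forced flush; the keyspace and column family names are placeholders, not taken from the fragments:

    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.Table;

    public class FlushExample
    {
        public static void main(String[] args) throws Exception
        {
            // Look up the store for a (placeholder) keyspace/column family pair...
            ColumnFamilyStore cfs = Table.open("Keyspace1").getColumnFamilyStore("Standard1");
            // ...then force the current memtable to be written out as an sstable.
            cfs.forceFlush();
        }
    }

Deserializing a remote TreeRequest and queueing a read-only validation compaction on the requested column family store (an early version of the verb handler, before message versioning):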


            try
            {
                TreeRequest remotereq = this.deserialize(buffer);
                TreeRequest request = new TreeRequest(remotereq.sessionid, message.getFrom(), remotereq.cf);

                // trigger readonly-compaction
                ColumnFamilyStore store = Table.open(request.cf.left).getColumnFamilyStore(request.cf.right);
                Validator validator = new Validator(request);
                logger.debug("Queueing validation compaction for " + request);
                CompactionManager.instance.submitValidation(store, validator);
            }
            catch (IOException e)
            {
                // body truncated in the original fragment; a typical completion rethrows unchecked
                throw new RuntimeException(e);
            }
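Mapping a remote sstable descriptor onto a local one: the local Descriptor is derived from the store's flush path, so the incoming stream lands where a freshly flushed sstable would: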


        /* Create a local sstable for each remote sstable */
        Descriptor remotedesc = remote.desc;

        // new local sstable
        Table table = Table.open(remotedesc.ksname);
        ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
        Descriptor localdesc = Descriptor.fromFilename(cfStore.getFlushPath(remote.size, remote.desc.version));

        return new PendingFile(localdesc, remote);
    }
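Collecting freshly written sstables as their futures complete, registering each with its column family store and grouping them per store: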

                {
                    SSTableReader sstable = future.get();
                    // check for null before asserting; the original asserted first,
                    // which would throw NullPointerException instead of skipping
                    if (sstable == null)
                        continue;
                    assert sstable.getTableName().equals(table);
                    ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
                    cfs.addSSTable(sstable);
                    if (!cfstores.containsKey(cfs))
                        cfstores.put(cfs, new ArrayList<SSTableReader>());
                    cfstores.get(cfs).add(sstable);
                }
                catch (InterruptedException e)
                {
                    // body truncated in the original fragment; interruption is not expected here
                    throw new AssertionError(e);
                }
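A later version of the same descriptor mapping, which first rejects remote sstables whose on-disk format cannot be streamed into the current version: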

        // the opening of this fragment is truncated; reconstructed here to mirror
        // the earlier version of the method above, with the guard that triggers the throw
        Descriptor remotedesc = remote.desc;
        if (!remotedesc.isStreamCompatible)
            throw new UnsupportedOperationException(String.format("SSTable %s is not compatible with current version %s",
                                                                  remote.getFilename(), Descriptor.CURRENT_VERSION));

        // new local sstable
        Table table = Table.open(remotedesc.ksname);
        ColumnFamilyStore cfStore = table.getColumnFamilyStore(remotedesc.cfname);
        Descriptor localdesc = Descriptor.fromFilename(cfStore.getFlushPath(remote.size, remote.desc.version));

        return new PendingFile(localdesc, remote);
    }
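Streaming rows into a new local sstable. Rows that are cached and small enough for in-memory compaction are rebuilt through a PrecompactedRow so the row cache can be refreshed in place; anything else is appended straight from the stream and its cache entry invalidated: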

        session.closeIfFinished();
    }

    private SSTableReader streamIn(DataInput input, PendingFile localFile, PendingFile remoteFile) throws IOException
    {
        ColumnFamilyStore cfs = Table.open(localFile.desc.ksname).getColumnFamilyStore(localFile.desc.cfname);
        DecoratedKey key;
        SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
        CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);

        try
        {
            BytesReadTracker in = new BytesReadTracker(input);

            for (Pair<Long, Long> section : localFile.sections)
            {
                long length = section.right - section.left;
                long bytesRead = 0;
                while (bytesRead < length)
                {
                    in.reset(0);
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    ColumnFamily cached = cfs.getRawCachedRow(key);
                    if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        // Note: because we won't just echo the columns back, there is no need to use the PRESERVE_SIZE flag, unlike the appendFromStream path below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
                        cfs.invalidateCachedRow(key);
                    }

                    bytesRead += in.getBytesRead();
                    remoteFile.progress += in.getBytesRead();
                }
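The same TreeRequest handler after message versioning was introduced; the request now carries its token range explicitly: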

            try
            {
                TreeRequest remotereq = this.deserialize(buffer, message.getVersion());
                TreeRequest request = new TreeRequest(remotereq.sessionid, message.getFrom(), remotereq.range, remotereq.cf);

                // trigger readonly-compaction
                ColumnFamilyStore store = Table.open(request.cf.left).getColumnFamilyStore(request.cf.right);
                Validator validator = new Validator(request);
                logger.debug("Queueing validation compaction for " + request);
                CompactionManager.instance.submitValidation(store, validator);
            }
            catch (IOException e)
            {
                // body truncated in the original fragment; a typical completion rethrows unchecked
                throw new RuntimeException(e);
            }
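Loading keyspace definitions at startup: if the schema column family is empty, the definitions are migrated from the legacy storage format: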

    }

    /** load keyspace (table) definitions, but do not initialize the table instances. */
    public static void loadSchemas() throws IOException
    {
        ColumnFamilyStore schemaCFS = SystemTable.schemaCFS(SystemTable.SCHEMA_KEYSPACES_CF);

        // if table with definitions is empty try loading the old way
        if (schemaCFS.estimateKeys() == 0)
        {
            // we can load tables from local storage if a version is set in the system table and that actually maps to
            // real data in the definitions table.  If we do end up loading from xml, store the definitions so that we
            // don't load from xml anymore.
            UUID uuid = Migration.getLastMigrationId();
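Dropping a keyspace: each of its column family stores is purged from the schema and, outside client mode, snapshotted before the column family is dropped from the table instance: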

        String snapshotName = Table.getTimestampedSnapshotName(ksName);

        // remove all cfs from the table instance.
        for (CFMetaData cfm : ksm.cfMetaData().values())
        {
            ColumnFamilyStore cfs = Table.open(ksm.name).getColumnFamilyStore(cfm.cfName);

            Schema.instance.purge(cfm);

            if (!StorageService.instance.isClientMode())
            {
                cfs.snapshot(snapshotName);
                Table.open(ksm.name).dropCf(cfm.cfId);
            }
        }

        if (withSchemaRecord)
        {
            // truncated in the original fragment; by analogy with dropColumnFamily
            // below, the keyspace's own schema record would be removed here
            ksm.dropFromSchema(timestamp).apply();
        }
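Dropping a single column family: the keyspace definition is rebuilt without it, the schema record is updated, and the store is snapshotted before removal: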

    }

    private static void dropColumnFamily(String ksName, String cfName, long timestamp, boolean withSchemaRecord) throws IOException
    {
        KSMetaData ksm = Schema.instance.getTableDefinition(ksName);
        ColumnFamilyStore cfs = Table.open(ksName).getColumnFamilyStore(cfName);

        // reinitialize the table.
        CFMetaData cfm = ksm.cfMetaData().get(cfName);

        Schema.instance.purge(cfm);
        Schema.instance.setTableDefinition(makeNewKeyspaceDefinition(ksm, cfm));

        if (withSchemaRecord)
            cfm.dropFromSchema(timestamp).apply();

        if (!StorageService.instance.isClientMode())
        {
            cfs.snapshot(Table.getTimestampedSnapshotName(cfs.columnFamily));
            Table.open(ksm.name).dropCf(cfm.cfId);
        }
    }
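Flushing every column family store that still has dirty data in the oldest commit-log segment. As the in-code comment explains, the flush is handed off to another executor because running it on the commit-log executor could deadlock on Table.switchLock: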

        if (oldestSegment != null)
        {
            for (Integer dirtyCFId : oldestSegment.getDirtyCFIDs())
            {
                String keyspace = Schema.instance.getCF(dirtyCFId).left;
                final ColumnFamilyStore cfs = Table.open(keyspace).getColumnFamilyStore(dirtyCFId);
                // flush shouldn't run on the commitlog executor, since it acquires Table.switchLock,
                // which may already be held by a thread waiting for the CL executor (via getContext),
                // causing deadlock
                Runnable runnable = new Runnable()
                {
                    public void run()
                    {
                        cfs.forceFlush();
                    }
                };
                StorageService.optionalTasks.execute(runnable);
            }
        }
