Package org.apache.cassandra.db

Examples of org.apache.cassandra.db.Row$RowSerializer


            {
                ColumnFamily resolved = RowRepairResolver.resolveSuperset(versions);
                RowRepairResolver.maybeScheduleRepairs(resolved, table, key, versions, versionSources);
                versions.clear();
                versionSources.clear();
                return new Row(key, resolved);
            }
        };
    }
View Full Code Here


   *
   */
  public Row resolve(List<Message> responses) throws DigestMismatchException
  {
        long startTime = System.currentTimeMillis();
    Row retRow = null;
    List<Row> rowList = new ArrayList<Row>();
    List<EndPoint> endPoints = new ArrayList<EndPoint>();
    String key = null;
    String table = null;
    byte[] digest = new byte[0];
    boolean isDigestQuery = false;
       
        /*
     * Populate the list of rows from each of the messages
     * Check to see if there is a digest query. If a digest
         * query exists then we need to compare the digest with
         * the digest of the data that is received.
        */
        DataInputBuffer bufIn = new DataInputBuffer();
    for (Message response : responses)
    {                     
            byte[] body = response.getMessageBody();
            bufIn.reset(body, body.length);
            try
            {
                long start = System.currentTimeMillis();
                ReadResponse result = ReadResponse.serializer().deserialize(bufIn);
                if (logger_.isDebugEnabled())
                  logger_.debug( "Response deserialization time : " + (System.currentTimeMillis() - start) + " ms.");
          if(!result.isDigestQuery())
          {
            rowList.add(result.row());
            endPoints.add(response.getFrom());
            key = result.row().key();
            table = result.row().getTable();
          }
          else
          {
            digest = result.digest();
            isDigestQuery = true;
          }
            }
            catch( IOException ex )
            {
                logger_.info(LogUtil.throwableToString(ex));
            }
    }
    // If there was a digest query compare it with all the data digests
    // If there is a mismatch then throw an exception so that read repair can happen.
    if(isDigestQuery)
    {
      for(Row row: rowList)
      {
        if( !Arrays.equals(row.digest(), digest) )
        {
                    /* Wrap the key as the context in this exception */
          throw new DigestMismatchException(row.key());
        }
      }
    }
   
        /* If the rowList is empty then we had some exception above. */
        if ( rowList.size() == 0 )
        {
            return retRow;
        }
       
        /* Now calculate the resolved row */
    retRow = new Row(table, key);
    for (int i = 0 ; i < rowList.size(); i++)
    {
      retRow.repair(rowList.get(i));     
    }

        // At  this point  we have the return row .
    // Now we need to calculate the difference
    // so that we can schedule read repairs
    for (int i = 0 ; i < rowList.size(); i++)
    {
      // since retRow is the resolved row it can be used as the super set
      Row diffRow = rowList.get(i).diff(retRow);
      if(diffRow == null) // no repair needs to happen
        continue;
      // create the row mutation message based on the diff and schedule a read repair
      RowMutation rowMutation = new RowMutation(table, key);                 
          for (ColumnFamily cf : diffRow.getColumnFamilies())
          {
              rowMutation.add(cf);
          }
            RowMutationMessage rowMutationMessage = new RowMutationMessage(rowMutation);
          ReadRepairManager.instance().schedule(endPoints.get(i),rowMutationMessage);
View Full Code Here

           
            key = key.trim();
            if ( StorageService.instance().isPrimary(key) )
            {
                System.out.println("Processing key " + key);
                Row row = Table.open("Mailbox").getRow(key, "MailboxMailList0");
                if ( row.isEmpty() )
                {
                    System.out.println("MISSING KEY : " + key);
                    raf.write(key.getBytes());
                    raf.write(System.getProperty("line.separator").getBytes());
                }
View Full Code Here

        ColumnFamily resolved = resolveSuperset(versions);
        maybeScheduleRepairs(resolved, table, key, versions, endPoints);

        if (logger_.isDebugEnabled())
            logger_.debug("resolve: " + (System.currentTimeMillis() - startTime) + " ms.");
    return new Row(key, resolved);
  }
View Full Code Here

        return m;
    }

    public void reload()
    {
        Row cfDefRow = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF, ksName, cfName);

        if (cfDefRow.cf == null || !cfDefRow.cf.hasColumns())
            throw new RuntimeException(String.format("%s not found in the schema definitions keyspace.", ksName + ":" + cfName));

        try
View Full Code Here

    public static CFMetaData fromSchema(UntypedResultSet.Row result)
    {
        String ksName = result.getString("keyspace_name");
        String cfName = result.getString("columnfamily_name");

        Row serializedColumns = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_COLUMNS_CF, ksName, cfName);
        CFMetaData cfm = fromSchemaNoTriggers(result, ColumnDefinition.resultify(serializedColumns));

        Row serializedTriggers = SystemKeyspace.readSchemaRow(SystemKeyspace.SCHEMA_TRIGGERS_CF, ksName, cfName);
        addTriggerDefinitionsFromSchema(cfm, serializedTriggers);

        return cfm;
    }
View Full Code Here

    {
        DecoratedKey partitionKey = rowMapper.partitionKey(key);

        if (columnFamily.iterator().hasNext()) // Create or update row
        {
            Row row = row(partitionKey, timestamp); // Read row
            Document document = rowMapper.document(row);
            Term term = rowMapper.term(partitionKey);
            luceneIndex.upsert(term, document); // Store document
        }
        else if (columnFamily.deletionInfo() != null) // Delete full row
View Full Code Here

        List<Row> rows = new ArrayList<>(scoredDocuments.size());
        for (ScoredDocument scoredDocument : scoredDocuments) {
            // Extract row from document
            Document document = scoredDocument.getDocument();
            DecoratedKey partitionKey = rowMapper.partitionKey(document);
            Row row = row(partitionKey, timestamp);

            if (row == null) {
                return null;
            }

            // Return decorated row
            Float score = scoredDocument.getScore();
            Row decoratedRow = addScoreColumn(row, timestamp, score);
            rows.add(decoratedRow);
        }
        return rows;
    }
View Full Code Here

        QueryFilter queryFilter = QueryFilter.getIdentityFilter(partitionKey, metadata.cfName, timestamp);
        ColumnFamily columnFamily = baseCfs.getColumnFamily(queryFilter);
        if (columnFamily != null)
        {
            ColumnFamily cleanColumnFamily = cleanExpired(columnFamily, timestamp);
            return new Row(partitionKey, cleanColumnFamily);
        }
        return null;
    }
View Full Code Here

            throw new NotFoundException();

        if(rows.size() > 1)
            throw new RuntimeException("Block id returned more than one row");

        Row row = rows.get(0);
        if(row.cf == null)
            throw new NotFoundException();

        IColumn col = row.cf.getColumn(columnName);
View Full Code Here

TOP

Related Classes of org.apache.cassandra.db.Row$RowSerializer

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.