Package org.apache.commons.collections

Examples of org.apache.commons.collections.MultiMap
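A MultiMap is a Map variant whose put(key, value) appends rather than replaces: each key is associated with the Collection of every value put under it, get(key) returns that Collection, and remove(key) detaches and returns it. Before the project examples below, here is a minimal, self-contained sketch of those core semantics (class and variable names are illustrative only):

import java.util.Collection;

import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;

public class MultiMapBasics {
  public static void main(String[] args) {
    MultiMap mm = new MultiValueMap();
    mm.put("color", "red");
    mm.put("color", "blue");                      // appends, does not overwrite "red"
    mm.put("shape", "circle");

    // get(key) returns the Collection of all values put under that key;
    // the interface predates generics, hence the cast
    Collection colors = (Collection) mm.get("color");
    System.out.println(colors);                   // [red, blue]

    // remove(key) detaches and returns the key's whole Collection
    Collection removed = (Collection) mm.remove("color");
    System.out.println(removed);                  // [red, blue]
    System.out.println(mm.containsKey("color"));  // false
  }
}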


  @Deprecated
  void replayLogv1(List<File> logs) throws Exception {
    int total = 0;
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    //Read inflight puts to see if they were committed
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }

    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    LOG.info("Starting replay of " + logs);
    for (File log : logs) {
      LOG.info("Replaying " + log);
      LogFile.SequentialReader reader = null;
      try {
        reader = LogFileFactory.getSequentialReader(log, encryptionKeyProvider);
        reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
        LogRecord entry;
        FlumeEventPointer ptr;
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = reader.getLogFileID();

        while ((entry = reader.next()) != null) {
          int offset = entry.getOffset();
          TransactionEventRecord record = entry.getEvent();
          short type = record.getRecordType();
          long trans = record.getTransactionID();
          readCount++;
          if (record.getLogWriteOrderID() > lastCheckpoint) {
            if (type == TransactionEventRecord.Type.PUT.get()) {
              putCount++;
              ptr = new FlumeEventPointer(fileId, offset);
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.TAKE.get()) {
              takeCount++;
              Take take = (Take) record;
              ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
              rollbackCount++;
              transactionMap.remove(trans);
            } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
              commitCount++;
              @SuppressWarnings("unchecked")
              Collection<FlumeEventPointer> pointers =
                (Collection<FlumeEventPointer>) transactionMap.remove(trans);
              if (((Commit) record).getType()
                      == TransactionEventRecord.Type.TAKE.get()) {
                if (inflightTakes.containsKey(trans)) {
                  if (pointers == null) {
                    pointers = Sets.newHashSet();
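The pattern above, where pointers accumulate under their transaction ID until a COMMIT or ROLLBACK arrives, works because MultiMap.remove(key) hands back the entire value Collection in one call (or null if the key was never seen). Below is a stripped-down sketch of that accumulator with hypothetical names; record, rollback, and commit stand in for the replay branches above:

import java.util.Collection;

import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;

class TxnAccumulator {
  private final MultiMap pending = new MultiValueMap();

  void record(long txnId, Object pointer) {
    pending.put(txnId, pointer);           // appends to this txn's collection
  }

  void rollback(long txnId) {
    pending.remove(txnId);                 // discards everything for this txn
  }

  @SuppressWarnings("unchecked")
  Collection<Object> commit(long txnId) {
    // remove() returns the accumulated Collection in one shot; the unchecked
    // cast is needed because MultiMap predates generics, exactly as in the
    // Flume code above
    return (Collection<Object>) pending.remove(txnId);  // null if txn unknown
  }
}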


  /**
   * @param logs
   * @throws IOException
   */
  void replayLog(List<File> logs) throws Exception {
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    // seed both with the highest known sequence of either the txnid or woid
    long transactionIDSeed = lastCheckpoint, writeOrderIDSeed = lastCheckpoint;
    LOG.info("Starting replay of " + logs);
    //Load the inflight puts into the transaction map to see if they were
    //committed in one of the logs.
    SetMultimap<Long, Long> inflightPuts = queue.deserializeInflightPuts();
    for (Long txnID : inflightPuts.keySet()) {
      Set<Long> eventPointers = inflightPuts.get(txnID);
      for (Long eventPointer : eventPointers) {
        transactionMap.put(txnID, FlumeEventPointer.fromLong(eventPointer));
      }
    }
    SetMultimap<Long, Long> inflightTakes = queue.deserializeInflightTakes();
    try {
      for (File log : logs) {
        LOG.info("Replaying " + log);
        try {
          LogFile.SequentialReader reader =
              LogFileFactory.getSequentialReader(log, encryptionKeyProvider);
          reader.skipToLastCheckpointPosition(queue.getLogWriteOrderID());
          Preconditions.checkState(!readers.containsKey(reader.getLogFileID()),
              "Readers " + readers + " already contains "
                  + reader.getLogFileID());
          readers.put(reader.getLogFileID(), reader);
          LogRecord logRecord = reader.next();
          if (logRecord == null) {
            readers.remove(reader.getLogFileID());
            reader.close();
          } else {
            logRecordBuffer.add(logRecord);
          }
        } catch (EOFException e) {
          LOG.warn("Ignoring " + log + " due to EOF", e);
        }
      }
      LogRecord entry = null;
      FlumeEventPointer ptr = null;
      while ((entry = next()) != null) {
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = entry.getFileID();
        int offset = entry.getOffset();
        TransactionEventRecord record = entry.getEvent();
        short type = record.getRecordType();
        long trans = record.getTransactionID();
        transactionIDSeed = Math.max(transactionIDSeed, trans);
        writeOrderIDSeed = Math.max(writeOrderIDSeed,
            record.getLogWriteOrderID());
        readCount++;
        if (readCount % 10000 == 0 && readCount > 0) {
          LOG.info("Read " + readCount + " records");
        }
        if (record.getLogWriteOrderID() > lastCheckpoint) {
          if (type == TransactionEventRecord.Type.PUT.get()) {
            putCount++;
            ptr = new FlumeEventPointer(fileId, offset);
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.TAKE.get()) {
            takeCount++;
            Take take = (Take) record;
            ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
            transactionMap.put(trans, ptr);
          } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
            rollbackCount++;
            transactionMap.remove(trans);
          } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
            commitCount++;
            @SuppressWarnings("unchecked")
            Collection<FlumeEventPointer> pointers =
              (Collection<FlumeEventPointer>) transactionMap.remove(trans);
            if (((Commit) record).getType()
                    == TransactionEventRecord.Type.TAKE.get()) {
              if (inflightTakes.containsKey(trans)) {
                if (pointers == null) {
                  pointers = Sets.newHashSet();
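Unlike the deprecated variant, replayLog() reads all logs in global write order rather than file by file: each reader is primed with a single record into logRecordBuffer, and next() (not shown on this page) presumably pops the oldest buffered record and refills the buffer from the reader it came from. A plausible sketch of such a k-way merge follows, assuming records compare by write-order ID; all types here are stand-ins, not Flume's:

import java.io.IOException;
import java.util.Map;
import java.util.PriorityQueue;

// Hypothetical k-way merge over several sequential log readers: keep one
// buffered record per live reader, always emit the globally oldest record,
// then refill from the reader it came from.
class LogMerger {
  interface Reader { Record next() throws IOException; }

  static class Record implements Comparable<Record> {
    final long writeOrderID;
    final int fileID;
    Record(long writeOrderID, int fileID) {
      this.writeOrderID = writeOrderID;
      this.fileID = fileID;
    }
    public int compareTo(Record other) {
      return Long.compare(writeOrderID, other.writeOrderID);
    }
  }

  private final PriorityQueue<Record> buffer = new PriorityQueue<Record>();
  private final Map<Integer, Reader> readers;

  LogMerger(Map<Integer, Reader> readers) { this.readers = readers; }

  void prime(Record first) { buffer.add(first); }   // one record per reader

  Record next() throws IOException {
    Record head = buffer.poll();
    if (head == null) {
      return null;                        // every reader is exhausted
    }
    Record refill = readers.get(head.fileID).next();
    if (refill != null) {
      buffer.add(refill);                 // keep that reader represented
    }
    return head;
  }
}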

   
    private void addFieldsToParsedObject(Document doc, ParsedObject o)
    {
        try
        {
            MultiMap multiKeywords = new MultiHashMap();
            MultiMap multiFields = new MultiHashMap();
            HashMap fieldMap = new HashMap();
           
            Field classNameField = doc.getField(ParsedObject.FIELDNAME_CLASSNAME);
            if(classNameField != null)
            {
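Here two MultiHashMaps collect a Lucene Document's keywords and fields. Lucene allows several fields with the same name on one Document, so a plain HashMap would keep only the last value; a MultiMap keeps them all. The following is a hypothetical continuation of that idea (the loop body is illustrative, not the original method):

import java.util.Iterator;

import org.apache.commons.collections.MultiHashMap;
import org.apache.commons.collections.MultiMap;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

class FieldCollector {
  // Collect every (name, value) pair on the document; duplicate field
  // names each keep all of their values rather than overwriting.
  MultiMap collectFields(Document doc) {
    MultiMap multiFields = new MultiHashMap();
    for (Iterator it = doc.getFields().iterator(); it.hasNext();) {
      Field field = (Field) it.next();
      multiFields.put(field.name(), field.stringValue());
    }
    return multiFields;
  }
}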

  /*
   * Takes all the report's charts and inserts them in their corresponding bands
   */
  protected void layoutCharts() {
    //Pre-sort charts by group column
    MultiMap mmap = new MultiHashMap();
    for (Iterator iter = getReport().getCharts().iterator(); iter.hasNext();) {
      DJChart djChart = (DJChart) iter.next();
      mmap.put(djChart.getColumnsGroup(), djChart);
    }

    for (Iterator iterator = mmap.keySet().iterator(); iterator.hasNext();) {
      Object key = iterator.next();
      Collection charts = (Collection) mmap.get(key);
      ArrayList l = new ArrayList(charts);
      //Reverse iteration of the charts to meet insertion order
      for (int i = l.size(); i > 0; i--) {
        DJChart djChart = (DJChart) l.get(i-1);
        JRDesignChart chart = createChart(djChart);
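layoutCharts() is the classic group-by use of a MultiMap: one pass buckets every chart under its group column, then each bucket is processed as a unit. The same idiom reduced to a runnable skeleton with placeholder data (a sketch, not DynamicJasper's code):

import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;

import org.apache.commons.collections.MultiHashMap;
import org.apache.commons.collections.MultiMap;

class GroupByDemo {
  public static void main(String[] args) {
    Collection items = Arrays.asList("apple", "avocado", "banana", "cherry");

    // Pass 1: bucket each item under its derived key (first letter here)
    MultiMap byKey = new MultiHashMap();
    for (Iterator it = items.iterator(); it.hasNext();) {
      String item = (String) it.next();
      byKey.put(item.substring(0, 1), item);
    }

    // Pass 2: walk each bucket, just as layoutCharts() walks its charts
    for (Iterator keys = byKey.keySet().iterator(); keys.hasNext();) {
      Object key = keys.next();
      Collection bucket = (Collection) byKey.get(key);
      System.out.println(key + " -> " + bucket);   // a -> [apple, avocado], ...
    }
  }
}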


  void replayLog(List<File> logs) throws IOException {
    int total = 0;
    int count = 0;
    MultiMap transactionMap = new MultiValueMap();
    LOG.info("Starting replay of " + logs);
    for (File log : logs) {
      LOG.info("Replaying " + log);
      LogFile.SequentialReader reader = null;
      try {
        reader = new LogFile.SequentialReader(log);
        Pair<Integer, TransactionEventRecord> entry;
        FlumeEventPointer ptr;
        // for puts the fileId is the fileID of the file they exist in
        // for takes the fileId and offset are pointers to a put
        int fileId = reader.getLogFileID();
        while ((entry = reader.next()) != null) {
          int offset = entry.getLeft();
          TransactionEventRecord record = entry.getRight();
          short type = record.getRecordType();
          long trans = record.getTransactionID();
          if (LOG.isDebugEnabled()) {
            LOG.debug("record.getTimestamp() = " + record.getTimestamp()
                + ", lastCheckpoint = " + lastCheckpoint + ", fileId = "
                + fileId + ", offset = " + offset + ", type = "
                + TransactionEventRecord.getName(type) + ", transaction "
                + trans);
          }
          if (record.getTimestamp() > lastCheckpoint) {
            if (type == TransactionEventRecord.Type.PUT.get()) {
              ptr = new FlumeEventPointer(fileId, offset);
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.TAKE.get()) {
              Take take = (Take) record;
              ptr = new FlumeEventPointer(take.getFileID(), take.getOffset());
              transactionMap.put(trans, ptr);
            } else if (type == TransactionEventRecord.Type.ROLLBACK.get()) {
              transactionMap.remove(trans);
            } else if (type == TransactionEventRecord.Type.COMMIT.get()) {
              @SuppressWarnings("unchecked")
              Collection<FlumeEventPointer> pointers =
                (Collection<FlumeEventPointer>) transactionMap.remove(trans);
              if (pointers != null && pointers.size() > 0) {
                processCommit(((Commit) record).getType(), pointers);
                count += pointers.size();
              }
            } else {
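On COMMIT this variant drains the transaction's pointers and passes them to processCommit together with the committed type, so put and take batches can be applied differently. That method is not shown on this page; a hypothetical shape for such a dispatcher follows (EventQueue and its methods are illustrative stand-ins, not Flume's API):

import java.util.Collection;

// Hypothetical commit dispatcher: a committed PUT batch makes its events
// visible on the queue, a committed TAKE batch removes them for good.
class CommitProcessor {
  interface EventQueue {
    void addTail(long pointer);
    void remove(long pointer);
  }

  static final short PUT = 1;    // illustrative type codes
  static final short TAKE = 2;

  private final EventQueue queue;

  CommitProcessor(EventQueue queue) { this.queue = queue; }

  void processCommit(short type, Collection<Long> pointers) {
    for (Long pointer : pointers) {
      if (type == PUT) {
        queue.addTail(pointer);    // event becomes consumable
      } else if (type == TAKE) {
        queue.remove(pointer);     // event will never be replayed again
      }
    }
  }
}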


  private static void bindMetaAttributes(Element element, Table table, OverrideRepository repository) {
    MultiMap map = MetaAttributeBinder.loadAndMergeMetaMap( element, new MultiHashMap());
    if(map!=null && !map.isEmpty()) {
      repository.addMetaAttributeInfo( table, map);
    }
  }
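In the Hibernate Tools snippets here and below, a fresh MultiHashMap is passed into MetaAttributeBinder.loadAndMergeMetaMap as an accumulator; the same meta attribute name may legally appear several times, and a MultiMap keeps every occurrence. The helper itself is not shown, so this is a hypothetical version of its shape (the dom4j traversal is illustrative):

import java.util.Iterator;

import org.apache.commons.collections.MultiMap;
import org.dom4j.Element;

class MetaMapLoader {
  // Fill the supplied MultiMap with one entry per <meta attribute="..."> child.
  // Repeated attribute names each keep all of their values.
  static MultiMap loadMetaMap(Element element, MultiMap target) {
    for (Iterator it = element.elementIterator("meta"); it.hasNext();) {
      Element meta = (Element) it.next();
      target.put(meta.attributeValue("attribute"), meta.getText());
    }
    return target.isEmpty() ? null : target;
  }
}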

      TableIdentifier tableIdentifier = TableIdentifier.create(table);
      if(table.getColumn(column)!=null) {
        throw new MappingException("Column " + column.getName() + " already exists in table " + tableIdentifier );
      }
     
      MultiMap map = MetaAttributeBinder.loadAndMergeMetaMap( element, new MultiHashMap());
      if(map!=null && !map.isEmpty()) {
        repository.addMetaAttributeInfo( tableIdentifier, column.getName(), map);
      }
     
      table.addColumn(column);
      columnNames.add(column.getName());

      filter.setMatchSchema(element.attributeValue("match-schema") );
      filter.setMatchName(element.attributeValue("match-name") );
      filter.setExclude(Boolean.valueOf(element.attributeValue("exclude") ) );
      filter.setPackage(element.attributeValue("package") );
     
      MultiMap map = MetaAttributeBinder.loadAndMergeMetaMap( element, new MultiHashMap());
      if(map!=null && !map.isEmpty()) {
        filter.setMetaAttributes( map );
      } else {
        filter.setMetaAttributes( null );       
      }
      repository.addTableFilter(filter);

    @Test
    public void testOverridePreferred() throws Exception
    {
        DefaultAppBloodhound bh = new DefaultAppBloodhound();
        MultiMap overrides = new MultiValueMap();
        overrides.put("properties", new TestDescriptorParserDefault());

        // test with default annotation values
        bh.mergeParserOverrides(overrides);
        assertEquals(1, bh.parserRegistry.size());
        DescriptorParser result = bh.parserRegistry.get("properties");

    @Test
    public void testBothPreferredWithWeight() throws Exception
    {
        DefaultAppBloodhound bh = new DefaultAppBloodhound();
        MultiMap overrides = new MultiValueMap();
        overrides.put("properties", new TestDescriptorParserDefault());
        overrides.put("properties", new TestDescriptorParserPreferred());

        // test with weight attribute (we have 3 candidates now)
        bh.mergeParserOverrides(overrides);
        assertEquals(1, bh.parserRegistry.size());
        DescriptorParser result = bh.parserRegistry.get("properties");
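A closing note on the two implementations used above: MultiHashMap is the original implementation and was deprecated in Commons Collections 3.2 in favor of MultiValueMap, which decorates an arbitrary Map and lets the caller choose the per-key value collection. For example, decorating with a HashSet de-duplicates values per key; a small illustrative fragment:

import java.util.HashMap;
import java.util.HashSet;

import org.apache.commons.collections.MultiMap;
import org.apache.commons.collections.map.MultiValueMap;

class DecorateDemo {
  public static void main(String[] args) {
    // HashSet as the value collection: duplicate (key, value) pairs collapse
    MultiMap mm = MultiValueMap.decorate(new HashMap(), HashSet.class);
    mm.put("properties", "a");
    mm.put("properties", "a");                // ignored, already present
    System.out.println(mm.get("properties")); // [a]
  }
}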


