Package com.yammer.metrics.core

Examples of com.yammer.metrics.core.MetricName


  private Meter hits;
  private Meter misses;

  public BlockDirectoryCache(BlockCache blockCache) {
    _blockCache = blockCache;
    hits = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, CACHE, HIT), HIT, TimeUnit.SECONDS);
    misses = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, CACHE, MISS), MISS, TimeUnit.SECONDS);
  }
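The snippet above builds a MetricName from a group, a type, and a name, then registers a Meter against it with Metrics.newMeter. A minimal, self-contained sketch of the same pattern follows; the literal strings stand in for Blur's ORG_APACHE_BLUR, CACHE, HIT and MISS constants, whose exact values are an assumption here.

import java.util.concurrent.TimeUnit;

import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Meter;
import com.yammer.metrics.core.MetricName;

public class CacheMeterSketch {
  // Group, type and name are plain strings; Blur keeps them in shared constants.
  private final Meter hits = Metrics.newMeter(
      new MetricName("org.apache.blur", "Cache", "Hit"), "Hit", TimeUnit.SECONDS);
  private final Meter misses = Metrics.newMeter(
      new MetricName("org.apache.blur", "Cache", "Miss"), "Miss", TimeUnit.SECONDS);

  public void recordLookup(boolean found) {
    if (found) {
      hits.mark();   // one event; the Meter exposes mean and 1/5/15-minute rates
    } else {
      misses.mark();
    }
  }
}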


      }
      _locks[i] = new BlockLocks(_numberOfBlocksPerSlab);
      _lockCounters[i] = new AtomicInteger();
    }

    evictions = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, CACHE, EVICTION), EVICTION, TimeUnit.SECONDS);
    Metrics.newGauge(new MetricName(ORG_APACHE_BLUR, CACHE, ENTRIES), new Gauge<Long>() {
      @Override
      public Long value() {
        return (long) getSize();
      }
    });
    Metrics.newGauge(new MetricName(ORG_APACHE_BLUR, CACHE, SIZE), new Gauge<Long>() {
      @Override
      public Long value() {
        return ((long) getSize()) * (long) _8K;
      }
    });
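Gauges are registered with the same MetricName scheme, but instead of marking events they compute a value each time a reporter polls the registry. A minimal sketch of an entry-count gauge over an ordinary map, with literal strings again standing in for Blur's constants:

import java.util.concurrent.ConcurrentHashMap;

import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Gauge;
import com.yammer.metrics.core.MetricName;

public class CacheGaugeSketch {
  private final ConcurrentHashMap<String, byte[]> cache = new ConcurrentHashMap<String, byte[]>();

  public CacheGaugeSketch() {
    // value() is called on demand, so the gauge always reflects the current size.
    Metrics.newGauge(new MetricName("org.apache.blur", "Cache", "Entries"), new Gauge<Long>() {
      @Override
      public Long value() {
        return (long) cache.size();
      }
    });
  }
}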

  private static final Log LOG = LogFactory.getLog(ShardServerEventHandler.class);
  private final Meter _connectionMeter;
  private final AtomicLong _connections = new AtomicLong();

  public ShardServerEventHandler() {
    Metrics.newGauge(new MetricName(ORG_APACHE_BLUR, BLUR, "Connections"), new Gauge<Long>() {
      @Override
      public Long value() {
        return _connections.get();
      }
    });
    _connectionMeter = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, BLUR, "Connections/s"), "Connections/s",
        TimeUnit.SECONDS);
  }
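The constructor above registers a gauge backed by the _connections counter and a per-second meter under a separate name. The event-handler callbacks are not part of the excerpt, but one plausible (hypothetical) way they would drive the two metrics is:

  // Hypothetical hooks; the real handler methods are outside the excerpt above.
  public void connectionOpened() {
    _connections.incrementAndGet(); // feeds the "Connections" gauge
    _connectionMeter.mark();        // feeds the "Connections/s" rate
  }

  public void connectionClosed() {
    _connections.decrementAndGet();
  }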

  public DeepPagingCache() {
    this(DEFAULT_MAX);
  }

  public DeepPagingCache(long maxEntriesForDeepPaging) {
    _hits = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, DEEP_PAGING_CACHE, HIT), HIT, TimeUnit.SECONDS);
    _misses = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, DEEP_PAGING_CACHE, MISS), MISS, TimeUnit.SECONDS);
    _evictions = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, DEEP_PAGING_CACHE, EVICTION), EVICTION,
        TimeUnit.SECONDS);
    _lruCache = new ConcurrentLinkedHashMap.Builder<DeepPageKeyPlusPosition, DeepPageContainer>()
        .maximumWeightedCapacity(maxEntriesForDeepPaging)
        .listener(new EvictionListener<DeepPageKeyPlusPosition, DeepPageContainer>() {
          @Override
          public void onEviction(DeepPageKeyPlusPosition key, DeepPageContainer value) {
            _positionCache.remove(key);
            _evictions.mark();
          }
        }).build();
    Metrics.newGauge(new MetricName(ORG_APACHE_BLUR, DEEP_PAGING_CACHE, SIZE), new Gauge<Long>() {
      @Override
      public Long value() {
        return _lruCache.weightedSize();
      }
    });

      String cluster) {
    _clusterStatus = clusterStatus;
    _configuration = configuration;
    _nodeName = nodeName;
    _cluster = cluster;
    MetricName tableCount = new MetricName(ORG_APACHE_BLUR, BLUR, TABLE_COUNT, _cluster);
    MetricName indexCount = new MetricName(ORG_APACHE_BLUR, BLUR, INDEX_COUNT, _cluster);
    MetricName segmentCount = new MetricName(ORG_APACHE_BLUR, BLUR, SEGMENT_COUNT, _cluster);
    MetricName indexMemoryUsage = new MetricName(ORG_APACHE_BLUR, BLUR, INDEX_MEMORY_USAGE, _cluster);

    Metrics.newGauge(tableCount, new AtomicLongGauge(_tableCount));
    Metrics.newGauge(indexCount, new AtomicLongGauge(_indexCount));
    Metrics.newGauge(segmentCount, new AtomicLongGauge(_segmentCount));
    Metrics.newGauge(indexMemoryUsage, new AtomicLongGauge(_indexMemoryUsage));
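Here the four-argument MetricName constructor adds a scope (the cluster name), so the same metric names can be registered once per cluster. AtomicLongGauge is a Blur helper rather than part of metrics-core; a minimal sketch of what such a gauge might look like:

import java.util.concurrent.atomic.AtomicLong;

import com.yammer.metrics.core.Gauge;

// Sketch: a Gauge that reports whatever an AtomicLong currently holds.
public class AtomicLongGauge extends Gauge<Long> {
  private final AtomicLong value;

  public AtomicLongGauge(AtomicLong value) {
    this.value = value;
  }

  @Override
  public Long value() {
    return value.get();
  }
}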

  public SharedMergeScheduler(int threads) {
    _service = Executors.newThreadPool(SHARED_MERGE_SCHEDULER, threads, false);
    for (int i = 0; i < threads; i++) {
      _service.submit(this);
    }
    MetricName mergeThroughputBytes = new MetricName(ORG_APACHE_BLUR, LUCENE, MERGE_THROUGHPUT_BYTES);
    _throughputBytes = Metrics.newMeter(mergeThroughputBytes, MERGE_THROUGHPUT_BYTES, TimeUnit.SECONDS);
  }
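A Meter can also be marked with a count instead of a single event, which is how a bytes-per-second throughput figure like MERGE_THROUGHPUT_BYTES would typically be fed. The merge write path is not part of the excerpt, so the hook below is hypothetical:

  // Hypothetical hook on the merge write path.
  private void bytesMerged(long byteCount) {
    _throughputBytes.mark(byteCount); // mark(long) counts byteCount events toward the rate
  }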

    _deepPagingCache = deepPagingCache;
    _indexServer = indexServer;
    _clusterStatus = clusterStatus;
    _filterCache = filterCache;

    MetricName metricName1 = new MetricName(ORG_APACHE_BLUR, BLUR, "External Queries/s");
    MetricName metricName2 = new MetricName(ORG_APACHE_BLUR, BLUR, "Internal Queries/s");
    MetricName metricName3 = new MetricName(ORG_APACHE_BLUR, BLUR, "Fetch Timer");

    _queriesExternalMeter = Metrics.newMeter(metricName1, "External Queries/s", TimeUnit.SECONDS);
    _queriesInternalMeter = Metrics.newMeter(metricName2, "Internal Queries/s", TimeUnit.SECONDS);
    _fetchTimer = Metrics.newTimer(metricName3, TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
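Metrics.newTimer takes a duration unit and a rate unit (here microseconds and seconds). The timer is then used by wrapping the timed work in a TimerContext; a self-contained sketch of that pattern, with literal strings standing in for the Blur constants:

import java.util.concurrent.TimeUnit;

import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.MetricName;
import com.yammer.metrics.core.Timer;
import com.yammer.metrics.core.TimerContext;

public class FetchTimerSketch {
  // Durations recorded in microseconds, rates reported per second.
  private final Timer fetchTimer = Metrics.newTimer(
      new MetricName("org.apache.blur", "Blur", "Fetch Timer"),
      TimeUnit.MICROSECONDS, TimeUnit.SECONDS);

  public void fetchRow() {
    TimerContext context = fetchTimer.time();
    try {
      // ... perform the fetch ...
    } finally {
      context.stop(); // records the elapsed time against the timer
    }
  }
}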

  private final Meter _created;

  public CacheValueBufferPool(STORE store, int capacity) {
    _store = store;
    _capacity = capacity;
    _created = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, CACHE_POOL, CREATED), CREATED, TimeUnit.SECONDS);
    _reused = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, CACHE_POOL, REUSED), REUSED, TimeUnit.SECONDS);
    _destroyed = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, CACHE_POOL, DESTROYED), DESTROYED, TimeUnit.SECONDS);
  }

    }
  }

  private BufferStore(int bufferSize, int count) {
    _bufferSize = bufferSize;
    _created = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, LUCENE, bufferSize + " " + SIZE_ALLOCATED,
        INTERNAL_BUFFERS), INTERNAL_BUFFERS, TimeUnit.SECONDS);
    _lost = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, LUCENE, bufferSize + " " + LOST, INTERNAL_BUFFERS),
        INTERNAL_BUFFERS, TimeUnit.SECONDS);
    _buffers = setupBuffers(bufferSize, count, _created);
  }

    }
    return name;
  }

  private MetricsGroup createNewMetricsGroup(String scope) {
    MetricName readAccessName = new MetricName(ORG_APACHE_BLUR, HDFS, "Read Latency in \u00B5s", scope);
    MetricName writeAccessName = new MetricName(ORG_APACHE_BLUR, HDFS, "Write Latency in \u00B5s", scope);
    MetricName readThroughputName = new MetricName(ORG_APACHE_BLUR, HDFS, "Read Throughput", scope);
    MetricName writeThroughputName = new MetricName(ORG_APACHE_BLUR, HDFS, "Write Throughput", scope);

    Histogram readAccess = Metrics.newHistogram(readAccessName);
    Histogram writeAccess = Metrics.newHistogram(writeAccessName);
    Meter readThroughput = Metrics.newMeter(readThroughputName, "Read Bytes", TimeUnit.SECONDS);
    Meter writeThroughput = Metrics.newMeter(writeThroughputName, "Write Bytes", TimeUnit.SECONDS);
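Metrics.newHistogram records a distribution of values rather than a rate, so the two histograms above capture latency samples while the meters count bytes. A short sketch of feeding them after a read, using the readAccess and readThroughput instances created above (the timing and size values are illustrative):

    // Sketch: record one read against the metrics created above.
    long start = System.nanoTime();
    // ... perform the HDFS read into a buffer, yielding bytesRead ...
    long bytesRead = 0; // illustrative placeholder
    readAccess.update((System.nanoTime() - start) / 1000); // latency sample in microseconds
    readThroughput.mark(bytesRead);                        // bytes counted toward the rate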
