Examples of CachePoolInfo
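CachePoolInfo describes an HDFS centralized-cache pool: a required pool name plus optional owner, group, permission mode, byte limit, and a maximum relative expiry that caps the cache directives placed in the pool. The snippets below, drawn from the Hadoop sources, show the class being exercised in tests, constructed through its chained setters, and serialized for the edit log and fsimage.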


Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

  @Test(timeout=30000)
  public void testMaxRelativeExpiry() throws Exception {
    // Test that negative and really big max expirations can't be set during add
    try {
      dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
      fail("Added a pool with a negative max expiry.");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("negative", e);
    }
    try {
      dfs.addCachePool(new CachePoolInfo("failpool")
          .setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
      fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("too big", e);
    }
    // Test that setting a max relative expiry on a pool works
    CachePoolInfo coolPool = new CachePoolInfo("coolPool");
    final long poolExpiration = 1000 * 60 * 10L; // 10 minutes
    dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
    RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
    CachePoolInfo listPool = poolIt.next().getInfo();
    assertFalse("Should only be one pool", poolIt.hasNext());
    assertEquals("Expected max relative expiry to match set value",
        poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
    // Test that negative and really big max expirations can't be modified
    try {
      dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(-1L));
      fail("Modified a pool with a negative max expiry.");
    } catch (InvalidRequestException e) {
      assertExceptionContains("negative", e);
    }
    try {
      dfs.modifyCachePool(coolPool
          .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER+1));
      fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
      assertExceptionContains("too big", e);
    }
    // Test that adding a directive without an expiration uses the pool's max
    CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/blah"))
        .setPool(coolPool.getPoolName())
        .build();
    dfs.addCacheDirective(defaultExpiry);
    RemoteIterator<CacheDirectiveEntry> dirIt =
        dfs.listCacheDirectives(defaultExpiry);
    CacheDirectiveInfo listInfo = dirIt.next().getInfo();
    assertFalse("Should only have one entry in listing", dirIt.hasNext());
    long listExpiration = listInfo.getExpiration().getAbsoluteMillis()
        - new Date().getTime();
    assertTrue("Directive expiry should be approximately the pool's max expiry",
        Math.abs(listExpiration - poolExpiration) < 10*1000);
    // Test that the max is enforced on add for relative and absolute
    CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/lolcat"))
        .setPool(coolPool.getPoolName());
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newRelative(poolExpiration+1))
          .build());
      fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newAbsolute(
              new Date().getTime() + poolExpiration + (10*1000)))
          .build());
      fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Test that max is enforced on modify for relative and absolute Expirations
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newRelative(poolExpiration+1))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newAbsolute(
              new Date().getTime() + poolExpiration + (10*1000)))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Test some giant limit values with add
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newRelative(
              Long.MAX_VALUE))
          .build());
      fail("Added a directive with a gigantic max value");
    } catch (IllegalArgumentException e) {
      assertExceptionContains("is too far in the future", e);
    }
    try {
      dfs.addCacheDirective(builder
          .setExpiration(Expiration.newAbsolute(
              Long.MAX_VALUE))
          .build());
      fail("Added a directive with a gigantic max value");
    } catch (InvalidRequestException e) {
      assertExceptionContains("is too far in the future", e);
    }
    // Test some giant limit values with modify
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.NEVER)
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setExpiration(Expiration.newAbsolute(
              Long.MAX_VALUE))
          .build());
      fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("is too far in the future", e);
    }
    // Test that the max is enforced on modify correctly when changing pools
    CachePoolInfo destPool = new CachePoolInfo("destPool");
    dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
    try {
      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
          .setId(listInfo.getId())
          .setPool(destPool.getPoolName())
          .build());
      fail("Modified a directive to a pool with a lower max expiration");
    } catch (InvalidRequestException e) {
      assertExceptionContains("exceeds the max relative expiration", e);
    }
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setPool(destPool.getPoolName())
        .setExpiration(Expiration.newRelative(poolExpiration / 2))
        .build());
    dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder()
        .setPool(destPool.getPoolName())
        .build());
    listInfo = dirIt.next().getInfo();
    listExpiration = listInfo.getExpiration().getAbsoluteMillis()
        - new Date().getTime();
    assertTrue("Unexpected relative expiry " + listExpiration
        + " expected approximately " + poolExpiration/2,
        Math.abs(poolExpiration/2 - listExpiration) < 10*1000);
    // Test that cache pool and directive expiry can be modified back to never
    dfs.modifyCachePool(destPool
        .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
    poolIt = dfs.listCachePools();
    listPool = poolIt.next().getInfo();
    while (!listPool.getPoolName().equals(destPool.getPoolName())) {
      listPool = poolIt.next().getInfo();
    }
    assertEquals("Expected max relative expiry to match set value",
        CachePoolInfo.RELATIVE_EXPIRY_NEVER,
        listPool.getMaxRelativeExpiryMs().longValue());
  }
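The test above pins down the central invariant: a pool's maxRelativeExpiryMs caps every directive in the pool. The cap is validated when a pool is added or modified, when a directive is added or modified (in both relative and absolute form), and when a directive is moved into a pool with a lower cap; CachePoolInfo.RELATIVE_EXPIRY_NEVER is the sentinel meaning no cap.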

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

        0xFADED);
    // Set up a log appender watcher
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    dfs.addCachePool(new CachePoolInfo("pool"));
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
        .setPath(fileName).setReplication((short) 1).build());
    waitForCachedBlocks(namenode, -1, numCachedReplicas,
        "testExceeds:1");
    checkPendingCachedEmpty(cluster);
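This fragment comes from a capacity test (note the "testExceeds:1" tag): it attaches a LogVerificationAppender to the root logger before creating the pool and directive, so log output can be asserted on later, then waits for the expected number of cached replicas and verifies that nothing remains in the pending-cached queue.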

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

   *          would get if you didn't have read permission for this pool.)
   * @return
   *          Cache pool information.
   */
  CachePoolInfo getInfo(boolean fullInfo) {
    CachePoolInfo info = new CachePoolInfo(poolName);
    if (!fullInfo) {
      return info;
    }
    return info.setOwnerName(ownerName).
        setGroupName(groupName).
        setMode(new FsPermission(mode)).
        setLimit(limit).
        setMaxRelativeExpiryMs(maxRelativeExpiryMs);
  }
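The chained setters returned by CachePoolInfo also make client-side construction compact. Below is a minimal client sketch, not taken from the listing: it assumes a running HDFS cluster reachable through the default Configuration, and the pool name, owner, group, and limit values are illustrative.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.permission.FsPermission;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

  public class AddCachePoolExample {
    public static void main(String[] args) throws Exception {
      // Assumes fs.defaultFS points at an HDFS namenode with caching enabled.
      Configuration conf = new Configuration();
      DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
      // Only the pool name is required; unset fields keep server-side defaults.
      CachePoolInfo pool = new CachePoolInfo("examplePool") // hypothetical name
          .setOwnerName("hdfs")
          .setGroupName("hadoop")
          .setMode(new FsPermission((short) 0755))
          .setLimit(64L * 1024 * 1024)              // 64 MiB byte limit
          .setMaxRelativeExpiryMs(60L * 60 * 1000); // 1-hour directive cap
      dfs.addCachePool(pool);
    }
  }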

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

            "Cannot add cache pool " + req.getPoolName(), safeMode);
      }
      if (pc != null) {
        pc.checkSuperuserPrivilege();
      }
      CachePoolInfo info = cacheManager.addCachePool(req);
      poolInfoStr = info.toString();
      getEditLog().logAddCachePool(info, cacheEntry != null);
      success = true;
    } finally {
      writeUnlock();
      if (isAuditEnabled() && isExternalInvocation()) {
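Two details stand out in this FSNamesystem fragment: creating a pool is a superuser-only operation (the permission checker is consulted when one is present), and the new pool is written to the edit log so it survives a NameNode restart; the cacheEntry flag ties into the retry cache so a retried RPC is not logged twice.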

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

  }

  public static CachePoolInfo readCachePoolInfo(DataInput in)
      throws IOException {
    String poolName = readString(in);
    CachePoolInfo info = new CachePoolInfo(poolName);
    int flags = readInt(in);
    if ((flags & 0x1) != 0) {
      info.setOwnerName(readString(in));
    }
    if ((flags & 0x2) != 0)  {
      info.setGroupName(readString(in));
    }
    if ((flags & 0x4) != 0) {
      info.setMode(FsPermission.read(in));
    }
    if ((flags & 0x8) != 0) {
      info.setLimit(readLong(in));
    }
    if ((flags & 0x10) != 0) {
      info.setMaxRelativeExpiryMs(readLong(in));
    }
    if ((flags & ~0x1F) != 0) {
      throw new IOException("Unknown flag in CachePoolInfo: " + flags);
    }
    return info;
  }
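The flag word makes the matching writer easy to picture. The sketch below is a hedged reconstruction of the inverse operation implied by the reader above; the real Hadoop serializer uses its own counterparts to the readString/readInt/readLong helpers, so plain java.io.DataOutput calls are substituted here to keep the sketch self-contained, and the bytes it writes are illustrative rather than wire-exact.

  import java.io.DataOutput;
  import java.io.IOException;
  import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

  public class CachePoolInfoWriterSketch {
    public static void writeCachePoolInfo(DataOutput out, CachePoolInfo info)
        throws IOException {
      out.writeUTF(info.getPoolName());
      // Build the same bitmask the reader decodes: one bit per present field.
      int flags =
          (info.getOwnerName() != null           ? 0x1  : 0) |
          (info.getGroupName() != null           ? 0x2  : 0) |
          (info.getMode() != null                ? 0x4  : 0) |
          (info.getLimit() != null               ? 0x8  : 0) |
          (info.getMaxRelativeExpiryMs() != null ? 0x10 : 0);
      out.writeInt(flags);
      if (info.getOwnerName() != null) {
        out.writeUTF(info.getOwnerName());
      }
      if (info.getGroupName() != null) {
        out.writeUTF(info.getGroupName());
      }
      if (info.getMode() != null) {
        info.getMode().write(out); // FsPermission is Writable
      }
      if (info.getLimit() != null) {
        out.writeLong(info.getLimit());
      }
      if (info.getMaxRelativeExpiryMs() != null) {
        out.writeLong(info.getMaxRelativeExpiryMs());
      }
    }
  }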

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

  }

  public static CachePoolInfo readCachePoolInfo(Stanza st)
      throws InvalidXmlException {
    String poolName = st.getValue("POOLNAME");
    CachePoolInfo info = new CachePoolInfo(poolName);
    if (st.hasChildren("OWNERNAME")) {
      info.setOwnerName(st.getValue("OWNERNAME"));
    }
    if (st.hasChildren("GROUPNAME")) {
      info.setGroupName(st.getValue("GROUPNAME"));
    }
    if (st.hasChildren("MODE")) {
      info.setMode(FSEditLogOp.fsPermissionFromXml(st));
    }
    if (st.hasChildren("LIMIT")) {
      info.setLimit(Long.parseLong(st.getValue("LIMIT")));
    }
    if (st.hasChildren("MAXRELATIVEEXPIRY")) {
      info.setMaxRelativeExpiryMs(
          Long.parseLong(st.getValue("MAXRELATIVEEXPIRY")));
    }
    return info;
  }
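This is the same record in its XML form, as used when edit-log entries are round-tripped through XML (for example by the offline edits viewer): each optional field becomes an optional child element whose name corresponds one-to-one with a flag bit in the binary reader above.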

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

    ArrayList<CachePoolInfoProto> pools = Lists
        .newArrayListWithCapacity(cachePools.size());
    ArrayList<CacheDirectiveInfoProto> directives = Lists
        .newArrayListWithCapacity(directivesById.size());

    for (CachePool pool : cachePools.values()) {
      CachePoolInfo p = pool.getInfo(true);
      CachePoolInfoProto.Builder b = CachePoolInfoProto.newBuilder()
          .setPoolName(p.getPoolName());

      if (p.getOwnerName() != null)
        b.setOwnerName(p.getOwnerName());

      if (p.getGroupName() != null)
        b.setGroupName(p.getGroupName());

      if (p.getMode() != null)
        b.setMode(p.getMode().toShort());

      if (p.getLimit() != null)
        b.setLimit(p.getLimit());

      pools.add(b.build());
    }

    for (CacheDirective directive : directivesById.values()) {
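Here each pool is converted to a CachePoolInfoProto for the fsimage (the directives list is filled in past the truncation point); the loadState method in the next snippet performs the reverse conversion, re-registering every pool through addCachePool.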

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

  }

  public void loadState(PersistState s) throws IOException {
    nextDirectiveId = s.section.getNextDirectiveId();
    for (CachePoolInfoProto p : s.pools) {
      CachePoolInfo info = new CachePoolInfo(p.getPoolName());
      if (p.hasOwnerName())
        info.setOwnerName(p.getOwnerName());

      if (p.hasGroupName())
        info.setGroupName(p.getGroupName());

      if (p.hasMode())
        info.setMode(new FsPermission((short) p.getMode()));

      if (p.hasLimit())
        info.setLimit(p.getLimit());

      addCachePool(info);
    }

    for (CacheDirectiveInfoProto p : s.directives) {
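Note the symmetry with the save path: only fields actually present in the protobuf message (hasOwnerName() and friends) are set on the rebuilt CachePoolInfo, so absent optional fields stay null and fall back to pool defaults.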

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

  public void testListCachePools() throws Exception {
    final int poolCount = 7;
    HashSet<String> poolNames = new HashSet<String>(poolCount);
    for (int i=0; i<poolCount; i++) {
      String poolName = "testListCachePools-" + i;
      dfs.addCachePool(new CachePoolInfo(poolName));
      poolNames.add(poolName);
    }
    listCachePools(poolNames, 0);

    cluster.transitionToStandby(0);
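This listing test runs against an HA cluster: after the seven pools are created and verified through NameNode 0, transitionToStandby(0) triggers a failover so that (past the truncation point) the same listing can be re-checked after the transition.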

Examples of org.apache.hadoop.hdfs.protocol.CachePoolInfo

    Path path = new Path("/p");
    for (int i=0; i<poolCount; i++) {
      String poolName = "testListCacheDirectives-" + i;
      CacheDirectiveInfo directiveInfo =
        new CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
      dfs.addCachePool(new CachePoolInfo(poolName));
      dfs.addCacheDirective(directiveInfo, EnumSet.of(CacheFlag.FORCE));
      poolNames.add(poolName);
    }
    listCacheDirectives(poolNames, 0);
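The directive-listing variant follows the same pattern, with one twist: each directive is added with CacheFlag.FORCE, which skips the pool resource-limit check that would otherwise apply when a directive is added.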