Examples of CacheDirectiveInfo
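
CacheDirectiveInfo (in org.apache.hadoop.hdfs.protocol) describes a path-based request to HDFS centralized cache management: the file or directory to cache, the cache pool the directive belongs to, and optionally a replication factor and an expiration. Instances are immutable and are built with CacheDirectiveInfo.Builder. A minimal sketch, assuming org.apache.hadoop.fs.Path is imported and that a pool named "examplePool" already exists (both the pool name and the path are placeholders):

    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot"))   // file or directory to cache
        .setPool("examplePool")           // pool the directive is charged against
        .setReplication((short) 2)        // desired number of cached replicas
        .build();

The fragments below, collected from the Hadoop HDFS sources, show how the class is used on both the client side and inside the NameNode.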


Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
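This first fragment appears to come from a test of directive listing: it creates several cache pools and adds one directive per pool, all caching the same path.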

    final int poolCount = 7;
    HashSet<String> poolNames = new HashSet<String>(poolCount);
    Path path = new Path("/p");
    for (int i=0; i<poolCount; i++) {
      String poolName = "testListCacheDirectives-" + i;
      // Build a directive that caches the same path in each new pool
      CacheDirectiveInfo directiveInfo =
        new CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
      dfs.addCachePool(new CachePoolInfo(poolName));
      // FORCE ignores the pool's resource limits
      dfs.addCacheDirective(directiveInfo, EnumSet.of(CacheFlag.FORCE));
      poolNames.add(poolName);
    }

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
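This fragment looks like the NameNode-side add path (FSNamesystem): a caller-supplied ID is rejected, the CacheManager builds the effective directive, and the operation is logged to the edit log and synced once the write lock is released.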

      // A caller-supplied ID is rejected; IDs are assigned by the CacheManager
      if (directive.getId() != null) {
        throw new IOException("addDirective: you cannot specify an ID " +
            "for this operation.");
      }
      CacheDirectiveInfo effectiveDirective =
          cacheManager.addDirective(directive, pc, flags);
      // Log the operation to the edit log; the sync happens after the
      // namesystem write lock is released
      getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
          cacheEntry != null);
      result = effectiveDirective.getId();
      success = true;
    } finally {
      writeUnlock();
      if (success) {
        getEditLog().logSync();
      }
    }

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
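Edit-log replay: the OP_ADD_CACHE_DIRECTIVE case re-applies a logged directive through the CacheManager and, when retry-cache tracking is on, records the assigned ID so a retried RPC gets the same answer.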

      fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId);
      break;
    }
    case OP_ADD_CACHE_DIRECTIVE: {
      AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op;
      // Re-apply the logged directive; it carries the ID that was
      // recorded in the edit log
      CacheDirectiveInfo result = fsNamesys.
          getCacheManager().addDirectiveFromEditLog(addOp.directive);
      if (toAddRetryCache) {
        // Remember the ID so a retried client RPC receives the same result
        Long id = result.getId();
        fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id);
      }
      break;
    }
    case OP_MODIFY_CACHE_DIRECTIVE: {
      // ... (modify handling elided)

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
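Modifying a directive during edit-log replay: the previous entry is looked up by ID, the supplied fields are merged with its old values, and the old directive is swapped for the merged one.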

    Long id = info.getId();
    if (id == null) {
      throw new InvalidRequestException("Must supply an ID.");
    }
    CacheDirective prevEntry = getById(id);
    // Merge the supplied fields with the previous entry's values, then
    // replace the old directive with the merged result
    CacheDirectiveInfo newInfo = createFromInfoAndDefaults(info, prevEntry);
    removeInternal(prevEntry);
    addInternal(new CacheDirective(newInfo), getCachePool(newInfo.getPool()));
  }

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
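The client-facing modify path in the CacheManager: permissions are checked, defaults are filled in from the previous entry, and the path, replication, pool name, and expiration are validated. The destination pool's limit is only re-checked when the directive moves to a different pool without CacheFlag.FORCE.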

      CacheDirective prevEntry = getById(id);
      checkWritePermission(pc, prevEntry.getPool());

      // Fill in defaults from the previous entry
      CacheDirectiveInfo infoWithDefaults =
          createFromInfoAndDefaults(info, prevEntry);
      CacheDirectiveInfo.Builder builder =
          new CacheDirectiveInfo.Builder(infoWithDefaults);

      // Do validation
      validatePath(infoWithDefaults);
      validateReplication(infoWithDefaults, (short)-1);
      // Need to test the pool being set here to avoid rejecting a modify for a
      // directive that's already been forced into a pool
      CachePool srcPool = prevEntry.getPool();
      CachePool destPool = getCachePool(validatePoolName(infoWithDefaults));
      if (!srcPool.getPoolName().equals(destPool.getPoolName())) {
        // Moving to another pool needs write permission there; the pool's
        // limit is only enforced when FORCE is not set
        checkWritePermission(pc, destPool);
        if (!flags.contains(CacheFlag.FORCE)) {
          checkLimit(destPool, infoWithDefaults.getPath().toUri().getPath(),
              infoWithDefaults.getReplication());
        }
      }
      // Verify the expiration against the destination pool
      validateExpiryTime(infoWithDefaults, destPool.getMaxRelativeExpiryMs());

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
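Listing directives in batches: the loop walks a map of directives keyed by ID, applies the optional pool and path filters, and caps the number of replies per batch; the truncated tail apparently performs a per-pool permission check.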

    for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
      // Cap the batch size; the true flag tells the caller there may be more
      if (numReplies >= maxListCacheDirectivesNumResponses) {
        return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
      }
      CacheDirective curDirective = cur.getValue();
      CacheDirectiveInfo info = curDirective.toInfo();
      // Apply the optional pool and path filters
      if (filter.getPool() != null &&
          !info.getPool().equals(filter.getPool())) {
        continue;
      }
      if (filterPath != null &&
          !info.getPath().toUri().getPath().equals(filterPath)) {
        continue;
      }
      boolean hasPermission = true;
      if (pc != null) {
        try {
          // ... (permission check elided)

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
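Loading directives from an fsimage: each serialized CacheDirectiveInfo is resolved against its pool, which must already exist, and rebuilt as a CacheDirective whose ID must be unique.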

    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    int numDirectives = in.readInt();
    prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (int i = 0; i < numDirectives; i++) {
      CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
      // Get pool reference by looking it up in the map
      final String poolName = info.getPool();
      CachePool pool = cachePools.get(poolName);
      if (pool == null) {
        throw new IOException("Directive refers to pool " + poolName +
            ", which does not exist.");
      }
      CacheDirective directive =
          new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
              info.getReplication(), info.getExpiration().getAbsoluteMillis());
      boolean addedDirective = pool.getDirectiveList().add(directive);
      assert addedDirective;
      // Directive IDs must be unique across the namespace
      if (directivesById.put(directive.getId(), directive) != null) {
        throw new IOException("A directive with ID " + directive.getId() +
            " already exists");
      }
      // ... (rest of the loop elided)

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
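A broad test of the public API through DistributedFileSystem: it adds directives (including a duplicate of an existing one), exercises the error paths for unknown pools, zero-permission pools, malformed paths, empty pool names, and invalid IDs, then covers filtered listing, modification, and removal.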

    proto.addCachePool(new CachePoolInfo("pool3").
        setMode(new FsPermission((short)0777)));
    proto.addCachePool(new CachePoolInfo("pool4").
        setMode(new FsPermission((short)0)));

    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().
        setPath(new Path("/alpha")).
        setPool("pool1").
        build();
    CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder().
        setPath(new Path("/beta")).
        setPool("pool2").
        build();
    CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder().
        setPath(new Path("/delta")).
        setPool("pool1").
        build();

    long alphaId = addAsUnprivileged(alpha);
    long alphaId2 = addAsUnprivileged(alpha);
    assertFalse("Expected to get unique directives when re-adding an "
        + "existing CacheDirectiveInfo",
        alphaId == alphaId2);
    long betaId = addAsUnprivileged(beta);

    try {
      addAsUnprivileged(new CacheDirectiveInfo.Builder().
          setPath(new Path("/unicorn")).
          setPool("no_such_pool").
          build());
      fail("expected an error when adding to a non-existent pool.");
    } catch (InvalidRequestException ioe) {
      GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
    }

    try {
      addAsUnprivileged(new CacheDirectiveInfo.Builder().
          setPath(new Path("/blackhole")).
          setPool("pool4").
          build());
      fail("expected an error when adding to a pool with " +
          "mode 0 (no permissions for anyone).");
    } catch (AccessControlException e) {
      GenericTestUtils.
          assertExceptionContains("Permission denied while accessing pool", e);
    }

    try {
      addAsUnprivileged(new CacheDirectiveInfo.Builder().
          setPath(new Path("/illegal:path/")).
          setPool("pool1").
          build());
      fail("expected an error when adding a malformed path " +
          "to the cache directives.");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
    }

    try {
      addAsUnprivileged(new CacheDirectiveInfo.Builder().
          setPath(new Path("/emptypoolname")).
          setReplication((short)1).
          setPool("").
          build());
      fail("expected an error when adding a cache " +
          "directive with an empty pool name.");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
    }

    long deltaId = addAsUnprivileged(delta);

    // We expect the following to succeed, because DistributedFileSystem
    // qualifies the path.
    long relativeId = addAsUnprivileged(
        new CacheDirectiveInfo.Builder().
            setPath(new Path("relative")).
            setPool("pool1").
            build());

    RemoteIterator<CacheDirectiveEntry> iter;
    iter = dfs.listCacheDirectives(null);
    validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool3").build());
    assertFalse(iter.hasNext());
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool1").build());
    validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool2").build());
    validateListAll(iter, betaId);

    dfs.removeCacheDirective(betaId);
    iter = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool2").build());
    assertFalse(iter.hasNext());

    try {
      dfs.removeCacheDirective(betaId);
      fail("expected an error when removing a non-existent ID");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("No directive with ID", e);
    }

    try {
      proto.removeCacheDirective(-42L);
      fail("expected an error when removing a negative ID");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains(
          "Invalid negative ID", e);
    }
    try {
      proto.removeCacheDirective(43L);
      fail("expected an error when removing a non-existent ID");
    } catch (InvalidRequestException e) {
      GenericTestUtils.assertExceptionContains("No directive with ID", e);
    }

    dfs.removeCacheDirective(alphaId);
    dfs.removeCacheDirective(alphaId2);
    dfs.removeCacheDirective(deltaId);

    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().
        setId(relativeId).
        setReplication((short)555).
        build());
    iter = dfs.listCacheDirectives(null);
    assertTrue(iter.hasNext());
    CacheDirectiveInfo modified = iter.next().getInfo();
    assertEquals(relativeId, modified.getId().longValue());
    assertEquals((short)555, modified.getReplication().shortValue());
    dfs.removeCacheDirective(relativeId);
    iter = dfs.listCacheDirectives(null);
    assertFalse(iter.hasNext());

    // Verify that directives with the relative path "." work correctly
    CacheDirectiveInfo directive =
        new CacheDirectiveInfo.Builder().setPath(new Path("."))
            .setPool("pool1").build();
    long id = dfs.addCacheDirective(directive);
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(
        directive).setId(id).setReplication((short)2).build());

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
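A persistence test: directives are verified, checkpointed to a secondary NameNode, saved into a fresh fsimage, and checked again after a NameNode restart.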

      RemoteIterator<CacheDirectiveEntry> dit
          = dfs.listCacheDirectives(null);
      for (int i=0; i<numEntries; i++) {
        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
        CacheDirectiveInfo cd = dit.next().getInfo();
        assertEquals(i+1, cd.getId().longValue());
        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
        assertEquals(pool, cd.getPool());
      }
      assertFalse("Unexpected # of cache directives found", dit.hasNext());
     
      // Checkpoint once to set some cache pools and directives on 2NN side
      secondary.doCheckpoint();
     
      // Add some more CacheManager state
      final String imagePool = "imagePool";
      dfs.addCachePool(new CachePoolInfo(imagePool));
      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/image")).setPool(imagePool).build());

      // Save a new image to force a fresh fsimage download
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.saveNamespace();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

      // Checkpoint again forcing a reload of FSN state
      boolean fetchImage = secondary.doCheckpoint();
      assertTrue("Secondary should have fetched a new fsimage from NameNode",
          fetchImage);

      // Remove temp pool and directive
      dfs.removeCachePool(imagePool);

      // Restart namenode
      cluster.restartNameNode();
   
      // Check that state came back up
      pit = dfs.listCachePools();
      assertTrue("No cache pools found", pit.hasNext());
      info = pit.next().getInfo();
      assertEquals(pool, info.getPoolName());
      assertEquals(groupName, info.getGroupName());
      assertEquals(mode, info.getMode());
      assertEquals(limit, (long)info.getLimit());
      assertFalse("Unexpected # of cache pools found", pit.hasNext());
   
      dit = dfs.listCacheDirectives(null);
      for (int i=0; i<numEntries; i++) {
        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
        CacheDirectiveInfo cd = dit.next().getInfo();
        assertEquals(i+1, cd.getId().longValue());
        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
        assertEquals(pool, cd.getPool());
        assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
      }
      assertFalse("Unexpected # of cache directives found", dit.hasNext());
 
      long nextId = dfs.addCacheDirective(
            new CacheDirectiveInfo.Builder().
                // ... (remaining builder calls elided)

Examples of org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo
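The final fragment caches files one at a time through the NameNode RPC interface, adding a directive per path and then (in the elided part) waiting for the blocks to be reported as cached.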

    // Check the initial statistics at the namenode
    waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
    // Cache and check each path in sequence
    int expected = 0;
    for (int i=0; i<numFiles; i++) {
      CacheDirectiveInfo directive =
          new CacheDirectiveInfo.Builder().
            setPath(new Path(paths.get(i))).
            setPool(pool).
            build();
      nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
      // ... (wait for this file's blocks to be cached, elided)
    }