Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs()
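
A minimal, self-contained sketch of the call these snippets revolve around: DistributedFileSystem.mkdirs() run against an in-process MiniDFSCluster, the same harness the examples below use. The class name MkdirsExample and the path /example/sub/dir are illustrative only and not taken from the snippets; the Hadoop APIs used (MiniDFSCluster.Builder, mkdirs, getFileStatus) are standard.

      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hdfs.DistributedFileSystem;
      import org.apache.hadoop.hdfs.MiniDFSCluster;

      public class MkdirsExample {
        public static void main(String[] args) throws Exception {
          Configuration conf = new Configuration();
          MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
          try {
            cluster.waitActive();
            // getFileSystem() returns a DistributedFileSystem bound to the mini cluster
            DistributedFileSystem fs = cluster.getFileSystem();
            Path dir = new Path("/example/sub/dir");
            // mkdirs creates the directory and any missing parents ("mkdir -p" semantics)
            // and returns true on success, including when the directory already exists
            boolean created = fs.mkdirs(dir);
            System.out.println("created=" + created
                + ", isDirectory=" + fs.getFileStatus(dir).isDirectory());
          } finally {
            cluster.shutdown();
          }
        }
      }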


      NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
     
      // FileSystem#mkdirs "/testInodeIdBasedPaths" via its inode-based path
      // (the test helper getInodePath builds "/.reserved/.inodes/<inodeId>/<name>")
      Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
      Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
      fs.mkdirs(baseDir);
      fs.exists(baseDir);
      long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
     
      // FileSystem#create file and FileSystem#close
      Path testFileInodePath = getInodePath(baseDirFileId, "test1");


      cluster.waitActive();
      final DistributedFileSystem hdfs = cluster.getFileSystem();
      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

      final Path dir = new Path("/dir");
      hdfs.mkdirs(dir);
      long dirId = fsdir.getINode(dir.toString()).getId();
      long parentId = fsdir.getINode("/").getId();
      String testPath = "/.reserved/.inodes/" + dirId + "/..";

      client = new DFSClient(NameNode.getAddress(conf), conf);

      final DistributedFileSystem hdfs = cluster.getFileSystem();
      ArrayList<String> source = new ArrayList<String>();

      // tmp1 holds files with 3 blocks, 3 replicas
      // tmp2 holds files with 3 blocks, 1 replica
      hdfs.mkdirs(new Path("/tmp1"));
      hdfs.mkdirs(new Path("/tmp2"));

      source.add("f1");
      source.add("f2");


      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      final DistributedFileSystem hdfs = cluster.getFileSystem();
      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

      hdfs.mkdirs(new Path("/tmp"));
      DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
      DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
      DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",

      final DistributedFileSystem dfs = cluster.getFileSystem();
      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
          WebHdfsFileSystem.SCHEME);

      final Path foo = new Path("/foo");
      dfs.mkdirs(foo);

      try {
        webHdfs.createSnapshot(foo);
        fail("Cannot create snapshot on a non-snapshottable directory");
      } catch (Exception e) {

      final DistributedFileSystem dfs = cluster.getFileSystem();
      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
          WebHdfsFileSystem.SCHEME);

      final Path foo = new Path("/foo");
      dfs.mkdirs(foo);
      dfs.allowSnapshot(foo);

      webHdfs.createSnapshot(foo, "s1");
      final Path spath = webHdfs.createSnapshot(foo, null);
      Assert.assertTrue(webHdfs.exists(spath));

      final DistributedFileSystem dfs = cluster.getFileSystem();
      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
          WebHdfsFileSystem.SCHEME);

      final Path foo = new Path("/foo");
      dfs.mkdirs(foo);
      dfs.allowSnapshot(foo);

      webHdfs.createSnapshot(foo, "s1");
      final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
      Assert.assertTrue(webHdfs.exists(s1path));

        .numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
    OutputStream out = null;
    try {
      fs.mkdirs(new Path("/test-target"));
      out = fs.create(new Path("/test-source/foo")); // don't close
      fs.rename(new Path("/test-source/"), new Path("/test-target/"));

      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.getNameNodeRpc().saveNamespace();

    dfs.rename(pathFileCreate, pathFileMoved);
    // OP_DELETE 2
    dfs.delete(pathFileMoved, false);
    // OP_MKDIR 3
    Path pathDirectoryMkdir = new Path("/directory_mkdir");
    dfs.mkdirs(pathDirectoryMkdir);
    // OP_SET_REPLICATION 4
    s = dfs.create(pathFileCreate);
    s.close();
    dfs.setReplication(pathFileCreate, (short)1);
    // OP_SET_PERMISSIONS 7
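
All of the snippets above call the single-argument mkdirs(Path), which creates the directory with the default permission filtered through the client's umask. FileSystem also exposes mkdirs(Path, FsPermission) for an explicit mode; a brief sketch, assuming a DistributedFileSystem handle named dfs like the ones above (the path /example/secured is illustrative):

      // FsPermission comes from org.apache.hadoop.fs.permission.
      // The effective permission is still masked by the client-side umask
      // (fs.permissions.umask-mode).
      Path secured = new Path("/example/secured");
      dfs.mkdirs(secured, new FsPermission((short) 0700));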
