Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem.listStatus()
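
Before the project excerpts below, a minimal self-contained sketch of the call itself: listStatus(Path) returns a FileStatus[] describing each entry of a directory (or a single-element array for a plain file). The directory path used here is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStatusExample {
        public static void main(String[] args) throws Exception {
            Path dir = new Path("/tmp/example"); // hypothetical directory
            FileSystem fs = dir.getFileSystem(new Configuration());

            // listStatus returns one FileStatus per directory entry.
            for (FileStatus status : fs.listStatus(dir)) {
                System.out.println(status.getPath() + "\t" + status.getLen() + " bytes");
            }
        }
    }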


    }

    sink.stop();

    // loop through all the files generated and check their contents
    FileStatus[] dirStat = fs.listStatus(dirPath);
    Path[] fList = FileUtil.stat2Paths(dirStat);

    // check that the roll happened correctly for the given data
    // Note that we'll end up with two files with only a header
    long expectedFiles = totalEvents / rollCount;
View Full Code Here


    sink.process();
    Thread.sleep(500); // shouldn't be enough for a timeout to occur
    sink.process();
    sink.process();
    sink.stop();
    FileStatus[] dirStat = fs.listStatus(dirPath);
    Path[] fList = FileUtil.stat2Paths(dirStat);
    Assert.assertEquals("Incorrect content of the directory " + StringUtils.join(fList, ","),
      2, fList.length);
    Assert.assertTrue(!fList[0].getName().endsWith(".tmp") &&
      !fList[1].getName().endsWith(".tmp"));
View Full Code Here
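
This excerpt relies on the Flume HDFS sink's write-then-rename behavior: events are written to a file carrying a temporary suffix (".tmp" by default) and the file is renamed when it is closed, so after stop() no file name should still end with the suffix.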

    }

    sink.stop();

    // loop through all the files generated and check their contents
    FileStatus[] dirStat = fs.listStatus(dirPath);
    Path[] fList = FileUtil.stat2Paths(dirStat);

    // check that the roll happened correctly for the given data
    long expectedFiles = totalEvents / rollCount;
    if (totalEvents % rollCount > 0) expectedFiles++;
View Full Code Here
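
Taken together, the last two lines compute the ceiling of totalEvents / rollCount: one output file per completed roll, plus one more for any remainder of events.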

    String applicationBaseDir = springYarnProperties.getApplicationBaseDir();
    Path path = new Path(applicationBaseDir);
    FileSystem fs = path.getFileSystem(yarnConfiguration);
    FileStatus[] listStatus = new FileStatus[0];
    if (fs.exists(path)) {
      listStatus = fs.listStatus(path);
    }
    return ApplicationsReport.installedReportBuilder()
        .add(InstalledField.NAME)
        .add(InstalledField.PATH)
        .from(listStatus)
View Full Code Here
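
The exists() guard above matters because, on current Hadoop releases, listStatus throws FileNotFoundException for a missing path rather than returning an empty array. An equivalent sketch (the helper name listOrEmpty is hypothetical) catches the exception instead:

    // Hypothetical helper: treat a missing directory as an empty listing.
    // Requires java.io.FileNotFoundException and java.io.IOException.
    static FileStatus[] listOrEmpty(FileSystem fs, Path path) throws IOException {
        try {
            return fs.listStatus(path);
        } catch (FileNotFoundException e) {
            return new FileStatus[0];
        }
    }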

          for (FileStatus status : fileStatus) {
            results.put(status.getPath(), srcFs.getContentSummary(status.getPath()).getLength());
          }
        }
        else {
          FileStatus[] items = srcFs.listStatus(FileUtil.stat2Paths(fileStatus, srcPath));
          if (ObjectUtils.isEmpty(items) && (!srcFs.exists(srcPath))) {
            throw new HadoopException("Cannot access " + src + ": No such file or directory.");
          }
          for (FileStatus status : items) {
            Long size = (status.isDir() ? srcFs.getContentSummary(status.getPath()).getLength() : status.getLen());
View Full Code Here
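
The branch on status.isDir() distinguishes directories, whose reported size comes from getContentSummary() and covers the whole subtree, from plain files, where getLen() suffices. Note that isDir() is deprecated in favor of isDirectory(); a minimal sketch of the same logic (the helper name sizeOf is hypothetical):

    // Hypothetical helper: size of an entry, summing subtrees for directories.
    static long sizeOf(FileSystem fs, FileStatus status) throws IOException {
        return status.isDirectory()
                ? fs.getContentSummary(status.getPath()).getLength()
                : status.getLen();
    }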

            te.printStackTrace();
            logger.error("Error thrown while trying to get Hadoop filesystem");
            System.exit(-1);
        }

        FileStatus status = fs.listStatus(p)[0];
        long size = status.getLen();
        HdfsFetcher fetcher = new HdfsFetcher(null,
                                              maxBytesPerSec,
                                              VoldemortConfig.REPORTING_INTERVAL_BYTES,
                                              VoldemortConfig.DEFAULT_BUFFER_SIZE,
View Full Code Here
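
Note that fs.listStatus(p)[0] assumes the listing is non-empty; an empty directory would make the index expression throw ArrayIndexOutOfBoundsException, so defensive code should check the array length first.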

    private List<FileStatus> getAllSubFileStatus(JobConf inputConf, Path filterMemberPath)
            throws IOException {
        List<FileStatus> list = new ArrayList<FileStatus>();

        FileSystem fs = filterMemberPath.getFileSystem(inputConf);
        FileStatus[] subFiles = fs.listStatus(filterMemberPath);

        if(null != subFiles) {
            if(fs.isDirectory(filterMemberPath)) {
                for(FileStatus subFile: subFiles) {
                    if(!HadoopUtils.shouldPathBeIgnored(subFile.getPath())) {
View Full Code Here
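
The null check on subFiles is a defensive habit: some older FileSystem implementations returned null from listStatus for a nonexistent path, while current releases throw FileNotFoundException instead.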
