Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem.createNewFile()
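
FileSystem.createNewFile(Path) creates a zero-length file at the given path and reports through its boolean return value whether the call created anything: it returns false if the path already exists, true otherwise, and other failures surface as an IOException. A minimal, self-contained sketch of the call, assuming only hadoop-common on the classpath (the class name and marker path below are illustrative, not taken from the examples that follow):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CreateNewFileSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);           // default FileSystem for this configuration
        Path marker = new Path("/tmp/example/.marker"); // illustrative path only

        // false means the file was already there; true means it was just created
        if (fs.createNewFile(marker)) {
          System.out.println("created " + marker);
        } else {
          System.out.println(marker + " already exists");
          fs.delete(marker, false);                     // non-recursive delete of the stale marker
        }
      }
    }

The examples below use this create/check/delete pattern to probe whether a directory is writable, to reset temporary dataset files, and to lay down marker or placeholder files in tests.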


    Path writable = new Path(this.errorDir, ".iswritable");
    if (!fs.createNewFile(writable)) {
      // Maybe this is a re-try... clear the flag and try again
      fs.delete(writable, false);
      if (!fs.createNewFile(writable))
        throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY,
            "Unable to write to " + this.errorDir);
    }
    fs.delete(writable, false);
   
View Full Code Here


    try {
      FileSystem fs = FileSystem.get(new URI(tmpStrDatasetPath), conf);
      // remove any dataset left over from a previous run, then recreate it as an empty file
      fs.delete(tmpDatasetPath, true);
      if (!fs.exists(tmpDatasetPath)) {
        fs.createNewFile(tmpDatasetPath);
      }

      BufferedReader br = new BufferedReader(new FileReader(strDataPath));
      String line = null;
      int count = 0;
View Full Code Here


    try {
      URI uri = new URI(strDataPath);
      FileSystem fs = FileSystem.get(uri, conf);
      fs.delete(dataPath, true);
      if (!fs.exists(dataPath)) {
        fs.createNewFile(dataPath);
        SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf,
            dataPath, LongWritable.class, VectorWritable.class);

        for (int i = 0; i < 1000; ++i) {
          VectorWritable vecWritable = new VectorWritable(trainingData[i % 4]);
View Full Code Here

    if (action.equalsIgnoreCase("setup")) {
      test.setup();
    } else if (action.equalsIgnoreCase("client")) {
      InetAddress addr = InetAddress.getLocalHost();
      String host = addr.getHostName();
      fs.createNewFile(new Path("/accumulo-scale/clients/" + host));
      test.client();
      fs.copyFromLocalFile(new Path("/tmp/scale.out"), new Path("/accumulo-scale/results/" + host));
    } else if (action.equalsIgnoreCase("teardown")) {
      test.teardown();
    }
View Full Code Here

      files.add(entry);
    }
    log.debug("tid " + tid + " importing " + files.size() + " files");

    Path writable = new Path(this.errorDir, ".iswritable");
    if (!fs.createNewFile(writable)) {
      // Maybe this is a re-try... clear the flag and try again
      fs.delete(writable, false);
      if (!fs.createNewFile(writable))
        throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY,
            "Unable to write to " + this.errorDir);
View Full Code Here


      for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
        Path file = new Path(familyDir,  String.valueOf(fid));
        Path sourceFile = new Path(rootDir, file);
        Path archiveFile = new Path(archiveDir, file);

        // create an empty file to stand in for a store file that will be archived
        fs.createNewFile(sourceFile);

        try {
          // Try to archive the file
          HFileArchiver.archiveRegion(fs, rootDir,
              sourceRegionDir.getParent(), sourceRegionDir);
View Full Code Here

        UUID.randomUUID().toString());
    fs.mkdirs(logDir);
    Thread thread = null;
    try {
      Path logFile = new Path(logDir, UUID.randomUUID().toString());
      fs.createNewFile(logFile);
      thread = new Thread() {
        public void run() {
          try {
            // this call will block because there are no SplitLogWorkers,
            // until the task znode is deleted below. Then the call will
View Full Code Here

      db = new Database();
      db.setName(TEST_DB1_NAME);
      dbLocation =
          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/_testDB_file_";
      FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), hiveConf);
      // create a plain file at the intended database location, so creating the database there is expected to fail
      fs.createNewFile(new Path(dbLocation));
      fs.deleteOnExit(new Path(dbLocation));
      db.setLocationUri(dbLocation);

      boolean createFailed = false;
      try {
View Full Code Here

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., owned by Oracle. Contact coftware#gmail.com.