Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem.create()
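
For orientation, a minimal self-contained sketch of the call is shown first; the class name, path, and configuration below are illustrative assumptions, not taken from any of the indexed snippets.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateExample {
  public static void main(String[] args) throws IOException {
    // Assumed: a default Configuration, which on a real cluster picks up
    // core-site.xml / hdfs-site.xml from the classpath.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // "demo.txt" is a hypothetical path used only for this sketch.
    Path demoPath = new Path("demo.txt");

    // create(Path, boolean) overwrites an existing file when the flag is true;
    // other overloads seen in the snippets below also take a buffer size.
    FSDataOutputStream out = fs.create(demoPath, true);
    try {
      out.writeBytes("hello\n");
    } finally {
      out.close();
    }
  }
}

The snippets that follow show the same call in context: test fixtures writing input files, shell scripts, index files, job-history and job-conf output, and checksummed local output.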


    try {
      Path inputDir = new Path(getHadoopTestDir(), "input");
      Path outputDir = new Path(getHadoopTestDir(), "output");

      fs.mkdirs(inputDir);
      Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
      writer.write("a\n");
      writer.write("b\n");
      writer.write("c\n");
      writer.close();
View Full Code Here


  @TestHadoop
  public void testGlobFilter() throws Exception {
    createHoopServer();

    FileSystem fs = FileSystem.get(getHadoopConf());
    fs.create(new Path("foo.txt")).close();

    String user = XTest.getHadoopUsers()[0];
    URL url = new URL(getJettyURL(), MessageFormat.format("/?user.name={0}&op=list&filter=f*", user));
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
View Full Code Here

        "else\n" +
        " while true\n do\n" +
        "  sleep 2\n" +
        " done\n" +
        "fi";
      DataOutputStream file = fs.create(scriptPath);
      file.writeBytes(script);
      file.close();

      // Set executable permissions on the script.
      new File(scriptPath.toUri().getPath()).setExecutable(true);
View Full Code Here

      // Reset both index files and reopen them for writing (overwrite = true).
      IndexUtils.truncate(fs, A);
      IndexUtils.truncate(fs, B);
      FSDataOutputStream outputA = fs.create(A, true);
      FSDataOutputStream outputB = fs.create(B, true);

      FileStatus[] filelist = fs.listStatus(dir);

      Long bytesRead = 0L;
View Full Code Here

      Path indexlinks = new Path(rtnpath, "indexLinks");
      if (iscopy) {
        if (lfs.exists(indexlinks)) {
          lfs.delete(indexlinks, true);
        }
        FSDataOutputStream outlinks = lfs.create(indexlinks);
        outlinks.write((storepath.toString() + "\r\n").getBytes());
        outlinks.close();
        completePath.mkdirs();
      }
View Full Code Here

           
            // create output stream for logging
            // in hadoop.job.history.user.location
            fs = userLogFile.getFileSystem(jobConf);
            out = fs.create(userLogFile, true, 4096);
            writer = new PrintWriter(out);
            fileManager.addWriter(jobId, writer);
          }
         
          ArrayList<PrintWriter> writers = fileManager.getWriters(jobId);
View Full Code Here

            jobFileOut.close();
          }
        }
        if (userLogDir != null) {
          fs = new Path(userLogDir).getFileSystem(jobConf);
          jobFileOut = fs.create(userJobFilePath);
          jobConf.writeXml(jobFileOut);
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Job conf for " + jobId + " stored at "
                    + jobFilePath + "and" + userJobFilePath );
View Full Code Here

  public void writeToFile(Path loc, JobConf job, Checksum crc)
      throws IOException {
    final FileSystem rfs = FileSystem.getLocal(job).getRaw();
    CheckedOutputStream chk = null;
    final FSDataOutputStream out = rfs.create(loc);
    try {
      if (crc != null) {
        crc.reset();
        chk = new CheckedOutputStream(out, crc);
        chk.write(buf.array());
View Full Code Here

    FileSystem fs = cluster.getFileSystem();
    Path filePath = new Path("/testExcludedNodes");

    // kill a datanode
    cluster.stopDataNode(AppendTestUtil.nextInt(3));
    OutputStream out = fs.create(filePath, true, 4096);
    out.write(20);

    try {
      out.close();
    } catch (Exception e) {
View Full Code Here
