Package org.apache.accumulo.server.fs

Examples of org.apache.accumulo.server.fs.VolumeManager
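
VolumeManager is the Accumulo server's wrapper around one or more Hadoop FileSystem volumes: callers ask it to resolve a Path to the Volume that owns it, then work with that volume's FileSystem directly. A minimal sketch of that round trip, using only calls that appear in the excerpts below (the path is hypothetical, and the enclosing method is assumed to be able to throw IOException):

    // Minimal sketch: resolve a path to its owning volume's FileSystem.
    VolumeManager fs = VolumeManagerImpl.get();
    Path path = new Path("/accumulo/tables/1/default_tablet/F0000000.rf");
    if (fs.exists(path)) {
      FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
      System.out.println(path + " is served by " + ns.getUri());
    }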


    // Split the optional comma-separated column list from the command-line options.
    String[] columnsTmp = new String[] {};
    if (opts.columns != null)
      columnsTmp = opts.columns.split(",");
    final String[] columns = columnsTmp;
   
    final VolumeManager fs = VolumeManagerImpl.get();
   
    Instance instance = opts.getInstance();
    final ServerConfiguration sconf = new ServerConfiguration(instance);
   
    String tableId = Tables.getNameToIdMap(instance).get(opts.tableName);
    // … (full example truncated)


  }

  //TODO Remove deprecation warning suppression when Hadoop1 support is dropped
  @SuppressWarnings("deprecation")
  private static void reportHdfsBlockLocations(List<FileRef> files) throws Exception {
    VolumeManager fs = VolumeManagerImpl.get();
   
    System.out.println("\t\tFile block report : ");
    for (FileRef file : files) {
      FileStatus status = fs.getFileStatus(file.path());
     
      if (status.isDir()) {
        // assume it is a map file
        status = fs.getFileStatus(new Path(file + "/data"));
      }
      FileSystem ns = fs.getVolumeByPath(file.path()).getFileSystem();
      BlockLocation[] locs = ns.getFileBlockLocations(status, 0, status.getLen());
     
      System.out.println("\t\t\tBlocks for : " + file);
     
      for (BlockLocation blockLocation : locs) {
        // … (full example truncated)
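
The loop body is cut off above. A plausible body (illustrative, not necessarily the original source; uses java.util.Arrays) prints each block's offset, length, and replica hosts — BlockLocation.getHosts() can throw IOException, which the method's throws Exception already covers:

        // Illustrative loop body: report where each HDFS block's replicas live.
        System.out.println("\t\t\t\toffset: " + blockLocation.getOffset()
            + " len: " + blockLocation.getLength()
            + " hosts: " + Arrays.toString(blockLocation.getHosts()));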

    // Stage the copy in a ".tmp" sibling of the destination, then rename it into place.
    Path orig = new Path(paths[0]);
    Path dest = new Path(paths[1]);
    Path tmp = new Path(dest.getParent(), dest.getName() + ".tmp");

    try {
      VolumeManager vm = VolumeManagerImpl.get(ServerConfiguration.getSiteConfiguration());
      FileSystem origFs = TraceFileSystem.wrap(vm.getVolumeByPath(orig).getFileSystem());
      FileSystem destFs = TraceFileSystem.wrap(vm.getVolumeByPath(dest).getFileSystem());
     
      FileUtil.copy(origFs, orig, destFs, tmp, false, true, CachedConfiguration.getInstance());
      destFs.rename(tmp, dest);
      log.debug("copied " + orig + " to " + dest);
    } catch (IOException ex) {
      try {
        VolumeManager vm = VolumeManagerImpl.get(ServerConfiguration.getSiteConfiguration());
        FileSystem destFs = TraceFileSystem.wrap(vm.getVolumeByPath(dest).getFileSystem());
        destFs.create(dest).close();
        log.warn(" marked " + dest + " failed", ex);
      } catch (IOException e) {
        log.error("Unable to create failure flag file " + dest, e);
      }
      // … (full example truncated)
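
The idiom above keeps readers of dest from ever observing a partially copied file: the bytes land in a ".tmp" sibling first, and the rename makes them visible all at once. A reusable sketch of the same idiom (method name illustrative; assumes the usual Hadoop imports: FileSystem, FileUtil, Path, Configuration), which also checks the rename result that the snippet ignores:

    static void copyThenRename(FileSystem srcFs, Path src, FileSystem dstFs, Path dst,
        Configuration conf) throws IOException {
      Path tmp = new Path(dst.getParent(), dst.getName() + ".tmp");
      // Stage the complete copy first; overwrite any stale tmp from an earlier attempt.
      FileUtil.copy(srcFs, src, dstFs, tmp, false, true, conf);
      // Rename is atomic within a single HDFS volume, so dst appears all at once.
      if (!dstFs.rename(tmp, dst))
        throw new IOException("rename " + tmp + " to " + dst + " failed");
    }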

    sb.append("</table>\n");
  }

  private void doAccumuloTable(StringBuilder sb) throws IOException {
    // Accumulo
    VolumeManager vm = VolumeManagerImpl.get(ServerConfiguration.getSiteConfiguration());
    MasterMonitorInfo info = Monitor.getMmi();
    sb.append("<table>\n");
    sb.append("<tr><th colspan='2'><a href='/master'>Accumulo Master</a></th></tr>\n");
    if (info == null) {
      sb.append("<tr><td colspan='2'><span class='error'>Master is Down</span></td></tr>\n");
    } else {
      long totalAcuBytesUsed = 0L;
      long totalHdfsBytesUsed = 0L;

      try {
        for (String baseDir : VolumeConfiguration.getVolumeUris(ServerConfiguration.getSiteConfiguration())) {
          final Path basePath = new Path(baseDir);
          final FileSystem fs = vm.getVolumeByPath(basePath).getFileSystem();

          try {
            // Calculate the amount of space used by Accumulo on the FileSystem
            ContentSummary accumuloSummary = fs.getContentSummary(basePath);
            long bytesUsedByAcuOnFs = accumuloSummary.getSpaceConsumed();
            // … (full example truncated)
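
Note that getSpaceConsumed() reports raw bytes across all HDFS replicas, while getLength() gives the logical size. A quick sketch of the distinction, reusing the Hadoop FileSystem fs from above (the path is hypothetical):

    // Logical vs. raw (replicated) usage from a single ContentSummary.
    ContentSummary summary = fs.getContentSummary(new Path("/accumulo"));
    long logicalBytes = summary.getLength();        // sum of file lengths
    long rawBytes = summary.getSpaceConsumed();     // counts every replica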

    TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();

    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    Text row = extent.getMetadataEntry();
    VolumeManager fs = VolumeManagerImpl.get();

    // End key just past the last DataFileColumnFamily entry in this row.
    Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text(""));
    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);

    mdScanner.setRange(new Range(new Key(row), endKey));
    // … (full example truncated)
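
The scan above is cut off before it is consumed. A plausible continuation (illustrative, not necessarily the original source; assumes java.util.Map.Entry is imported): each entry under DataFileColumnFamily names one data file in its column qualifier, and its value decodes to the file's size and entry count:

    // Plausible continuation: decode each data-file entry into the sizes map.
    for (Entry<Key,Value> entry : mdScanner) {
      sizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
    }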

  public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(Credentials credentials, KeyExtent extent) throws KeeperException,
      InterruptedException, IOException {
    ArrayList<LogEntry> result = new ArrayList<LogEntry>();
    TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();

    VolumeManager fs = VolumeManagerImpl.get();
    if (extent.isRootTablet()) {
      getRootLogEntries(result);
      Path rootDir = new Path(getRootTabletDir());
      FileStatus[] files = fs.listStatus(rootDir);
      for (FileStatus fileStatus : files) {
        if (fileStatus.getPath().toString().endsWith("_tmp")) {
          continue; // skip files that are still being written
        }
        DataFileValue dfv = new DataFileValue(0, 0);
        // … (full example truncated)

  }

  public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException {
    List<FileRef> result = new ArrayList<FileRef>();
    try {
      VolumeManager fs = VolumeManagerImpl.get();
      Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY));
      mscanner.setRange(extent.toMetadataRange());
      mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
      for (Entry<Key,Value> entry : mscanner) {
        if (Long.parseLong(entry.getValue().toString()) == tid) {
          // … (full example truncated)
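
The conditional's body is cut off above; a plausible continuation (illustrative, not necessarily the original source) records each file whose bulk-load transaction id matches tid:

          // Plausible continuation: this entry was loaded by transaction tid.
          result.add(new FileRef(fs, entry.getKey()));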

  public static Map<FileRef,Long> getBulkFilesLoaded(Credentials credentials, KeyExtent extent) throws IOException {
    Text metadataRow = extent.getMetadataEntry();
    Map<FileRef,Long> ret = new HashMap<FileRef,Long>();

    VolumeManager fs = VolumeManagerImpl.get();
    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY);
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key,Value> entry : scanner) {
      Long tid = Long.parseLong(entry.getValue().toString());
      // … (full example truncated)

    log.debug(" tid " + tid + " sourceDir " + sourceDir);
   
    Utils.getReadLock(tableId, tid).lock();
   
    // check that the error directory exists and is empty
    VolumeManager fs = master.getFileSystem();
   
    Path errorPath = new Path(errorDir);
    FileStatus errorStatus = null;
    try {
      errorStatus = fs.getFileStatus(errorPath);
    } catch (FileNotFoundException ex) {
      // ignored
    }
    if (errorStatus == null)
      throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir
          + " does not exist");
    if (!errorStatus.isDir())
      throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir
          + " is not a directory");
    if (fs.listStatus(errorPath).length != 0)
      throw new ThriftTableOperationException(tableId, null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir
          + " is not empty");
   
    ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
   
    // … (full example truncated)
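
The three checks fail fast with BULK_BAD_ERROR_DIRECTORY because the import will later write per-file failure output into errorDir — presumably requiring it to start out empty so that anything found there afterwards is unambiguously a result of this import.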

 
  @Override
  public Repo<Master> call(long tid, Master master) throws Exception {
    // This needs to execute after the arbiter is stopped
   
    VolumeManager fs = master.getFileSystem();
   
    if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT)))
      return new CleanUpBulkImport(tableId, source, bulk, error);
   
    HashMap<String,String> failures = new HashMap<String,String>();
    HashMap<String,String> loadedFailures = new HashMap<String,String>();
   
    FSDataInputStream failFile = fs.open(new Path(error, BulkImport.FAILURES_TXT));
    BufferedReader in = new BufferedReader(new InputStreamReader(failFile, Constants.UTF8));
    try {
      String line = null;
      while ((line = in.readLine()) != null) {
        Path path = new Path(line);
        if (!fs.exists(new Path(error, path.getName())))
          failures.put("/" + path.getParent().getName() + "/" + path.getName(), line);
      }
    } finally {
      failFile.close();
    }
   
    /*
     * I thought I could move files that have no file references in the table. However, it's possible a clone references a file. Therefore only move files
     * that have no loaded markers.
     */
   
    // determine which failed files were loaded
    Connector conn = master.getConnector();
    Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
   
    for (Entry<Key,Value> entry : mscanner) {
      if (Long.parseLong(entry.getValue().toString()) == tid) {
        String loadedFile = entry.getKey().getColumnQualifier().toString();
        String absPath = failures.remove(loadedFile);
        if (absPath != null) {
          loadedFailures.put(loadedFile, absPath);
        }
      }
    }
   
    // move failed files that were not loaded
    for (String failure : failures.values()) {
      Path orig = new Path(failure);
      Path dest = new Path(error, orig.getName());
      fs.rename(orig, dest);
      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
    }
   
    if (loadedFailures.size() > 0) {
      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID()
          + Constants.ZBULK_FAILED_COPYQ);
     
      HashSet<String> workIds = new HashSet<String>();
     
      for (String failure : loadedFailures.values()) {
        Path orig = new Path(failure);
        Path dest = new Path(error, orig.getName());
       
        if (fs.exists(dest))
          continue;
       
        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(Constants.UTF8));
        workIds.add(orig.getName());
        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
      }
     
      bifCopyQueue.waitUntilDone(workIds);
    }
   
    fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT));
    return new CleanUpBulkImport(tableId, source, bulk, error);
  }
  // … (full example truncated)
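
Note the two-way split driven by the loaded markers: failed files with no markers are simply renamed into the error directory, while failed files that do carry loaded markers may still be referenced (for example by a cloned table), so they are copied through the DistributedWorkQueue rather than moved, and FAILURES_TXT is deleted only after every queued copy completes.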
