Package org.apache.hadoop.hive.metastore.api

Examples of org.apache.hadoop.hive.metastore.api.Index
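
The snippets below, drawn from the Hive and HCatalog code bases, trace the life cycle of a metastore Index: looking one up, altering its properties, converting it from the persistence layer, creating it, and validating it before a write or a drop.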


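Server-side lookup inside the metastore: the RawStore result is checked and a missing index becomes a NoSuchObjectException: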

    private Index get_index_by_name_core(final RawStore ms, final String db_name,
        final String tbl_name, final String index_name)
        throws MetaException, NoSuchObjectException, TException {
      Index index = ms.getIndex(db_name, tbl_name, index_name);

      if (index == null) {
        throw new NoSuchObjectException(db_name + "." + tbl_name
            + " index=" + index_name + " not found");
      }
      return index;
    }
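
Seen from a client, the same lookup is a single call on HiveMetaStoreClient. A minimal sketch, assuming a reachable metastore and a hypothetical index sales_idx on table default.sales (all names are illustrative only):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Index;

public class IndexLookupSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // Throws NoSuchObjectException for a missing index, mirroring the
      // server-side check above.
      Index idx = client.getIndex("default", "sales", "sales_idx");
      System.out.println(idx.getIndexName() + " handled by "
          + idx.getIndexHandlerClass() + ", deferredRebuild="
          + idx.isDeferredRebuild());
    } finally {
      client.close();
    }
  }
}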


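Altering an existing index (DDLTask style): ADDPROPS merges new properties into the index, while UPDATETIMESTAMP records the HDFS modification time of each base partition in the index parameters: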
  private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException {
    String dbName = alterIndex.getDbName();
    String baseTableName = alterIndex.getBaseTableName();
    String indexName = alterIndex.getIndexName();
    Index idx = db.getIndex(dbName, baseTableName, indexName);

    switch(alterIndex.getOp()) {
      case ADDPROPS:
        idx.getParameters().putAll(alterIndex.getProps());
        break;
      case UPDATETIMESTAMP:
        try {
          Map<String, String> props = new HashMap<String, String>();
          Map<Map<String, String>, Long> basePartTs = new HashMap<Map<String, String>, Long>();

          Table baseTbl = db.getTable(SessionState.get().getCurrentDatabase(),
              baseTableName);

          if (baseTbl.isPartitioned()) {
            List<Partition> baseParts;
            if (alterIndex.getSpec() != null) {
              baseParts = db.getPartitions(baseTbl, alterIndex.getSpec());
            } else {
              baseParts = db.getPartitions(baseTbl);
            }
            if (baseParts != null) {
              for (Partition p : baseParts) {
                FileSystem fs = p.getPartitionPath().getFileSystem(db.getConf());
                FileStatus fss = fs.getFileStatus(p.getPartitionPath());
                basePartTs.put(p.getSpec(), fss.getModificationTime());
              }
            }
          } else {
            FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf());
            FileStatus fss = fs.getFileStatus(baseTbl.getPath());
            basePartTs.put(null, fss.getModificationTime());
          }
          for (Entry<Map<String, String>, Long> entry : basePartTs.entrySet()) {
            if (entry.getKey() != null) {
              props.put(entry.getKey().toString(), entry.getValue().toString());
            } else {
              props.put("base_timestamp", entry.getValue().toString());
            }
          }
          idx.getParameters().putAll(props);
        } catch (HiveException e) {
          throw new HiveException("ERROR: Failed to update index timestamps", e);
        } catch (IOException e) {
          throw new HiveException("ERROR: Failed to look up timestamps on filesystem", e);
        }

        break;
      default:
        console.printError("Unsupported Alter command");
        return 1;
    }

    // set last modified by properties
    if (!updateModifiedParameters(idx.getParameters(), conf)) {
      return 1;
    }

    try {
      db.alterIndex(dbName, baseTableName, indexName, idx);
    } catch (HiveException e) {
      console.printError("Invalid alter operation with the index " + indexName);
      return 1;
    }
    return 0;
  }

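HCatalog's output guard: writing into a table from Pig/MapReduce is refused unless every index on it was created WITH DEFERRED REBUILD: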
      // ... (resolution of `client` and `table` for outputJobInfo's database
      // and table is elided from this excerpt)

      List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);

      for (String indexName : indexList) {
        Index index = client.getIndex(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), indexName);
        if (!index.isDeferredRebuild()) {
          throw new HCatException(ErrorType.ERROR_NOT_SUPPORTED, "Storing into a table with an automatic index from Pig/MapReduce is not supported");
        }
      }
      StorageDescriptor sd = table.getTTable().getSd();
      // ...

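ObjectStore.getIndex: the JDO-backed MIndex is read inside a transaction and converted to the Thrift Index: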
  @Override
  public Index getIndex(String dbName, String origTableName, String indexName)
      throws MetaException {
    openTransaction();
    MIndex mIndex = this.getMIndex(dbName, origTableName, indexName);
    Index ret = convertToIndex(mIndex);
    commitTransaction();
    return ret;
  }

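The conversion itself uses the ten-argument Index constructor: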
  private Index convertToIndex(MIndex mIndex) throws MetaException {
    if (mIndex == null) {
      return null;
    }

    return new Index(
        mIndex.getIndexName(),
        mIndex.getIndexHandlerClass(),
        mIndex.getOrigTable().getDatabase().getName(),
        mIndex.getOrigTable().getTableName(),
        mIndex.getCreateTime(),
        mIndex.getLastAccessTime(),
        mIndex.getIndexTable().getTableName(),
        convertToStorageDescriptor(mIndex.getSd()),
        mIndex.getParameters(),
        mIndex.getDeferredRebuild());
  }

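Hive.createIndex does the heavy lifting at creation time: it rejects duplicates and views, derives the index table's storage descriptor from the base table, and registers the definition with the metastore: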
  public void createIndex(String tableName, String indexName, String indexHandlerClass,
      List<String> indexedCols, String indexTblName, boolean deferredRebuild,
      String inputFormat, String outputFormat, String serde,
      String storageHandler, String location,
      Map<String, String> idxProps, Map<String, String> tblProps,
      Map<String, String> serdeProps, String collItemDelim, String fieldDelim,
      String fieldEscape, String lineDelim, String mapKeyDelim, String indexComment)
      throws HiveException {

    try {
      String dbName = SessionState.get().getCurrentDatabase();
      Index old_index = null;
      try {
        old_index = getIndex(dbName, tableName, indexName);
      } catch (Exception e) {
        // ignore: a missing index is expected here; old_index stays null
      }
      if (old_index != null) {
        throw new HiveException("Index " + indexName + " already exists on table " + tableName + ", db=" + dbName);
      }

      org.apache.hadoop.hive.metastore.api.Table baseTbl = getMSC().getTable(dbName, tableName);
      if (TableType.VIRTUAL_VIEW.toString().equals(baseTbl.getTableType())) {
        throw new HiveException("tableName="+ tableName +" is a VIRTUAL VIEW. Index on VIRTUAL VIEW is not supported.");
      }

      if (indexTblName == null) {
        indexTblName = MetaStoreUtils.getIndexTableName(dbName, tableName, indexName);
      } else {
        org.apache.hadoop.hive.metastore.api.Table temp = null;
        try {
          temp = getMSC().getTable(dbName, indexTblName);
        } catch (Exception e) {
          // ignore: a missing table means the chosen index table name is free
        }
        if (temp != null) {
          throw new HiveException("Table name " + indexTblName + " already exists. Choose another name.");
        }
      }

      org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor = baseTbl.getSd().deepCopy();
      SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo();
      if(serde != null) {
        serdeInfo.setSerializationLib(serde);
      } else {
        if (storageHandler == null) {
          serdeInfo.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
        } else {
          HiveStorageHandler sh = HiveUtils.getStorageHandler(getConf(), storageHandler);
          String serDeClassName = sh.getSerDeClass().getName();
          serdeInfo.setSerializationLib(serDeClassName);
        }
      }

      if (fieldDelim != null) {
        serdeInfo.getParameters().put(FIELD_DELIM, fieldDelim);
        serdeInfo.getParameters().put(SERIALIZATION_FORMAT, fieldDelim);
      }
      if (fieldEscape != null) {
        serdeInfo.getParameters().put(ESCAPE_CHAR, fieldEscape);
      }
      if (collItemDelim != null) {
        serdeInfo.getParameters().put(COLLECTION_DELIM, collItemDelim);
      }
      if (mapKeyDelim != null) {
        serdeInfo.getParameters().put(MAPKEY_DELIM, mapKeyDelim);
      }
      if (lineDelim != null) {
        serdeInfo.getParameters().put(LINE_DELIM, lineDelim);
      }

      if (serdeProps != null) {
        serdeInfo.getParameters().putAll(serdeProps);
      }

      storageDescriptor.setLocation(null);
      if (location != null) {
        storageDescriptor.setLocation(location);
      }
      storageDescriptor.setInputFormat(inputFormat);
      storageDescriptor.setOutputFormat(outputFormat);

      Map<String, String> params = new HashMap<String,String>();

      List<FieldSchema> indexTblCols = new ArrayList<FieldSchema>();
      List<Order> sortCols = new ArrayList<Order>();
      storageDescriptor.setBucketCols(null);
      int k = 0;
      Table metaBaseTbl = new Table(baseTbl);
      for (int i = 0; i < metaBaseTbl.getCols().size(); i++) {
        FieldSchema col = metaBaseTbl.getCols().get(i);
        if (indexedCols.contains(col.getName())) {
          indexTblCols.add(col);
          sortCols.add(new Order(col.getName(), 1));
          k++;
        }
      }
      if (k != indexedCols.size()) {
        throw new RuntimeException(
            "Check the index columns, they should appear in the table being indexed.");
      }

      storageDescriptor.setCols(indexTblCols);
      storageDescriptor.setSortCols(sortCols);

      int time = (int) (System.currentTimeMillis() / 1000);
      org.apache.hadoop.hive.metastore.api.Table tt = null;
      HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass);

      if (indexHandler.usesIndexTable()) {
        tt = new org.apache.hadoop.hive.ql.metadata.Table(dbName, indexTblName).getTTable();
        List<FieldSchema> partKeys = baseTbl.getPartitionKeys();
        tt.setPartitionKeys(partKeys);
        tt.setTableType(TableType.INDEX_TABLE.toString());
        if (tblProps != null) {
          for (Entry<String, String> prop : tblProps.entrySet()) {
            tt.putToParameters(prop.getKey(), prop.getValue());
          }
        }
      }

      if(!deferredRebuild) {
        throw new RuntimeException("Please specify deferred rebuild using \" WITH DEFERRED REBUILD \".");
      }

      Index indexDesc = new Index(indexName, indexHandlerClass, dbName, tableName, time, time, indexTblName,
          storageDescriptor, params, deferredRebuild);
      if (indexComment != null) {
        indexDesc.getParameters().put("comment", indexComment);
      }

      if (idxProps != null) {
        indexDesc.getParameters().putAll(idxProps);
      }

      indexHandler.analyzeIndexDefinition(baseTbl, indexDesc, tt);

      this.getMSC().createIndex(indexDesc, tt);
    } catch (Exception e) {
      throw new HiveException(e);
    }
  }
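
For reference, a hedged sketch of just the descriptor construction, using the same ten-argument Index constructor as the snippet above. All names are hypothetical (including the index table name, which only imitates the usual naming convention), and sd stands in for a storage descriptor copied from the base table:

import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class IndexDescriptorSketch {
  static Index buildIndexDesc(StorageDescriptor sd) {
    int now = (int) (System.currentTimeMillis() / 1000L);
    return new Index(
        "sales_idx",                                    // indexName
        "org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler",
        "default",                                      // dbName
        "sales",                                        // origTableName
        now,                                            // createTime
        now,                                            // lastAccessTime
        "default__sales_sales_idx__",                   // index table name (illustrative)
        sd,                                             // storage descriptor
        new HashMap<String, String>(),                  // parameters
        true);                                          // deferredRebuild
  }
}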

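On the DROP INDEX path, the semantic analyzer probes for the index up front unless IF EXISTS or the drop-ignores-nonexistent setting applies: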
    // signal an error if the index doesn't exist and we're
    // configured not to ignore this
    boolean throwException =
        !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
    if (throwException) {
      try {
        Index idx = db.getIndex(tableName, indexName); // existence probe; throws if missing
      } catch (HiveException e) {
        throw new SemanticException(ErrorMsg.INVALID_INDEX.getMsg(indexName));
      }
    }
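
The client-side counterpart to this validation is a guarded drop. A sketch reusing the hypothetical names and the `client` from the lookup example above (the final flag also deletes the index data):

// listIndexNames takes the maximum number of names to return.
if (client.listIndexNames("default", "sales", Short.MAX_VALUE).contains("sales_idx")) {
  client.dropIndex("default", "sales", "sales_idx", true);
}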

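Building the rebuild tasks for ALTER INDEX ... REBUILD starts from the Index definition, its index table, the base table, and the handler class: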
  private List<Task<?>> getIndexBuilderMapRed(String baseTableName, String indexName,
      HashMap<String, String> partSpec) throws SemanticException {
    try {
      String dbName = SessionState.get().getCurrentDatabase();
      Index index = db.getIndex(dbName, baseTableName, indexName);
      Table indexTbl = getTable(index.getIndexTableName());
      String baseTblName = index.getOrigTableName();
      Table baseTbl = getTable(baseTblName);

      String handlerCls = index.getIndexHandlerClass();
      HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls);

      List<Partition> indexTblPartitions = null;
      List<Partition> baseTblPartitions = null;
      if (indexTbl != null) {
        // ...

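On the metastore server, add_index is the Thrift entry point, with the usual start/end bookkeeping around add_index_core: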
    @Override
    public Index add_index(final Index newIndex, final Table indexTable)
        throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
      startFunction("add_index", ": " + newIndex.toString() + " " + indexTable.toString());
      Index ret = null;
      Exception ex = null;
      try {
        ret = add_index_core(getMS(), newIndex, indexTable);
      } catch (Exception e) {
        ex = e;
        // ...

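add_index_core itself opens a transaction and first probes for a pre-existing index of the same name: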
      boolean success = false, indexTableCreated = false;

      try {
        ms.openTransaction();
        Index old_index = null;
        try {
          old_index = get_index_by_name(index.getDbName(), index
              .getOrigTableName(), index.getIndexName());
        } catch (Exception e) {
          // ignore: no pre-existing index with this name, so the add can proceed
        }
        // ...
