Examples of org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject


Examples of org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject

   */
  @Override
  public void processOp(Object row, int tag) throws HiveException {
    byte alias = (byte)tag;
    // compute keys and values as StandardObjects. Use non-optimized key (MR).
    MapJoinKey key = MapJoinKey.readFromRow(null, new MapJoinKeyObject(),
        row, joinKeys[alias], joinKeysObjectInspectors[alias], true);
    Object[] value = EMPTY_OBJECT_ARRAY;
    if((hasFilter(alias) && filterMaps[alias].length > 0) || joinValues[alias].size() > 0) {
      value = JoinUtil.computeMapJoinValues(row, joinValues[alias],
        joinValuesObjectInspectors[alias], joinFilters[alias], joinFilterObjectInspectors[alias],
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject

    TezCacheAccess tezCacheAccess = TezCacheAccess.createInstance(hconf);
    // We only check if we can use optimized keys here; that is ok because we don't
    // create optimized keys in MapJoin if hash map doesn't have optimized keys.
    if (!HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDKEYS)) {
      lastKey = new MapJoinKeyObject();
    }
    Output output = new Output(); // Reusable output for serialization.
    for (int pos = 0; pos < mapJoinTables.length; pos++) {
      if (pos == desc.getPosBigTable()) {
        continue;
      }

      String inputName = parentToInput.get(pos);
      LogicalInput input = tezContext.getInput(inputName);

      try {
        KeyValueReader kvReader = (KeyValueReader) input.getReader();

        MapJoinTableContainer tableContainer = new HashMapWrapper(hashTableThreshold,
            hashTableLoadFactor);

        // simply read all the kv pairs into the hashtable.

        while (kvReader.next()) {
          // We pass key in as reference, to find out quickly if optimized keys can be used.
          // However, we do not reuse the object since we are putting them into the hashmap.
          lastKey = MapJoinKey.read(output, lastKey, mapJoinTableSerdes[pos].getKeyContext(),
              (Writable)kvReader.getCurrentKey(), false);

          LazyFlatRowContainer values = (LazyFlatRowContainer)tableContainer.get(lastKey);
          if (values == null) {
            values = new LazyFlatRowContainer();
            tableContainer.put(lastKey, values);
          }
          values.add(mapJoinTableSerdes[pos].getValueContext(),
              (BytesWritable)kvReader.getCurrentValue(), useLazyRows);
        }

        mapJoinTables[pos] = tableContainer;
      } catch (IOException e) {
        throw new HiveException(e);
      } catch (SerDeException e) {
        throw new HiveException(e);
      } catch (Exception e) {
        throw new HiveException(e);
      }
      // Register that the Input has been cached.
      LOG.info("Is this a bucket map join: " + desc.isBucketMapJoin());
      // cache is disabled for bucket map join because of the same reason
      // given in loadHashTable in MapJoinOperator.
      if (!desc.isBucketMapJoin()) {
        tezCacheAccess.registerCachedInput(inputName);
        LOG.info("Setting Input: " + inputName + " as cached");
      }
    }
    if (lastKey == null) {
      lastKey = new MapJoinKeyObject(); // No rows in tables, the key type doesn't matter.
    }
  }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject

    // compute keys and values as StandardObjects. Use non-optimized key (MR).
    Object[] currentKey = new Object[joinKeys[alias].size()];
    for (int keyIndex = 0; keyIndex < joinKeys[alias].size(); ++keyIndex) {
      currentKey[keyIndex] = joinKeys[alias].get(keyIndex).evaluate(row);
    }
    MapJoinKeyObject key = new MapJoinKeyObject();
    key.readFromRow(currentKey, joinKeysObjectInspectors[alias]);

    Object[] value = EMPTY_OBJECT_ARRAY;
    if((hasFilter(alias) && filterMaps[alias].length > 0) || joinValues[alias].size() > 0) {
      value = JoinUtil.computeMapJoinValues(row, joinValues[alias],
        joinValuesObjectInspectors[alias], joinFilters[alias], joinFilterObjectInspectors[alias],
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject

    TezCacheAccess tezCacheAccess = TezCacheAccess.createInstance(hconf);
    // We only check if we can use optimized keys here; that is ok because we don't
    // create optimized keys in MapJoin if hash map doesn't have optimized keys.
    if (!HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDKEYS)) {
      lastKey = new MapJoinKeyObject();
    }
    Output output = new Output(); // Reusable output for serialization.
    for (int pos = 0; pos < mapJoinTables.length; pos++) {
      if (pos == desc.getPosBigTable()) {
        continue;
      }

      String inputName = parentToInput.get(pos);
      LogicalInput input = tezContext.getInput(inputName);

      try {
        KeyValueReader kvReader = (KeyValueReader) input.getReader();

        MapJoinTableContainer tableContainer = new HashMapWrapper(hashTableThreshold,
            hashTableLoadFactor);

        // simply read all the kv pairs into the hashtable.

        while (kvReader.next()) {
          // We pass key in as reference, to find out quickly if optimized keys can be used.
          // However, we do not reuse the object since we are putting them into the hashmap.
          lastKey = MapJoinKey.read(output, lastKey, mapJoinTableSerdes[pos].getKeyContext(),
              (Writable)kvReader.getCurrentKey(), false);

          LazyFlatRowContainer values = (LazyFlatRowContainer)tableContainer.get(lastKey);
          if (values == null) {
            values = new LazyFlatRowContainer();
            tableContainer.put(lastKey, values);
          }
          values.add(mapJoinTableSerdes[pos].getValueContext(),
              (BytesWritable)kvReader.getCurrentValue(), useLazyRows);
        }

        mapJoinTables[pos] = tableContainer;
      } catch (IOException e) {
        throw new HiveException(e);
      } catch (SerDeException e) {
        throw new HiveException(e);
      } catch (Exception e) {
        throw new HiveException(e);
      }
      // Register that the Input has been cached.
      LOG.info("Is this a bucket map join: " + desc.isBucketMapJoin());
      // cache is disabled for bucket map join because of the same reason
      // given in loadHashTable in MapJoinOperator.
      if (!desc.isBucketMapJoin()) {
        tezCacheAccess.registerCachedInput(inputName);
        LOG.info("Setting Input: " + inputName + " as cached");
      }
    }
    if (lastKey == null) {
      lastKey = new MapJoinKeyObject(); // No rows in tables, the key type doesn't matter.
    }
  }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.persistence.MapJoinKeyObject

   */
  @Override
  public void processOp(Object row, int tag) throws HiveException {
    byte alias = (byte)tag;
    // compute keys and values as StandardObjects. Use non-optimized key (MR).
    MapJoinKey key = MapJoinKey.readFromRow(null, new MapJoinKeyObject(),
        row, joinKeys[alias], joinKeysObjectInspectors[alias], true);
    Object[] value = EMPTY_OBJECT_ARRAY;
    if((hasFilter(alias) && filterMaps[alias].length > 0) || joinValues[alias].size() > 0) {
      value = JoinUtil.computeMapJoinValues(row, joinValues[alias],
        joinValuesObjectInspectors[alias], joinFilters[alias], joinFilterObjectInspectors[alias],
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle, Inc. Contact coftware#gmail.com.