Package org.voltdb.plannodes

Examples of org.voltdb.plannodes.HashAggregatePlanNode


        Collection<HashAggregatePlanNode> nodes = PlanNodeUtil.getPlanNodes(rootNode, HashAggregatePlanNode.class);
        if (nodes.size() != 1) {
            if (debug.val) LOG.debug("SKIP - Not an aggregate query plan");
            return Pair.of(false, rootNode);
        }
        final HashAggregatePlanNode node = CollectionUtil.first(nodes);
       
        // Skip single-partition query plans
        if (PlanNodeUtil.isDistributedQuery(rootNode) == false) {
            if (debug.val) LOG.debug("SKIP - Not a distributed query plan");
            return (Pair.of(false, rootNode));
        }
// // Right now, Can't do averages
// for (ExpressionType et: node.getAggregateTypes()) {
// if (et.equals(ExpressionType.AGGREGATE_AVG)) {
// if (debug.val) LOG.debug("SKIP - Right now can't optimize AVG()");
// return (Pair.of(false, rootNode));
// }
// }
       
        // Get the AbstractScanPlanNode that is directly below us
        Collection<AbstractScanPlanNode> scans = PlanNodeUtil.getPlanNodes(node, AbstractScanPlanNode.class);
        if (debug.val) LOG.debug("<ScanPlanNodes>: "+ scans);
        if (scans.size() != 1) {
            if (debug.val)
                LOG.debug("SKIP - Multiple scans!");
            return (Pair.of(false, rootNode));
        }
       
        if (debug.val) LOG.debug("Trying to apply Aggregate pushdown optimization!");
        AbstractScanPlanNode scan_node = CollectionUtil.first(scans);
        assert (scan_node != null);
       
// // For some reason we have to do this??
// for (int col = 0, cnt = scan_node.getOutputColumnGUIDs().size(); col < cnt; col++) {
// int col_guid = scan_node.getOutputColumnGUIDs().get(col);
// assert (state.plannerContext.get(col_guid) != null) : "Failed [" + col_guid + "]";
// // PlanColumn retval = new PlanColumn(guid, expression, columnName,
// // sortOrder, storage);
// } // FOR
       
        // Skip if we're already directly after the scan (meaning no network traffic)
        if (scan_node.getParent(0).equals(node)) {
            if (debug.val)
                LOG.debug("SKIP - Aggregate does not need to be distributed");
            return (Pair.of(false, rootNode));
        }
       
        // Check if this is COUNT(DISTINCT) query
        // If it is then we can only pushdown the DISTINCT
        AbstractPlanNode clone_node = null;
        if (node.getAggregateTypes().contains(ExpressionType.AGGREGATE_COUNT)) {
            for (AbstractPlanNode child : node.getChildren()) {
                if (child.getClass().equals(DistinctPlanNode.class)) {
                    try {
                        clone_node = (AbstractPlanNode) child.clone(false, true);
                    } catch (CloneNotSupportedException ex) {
                        throw new RuntimeException(ex);
                    }
                    state.markDirty(clone_node);
                    break;
                }
            } // FOR
        }
       
        // Note that we don't actually want to move the existing aggregate. We just
        // want to clone it and then attach it down below the SEND/RECEIVE so
        // that we calculate the aggregates in parallel
        if (clone_node == null) {
            clone_node = this.cloneAggregatePlanNode(node);
        }
        assert (clone_node != null);
       
        // But this means we have to also update the RECEIVE to only expect the
        // columns that the AggregateNode will be sending along
        ReceivePlanNode recv_node = null;
        if (clone_node instanceof DistinctPlanNode) {
            recv_node = (ReceivePlanNode) node.getChild(0).getChild(0);
        } else {
            recv_node = (ReceivePlanNode) node.getChild(0);
        }
        recv_node.getOutputColumnGUIDs().clear();
        recv_node.getOutputColumnGUIDs().addAll(clone_node.getOutputColumnGUIDs());
        state.markDirty(recv_node);

        assert (recv_node.getChild(0) instanceof SendPlanNode);
        SendPlanNode send_node = (SendPlanNode) recv_node.getChild(0);
        send_node.getOutputColumnGUIDs().clear();
        send_node.getOutputColumnGUIDs().addAll(clone_node.getOutputColumnGUIDs());
        send_node.addIntermediary(clone_node);
        state.markDirty(send_node);

        // 2011-12-08: We now need to correct the aggregate columns for the
        // original plan node
        if ((clone_node instanceof DistinctPlanNode) == false) {
            // If we have a AGGREGATE_WEIGHTED_AVG in our node, then we know that
            // we can skip the last column because that's the COUNT from the remote partition
            boolean has_weightedAvg = node.getAggregateTypes().contains(ExpressionType.AGGREGATE_WEIGHTED_AVG);
            node.getAggregateColumnGuids().clear();
            int num_cols = clone_node.getOutputColumnGUIDCount() - (has_weightedAvg ? 1 : 0);
            for (int i = 0; i < num_cols; i++) {
                Integer aggOutput = clone_node.getOutputColumnGUID(i);
                PlanColumn planCol = state.plannerContext.get(aggOutput);
                assert (planCol != null);
                AbstractExpression exp = planCol.getExpression();
                assert (exp != null);
                Collection<String> refTables = ExpressionUtil.getReferencedTableNames(exp);
                assert (refTables != null);
                if (refTables.size() == 1 && refTables.contains(PlanAssembler.AGGREGATE_TEMP_TABLE)) {
                    node.getAggregateColumnGuids().add(planCol.guid());
                }
            } // FOR
        }

        if (debug.val) {
View Full Code Here


*
* Clone the given aggregate plan node so that a copy can be pushed down
* below the send/receive pair to compute partial aggregates in parallel.
*
* @param node the original aggregate plan node to clone
* @return the cloned aggregate plan node, adjusted for distributed execution
*/
    protected HashAggregatePlanNode cloneAggregatePlanNode(final HashAggregatePlanNode node) {
        // Clone the aggregate node so the copy can be attached below the
        // SEND/RECEIVE pair to compute partial aggregates on each partition.
        // NOTE(review): clone(false, true) flag semantics not visible here —
        // presumably "no children, fresh id"; confirm against AbstractPlanNode.clone.
        HashAggregatePlanNode clone_agg = null;
        try {
            clone_agg = (HashAggregatePlanNode) node.clone(false, true);
        } catch (CloneNotSupportedException ex) {
            // Cloning is expected to always work for plan nodes; treat failure as fatal
            throw new RuntimeException(ex);
        }
        state.markDirty(clone_agg);

        // Update the cloned AggregateNode to handle distributed averages.
        // NOTE: this appears to be a live reference into the clone's internal list,
        // so the add() below mutates the clone directly.
        List<ExpressionType> clone_types = clone_agg.getAggregateTypes();

        // For now we'll always put a COUNT at the end of the AggregatePlanNode
        // This makes it easier for us to find it in the EE.
        // NOTE: has_count deliberately starts false even when the query already
        // contains a COUNT (the containment check below is disabled), so the
        // first AVG always appends its own COUNT_STAR.
        boolean has_count = false;
// boolean has_count = (clone_types.contains(ExpressionType.AGGREGATE_COUNT) ||
// clone_types.contains(ExpressionType.AGGREGATE_COUNT_STAR));

        // Snapshot the size before looping: clone_types may grow below (the
        // appended COUNT_STAR) and we only want to examine the original entries.
        int orig_cnt = clone_types.size();
        for (int i = 0; i < orig_cnt; i++) {
            ExpressionType cloneType = clone_types.get(i);
            // Ok, strap on your helmets boys, here's what we got going on here...
            // In order to do a distributed average, we need to send the average
            // AND the count (in order to compute the weight average at the base partition).
            // We need to check whether we already have a count in our list.
            // If not, then we'll want to insert it here.
            if (cloneType == ExpressionType.AGGREGATE_AVG) {
                if (has_count == false) {
                    // But now because we add a new output column that we're going to use internally,
                    // we need to make sure that our output columns reflect this.
                    clone_types.add(ExpressionType.AGGREGATE_COUNT_STAR);
                    has_count = true;

                    // Aggregate Input Column
                    // We just need to do it against the first column in the child's output
                    // Picking the column that we want to use doesn't matter even if there is a GROUP BY
                    clone_agg.getAggregateColumnGuids().add(node.getChild(0).getOutputColumnGUID(0));

                    // Aggregate Output Column
                    // Build a synthetic BIGINT "_DTXN_COUNT" column on the aggregate
                    // temp table to carry the partition-local count upstream.
                    TupleValueExpression exp = new TupleValueExpression();
                    exp.setValueType(VoltType.BIGINT);
                    exp.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
                    exp.setTableName(PlanAssembler.AGGREGATE_TEMP_TABLE);
                    exp.setColumnName("");
                    exp.setColumnAlias("_DTXN_COUNT");
                    exp.setColumnIndex(clone_agg.getOutputColumnGUIDCount());
                    PlanColumn new_pc = state.plannerContext.getPlanColumn(exp, exp.getColumnAlias());
                    // Keep the three parallel bookkeeping lists in sync:
                    // output-column offsets, display names, and output GUIDs.
                    clone_agg.getAggregateOutputColumns().add(clone_agg.getOutputColumnGUIDCount());
                    clone_agg.getAggregateColumnNames().add(new_pc.getDisplayName());
                    clone_agg.getOutputColumnGUIDs().add(new_pc.guid());
                }
            }
        } // FOR

        // Now go through the original AggregateNode (the one at the top of the tree)
        // and change the ExpressionTypes for the aggregates to handle what we're
        // doing down below in the distributed query.
        // NOTE: only the first orig_cnt entries are rewritten, so the COUNT_STAR
        // appended above is intentionally NOT mirrored into the top node — its
        // value is consumed by the AGGREGATE_WEIGHTED_AVG marker instead.
        List<ExpressionType> exp_types = node.getAggregateTypes();
        exp_types.clear();
        for (int i = 0; i < orig_cnt; i++) {
            ExpressionType cloneType = clone_types.get(i);
            switch (cloneType) {
                case AGGREGATE_COUNT:
                case AGGREGATE_COUNT_STAR:
                case AGGREGATE_SUM:
                    // Partial counts and sums from each partition combine via SUM
                    exp_types.add(ExpressionType.AGGREGATE_SUM);
                    break;
                case AGGREGATE_MAX:
                case AGGREGATE_MIN:
                    // MIN/MAX of partial MIN/MAX results is the same operation
                    exp_types.add(cloneType);
                    break;
                case AGGREGATE_AVG:
                    // This is a special internal marker that allows us to compute
                    // a weighted average from the count
                    exp_types.add(ExpressionType.AGGREGATE_WEIGHTED_AVG);
                    break;
                default:
                    throw new RuntimeException("Unexpected ExpressionType " + cloneType);
            } // SWITCH
        } // FOR

        // IMPORTANT: If we have GROUP BY columns, then we need to make sure
        // that those columns are always passed up the query tree at the pushed
        // down node, even if the final answer doesn't need it
        if (node.getGroupByColumnGuids().isEmpty() == false) {
            for (Integer guid : clone_agg.getGroupByColumnGuids()) {
                if (clone_agg.getOutputColumnGUIDs().contains(guid) == false) {
                    clone_agg.getOutputColumnGUIDs().add(guid);
                }
            } // FOR
        }

        // Sanity check: the clone's GROUP BY bookkeeping must remain in lock-step
        // with the original node's after all of the mutations above.
        assert(clone_agg.getGroupByColumnOffsets().size() == node.getGroupByColumnOffsets().size());
        assert(clone_agg.getGroupByColumnNames().size() == node.getGroupByColumnNames().size());
        assert(clone_agg.getGroupByColumnGuids().size() == node.getGroupByColumnGuids().size()) : clone_agg.getGroupByColumnGuids().size() + " not equal " + node.getGroupByColumnGuids().size();

        return (clone_agg);
    }
View Full Code Here

        return null;
    }

    AbstractPlanNode handleAggregationOperators(AbstractPlanNode root) {
        boolean containsAggregateExpression = false;
        HashAggregatePlanNode aggNode = null;
//        String orig_root_debug = PlanNodeUtil.debug(root);

        /* Check if any aggregate expressions are present */
        for (ParsedSelectStmt.ParsedColInfo col : m_parsedSelect.displayColumns) {
            if (col.expression.getExpressionType() == ExpressionType.AGGREGATE_SUM ||
                col.expression.getExpressionType() == ExpressionType.AGGREGATE_COUNT ||
                col.expression.getExpressionType() == ExpressionType.AGGREGATE_COUNT_STAR ||
                col.expression.getExpressionType() == ExpressionType.AGGREGATE_MIN ||
                col.expression.getExpressionType() == ExpressionType.AGGREGATE_MAX ||
                col.expression.getExpressionType() == ExpressionType.AGGREGATE_AVG) {
                containsAggregateExpression = true;
            }
        }

        // "Select A from T group by A" is grouped but has no aggregate operator expressions
        // Catch that case by checking the grouped flag. Probably the OutputColumn iteration
        // above is unnecessary?

        if (m_parsedSelect.grouped)
            containsAggregateExpression = true;

        if (containsAggregateExpression) {
            aggNode = new HashAggregatePlanNode(m_context, getNextPlanNodeId());

            for (ParsedSelectStmt.ParsedColInfo col : m_parsedSelect.groupByColumns) {
                aggNode.getGroupByColumnOffsets().add(col.index);
                aggNode.getGroupByColumnNames().add(col.alias);
                PlanColumn groupByColumn =
                    root.findMatchingOutputColumn(col.tableName, col.columnName,
                                                  col.alias);
                aggNode.appendGroupByColumn(groupByColumn);
            }

            int outputColumnIndex = 0;
            for (ParsedSelectStmt.ParsedColInfo col : m_parsedSelect.displayColumns) {

                AbstractExpression rootExpr = col.expression;
                ExpressionType agg_expression_type = rootExpr.getExpressionType();
                if (rootExpr.getExpressionType() == ExpressionType.AGGREGATE_SUM ||
                    rootExpr.getExpressionType() == ExpressionType.AGGREGATE_MIN ||
                    rootExpr.getExpressionType() == ExpressionType.AGGREGATE_MAX ||
                    rootExpr.getExpressionType() == ExpressionType.AGGREGATE_AVG ||
                    rootExpr.getExpressionType() == ExpressionType.AGGREGATE_COUNT ||
                    rootExpr.getExpressionType() == ExpressionType.AGGREGATE_COUNT_STAR)
                {
                    PlanColumn aggregateColumn = null;
                    if (rootExpr.getLeft() instanceof TupleValueExpression)
                    {
                        TupleValueExpression nested =
                            (TupleValueExpression) rootExpr.getLeft();

                        if (((AggregateExpression)rootExpr).m_distinct) {
                            root = addDistinctNode(root, nested);
                        }

                        aggregateColumn =
                            root.findMatchingOutputColumn(nested.getTableName(),
                                                          nested.getColumnName(),
                                                          nested.getColumnAlias());
                    }
                    // count(*) hack.  we're not getting AGGREGATE_COUNT_STAR
                    // expression types from the parsing, so we have
                    // to detect the null inner expression case and do the
                    // switcharoo ourselves.
                    else if (rootExpr.getExpressionType() == ExpressionType.AGGREGATE_COUNT &&
                             rootExpr.getLeft() == null)
                    {
                        aggregateColumn =
                            m_context.get(root.getOutputColumnGUIDs().get(0));
                        agg_expression_type = ExpressionType.AGGREGATE_COUNT_STAR;
                    }
                    else
                    {
                        throw new PlanningErrorException("Expressions in aggregates currently unsupported");
                    }

                    aggNode.getAggregateColumnGuids().add(aggregateColumn.guid());
                    aggNode.getAggregateColumnNames().add(aggregateColumn.getDisplayName());
                    aggNode.getAggregateTypes().add(agg_expression_type);

                    // A bit of a hack: ProjectionNodes using PlanColumns after the
                    // aggregate node need the output columns here to
                    // contain TupleValueExpressions (effectively on a temp table).
                    // So we construct one based on the output of the
                    // aggregate expression, the column alias provided by HSQL,
                    // and the offset into the output table schema for the
                    // aggregate node that we're computing.
                    TupleValueExpression tve = new TupleValueExpression();
                   
                    // If this is an AVG, then our type should be DECIMAL
                    if (agg_expression_type == ExpressionType.AGGREGATE_AVG) {
                        tve.setValueType(VoltType.FLOAT);
                        tve.setValueSize(VoltType.FLOAT.getLengthInBytesForFixedTypes());
                    }
                    // Otherwise it can be whatever the rootExpression is
                    else {
                        tve.setValueType(rootExpr.getValueType());
                        tve.setValueSize(rootExpr.getValueSize());
                    }
                    tve.setColumnIndex(outputColumnIndex);
                    tve.setColumnName("");
                    tve.setColumnAlias(col.alias);
                    tve.setTableName(AGGREGATE_TEMP_TABLE);
                    PlanColumn colInfo = m_context.getPlanColumn(tve, col.alias);
                    aggNode.appendOutputColumn(colInfo);
                    aggNode.getAggregateOutputColumns().add(outputColumnIndex);
                }
                else
                {
                    /*
                     * These columns are the pass through columns that are not being
                     * aggregated on. These are the ones from the SELECT list. They
                     * MUST already exist in the child node's output. Find them and
                     * add them to the aggregate's output.
                     */
                    PlanColumn passThruColumn =
                        root.findMatchingOutputColumn(col.tableName,
                                                      col.columnName,
                                                      col.alias);
                    aggNode.appendOutputColumn(passThruColumn);
                }

                outputColumnIndex++;
            }

            aggNode.addAndLinkChild(root);
            root = aggNode;
        }
       
        // PAVLO: Push non-AVG aggregates down into the scan for multi-partition queries
        // 2012-02-15: Moved to AggregatePushdownOptimization
View Full Code Here

        p = nodes.get(0);

        // Find re-aggregation node.
        assertTrue(p instanceof ReceivePlanNode);
        assertTrue(p.getParent(0) instanceof HashAggregatePlanNode);
        HashAggregatePlanNode reAggNode = (HashAggregatePlanNode) p.getParent(0);
        String reAggNodeStr = reAggNode.toExplainPlanString().toLowerCase();

        // Find scan node.
        p = pns.get(1);
        assert (p.getScanNodeList().size() == 1);
        p = p.getScanNodeList().get(0);
        String scanNodeStr = p.toExplainPlanString().toLowerCase();

        if (aggFilters != null) {
            String[] aggFilterStrings = null;
            if (aggFilters instanceof String) {
                aggFilterStrings = new String[] { (String) aggFilters };
            } else {
                aggFilterStrings = (String[]) aggFilters;
            }
            for (String aggFilter : aggFilterStrings) {
                System.out.println(reAggNodeStr.contains(aggFilter
                        .toLowerCase()));
                assertTrue(reAggNodeStr.contains(aggFilter.toLowerCase()));
                System.out
                        .println(scanNodeStr.contains(aggFilter.toLowerCase()));
                assertFalse(scanNodeStr.contains(aggFilter.toLowerCase()));
            }
        } else {
            assertNull(reAggNode.getPostPredicate());
        }

        if (scanFilters != null) {
            String[] scanFilterStrings = null;
            if (scanFilters instanceof String) {
View Full Code Here

        }
        if (p instanceof DistinctPlanNode) {
            p = p.getChild(0);
        }

        HashAggregatePlanNode reAggNode = null;

        List<AbstractPlanNode> nodes = p.findAllNodesOfType(PlanNodeType.RECEIVE);
        assertEquals(1, nodes.size());
        AbstractPlanNode receiveNode = nodes.get(0);

        // Indicates that there is no top aggregation node.
        if (numGroupbyOfTopAggNode == -1 ) {
            if (needFix) {
                p = receiveNode.getParent(0);
                assertTrue(p instanceof HashAggregatePlanNode);
                reAggNode = (HashAggregatePlanNode) p;

                assertEquals(numGroupbyOfReaggNode, reAggNode.getGroupByExpressionsSize());
                assertEquals(numAggsOfReaggNode, reAggNode.getAggregateTypesSize());

                p = p.getChild(0);
            }
            assertTrue(p instanceof ReceivePlanNode);

            p = pns.get(1);
            assertTrue(p instanceof SendPlanNode);
            p = p.getChild(0);

            if (distinctPushdown) {
                assertTrue(p instanceof DistinctPlanNode);
                p = p.getChild(0);
            }
            assertTrue(p instanceof AbstractScanPlanNode);
        } else {
            AggregatePlanNode topAggNode = null;
            if (p instanceof AbstractJoinPlanNode) {
                // Inline aggregation with join
                topAggNode = AggregatePlanNode.getInlineAggregationNode(p);
            } else {
                assertTrue(p instanceof AggregatePlanNode);
                topAggNode = (AggregatePlanNode) p;
                p = p.getChild(0);
            }
            assertEquals(numGroupbyOfTopAggNode, topAggNode.getGroupByExpressionsSize());
            assertEquals(numAggsOfTopAggNode, topAggNode.getAggregateTypesSize());

            if (needFix) {
                p = receiveNode.getParent(0);
                assertTrue(p instanceof HashAggregatePlanNode);
                reAggNode = (HashAggregatePlanNode) p;

                assertEquals(numGroupbyOfReaggNode, reAggNode.getGroupByExpressionsSize());
                assertEquals(numAggsOfReaggNode, reAggNode.getAggregateTypesSize());

                p = p.getChild(0);
            }
            assertTrue(p instanceof ReceivePlanNode);
View Full Code Here

        m_scanInlinedProjectionNode.setOutputSchema(inlineProjSchema);

        // (2) Construct the reAggregation Node.

        // Construct the reAggregation plan node's aggSchema
        m_reAggNode = new HashAggregatePlanNode();
        int outputColumnIndex = 0;
        // inlineProjSchema contains the group by columns, while aggSchema may do not.
        NodeSchema aggSchema = new NodeSchema();

        // Construct reAggregation node's aggregation and group by list.
View Full Code Here


    AbstractPlanNode handleMVBasedMultiPartQuery (AbstractPlanNode root, boolean edgeCaseOuterJoin) {
        MaterializedViewFixInfo mvFixInfo = m_parsedSelect.m_mvFixInfo;

        HashAggregatePlanNode reAggNode = new HashAggregatePlanNode(mvFixInfo.getReAggregationPlanNode());
        reAggNode.clearChildren();
        reAggNode.clearParents();

        AbstractPlanNode receiveNode = root;
        AbstractPlanNode reAggParent = null;
        // Find receive plan node and insert the constructed re-aggregation plan node.
        if (root.getPlanNodeType() == PlanNodeType.RECEIVE) {
            root = reAggNode;
        } else {
            List<AbstractPlanNode> recList = root.findAllNodesOfType(PlanNodeType.RECEIVE);
            assert(recList.size() == 1);
            receiveNode = recList.get(0);

            reAggParent = receiveNode.getParent(0);
            boolean result = reAggParent.replaceChild(receiveNode, reAggNode);
            assert(result);
        }
        reAggNode.addAndLinkChild(receiveNode);

        assert(receiveNode instanceof ReceivePlanNode);
        AbstractPlanNode sendNode = receiveNode.getChild(0);
        assert(sendNode instanceof SendPlanNode);
        AbstractPlanNode sendNodeChild = sendNode.getChild(0);

        HashAggregatePlanNode reAggNodeForReplace = null;
        if (m_parsedSelect.m_tableList.size() > 1 && !edgeCaseOuterJoin) {
            reAggNodeForReplace = reAggNode;
        }
        boolean find = mvFixInfo.processScanNodeWithReAggNode(sendNode, reAggNodeForReplace);
        assert(find);
View Full Code Here

            // Construct the aggregate nodes
            if (needHashAgg) {
                if ( m_parsedSelect.m_mvFixInfo.needed() ) {
                    // TODO: may optimize this edge case in future
                    aggNode = new HashAggregatePlanNode();
                } else {
                    if (gbInfo.isChangedToSerialAggregate()) {
                        assert(root instanceof ReceivePlanNode);
                        aggNode = new AggregatePlanNode();
                    } else if (gbInfo.isChangedToPartialAggregate()) {
                        aggNode = new PartialAggregatePlanNode(gbInfo.m_coveredGroupByColumns);
                    } else {
                        aggNode = new HashAggregatePlanNode();
                    }

                    topAggNode = new HashAggregatePlanNode();
                }
            } else {
                aggNode = new AggregatePlanNode();
                if ( ! m_parsedSelect.m_mvFixInfo.needed()) {
                    topAggNode = new AggregatePlanNode();
View Full Code Here

TOP

Related Classes of org.voltdb.plannodes.HashAggregatePlanNode

Copyright © 2018 www.massapicom. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.