Package org.voltdb.expressions

Examples of org.voltdb.expressions.AbstractExpression
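
AbstractExpression is the planner's expression-tree node: each node carries an ExpressionType plus up to two children reachable through getLeft() and getRight(). Before the excerpts, here is a minimal sketch of the tree-walking pattern they all rely on (an illustrative helper, not VoltDB API; the import path is real, the class and method are assumptions):

    import org.voltdb.expressions.AbstractExpression;

    public final class ExpressionWalker {
        // Recursively print an expression tree, one node per line, indented by depth.
        public static void walk(AbstractExpression expr, int depth) {
            if (expr == null) {
                return;
            }
            StringBuilder indent = new StringBuilder();
            for (int i = 0; i < depth; i++) {
                indent.append("  ");
            }
            System.out.println(indent + String.valueOf(expr.getExpressionType()));
            walk(expr.getLeft(), depth + 1);   // left operand / aggregate input
            walk(expr.getRight(), depth + 1);  // right operand, when binary
        }
    }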


        // procWantsCommonPartitioning == true but commonPartitionExpression == null signifies a proc
        // for which the planner was requested to attempt to find an SP plan, but that was not possible
        // -- it had a replicated write or it had one or more partitioned reads that were not all
        // filtered by the same partition key value -- so it was planned as an MP proc.
        boolean procWantsCommonPartitioning = true;
        AbstractExpression commonPartitionExpression = null;
        String exampleSPstatement = null;
        Object exampleSPvalue = null;

        // Iterate through the fields and collect valid SQL statements.
        Map<String, Object> fields = lang.accept(procedureIntrospector(compiler), procClass);

        // determine if proc is read or read-write by checking if the proc contains any write sql stmts
        boolean readWrite = false;
        for (Object field : fields.values()) {
            if (!(field instanceof SQLStmt)) continue;
            SQLStmt stmt = (SQLStmt)field;
            QueryType qtype = QueryType.getFromSQL(stmt.getText());
            if (!qtype.isReadOnly()) {
                readWrite = true;
                break;
            }
        }

        // Default to the FASTER determinism mode, which may favor nondeterministic plans,
        // but if it's a read-write proc, use the SAFER planning mode w.r.t. determinism.
        final DeterminismMode detMode = readWrite ? DeterminismMode.SAFER : DeterminismMode.FASTER;

        for (Entry<String, Object> entry : fields.entrySet()) {
            if (!(entry.getValue() instanceof SQLStmt)) continue;

            String stmtName = entry.getKey();
            SQLStmt stmt = (SQLStmt)entry.getValue();

            // add the statement to the catalog
            Statement catalogStmt = procedure.getStatements().add(stmtName);

            // compile the statement
            StatementPartitioning partitioning =
                info.singlePartition ? StatementPartitioning.forceSP() :
                                       StatementPartitioning.forceMP();
            StatementCompiler.compile(compiler, hsql, catalog, db,
                    estimates, catalogStmt, stmt.getText(), stmt.getJoinOrder(),
                    detMode, partitioning);

            if (partitioning.wasSpecifiedAsSingle()) {
                procWantsCommonPartitioning = false; // Don't try to infer what's already been asserted.
                // The planner does not currently attempt to second-guess a plan declared as single-partition; maybe some day it will.
                // In theory, the PartitioningForStatement would confirm the use of (only) a parameter as a partition key --
                // or, if the partition key was determined to be some other constant (expression?), it might display an informational
                // message that the passed parameter is assumed to be equal to the hard-coded partition key constant (expression).

                // Validate any inferred statement partitioning given the statement's possible usage, until a contradiction is found.
            }
            else if (procWantsCommonPartitioning) {
                // Only consider statements that are capable of running SP with a partitioning parameter that does not seem to
                // conflict with the partitioning of prior statements.
                if (partitioning.getCountOfIndependentlyPartitionedTables() == 1) {
                    AbstractExpression statementPartitionExpression = partitioning.singlePartitioningExpressionForReport();
                    if (statementPartitionExpression != null) {
                        if (commonPartitionExpression == null) {
                            commonPartitionExpression = statementPartitionExpression;
                            exampleSPstatement = stmt.getText();
                            exampleSPvalue = partitioning.getInferredPartitioningValue();
        // ... [snippet truncated]
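
The excerpt cuts off just after the first candidate partition expression is recorded. A hypothetical helper (not the verbatim source) distilling the rule the loop applies to every subsequent statement, modeled on the equals() comparison visible in the set-operation excerpt further down:

        // Statements with no partitioning expression (replicated reads) are neutral;
        // any other statement must partition on an expression equal to the common one,
        // or the procedure falls back to a multi-partition plan.
        static boolean compatibleWithCommonPartitioning(AbstractExpression common,
                                                        AbstractExpression stmtExpr) {
            return stmtExpr == null || common == null || common.equals(stmtExpr);
        }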


            // In theory, the PartitioningForStatement would confirm the use of (only) a parameter as a partition key --
            // or if the partition key was determined to be some other hard-coded constant (expression?) it might display a warning
            // message that the passed parameter is assumed to be equal to that constant (expression).
        } else {
            if (partitioning.getCountOfIndependentlyPartitionedTables() == 1) {
                AbstractExpression statementPartitionExpression = partitioning.singlePartitioningExpressionForReport();
                if (statementPartitionExpression != null) {
                    // The planner has uncovered an overlooked opportunity to run the statement SP.
                    String msg = null;
                    if (statementPartitionExpression instanceof ParameterValueExpression) {
                        msg = "This procedure would benefit from setting the attribute 'partitioninfo=" + partitioning.getFullColumnName() +
                                ":" + ((ParameterValueExpression) statementPartitionExpression).getParameterIndex() + "'";
                    } else {
                        String valueDescription = null;
                        Object partitionValue = partitioning.getInferredPartitioningValue();
                        if (partitionValue == null) {
                            // Statement partitioned on a runtime constant. This is likely to be cryptic, but hopefully gets the idea across.
                            valueDescription = "of " + statementPartitionExpression.explain("");
                        } else {
                            valueDescription = partitionValue.toString(); // A simple constant value COULD have been a parameter.
                        }
                        msg = "This procedure would benefit from adding a parameter to be passed the value " + valueDescription +
                                " and setting the attribute 'partitioninfo=" + partitioning.getFullColumnName() +
        // ... [snippet truncated]
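
For reference, with illustrative values (a hypothetical table VOTES partitioned on column PHONE_NUMBER, bound to parameter index 0 -- not names from the source), the first advisory above renders as:

        This procedure would benefit from setting the attribute 'partitioninfo=VOTES.PHONE_NUMBER:0'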

    /**
     * Smart accessor: only returns a value if it is unique and useful.
     * @return the single partitioning expression if it is both unique and useful; null otherwise
     */
    public AbstractExpression singlePartitioningExpression() {
        AbstractExpression e = singlePartitioningExpressionForReport();
        if (e != null && isUsefulPartitioningExpression(e)) {
            return e;
        }
        return null;
    }
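A typical call site appears in the set-operation excerpt below; in isolation the contract amounts to this (illustrative fragment):

        AbstractExpression e = partitioning.singlePartitioningExpression();
        if (e == null) {
            // Either no unique partitioning expression was inferred, or the one
            // inferred was judged not useful as a partition key.
        }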

        byte[][] predicates = new byte[m_predicates.size()][];
        int i = 0;
        int size = 0;
        try {
            for (Pair<AbstractExpression, Boolean> p : m_predicates) {
                final AbstractExpression predicate = p.getFirst();
                JSONStringer stringer = new JSONStringer();
                stringer.object();
                stringer.key("triggersDelete").value(p.getSecond());
                // If the predicate is null, the EE will serialize all rows to the corresponding
                // data target. That is the same as passing an always-true expression,
                // but without the overhead of evaluating it. This avoids that overhead in the
                // common case where a single data target wants all the rows.
                if (predicate != null) {
                    stringer.key("predicateExpression").object();
                    predicate.toJSONString(stringer);
                    stringer.endObject();
                }
                stringer.endObject();
                predicates[i] = stringer.toString().getBytes(Charsets.UTF_8);
                size += predicates[i].length;
        // ... [snippet truncated]
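
A hedged round-trip check of the bytes the loop produces (the JSON layout follows from the stringer calls above; the import path is an assumption and JSONException handling is omitted):

        import org.json_voltpatches.JSONObject; // VoltDB's repackaged org.json (assumed path)

        JSONObject parsed = new JSONObject(new String(predicates[0], Charsets.UTF_8));
        boolean triggersDelete = parsed.getBoolean("triggersDelete");
        // A missing "predicateExpression" key is the null-predicate case described
        // above: the EE serializes every row to that data target.
        boolean serializeAllRows = !parsed.has("predicateExpression");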

            if (commonPartitioning == null) {
                commonPartitioning = partitioning;
                continue;
            }

            AbstractExpression statementPartitionExpression = partitioning.singlePartitioningExpression();
            if (commonPartitioning.requiresTwoFragments()) {
                if (partitioning.requiresTwoFragments() || statementPartitionExpression != null) {
                    // If two child statements need to use a second fragment,
                    // it can't currently be a two-fragment plan.
                    // The coordinator expects a single-table result from each partition.
                    // Also, currently the coordinator of a two-fragment plan is not allowed to
                    // target a particular partition, so neither can the union of the coordinator
                    // and a statement that wants to run single-partition.
                    throw new PlanningErrorException(
                            "Statements are too complex in set operation using multiple partitioned tables.");
                }
                // the new statement is apparently a replicated read and has no effect on partitioning
                continue;
            }
            AbstractExpression commonPartitionExpression = commonPartitioning.singlePartitioningExpression();
            if (commonPartitionExpression == null) {
                // the prior statement(s) were apparently replicated reads
                // and have no effect on partitioning
                commonPartitioning = partitioning;
                continue;
            }
            if (partitioning.requiresTwoFragments()) {
                // Again, currently the coordinator of a two-fragment plan is not allowed to
                // target a particular partition, so neither can the union of the coordinator
                // and a statement that wants to run single-partition.
                throw new PlanningErrorException(
                        "Statements are too complex in set operation using multiple partitioned tables.");
            }
            if (statementPartitionExpression == null) {
                // the new statement is apparently a replicated read and has no effect on partitioning
                continue;
            }
            if ( ! commonPartitionExpression.equals(statementPartitionExpression)) {
                throw new PlanningErrorException(
                        "Statements use conflicting partitioned table filters in set operation or sub-query.");
            }
        }

        // generate the delete node with the right target table
        DeletePlanNode deleteNode = new DeletePlanNode();
        deleteNode.setTargetTableName(targetTable.getTypeName());

        ProjectionPlanNode projectionNode = new ProjectionPlanNode();
        AbstractExpression addressExpr = new TupleAddressExpression();
        NodeSchema proj_schema = new NodeSchema();
        // This planner-created column is magic.
        proj_schema.addColumn(new SchemaColumn("VOLT_TEMP_TABLE",
                                               "VOLT_TEMP_TABLE",
                                               "tuple_address",
        // ... [snippet truncated]
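
The addColumn call is cut off mid-argument-list. Judging by the five-argument SchemaColumn(table, tableAlias, columnName, alias, expression) pattern used in the aggregation excerpt below, the completion presumably looks like this (assumed, not verbatim):

        proj_schema.addColumn(new SchemaColumn("VOLT_TEMP_TABLE",
                                               "VOLT_TEMP_TABLE",
                                               "tuple_address",
                                               "tuple_address",
                                               addressExpr));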

        // update executor in order to figure out which table columns get
        // updated.  We'll associate the actual values with VOLT_TEMP_TABLE
        // to avoid any false schema/column matches with the actual table.
        for (Entry<Column, AbstractExpression> col : m_parsedUpdate.columns.entrySet()) {
            String tableName = col.getKey().getTypeName();
            AbstractExpression expr = col.getValue();
            expr.setInBytes(col.getKey().getInbytes());

            proj_schema.addColumn(new SchemaColumn("VOLT_TEMP_TABLE",
                                                   "VOLT_TEMP_TABLE",
                                                   tableName,
                                                   tableName,
        // ... [snippet truncated]
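
Here too the SchemaColumn constructor is truncated before its final argument; by the same five-argument pattern, the missing argument is presumably the update's value expression (assumed completion):

        proj_schema.addColumn(new SchemaColumn("VOLT_TEMP_TABLE",
                                               "VOLT_TEMP_TABLE",
                                               tableName,
                                               tableName,
                                               expr));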

            // hint that this statement can be executed SP.
            if (col.equals(m_partitioning.getPartitionColForDML()) && subquery == null) {
                // When AdHoc insert-into-select is supported, we'll need to be able to infer
                // partitioning of the sub-select
                AbstractExpression expr = m_parsedInsert.getExpressionForPartitioning(col);
                String fullColumnName = targetTable.getTypeName() + "." + col.getTypeName();
                m_partitioning.addPartitioningExpression(fullColumnName, expr, expr.getValueType());
            }
        }

        NodeSchema matSchema = null;
        if (subquery == null) {
            matSchema = new NodeSchema();
        }

        int[] fieldMap = new int[m_parsedInsert.m_columns.size()];
        int i = 0;

        // The insert statement's set of columns are contained in a LinkedHashMap,
        // meaning that we'll iterate over the columns here in the order that the user
        // specified them in the original SQL.  (If the statement didn't specify any
        // columns, then all the columns will be in the map in schema order.)
        //   - Build the field map, used by insert executor to build tuple to execute
        //   - For VALUES(...) insert statements, build the materialize node's schema
        for (Map.Entry<Column, AbstractExpression> e : m_parsedInsert.m_columns.entrySet()) {
            Column col = e.getKey();
            fieldMap[i] = col.getIndex();

            if (matSchema != null) {
                AbstractExpression valExpr = e.getValue();
                valExpr.setInBytes(col.getInbytes());

                // Patch over any mismatched expressions with an explicit cast.
                // Most impossible-to-cast type combinations should have already been caught by the
                // parser, but there are also runtime checks in the casting code
                // -- such as for out of range values.
        // ... [snippet truncated]
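
The comment above leans on LinkedHashMap's insertion-order iteration. A self-contained toy (plain Java, independent of VoltDB) showing why the field map comes out in the user's column order:

        import java.util.Arrays;
        import java.util.LinkedHashMap;
        import java.util.Map;

        public final class FieldMapDemo {
            public static void main(String[] args) {
                // Pretend the user wrote INSERT INTO t (c, a, b) ...;
                // the values are the columns' indexes in the table schema.
                Map<String, Integer> columns = new LinkedHashMap<String, Integer>();
                columns.put("c", 2);
                columns.put("a", 0);
                columns.put("b", 1);

                int[] fieldMap = new int[columns.size()];
                int i = 0;
                for (int index : columns.values()) {
                    fieldMap[i++] = index; // iteration follows insertion order: c, a, b
                }
                System.out.println(Arrays.toString(fieldMap)); // prints [2, 0, 1]
            }
        }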

            int outputColumnIndex = 0;
            NodeSchema agg_schema = new NodeSchema();
            NodeSchema top_agg_schema = new NodeSchema();

            for (ParsedSelectStmt.ParsedColInfo col : m_parsedSelect.m_aggResultColumns) {
                AbstractExpression rootExpr = col.expression;
                AbstractExpression agg_input_expr = null;
                SchemaColumn schema_col = null;
                SchemaColumn top_schema_col = null;
                if (rootExpr instanceof AggregateExpression) {
                    ExpressionType agg_expression_type = rootExpr.getExpressionType();
                    agg_input_expr = rootExpr.getLeft();

                    // A bit of a hack: ProjectionNodes after the
                    // aggregate node need the output columns here to
                    // contain TupleValueExpressions (effectively on a temp table).
                    // So we construct one based on the output of the
                    // aggregate expression, the column alias provided by HSQL,
                    // and the offset into the output table schema for the
                    // aggregate node that we're computing.
                    // Oh, oh, it's magic, you know..
                    TupleValueExpression tve = new TupleValueExpression(
                            "VOLT_TEMP_TABLE", "VOLT_TEMP_TABLE", "", col.alias, outputColumnIndex);
                    tve.setTypeSizeBytes(rootExpr.getValueType(), rootExpr.getValueSize(),
                            rootExpr.getInBytes());

                    boolean is_distinct = ((AggregateExpression)rootExpr).isDistinct();
                    aggNode.addAggregate(agg_expression_type, is_distinct, outputColumnIndex, agg_input_expr);
                    schema_col = new SchemaColumn("VOLT_TEMP_TABLE", "VOLT_TEMP_TABLE", "", col.alias, tve);
                    top_schema_col = new SchemaColumn("VOLT_TEMP_TABLE", "VOLT_TEMP_TABLE", "", col.alias, tve);

                    /*
                     * Special case count(*), count(), sum(), min() and max() to
                     * push them down to each partition. It will do the
                     * push-down if the select columns only contains the listed
                     * aggregate operators and other group-by columns. If the
                     * select columns includes any other aggregates, it will not
                     * do the push-down. - nshi
                     */
                    if (topAggNode != null) {
                        ExpressionType top_expression_type = agg_expression_type;
                        /*
                         * For count(*), count() and sum(), the pushed-down
                         * aggregate node doesn't change. An extra sum()
                         * aggregate node is added to the coordinator to sum up
                         * the numbers from all the partitions. The input schema
                         * and the output schema of the sum() aggregate node are
                         * the same as the output schema of the push-down
                         * aggregate node.
                         *
                         * If DISTINCT is specified, don't do push-down for
                         * count() and sum()
                         */
                        if (agg_expression_type == ExpressionType.AGGREGATE_COUNT_STAR ||
                            agg_expression_type == ExpressionType.AGGREGATE_COUNT ||
                            agg_expression_type == ExpressionType.AGGREGATE_SUM) {
                            if (is_distinct) {
                                topAggNode = null;
                            }
                            else {
                                top_expression_type = ExpressionType.AGGREGATE_SUM;
                            }
                        }

                        /*
                         * For min() and max(), the pushed-down aggregate node
                         * doesn't change. An extra aggregate node of the same
                         * type is added to the coordinator. The input schema
                         * and the output schema of the top aggregate node are
                         * the same as the output schema of the pushed-down
                         * aggregate node.
                         */
                        else if (agg_expression_type != ExpressionType.AGGREGATE_MIN &&
                                 agg_expression_type != ExpressionType.AGGREGATE_MAX) {
                            /*
                             * Unsupported aggregate for push-down (AVG for example).
                             */
                            topAggNode = null;
                        }

                        if (topAggNode != null) {
                            /*
                             * Input column of the top aggregate node is the output column of the push-down aggregate node
                             */
                            topAggNode.addAggregate(top_expression_type, is_distinct, outputColumnIndex, tve);
                        }
                    }
                }

                // If the rootExpr is not itself an AggregateExpression but simply contains one (or more)
                // like "MAX(counter)+1" or "MAX(col)/MIN(col)" the assumptions about matching input and output
                // columns break down.
                else if (rootExpr.hasAnySubexpressionOfClass(AggregateExpression.class)) {
                    assert(false);
                }
                else
                {
                    /*
                     * These columns are the pass through columns that are not being
                     * aggregated on. These are the ones from the SELECT list. They
                     * MUST already exist in the child node's output. Find them and
                     * add them to the aggregate's output.
                     */
                    schema_col = new SchemaColumn(col.tableName, col.tableAlias, col.columnName, col.alias, col.expression);
                    AbstractExpression topExpr = null;
                    if (col.groupBy) {
                        topExpr = m_parsedSelect.m_groupByExpressions.get(col.alias);
                    } else {
                        topExpr = col.expression;
                    }
        // ... [snippet truncated]
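
The block comments above encode a small decision table for aggregate push-down. A hypothetical helper (not VoltDB API; the ExpressionType import path is assumed) that makes the same decisions explicit:

        import org.voltdb.types.ExpressionType;

        // Returns the coordinator-side aggregate to pair with a pushed-down partition
        // aggregate, or null when push-down is not possible.
        static ExpressionType coordinatorAggregateFor(ExpressionType partitionAgg,
                                                      boolean isDistinct) {
            switch (partitionAgg) {
                case AGGREGATE_COUNT_STAR:
                case AGGREGATE_COUNT:
                case AGGREGATE_SUM:
                    // Partial counts and sums are summed on the coordinator, unless
                    // DISTINCT makes the per-partition results non-combinable.
                    return isDistinct ? null : ExpressionType.AGGREGATE_SUM;
                case AGGREGATE_MIN:
                case AGGREGATE_MAX:
                    return partitionAgg; // min of mins, max of maxes
                default:
                    return null; // e.g. AVG: unsupported for push-down here
            }
        }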

                // ignore order of keys in GROUP BY expr
                int ithCovered = 0;
                boolean foundPrefixedColumn = false;
                for (; ithCovered < groupBys.size(); ithCovered++) {
                    AbstractExpression gbExpr = groupBys.get(ithCovered).expression;
                    if ( ! (gbExpr instanceof TupleValueExpression)) {
                        continue;
                    }
                    TupleValueExpression gbTVE = (TupleValueExpression)gbExpr;
                    // The TVE's column index has not been resolved at this point.
                    if ( ! fromTableAlias.equals(gbTVE.getTableAlias())) {
                        continue;
                    }
                    if (indexColumnName.equals(gbTVE.getColumnName())) {
                        foundPrefixedColumn = true;
                        break;
                    }
                }
                if (! foundPrefixedColumn) {
                    // the prefix match ends here
                    break;
                }
                coveredGroupByColumns.add(ithCovered);

                if (coveredGroupByColumns.size() == groupBys.size()) {
                    // covered all group by columns already
                    break;
                }
            }

        } else {
            StmtTableScan fromTableScan = m_parsedSelect.m_tableAliasMap.get(fromTableAlias);
            // either pure expression index or mix of expressions and simple columns
            List<AbstractExpression> indexedExprs = null;
            try {
                indexedExprs = AbstractExpression.fromJSONArrayString(exprsjson, fromTableScan);
            } catch (JSONException e) {
                e.printStackTrace();
                // This case sounds impossible
                return coveredGroupByColumns;
            }

            for (int j = 0; j < indexedExprs.size(); j++) {
                AbstractExpression indexExpr = indexedExprs.get(j);
                // ignore order of keys in GROUP BY expr

                int ithCovered = 0;
                List<AbstractExpression> binding = null;
                for (; ithCovered < groupBys.size(); ithCovered++) {
                    AbstractExpression gbExpr = groupBys.get(ithCovered).expression;
                    binding = gbExpr.bindingToIndexedExpression(indexExpr);
                    if (binding != null) {
                        break;
                    }
                }
                if (binding == null) {
        // ... [snippet truncated]
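
The excerpt breaks off inside "if (binding == null) {". By symmetry with the column-name branch above, the expression branch presumably stops at the first group-by position the indexed expression cannot bind to, and otherwise records the covered position (assumed continuation, not verbatim):

                if (binding == null) {
                    // no group-by expression binds to this indexed expression:
                    // the prefix match ends here
                    break;
                }
                coveredGroupByColumns.add(ithCovered);
                if (coveredGroupByColumns.size() == groupBys.size()) {
                    // covered all group by columns already
                    break;
                }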
