Package org.voltdb.expressions

Examples of org.voltdb.expressions.TupleValueExpression
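All of the snippets below construct or inspect TupleValueExpression objects, which the VoltDB planner uses to represent a reference to a single column of a single table (or table alias). As a quick orientation, here is a minimal sketch of the construction pattern the snippets share. It is assembled only from the usages visible on this page: the table name "R1", the column name "A", and VoltType.INTEGER are illustrative assumptions, and the fragment presumes the usual org.voltdb imports rather than being a complete runnable class.

        // Refer to column "A" of table "R1" (alias "R1"), already resolved to column index 0.
        TupleValueExpression tve = new TupleValueExpression("R1", "R1", "A", "A", 0);
        // Give the expression a concrete value type and size (VoltType.INTEGER is an assumed example type).
        tve.setValueType(VoltType.INTEGER);
        tve.setValueSize(VoltType.INTEGER.getLengthInBytesForFixedTypes());
        // Wrap the expression in a SchemaColumn so it can appear in a plan node's output schema.
        SchemaColumn col = new SchemaColumn("R1", "R1", "A", "A", tve);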


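        // This test fragment checks that every column in a ProjectionPlanNode's output schema
        // is a TupleValueExpression whose column index has been resolved (is no longer -1).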
        assertTrue(pn instanceof ProjectionPlanNode);
        NodeSchema ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }
    }


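    // This test builds typed TupleValueExpressions for a subset of TABLE1's columns, wraps each
    // in a SchemaColumn, and uses them as the scan columns of a dummy sequential scan node.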
    public void testOutputSchemaSomeScanColumns()
    {
        int[] scan_col_indexes = { 1, 3 };
        ArrayList<SchemaColumn> scanColumns = new ArrayList<SchemaColumn>();
        for (int index : scan_col_indexes) {
            TupleValueExpression tve = new TupleValueExpression(TABLE1, TABLE1, COLS[index], COLS[index]);
            tve.setValueType(COLTYPES[index]);
            tve.setValueSize(COLTYPES[index].getLengthInBytesForFixedTypes());
            SchemaColumn col = new SchemaColumn(TABLE1, TABLE1, COLS[index], COLS[index], tve);
            scanColumns.add(col);
        }
        AbstractScanPlanNode dut = SeqScanPlanNode.createDummyForTest(TABLE1, scanColumns);

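        // This fragment shows TupleValueExpressions used as the operands of an OperatorExpression:
        // the projected value for one column is computed as the sum of two other columns.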
        // Update column 4 with the sum of columns 0 and 2.
        OperatorExpression col4_exp = new OperatorExpression();
        col4_exp.setValueType(COLTYPES[4]);
        col4_exp.setValueSize(COLTYPES[4].getLengthInBytesForFixedTypes());
        col4_exp.setExpressionType(ExpressionType.OPERATOR_PLUS);
        TupleValueExpression left = new TupleValueExpression(TABLE1, TABLE1, COLS[0], COLS[0]);
        left.setValueType(COLTYPES[0]);
        left.setValueSize(COLTYPES[0].getLengthInBytesForFixedTypes());
        TupleValueExpression right = new TupleValueExpression(TABLE1, TABLE1, COLS[2], COLS[2]);
        right.setValueType(COLTYPES[2]);
        right.setValueSize(COLTYPES[2].getLengthInBytesForFixedTypes());
        col4_exp.setLeft(left);
        col4_exp.setRight(right);
        proj_schema.addColumn(new SchemaColumn(TABLE1, TABLE1, COLS[4], "C1",
                                               col4_exp));
        cols[3] = COLS[4];

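    // This fragment generates a plan node's output schema: one TupleValueExpression per table column,
    // constructed with an explicit column index, becomes a SchemaColumn in the NodeSchema.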
    {
        m_outputSchema = new NodeSchema();
        m_hasSignificantOutputSchema = true;
        for (int i = 0; i < m_columnNames.length; ++i)
        {
            TupleValueExpression tve = new TupleValueExpression(m_tableName,
                                                                m_tableName,
                                                                m_columnNames[i],
                                                                m_columnNames[i],
                                                                i);
            m_outputSchema.addColumn(new SchemaColumn(m_tableName,

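            // This fragment handles materialized-view display columns: when the left operand of a
            // display column's expression is a plain column reference (VALUE_TUPLE), it is passed
            // to processMaterializedViewColumn as a TupleValueExpression.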
            for (int i = stmt.m_groupByColumns.size() + 1; i < stmt.m_displayColumns.size(); i++) {
                ParsedSelectStmt.ParsedColInfo col = stmt.m_displayColumns.get(i);
                Column destColumn = destColumnArray.get(i);

                AbstractExpression colExpr = col.expression.getLeft();
                TupleValueExpression tve = null;
                if (colExpr.getExpressionType() == ExpressionType.VALUE_TUPLE) {
                    tve = (TupleValueExpression)colExpr;
                }
                processMaterializedViewColumn(matviewinfo, srcTable, destColumn,
                        col.expression.getExpressionType(), tve);

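        // This test compiles several join queries and checks that every output-schema column of each
        // plan node (NestLoopPlanNode, ProjectionPlanNode, DistinctPlanNode, OrderByPlanNode)
        // is a TupleValueExpression with a resolved column index.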
        assertTrue(pn instanceof NestLoopPlanNode);
        NodeSchema ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }

        pn = compile("select  distinct(A) FROM R1 JOIN R2 USING(A)");
        pn = pn.getChild(0);
        assertTrue(pn instanceof ProjectionPlanNode);
        ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }
        pn = pn.getChild(0);
        assertTrue(pn instanceof DistinctPlanNode);
        ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }

        pn = compile("select  A  FROM R1 JOIN R2 USING(A) ORDER BY A");
        pn = pn.getChild(0);
        assertTrue(pn instanceof ProjectionPlanNode);
        ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }
        pn = pn.getChild(0);
        assertTrue(pn instanceof OrderByPlanNode);
        ns = pn.getOutputSchema();
        for (SchemaColumn sc : ns.getColumns()) {
            AbstractExpression e = sc.getExpression();
            assertTrue(e instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) e;
            assertNotSame(-1, tve.getColumnIndex());
        }

        List<AbstractPlanNode> apl;
        AbstractPlanNode node;
        SeqScanPlanNode seqScan;

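    // In this partitioning analysis, candidate partition keys are the TupleValueExpressions among the
    // value-equivalence keys; a partitioned table counts as filtered only when one of them covers
    // its partitioning column.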
     *         The caller can raise an alarm if there is more than one.
     */
    public void analyzeForMultiPartitionAccess(Collection<StmtTableScan> collection,
            HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence)
    {
        TupleValueExpression tokenPartitionKey = null;
        Set< Set<AbstractExpression> > eqSets = new HashSet< Set<AbstractExpression> >();
        int unfilteredPartitionKeyCount = 0;

        // Reset this flag to forget the result of the previous multi-partition access analysis.
        // An AdHoc statement with parameters calls this function at least twice.
        // By default this flag should be true.
        m_joinValid = true;
        boolean subqueryHasReceiveNode = false;
        boolean hasPartitionedTableJoin = false;
        // Iterate over the tables to collect partition columns.
        for (StmtTableScan tableScan : collection) {
            // Replicated tables don't need filter coverage.
            if (tableScan.getIsReplicated()) {
                continue;
            }

            // The partition column can be null in an obscure edge case.
            // The table is declared non-replicated yet specifies no partitioning column.
            // This can occur legitimately when views based on partitioned tables neglect to group by the partition column.
            // The interpretation of this edge case is that the table has "randomly distributed data".
            // In such a case, the table is valid for use by MP queries only and can only be joined with replicated tables
            // because it has no recognized partitioning join key.
            List<SchemaColumn> columnsNeedingCoverage = tableScan.getPartitioningColumns();

            if (tableScan instanceof StmtSubqueryScan) {
                StmtSubqueryScan subScan = (StmtSubqueryScan) tableScan;
                subScan.promoteSinglePartitionInfo(valueEquivalence, eqSets);

                if (subScan.hasReceiveNode()) {
                    if (subqueryHasReceiveNode) {
                        // Found another subquery with a receive node at the same level.
                        // Joining two such subqueries is not supported in a two-fragment plan.
                        m_joinValid = false;

                        // Still need to count the independent partitioned tables.
                        break;
                    }
                    subqueryHasReceiveNode = true;

                    if (subScan.isTableAggregate()) {
                        // A partitioned-table aggregate returns only one aggregate row.
                        // It has already been marked with a receive node, so any join or processing
                        // based on this aggregate subquery must be done on the coordinator.
                        // Joins: the other side has to be a replicated table.
                        // Any processing based on this subquery should require only one fragment.
                        continue;
                    }
                } else {
                    // This subquery scans a partitioned table but has no receive node.
                    hasPartitionedTableJoin = true;
                }
            } else {
                // This is a partitioned table.
                hasPartitionedTableJoin = true;
            }

            boolean unfiltered = true;
            for (AbstractExpression candidateColumn : valueEquivalence.keySet()) {
                if ( ! (candidateColumn instanceof TupleValueExpression)) {
                    continue;
                }
                TupleValueExpression candidatePartitionKey = (TupleValueExpression) candidateColumn;
                if (! canCoverPartitioningColumn(candidatePartitionKey, columnsNeedingCoverage)) {
                    continue;
                }
                unfiltered = false;
                if (tokenPartitionKey == null) {

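        // This fragment parses an ORDER BY item: when the child node is a columnref, the parsed
        // expression is a TupleValueExpression and its table/column names and aliases populate the
        // ParsedColInfo; otherwise the column is treated as an expression over VOLT_TEMP_TABLE.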
        // Cases:
        // The child could be a columnref, in which case it's just a normal column.
        // Just make a ParsedColInfo object for it and the planner will do the right thing later.
        if (child.name.equals("columnref")) {
            assert(order_exp instanceof TupleValueExpression);
            TupleValueExpression tve = (TupleValueExpression) order_exp;
            order_col.columnName = tve.getColumnName();
            order_col.tableName = tve.getTableName();
            order_col.tableAlias = tve.getTableAlias();
            if (order_col.tableAlias == null) {
                order_col.tableAlias = order_col.tableName;
            }

            order_col.alias = tve.getColumnAlias();
        } else {
            String alias = child.attributes.get("alias");
            order_col.alias = alias;
            order_col.tableName = "VOLT_TEMP_TABLE";
            order_col.tableAlias = "VOLT_TEMP_TABLE";

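                // This fragment handles a pure-column index (one with no expressions JSON) by
                // synthesizing a TupleValueExpression for each indexed column.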
                String jsonExpr = index.getExpressionsjson();
                // if this is a pure-column index...
                if (jsonExpr.isEmpty()) {
                    for (ColumnRef cref : index.getColumns()) {
                        Column col = cref.getColumn();
                        TupleValueExpression tve = new TupleValueExpression(table.getTypeName(),
                                                                            orderedAlias.getKey(),
                                                                            col.getName(),
                                                                            col.getName(),
                                                                            col.getIndex());
                        indexExpressions.add(tve);

        }
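        // This fragment turns a parsed columnref node into a TupleValueExpression and has the
        // table scan resolve it, recording the referenced column for that scan.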
        StmtTableScan tableScan = m_tableAliasMap.get(tableAlias);
        assert(tableScan != null);
        String columnName = exprNode.attributes.get("column");
        String columnAlias = exprNode.attributes.get("alias");
        TupleValueExpression expr = new TupleValueExpression(tableName, tableAlias, columnName, columnAlias);
        // Collect the unique columns used in the plan for a given scan:
        // resolve the TVE and add it to the scan's cache of referenced columns.
        tableScan.resolveTVE(expr, columnName);
        return expr;
    }
