Package org.drools.core.common

Examples of org.drools.core.common.BaseNode


     * @return the actual attached node that may be the one given as parameter
     *         or eventually one that was already in the cache if sharing is enabled
     */
    public BaseNode attachNode(final BuildContext context,
                               final BaseNode candidate) {
        // the node that will actually end up in the network: either an
        // already-attached equivalent (node sharing) or the candidate itself
        BaseNode node = null;
        RuleBasePartitionId partition = null;
        if ( candidate.getType() == NodeTypeEnums.EntryPointNode ) {
            // entry point nodes are always shared
            node = context.getKnowledgeBase().getRete().getEntryPointNode( ((EntryPointNode) candidate).getEntryPoint() );
            // all EntryPointNodes belong to the main partition
            partition = RuleBasePartitionId.MAIN_PARTITION;
        } else if ( candidate.getType() == NodeTypeEnums.ObjectTypeNode ) {
            // object type nodes are always shared
            Map<ObjectType, ObjectTypeNode> map = context.getKnowledgeBase().getRete().getObjectTypeNodes( context.getCurrentEntryPoint() );
            if ( map != null ) {
                ObjectTypeNode otn = map.get( ((ObjectTypeNode) candidate).getObjectType() );
                if ( otn != null ) {
                    // adjusting expiration offset: keep the larger of the two so the
                    // shared node satisfies every rule that uses it
                    otn.setExpirationOffset( Math.max( otn.getExpirationOffset(),
                                                       ((ObjectTypeNode) candidate).getExpirationOffset() ) );
                    node = otn;
                }
            }
            // all ObjectTypeNodes belong to the main partition
            partition = RuleBasePartitionId.MAIN_PARTITION;
        } else if ( isSharingEnabledForNode( context,
                                             candidate ) ) {
            // generic sharing: look for an equivalent sibling under the current source
            if ( (context.getTupleSource() != null) && NodeTypeEnums.isLeftTupleSink( candidate ) ) {
                node = context.getTupleSource().getSinkPropagator().getMatchingNode( candidate );
            } else if ( (context.getObjectSource() != null) && NodeTypeEnums.isObjectSink( candidate ) ) {
                node = context.getObjectSource().getSinkPropagator().getMatchingNode( candidate );
            } else {
                throw new RuntimeException( "This is a bug on node sharing verification. Please report to development team." );
            }
        }

        if ( node == null || node.isStreamMode() ) {
            // StreamMode in Phreak does not allow sharing
            // only attach() if it is a new node
            node = candidate;

            // new node, so it must be labeled
            if ( partition == null ) {
                // if it does not have a predefined partition label
                if ( context.getPartitionId() == null ) {
                    // if no label in current context, create one
                    context.setPartitionId( context.getKnowledgeBase().createNewPartitionId() );
                }
                partition = context.getPartitionId();
            }
            // tag the node with the actual partition label
            node.setPartitionId( partition );
            node.attach(context);
            // adds the node to the context list to track all added nodes
            context.getNodes().add( node );
        } else {
            // shared node found
            // undo previous id assignment: the candidate is discarded, so its
            // reserved id is returned to the pool
            context.releaseId( candidate.getId() );
        }
        node.addAssociation( context.getRule(), context.peekRuleComponent() );
        return node;
    }
View Full Code Here


                                                                  rule,
                                                                  subrule,
                                                                  subruleIndex,
                                                                  context );

        BaseNode baseTerminalNode = (BaseNode) terminal;
        baseTerminalNode.networkUpdated(new UpdateContext());
        baseTerminalNode.attach(context);
        if ( context.getKnowledgeBase().getConfiguration().isPhreakEnabled() ) {
            AddRemoveRule.addRule( terminal, context.getWorkingMemories(), context.getKnowledgeBase() );
        }

        // adds the terminal node to the list of nodes created/added by this sub-rule
View Full Code Here

     * @return the actual attached node that may be the one given as parameter
     *         or eventually one that was already in the cache if sharing is enabled
     */
    public BaseNode attachNode(final BuildContext context,
                               final BaseNode candidate) {
        // the node that will actually end up in the network: either an
        // already-attached equivalent (node sharing) or the candidate itself
        BaseNode node = null;
        RuleBasePartitionId partition = null;
        if ( candidate.getType() == NodeTypeEnums.EntryPointNode ) {
            // entry point nodes are always shared
            node = context.getKnowledgeBase().getRete().getEntryPointNode( ((EntryPointNode) candidate).getEntryPoint() );
            // all EntryPointNodes belong to the main partition
            partition = RuleBasePartitionId.MAIN_PARTITION;
        } else if ( candidate.getType() == NodeTypeEnums.ObjectTypeNode ) {
            // object type nodes are always shared
            Map<ObjectType, ObjectTypeNode> map = context.getKnowledgeBase().getRete().getObjectTypeNodes( context.getCurrentEntryPoint() );
            if ( map != null ) {
                ObjectTypeNode otn = map.get( ((ObjectTypeNode) candidate).getObjectType() );
                if ( otn != null ) {
                    // adjusting expiration offset: keep the larger of the two so the
                    // shared node satisfies every rule that uses it
                    otn.setExpirationOffset( Math.max( otn.getExpirationOffset(),
                                                       ((ObjectTypeNode) candidate).getExpirationOffset() ) );
                    node = otn;
                }
            }
            // all ObjectTypeNodes belong to the main partition
            partition = RuleBasePartitionId.MAIN_PARTITION;
        } else if ( isSharingEnabledForNode( context,
                                             candidate ) ) {
            // generic sharing: look for an equivalent sibling under the current source
            if ( (context.getTupleSource() != null) && NodeTypeEnums.isLeftTupleSink( candidate ) ) {
                node = context.getTupleSource().getSinkPropagator().getMatchingNode( candidate );
            } else if ( (context.getObjectSource() != null) && NodeTypeEnums.isObjectSink( candidate ) ) {
                node = context.getObjectSource().getSinkPropagator().getMatchingNode( candidate );
            } else {
                throw new RuntimeException( "This is a bug on node sharing verification. Please report to development team." );
            }
        }

        if ( node == null || node.isStreamMode() ) {
            // StreamMode in Phreak does not allow sharing
            // only attach() if it is a new node
            node = candidate;

            // new node, so it must be labeled
            if ( partition == null ) {
                // if it does not have a predefined partition label
                if ( context.getPartitionId() == null ) {
                    // if no label in current context, create one
                    context.setPartitionId( context.getKnowledgeBase().createNewPartitionId() );
                }
                partition = context.getPartitionId();
            }
            // tag the node with the actual partition label
            node.setPartitionId( partition );
            node.attach(context);
            // adds the node to the context list to track all added nodes
            context.getNodes().add( node );
        } else {
            // shared node found
            // NOTE(review): presumably merges candidate-specific state into the
            // shared node — confirm mergeNodes semantics
            mergeNodes(node, candidate);
            // undo previous id assignment: the candidate is discarded, so its
            // reserved id is returned to the pool
            context.releaseId( candidate.getId() );
        }
        node.addAssociation( context.getRule(), context.peekRuleComponent() );
        return node;
    }
View Full Code Here

                adapter = new RuleTerminalNode.RTNCleanupAdapter( (RuleTerminalNode) tn );
            }
            context.setCleanupAdapter( adapter );
        }

        BaseNode node = (BaseNode) tn;
        Set<BaseNode> removedSources = new HashSet<BaseNode>();
        LinkedList<BaseNode> betaStack = new LinkedList<BaseNode>();
        LinkedList<BaseNode> alphaStack = new LinkedList<BaseNode>();
        LinkedList<BaseNode> stillInUse = new LinkedList<BaseNode>();

        // alpha and beta stacks must be separate
        // beta stacks processed first.
        boolean processRian = true;
        while ( node != null ) {
            removeNode(node, removedSources, alphaStack, betaStack, stillInUse, processRian, workingMemories, context);
            if ( !betaStack.isEmpty() ) {
                node = betaStack.removeLast();
                if ( node.getType() == NodeTypeEnums.RightInputAdaterNode ) {
                    processRian = true;
                } else {
                    processRian = false;
                }
View Full Code Here

                workingMemory.clearNodeMemory( (MemoryFactory) node);
            }
        }

        if ( NodeTypeEnums.isBetaNode( node ) ) {
            BaseNode parent =  ((LeftTupleSink) node).getLeftTupleSource();
            node.remove(context, this, workingMemories);

            if ( !((BetaNode)node).isRightInputIsRiaNode() ) {
                // all right inputs need processing too
                alphaStack.addLast( ((BetaNode) node).getRightInput() );
            }

            if ( processRian && ((BetaNode)node).isRightInputIsRiaNode() ) {
                betaStack.addLast( ((BetaNode) node).getLeftTupleSource() );
                betaStack.addLast( ((BetaNode) node).getRightInput() );
            } else {
                removeNode( parent, removedSources, alphaStack, betaStack, stillInUse, true, workingMemories, context );
            }
        } else if ( NodeTypeEnums.isLeftTupleSink(node) ) {
            BaseNode parent =  ((LeftTupleSink) node).getLeftTupleSource();
            node.remove(context, this, workingMemories);
            removeNode( parent, removedSources, alphaStack, betaStack, stillInUse, true, workingMemories, context );
        } else if ( NodeTypeEnums.LeftInputAdapterNode == node.getType() ) {
            BaseNode parent =  ((LeftInputAdapterNode) node).getParentObjectSource();
            node.remove(context, this, workingMemories);
            removeNode( parent , removedSources, alphaStack, betaStack, stillInUse, true, workingMemories, context );
        } else if ( NodeTypeEnums.isObjectSource( node ) ) {
            if ( removedSources.add(node) ) {
                BaseNode parent = ((ObjectSource) node).getParentObjectSource();
                node.remove(context, this, workingMemories);
                removeNode(parent, removedSources, alphaStack, betaStack, stillInUse, true, workingMemories, context);
            }
        } else {
            throw new IllegalStateException("Defensive exception, should not fall through");
View Full Code Here

                                                                  rule,
                                                                  subrule,
                                                                  subruleIndex,
                                                                  context );

        BaseNode baseTerminalNode = (BaseNode) terminal;
        baseTerminalNode.networkUpdated(new UpdateContext());
        baseTerminalNode.attach(context);
        if ( context.getKnowledgeBase().getConfiguration().isPhreakEnabled() ) {
            AddRemoveRule.addRule( terminal, context.getWorkingMemories(), context.getKnowledgeBase() );
        }

        // adds the terminal node to the list of nodes created/added by this sub-rule
View Full Code Here

     * @return the actual attached node that may be the one given as parameter
     *         or eventually one that was already in the cache if sharing is enabled
     */
    public BaseNode attachNode(final BuildContext context,
                               final BaseNode candidate) {
        BaseNode node = null;
        RuleBasePartitionId partition = null;
        if ( candidate.getType() == NodeTypeEnums.EntryPointNode ) {
            // entry point nodes are always shared
            node = context.getKnowledgeBase().getRete().getEntryPointNode( ((EntryPointNode) candidate).getEntryPoint() );
            // all EntryPointNodes belong to the main partition
            partition = RuleBasePartitionId.MAIN_PARTITION;
        } else if ( candidate.getType() == NodeTypeEnums.ObjectTypeNode ) {
            // object type nodes are always shared
            Map<ObjectType, ObjectTypeNode> map = context.getKnowledgeBase().getRete().getObjectTypeNodes( context.getCurrentEntryPoint() );
            if ( map != null ) {
                ObjectTypeNode otn = map.get( ((ObjectTypeNode) candidate).getObjectType() );
                if ( otn != null ) {
                    // adjusting expiration offset
                    otn.setExpirationOffset( Math.max( otn.getExpirationOffset(),
                                                       ((ObjectTypeNode) candidate).getExpirationOffset() ) );
                    node = otn;
                }
            }
            // all ObjectTypeNodes belong to the main partition
            partition = RuleBasePartitionId.MAIN_PARTITION;
        } else if ( isSharingEnabledForNode( context,
                                             candidate ) ) {
            if ( (context.getTupleSource() != null) && NodeTypeEnums.isLeftTupleSink( candidate ) ) {
                node = context.getTupleSource().getSinkPropagator().getMatchingNode( candidate );
            } else if ( (context.getObjectSource() != null) && NodeTypeEnums.isObjectSink( candidate ) ) {
                node = context.getObjectSource().getSinkPropagator().getMatchingNode( candidate );
            } else {
                throw new RuntimeException( "This is a bug on node sharing verification. Please report to development team." );
            }
        }

        if ( node == null || node.isStreamMode() ) {
            // SteamMode in Phreak does not allow sharing
            // only attach() if it is a new node
            node = candidate;

            // new node, so it must be labeled
            if ( partition == null ) {
                // if it does not has a predefined label
                if ( context.getPartitionId() == null ) {
                    // if no label in current context, create one
                    context.setPartitionId( context.getKnowledgeBase().createNewPartitionId() );
                }
                partition = context.getPartitionId();
            }
            // set node whit the actual partition label
            node.setPartitionId( partition );
            node.attach(context);
            // adds the node to the context list to track all added nodes
            context.getNodes().add( node );
        } else {
            // shared node found
            // undo previous id assignment
            context.releaseId( candidate.getId() );
        }
        node.addAssociation( context.getRule(), context.peekRuleComponent() );
        return node;
    }
View Full Code Here

                                                                  rule,
                                                                  subrule,
                                                                  subruleIndex,
                                                                  context );

        BaseNode baseTerminalNode = (BaseNode) terminal;
        baseTerminalNode.networkUpdated(new UpdateContext());
        baseTerminalNode.attach(context);
        if ( context.getKnowledgeBase().getConfiguration().isPhreakEnabled() ) {
            AddRemoveRule.addRule( terminal, context.getWorkingMemories(), context.getKnowledgeBase() );
        }

        // adds the terminal node to the list of nodes created/added by this sub-rule
View Full Code Here

                adapter = new RuleTerminalNode.RTNCleanupAdapter( (RuleTerminalNode) tn );
            }
            context.setCleanupAdapter( adapter );
        }

        BaseNode node = (BaseNode) tn;
        LinkedList<BaseNode> betaStack = new LinkedList<BaseNode>();
        LinkedList<BaseNode> alphaStack = new LinkedList<BaseNode>();
        LinkedList<BaseNode> stillInUse = new LinkedList<BaseNode>();

        // alpha and beta stacks must be separate
        // beta stacks processed first.
        boolean processRian = true;
        while ( node != null ) {
            removeNode(node, alphaStack, betaStack, stillInUse, processRian, workingMemories, context);
            if ( !betaStack.isEmpty() ) {
                node = betaStack.removeLast();
                if ( node.getType() == NodeTypeEnums.RightInputAdaterNode ) {
                    processRian = true;
                } else {
                    processRian = false;
                }
View Full Code Here

        if ( node.isInUse() ) {
            stillInUse.add(node);
        }

        if ( NodeTypeEnums.isBetaNode( node ) ) {
            BaseNode parent =  ((LeftTupleSink) node).getLeftTupleSource();
            node.remove(context, this, workingMemories);

            if ( !((BetaNode)node).isRightInputIsRiaNode() ) {
                // all right inputs need processing too
                alphaStack.addLast( ((BetaNode) node).getRightInput() );
            }

            if ( processRian && ((BetaNode)node).isRightInputIsRiaNode() ) {
                betaStack.addLast( ((BetaNode) node).getLeftTupleSource() );
                betaStack.addLast( ((BetaNode) node).getRightInput() );
            } else {
                removeNode( parent, alphaStack, betaStack, stillInUse, true, workingMemories, context );
            }
        } else if ( NodeTypeEnums.isLeftTupleSink(node) ) {
            BaseNode parent =  ((LeftTupleSink) node).getLeftTupleSource();
            node.remove(context, this, workingMemories);
            removeNode( parent, alphaStack, betaStack, stillInUse, true, workingMemories, context );
        } else if ( NodeTypeEnums.LeftInputAdapterNode == node.getType() ) {
            BaseNode parent =  ((LeftInputAdapterNode) node).getParentObjectSource();
            node.remove(context, this, workingMemories);
            removeNode( parent , alphaStack, betaStack, stillInUse, true, workingMemories, context );
        } else if ( NodeTypeEnums.isObjectSource( node ) ) {
            BaseNode parent =  ((ObjectSource) node).getParentObjectSource();
            node.remove(context, this, workingMemories);
            removeNode( parent, alphaStack, betaStack, stillInUse, true, workingMemories, context );
        } else {
            throw new IllegalStateException("Defensive exception, should not fall through");
        }
View Full Code Here

TOP

Related Classes of org.drools.core.common.BaseNode

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.