Package org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators

Examples of org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLoad
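
All of the excerpts below come from the Pig codebase. As a minimal sketch of the pattern they share, a POLoad is identified by an OperatorKey, pointed at its input with a FileSpec wrapping a load-function FuncSpec, and handed a PigContext before it can produce tuples. The class name, scope string, and input path here are illustrative assumptions, not Pig code:

        import org.apache.pig.FuncSpec;
        import org.apache.pig.impl.PigContext;
        import org.apache.pig.impl.io.FileSpec;
        import org.apache.pig.impl.plan.NodeIdGenerator;
        import org.apache.pig.impl.plan.OperatorKey;
        import org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.POLoad;

        public class POLoadSketch {
            public static POLoad makeLoad(PigContext pigContext) {
                String scope = "scope";  // illustrative scope name
                NodeIdGenerator nig = NodeIdGenerator.getInstance();
                // Identify the operator within its plan.
                POLoad ld = new POLoad(new OperatorKey(scope, nig.getNextNodeId(scope)));
                // Point it at a file and the load function that parses it.
                ld.setLFile(new FileSpec("/tmp/part-00000",  // illustrative path
                        new FuncSpec("org.apache.pig.builtin.PigStorage")));
                ld.setPc(pigContext);  // context is required before tuples can be fetched
                return ld;
            }
        }

The temp-load variants below (setIsTmpLoad(true)) follow the same shape, with the FileSpec supplied later via setLFile().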


                // Turn the job's leaf store into a temporary store, then build a
                // follow-on job whose map plan re-loads that temp file and applies the limit.
                FileSpec fSpec = getTempFileSpec();
                ((POStore)mpLeaf).setSFile(fSpec);
                ((POStore)mpLeaf).setIsTmpStore(true);
                mr.setReduceDone(true);
                MapReduceOper limitAdjustMROp = getMROp();
                POLoad ld = getLoad();
                ld.setLFile(fSpec);
                limitAdjustMROp.mapPlan.add(ld);
                POLimit pLimit = new POLimit(new OperatorKey(scope, nig.getNextNodeId(scope)));
                pLimit.setLimit(mr.limit);
                limitAdjustMROp.mapPlan.addAsLeaf(pLimit);
                if (mr.isGlobalSort())


        // For each successor job, find the map-plan root that loads this
        // job's output file (ofile) so the two plans can be merged.
        for (MapReduceOper succ : succs) {
            List<PhysicalOperator> roots = succ.mapPlan.getRoots();
            ArrayList<PhysicalOperator> rootsCopy =
                new ArrayList<PhysicalOperator>(roots);
            for (PhysicalOperator op : rootsCopy) {
                POLoad load = (POLoad)op;
                String ifile = load.getLFile().getFileName();
                if (ofile.compareTo(ifile) != 0) {
                    continue;
                }
                PhysicalOperator opSucc = succ.mapPlan.getSuccessors(op).get(0);
                PhysicalPlan clone = null;

                        
        MapReduceOper mrOper = getMROper();

        // Reuse the load at the root of the first splittee's map plan as the
        // root of the merged map plan.
        MapReduceOper splittee = mergeList.get(0);
        PhysicalPlan pl = splittee.mapPlan;
        POLoad load = (POLoad)pl.getRoots().get(0);

        mrOper.mapPlan.add(load);

        // Add a dummy store operator; it'll be replaced by the split operator later.
        try {

        // the keys are sent in a tuple. If there is really only
        // 1 join key, it would be the first field of the tuple. If
        // there are multiple join keys, the tuple itself represents
        // the join key
        Object firstLeftKey = (keys.size() == 1 ? keys.get(0) : keys);
       
        POLoad ld = new POLoad(genKey(), new FileSpec(indexFile, new FuncSpec(indexFileLoadFuncSpec)), false);
        try {
            pc = (PigContext)ObjectSerializer.deserialize(PigMapReduce.sJobConf.get("pig.pigContext"));
        } catch (IOException e) {
            int errCode = 2094;
            String msg = "Unable to deserialize pig context.";
            throw new ExecException(msg,errCode,e);
        }
        pc.connect();
        ld.setPc(pc);
        // Drain the index loader: getNext() returns STATUS_EOP once every
        // index entry has been read.
        index = new LinkedList<Tuple>();
        for (Result res = ld.getNext(dummyTuple); res.returnStatus != POStatus.STATUS_EOP; res = ld.getNext(dummyTuple)) {
            index.offer((Tuple) res.result);
        }

        Tuple prevIdxEntry = null;
        Tuple matchedEntry;
    

        // This optimization only applies when the map's root operator is a
        // load performed by one of Pig's sampling loaders; otherwise bail out.
        PhysicalOperator po = pos.get(0);
        if (!(po instanceof POLoad)) {
            log.debug("Root operator of map is not load.");
            return; // Huh?
        }
        POLoad load = (POLoad)po;
        String loadFunc = load.getLFile().getFuncName();
        String loadFile = load.getLFile().getFileName();
        if (!("org.apache.pig.impl.builtin.RandomSampleLoader".equals(loadFunc)) &&
                !("org.apache.pig.impl.builtin.PoissonSampleLoader".equals(loadFunc))) {
            log.debug("Not a sampling job.");
            return;
        }
        if (loadFile == null) {
            log.debug("No load file");
            return;
        }

        // Get this job's predecessor. There should be exactly one.
        List<MapReduceOper> preds = mPlan.getPredecessors(mr);
        if (preds.size() != 1) {
            log.debug("Too many predecessors to sampling job.");
            return;
        }
        MapReduceOper pred = preds.get(0);

        // The predecessor should be a root.
        List<MapReduceOper> predPreds = mPlan.getPredecessors(pred);
        if (predPreds != null && predPreds.size() > 0) {
            log.debug("Predecessor should be a root of the plan");
            return;
        }

        // The predecessor should have just a load and store in the map, and nothing
        // in the combine or reduce.
        if ( !(pred.reducePlan.isEmpty() && pred.combinePlan.isEmpty())) {
            log.debug("Predecessor has a combine or reduce plan");
            return;
        }

        if (pred.mapPlan == null || pred.mapPlan.size() != 2) {
            log.debug("Predecessor has more than just load+store in the map");
            return;
        }

        List<PhysicalOperator> loads = pred.mapPlan.getRoots();
        if (loads.size() != 1) {
            log.debug("Predecessor plan has more than one root.");
            return;
        }
        PhysicalOperator r = loads.get(0);
        if (!(r instanceof POLoad)) { // Huh?
            log.debug("Predecessor's map plan root is not a load.");
            return;
        }
        POLoad predLoad = (POLoad)r;
        LoadFunc lf = (LoadFunc)PigContext.instantiateFuncFromSpec(predLoad.getLFile().getFuncSpec());
        if (!(lf instanceof SamplableLoader)) {
            log.debug("Predecessor's loader does not implement SamplableLoader");
            return;
        }

        // The MR job should have one successor.
        List<MapReduceOper> succs = mPlan.getSuccessors(mr);
        if (succs.size() != 1) {
            log.debug("Job has more than one successor.");
            return;
        }
        MapReduceOper succ = succs.get(0);

        // Find the load that correlates with the file the sampler is loading, and
        // check that it is using BinStorage.
        if (succ.mapPlan == null) { // Huh?
            log.debug("Successor has no map plan.");
            return;
        }
        loads = succ.mapPlan.getRoots();
        POLoad succLoad = null;
        for (PhysicalOperator root : loads) {
            if (!(root instanceof POLoad)) { // Huh?
                log.debug("Successor's roots are not loads");
                return;
            }
            POLoad sl = (POLoad)root;
            if (loadFile.equals(sl.getLFile().getFileName()) &&
                    "org.apache.pig.builtin.BinStorage".equals(sl.getLFile().getFuncName())) {
                succLoad = sl;
                break;
            }
        }

        if (succLoad == null) {
            log.debug("Could not find load that matched file we are sampling.");
            return;
        }

        // Okay, we're on.
        // First, replace the sampling loader with one that subsumes the load
        // function used by the predecessor.
        String[] rslargs = new String[2];
        FileSpec predFs = predLoad.getLFile();
        // The first argument is the FuncSpec of the loader function to subsume;
        // we set this ourselves.
        rslargs[0] = predFs.getFuncSpec().toString();
        // The second argument is the number of samples per block; read it from the original.
        rslargs[1] = load.getLFile().getFuncSpec().getCtorArgs()[1];
        FileSpec fs = new FileSpec(predFs.getFileName(),new FuncSpec(loadFunc, rslargs));
        POLoad newLoad = new POLoad(load.getOperatorKey(),load.getRequestedParallelism(), fs, load.isSplittable());
        newLoad.setSignature(predLoad.getSignature());
        try {
            mr.mapPlan.replace(load, newLoad);
           
            // check if it has PartitionSkewedKeys
            List<PhysicalOperator> ls = mr.reducePlan.getLeaves();
            for (PhysicalOperator op : ls) {
                scan(mr, op, fs.getFileName());
            }
        } catch (PlanException e) {
            throw new VisitorException(e);
        }

        // Second, replace the loader in our successor with whatever loader was originally used.
        fs = new FileSpec(predFs.getFileName(), predFs.getFuncSpec());
        newLoad = new POLoad(succLoad.getOperatorKey(), succLoad.getRequestedParallelism(), fs, succLoad.isSplittable());
        newLoad.setSignature(predLoad.getSignature());
        try {
            succ.mapPlan.replace(succLoad, newLoad);
        } catch (PlanException e) {
            throw new VisitorException(e);
        }

    @Override
    public void visit(LOLoad loLoad) throws FrontendException {
        String scope = DEFAULT_SCOPE;
        // All files are assumed splittable after the LoadStore refactor, so
        // no splittable flag is passed to the POLoad constructor.
        POLoad load = new POLoad(new OperatorKey(scope, nodeGen
                .getNextNodeId(scope)), loLoad.getLoadFunc());
        load.addOriginalLocation(loLoad.getAlias(), loLoad.getLocation());
        load.setLFile(loLoad.getFileSpec());
        load.setPc(pc);
        load.setResultType(DataType.BAG);
        load.setSignature(loLoad.getSignature());
        load.setLimit(loLoad.getLimit());
        load.setIsTmpLoad(loLoad.isTmpLoad());
        load.setCacheFiles(loLoad.getLoadFunc().getCacheFiles());
        load.setShipFiles(loLoad.getLoadFunc().getShipFiles());

        currentPlan.add(load);
        logToPhyMap.put(loLoad, load);

        // Load is typically a root operator, but in the multiquery

        // the keys are sent in a tuple. If there is really only
        // 1 join key, it would be the first field of the tuple. If
        // there are multiple join keys, the tuple itself represents
        // the join key
        Object firstLeftKey = (keys.size() == 1 ? keys.get(0) : keys);
        POLoad ld = new POLoad(genKey(), new FileSpec(indexFile, new FuncSpec(indexFileLoadFuncSpec)));
               
        Properties props = ConfigurationUtil.getLocalFSProperties();
        PigContext pc = new PigContext(ExecType.LOCAL, props);
        ld.setPc(pc);
        // Drain the index loader: getNextTuple() returns STATUS_EOP once every
        // index entry has been read.
        index = new LinkedList<Tuple>();
        for (Result res = ld.getNextTuple(); res.returnStatus != POStatus.STATUS_EOP; res = ld.getNextTuple()) {
            index.offer((Tuple) res.result);
        }

       
        Tuple prevIdxEntry = null;
        Tuple matchedEntry;

    private NativeMapReduceOper getNativeMROp(String mrJar, String[] parameters) {
        return new NativeMapReduceOper(new OperatorKey(scope,nig.getNextNodeId(scope)), mrJar, parameters);
    }

    // Creates a POLoad marked as a temporary-file load; callers point it at a
    // concrete file with setLFile().
    private POLoad getLoad(){
        POLoad ld = new POLoad(new OperatorKey(scope, nig.getNextNodeId(scope)));
        ld.setPc(pigContext);
        ld.setIsTmpLoad(true);
        return ld;
    }
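
As a usage note, the first excerpt on this page shows where this helper's result ends up: the temporary load is pointed at the temp file written by the previous job and becomes a root of the new job's map plan. Reusing that excerpt's names (fSpec, limitAdjustMROp):

        POLoad ld = getLoad();            // temporary load from the helper above
        ld.setLFile(fSpec);               // temp file produced by the previous job's store
        limitAdjustMROp.mapPlan.add(ld);  // root of the limit-adjusting job's map plan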
