Package org.jblas

Examples of org.jblas.DoubleMatrix$ElementsAsListView


    /**
     * Predict for multiple samples (standalone)
     * @param x Input sample matrix (one sample per row)
     * @return Prediction matrix (one row per sample, one column per class)
     */
    public final DoubleMatrix predict(DoubleMatrix x) {
        DoubleMatrix y = x.mmul(lrparam.w.transpose()).addiRowVector(lrparam.b);
        softmax(y);
        return y;
    }
View Full Code Here
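In the snippet above, x holds one sample per row, lrparam.w one weight row per class, and lrparam.b the class biases, so x.mmul(w.transpose()) produces one score row per sample and addiRowVector adds the bias to every row in place. A minimal standalone shape check with made-up sizes and random data (plain DoubleMatrix variables stand in for the lrparam fields):

import org.jblas.DoubleMatrix;

public class PredictShapeSketch {
    public static void main(String[] args) {
        int nSamples = 4, nFeatures = 3, nClasses = 2;            // made-up sizes
        DoubleMatrix x = DoubleMatrix.rand(nSamples, nFeatures);  // one sample per row
        DoubleMatrix w = DoubleMatrix.rand(nClasses, nFeatures);  // one weight row per class
        DoubleMatrix b = DoubleMatrix.rand(1, nClasses);          // bias row vector

        // (4 x 3) * (3 x 2) -> (4 x 2), then the bias row is added to every row in place
        DoubleMatrix y = x.mmul(w.transpose()).addiRowVector(b);
        System.out.println("y is " + y.rows + " x " + y.columns); // y is 4 x 2
    }
}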


    public final JavaRDD<SampleVector> predict(JavaRDD<SampleVector> samples, boolean copyX) {
      return samples.map(new PredictSpark(copyX));
    }

    private void softmax(DoubleMatrix y) {
        DoubleMatrix max = y.rowMaxs();
        MatrixFunctions.expi(y.subiColumnVector(max));
        DoubleMatrix sum = y.rowSums();
        y.diviColumnVector(sum);
    }
View Full Code Here
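The softmax above uses the usual max-subtraction trick for numerical stability: subtracting each row's maximum before exponentiating leaves the result unchanged but avoids overflow. A self-contained sketch of the same jblas calls on a tiny matrix of arbitrary scores, checking that each row ends up summing to 1:

import org.jblas.DoubleMatrix;
import org.jblas.MatrixFunctions;

public class SoftmaxSketch {
    public static void main(String[] args) {
        DoubleMatrix y = new DoubleMatrix(new double[][] {
                {1.0, 2.0, 3.0},
                {0.5, 0.5, 0.0}
        });
        DoubleMatrix max = y.rowMaxs();                 // per-row maxima (column vector)
        MatrixFunctions.expi(y.subiColumnVector(max));  // exp(y - rowMax), in place
        DoubleMatrix sum = y.rowSums();                 // per-row normalizer
        y.diviColumnVector(sum);                        // each row now sums to 1
        System.out.println(y.rowSums());                // approximately [1.0; 1.0]
    }
}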

    }

  @Override
  protected void gradientUpdateMiniBatch(SGDTrainConfig config, DoubleMatrix x_samples, DoubleMatrix y_samples, SGDParam curr_param) {
    int nbr_samples = x_samples.rows;
    DoubleMatrix curr_w = ((LRParam)curr_param).w;
    DoubleMatrix curr_b = ((LRParam)curr_param).b;
   
    // forward pass: class probabilities for the whole mini-batch
    DoubleMatrix curr_predict_y = x_samples.mmul(curr_w.transpose()).addiRowVector(curr_b);
    softmax(curr_predict_y);

    // batch-averaged gradients from the residual (y - p)
    DoubleMatrix delta_b = y_samples.sub(curr_predict_y);
    DoubleMatrix delta_w = delta_b.transpose().mmul(x_samples);
    delta_b = delta_b.columnSums().divi(nbr_samples);
    delta_w.divi(nbr_samples);

    // optional L1 / L2 regularization terms
    if (config.isUseRegularization()) {
        if (0 != config.getLamada1()) {
            delta_w.addi(MatrixFunctions.signum(curr_w).mmuli(config.getLamada1()));
            delta_b.addi(MatrixFunctions.signum(curr_b).transpose().mmuli(config.getLamada1()));
        }
        if (0 != config.getLamada2()) {
            delta_w.addi(curr_w.mmul(config.getLamada2()));
            delta_b.addi(curr_b.transpose().mmul(config.getLamada2()));
        }
    }

    // apply the update in place
    curr_w.addi(delta_w.muli(config.getLearningRate()));
    curr_b.addi(delta_b.transpose().muli(config.getLearningRate()));
  }
View Full Code Here
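gradientUpdateMiniBatch implements the standard softmax-regression gradient: with one-hot labels y and probabilities p, the residual y - p averaged over the batch gives the bias gradient, and its projection onto the inputs gives the weight gradient. A minimal shape check with made-up sizes and random stand-ins for the real data:

import org.jblas.DoubleMatrix;

public class GradientShapeSketch {
    public static void main(String[] args) {
        int n = 5, features = 4, classes = 3;                        // made-up sizes
        DoubleMatrix x = DoubleMatrix.rand(n, features);             // inputs
        DoubleMatrix y = DoubleMatrix.rand(n, classes);              // stands in for one-hot labels
        DoubleMatrix p = DoubleMatrix.rand(n, classes);              // stands in for softmax(x w^T + b)

        DoubleMatrix residual = y.sub(p);                            // (n x classes)
        DoubleMatrix deltaW = residual.transpose().mmul(x).divi(n);  // (classes x features), matches w
        DoubleMatrix deltaB = residual.columnSums().divi(n);         // (1 x classes), matches b
        System.out.println(deltaW.rows + "x" + deltaW.columns + ", " + deltaB.rows + "x" + deltaB.columns);
    }
}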

        for (SampleVector v: arg._2()) {
            myList.add(v);
        }
        Collections.shuffle(myList);
       
        DoubleMatrix x_samples = MathUtil.convertX2Matrix(myList);
        DoubleMatrix y_samples = null;
        if(this.sgd.isSupervise()) {
          y_samples = MathUtil.convertY2Matrix(myList);
        }

        // check whether we use cg this time
View Full Code Here
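MathUtil.convertX2Matrix and convertY2Matrix are project helpers not shown on this page. Assuming each SampleVector exposes its features as a double[] (a hypothetical accessor), packing a batch into a row-per-sample DoubleMatrix can be as small as this sketch:

import java.util.List;
import org.jblas.DoubleMatrix;

public final class ConvertSketch {
    // Hypothetical sketch: build an (nSamples x nFeatures) matrix from per-sample feature arrays.
    static DoubleMatrix toMatrix(List<double[]> rows) {
        double[][] data = rows.toArray(new double[0][]);
        return new DoubleMatrix(data);   // one input row per sample
    }
}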

    /**
     * Sigmoid output (standalone)
     * @param input Input layer matrix (one sample per row)
     * @return Output layer matrix
     */
  public final DoubleMatrix sigmod_output(DoubleMatrix input) {
    DoubleMatrix output = input;
    for(int i = 0; i < bpparam.w.length; i++) {
      output = output.mmul(bpparam.w[i].transpose()).addiRowVector(bpparam.b[i]);
      MathUtil.sigmod(output);
    }
    return output;
  }
View Full Code Here
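MathUtil.sigmod, applied to each layer above, is not shown here; presumably it applies the logistic function elementwise and in place. A plausible stand-in using only jblas primitives (a guess at the helper, not the project's actual code):

import org.jblas.DoubleMatrix;

public final class SigmoidSketch {
    // In-place elementwise logistic: x[i] = 1 / (1 + exp(-x[i]))
    static void sigmoid(DoubleMatrix x) {
        for (int i = 0; i < x.length; i++) {
            x.put(i, 1.0 / (1.0 + Math.exp(-x.get(i))));
        }
    }
}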

        curr_b.addi(delta_b.transpose().muli(config.getLearningRate()));
  }

  @Override
  protected void gradientUpdateCG(SGDTrainConfig config, DoubleMatrix x_samples, DoubleMatrix y_samples, SGDParam curr_param) {
    DoubleMatrix curr_w = ((LRParam)curr_param).w;
    DoubleMatrix curr_b = ((LRParam)curr_param).b;
   
    LROptimizer lropt = new LROptimizer(config, x_samples, y_samples, curr_w, curr_b);
        MyConjugateGradient cg = new MyConjugateGradient(lropt, config.getCgInitStepSize());
        cg.setTolerance(config.getCgTolerance());
        try {
View Full Code Here

  /**
   * Sigmoid output (standalone)
   * @param input Input layer data
   * @param output Output layer data (filled in place)
   */
  public final void sigmod_output(double[] input, double[] output) {
    DoubleMatrix input_m = new DoubleMatrix(input).transpose();
    DoubleMatrix output_m = sigmod_output(input_m);
    for(int i = 0; i < output.length; i++) {
      output[i] = output_m.get(0, i);
    }
  }
View Full Code Here
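The transpose in new DoubleMatrix(input).transpose() matters: in jblas the double[] constructor builds a column vector, so the transpose turns the single sample into the 1 x n row expected by the matrix version of sigmod_output. A tiny check:

import org.jblas.DoubleMatrix;

public class RowVectorSketch {
    public static void main(String[] args) {
        double[] input = {0.1, 0.2, 0.3};
        DoubleMatrix col = new DoubleMatrix(input);  // 3 x 1 column vector
        DoubleMatrix row = col.transpose();          // 1 x 3 row vector: one sample per row
        System.out.println(col.rows + "x" + col.columns + " -> " + row.rows + "x" + row.columns);
    }
}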

      lrparam.b.addi(new_lrparam.b.sub(lrparam.b).divi(nrModelReplica));
  }

  @Override
  protected double loss(List<SampleVector> samples) {
    DoubleMatrix x_samples = MathUtil.convertX2Matrix(samples);
    DoubleMatrix y_samples = MathUtil.convertY2Matrix(samples);
    DoubleMatrix predict_y = predict(x_samples);
    return MatrixFunctions.powi(predict_y.sub(y_samples), 2).sum();
  }
View Full Code Here

   
    public HiddenLayerParam(int _n_in, int _n_out, double[][] _w, double[] _b) {
        int n_visible = _n_in;
        int n_hidden = _n_out;
        if (null == _w) {
            // no weights supplied: uniform random init in [-1/n_visible, 1/n_visible]
            w = new DoubleMatrix(n_hidden, n_visible);
            double a = 1.0 / n_visible;
            for (int i = 0; i < n_hidden; i++) {
                for (int j = 0; j < n_visible; j++) {
                    w.put(i, j, MathUtil.uniform(-a, a));
                }
            }
        } else {
            w = new DoubleMatrix(_w);
        }

        if (null == _b) {
            this.hbias = new DoubleMatrix(n_hidden);  // zero-initialized hidden bias
        } else {
            this.hbias = new DoubleMatrix(_b);
        }
        vbias = new DoubleMatrix(n_visible);          // zero-initialized visible bias
    }
View Full Code Here
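When no weights are passed in, the constructor above draws each entry uniformly from [-1/n_visible, 1/n_visible] via MathUtil.uniform, another project helper not shown here. A hypothetical stand-in, for illustration only:

import java.util.Random;

public final class UniformSketch {
    private static final Random RNG = new Random();

    // Hypothetical stand-in for MathUtil.uniform: uniform draw in [min, max)
    static double uniform(double min, double max) {
        return min + (max - min) * RNG.nextDouble();
    }
}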

   
    /**
     * backward
     */
    // 1) output layer: error term (a - y) .* a .* (1 - a)
    DoubleMatrix ai = activation[curr_pbparam.nl - 1];
    l_bias[curr_pbparam.nl - 1] = ai.sub(y_samples).muli(ai).muli(ai.neg().addi(1));
   
    // 2) propagate the error back through the hidden layers
    for(int i = curr_pbparam.nl - 2; i >= 1; i--) {
      ai = activation[i];
      l_bias[i] = l_bias[i + 1].mmul(curr_pbparam.w[i]).muli(ai).muli(ai.neg().addi(1));
    }
   
    /**
     * delta
     */
    for(int i = 0; i < curr_pbparam.w.length; i++) {
      DoubleMatrix delta_wi = l_bias[i + 1].transpose().mmul(activation[i]).divi(nbr_sample);
      if(config.isUseRegularization()) {
        // for BP, only L2 regularization is applied
        if(0 != config.getLamada2()) {
            delta_wi.addi(curr_pbparam.w[i].mul(config.getLamada2()));
        }
      }
      curr_pbparam.w[i].subi(delta_wi.muli(config.getLearningRate()));
    }
    for(int i = 0; i < curr_pbparam.b.length; i++) {
      DoubleMatrix delta_bi = l_bias[i + 1].columnSums().divi(nbr_sample);
      curr_pbparam.b[i].subi(delta_bi.transpose().muli(config.getLearningRate()));
    }
  }
View Full Code Here
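The repeated muli(ai).muli(ai.neg().addi(1)) pattern in the backward pass is the elementwise sigmoid derivative a * (1 - a) applied to the back-propagated error. A tiny standalone sketch of just that term:

import org.jblas.DoubleMatrix;

public class SigmoidDerivSketch {
    public static void main(String[] args) {
        DoubleMatrix a = new DoubleMatrix(new double[][] {{0.2, 0.5, 0.9}});  // activations
        // elementwise a * (1 - a); a.neg().addi(1.0) builds (1 - a) without modifying a
        DoubleMatrix deriv = a.mul(a.neg().addi(1.0));
        System.out.println(deriv);  // [0.16, 0.25, 0.09]
    }
}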
