Package: com.etsy.conjecture.data

Examples of com.etsy.conjecture.data.StringKeyedVector.mul()
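
Every snippet below uses mul(double) the same way: to scale a sparse, string-keyed vector in place, usually turning a copied feature vector into a gradient or update direction. A minimal sketch of that behaviour follows; the setCoordinate/getCoordinate accessor names are assumptions made for illustration, not taken from the snippets below.

    StringKeyedVector v = new StringKeyedVector();
    v.setCoordinate("feature_a", 2.0);   // assumed accessor name
    v.setCoordinate("feature_b", -0.5);  // assumed accessor name

    v.mul(3.0); // in-place scalar multiply of every stored coordinate

    // v now holds {"feature_a": 6.0, "feature_b": -1.5}
    double a = v.getCoordinate("feature_a"); // 6.0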


        // Hinge-loss gradient: the gradient is non-zero only when the
        // margin z = y * <w, x> is at or below the threshold.
        StringKeyedVector gradients = instance.getVector().copy();
        double inner = param.dot(instance.getVector());
        double label = instance.getLabel().getAsPlusMinus();
        double z = inner * label;
        if (z <= this.threshold) {
            gradients.mul(-label); // turn the copied feature vector into -label * x
            return gradients;
        } else {
            return new StringKeyedVector(); // empty vector: nothing to update
        }
    }
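
A small worked case for the branch above, with values chosen purely for illustration:

    double label = 1.0, inner = 0.4, threshold = 1.0;
    double z = inner * label;        // 0.4
    boolean active = z <= threshold; // true, so the returned vector is -label * x
    // with inner = 1.7 instead, z = 1.7 > 1.0 and an empty vector is returned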


    // Online k-means style step: move the closest center toward the instance,
    // using a per-center learning rate of 1 / (number of updates to this center).
    clusterCounts.put(closest_center, current_count + 1.0);
    // Get per center learning rate
    Double learning_rate = 1.0 / clusterCounts.get(closest_center);
    // Take a gradient step: center <- (1 - learning_rate) * center + learning_rate * instance
    StringKeyedVector center = param.get(closest_center);
    center.mul(1 - learning_rate);
    instance.mul(learning_rate); // note: scales the instance vector in place
    center.add(instance);
    l1Projection(center);
    param.put(closest_center, center);
  }
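
The two mul() calls plus the add() implement the convex combination center = (1 - rate) * center + rate * instance, coordinate by coordinate. The same arithmetic on a single coordinate, with plain doubles and purely illustrative values:

    double rate = 0.25;      // e.g. the fourth update of this center
    double center_i = 2.0;   // one coordinate of the current center
    double instance_i = 6.0; // the same coordinate of the instance
    double updated = (1 - rate) * center_i + rate * instance_i; // 1.5 + 1.5 = 3.0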

        // Passive-aggressive update: step size tau = hinge loss / ||x||^2.
        double prediction = param.dot(instance.getVector());
        double loss = Math.max(0, 1d - label * prediction);
        if (loss > 0) {
            double norm = instance.getVector().LPNorm(2d);
            double tau = loss / (norm * norm);
            gradients.mul(tau * label); // gradients and label are set up before this excerpt
            return gradients;
        } else {
            return new StringKeyedVector(); // loss is zero: no update
        }
    }
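
A quick numeric check of the step size above, with purely illustrative values:

    double label = 1.0, prediction = 0.2;
    double loss = Math.max(0, 1d - label * prediction); // 0.8
    double norm = 2.0;                                  // stands in for ||x||_2
    double tau = loss / (norm * norm);                  // 0.8 / 4.0 = 0.2
    // the returned vector is then (tau * label) * x = 0.2 * x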

    @Override
    public StringKeyedVector getUpdate(LabeledInstance instance) {
        // One SGD step: the returned update is -learningRate * gradient.
        StringKeyedVector gradients = model.getGradients(instance);
        double learningRate = getDecreasingLearningRate(model.epoch);
        gradients.mul(-learningRate);
        return gradients;
    }

}
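
How the returned update is folded into the model is not shown in these excerpts. A minimal sketch of the usual SGD pattern, where optimizer and param are placeholder names and add() is the element-wise addition seen in the other snippets:

    StringKeyedVector update = optimizer.getUpdate(instance); // -learningRate * gradient
    param.add(update);                                        // w <- w - learningRate * gradient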

    public StringKeyedVector getGradients(LabeledInstance<BinaryLabel> instance) {
        // Logistic-loss gradient:
        // d/dw log(1 + exp(-y * <w, x>)) = -y * x / (exp(y * <w, x>) + 1).
        StringKeyedVector gradients = instance.getVector().copy();
        double label = instance.getLabel().getAsPlusMinus();
        double inner = instance.getVector().dot(param);
        double gradient = -label / (Math.exp(label * inner) + 1.0);
        gradients.mul(gradient);
        return gradients;
    }

    protected String getModelType() {
        return "logistic_regression";
    }

    @Override
    public StringKeyedVector getGradients(LabeledInstance<RealValuedLabel> instance) {
        // Squared-error gradient: d/dw (<w, x> - y)^2 = 2 * (<w, x> - y) * x.
        StringKeyedVector gradients = instance.getVector().copy();
        double hypothesis = param.dot(instance.getVector());
        double label = instance.getLabel().getValue();
        gradients.mul(2 * (hypothesis - label));
        return gradients;
    }

    @Override
    protected String getModelType() {

        for (LabeledInstance<L> instance : minibatch) {
            updateVec.add(getUpdate(instance)); // accumulate gradient
            model.truncate(instance);
            model.epoch++;
        }
        // Do a single update, scaling the accumulated vector down to the
        // average gradient over the minibatch.
        updateVec.mul(1.0 / minibatch.size());
        return updateVec;
    }
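
Accumulating per-instance updates and then multiplying by 1.0 / minibatch.size() is just averaging them. The same idea on a single coordinate, with plain doubles and purely illustrative values:

    double[] perInstanceUpdates = { 0.2, -0.4, 0.8 };
    double accumulated = 0.0;
    for (double u : perInstanceUpdates) {
        accumulated += u; // 0.6 after the loop
    }
    double averaged = accumulated * (1.0 / perInstanceUpdates.length); // 0.2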


        } else if (instance.getLabel().getValue() - ((RealValuedLabel)model.predict(instance.getVector())).getValue() < 0.0) {
            // Regression case: the prediction exceeds the label, so flip the sign of the step.
            update = update * -1;
        }
        StringKeyedVector updateVec = instance.getVector().copy();
        updateVec.mul(update); // scale a copy of the feature vector by the (possibly flipped) step size
        return updateVec;
    }

    public PassiveAggressiveOptimizer setC(double C) {
        checkArgument(C > 0, "C must be greater than 0. Given: %s", C);
