Package gov.sandia.cognition.statistics.bayesian

Examples of gov.sandia.cognition.statistics.bayesian.KalmanFilter
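
The excerpts below come from projects built on the Cognitive Foundry KalmanFilter. As a quick orientation, here is a minimal sketch of the construct-and-update cycle those excerpts rely on; the one-dimensional model, its covariances, and the observation value are illustrative assumptions, not code taken from any excerpt.

    // Minimal usage sketch (assumed 1-d random-walk model; values are illustrative only).
    KalmanFilter kf = new KalmanFilter(
        new LinearDynamicalSystem(
            MatrixFactory.getDefault().copyArray(new double[][] {{1d}}),   // A: state transition
            MatrixFactory.getDefault().copyArray(new double[][] {{0d}}),   // B: input (unused here)
            MatrixFactory.getDefault().copyArray(new double[][] {{1d}})),  // C: observation
        MatrixFactory.getDefault().copyArray(new double[][] {{0.1d}}),     // model (state) covariance
        MatrixFactory.getDefault().copyArray(new double[][] {{1d}}));      // measurement covariance

    // Gaussian belief over the hidden state.
    MultivariateGaussian belief = kf.createInitialLearnedObject();

    // Fold one observation into the belief; the excerpts below call predict(...)
    // and update(...) on cloned beliefs in the same way.
    Vector y = VectorFactory.getDefault().copyArray(new double[] {2.5d});
    kf.update(belief, y);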


    final Random rng = new Random(829351983L);

    /*
     * Sample from a logit model.
     */
    final KalmanFilter initialFilter = new KalmanFilter(
          new LinearDynamicalSystem(
              // State-transition matrix A (2-d random walk).
              MatrixFactory.getDefault().copyArray(new double[][] {
                  {1d, 0d},
                  {0d, 1d}}),
              // Input matrix B. (The listing is truncated here; the remaining
              // arguments are an assumed completion, not the original code.)
              MatrixFactory.getDefault().copyArray(new double[][] {
                  {0d, 0d},
                  {0d, 0d}}),
              // Observation matrix C.
              MatrixFactory.getDefault().copyArray(new double[][] {{1d, 0d}})),
          // Model (state) covariance.
          MatrixFactory.getDefault().copyArray(new double[][] {{1d, 0d}, {0d, 1d}}),
          // Measurement covariance.
          MatrixFactory.getDefault().copyArray(new double[][] {{1d}}));
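
In the particle-filter initialization excerpt further down this page, an initial filter like this one is cloned once per particle (this.initialFilter.clone()), so every particle carries its own independent copy of the regression filter.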


      // Draw the next regime: from the marginal class probabilities on the first
      // step, otherwise from the transition column of the previous class.
      Vector classProbs = (currentState == null) ? this.marginalClassProbs :
        this.classTransProbs.getColumn(currentClass);

      currentClass = DiscreteSamplingUtil.sampleIndexFromProbabilities(random, classProbs);

      // Propagate the continuous state through the selected regime's filter:
      // either sample an initial state, or draw x[t] ~ N(A x[t-1], Q).
      KalmanFilter currentFilter = this.stateFilters.get(currentClass);
      if (currentState == null) {
        currentState = currentFilter.createInitialLearnedObject().sample(random);
      } else {
        final Matrix G = currentFilter.getModel().getA();
        Matrix modelCovSqrt = CholeskyDecompositionMTJ.create(
            (DenseMatrix) currentFilter.getModelCovariance()).getR();
        currentState = MultivariateGaussian.sample(G.times(currentState), modelCovSqrt, random);
      }
      // Add the input contribution B u[t].
      currentState.plusEquals(currentFilter.getModel().getB().times(
          currentFilter.getCurrentInput()));

      // Emit an observation y[t] ~ N(C x[t], R).
      final Matrix F = currentFilter.getModel().getC();
      Vector observationMean = F.times(currentState);
      Matrix measurementCovSqrt = CholeskyDecompositionMTJ.create(
          (DenseMatrix) currentFilter.getMeasurementCovariance()).getR();
      Vector observation = MultivariateGaussian.sample(observationMean,
          measurementCovSqrt, random);

      results.add(new SimHmmObservedValue<Vector, Vector>(i, currentClass,
          currentState.clone(), observation));
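
In generative terms this excerpt draws the regime from either the marginal class probabilities or the previous regime's transition column, advances the continuous state as x[t] = A*x[t-1] + B*u[t] + w[t], and emits y[t] = C*x[t] + v[t], where w and v are zero-mean Gaussians with the filter's model and measurement covariances; the Cholesky factors supply the covariance square roots that MultivariateGaussian.sample expects.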

    return this.marginalClassProbs.getDimensionality();
  }

  /**
   * Class-conditional emission distribution: the observation is distributed as
   * N(C m, C P C' + R), where (m, P) is the current state belief and R is the
   * filter's measurement covariance.
   */
  @Override
  public MultivariateGaussian getEmissionFunction(MultivariateGaussian state, int classId) {
    KalmanFilter kf = this.stateFilters.get(classId);
    final Vector mean = kf.getModel().getC().times(state.getMean());
    final Matrix cov = kf.getModel().getC().times(state.getCovariance())
        .times(kf.getModel().getC().transpose())
        .plus(kf.getMeasurementCovariance());
    final MultivariateGaussian likelihood = new MultivariateGaussian(
        mean, cov);
    return likelihood;
  }
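
getEmissionFunction returns the class-conditional predictive distribution of the observation, N(C*m, C*P*C' + R). A typical use is to score an observation against each regime; a minimal sketch, where hmm, stateBelief, classId, and y are hypothetical stand-ins for objects like those in the excerpt:

    // Hypothetical usage: log-likelihood of observation y under one regime's emission.
    MultivariateGaussian emission = hmm.getEmissionFunction(stateBelief, classId);
    double logLikelihood = emission.getProbabilityFunction().logEvaluate(y);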

      Matrix F, Matrix G, Matrix modelCovariance,
      Random rng) {
    // F is the 1-row observation matrix, G the state-transition matrix, and
    // modelCovariance the state-noise covariance; check that their dimensions agree.
    Preconditions.checkArgument(F.getNumRows() == 1);
    Preconditions.checkArgument(F.getNumColumns() == G.getNumRows());
    Preconditions.checkArgument(G.getNumColumns() == modelCovariance.getNumRows());
    this.initialFilter = new KalmanFilter(
            new LinearDynamicalSystem(
                G,
                // Zero input matrix B: this model has no exogenous input.
                MatrixFactory.getDefault().createMatrix(G.getNumRows(), G.getNumColumns()),
                F),
            modelCovariance,

      /*
       * Fruehwirth-Schnatter-style upper utility sampling: propagate the
       * regression prior one step ahead and draw the coefficients from it
       * (using the prior mean instead is left commented out below).
       */
      final MultivariateGaussian predictivePrior = particle.getLinearState().clone();
      KalmanFilter kf = particle.getRegressionFilter();
      final Matrix G = kf.getModel().getA();
      final Matrix F = data.getObservedData();
      // One-step-ahead prior: mean G m, covariance G P G' + Q.
      predictivePrior.setMean(G.times(predictivePrior.getMean()));
      predictivePrior.setCovariance(
          G.times(predictivePrior.getCovariance()).times(G.transpose())
            .plus(kf.getModelCovariance()));
      final Vector betaSample =
          predictivePrior.sample(getRandom());
//          predictivePrior.getMean();
      // Predicted observation mean given the sampled coefficients.
      final double predPriorObsMean =
            F.times(betaSample).getElement(0);
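
The same one-step propagation of the regression prior (mean G*m, covariance G*P*G' plus the model covariance) appears again in the final excerpt on this page, where the propagated mean (betaMean) is used directly instead of a sample.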

  private LogitMixParticle sufficientStatUpdate(
      LogitMixParticle priorParticle, ObservedValue<Vector, Matrix> data) {
    final LogitMixParticle updatedParticle = priorParticle.clone();

    final KalmanFilter filter = updatedParticle.getRegressionFilter();
    final UnivariateGaussian evComponent = updatedParticle.EVcomponent;
    // Use the EV mixture component's variance as the measurement noise.
    // TODO we should've already set this, so it might be redundant.
    filter.setMeasurementCovariance(
        MatrixFactory.getDefault().copyArray(new double[][] {{
          evComponent.getVariance()}}));

    // Center the sampled augmented response on the component's mean.
    final Vector sampledAugResponse = priorParticle.getAugResponseSample();
    final Vector diffAugResponse = sampledAugResponse.minus(
        VectorFactory.getDefault().copyArray(new double[] {
            evComponent.getMean().doubleValue()}));

    // Condition the Gaussian belief over the regression coefficients on the
    // centered response.
    final MultivariateGaussian posteriorState = updatedParticle.getLinearState().clone();
    filter.update(posteriorState, diffAugResponse);
    updatedParticle.setLinearState(posteriorState);

    return updatedParticle;
  }
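
With the measurement covariance set to the EV component's variance and the augmented response centered on that component's mean, filter.update(...) conditions the cloned Gaussian belief over the regression coefficients on a single Gaussian pseudo-observation, which is what makes this a sufficient-statistics update for the particle.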

      final DataDistribution<LogitMixParticle> initialParticles =
          CountedDataDistribution.create(true);
      for (int i = 0; i < numParticles; i++) {

        // Give each particle its own copy of the prior state and regression filter.
        final MultivariateGaussian initialPriorState = initialPrior.clone();
        final KalmanFilter kf = this.initialFilter.clone();

        // Pick one of the EV mixture components at random for this particle.
        final int componentId = this.rng.nextInt(10);
        final UnivariateGaussian evDist =
            this.evDistribution.getDistributions().get(componentId);

        final LogitMixParticle particleMvgDPDist =
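
Both the prior state and the initial filter are cloned before being attached to a particle, so later per-particle updates do not mutate shared objects; the index drawn from rng selects which of the ten EV mixture components the particle starts with.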

      GaussianArHpTransitionState prevState, int predClass, ObservedValue<Vector, Void> data) {
    /*
     * Perform the filtering step: propagate the previous state belief through
     * the predicted regime's filter.
     */
    MultivariateGaussian priorPredictedState = prevState.getState().clone();
    KalmanFilter kf = Iterables.get(prevState.getHmm().getStateFilters(), predClass);
    kf.predict(priorPredictedState);

    // Clone the model and its sufficient statistics before updating them.
    final DlmHiddenMarkovModel newHmm = prevState.getHmm().clone();
    final InverseGammaDistribution invScaleSS = prevState.getInvScaleSS().clone();
    final List<MultivariateGaussian> psiSS =
        ObjectUtil.cloneSmartElementsAsArrayList(prevState.getPsiSS());
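
Cloning the HMM, the inverse-gamma scale sufficient statistics, and the list of Gaussian sufficient statistics (psiSS) before they are modified keeps this transition-state update from changing prevState in place.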


        });
    // Process each particle in the target distribution using its one-step-ahead
    // regression prior.
    for (Entry<LogitMixParticle, ? extends Number> particleEntry : target.asMap().entrySet()) {
      final LogitMixParticle particle = particleEntry.getKey();

      // One-step-ahead prior for the regression coefficients: mean G m,
      // covariance G P G' + Q (the same propagation as in the sampling excerpt above).
      final MultivariateGaussian predictivePrior = particle.getLinearState().clone();
      KalmanFilter kf = particle.getRegressionFilter();
      final Matrix G = kf.getModel().getA();
      final Matrix F = data.getObservedData();
      predictivePrior.setMean(G.times(predictivePrior.getMean()));
      predictivePrior.setCovariance(
          G.times(predictivePrior.getCovariance()).times(G.transpose())
            .plus(kf.getModelCovariance()));
      final Vector betaMean = predictivePrior.getMean();

      // Recover the particle's multiplicity when counted weights are stored.
      final int particleCount;
      if (particleEntry.getValue() instanceof MutableDoubleCount) {
        particleCount = ((MutableDoubleCount) particleEntry.getValue()).count;
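
The MutableDoubleCount check reads a particle's integer multiplicity when the target distribution stores counted weights, presumably the same kind of counted distribution as the CountedDataDistribution created in the initialization excerpt above.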
