Package org.encog.ml.train

Examples of org.encog.ml.train.MLTrain


   * @return The number of training iterations completed within the ten-second window.
   */
  public static int evaluateTrain(
      final BasicNetwork network, final MLDataSet training) {
    // train the neural network
    MLTrain train;
   
    train = new ResilientPropagation(network, training);

    final long start = System.currentTimeMillis();
    final long stop = start + (10 * MILIS);

    int iterations = 0;
    while (System.currentTimeMillis() < stop) {
      iterations++;
      train.iteration();
    }

    return iterations;
  }
View Full Code Here


 
  public void trainInstar(CPN network,MLDataSet training)
  {
    int epoch = 1;

    MLTrain train = new TrainInstar(network,training,0.1,true);
    do {
      train.iteration();
      System.out
          .println("Training instar, Epoch #" + epoch + ", Error: " + train.getError() );
      epoch++;
    } while(train.getError()>0.01);
  }
View Full Code Here

 
  public void trainOutstar(CPN network,MLDataSet training)
  {
    int epoch = 1;

    MLTrain train = new TrainOutstar(network,training,0.1);
    do {
      train.iteration();
      System.out
          .println("Training outstar, Epoch #" + epoch + ", error=" + train.getError() );
      epoch++;
    } while(train.getError()>0.01);
  }
View Full Code Here

    network.reset();

    MLDataSet trainingSet = new BasicMLDataSet(XOR_INPUT, XOR_IDEAL);

    // train the neural network
    final MLTrain train = new ResilientPropagation(network, trainingSet);

    do {
      train.iteration();
    } while (train.getError() > 0.009);

    double e = network.calculateError(trainingSet);
    System.out.println("Network traiined to error: " + e);

    System.out.println("Saving network");
View Full Code Here

    network.reset();

    MLDataSet trainingSet = new BasicMLDataSet(XOR_INPUT, XOR_IDEAL);

    // train the neural network
    final MLTrain train = new ResilientPropagation(network, trainingSet);

    do {
      train.iteration();
    } while (train.getError() > 0.009);

    double e = network.calculateError(trainingSet);
    System.out.println("Network traiined to error: " + e);

    System.out.println("Saving network");
View Full Code Here

  public static double trainNetwork(final String what,
      final BasicNetwork network, final MLDataSet trainingSet) {
    // train the neural network
    CalculateScore score = new TrainingSetScore(trainingSet);
    final MLTrain trainAlt = new NeuralSimulatedAnnealing(
        network, score, 10, 2, 100);

    final MLTrain trainMain = new Backpropagation(network, trainingSet,0.000001, 0.0);

    ((Propagation)trainMain).setNumThreads(1);
    final StopTrainingStrategy stop = new StopTrainingStrategy();
    trainMain.addStrategy(new Greedy());
    trainMain.addStrategy(new HybridStrategy(trainAlt));
    trainMain.addStrategy(stop);

    int epoch = 0;
    while (!stop.shouldStop()) {
      trainMain.iteration();
      System.out.println("Training " + what + ", Epoch #" + epoch
          + " Error:" + trainMain.getError());
      epoch++;
    }
    return trainMain.getError();
  }
View Full Code Here

    network2.setBiasActivation(-1);
    network3.setBiasActivation(0.5);
   
    MLDataSet trainingData = new BasicMLDataSet(XOR.XOR_INPUT,XOR.XOR_IDEAL);
   
    MLTrain rprop1 = new ResilientPropagation(network1, trainingData);
    MLTrain rprop2 = new ResilientPropagation(network2, trainingData);
    MLTrain rprop3 = new ResilientPropagation(network3, trainingData);

    NetworkUtil.testTraining(rprop1,0.03);
    NetworkUtil.testTraining(rprop2,0.01);
    NetworkUtil.testTraining(rprop3,0.01);
   
View Full Code Here

   
    BasicNetwork network = EncogUtility.simpleFeedForward(2, 5, 7, 1, true);
    Randomizer randomizer = new ConsistentRandomizer(-1, 1, 19);
    //randomizer.randomize(network);
    System.out.println(network.dumpWeights());
    MLTrain rprop = new ResilientPropagation(network, trainingData);
    int iteration = 0;
    do {
      rprop.iteration();
      System.out.println(rprop.getError());
      iteration++;
    } while( iteration<5000 && rprop.getError()>0.01);
    System.out.println(iteration);
    Assert.assertTrue(iteration<40);
  }
View Full Code Here

          

          
           BasicNetwork network3 = NetworkUtil.createXORNetworknNguyenWidrowUntrained();
          
           MLTrain bpropNguyen = new Backpropagation( network3, trainingData3, 0.9, 0.8 );    
           train(i, bpropNguyen, "NguyenWidrowRandomizer" );
          
           BasicNetwork network2 = NetworkUtil.createXORNetworkUntrained();
          
           MLTrain bpropRange = new Backpropagation( network2, trainingData2, 0.9, 0.8 );    
           train(i, bpropRange,  "RangeRandomizer       ");
       }
   }
View Full Code Here

  public void testRPROP() throws Throwable
  {
    MLDataSet trainingData = new BasicMLDataSet(XOR.XOR_INPUT,XOR.XOR_IDEAL);
   
    BasicNetwork network = NetworkUtil.createXORNetworkUntrained();
    MLTrain rprop = new ResilientPropagation(network, trainingData);
    NetworkUtil.testTraining(rprop,0.03);
  }
View Full Code Here

TOP

Related Classes of org.encog.ml.train.MLTrain

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.