Package JKernelMachines.fr.lip6.kernel.typed

Examples of JKernelMachines.fr.lip6.kernel.typed.DoubleLinear


  {
    eprintln(2, "training on "+train.size()+" train data and "+test.size()+" test data");
   
    //first training
    eprint(3, "first training ");
    svm = new DoublePegasosSVM();
    svm.setLambda(lambda);
    svm.setK(k);
    svm.setT(T);
    svm.setT0(t0);
    svm.train(train);
    eprintln(3, " done.");
   
    //affect numplus highest output to plus class
    eprintln(3, "affecting 1 to the "+numplus+" highest output");
    SortedSet<TrainingSample<double[]>> sorted = new TreeSet<TrainingSample<double[]>>(new Comparator<TrainingSample<double[]>>(){

      public int compare(TrainingSample<double[]> o1, TrainingSample<double[]> o2) {
        int ret = (new Double(svm.valueOf(o2.sample))).compareTo(svm.valueOf(o1.sample));
        if(ret == 0)
          ret = -1;
        return ret;
      }
     
    });
    sorted.addAll(test);
    eprintln(4, "sorted size : "+sorted.size()+" test size : "+test.size());
    int n = 0;
    for(TrainingSample<double[]> t : sorted)
    {
      if(n < numplus)
        t.label = 1;
      else
        t.label = -1;
      n++;
    }
   
    double C = 1. / (train.size()*lambda) ;
    double Cminus = 1e-5;
    double Cplus = 1e-5 * numplus/(test.size() - numplus);
   
    while(Cminus < C || Cplus < C)
    {
      //solve full problem
      ArrayList<TrainingSample<double[]>> full = new ArrayList<TrainingSample<double[]>>();
      full.addAll(train);
      full.addAll(test);
     
      eprint(3, "full training ");
      svm = new DoublePegasosSVM();
      svm.setLambda(lambda);
      svm.setK(k);
      svm.setT(T);
      svm.setT0(t0);
      svm.train(full);
      eprintln(3, "done.");
     
      boolean changed = false;
     
      do
      {
        changed = false;
        //0. computing error
        final Map<TrainingSample<double[]>, Double> errorCache = new HashMap<TrainingSample<double[]>, Double>();
        for(TrainingSample<double[]> t : test)
        {
          double err1 = 1. - t.label * svm.valueOf(t.sample);
          errorCache.put(t, err1);
        }
        eprintln(3, "Error cache done.");
       
        // 1 . sort by descending error
        sorted = new TreeSet<TrainingSample<double[]>>(new Comparator<TrainingSample<double[]>>(){

          public int compare(TrainingSample<double[]> o1,
              TrainingSample<double[]> o2) {
            int ret = errorCache.get(o2).compareTo(errorCache.get(o1));
            if(ret == 0)
              ret = -1;
            return ret;
          }
        });
        sorted.addAll(test);
        List<TrainingSample<double[]>> sortedList = new ArrayList<TrainingSample<double[]>>();
        sortedList.addAll(sorted);
       
       
        eprintln(3, "sorting done, checking couple");
       
        // 2 . test all couple by decreasing error order
//        for(TrainingSample<T> i1 : sorted)
        for(int i = 0 ; i < sortedList.size(); i++)
        {
          TrainingSample<double[]> i1 = sortedList.get(i);
//          for(TrainingSample<T> i2 : sorted)
          for(int j = i+1; j < sortedList.size(); j++)
          {
            TrainingSample<double[]> i2 = sortedList.get(j);
            if(examine(i1, i2, errorCache))
            {
              eprintln(3, "couple found !");
              changed = true;
              break;
            }
          }
          if(changed)
            break;
        }

        if(changed)
        {
          eprintln(3, "re-training");
          svm = new DoublePegasosSVM();
          svm.setLambda(lambda);
          svm.setK(k);
          svm.setT(T);
          svm.setT0(t0);
          svm.train(full);
View Full Code Here


  {
    eprintln(2, "training on "+train.size()+" train data and "+test.size()+" test data");
   
    //first training
    eprint(3, "first training ");
    svm = new DoubleSGDQN();
    DoubleSGDQN.VERBOSE = false;
    svm.train(train);
    eprintln(3, " done.");
   
    //affect numplus highest output to plus class
    eprintln(3, "affecting 1 to the "+numplus+" highest output");
    SortedSet<TrainingSample<double[]>> sorted = new TreeSet<TrainingSample<double[]>>(new Comparator<TrainingSample<double[]>>(){

      public int compare(TrainingSample<double[]> o1, TrainingSample<double[]> o2) {
        int ret = (new Double(svm.valueOf(o2.sample))).compareTo(svm.valueOf(o1.sample));
        if(ret == 0)
          ret = -1;
        return ret;
      }
     
    });
    sorted.addAll(test);
    eprintln(4, "sorted size : "+sorted.size()+" test size : "+test.size());
    int n = 0;
    for(TrainingSample<double[]> t : sorted)
    {
      if(n < numplus)
        t.label = 1;
      else
        t.label = -1;
      n++;
    }
   
    double Cminus = 1e-5;
    double Cplus = 1e-5 * numplus/(test.size() - numplus);
   
    while(Cminus < C || Cplus < C)
    {
      //solve full problem
      ArrayList<TrainingSample<double[]>> full = new ArrayList<TrainingSample<double[]>>();
      full.addAll(train);
      full.addAll(test);
     
      eprint(3, "full training ");
      svm = new DoubleSGDQN();
      svm.setC((Cminus+Cplus)/2.);
      svm.train(full);
      eprintln(3, "done.");
     
      boolean changed = false;
     
      do
      {
        changed = false;
        //0. computing error
        final Map<TrainingSample<double[]>, Double> errorCache = new HashMap<TrainingSample<double[]>, Double>();
        for(TrainingSample<double[]> t : test)
        {
          double err1 = 1. - t.label * svm.valueOf(t.sample);
          errorCache.put(t, err1);
        }
        eprintln(3, "Error cache done.");
       
        // 1 . sort by descending error
        sorted = new TreeSet<TrainingSample<double[]>>(new Comparator<TrainingSample<double[]>>(){

          public int compare(TrainingSample<double[]> o1,
              TrainingSample<double[]> o2) {
            int ret = errorCache.get(o2).compareTo(errorCache.get(o1));
            if(ret == 0)
              ret = -1;
            return ret;
          }
        });
        sorted.addAll(test);
        List<TrainingSample<double[]>> sortedList = new ArrayList<TrainingSample<double[]>>();
        sortedList.addAll(sorted);
       
       
        eprintln(3, "sorting done, checking couple");
       
        // 2 . test all couple by decreasing error order
//        for(TrainingSample<T> i1 : sorted)
        for(int i = 0 ; i < sortedList.size(); i++)
        {
          TrainingSample<double[]> i1 = sortedList.get(i);
//          for(TrainingSample<T> i2 : sorted)
          for(int j = i+1; j < sortedList.size(); j++)
          {
            TrainingSample<double[]> i2 = sortedList.get(j);
            if(examine(i1, i2, errorCache))
            {
              eprintln(3, "couple found !");
              changed = true;
              break;
            }
          }
          if(changed)
            break;
        }

        if(changed)
        {
          eprintln(3, "re-training");
          svm = new DoubleSGDQN();
          svm.setC((Cminus+Cplus)/2.);
          svm.train(full);
        }
      }
      while(changed);
View Full Code Here

    if (files.size() != bow.size()){
      System.err.println("Bow and files of different size !!!! STOPING ...");
      return null;
    }

    DoubleGaussChi2 gaussKernel = new DoubleGaussChi2(gamma);
    double distmean = 0.0;

    if (bComptuteMeanDist){
      int cpt = 0;
      for (int i=0; i < files.size(); i++ ){
        for (int j = i + 1; j < files.size(); j++ ){
          distmean += gaussKernel.distChi2(bow.get(i).get(0), bow.get(j).get(0));
          cpt++;
        }
      }
      distmean = distmean / ((double)cpt);
      if (distmean < Double.MIN_VALUE)
        System.err.println("distmean = " + distmean + " to small - pathological signatures !!!!!! STOPING ...");

      gamma = 1/distmean;
      gaussKernel.setGamma(gamma);

      System.out.println("distmean = " + distmean + " gamma = " + gamma);
    }

    ThreadedSumKernel<ArrayList<double[]>> tsk = new ThreadedSumKernel<ArrayList<double[]>>();
View Full Code Here

    if(!isInialize){
      System.err.println("You need initialize this algorithm for training.");
      return ;
    }
    /* train SVM */
    DoubleLinear kernel = new DoubleLinear();
    svm = new SMOSVM<double[]>(kernel);
    //svm.setC(1);
    svm.setVerbosityLevel(0);

    //////////////////////////////////////////////////////////////////
View Full Code Here

      map.put(s, index);
      index++;
    }
   
    //computing matrix       
    ThreadedMatrixOperator factory = new ThreadedMatrixOperator()
    {
      @Override
      public void doLine(int index, double[] line) {
        //reverse search through mapping S <-> index
        S s1 = null;
        for(S s : map.keySet())
          if( map.get(s) == index )
          {
            s1 = s;
            break;
          }
        //mapped signature
        T t1 = signatures.get(s1);
       
        //all mapping S <-> T
        for(S s2 : map.keySet())
        {
          //get index of s2
          int j = map.get(s2);
          //get signature of s2
          T t2 = signatures.get(s2);
          //add value of kernel
          line[j] = kernel.valueOf(t1, t2);
        }
      };
    };


    /* do the actuel computing of the matrix */
    matrix = factory.getMatrix(matrix);
   
  }
View Full Code Here

  public double[][] getKernelMatrix(final List<TrainingSample<T>> l) {
   
    final List<TrainingSample<T>> e = l;
    double[][] matrix = new double[e.size()][e.size()];
       
    ThreadedMatrixOperator factory = new ThreadedMatrixOperator()
    {
      @Override
      public void doLine(int index, double[] line) {
       
        T xi = l.get(index).sample;
       
        for(int i = line.length-1 ; i >= 0 ; i--)
        {
          line[i] = k.valueOf(xi, l.get(i).sample);
        }
      };
    };

    /* do the actuel computing of the matrix */
    matrix = factory.getMatrix(matrix);
   
    return matrix;
  }
View Full Code Here

        continue;
     

      final double[][] m = k.getKernelMatrix(l);
      // specific factory
      ThreadedMatrixOperator tmo = new ThreadedMatrixOperator(){
       
        @Override
        public void doLine(int index, double[] line) {
         
          for(int i = line.length-1 ; i >= 0 ; i--)
          {
            line[i] += m[index][i] * w;
          }
        };
       
      };
     
      tmo.getMatrix(matrix);
    }
    return matrix;
  }
View Full Code Here

        continue;
     

      final double[][] m = k.getKernelMatrix(l);
      // specific factory
      ThreadedMatrixOperator tmo = new ThreadedMatrixOperator(){
       
        @Override
        public void doLine(int index, double[] line) {
         
          for(int i = line.length-1 ; i >= 0 ; i--)
          {
            line[i] *= Math.pow(m[index][i], w);
          }
        };
       
      };
     
      tmo.getMatrix(matrix);
    }
    return matrix;
  }
View Full Code Here

//        }
//    }
   
    //parallelized
    final double[] resLine = new double[kmatrix.length];
    ThreadedMatrixOperator objFactory = new ThreadedMatrixOperator()
    {
      @Override
      public void doLine(int index, double[] line) {
        if(alp[index] < numPrec)
          return;
        double al1 = -0.5 * alp[index] * l.get(index).label;
        for(int j = line.length-1 ; j != 0 ; j--)
        {
          resLine[index] += al1 * alp[j] * l.get(j).label * kmatrix[index][j];
        }
       
      } 
    };
   
    objFactory.getMatrix(kmatrix);
    double obj1 = 0;
    for(double d : resLine)
      obj1 += d;
   
    double obj2 = 0;
View Full Code Here

//        g += -0.5* alp[x]*alp[y]*l.get(x).label*l.get(y).label*kmatrix[x][y];
//      }
     
      //parallelized
      final double[] resLine = new double[kmatrix.length];
      ThreadedMatrixOperator gradFactory = new ThreadedMatrixOperator()
      {
        @Override
        public void doLine(int index, double[] line) {
          if(alp[index] < numPrec)
            return;
          double al1 = -0.5 * alp[index] * l.get(index).label;
          for(int j = line.length-1 ; j != 0 ; j--)
          {
            resLine[index] += al1 * alp[j] * l.get(j).label * kmatrix[index][j];
          }
         
        } 
      };
     
      gradFactory.getMatrix(kmatrix);
      double g = 0;
      for(double d : resLine)
        g += d;
      grad.add(i, g);
    }
View Full Code Here

TOP

Related Classes of JKernelMachines.fr.lip6.kernel.typed.DoubleLinear

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.