// Copyright (C) 2002 Samy Bengio (bengio@idiap.ch)
//
// This file is part of Torch. Release II.
// [The Ultimate Machine Learning Library]
//
// Torch is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// Torch is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Torch; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#ifndef LOG_RBF_INC
#define LOG_RBF_INC
#include "GradientMachine.h"
#include "EMTrainer.h"
namespace Torch {
/** LogRBF layer for #GradientMachine#.

    Formally speaking, $outputs[i] = -0.5 \sum_j \gamma_{ij}^2 \, (inputs[j] - \mu_{ij})^2$.\\
    $\mu_{ij}$ and $\gamma_{ij}$ are stored in #params#, with the following layout:\\
    $\mu_{00} \ldots \mu_{0n}, \gamma_{00} \ldots \gamma_{0n}, \ldots$\\

    For a better initialization, one can provide an #EMTrainer# over a
    #Kmeans# distribution; it will be used to initialize the means and
    the gammas.
@author Samy Bengio (bengio@idiap.ch)
*/
class LogRBF : public GradientMachine
{
  public:
    /// optional initialization using a Kmeans
    EMTrainer* initial_kmeans_trainer;

    /// Creates a layer with #n_inputs_# inputs and #n_outputs_# outputs;
    /// the optional #kmeans_trainer# is used by #init()# to seed the means.
    LogRBF(int n_inputs_, int n_outputs_, EMTrainer* kmeans_trainer=NULL);

    //-----

    virtual int numberOfParams();
    virtual void init();
    virtual void reset();
    virtual void forward(List *inputs);
    virtual void backward(List *inputs, real *alpha);

    virtual ~LogRBF();
};
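
/* A minimal sketch of the forward computation described above, assuming a
   flat parameter array laid out as documented (the mu row then the gamma
   row for each output); the names params, inputs, outputs, n_inputs and
   n_outputs are illustrative here, not the actual GradientMachine members:

     for (int i = 0; i < n_outputs; i++) {
       real *mu    = params + i * 2 * n_inputs;  // mu_i0 .. mu_in
       real *gamma = mu + n_inputs;              // gamma_i0 .. gamma_in
       real sum = 0.;
       for (int j = 0; j < n_inputs; j++) {
         real z = inputs[j] - mu[j];
         sum += gamma[j] * gamma[j] * z * z;     // gamma_ij^2 (x_j - mu_ij)^2
       }
       outputs[i] = -0.5 * sum;
     }
*/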
}
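
/* Hypothetical usage sketch for the Kmeans-based initialization: the exact
   #Kmeans# and #EMTrainer# constructor signatures are assumptions here,
   not verified against the Torch II API.

     Kmeans kmeans(n_inputs, n_outputs);           // one center per RBF unit
     EMTrainer kmeans_trainer(&kmeans);            // EM trainer over the Kmeans
     LogRBF log_rbf(n_inputs, n_outputs, &kmeans_trainer);
     log_rbf.init();                               // seeds the means (and gammas)
*/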
#endif