File: Distribution.h

package info (click to toggle)
torch 2-1
  • links: PTS
  • area: main
  • in suites: woody
  • size: 5,488 kB
  • ctags: 3,217
  • sloc: cpp: 14,272; makefile: 201
file content (130 lines) | stat: -rw-r--r-- 4,585 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
// Copyright (C) 2002 Samy Bengio (bengio@idiap.ch)
//                
//
// This file is part of Torch. Release II.
// [The Ultimate Machine Learning Library]
//
// Torch is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// Torch is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Torch; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

#ifndef DISTRIBUTION_INC
#define DISTRIBUTION_INC

#include "GradientMachine.h"
#include "List.h"
#include "SeqDataSet.h"

namespace Torch {

/** This class is designed to handle generative distribution models
    such as Gaussian Mixture Models (GMMs) and Hidden Markov Models
    (HMMs). Since Distribution inherits from GradientMachine, these
    models can be trained by gradient descent as well as by
    Expectation Maximization (EM), or even by Viterbi training.

    Note that the output of a distribution is the negative log likelihood.

    @author Samy Bengio (bengio@idiap.ch)
*/
class Distribution : public GradientMachine
{
  public:

    /// size of the observation vectors (number of values per frame)
    int n_observations;


    /// total number of frames
    int tot_n_frames;

    /// the longest sequence in the database (used to size internal buffers)
    int max_n_frames;

    /// the log likelihood
    real log_probability;

    /// the log likelihood for each frame
    real* log_probabilities;

    /// Default constructor.
    Distribution();

    // Lifecycle hooks inherited from GradientMachine.
    // NOTE(review): bodies are not visible in this header — presumably
    // init() (re)initializes parameters, reset() clears accumulated state,
    // numberOfParams() reports the trainable parameter count, and
    // allocateMemory()/freeMemory() manage internal buffers sized by
    // #max_n_frames# — confirm against Distribution.cc.
    virtual void init();
    virtual void reset();
    virtual int numberOfParams();
    virtual void allocateMemory();
    virtual void freeMemory();

    /// Returns the log probability of a sequence represented by #inputs#
    virtual real logProbability(List* inputs);
    /// Returns the viterbi score of a sequence represented by #inputs#
    virtual real viterbiLogProbability(List* inputs);
    /// Returns the log probability of a frame of a sequence
    virtual real frameLogProbability(real *observations, real *inputs, int t);
    /// Generates one frame at time #t# (sampling from the model, presumably
    /// writing into #observations# — confirm against the implementation)
    virtual void frameGenerate(real *observations, real *inputs, int t);
    /// Returns the expected value of #observations# given #inputs#
    virtual void frameExpectation(real *observations, real *inputs, int t);

    /** Methods used to initialize the model at the beginning of each
        EM iteration
    */
    virtual void eMIterInitialize();
    /** Methods used to initialize the model at the beginning of each
        gradient descent iteration
    */
    virtual void iterInitialize();
    /** Methods used to initialize the model at the beginning of each
        example during EM training
    */
    virtual void eMSequenceInitialize(List* inputs);
    /** Methods used to initialize the model at the beginning of each
        example during gradient descent training
    */
    virtual void sequenceInitialize(List* inputs);
    /// The backward step of EM for a sequence
    virtual void eMAccPosteriors(List *inputs, real log_posterior);
    /// The backward step of EM for a frame
    virtual void frameEMAccPosteriors(real *observations, real log_posterior, real *inputs, int t);
    /// The backward step of Viterbi learning for a sequence
    virtual void viterbiAccPosteriors(List *inputs, real log_posterior);
    /// The backward step of Viterbi for a frame
    virtual void frameViterbiAccPosteriors(real *observations, real log_posterior, real *inputs, int t);
    /// The update after each iteration for EM
    virtual void eMUpdate();

    /// For some distribution like SpeechHMM, decodes the most likely path
    virtual void decode(List *inputs);

    /// The forward step of gradient training; per the class contract, the
    /// machine's output is the negative log likelihood of #inputs#
    virtual void forward(List *inputs);

    /// Same as forward, but for EM
    virtual void eMForward(List *inputs);
    /// Same as forward, but for Viterbi
    virtual void viterbiForward(List *inputs);
    /// The backward (gradient) step for a sequence; #alpha# carries the
    /// gradients coming from the layer above (GradientMachine convention)
    virtual void backward(List *inputs, real *alpha);
    /// Same as backward, but for one frame only
    virtual void frameBackward(real *observations, real *alpha, real *inputs,int t);
    /// Same as backward, but for Viterbi 
    virtual void viterbiBackward(List *inputs, real *alpha);
    /// Reads the model parameters from an already-opened #file#
    virtual void loadFILE(FILE *file);
    /// Writes the model parameters to an already-opened #file#
    virtual void saveFILE(FILE *file);

    /// Destructor (virtual: this class is a polymorphic base).
    virtual ~Distribution();
};


}

#endif