File: abstractdecisiontree.cpp

//
//  abstractdecisiontree.cpp
//  Mothur
//
//  Created by Sarah Westcott on 10/1/12.
//  Copyright (c) 2012 Schloss Lab. All rights reserved.
//

#include "abstractdecisiontree.hpp"

/**************************************************************************************************/

AbstractDecisionTree::AbstractDecisionTree(vector<vector<int> >& baseDataSet,
                                         vector<int> globalDiscardedFeatureIndices,
                                         OptimumFeatureSubsetSelector optimumFeatureSubsetSelector, 
                                         string treeSplitCriterion)

                    : baseDataSet(baseDataSet),
                    numSamples((int)baseDataSet.size()),
                    numFeatures((int)(baseDataSet[0].size() - 1)),
                    numOutputClasses(0),
                    rootNode(NULL),
                    nodeIdCount(0),
                    globalDiscardedFeatureIndices(globalDiscardedFeatureIndices),
                    optimumFeatureSubsetSize(optimumFeatureSubsetSelector.getOptimumFeatureSubsetSize(numFeatures)),
                    treeSplitCriterion(treeSplitCriterion) {

    try {
        // TODO: instead of calculating this for every DecisionTree,
        // calculate it once in the RandomForest class and pass the values in
        m = MothurOut::getInstance();
        for (int i = 0;  i < numSamples; i++) {
            if (m->control_pressed) { break; }
            int outcome = baseDataSet[i][numFeatures];
            vector<int>::iterator it = find(outputClasses.begin(), outputClasses.end(), outcome);
            if (it == outputClasses.end()){       // find() returns outputClasses.end() if the element is not found
                outputClasses.push_back(outcome);
                numOutputClasses++;
            }
        }
        
        if (m->debug) {
            //m->mothurOut("outputClasses = " + toStringVectorInt(outputClasses));
            m->mothurOut("numOutputClasses = " + toString(numOutputClasses) + '\n');
        }

    }
	catch(exception& e) {
		m->errorOut(e, "AbstractDecisionTree", "AbstractDecisionTree");
		exit(1);
	} 
}
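// Note (illustration, not part of the original code): the constructor assumes each row
// of baseDataSet stores numFeatures feature values followed by the output class label
// in the last column, e.g. a 3-feature sample of class 1 would be the row
// { 5, 0, 2, 1 }, giving numFeatures = 4 - 1 = 3.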
/**************************************************************************************************/
int AbstractDecisionTree::createBootStrappedSamples(){
    try {    
        vector<bool> isInTrainingSamples(numSamples, false);
        
        for (int i = 0; i < numSamples; i++) {
            if (m->control_pressed) { return 0; }
            // TODO: rand() % numSamples is biased when (RAND_MAX + 1) is not a multiple
            // of numSamples; double check this draw (see the sketch after this function)
            int randomIndex = rand() % numSamples;
            bootstrappedTrainingSamples.push_back(baseDataSet[randomIndex]);
            isInTrainingSamples[randomIndex] = true;
        }
        
        for (int i = 0; i < numSamples; i++) {
            if (m->control_pressed) { return 0; }
            if (isInTrainingSamples[i]){ bootstrappedTrainingSampleIndices.push_back(i); }
            else{
                bootstrappedTestSamples.push_back(baseDataSet[i]);
                bootstrappedTestSampleIndices.push_back(i);
            }
        }
        
        // transpose the test samples so that testSampleFeatureVectors[i] holds the
        // values of feature i across all test samples; guard against the unlikely case
        // where every sample was drawn into the training set and no test set remains
        if (bootstrappedTestSamples.size() == 0) { return 0; }
        for (int i = 0; i < (int)bootstrappedTestSamples[0].size(); i++) {
            if (m->control_pressed) { return 0; }
            
            vector<int> tmpFeatureVector(bootstrappedTestSamples.size(), 0);
            for (int j = 0; j < bootstrappedTestSamples.size(); j++) {
                if (m->control_pressed) { return 0; }
                
                tmpFeatureVector[j] = bootstrappedTestSamples[j][i];
            }
            testSampleFeatureVectors.push_back(tmpFeatureVector);
        }
        
        return 0;
    }
	catch(exception& e) {
		m->errorOut(e, "AbstractDecisionTree", "createBootStrappedSamples");
		exit(1);
	} 
}
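// A minimal sketch of an unbiased bootstrap draw using C++11 <random>, addressing the
// rand() TODO in createBootStrappedSamples() above. drawBootstrapIndexSketch is a
// hypothetical helper, not part of mothur's API, and assumes a C++11 compiler (which
// mothur 1.33 itself does not require).
#include <random>

static int drawBootstrapIndexSketch(int numSamples) {
    // one engine per process, seeded nondeterministically
    static std::mt19937 engine((std::random_device())());
    // uniform_int_distribution is unbiased, unlike rand() % numSamples
    std::uniform_int_distribution<int> dist(0, numSamples - 1);
    return dist(engine);
}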
/**************************************************************************************************/
int AbstractDecisionTree::getMinEntropyOfFeature(vector<int> featureVector,
                                                 vector<int> outputVector,
                                                 double& minEntropy,
                                                 int& featureSplitValue,
                                                 double& intrinsicValue){
    try {

        vector< pair<int, int> > featureOutputPair(featureVector.size(), pair<int, int>(0, 0));
        
        for (int i = 0; i < featureVector.size(); i++) { 
            if (m->control_pressed) { return 0; }
            
            featureOutputPair[i].first = featureVector[i];
            featureOutputPair[i].second = outputVector[i];
        }
        // sort the (feature value, output class) pairs by feature value; an explicit
        // comparator (IntPairVectorSorter) is supplied for safety and compiler portability
        
        IntPairVectorSorter intPairVectorSorter;
        sort(featureOutputPair.begin(), featureOutputPair.end(), intPairVectorSorter);
        
        vector<int> splitPoints;
        vector<int> uniqueFeatureValues(1, featureOutputPair[0].first);
        
        for (int i = 0; i < featureOutputPair.size(); i++) {

            if (m->control_pressed) { return 0; }
            int featureValue = featureOutputPair[i].first;

            vector<int>::iterator it = find(uniqueFeatureValues.begin(), uniqueFeatureValues.end(), featureValue);
            if (it == uniqueFeatureValues.end()){                 // NOT FOUND
                uniqueFeatureValues.push_back(featureValue);
                splitPoints.push_back(i);
            }
        }
        
        int bestSplitIndex = -1;
        if (splitPoints.size() == 0){
            // the feature is constant, so there is no split point; report infinite
            // entropy as a sentinel. Callers must compare minEntropy against infinity
            // before using it, so that no wrong calculation is done.
            minEntropy = numeric_limits<double>::infinity();                          // OUTPUT
            intrinsicValue = numeric_limits<double>::infinity();                      // OUTPUT
            featureSplitValue = -1;                                                   // OUTPUT
        }else{
            getBestSplitAndMinEntropy(featureOutputPair, splitPoints, minEntropy, bestSplitIndex, intrinsicValue);  // OUTPUT
            featureSplitValue = featureOutputPair[splitPoints[bestSplitIndex]].first;    // OUTPUT
        }
        
        return 0;
    }
	catch(exception& e) {
		m->errorOut(e, "AbstractDecisionTree", "getMinEntropyOfFeature");
		exit(1);
	} 
}
/**************************************************************************************************/
double AbstractDecisionTree::calcIntrinsicValue(int numLessThanValueAtSplitPoint, int numGreaterThanValueAtSplitPoint, int numSamples) {
    try {
        double upperSplitEntropy = 0.0, lowerSplitEntropy = 0.0;
        if (numLessThanValueAtSplitPoint > 0) {
            upperSplitEntropy = numLessThanValueAtSplitPoint * log2((double) numLessThanValueAtSplitPoint / (double) numSamples);
        }
        
        if (numGreaterThanValueAtSplitPoint > 0) {
            lowerSplitEntropy = numGreaterThanValueAtSplitPoint * log2((double) numGreaterThanValueAtSplitPoint / (double) numSamples);
        }
        
        double intrinsicValue = - ((double)(upperSplitEntropy + lowerSplitEntropy) / (double)numSamples);
        return intrinsicValue;
    }
	catch(exception& e) {
		m->errorOut(e, "AbstractDecisionTree", "calcIntrinsicValue");
		exit(1);
	} 
}
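// Worked example for calcIntrinsicValue() (this matches the "split information" term
// of C4.5's gain ratio): for a binary split of N = 10 samples into 6 and 4,
//     IV = -( 6*log2(6/10) + 4*log2(4/10) ) / 10
//        = -( 0.6*log2(0.6) + 0.4*log2(0.4) ) ≈ 0.971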
/**************************************************************************************************/

int AbstractDecisionTree::getBestSplitAndMinEntropy(vector< pair<int, int> > featureOutputPairs, vector<int> splitPoints,
                                                    double& minEntropy, int& minEntropyIndex, double& relatedIntrinsicValue){
    try {
        
        int numSamples = (int)featureOutputPairs.size();
        vector<double> entropies;
        vector<double> intrinsicValues;
        
        for (int i = 0; i < splitPoints.size(); i++) {
            if (m->control_pressed) { return 0; }
            int index = splitPoints[i];
            int valueAtSplitPoint = featureOutputPairs[index].first;

            int numLessThanValueAtSplitPoint = 0;
            int numGreaterThanValueAtSplitPoint = 0;
            
            for (int j = 0; j < featureOutputPairs.size(); j++) {
                if (m->control_pressed) { return 0; }
                pair<int, int> record = featureOutputPairs[j];
                if (record.first < valueAtSplitPoint){ numLessThanValueAtSplitPoint++; }
                else{ numGreaterThanValueAtSplitPoint++; }
            }
            
            double upperEntropyOfSplit = calcSplitEntropy(featureOutputPairs, index, numOutputClasses, true);
            double lowerEntropyOfSplit = calcSplitEntropy(featureOutputPairs, index, numOutputClasses, false);
            
            double totalEntropy = (numLessThanValueAtSplitPoint * upperEntropyOfSplit + numGreaterThanValueAtSplitPoint * lowerEntropyOfSplit) / (double)numSamples;
            double intrinsicValue = calcIntrinsicValue(numLessThanValueAtSplitPoint, numGreaterThanValueAtSplitPoint, numSamples);
            entropies.push_back(totalEntropy);
            intrinsicValues.push_back(intrinsicValue);
            
        }
                
        // set output values
        vector<double>::iterator it = min_element(entropies.begin(), entropies.end());
        minEntropy = *it;                                                         // OUTPUT
        minEntropyIndex = (int)(it - entropies.begin());                          // OUTPUT
        relatedIntrinsicValue = intrinsicValues[minEntropyIndex];                 // OUTPUT
        
        return 0;
    }
	catch(exception& e) {
		m->errorOut(e, "AbstractDecisionTree", "getBestSplitAndMinEntropy");
		exit(1);
	} 
}
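// How a caller might combine these outputs (a hedged sketch; the actual use of
// minEntropy and relatedIntrinsicValue lives in the concrete DecisionTree class, which
// is not part of this file). With C4.5-style gain-ratio splitting, one would compute:
static double gainRatioSketch(double parentEntropy, double weightedChildEntropy, double intrinsicValue) {
    // entropy reduction achieved by the split
    double informationGain = parentEntropy - weightedChildEntropy;
    // guard against division by zero for a degenerate split
    return (intrinsicValue > 0.0) ? (informationGain / intrinsicValue) : 0.0;
}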
/**************************************************************************************************/

double AbstractDecisionTree::calcSplitEntropy(vector< pair<int, int> > featureOutputPairs, int splitIndex, int numOutputClasses, bool isUpperSplit = true) {
    try {
        vector<int> classCounts(numOutputClasses, 0);
        
        if (isUpperSplit) { 
            for (int i = 0; i < splitIndex; i++) {
                if (m->control_pressed) { return 0; }
                classCounts[featureOutputPairs[i].second]++;
            }
        } else {
            for (int i = splitIndex; i < featureOutputPairs.size(); i++) { 
                if (m->control_pressed) { return 0; }
                classCounts[featureOutputPairs[i].second]++;
            }
        }
        
        int totalClassCounts = accumulate(classCounts.begin(), classCounts.end(), 0);
        
        double splitEntropy = 0.0;
        
        for (int i = 0; i < classCounts.size(); i++) {
            if (m->control_pressed) { return 0; }
            if (classCounts[i] == 0) { continue; }
            double probability = (double) classCounts[i] / (double) totalClassCounts;
            splitEntropy += -(probability * log2(probability));
        }
        
        return splitEntropy;
    }
	catch(exception& e) {
		m->errorOut(e, "AbstractDecisionTree", "calcSplitEntropy");
		exit(1);
	} 
}
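// Worked example for calcSplitEntropy(): if one side of the split holds class counts
// {3, 1} (4 samples total), the Shannon entropy is
//     H = -( (3/4)*log2(3/4) + (1/4)*log2(1/4) ) ≈ 0.811
// and a pure side (all one class) gives H = 0, since zero counts are skipped.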

/**************************************************************************************************/

int AbstractDecisionTree::getSplitPopulation(RFTreeNode* node, vector< vector<int> >& leftChildSamples, vector< vector<int> >& rightChildSamples){    
    try {
        // TODO: there is a possibility of optimization here if we recycle the samples in
        // each node: keep indices/pointers into the base data (vector<int>) and use them
        // everywhere instead of copying each sample over and over again. The shared data
        // would need to be const so that no caller can modify it. Currently
        // purgeTreeNodesDataRecursively() is used for the same purpose, but that could be
        // avoided altogether by re-using the same data across the classes.
        
        int splitFeatureGlobalIndex = node->getSplitFeatureIndex();
        
        for (int i = 0; i < (int)node->getBootstrappedTrainingSamples().size(); i++) {
            if (m->control_pressed) { return 0; }
            vector<int> sample = node->getBootstrappedTrainingSamples()[i];
            
            if (sample[splitFeatureGlobalIndex] < node->getSplitFeatureValue()) { leftChildSamples.push_back(sample); }
            else { rightChildSamples.push_back(sample); }
        }
        
        return 0;
    }
	catch(exception& e) {
		m->errorOut(e, "AbstractDecisionTree", "getSplitPopulation");
		exit(1);
	} 
}
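// Illustration (hypothetical values): if node->getSplitFeatureValue() is 3, then
// samples whose split-feature entry is 0, 1 or 2 go to leftChildSamples and samples
// with 3 or more go to rightChildSamples, matching the strict '<' test above.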
/**************************************************************************************************/
// TODO: verify the checkIfAlreadyClassified() code
// TODO: use bootstrappedOutputVector for an easier calculation instead of using getBootstrappedTrainingSamples()
bool AbstractDecisionTree::checkIfAlreadyClassified(RFTreeNode* treeNode, int& outputClass) {
    try {

        vector<int> tempOutputClasses;
        for (int i = 0; i < treeNode->getBootstrappedTrainingSamples().size(); i++) {
            if (m->control_pressed) { return false; }
            int sampleOutputClass = treeNode->getBootstrappedTrainingSamples()[i][numFeatures];
            vector<int>::iterator it = find(tempOutputClasses.begin(), tempOutputClasses.end(), sampleOutputClass);
            if (it == tempOutputClasses.end()) {               // NOT FOUND
                tempOutputClasses.push_back(sampleOutputClass);
            }
        }
        
        // a node is "already classified" when exactly one output class remains; the
        // explicit == 1 check also keeps an empty node from indexing tempOutputClasses[0]
        if (tempOutputClasses.size() == 1) { outputClass = tempOutputClasses[0]; return true; }
        else { outputClass = -1; return false; }
        
    }
	catch(exception& e) {
		m->errorOut(e, "AbstractDecisionTree", "checkIfAlreadyClassified");
		exit(1);
	} 
}
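// Typical usage sketch (hypothetical; the real call site is in the tree-building code,
// which is not part of this file):
//
//     int leafClass;
//     if (checkIfAlreadyClassified(node, leafClass)) {
//         // the node is pure: stop splitting and make it a leaf predicting leafClass
//     }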

/**************************************************************************************************/