File: test_NNGeneral.py

#!/usr/bin/env python
"""General test for the NeuralNetwork libraries.

This exercises various elements of the BackPropagation NeuralNetwork
libraries.
"""
# standard library
import sys
import os
import random

# PyUnit
import unittest

# local stuff
from Bio.NeuralNetwork.Training import TrainingExample, ExampleManager
from Bio.NeuralNetwork.StopTraining import ValidationIncreaseStop

def run_tests(argv):
    test_suite = testing_suite()
    runner = unittest.TextTestRunner(sys.stdout, verbosity = 2)
    runner.run(test_suite)

def testing_suite():
    """Generate the set of tests.
    """
    test_suite = unittest.TestSuite()

    test_loader = unittest.TestLoader()
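    # test methods in this file are prefixed with 't_' rather than the default 'test'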
    test_loader.testMethodPrefix = 't_'
    tests = [StopTrainingTest, ExampleManagerTest]

    for test in tests:
        cur_suite = test_loader.loadTestsFromTestCase(test)
        test_suite.addTest(cur_suite)

    return test_suite

class StopTrainingTest(unittest.TestCase):
    """Test functionality for stopping training networks.
    """
    def t_validation_increase_stop(self):
        """Stop training when the ValidationExamples increase.
        """
        stopper = ValidationIncreaseStop(max_iterations = 20,
                                         min_iterations = 2)

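        # validation error rose above the last error after min_iterations, so stop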
        stopper.last_error = 1.0
        do_stop = stopper.stopping_criteria(5, 1.0, 1.5)
        assert do_stop == 1, \
               "Did not tell us to stop when validation error increased."

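        # validation error rose, but we are still below min_iterations, so keep training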
        stopper.last_error = 1.0
        do_stop = stopper.stopping_criteria(1, 1.0, 1.5)
        assert do_stop == 0, \
               "Told us to stop before we reached the minimum iterations."

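        # past max_iterations, so stop even though the validation error decreased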
        stopper.last_error = 1.0
        do_stop = stopper.stopping_criteria(25, 1.0, 0.5)
        assert do_stop == 1, \
               "Did not tell us to stop when reaching maximum iterations."


class ExampleManagerTest(unittest.TestCase):
    """Tests to make sure the example manager is working properly.
    """
    def setUp(self):
        self.num_examples = 500
        self.examples = []
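        # each example has three random inputs and one random output in the range 1-6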
        for make_example in range(self.num_examples):
            inputs = []
            for input_make in range(3):
                inputs.append(random.randrange(1, 7))
            outputs = [random.randrange(1, 7)]
            self.examples.append(TrainingExample(inputs, outputs))

    def t_adding_examples(self):
        """Make sure test examples are added properly.
        """
        manager = ExampleManager()

        # figure out the expected number of examples in each category
        expected_train = manager.training_percent * self.num_examples
        expected_validation = manager.validation_percent * self.num_examples
        expected_test = self.num_examples - expected_train \
                        - expected_validation

        manager.add_examples(self.examples)

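        # the assignment is not exact, so allow up to a 10% deviation between
        # the expected and actual counts in each category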
        for expect, actual in [(expected_train, len(manager.train_examples)),
                               (expected_validation,
                                len(manager.validation_examples)),
                               (expected_test, len(manager.test_examples))]:
            wrong_percent = abs(expect - actual) / self.num_examples
            assert wrong_percent < .1, \
                   "Deviation in how examples were added, expect %s, got %s" \
                   % (expect, actual)
        
    def t_partitioning_examples(self):
        """Test that we can change how the examples are partitioned.
        """
        manager = ExampleManager(0, 0)
        manager.add_examples(self.examples)
        assert len(manager.test_examples) == self.num_examples, \
               "Did not partion correctly to test_examples."

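        # with a training fraction of 1.0, everything should go to train_examples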
        manager = ExampleManager(1.0, 0)
        manager.add_examples(self.examples)
        assert len(manager.train_examples) == self.num_examples, \
               "Did not partition correctly to train_examples."

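        # with a validation fraction of 1.0, everything should go to validation_examples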
        manager = ExampleManager(0, 1.0)
        manager.add_examples(self.examples)
        assert len(manager.validation_examples) == self.num_examples, \
               "Did not partition correctly to validation_examples."

if __name__ == "__main__":
    sys.exit(run_tests(sys.argv))