File: split_corpus

package info
onboard 1.4.1-5
  • links: PTS, VCS
  • area: main
  • in suites: bookworm, bullseye
  • size: 31,548 kB
  • sloc: python: 29,215; cpp: 5,965; ansic: 5,735; xml: 1,026; sh: 163; makefile: 39
file content (73 lines) | stat: -rwxr-xr-x 2,717 bytes
#!/usr/bin/python3
# -*- coding: utf-8 -*-

# Copyright © 2009-2010, 2012-2013 marmuta <marmvta@gmail.com>
#
# This file is part of Onboard.
#
# Onboard is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Onboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import sys
import codecs
import pypredict
import random

# usage: $0 <source text> [<hstep> [<tstep>]]
# The <source text> is split into three distinct sets of sentences that
# are then saved to training.txt, held_out.txt and testing.txt.
# Every <hstep>-th sentence is added to held_out.txt, default 20.
# Every <tstep>-th sentence is added to testing.txt, default 20.
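#
# A sketch of a typical run (the corpus file name is hypothetical):
#   ./split_corpus corpus.txt 20 20
# With the default steps, roughly 1/20 of the sentences end up in
# held_out.txt, another disjoint 1/20 in testing.txt, and the rest
# (about 90%) in training.txt.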
def main():
    # every hstep-th sentence is added to held_out
    hstep = int(sys.argv[2]) if len(sys.argv) >= 3 else 20

    # every tstep-th sentence is added to testing
    tstep = int(sys.argv[3]) if len(sys.argv) >= 4 else 20

    training, held_out, testing = read_corpus(sys.argv[1], hstep, tstep)

    for fn, sentences in [("training.txt", training),
                          ("held_out.txt", held_out),
                          ("testing.txt", testing)]:
        with codecs.open(fn, "w", encoding='utf-8') as f:
            f.writelines(s + "\n" for s in sentences)


def read_corpus(filename, hstep=20, tstep=20):
    text = pypredict.read_corpus(filename)

    # Split into sentences including separators (punctuation, <s>).
    # "disambiguate" makes it possible to feed saved sentences back into
    # split_sentences without loss, i.e. adjacent sentences cannot be
    # joined erroneously.
    sentences, spans = pypredict.split_sentences(text, disambiguate=True)

    # divide corpus into 3 sections: training, held_out, test
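    # held_out takes every hstep-th index starting at offset hstep//3,
    # testing takes every tstep-th index starting at offset hstep//3*2;
    # the different start offsets stagger the two selections, and any
    # accidental overlap is removed from testing below. All remaining
    # indices form the training set.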
    r = range(len(sentences))
    sh = set(r[hstep//3::hstep])
    st = set(r[hstep//3*2::tstep])
    st = st - sh
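    # e.g. with 100 sentences and the default hstep = tstep = 20:
    #   sh = {6, 26, 46, 66, 86}, st = {12, 32, 52, 72, 92},
    #   and the remaining 90 indices go to training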
    #print len(st - sh), len(set(r) - sh), len(set(r) - st), len(set(r) - st - sh)
    training  = [sentences[i] for i in set(r) - sh - st]
    held_out  = [sentences[i] for i in sh]
    testing   = [sentences[i] for i in st]

    print("sentences: total {}, training {}, held_out {}, testing {}" \
          .format(len(sentences),len(training),len(held_out),len(testing)))

    return training, held_out, testing


if __name__ == '__main__':
    main()