#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright © 2009, 2012 marmuta <marmvta@gmail.com>
#
# This file is part of Onboard.
#
# Onboard is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Onboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
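"""
Evaluate a pypredict language model against a test corpus: prints word
and n-gram counts, per-word entropy in bits, and the model's perplexity.

Usage: <this script> <language model file> <test corpus file>
"""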
import sys
import math

import pypredict


def main():
    model = pypredict.DynamicModel()
    model.load(sys.argv[1])
    text = pypredict.read_corpus(sys.argv[2])
    word_count, ngram_count, entropy, perplexity = calc_stats(model, text)
    print("test: words %d, n-grams %d, entropy %f bit/word, perplexity %f" %
          (word_count, ngram_count, entropy, perplexity))


def calc_stats(model, text):
    """ Per-word entropy and perplexity of 'model' over 'text'.

    entropy H = -1/N * sum(log2 p(ngram_i)), perplexity = 2 ** H
    """
    ngram_count = 0
    entropy = 0
    tokens, spans = pypredict.tokenize_text(text)
    word_count = len(tokens)

    # Slide a window of up to model.order tokens over the text and
    # score the n-gram ending at each token position.
    for i in range(len(tokens)):
        b = max(i - (model.order - 1), 0)
        e = min(i - (model.order - 1) + model.order, len(tokens))
        ngram = tokens[b:e]
        if len(ngram) != 1:   # skip bare unigrams, they carry no context
            p = model.get_probability(ngram)
            # log2(0) is minus infinity; unseen n-grams make the
            # (negated) entropy diverge to plus infinity below.
            log_p = math.log(p, 2) if p else float("-inf")
            entropy += log_p
            ngram_count += 1

    entropy = -entropy / word_count if word_count else 0
    try:
        perplexity = 2 ** entropy
    except OverflowError:
        perplexity = 0

    return word_count, ngram_count, entropy, perplexity


if __name__ == '__main__':
    main()
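
# Example run (hypothetical file names):
#   python3 test_perplexity.py en.lm corpus.txt
# Prints one line of the form:
#   test: words <n>, n-grams <n>, entropy <bits> bit/word, perplexity <p>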