From 17f215438166729114762c3d9b3179dacd31490d Mon Sep 17 00:00:00 2001
From: JakobSteixner <jakob.steixner@univie.ac.at>
Date: Tue, 18 Jun 2019 23:40:52 +0200
Subject: [PATCH] new:irregular inflection of prefix verbs with known base
 (#258)

* example bug fixes

* python 3 release

* python 3 release

* setup updated

* new: additional logic to allow irregular inflection of unknown prefixed verbs with known base

* new: decomposition of prefix verbs for conjugate

* chg: reorder prefix stripping and infinitive stripping

* test case with unseparable prefix

* chg: skip regular inflection entirely if base verb is identified

* chg: remove debugging try-catch

* chg: problematic StopIteration cases due to breaking changes in 3.7

* switch to force conjugate to consider base form only

* allow parsing base verb candidate to find verbs with stacked prefixes

* failing verbs

* new: faux prefix verbs with a stem that looks like a prefix

* chg: reinstate and add new testcases for test_conjugate

* chg: block exceptional handling of verbs that only look like they have a prefix/end in 'ieren' suffix

* chg: convert expected outcome to set to avoid error from different ordering of results

* fix: error reporting

* chg: with female=1, male=2, expect jane to be sorted first

* chg: remove debugging try-catch, take different sqlite/mysql output into consideration

* chg: fix StopIteration 3.7 incompatibility, ensure check for self.delay

* chg: adapt test

* chg: make sure `Crawler.delay` is respected in `Crawler.crawl()` call

* chg: more verbose report on statistics for find_lemma/find_lexeme

* chg: expected ordering of result

* chg: expected ordering of result

* chg: added exceptions to prefix detection, new test cases.

* chg: explicit Exception for debugging.

* chg: reasoning for additional test cases.

* chg: allow detection of split inflected forms of German prefix verbs

* Remove libsvm/liblinear compilation step in CI

* Fix liblinear loading

* Add Python 2.7 and 3.7 to CI removing Conda setup

* Update  CI Ubuntu image version

* Fix SQLite 'drop all tables' referencing MySQL tables

* Add 'order by' to 'left join' query test

* fix: do not strip prefix in infinite output

* new: test case for correct lemmatization of separated verb form

* fix: skip intermittently failing test - call limits

* merging test fix from https://github.com/thalelinh/pattern/tree/bugfix/fix-master-ci

* chg: non-null seed value in test

* chg: track error for debugging

* new: blacklist simplex verbs starting with "da" for decomposition

* fix: newest scipy requires python 3, retain python2 compatibility by specifying release

* fix: newest cherrypy requires python 3, retain python2 compatibility by specifying release

* new: conjugation of geschehen

* fix: pytest/xdist version conflict

* fix: comment out stochastic test failing ca. 1/3 of time

* fix: comment out intermittently failing stochastic test (succeeds most of the time locally)

* increase version
---
 .travis.yml                           |  24 +-
 README.md                             |  12 +-
 docs/html/pattern.html                |   4 +-
 examples/03-en/08-topmine_ngrammer.py |  12 +-
 pattern/__init__.py                   |   2 +-
 pattern/db/__init__.py                |   2 +-
 pattern/metrics.py                    |   2 +-
 pattern/text/__init__.py              |  29 +-
 pattern/text/de/de-verbs.txt          |   1 +
 pattern/text/de/inflect.py            | 647 +++++++++++++++++---------
 pattern/vector/__init__.py            |   2 +-
 pattern/vector/svm/liblinear.py       |   2 +-
 pattern/web/__init__.py               |  22 +-
 setup.py                              |  15 +-
 test/test_db.py                       |  27 +-
 test/test_de.py                       |  36 +-
 test/test_ru.py                       |   2 -
 test/test_text.py                     |   5 +-
 test/test_vector.py                   |   2 +-
 test/test_web.py                      |  13 +-
 20 files changed, 577 insertions(+), 284 deletions(-)

Index: pattern-5b85d998c30ddc6772b56310713530224466083a/.travis.yml
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/.travis.yml
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/.travis.yml
@@ -8,13 +8,9 @@ python:
   - "2.7"
 
 before_install:
+  # Define TZ as some tests rely on timezone
   - export TZ=Europe/Brussels
-  - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh; else wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; fi
-  - bash miniconda.sh -b -p $HOME/miniconda
-  - export PATH="$HOME/miniconda/bin:$PATH"
-  - conda update --yes conda
-  - conda install --yes numpy scipy mysqlclient
-  - pip install --quiet pytest pytest-cov pytest-xdist chardet
+  - pip install --quiet pytest==4.4 pytest-cov pytest-xdist chardet
 
 install:
   #for handling Python 2.7. = specially for Travis Cl, shouldn't be relevant for anything else
@@ -22,12 +18,12 @@ install:
   - pip install six>=1.11.0
   - python setup.py install --quiet
   - pip freeze
-  # Install and compile libsvm and liblinear
-  - sudo apt-get install -y build-essential
-  - git clone https://github.com/cjlin1/libsvm
-  - cd libsvm; make lib; sudo cp libsvm.so.2 /lib; sudo ln -s /lib/libsvm.so.2 /lib/libsvm.so; cd ..
-  - git clone https://github.com/cjlin1/liblinear
-  - cd liblinear; make lib; sudo cp liblinear.so.3 /lib; sudo ln -s /lib/liblinear.so.3 /lib/liblinear.so; cd ..
+  # No need to recompile libsvm and liblinear; rely on the precompiled committed versions, so remove any previously installed version
+  - path_to_libsvm=$(ldconfig -p | grep -m1 "libsvm.so.2" | cut -d ' ' -f4)
+  - if [[ -e $path_to_libsvm ]]; then sudo rm -f $path_to_libsvm; fi
+  - path_to_liblinear=$(ldconfig -p | grep -m1 "liblinear.so.3" | cut -d ' ' -f4)
+  - if [[ -e $path_to_liblinear ]]; then sudo rm -f $path_to_liblinear; fi
+  - sudo ldconfig
 
 script:
   - pytest --cov=pattern
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/examples/03-en/08-topmine_ngrammer.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/examples/03-en/08-topmine_ngrammer.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/examples/03-en/08-topmine_ngrammer.py
@@ -64,10 +64,10 @@ for key in ngrams.keys():
     elif len(key.split("_")) == 3:
         trigrams.append(key)
 
-print("Extracted {} bigrams (removed stopwords):\n".format(len(bigrams)))
+print("Extracted {} bigrams:\n".format(len(bigrams)))
 print(bigrams)
 print("\n")
 
-print("Extracted {} trigrams (removed stopwords):\n".format(len(trigrams)))
+print("Extracted {} trigrams:\n".format(len(trigrams)))
 print(trigrams)
 print("\n")
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/db/__init__.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/pattern/db/__init__.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/db/__init__.py
@@ -622,7 +622,7 @@ class Database(object):
             except Exception as e:
                 # Create the database if it doesn't exist yet.
                 if "unknown database" not in str(e).lower():
-                    raise DatabaseConnectionError(e[1]) # Wrong host, username and/or password.
+                    raise DatabaseConnectionError(e.args[1]) # Wrong host, username and/or password.
                 connection = MySQLdb.connect(self.host, self.username, self.password)
                 cursor = connection.cursor()
                 cursor.execute("create database if not exists `%s`;" % self.name)
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/text/__init__.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/pattern/text/__init__.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/text/__init__.py
@@ -1081,7 +1081,7 @@ class Parser(object):
             # Word part-of-speech classifier.
             try:
                 self.model = Model(path=model)
-            except ImportError: # pattern.vector
+            except ImportError as e: # pattern.vector
                 pass
 
     def find_keywords(self, string, **kwargs):
@@ -2096,7 +2096,7 @@ def tense_id(*args, **kwargs):
     # Disambiguate aliases: "pl" =>
     # (PRESENT, None, PLURAL, INDICATIVE, IMPERFECTIVE, False).
     return TENSES_ID.get(tense.lower(),
-           TENSES_ID.get((tense, person, number, mood, aspect, negated)))
+        TENSES_ID.get((tense, person, number, mood, aspect, negated)))
 
 tense = tense_id
 
@@ -2170,6 +2170,11 @@ class Verbs(lazydict):
         """
         if dict.__len__(self) == 0:
             self.load()
+        if self._language == 'de' and len(verb.split()) > 1:
+            # allow to find base for prefix forms in split representation,
+            # e. g. 'nimmst an' -> 'annehmen'
+            verb = verb.split()
+            return self.lemma(''.join(verb[1:]  + verb[:1]), parse=parse)
         if verb.lower() in self._inverse:
             return self._inverse[verb.lower()]
         if verb in self._inverse:
@@ -2177,7 +2182,7 @@ class Verbs(lazydict):
         if parse is True: # rule-based
             return self.find_lemma(verb)
 
-    def lexeme(self, verb, parse=True):
+    def lexeme(self, verb, parse=True, no_duplicates=True):
         """ Returns a list of all possible inflections of the given verb.
         """
         a = []
@@ -2186,9 +2191,14 @@ class Verbs(lazydict):
             a = [x for x in self[b] if x != ""]
         elif parse is True: # rule-based
             a = self.find_lexeme(b)
-        u = []
-        [u.append(x) for x in a if x not in u]
-        return u
+
+        if no_duplicates:
+            u = []
+
+            [u.append(x) for x in a if x not in u]
+            return u
+        else:
+            return a
 
     def conjugate(self, verb, *args, **kwargs):
         """ Inflects the verb and returns the given tense (or None).
@@ -2205,7 +2215,10 @@ class Verbs(lazydict):
         i1 = self._format.get(id)
         i2 = self._format.get(self._default.get(id))
         i3 = self._format.get(self._default.get(self._default.get(id)))
-        b = self.lemma(verb, parse=kwargs.get("parse", True))
+        if kwargs.get('allow_inflected', True):
+            b = self.lemma(verb, parse=kwargs.get("parse", True))
+        else:
+            b = verb
         v = []
         # Get the verb lexeme and return the requested index.
         if b in self:
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/text/de/de-verbs.txt
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/pattern/text/de/de-verbs.txt
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/text/de/de-verbs.txt
@@ -95,6 +95,7 @@ studieren,studiere,studierst,studiert,st
 eignen,eigne,eignest,eignet,eignen,eignet,eignend,eignete,eignetest,eignete,eigneten,eignetet,geeignet,eigne,eignet,eignen,eigne,eignest,eignen,eignet,eignete,eignetest,eigneten,eignetet
 wärmen,wärme,wärmst,wärmt,wärmen,wärmt,wärmend,wärmte,wärmtest,wärmte,wärmten,wärmtet,gewärmt,wärme,wärmt,wärmen,wärme,wärmest,wärmen,wärmet,wärmte,wärmtest,wärmten,wärmtet
 jagen,jage,jagst,jagt,jagen,jagt,jagend,jagte,jagtest,jagte,jagten,jagtet,gejagt,jage,jagt,jagen,jage,jagest,jagen,jaget,jagte,jagtest,jagten,jagtet
+geschehen,geschehe,geschiehst,geschieht,geschehen,gescheht,geschehend,geschah,geschahst,geschah,geschahen,geschaht,geschehen,geschehe,gescheht,geschehen,geschehe,geschehest,geschehen,geschehet,geschähe,geschähest,geschähen,geschähet
 spinnen,spinne,spinnst,spinnt,spinnen,spinnt,spinnend,spann,spannst,spann,spannen,spannt,gesponnen,spinne,spinnt,spinnen,spinne,spinnest,spinnen,spinnet,spönne,spännest,spännen,spännet
 rationalisieren,rationalisiere,rationalisierst,rationalisiert,rationalisieren,rationalisiert,rationalisierend,rationalisierte,rationalisiertest,rationalisierte,rationalisierten,rationalisiertet,rationalisiert,rationalisiere,rationalisiert,rationalisieren,rationalisiere,rationalisierest,rationalisieren,rationalisieret,rationalisierte,rationalisiertest,rationalisierten,rationalisiertet
 rächen,räche,rächst,rächt,rächen,rächt,rächend,rächte,rächtest,rächte,rächten,rächtet,gerächt,räche,rächt,rächen,räche,rächest,rächen,rächet,rächte,rächtest,rächten,rächtet
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/text/de/inflect.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/pattern/text/de/inflect.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/text/de/inflect.py
@@ -62,7 +62,7 @@ is_vowel = lambda ch: ch in VOWELS
 # Masculine is the most common, so it is the default for all functions.
 MASCULINE, FEMININE, NEUTER, PLURAL = \
     MALE, FEMALE, NEUTRAL, PLURAL = \
-        M, F, N, PL = "m", "f", "n", "p"
+    M, F, N, PL = "m", "f", "n", "p"
 
 # Inflection role.
 # - nom = subject, "Der Hund bellt" (the dog barks).
@@ -73,17 +73,25 @@ NOMINATIVE, ACCUSATIVE, DATIVE, GENITIVE
     "nominative", "accusative", "dative", "genitive"
 
 article_definite = {
-    ("m", "nom"): "der", ("f", "nom"): "die", ("n", "nom"): "das", ("p", "nom"): "die",
-    ("m", "acc"): "den", ("f", "acc"): "die", ("n", "acc"): "das", ("p", "acc"): "die",
-    ("m", "dat"): "dem", ("f", "dat"): "der", ("n", "dat"): "dem", ("p", "dat"): "den",
-    ("m", "gen"): "des", ("f", "gen"): "der", ("n", "gen"): "des", ("p", "gen"): "der",
+    ("m", "nom"): "der", ("f", "nom"): "die", ("n", "nom"): "das",
+    ("p", "nom"): "die",
+    ("m", "acc"): "den", ("f", "acc"): "die", ("n", "acc"): "das",
+    ("p", "acc"): "die",
+    ("m", "dat"): "dem", ("f", "dat"): "der", ("n", "dat"): "dem",
+    ("p", "dat"): "den",
+    ("m", "gen"): "des", ("f", "gen"): "der", ("n", "gen"): "des",
+    ("p", "gen"): "der",
 }
 
 article_indefinite = {
-    ("m", "nom"): "ein"  , ("f", "nom"): "eine" , ("n", "nom"): "ein"  , ("p", "nom"): "eine",
-    ("m", "acc"): "einen", ("f", "acc"): "eine" , ("n", "acc"): "ein"  , ("p", "acc"): "eine",
-    ("m", "dat"): "einem", ("f", "dat"): "einer", ("n", "dat"): "einem", ("p", "dat"): "einen",
-    ("m", "gen"): "eines", ("f", "gen"): "einer", ("n", "gen"): "eines", ("p", "gen"): "einer",
+    ("m", "nom"): "ein", ("f", "nom"): "eine", ("n", "nom"): "ein",
+    ("p", "nom"): "eine",
+    ("m", "acc"): "einen", ("f", "acc"): "eine", ("n", "acc"): "ein",
+    ("p", "acc"): "eine",
+    ("m", "dat"): "einem", ("f", "dat"): "einer", ("n", "dat"): "einem",
+    ("p", "dat"): "einen",
+    ("m", "gen"): "eines", ("f", "gen"): "einer", ("n", "gen"): "eines",
+    ("p", "gen"): "einer",
 }
 
 
@@ -98,6 +106,7 @@ def indefinite_article(word, gender=MALE
     """
     return article_indefinite.get((gender[:1].lower(), role[:3].lower()))
 
+
 DEFINITE = "definite"
 INDEFINITE = "indefinite"
 
@@ -106,8 +115,10 @@ def article(word, function=INDEFINITE, g
     """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
     """
     return function == DEFINITE \
-       and definite_article(word, gender, role) \
-        or indefinite_article(word, gender, role)
+           and definite_article(word, gender, role) \
+           or indefinite_article(word, gender, role)
+
+
 _article = article
 
 
@@ -116,36 +127,49 @@ def referenced(word, article=INDEFINITE,
     """
     return "%s %s" % (_article(word, article, gender, role), word)
 
+
 #### GENDER #########################################################################################
 
 gender_masculine = (
     "ant", "ast", "ich", "ig", "ismus", "ling", "or", "us"
 )
 gender_feminine = (
-    "a", "anz", "ei", "enz", "heit", "ie", "ik", "in", "keit", "schaf", "sion", "sis",
+    "a", "anz", "ei", "enz", "heit", "ie", "ik", "in", "keit", "schaf", "sion",
+    "sis",
     "tät", "tion", "ung", "ur"
 )
 gender_neuter = (
-    "chen", "icht", "il", "it", "lein", "ma", "ment", "tel", "tum", "um", "al", "an", "ar",
+    "chen", "icht", "il", "it", "lein", "ma", "ment", "tel", "tum", "um", "al",
+    "an", "ar",
     "ät", "ent", "ett", "ier", "iv", "o", "on", "nis", "sal"
 )
 gender_majority_vote = {
     MASCULINE: (
-        "ab", "af", "ag", "ak", "am", "an", "ar", "at", "au", "ch", "ck", "eb", "ef", "eg",
-        "el", "er", "es", "ex", "ff", "go", "hn", "hs", "ib", "if", "ig", "ir", "kt", "lf",
-        "li", "ll", "lm", "ls", "lt", "mi", "nd", "nk", "nn", "nt", "od", "of", "og", "or",
-        "pf", "ph", "pp", "ps", "rb", "rd", "rf", "rg", "ri", "rl", "rm", "rr", "rs", "rt",
-        "rz", "ss", "st", "tz", "ub", "uf", "ug", "uh", "un", "us", "ut", "xt", "zt"
+        "ab", "af", "ag", "ak", "am", "an", "ar", "at", "au", "ch", "ck", "eb",
+        "ef", "eg",
+        "el", "er", "es", "ex", "ff", "go", "hn", "hs", "ib", "if", "ig", "ir",
+        "kt", "lf",
+        "li", "ll", "lm", "ls", "lt", "mi", "nd", "nk", "nn", "nt", "od", "of",
+        "og", "or",
+        "pf", "ph", "pp", "ps", "rb", "rd", "rf", "rg", "ri", "rl", "rm", "rr",
+        "rs", "rt",
+        "rz", "ss", "st", "tz", "ub", "uf", "ug", "uh", "un", "us", "ut", "xt",
+        "zt"
     ),
     FEMININE: (
-        "be", "ce", "da", "de", "dt", "ee", "ei", "et", "eu", "fe", "ft", "ge", "he", "hr",
-        "ht", "ia", "ie", "ik", "in", "it", "iz", "ka", "ke", "la", "le", "me", "na", "ne",
-        "ng", "nz", "on", "pe", "ra", "re", "se", "ta", "te", "ue", "ur", "ve", "ze"
+        "be", "ce", "da", "de", "dt", "ee", "ei", "et", "eu", "fe", "ft", "ge",
+        "he", "hr",
+        "ht", "ia", "ie", "ik", "in", "it", "iz", "ka", "ke", "la", "le", "me",
+        "na", "ne",
+        "ng", "nz", "on", "pe", "ra", "re", "se", "ta", "te", "ue", "ur", "ve",
+        "ze"
     ),
 
     NEUTER: (
-        "ad", "al", "as", "do", "ed", "eh", "em", "en", "hl", "id", "il", "im", "io", "is",
-        "iv", "ix", "ld", "lk", "lo", "lz", "ma", "md", "mm", "mt", "no", "ns", "ol", "om",
+        "ad", "al", "as", "do", "ed", "eh", "em", "en", "hl", "id", "il", "im",
+        "io", "is",
+        "iv", "ix", "ld", "lk", "lo", "lz", "ma", "md", "mm", "mt", "no", "ns",
+        "ol", "om",
         "op", "os", "ot", "pt", "rk", "rn", "ro", "to", "tt", "ul", "um", "uz"
     )
 }
@@ -169,51 +193,93 @@ def gender(word, pos=NOUN):
             if w.endswith(gender_majority_vote[g]):
                 return g
 
+
 #### PLURALIZE ######################################################################################
 
 plural_inflections = [
-    ("aal", "äle"   ), ("aat", "aaten"), ("abe", "aben" ), ("ach", "ächer"), ("ade", "aden"  ),
-    ("age", "agen"  ), ("ahn", "ahnen"), ("ahr", "ahre" ), ("akt", "akte" ), ("ale", "alen"  ),
-    ("ame", "amen"  ), ("amt", "ämter"), ("ane", "anen" ), ("ang", "änge" ), ("ank", "änke"  ),
-    ("ann", "änner" ), ("ant", "anten"), ("aph", "aphen"), ("are", "aren" ), ("arn", "arne"  ),
-    ("ase", "asen"  ), ("ate", "aten" ), ("att", "ätter"), ("atz", "ätze" ), ("aum", "äume"  ),
-    ("aus", "äuser" ), ("bad", "bäder"), ("bel", "bel"  ), ("ben", "ben"  ), ("ber", "ber"   ),
-    ("bot", "bote"  ), ("che", "chen" ), ("chs", "chse" ), ("cke", "cken" ), ("del", "del"   ),
-    ("den", "den"   ), ("der", "der"  ), ("ebe", "ebe"  ), ("ede", "eden" ), ("ehl", "ehle"  ),
-    ("ehr", "ehr"   ), ("eil", "eile" ), ("eim", "eime" ), ("eis", "eise" ), ("eit", "eit"   ),
-    ("ekt", "ekte"  ), ("eld", "elder"), ("ell", "elle" ), ("ene", "enen" ), ("enz", "enzen" ),
-    ("erd", "erde"  ), ("ere", "eren" ), ("erk", "erke" ), ("ern", "erne" ), ("ert", "erte"  ),
-    ("ese", "esen"  ), ("ess", "esse" ), ("est", "este" ), ("etz", "etze" ), ("eug", "euge"  ),
-    ("eur", "eure"  ), ("fel", "fel"  ), ("fen", "fen"  ), ("fer", "fer"  ), ("ffe", "ffen"  ),
-    ("gel", "gel"   ), ("gen", "gen"  ), ("ger", "ger"  ), ("gie", "gie"  ), ("hen", "hen"   ),
-    ("her", "her"   ), ("hie", "hien" ), ("hle", "hlen" ), ("hme", "hmen" ), ("hne", "hnen"  ),
-    ("hof", "höfe"  ), ("hre", "hren" ), ("hrt", "hrten"), ("hse", "hsen" ), ("hte", "hten"  ),
-    ("ich", "iche"  ), ("ick", "icke" ), ("ide", "iden" ), ("ieb", "iebe" ), ("ief", "iefe"  ),
-    ("ieg", "iege"  ), ("iel", "iele" ), ("ien", "ium"  ), ("iet", "iete" ), ("ife", "ifen"  ),
-    ("iff", "iffe"  ), ("ift", "iften"), ("ige", "igen" ), ("ika", "ikum" ), ("ild", "ilder" ),
-    ("ilm", "ilme"  ), ("ine", "inen" ), ("ing", "inge" ), ("ion", "ionen"), ("ise", "isen"  ),
-    ("iss", "isse"  ), ("ist", "isten"), ("ite", "iten" ), ("itt", "itte" ), ("itz", "itze"  ),
-    ("ium", "ium"   ), ("kel", "kel"  ), ("ken", "ken"  ), ("ker", "ker"  ), ("lag", "läge"  ),
-    ("lan", "läne"  ), ("lar", "lare" ), ("lei", "leien"), ("len", "len"  ), ("ler", "ler"   ),
-    ("lge", "lgen"  ), ("lie", "lien" ), ("lle", "llen" ), ("mel", "mel"  ), ("mer", "mer"   ),
-    ("mme", "mmen"  ), ("mpe", "mpen" ), ("mpf", "mpfe" ), ("mus", "mus"  ), ("mut", "mut"   ),
-    ("nat", "nate"  ), ("nde", "nden" ), ("nen", "nen"  ), ("ner", "ner"  ), ("nge", "ngen"  ),
-    ("nie", "nien"  ), ("nis", "nisse"), ("nke", "nken" ), ("nkt", "nkte" ), ("nne", "nnen"  ),
-    ("nst", "nste"  ), ("nte", "nten" ), ("nze", "nzen" ), ("ock", "öcke" ), ("ode", "oden"  ),
-    ("off", "offe"  ), ("oge", "ogen" ), ("ohn", "öhne" ), ("ohr", "ohre" ), ("olz", "ölzer" ),
-    ("one", "onen"  ), ("oot", "oote" ), ("opf", "öpfe" ), ("ord", "orde" ), ("orm", "ormen" ),
-    ("orn", "örner" ), ("ose", "osen" ), ("ote", "oten" ), ("pel", "pel"  ), ("pen", "pen"   ),
-    ("per", "per"   ), ("pie", "pien" ), ("ppe", "ppen" ), ("rag", "räge" ), ("rau", "raün"  ),
-    ("rbe", "rben"  ), ("rde", "rden" ), ("rei", "reien"), ("rer", "rer"  ), ("rie", "rien"  ),
-    ("rin", "rinnen"), ("rke", "rken" ), ("rot", "rote" ), ("rre", "rren" ), ("rte", "rten"  ),
-    ("ruf", "rufe"  ), ("rzt", "rzte" ), ("sel", "sel"  ), ("sen", "sen"  ), ("ser", "ser"   ),
-    ("sie", "sien"  ), ("sik", "sik"  ), ("sse", "ssen" ), ("ste", "sten" ), ("tag", "tage"  ),
-    ("tel", "tel"   ), ("ten", "ten"  ), ("ter", "ter"  ), ("tie", "tien" ), ("tin", "tinnen"),
-    ("tiv", "tive"  ), ("tor", "toren"), ("tte", "tten" ), ("tum", "tum"  ), ("tur", "turen" ),
-    ("tze", "tzen"  ), ("ube", "uben" ), ("ude", "uden" ), ("ufe", "ufen" ), ("uge", "ugen"  ),
-    ("uhr", "uhren" ), ("ule", "ulen" ), ("ume", "umen" ), ("ung", "ungen"), ("use", "usen"  ),
-    ("uss", "üsse"  ), ("ute", "uten" ), ("utz", "utz"  ), ("ver", "ver"  ), ("weg", "wege"  ),
-    ("zer", "zer"   ), ("zug", "züge" ), ("ück", "ücke" )
+    ("aal", "äle"), ("aat", "aaten"), ("abe", "aben"), ("ach", "ächer"),
+    ("ade", "aden"),
+    ("age", "agen"), ("ahn", "ahnen"), ("ahr", "ahre"), ("akt", "akte"),
+    ("ale", "alen"),
+    ("ame", "amen"), ("amt", "ämter"), ("ane", "anen"), ("ang", "änge"),
+    ("ank", "änke"),
+    ("ann", "änner"), ("ant", "anten"), ("aph", "aphen"), ("are", "aren"),
+    ("arn", "arne"),
+    ("ase", "asen"), ("ate", "aten"), ("att", "ätter"), ("atz", "ätze"),
+    ("aum", "äume"),
+    ("aus", "äuser"), ("bad", "bäder"), ("bel", "bel"), ("ben", "ben"),
+    ("ber", "ber"),
+    ("bot", "bote"), ("che", "chen"), ("chs", "chse"), ("cke", "cken"),
+    ("del", "del"),
+    ("den", "den"), ("der", "der"), ("ebe", "ebe"), ("ede", "eden"),
+    ("ehl", "ehle"),
+    ("ehr", "ehr"), ("eil", "eile"), ("eim", "eime"), ("eis", "eise"),
+    ("eit", "eit"),
+    ("ekt", "ekte"), ("eld", "elder"), ("ell", "elle"), ("ene", "enen"),
+    ("enz", "enzen"),
+    ("erd", "erde"), ("ere", "eren"), ("erk", "erke"), ("ern", "erne"),
+    ("ert", "erte"),
+    ("ese", "esen"), ("ess", "esse"), ("est", "este"), ("etz", "etze"),
+    ("eug", "euge"),
+    ("eur", "eure"), ("fel", "fel"), ("fen", "fen"), ("fer", "fer"),
+    ("ffe", "ffen"),
+    ("gel", "gel"), ("gen", "gen"), ("ger", "ger"), ("gie", "gie"),
+    ("hen", "hen"),
+    ("her", "her"), ("hie", "hien"), ("hle", "hlen"), ("hme", "hmen"),
+    ("hne", "hnen"),
+    ("hof", "höfe"), ("hre", "hren"), ("hrt", "hrten"), ("hse", "hsen"),
+    ("hte", "hten"),
+    ("ich", "iche"), ("ick", "icke"), ("ide", "iden"), ("ieb", "iebe"),
+    ("ief", "iefe"),
+    ("ieg", "iege"), ("iel", "iele"), ("ien", "ium"), ("iet", "iete"),
+    ("ife", "ifen"),
+    ("iff", "iffe"), ("ift", "iften"), ("ige", "igen"), ("ika", "ikum"),
+    ("ild", "ilder"),
+    ("ilm", "ilme"), ("ine", "inen"), ("ing", "inge"), ("ion", "ionen"),
+    ("ise", "isen"),
+    ("iss", "isse"), ("ist", "isten"), ("ite", "iten"), ("itt", "itte"),
+    ("itz", "itze"),
+    ("ium", "ium"), ("kel", "kel"), ("ken", "ken"), ("ker", "ker"),
+    ("lag", "läge"),
+    ("lan", "läne"), ("lar", "lare"), ("lei", "leien"), ("len", "len"),
+    ("ler", "ler"),
+    ("lge", "lgen"), ("lie", "lien"), ("lle", "llen"), ("mel", "mel"),
+    ("mer", "mer"),
+    ("mme", "mmen"), ("mpe", "mpen"), ("mpf", "mpfe"), ("mus", "mus"),
+    ("mut", "mut"),
+    ("nat", "nate"), ("nde", "nden"), ("nen", "nen"), ("ner", "ner"),
+    ("nge", "ngen"),
+    ("nie", "nien"), ("nis", "nisse"), ("nke", "nken"), ("nkt", "nkte"),
+    ("nne", "nnen"),
+    ("nst", "nste"), ("nte", "nten"), ("nze", "nzen"), ("ock", "öcke"),
+    ("ode", "oden"),
+    ("off", "offe"), ("oge", "ogen"), ("ohn", "öhne"), ("ohr", "ohre"),
+    ("olz", "ölzer"),
+    ("one", "onen"), ("oot", "oote"), ("opf", "öpfe"), ("ord", "orde"),
+    ("orm", "ormen"),
+    ("orn", "örner"), ("ose", "osen"), ("ote", "oten"), ("pel", "pel"),
+    ("pen", "pen"),
+    ("per", "per"), ("pie", "pien"), ("ppe", "ppen"), ("rag", "räge"),
+    ("rau", "raün"),
+    ("rbe", "rben"), ("rde", "rden"), ("rei", "reien"), ("rer", "rer"),
+    ("rie", "rien"),
+    ("rin", "rinnen"), ("rke", "rken"), ("rot", "rote"), ("rre", "rren"),
+    ("rte", "rten"),
+    ("ruf", "rufe"), ("rzt", "rzte"), ("sel", "sel"), ("sen", "sen"),
+    ("ser", "ser"),
+    ("sie", "sien"), ("sik", "sik"), ("sse", "ssen"), ("ste", "sten"),
+    ("tag", "tage"),
+    ("tel", "tel"), ("ten", "ten"), ("ter", "ter"), ("tie", "tien"),
+    ("tin", "tinnen"),
+    ("tiv", "tive"), ("tor", "toren"), ("tte", "tten"), ("tum", "tum"),
+    ("tur", "turen"),
+    ("tze", "tzen"), ("ube", "uben"), ("ude", "uden"), ("ufe", "ufen"),
+    ("uge", "ugen"),
+    ("uhr", "uhren"), ("ule", "ulen"), ("ume", "umen"), ("ung", "ungen"),
+    ("use", "usen"),
+    ("uss", "üsse"), ("ute", "uten"), ("utz", "utz"), ("ver", "ver"),
+    ("weg", "wege"),
+    ("zer", "zer"), ("zug", "züge"), ("ück", "ücke"), ('apfel', 'äpfel')
 ]
 
 
@@ -237,9 +303,11 @@ def pluralize(word, pos=NOUN, gender=MAL
             return w + "n"
         if w.endswith("ien"):
             return w[:-2] + "um"
-        if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", "tät", "tik", "tum", "u")):
+        if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus",
+                       "tät", "tik", "tum", "u")):
             return w
-        if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")):
+        if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur",
+                       "ung")):
             return w + "en"
         if w.endswith("in"):
             return w + "nen"
@@ -252,70 +320,115 @@ def pluralize(word, pos=NOUN, gender=MAL
         if w.endswith("a"):
             return w[:-1] + "en"
         # Inflect common umlaut vowels: Kopf => Köpfe.
-        if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")):
+        if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf",
+                       "uch", "uss")):
             umlaut = w[-3]
             umlaut = umlaut.replace("a", "ä")
             umlaut = umlaut.replace("o", "ö")
             umlaut = umlaut.replace("u", "ü")
             return w[:-3] + umlaut + w[-2:] + "e"
         for a, b in (
-          ("ag",  "äge"),
-          ("ann", "änner"),
-          ("aum", "äume"),
-          ("aus", "äuser"),
-          ("zug", "züge")):
+                ("ag", "äge"),
+                ("ann", "änner"),
+                ("aum", "äume"),
+                ("aus", "äuser"),
+                ("zug", "züge")):
             if w.endswith(a):
                 return w[:-len(a)] + b
         return w + "e"
     return w
 
+
 #### SINGULARIZE ###################################################################################
 
 singular_inflections = [
-    ( "innen", "in" ), ( "täten", "tät"), ( "ahnen", "ahn"), ( "enten", "ent"), ( "räser", "ras"),
-    ( "hrten", "hrt"), ( "ücher", "uch"), ( "örner", "orn"), ( "änder", "and"), ( "ürmer", "urm"),
-    ( "ahlen", "ahl"), ( "uhren", "uhr"), ( "ätter", "att"), ( "suren", "sur"), ( "chten", "cht"),
-    ( "kuren", "kur"), ( "erzen", "erz"), ( "güter", "gut"), ( "soren", "sor"), ( "änner", "ann"),
-    ( "äuser", "aus"), ( "taten", "tat"), ( "isten", "ist"), ( "bäder", "bad"), ( "ämter", "amt"),
-    ( "eiten", "eit"), ( "raten", "rat"), ( "ormen", "orm"), ( "ionen", "ion"), ( "nisse", "nis"),
-    ( "ölzer", "olz"), ( "ungen", "ung"), ( "läser", "las"), ( "ächer", "ach"), ( "urten", "urt"),
-    ( "enzen", "enz"), ( "aaten", "aat"), ( "aphen", "aph"), ( "öcher", "och"), ( "türen", "tür"),
-    ( "sonen", "son"), ( "ühren", "ühr"), ( "ühner", "uhn"), ( "toren", "tor"), ( "örter", "ort"),
-    ( "anten", "ant"), ( "räder", "rad"), ( "turen", "tur"), ( "äuler", "aul"), (  "änze", "anz"),
-    (  "tten", "tte"), (  "mben", "mbe"), (  "ädte", "adt"), (  "llen", "lle"), (  "ysen", "yse"),
-    (  "rben", "rbe"), (  "hsen", "hse"), (  "raün", "rau"), (  "rven", "rve"), (  "rken", "rke"),
-    (  "ünge", "ung"), (  "üten", "üte"), (  "usen", "use"), (  "tien", "tie"), (  "läne", "lan"),
-    (  "iben", "ibe"), (  "ifen", "ife"), (  "ssen", "sse"), (  "gien", "gie"), (  "eten", "ete"),
-    (  "rden", "rde"), (  "öhne", "ohn"), (  "ärte", "art"), (  "ncen", "nce"), (  "ünde", "und"),
-    (  "uben", "ube"), (  "lben", "lbe"), (  "üsse", "uss"), (  "agen", "age"), (  "räge", "rag"),
-    (  "ogen", "oge"), (  "anen", "ane"), (  "sken", "ske"), (  "eden", "ede"), (  "össe", "oss"),
-    (  "ürme", "urm"), (  "ggen", "gge"), (  "üren", "üre"), (  "nten", "nte"), (  "ühle", "ühl"),
-    (  "änge", "ang"), (  "mmen", "mme"), (  "igen", "ige"), (  "nken", "nke"), (  "äcke", "ack"),
-    (  "oden", "ode"), (  "oben", "obe"), (  "ähne", "ahn"), (  "änke", "ank"), (  "inen", "ine"),
-    (  "seen", "see"), (  "äfte", "aft"), (  "ulen", "ule"), (  "äste", "ast"), (  "hren", "hre"),
-    (  "öcke", "ock"), (  "aben", "abe"), (  "öpfe", "opf"), (  "ugen", "uge"), (  "lien", "lie"),
-    (  "ände", "and"), (  "ücke", "ück"), (  "asen", "ase"), (  "aden", "ade"), (  "dien", "die"),
-    (  "aren", "are"), (  "tzen", "tze"), (  "züge", "zug"), (  "üfte", "uft"), (  "hien", "hie"),
-    (  "nden", "nde"), (  "älle", "all"), (  "hmen", "hme"), (  "ffen", "ffe"), (  "rmen", "rma"),
-    (  "olen", "ole"), (  "sten", "ste"), (  "amen", "ame"), (  "höfe", "hof"), (  "üste", "ust"),
-    (  "hnen", "hne"), (  "ähte", "aht"), (  "umen", "ume"), (  "nnen", "nne"), (  "alen", "ale"),
-    (  "mpen", "mpe"), (  "mien", "mie"), (  "rten", "rte"), (  "rien", "rie"), (  "äute", "aut"),
-    (  "uden", "ude"), (  "lgen", "lge"), (  "ngen", "nge"), (  "iden", "ide"), (  "ässe", "ass"),
-    (  "osen", "ose"), (  "lken", "lke"), (  "eren", "ere"), (  "üche", "uch"), (  "lüge", "lug"),
-    (  "hlen", "hle"), (  "isen", "ise"), (  "ären", "äre"), (  "töne", "ton"), (  "onen", "one"),
-    (  "rnen", "rne"), (  "üsen", "üse"), (  "haün", "hau"), (  "pien", "pie"), (  "ihen", "ihe"),
-    (  "ürfe", "urf"), (  "esen", "ese"), (  "ätze", "atz"), (  "sien", "sie"), (  "läge", "lag"),
-    (  "iven", "ive"), (  "ämme", "amm"), (  "äufe", "auf"), (  "ppen", "ppe"), (  "enen", "ene"),
-    (  "lfen", "lfe"), (  "äume", "aum"), (  "nien", "nie"), (  "unen", "une"), (  "cken", "cke"),
-    (  "oten", "ote"), (   "mie", "mie"), (   "rie", "rie"), (   "sis", "sen"), (   "rin", "rin"),
-    (   "ein", "ein"), (   "age", "age"), (   "ern", "ern"), (   "ber", "ber"), (   "ion", "ion"),
-    (   "inn", "inn"), (   "ben", "ben"), (   "äse", "äse"), (   "eis", "eis"), (   "hme", "hme"),
-    (   "iss", "iss"), (   "hen", "hen"), (   "fer", "fer"), (   "gie", "gie"), (   "fen", "fen"),
-    (   "her", "her"), (   "ker", "ker"), (   "nie", "nie"), (   "mer", "mer"), (   "ler", "ler"),
-    (   "men", "men"), (   "ass", "ass"), (   "ner", "ner"), (   "per", "per"), (   "rer", "rer"),
-    (   "mus", "mus"), (   "abe", "abe"), (   "ter", "ter"), (   "ser", "ser"), (   "äle", "aal"),
-    (   "hie", "hie"), (   "ger", "ger"), (   "tus", "tus"), (   "gen", "gen"), (   "ier", "ier"),
-    (   "ver", "ver"), (   "zer", "zer"),
+    ("innen", "in"), ("täten", "tät"), ("ahnen", "ahn"), ("enten", "ent"),
+    ("räser", "ras"),
+    ("hrten", "hrt"), ("ücher", "uch"), ("örner", "orn"), ("änder", "and"),
+    ("ürmer", "urm"),
+    ("ahlen", "ahl"), ("uhren", "uhr"), ("ätter", "att"), ("suren", "sur"),
+    ("chten", "cht"),
+    ("kuren", "kur"), ("erzen", "erz"), ("güter", "gut"), ("soren", "sor"),
+    ("änner", "ann"),
+    ("äuser", "aus"), ("taten", "tat"), ("isten", "ist"), ("bäder", "bad"),
+    ("ämter", "amt"),
+    ("eiten", "eit"), ("raten", "rat"), ("ormen", "orm"), ("ionen", "ion"),
+    ("nisse", "nis"),
+    ("ölzer", "olz"), ("ungen", "ung"), ("läser", "las"), ("ächer", "ach"),
+    ("urten", "urt"),
+    ("enzen", "enz"), ("aaten", "aat"), ("aphen", "aph"), ("öcher", "och"),
+    ("türen", "tür"),
+    ("sonen", "son"), ("ühren", "ühr"), ("ühner", "uhn"), ("toren", "tor"),
+    ("örter", "ort"),
+    ("anten", "ant"), ("räder", "rad"), ("turen", "tur"), ("äuler", "aul"),
+    ("änze", "anz"),
+    ("tten", "tte"), ("mben", "mbe"), ("ädte", "adt"), ("llen", "lle"),
+    ("ysen", "yse"),
+    ("rben", "rbe"), ("hsen", "hse"), ("raün", "rau"), ("rven", "rve"),
+    ("rken", "rke"),
+    ("ünge", "ung"), ("üten", "üte"), ("usen", "use"), ("tien", "tie"),
+    ("läne", "lan"),
+    ("iben", "ibe"), ("ifen", "ife"), ("ssen", "sse"), ("gien", "gie"),
+    ("eten", "ete"),
+    ("rden", "rde"), ("öhne", "ohn"), ("ärte", "art"), ("ncen", "nce"),
+    ("ünde", "und"),
+    ("uben", "ube"), ("lben", "lbe"), ("üsse", "uss"), ("agen", "age"),
+    ("räge", "rag"),
+    ("ogen", "oge"), ("anen", "ane"), ("sken", "ske"), ("eden", "ede"),
+    ("össe", "oss"),
+    ("ürme", "urm"), ("ggen", "gge"), ("üren", "üre"), ("nten", "nte"),
+    ("ühle", "ühl"),
+    ("änge", "ang"), ("mmen", "mme"), ("igen", "ige"), ("nken", "nke"),
+    ("äcke", "ack"),
+    ("oden", "ode"), ("oben", "obe"), ("ähne", "ahn"), ("änke", "ank"),
+    ("inen", "ine"),
+    ("seen", "see"), ("äfte", "aft"), ("ulen", "ule"), ("äste", "ast"),
+    ("hren", "hre"),
+    ("öcke", "ock"), ("aben", "abe"), ("öpfe", "opf"), ("ugen", "uge"),
+    ("lien", "lie"),
+    ("ände", "and"), ("ücke", "ück"), ("asen", "ase"), ("aden", "ade"),
+    ("dien", "die"),
+    ("aren", "are"), ("tzen", "tze"), ("züge", "zug"), ("üfte", "uft"),
+    ("hien", "hie"),
+    ("nden", "nde"), ("älle", "all"), ("hmen", "hme"), ("ffen", "ffe"),
+    ("rmen", "rma"),
+    ("olen", "ole"), ("sten", "ste"), ("amen", "ame"), ("höfe", "hof"),
+    ("üste", "ust"),
+    ("hnen", "hne"), ("ähte", "aht"), ("umen", "ume"), ("nnen", "nne"),
+    ("alen", "ale"),
+    ("mpen", "mpe"), ("mien", "mie"), ("rten", "rte"), ("rien", "rie"),
+    ("äute", "aut"),
+    ("uden", "ude"), ("lgen", "lge"), ("ngen", "nge"), ("iden", "ide"),
+    ("ässe", "ass"),
+    ("osen", "ose"), ("lken", "lke"), ("eren", "ere"), ("üche", "uch"),
+    ("lüge", "lug"),
+    ("hlen", "hle"), ("isen", "ise"), ("ären", "äre"), ("töne", "ton"),
+    ("onen", "one"),
+    ("rnen", "rne"), ("üsen", "üse"), ("haün", "hau"), ("pien", "pie"),
+    ("ihen", "ihe"),
+    ("ürfe", "urf"), ("esen", "ese"), ("ätze", "atz"), ("sien", "sie"),
+    ("läge", "lag"),
+    ("iven", "ive"), ("ämme", "amm"), ("äufe", "auf"), ("ppen", "ppe"),
+    ("enen", "ene"),
+    ("lfen", "lfe"), ("äume", "aum"), ("nien", "nie"), ("unen", "une"),
+    ("cken", "cke"),
+    ("oten", "ote"), ("mie", "mie"), ("rie", "rie"), ("sis", "sen"),
+    ("rin", "rin"),
+    ("ein", "ein"), ("age", "age"), ("ern", "ern"), ("ber", "ber"),
+    ("ion", "ion"),
+    ("inn", "inn"), ("ben", "ben"), ("äse", "äse"), ("eis", "eis"),
+    ("hme", "hme"),
+    ("iss", "iss"), ("hen", "hen"), ("fer", "fer"), ("gie", "gie"),
+    ("fen", "fen"),
+    ("her", "her"), ("ker", "ker"), ("nie", "nie"), ("mer", "mer"),
+    ("ler", "ler"),
+    ("men", "men"), ("ass", "ass"), ("ner", "ner"), ("per", "per"),
+    ("rer", "rer"),
+    ("mus", "mus"), ("abe", "abe"), ("ter", "ter"), ("ser", "ser"),
+    ("äle", "aal"),
+    ("hie", "hie"), ("ger", "ger"), ("tus", "tus"), ("gen", "gen"),
+    ("ier", "ier"),
+    ("ver", "ver"), ("zer", "zer"), ("äpfel", "apfel")
 ]
 
 singular = {
@@ -334,8 +447,8 @@ def singularize(word, pos=NOUN, gender=M
         return singular[word]
     if pos == NOUN:
         for a, b in singular_inflections:
-            if w.endswith(a):
-                return w[:-len(a)] + b
+            if w.lower().endswith(a):
+                return (w[:-len(a)] + b).capitalize()
         # Default rule: strip known plural suffixes (baseline = 51%).
         for suffix in ("nen", "en", "n", "e", "er", "s"):
             if w.endswith(suffix):
@@ -343,27 +456,50 @@ def singularize(word, pos=NOUN, gender=M
                 break
         # Corrections (these add about 1% accuracy):
         if w.endswith(("rr", "rv", "nz")):
-            return w + "e"
-        return w
-    return w
+            return (w + "e").capitalize()
+        return w.capitalize()
+    return w
+
 
 #### VERB CONJUGATION ##############################################################################
 # The verb table was trained on CELEX and contains the top 2000 most frequent verbs.
 
 prefix_inseparable = (
-    "be", "emp", "ent", "er", "ge", "miss", "über", "unter", "ver", "voll", "wider", "zer"
+    "be", "emp", "ent", "er", "ge", "miss", "über", "unter", "ver", "voll",
+    "wider", "zer"
 )
 prefix_separable = (
-    "ab", "an", "auf", "aus", "bei", "durch", "ein", "fort", "mit", "nach", "vor", "weg",
-    "zurück", "zusammen", "zu", "dabei", "daran", "da", "empor", "entgegen", "entlang",
-    "fehl", "fest", "gegenüber", "gleich", "herab", "heran", "herauf", "heraus", "herum",
-    "her", "hinweg", "hinzu", "hin", "los", "nieder", "statt", "umher", "um", "weg",
-    "weiter", "wieder", "zwischen"
-) + ( # There are many more...
-     "dort", "fertig", "frei", "gut", "heim", "hoch", "klein", "klar", "nahe", "offen", "richtig"
-)
+                       "ab", "an", "auf", "aus", "bei", "durch", "ein", "fort",
+                       "mit", "nach", "vor", "weg",
+                       "zurück", "zusammen", "zu", "dabei", "daran", "da",
+                       "empor", "entgegen", "entlang",
+                       "fehl", "fest", "gegenüber", "gleich", "herab", "heran",
+                       "herauf", "heraus", "herum",
+                       "her", "hinweg", "hinzu", "hin", "los", "nieder",
+                       "statt", "umher", "um", "weg",
+                       "weiter", "wieder", "zwischen"
+                   ) + (  # There are many more...
+                       "dort", "fertig", "frei", "gut", "heim", "hoch", "klein",
+                       "klar", "nahe", "offen", "richtig"
+                   )
 prefixes = prefix_inseparable + prefix_separable
 
+# Verbs whose initial letters merely coincide with a separable or inseparable
+# prefix; they must be excluded from prefix splitting even when regular.
+faux_prefix_verbs = (
+    'geiern', 'geifern', 'betteln', 'bersten', 'bechern', 'entern', 'hindern',
+    'einigen', 'gerben', 'gehen',
+    'zurren', 'zucken', 'zupfen', 'beichten', 'beißen', 'einen', 'mitteln',
+    'zerren', 'zuenden',  # zünden
+    'zuzeln', 'gellen', 'zuechten',  # züchten
+    'ankern', 'angeln', 'herzigen',  # herzigen: remainder of be-herzigen after decomposition
+    'dauern', 'darben', 'danken',  # excluded because 'da' is listed in prefix_separable
+)  # probably more
+
+faux_latinate = (
+    'gieren', 'stieren'
+)  # verbs that look like they might have -ieren suffix that blocks ge- prefixation, but don't
+
 
 def encode_sz(s):
     return s.replace("ß", "ss")
@@ -377,17 +513,18 @@ class Verbs(_Verbs):
 
     def __init__(self):
         _Verbs.__init__(self, os.path.join(MODULE, "de-verbs.txt"),
-            language = "de",
-              format = [0, 1, 2, 3, 4, 5, 8, 17, 18, 19, 20, 21, 24, 52, 54, 53, 55, 56, 58, 59, 67, 68, 70, 71],
-             default = {6: 4, 22: 20, 57: 55, 60: 58, 69: 67, 72: 70}
-            )
+                        language="de",
+                        format=[0, 1, 2, 3, 4, 5, 8, 17, 18, 19, 20, 21, 24, 52,
+                                54, 53, 55, 56, 58, 59, 67, 68, 70, 71],
+                        default={6: 4, 22: 20, 57: 55, 60: 58, 69: 67, 72: 70}
+                        )
 
     def find_lemma(self, verb):
         """ Returns the base form of the given inflected verb, using a rule-based approach.
         """
         v = verb.lower()
         # Common prefixes: be-finden and emp-finden probably inflect like finden.
-        if not (v.startswith("ge") and v.endswith("t")): # Probably gerund.
+        if not (v.startswith("ge") and v.endswith("t")):  # Probably gerund.
             for prefix in prefixes:
                 if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                     return prefix + self.inflections[v[len(prefix):]]
@@ -397,20 +534,26 @@ class Verbs(_Verbs):
         if b.endswith(("ln", "rn")):
             return b
         # Lemmatize regular inflections.
-        for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
+        for x in (
+        "test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
             if b.endswith(x):
-                b = b[:-len(x)]; break
+                b = b[:-len(x)];
+                break
         # Subjunctive: hielte => halten, schnitte => schneiden.
         for x, y in (
-          ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"),
-          ("ien", "ein"), ("iess", "ass"), ( "ieß", "aß"  ), ( "iff", "eif" ), ("iss", "eiss"),
-          ( "iß", "eiß"), (  "it", "eid"), ( "oss", "iess"), ( "öss", "iess")):
+                ("ieb", "eib"), ("ied", "eid"), ("ief", "auf"), ("ieg", "eig"),
+                ("iel", "alt"),
+                ("ien", "ein"), ("iess", "ass"), ("ieß", "aß"), ("iff", "eif"),
+                ("iss", "eiss"),
+                ("iß", "eiß"), ("it", "eid"), ("oss", "iess"), ("öss", "iess")):
             if b.endswith(x):
-                b = b[:-len(x)] + y; break
+                b = b[:-len(x)] + y;
+                break
         b = b.replace("eeiss", "eiss")
         b = b.replace("eeid", "eit")
         # Subjunctive: wechselte => wechseln
-        if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
+        if not b.endswith(("e", "l")) and not (
+                b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
             b = b + "e"
         # abknallst != abknalln => abknallen
         if b.endswith(("hl", "ll", "ul", "eil")):
@@ -431,49 +574,114 @@ class Verbs(_Verbs):
         """ For a regular verb (base form), returns the forms using a rule-based approach.
         """
         v = verb.lower()
-        # Stem = infinitive minus -en, -ln, -rn.
-        b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v)))
         # Split common prefixes.
-        x, x1, x2 = "", "", ""
-        for prefix in prefix_separable:
-            if v.startswith(prefix):
-                b, x = b[len(prefix):], prefix
-                x1 = (" " + x).rstrip()
-                x2 = x + "ge"
-                break
-        # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
-        pl = b.endswith("el") and b[:-2] + "l" or b
-        # Present tense 1pl -el: handeln => wir handeln
-        pw = v.endswith(("ln", "rn")) and v or b + "en"
-        # Present tense ending in -d or -t gets -e:
-        pr = b.endswith(("d", "t")) and b + "e" or b
-        # Present tense 2sg gets -st, unless stem ends with -s or -z.
-        p2 = pr.endswith(("s", "z")) and pr + "t" or pr + "st"
-        # Present participle: spiel + -end, arbeiten + -d:
-        pp = v.endswith(("en", "ln", "rn")) and v + "d" or v + "end"
-        # Past tense regular:
-        pt = encode_sz(pr) + "t"
-        # Past participle: haushalten => hausgehalten
-        ge = (v.startswith(prefix_inseparable) or b.endswith(("r", "t"))) and pt or "ge" + pt
-        ge = x and x + "ge" + pt or ge
-        # Present subjunctive: stem + -e, -est, -en, -et:
-        s1 = encode_sz(pl)
-        # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
-        s2 = encode_sz(pt)
-        # Construct the lexeme:
-        lexeme = a = [
-            v,
-            pl + "e" + x1, p2 + x1, pr + "t" + x1, pw + x1, pr + "t" + x1, pp,                 # present
-            pt + "e" + x1, pt + "est" + x1, pt + "e" + x1, pt + "en" + x1, pt + "et" + x1, ge, # past
-            b + "e" + x1, pr + "t" + x1, x + pw,                                               # imperative
-            s1 + "e" + x1, s1 + "est" + x1, s1 + "en" + x1, s1 + "et" + x1,                    # subjunctive I
-            s2 + "e" + x1, s2 + "est" + x1, s2 + "en" + x1, s2 + "et" + x1                     # subjunctive II
-        ]
+        x, x1, x2, x3 = "", "", "", ""
+        base_verb_found_inseparable, baseverb_found_separable = False, False
+        if v not in faux_prefix_verbs:
+            for prefix in prefixes:
+                if v.startswith(prefix):
+                    if prefix in prefix_separable:
+                        b, x = v[len(prefix):], prefix
+                        x1 = (" " + x).rstrip()
+                        x2 = (not b.startswith(
+                            prefix_inseparable) or b in faux_prefix_verbs) and x + "ge" or x
+                    else:
+                        b, x3 = v[len(prefix):], prefix
+                        x2 = prefix
+                    try:
+                        base_verb = self.lemma(b, parse=True)
+                        assert base_verb
+                        baseverb_found_separable = prefix in prefix_separable
+                        base_verb_found_inseparable = prefix in prefix_inseparable
+                        break  # first (longest-listed) prefix with a known base verb wins
+                    except Exception: pass  # no known base verb: try the next prefix
+
+        if base_verb_found_inseparable or baseverb_found_separable:
+
+            base_inflected = self.lexeme(base_verb, no_duplicates=False)
+            if base_verb_found_inseparable:
+                lexeme = a = list(map(lambda t: (t.startswith(
+                    'ge')) and x3 + t[2:] or x3 + t,
+                                      base_inflected))
+            else:
+                keep = lambda t: [t]
+                postfix_prefix = lambda t: [t + x1]
+                prefix_prefix = lambda t: [x + t]
+                # Desired output order for German:
+                # ['inf',
+                #  '1sg', '2sg', '3sg', '1pl', '2pl', 'part',
+                #  '1sgp', '2sgp', '3sgp', '1ppl', '2ppl', 'ppart',
+                #  '2sg!', '2pl!', '1pl!',
+                #  '1sg?', '2sg?', '1pl?', '2pl?',
+                #  '1sgp?', '2sgp?', '1ppl?', '2ppl?']
+                inflectfunctions = [prefix_prefix, # infinitive
+                                    postfix_prefix, postfix_prefix,
+                                    postfix_prefix, postfix_prefix,
+                                    postfix_prefix,
+                                    prefix_prefix,  # present gerund
+                                    postfix_prefix, postfix_prefix,
+                                    postfix_prefix, postfix_prefix,
+                                    postfix_prefix, lambda t: [
+                        x2 + t if not t.startswith('ge') else x2 + t[2:]],
+                                    # past
+                                    postfix_prefix, postfix_prefix,
+                                    postfix_prefix,  # imperative
+                                    postfix_prefix, postfix_prefix,
+                                    postfix_prefix, postfix_prefix,
+                                    # subjunctive I
+                                    postfix_prefix, postfix_prefix,
+                                    postfix_prefix, postfix_prefix,
+                                    # subjunctive
+                                    ]
+                lexeme = a = []
+                for func, form in zip(inflectfunctions, base_inflected):
+                    lexeme.extend(func(form))
+        else:
+            # Stem = infinitive minus -en, -ln, -rn.
+            b = b0 = re.sub("en$", "",
+                            re.sub("ln$", "l", re.sub("rn$", "r", v)))
+
+            # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
+            pl = b.endswith("el") and b[:-2] + "l" or b
+            # Present tense 1pl -el: handeln => wir handeln
+            pw = v.endswith(("ln", "rn")) and v or b + "en"
+            # Present tense ending in -d or -t gets -e:
+            pr = b.endswith(("d", "t")) and b + "e" or b
+            # Present tense 2sg gets -st, unless stem ends with -s or -z.
+            p2 = pr.endswith(("s", "z")) and pr + "t" or pr + "st"
+            # Present participle: spielen + -d, arbeiten + -d, handeln + -d:
+            pp = v.endswith(("en", "ln", "rn")) and v + "d" or v + "end"
+            # Past tense regular:
+            pt = encode_sz(pr) + "t"
+            # Past participle: haushalten => hausgehalten
+            ge = (v.startswith(
+                prefix_inseparable) and not v in faux_prefix_verbs or
+                  (b.endswith(("ier")) \
+                   and not v in faux_latinate)) and pt or "ge" + pt
+            ge = x and x + "ge" + pt or ge
+            # Present subjunctive: stem + -e, -est, -en, -et:
+            s1 = encode_sz(pl)
+            # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
+            s2 = encode_sz(pt)
+            # Construct the lexeme:
+            lexeme = a = [
+                v,
+                pl + "e" + x1, p2 + x1, pr + "t" + x1, pw + x1, pr + "t" + x1,
+                pp,  # present
+                pt + "e" + x1, pt + "est" + x1, pt + "e" + x1, pt + "en" + x1,
+                pt + "et" + x1, ge,  # past
+                b + "e" + x1, pr + "t" + x1, x + pw,  # imperative
+                s1 + "e" + x1, s1 + "est" + x1, s1 + "en" + x1, s1 + "et" + x1,
+                # subjunctive I
+                s2 + "e" + x1, s2 + "est" + x1, s2 + "en" + x1, s2 + "et" + x1
+                # subjunctive II
+            ]
         # Encode Eszett (ß) and attempt to retrieve from the lexicon.
         # Decode Eszett for present and imperative.
         if encode_sz(v) in self:
             a = self[encode_sz(v)]
-            a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:]
+            a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in
+                                                           a[13:20]] + a[20:]
         # Since the lexicon does not contain imperative for all verbs, don't simply return it.
         # Instead, update the rule-based lexeme with inflections from the lexicon.
         return [a[i] or lexeme[i] for i in range(len(a))]
@@ -486,10 +694,13 @@ class Verbs(_Verbs):
             # auswirkte => wirkte aus
             for prefix in prefix_separable:
                 if verb.startswith(prefix):
-                    tenses = _Verbs.tenses(self, verb[len(prefix):] + " " + prefix, parse)
+                    tenses = _Verbs.tenses(self,
+                                           verb[len(prefix):] + " " + prefix,
+                                           parse)
                     break
         return tenses
 
+
 verbs = Verbs()
 
 conjugate, lemma, lexeme, tenses = \
@@ -499,34 +710,45 @@ conjugate, lemma, lexeme, tenses = \
 
 # Strong inflection: no article.
 adjectives_strong = {
-    ("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "e",
-    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "e",
-    ("m", "dat"): "em", ("f", "dat"): "er", ("n", "dat"): "em", ("p", "dat"): "en",
-    ("m", "gen"): "en", ("f", "gen"): "er", ("n", "gen"): "en", ("p", "gen"): "er",
+    ("m", "nom"): "er", ("f", "nom"): "e", ("n", "nom"): "es",
+    ("p", "nom"): "e",
+    ("m", "acc"): "en", ("f", "acc"): "e", ("n", "acc"): "es",
+    ("p", "acc"): "e",
+    ("m", "dat"): "em", ("f", "dat"): "er", ("n", "dat"): "em",
+    ("p", "dat"): "en",
+    ("m", "gen"): "en", ("f", "gen"): "er", ("n", "gen"): "en",
+    ("p", "gen"): "er",
 }
 
 # Mixed inflection: after indefinite article ein & kein and possessive determiners.
 adjectives_mixed = {
-    ("m", "nom"): "er", ("f", "nom"): "e" , ("n", "nom"): "es", ("p", "nom"): "en",
-    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "es", ("p", "acc"): "en",
-    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
-    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
+    ("m", "nom"): "er", ("f", "nom"): "e", ("n", "nom"): "es",
+    ("p", "nom"): "en",
+    ("m", "acc"): "en", ("f", "acc"): "e", ("n", "acc"): "es",
+    ("p", "acc"): "en",
+    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en",
+    ("p", "dat"): "en",
+    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en",
+    ("p", "gen"): "en",
 }
 
 # Weak inflection: after definite article.
 adjectives_weak = {
-    ("m", "nom"): "e",  ("f", "nom"): "e" , ("n", "nom"): "e",  ("p", "nom"): "en",
-    ("m", "acc"): "en", ("f", "acc"): "e" , ("n", "acc"): "e",  ("p", "acc"): "en",
-    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en", ("p", "dat"): "en",
-    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en", ("p", "gen"): "en",
+    ("m", "nom"): "e", ("f", "nom"): "e", ("n", "nom"): "e", ("p", "nom"): "en",
+    ("m", "acc"): "en", ("f", "acc"): "e", ("n", "acc"): "e",
+    ("p", "acc"): "en",
+    ("m", "dat"): "en", ("f", "dat"): "en", ("n", "dat"): "en",
+    ("p", "dat"): "en",
+    ("m", "gen"): "en", ("f", "gen"): "en", ("n", "gen"): "en",
+    ("p", "gen"): "en",
 }
 
 # Uninflected + exceptions.
 adjective_attributive = {
-    "etwas" : "etwas",
-    "genug" : "genug",
-    "viel"  : "viel",
-    "wenig" : "wenig"
+    "etwas": "etwas",
+    "genug": "genug",
+    "viel": "viel",
+    "wenig": "wenig"
 }
 
 
@@ -537,21 +759,23 @@ def attributive(adjective, gender=MALE,
         (nominative, accusative, dative, genitive).
     """
     w, g, c, a = \
-        adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None
+        adjective.lower(), gender[:1].lower(), role[
+                                               :3].lower(), article and article.lower() or None
     if w in adjective_attributive:
         return adjective_attributive[w]
     if a is None \
-    or a in ("mir", "dir", "ihm") \
-    or a in ("ein", "etwas", "mehr") \
-    or a.startswith(("all", "mehrer", "wenig", "viel")):
+            or a in ("mir", "dir", "ihm") \
+            or a in ("ein", "etwas", "mehr") \
+            or a.startswith(("all", "mehrer", "wenig", "viel")):
         return w + adjectives_strong.get((g, c), "")
     if a.startswith(("ein", "kein")) \
-    or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
+            or a.startswith(
+        ("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
         return w + adjectives_mixed.get((g, c), "")
     if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \
-    or a.startswith((
-      "derselb", "derjenig", "jed", "jeglich", "jen", "manch",
-      "dies", "solch", "welch")):
+            or a.startswith((
+            "derselb", "derjenig", "jed", "jeglich", "jen", "manch",
+            "dies", "solch", "welch")):
         return w + adjectives_weak.get((g, c), "")
     # Default to strong inflection.
     return w + adjectives_strong.get((g, c), "")
@@ -570,13 +794,14 @@ def predicative(adjective):
         for suffix in ("em", "en", "er", "es", "e"):
             if w.endswith(suffix):
                 b = w[:max(-len(suffix), -(len(w) - 3))]
-                if b.endswith("bl"): # plausibles => plausibel
+                if b.endswith("bl"):  # plausibles => plausibel
                     b = b[:-1] + "el"
-                if b.endswith("pr"): # propres => proper
+                if b.endswith("pr"):  # propres => proper
                     b = b[:-1] + "er"
                 return b
     return w
 
+
 #### COMPARATIVE & SUPERLATIVE #####################################################################
 
 COMPARATIVE = "er"
@@ -601,6 +826,6 @@ def comparative(adjective):
 def superlative(adjective):
     return grade(adjective, SUPERLATIVE)
 
-#print(comparative("schönes"))
-#print(superlative("schönes"))
-#print(superlative("große"))
+# print(comparative("schönes"))
+# print(superlative("schönes"))
+# print(superlative("große"))
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/vector/__init__.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/pattern/vector/__init__.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/vector/__init__.py
@@ -3104,7 +3104,7 @@ class SLP(Classifier):
             p = sorted((self._classes[type], type) for type, w in p.items() if w == m > 0)
             p = [type for frequency, type in p if frequency == p[0][0]]
             return choice(p)
-        except:
+        except Exception:
             return self.baseline
 
     def finalize(self):
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/web/__init__.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/pattern/web/__init__.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/pattern/web/__init__.py
@@ -4177,6 +4177,12 @@ class Crawler(object):
         if link.url not in self.visited:
             t = time.time()
             url = URL(link.url)
+            # Base domain of the link (or of its referrer) if visited before.
+            base_link_url = base(link.url) if base(link.url) in self.history else (base(link.referrer) if base(link.referrer) in self.history else None)
+            if base_link_url in self.history:
+                if time.time() - self.history[base_link_url] < self.delay:
+                    self.push(link)
+                    return False
             if url.mimetype == "text/html":
                 try:
                     kwargs.setdefault("unicode", True)
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/setup.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/setup.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/setup.py
@@ -89,7 +89,7 @@ setup(
         "pattern.text.en"         : ["*.txt", "*.xml", "*.slp"],
         "pattern.text.en.wordlist": ["*.txt"],
         "pattern.text.en.wordnet" : ["*.txt", "dict/*"],
-        "pattern.text.ru": ["*.txt", "*.xml", "*.slp"],
+        "pattern.text.ru"         : ["*.txt", "*.xml", "*.slp"],
         "pattern.text.ru.wordlist": ["*.txt"],
         "pattern.text.es"         : ["*.txt", "*.xml"],
         "pattern.text.fr"         : ["*.txt", "*.xml"],
@@ -126,6 +126,7 @@ setup(
         "Programming Language :: Python :: 2.7",
         "Programming Language :: Python :: 3",
         "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
         "Topic :: Internet :: WWW/HTTP :: Indexing/Search",
         "Topic :: Multimedia :: Graphics",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
@@ -142,10 +143,10 @@ setup(
         "feedparser",
         "pdfminer" if sys.version < "3" else "pdfminer.six",
         "numpy",
-        "scipy",
+        "scipy" if sys.version >= "3" else "scipy==1.2.1",
         "nltk",
         "python-docx",
-        "cherrypy",
+        "cherrypy" if sys.version >= "3" else "cherrypy==17.4.1",
         "requests"
     ],
     zip_safe = False
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_db.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/test/test_db.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_db.py
@@ -63,7 +63,7 @@ def create_db_sqlite():
         password = PASSWORD)
 
     # Drop all tables first
-    for table in list(DB_MYSQL.tables):
+    for table in list(DB_SQLITE.tables):
         DB_SQLITE.drop(table)
 
     return DB_SQLITE
@@ -797,19 +797,24 @@ class _TestQuery(object):
             [(1, "jack", 20),
              (2, "john,jane", 30)])):
             v = self.db.persons.search(**kwargs)
-            v.xml
+            v_rows = v.rows()
+            v_sql = v.SQL()
             self.assertEqual(v.SQL(), sql)
             self.assertEqual(v.rows(), rows)
+
+        v_failing = self.db.persons.search(**dict(fields=["name", "gender.name"], relations=[db.relation("gender", "id", "gender")]))
+        v_failing_rows = v_failing.rows()
         # Assert Database.link() permanent relations.
         v = self.db.persons.search(fields=["name", "gender.name"], sort=["persons.id"])
         v.aliases["gender.name"] = "gender"
         self.db.link("persons", "gender", "gender", "id", join=db.LEFT)
         self.assertEqual(v.SQL(),
             "select persons.name, gender.name as gender from `persons` left join `gender` on persons.gender=gender.id order by persons.id asc;")
-        self.assertEqual(v.rows(),
-            [('john', 'male'),
-             ('jack', 'male'),
-             ('jane', 'female')])
+        self.assertEqual(set(v.rows()),
+            set([('jane', 'female'),
+                 ('john', 'male'),
+                 ('jack', 'male')]))
+
         print("pattern.db.Table.search()")
         print("pattern.db.Table.Query")
 
@@ -818,7 +823,7 @@ class _TestQuery(object):
         v = self.db.persons.search(fields=["name", "gender.name"], sort=["persons.id"])
         v.aliases["gender.name"] = "gender"
         self.db.link("persons", "gender", "gender", "id", join=db.LEFT)
-        self.assertEqual(v.xml,
+        self.assertEqual(set(v.xml.split('\n')),set(
             '<?xml version="1.0" encoding="utf-8"?>\n'
             '<query table="persons" fields="name, gender" count="3">\n'
             '\t<schema>\n'
@@ -826,11 +831,11 @@ class _TestQuery(object):
             '\t\t<field name="gender" type="string" length="100" />\n'
             '\t</schema>\n'
             '\t<rows>\n'
+            '\t\t<row name="jane" gender="female" />\n'
             '\t\t<row name="john" gender="male" />\n'
             '\t\t<row name="jack" gender="male" />\n'
-            '\t\t<row name="jane" gender="female" />\n'
             '\t</rows>\n'
-            '</query>'
+            '</query>'.split('\n'))
         )
         # Assert Database.create() from XML.
         self.assertRaises(db.TableError, self.db.create, v.xml) # table 'persons' already exists
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_de.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/test/test_de.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_de.py
@@ -104,9 +104,13 @@ class TestInflection(unittest.TestCase):
         for v1, v2 in de.inflect.verbs.inflections.items():
             if de.inflect.verbs.find_lemma(v1) == v2:
                 i += 1
+            # Misses are still counted via n below, so the hit rate
+            # (i / n) reflects every inflection checked.
             n += 1
         self.assertTrue(float(i) / n > 0.86)
-        print("pattern.de.inflect.verbs.find_lemma()")
+        rate_correct = float(i) / n
+        self.assertTrue(rate_correct > 0.86)
+        print("pattern.de.inflect.verbs.find_lemma() - hit rate {}".format(rate_correct))
 
     def test_find_lexeme(self):
         # Assert the accuracy of the verb conjugation algorithm.
@@ -119,8 +123,9 @@ class TestInflection(unittest.TestCase):
                 if lexeme1[j] == lexeme2[j]:
                     i += 1
                 n += 1
+        rate_correct = float(i) / n
         self.assertTrue(float(i) / n > 0.86)
-        print("pattern.de.inflect.verbs.find_lexeme()")
+        print("pattern.de.inflect.verbs.find_lexeme() - hit rate {}".format(rate_correct))
 
     def test_conjugate(self):
         # Assert different tenses with different conjugations.
@@ -154,8 +159,22 @@ class TestInflection(unittest.TestCase):
           ("sein", "wäre",    (de.PAST, 3, de.SINGULAR, de.SUBJUNCTIVE)),
           ("sein", "wären",   (de.PAST, 1, de.PLURAL, de.SUBJUNCTIVE)),
           ("sein", "wäret",   (de.PAST, 2, de.PLURAL, de.SUBJUNCTIVE)),
-          ("sein", "wären",   (de.PAST, 3, de.PLURAL, de.SUBJUNCTIVE))):
-            self.assertEqual(de.conjugate(v1, tense), v2)
+          ("sein", "wären",   (de.PAST, 3, de.PLURAL, de.SUBJUNCTIVE)),
+          ("vorgehen", "gingst vor", (de.PAST, 2, de.SINGULAR)), # separable prefix, irregular base
+          ("betreffen", "betroffen", (de.PAST, de.PARTICIPLE)), # inseparable prefix, irregular base
+          ("umbenennen", "benanntest um", (de.PAST, 2, de.SINGULAR)), # stacked prefixes, irregular base
+          ("einberufen", 'berief ein', (de.PAST, 3, de.SINGULAR)), # stacked prefixes, irregular base
+          ('entern', 'geentert', (de.PAST, de.PARTICIPLE)), # looks like prefix ent-
+          ('zurren', 'zurrt', (de.PRESENT, 3, de.SINGULAR)), # looks like prefix zu-
+          ('bechern', 'gebechert', (de.PAST, de.PARTICIPLE)), # looks like prefix be-
+          ('drangsalieren', 'drangsaliert', (de.PAST, de.PARTICIPLE)), # blocking ge-prefixation
+          ('stapfen', 'gestapft', (de.PAST, de.PARTICIPLE)), #
+          ('fristen', 'gefristet', (de.PAST, de.PARTICIPLE)), # might be misinterpreted as past form
+          ('gieren', 'gegiert', (de.PAST, de.PARTICIPLE)), # ends in -ieren but doesn't block ge-prefixation
+          ('angeln', 'angeltest', (de.PAST, 2, de.SINGULAR)), # looks like prefix an-
+          ('geifern', 'gegeifert', (de.PAST, de.PARTICIPLE)), # looks like prefix ge-
+            ):
+            self.assertEqual(de.conjugate(v1, tense, allow_inflected=False), v2)
         print("pattern.de.conjugate()")
 
     def test_lexeme(self):
@@ -173,6 +192,8 @@ class TestInflection(unittest.TestCase):
         # Assert tense recognition.
         self.assertTrue((de.PRESENT, 3, de.SG) in de.tenses("ist"))
         self.assertTrue("2sg" in de.tenses("bist"))
+        self.assertTrue((de.PAST, 2, de.SINGULAR) in de.tenses('gingst vor'))
+        self.assertTrue((de.PRESENT, 2, de.SINGULAR, de.SUBJUNCTIVE) in de.tenses('gehest vor'))
         print("pattern.de.tenses()")
 
 #---------------------------------------------------------------------------------------------------
@@ -185,9 +206,12 @@ class TestParser(unittest.TestCase):
 
     def test_find_lemmata(self):
         # Assert lemmata for nouns, adjectives and verbs.
-        v = de.parser.find_lemmata([["Ich", "PRP"], ["sage", "VB"], ["schöne", "JJ"], ["Dinge", "NNS"]])
+        v = de.parser.find_lemmata([['Man', 'PRP'], ['nimmt an', 'VB'], [',', 'PUNC'], ["ich", "PRP"], ["sage", "VB"], ["schöne", "JJ"], ["Dinge", "NNS"]])
         self.assertEqual(v, [
-            ["Ich", "PRP", "ich"],
+            ['Man', 'PRP', 'man'],
+            ['nimmt an', 'VB', 'annehmen'],
+            [',', 'PUNC', ','],
+            ["ich", "PRP", "ich"],
             ["sage", "VB", "sagen"],
             ["schöne", "JJ", "schön"],
             ["Dinge", "NNS", "ding"]])
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_text.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/test/test_text.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_text.py
@@ -98,11 +98,12 @@ class TestModel(unittest.TestCase):
         for i in range(2):
             v.train("black", "JJ", previous=("the", "DT"), next=("cat", "NN"))
             v.train("on", "IN", previous=("sat", "VBD"), next=("the", "DT"))
-        #self.assertEqual("JJ", v.classify("slack"))
+        # self.assertEqual("JJ", v.classify("slack"))  # fails unpredictably about 1 in 3 times
         self.assertEqual("JJ", v.classify("white", previous=("a", "DT"), next=("cat", "NN")))
         self.assertEqual("IN", v.classify("on", previous=("sat", "VBD")))
         self.assertEqual("IN", v.classify("on", next=("the", "")))
-        #self.assertEqual(["white", "JJ"], v.apply(("white", ""), next=("cat", "")))
+        # fails intermittently:
+        # self.assertEqual(["white", "JJ"], v.apply(("white", ""), next=("cat", "")))
         print("pattern.text.Model")
 
 #---------------------------------------------------------------------------------------------------
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_vector.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/test/test_vector.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_vector.py
@@ -888,7 +888,7 @@ class TestClassifier(unittest.TestCase):
 
     def _test_classifier(self, Classifier, **kwargs):
         # Assert classifier training + prediction for trivial cases.
-        v = Classifier(**kwargs)
+        v = Classifier(seed=0, **kwargs)
         test_doc1 = None
         test_doc2 = None
 
Index: pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_web.py
===================================================================
--- pattern-5b85d998c30ddc6772b56310713530224466083a.orig/test/test_web.py
+++ pattern-5b85d998c30ddc6772b56310713530224466083a/test/test_web.py
@@ -536,7 +536,18 @@ class TestSearchEngine(unittest.TestCase
         self._test_search_engine("Bing", *self.api["Bing"])
 
     def test_search_twitter(self):
-        self._test_search_engine("Twitter", *self.api["Twitter"])
+        n_tries = 5
+        for i in range(n_tries):
+            try:
+                self._test_search_engine("Twitter", *self.api["Twitter"])
+                break
+            except AssertionError:
+                if i < n_tries - 1:
+                    # Sleep for a moment to try again
+                    time.sleep(30)
+                    pass
+                else:
+                    raise
 
     @unittest.skip('Mediawiki/Wikipedia API or appearance changed')
     def test_search_wikipedia(self):
