From: Boyuan Yang <073plan@gmail.com>
Date: Sun, 21 Jan 2018 19:15:48 +0800
Subject: Fix typos found by codespell
Applied-Upstream: https://github.com/fxsjy/jieba/commit/17ef8abba38552e5b0a78de33765095c149b8c4d
---
 jieba/__init__.py | 4 ++--
 setup.py          | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/jieba/__init__.py b/jieba/__init__.py
index 45dc908..ef65cf5 100644
--- a/jieba/__init__.py
+++ b/jieba/__init__.py
@@ -164,7 +164,7 @@ class Tokenizer(object):
             self.initialized = True
             default_logger.debug(
                 "Loading model cost %.3f seconds." % (time.time() - t1))
-            default_logger.debug("Prefix dict has been built succesfully.")
+            default_logger.debug("Prefix dict has been built successfully.")
 
     def check_initialized(self):
         if not self.initialized:
@@ -275,7 +275,7 @@ class Tokenizer(object):
     def cut(self, sentence, cut_all=False, HMM=True):
         '''
         The main function that segments an entire sentence that contains
-        Chinese characters into seperated words.
+        Chinese characters into separated words.
 
         Parameter:
             - sentence: The str(unicode) to be segmented.
diff --git a/setup.py b/setup.py
index 265882b..6882cf3 100644
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@ GitHub: https://github.com/fxsjy/jieba
 
 setup(name='jieba',
       version='0.39',
-      description='Chinese Words Segementation Utilities',
+      description='Chinese Words Segmentation Utilities',
       long_description=LONGDOC,
       author='Sun, Junyi',
       author_email='ccnusjy@gmail.com',