[FEATURE] Update tokenizers #158
Merged
+126 −14
Commits (21):
dbe8936  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
8fd96b2  Update tokenization.py  (KINGNEWBLUSH)
025fa86  modified: AUTHORS.md  (KINGNEWBLUSH)
aea99a2  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
970c1b9  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
a289a7a  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
ad7df8b  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
9423b31  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
5792e48  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
edc266f  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
c526016  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
64c6cda  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
3a53b51  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
569bb9f  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
4542258  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
721bc0a  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
1476f8a  modified: tests/test_tokenizer/test_tokenizer.py  (KINGNEWBLUSH)
f02ccce  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
05172b4  modified: tests/test_tokenizer/test_tokenizer.py  (KINGNEWBLUSH)
767778f  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
e86a5e6  modified: EduNLP/SIF/tokenization/text/tokenization.py  (KINGNEWBLUSH)
EduNLP/SIF/tokenization/text/tokenization.py
@@ -2,7 +2,14 @@
 # 2021/5/18 @ tongshiwei
 import logging
 import jieba
+from nltk.tokenize import word_tokenize
+import nltk
+import spacy
+import tokenizers as huggingface_tokenizer
+from tokenizers.trainers import BpeTrainer
 from .stopwords import DEFAULT_STOPWORDS
+from tokenizers import Tokenizer as HGTokenizer


 jieba.setLogLevel(logging.INFO)
@@ -15,7 +22,13 @@ def is_chinese(word):
         return True


-def tokenize(text, granularity="word", stopwords="default"):
+def tokenize(text,
+             granularity="word",
+             stopwords="default",
+             tokenizer="jieba",
+             tok_model="en_core_web_sm",
+             bpe_json='bpe.tokenizer.json',
+             bpe_trainfile=None):
     """
     Using jieba library to tokenize item by word or char.
@@ -37,17 +50,68 @@ def tokenize(text, granularity="word", stopwords="default"):
     """
     stopwords = DEFAULT_STOPWORDS if stopwords == "default" else stopwords
     stopwords = stopwords if stopwords is not None else {}
-    if granularity == "word":
-        return [token for token in jieba.cut(text) if token not in stopwords and token.strip()]
-    elif granularity == "char":
-        jieba_tokens = [token for token in jieba.cut(text) if token not in stopwords and token.strip()]
-        # Use jieba_tokens to hangle sentence with mixed chinese and english.
-        split_tokens = []
-        for token in jieba_tokens:
-            if is_chinese(token):
-                split_tokens.extend(list(token))
-            else:
-                split_tokens.append(token)
-        return split_tokens
+    if (tokenizer == 'jieba'):
+        if granularity == "word":
+            return [
+                token for token in jieba.cut(text)
+                if token not in stopwords and token.strip()
+            ]
+        elif granularity == "char":
+            jieba_tokens = [
+                token for token in jieba.cut(text)
+                if token not in stopwords and token.strip()
+            ]
+            # Use jieba_tokens to hangle sentence with mixed chinese and english.
+            split_tokens = []
+            for token in jieba_tokens:
+                if is_chinese(token):
+                    split_tokens.extend(list(token))
+                else:
+                    split_tokens.append(token)
+            return split_tokens
+        else:
+            raise TypeError("Unknown granularity %s" % granularity)
+
+    elif (tokenizer == 'nltk'):
+        try:
+            return [
+                token for token in word_tokenize(text)
+                if token not in stopwords and token.strip()
+            ]
+        except LookupError:
+            nltk.download('punkt')
+            return [
+                token for token in word_tokenize(text)
+                if token not in stopwords and token.strip()
+            ]
+
+    elif (tokenizer == 'spacy'):
+        try:
+            spacy_tokenizer = spacy.load(tok_model)
+        except OSError:
+            spacy.cli.download(tok_model)
+            spacy_tokenizer = spacy.load(tok_model)
+        output = spacy_tokenizer(str(text))
+        return [
+            token.text for token in output
+            if token.text not in stopwords
+        ]
+
+    elif (tokenizer == 'bpe'):
+        try:
+            tokenizer = HGTokenizer.from_file('bpeTokenizer.json')
+        except Exception:
+            tokenizer = huggingface_tokenizer.Tokenizer(
+                huggingface_tokenizer.models.BPE())
+            if (bpe_trainfile is None):
+                raise LookupError("bpe train file not found, using %s." % bpe_trainfile)
+            trainer = BpeTrainer(
+                special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
+            tokenizer.train(files=[bpe_trainfile], trainer=trainer)
+            tokenizer.save('bpeTokenizer.json', pretty=True)
+        output = tokenizer.encode(text)
+        output = output.tokens
+        return output[0]
     else:
-        raise TypeError("Unknown granularity %s" % granularity)
+        raise TypeError("Invalid Spliter: %s" % tokenizer)

Reviewer comment on the tokenizer.save('bpeTokenizer.json', pretty=True) line: same here
tests/test_tokenizer/test_tokenizer.py
@@ -4,6 +4,7 @@
 import pytest
 from EduNLP.Tokenizer import get_tokenizer
 from EduNLP.Pretrain import DisenQTokenizer
+from EduNLP.utils import abs_current_dir, path_append


 def test_tokenizer():
@@ -50,6 +51,49 @@ def test_CharTokenizer():
     assert ret == ans


+def test_TokenizerNLTK():
+    items = ["The stationery store has 600 exercise books, and after selling\
+        some, there are still 4 packs left, 25 each, how many are sold?"]
+    ans = [
+        'The', 'stationery', 'store', 'has', '600', 'exercise',
+        'books', 'and', 'after', 'selling', 'some', 'there', 'are', 'still',
+        '4', 'packs', 'left', '25', 'each', 'how', 'many', 'are', 'sold'
+    ]
+    tokenizer = get_tokenizer("pure_text",
+                              text_params={"tokenizer": 'nltk', "stopwords": set(",?")})
+    tokens = tokenizer(items)
+    ret = next(tokens)
+    assert ret == ans
+
+
+def test_TokenizerSpacy():
+    items = ["The stationery store has 600 exercise books, and after selling\
+        some, there are still 4 packs left, 25 each, how many are sold?"]
+    ans = [
+        'The', 'stationery', 'store', 'has', '600', 'exercise',
+        'books', 'and', 'after', 'selling', ' ', 'some', 'there', 'are', 'still',
+        '4', 'packs', 'left', '25', 'each', 'how', 'many', 'are', 'sold'
+    ]
+    tokenizer = get_tokenizer("pure_text",
+                              text_params={"tokenizer": 'spacy', "stopwords": set(",?")})
+    tokens = tokenizer(items)
+    ret = next(tokens)
+    assert ret == ans
+
+
+def test_TokenizerBPE():
+    items = ['The stationery store has $600$ exercise books, and after selling some,\
+        there are still $4$ packs left, $25$ each, how many are sold?']
+    ans = ['h', '600', ' ', '4', ' ', '25', ' ']
+    data_path = path_append(abs_current_dir(__file__),
+                            "../../static/test_data/standard_luna_data.json", to_str=True)
+    tokenizer = get_tokenizer("pure_text", text_params={"tokenizer": 'bpe', "stopwords": set(",?"),
+                                                        "bpe_trainfile": data_path})
+    tokens = tokenizer(items)
+    ret = next(tokens)
+    assert ret == ans
+
+
 def test_SpaceTokenizer():
     items = ['文具店有 $600$ 本练习本,卖出一些后,还剩 $4$ 包,每包 $25$ 本,卖出多少本?']
     tokenizer = get_tokenizer("space", stop_words=[])

Reviewer comment on test_TokenizerBPE: Does this support Chinese?
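The new tests route the backends through get_tokenizer; a sketch of the same call pattern outside pytest (the text_params keys mirror the tests above; output depends on installed nltk data):

```python
# Sketch of the call pattern used by the new tests: select the nltk
# backend for the "pure_text" tokenizer via text_params.
from EduNLP.Tokenizer import get_tokenizer

items = ["The stationery store has 600 exercise books, how many are sold?"]
tokenizer = get_tokenizer("pure_text",
                          text_params={"tokenizer": "nltk", "stopwords": set(",?")})
print(next(tokenizer(items)))
```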
Reviewer comment: change this to a parameter instead of a hard-coded path. Or directly reuse the tok_model param.
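A sketch of what that suggestion could look like, reusing the bpe_json parameter already present in the new signature instead of the hard-coded 'bpeTokenizer.json' (a hypothetical helper, not the merged implementation):

```python
import tokenizers as huggingface_tokenizer
from tokenizers import Tokenizer as HGTokenizer
from tokenizers.trainers import BpeTrainer


def load_or_train_bpe(bpe_json, bpe_trainfile=None):
    """Load the BPE tokenizer saved at bpe_json; if it cannot be loaded,
    train one on bpe_trainfile and save it to bpe_json for reuse."""
    try:
        return HGTokenizer.from_file(bpe_json)
    except Exception:
        if bpe_trainfile is None:
            raise LookupError("bpe train file not found: %s" % bpe_trainfile)
        tokenizer = huggingface_tokenizer.Tokenizer(
            huggingface_tokenizer.models.BPE())
        trainer = BpeTrainer(
            special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
        tokenizer.train(files=[bpe_trainfile], trainer=trainer)
        tokenizer.save(bpe_json, pretty=True)
        return tokenizer
```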