From fb64c42a3fe20f0c08625c56ec8360b768ad7707 Mon Sep 17 00:00:00 2001
From: Debojeet Chatterjee
Date: Wed, 27 Nov 2019 14:12:10 -0800
Subject: [PATCH] Start, End index calculations fix for unicode characters.
 (#1171)

Summary:
Pull Request resolved: https://github.com/facebookresearch/pytext/pull/1171

The existing GPT2BPETokenizer incorrectly calculates the start and end indices
for unicode characters. This is because for multi-byte characters, we need to
additionally use the byte decoder on the decoded bytes to get back the original
token that was encoded.

Reviewed By: chenyangyu1988

Differential Revision: D18697646

fbshipit-source-id: 8f4d32a1caa40d8d06e7be31dfd4a6846692531a
---
 pytext/data/tokenizers/tokenizer.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/pytext/data/tokenizers/tokenizer.py b/pytext/data/tokenizers/tokenizer.py
index cb3b9039a..1cacf6931 100644
--- a/pytext/data/tokenizers/tokenizer.py
+++ b/pytext/data/tokenizers/tokenizer.py
@@ -210,6 +210,13 @@ def __init__(self, bpe: GPT2BPEEncoder):
     def tokenize(self, input_str: str) -> List[Token]:
         bpe_ids = self.bpe.encode(input_str)
         char_tokens = [self.bpe.decoder[id].lstrip(u"\u0120") for id in bpe_ids]
+        # fix for incorrect decoding of utf-8 chars
+        char_tokens = [
+            bytearray([self.bpe.byte_decoder[char] for char in char_token]).decode(
+                "utf-8"
+            )
+            for char_token in char_tokens
+        ]
         lengths = [len(token) for token in char_tokens]
         tokens = []
         end = 0
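
Note (not part of the patch): a minimal, self-contained sketch of the byte-level
round trip that motivates the added lines. It assumes only the standard GPT-2
bytes_to_unicode construction; the plain dicts below stand in for the
bpe.decoder / bpe.byte_decoder attributes used in the diff.

    # GPT-2 BPE works on a byte-level alphabet: every raw byte is mapped to a
    # printable unicode character before merging, so a decoded BPE token is a
    # string of byte stand-ins, not the original UTF-8 text. For multi-byte
    # characters its length disagrees with the real character count, which is
    # what threw off the start/end indices.

    def bytes_to_unicode():
        # Reversible byte <-> unicode table (same construction as GPT-2's
        # encoder.py): printable latin-1 bytes map to themselves, the rest are
        # shifted into unused code points so every byte gets a printable stand-in.
        bs = (
            list(range(ord("!"), ord("~") + 1))
            + list(range(ord("\xa1"), ord("\xac") + 1))
            + list(range(ord("\xae"), ord("\xff") + 1))
        )
        cs = bs[:]
        n = 0
        for b in range(256):
            if b not in bs:
                bs.append(b)
                cs.append(256 + n)
                n += 1
        return dict(zip(bs, (chr(c) for c in cs)))

    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}

    token = "né"  # 'n' is 1 byte in UTF-8, 'é' is 2 bytes
    encoded = "".join(byte_encoder[b] for b in token.encode("utf-8"))
    print(len(encoded))  # 3 -- length in byte stand-ins, wrong for span indexing

    decoded = bytearray(byte_decoder[ch] for ch in encoded).decode("utf-8")
    print(decoded, len(decoded))  # "né" 2 -- correct character count

Measuring lengths on the byte stand-ins (3 for "né") is what skewed the start
and end offsets; mapping each character back through byte_decoder and decoding
the resulting bytes as UTF-8 restores the true character count (2), which is
exactly what the added list comprehension in the patch does.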