From 0036774a6fa082e94e1f9640bcb4032667866a76 Mon Sep 17 00:00:00 2001
From: Matt Watson
Date: Tue, 5 Dec 2023 18:18:00 -0800
Subject: [PATCH] I am not exactly sure how this was ever working

We had copied the BERT output; T5 has a different tokenizer.
---
 keras_nlp/models/t5/t5_tokenizer_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keras_nlp/models/t5/t5_tokenizer_test.py b/keras_nlp/models/t5/t5_tokenizer_test.py
index 77ad734660..a7558e8b13 100644
--- a/keras_nlp/models/t5/t5_tokenizer_test.py
+++ b/keras_nlp/models/t5/t5_tokenizer_test.py
@@ -52,7 +52,7 @@ def test_smallest_preset(self):
             cls=T5Tokenizer,
             preset=preset,
             input_data=["The quick brown fox."],
-            expected_output=[[1996, 4248, 2829, 4419, 1012]],
+            expected_output=[[37, 1704, 4216, 3, 20400, 5]],
         )

     @pytest.mark.extra_large
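The old expected IDs ([1996, 4248, 2829, 4419, 1012]) come from BERT's WordPiece vocabulary, while T5 uses a SentencePiece vocabulary, so the two ID sequences cannot match. A minimal sketch of how the corrected IDs can be regenerated from the tokenizer itself rather than copied by hand is below; the preset name "t5_small_multi" is an assumption here and should be replaced with whatever preset the test actually exercises.

```python
# Minimal sketch: regenerate the expected SentencePiece IDs directly
# from the T5 tokenizer instead of hard-coding them. The preset name
# "t5_small_multi" is an assumption, not taken from the patch above.
import keras_nlp

tokenizer = keras_nlp.models.T5Tokenizer.from_preset("t5_small_multi")

# Tokenize the same input the test uses; the printed IDs are what
# expected_output should contain for this vocabulary, e.g.
# [[37, 1704, 4216, 3, 20400, 5]].
print(tokenizer(["The quick brown fox."]))
```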