diff --git a/keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py b/keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py
index 005b067532..7c5197d093 100644
--- a/keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py
+++ b/keras_hub/src/tokenizers/sentence_piece_tokenizer_trainer.py
@@ -1,5 +1,7 @@
 import io
 
+from keras_hub.src.utils.tensor_utils import assert_tf_libs_installed
+
 try:
     import sentencepiece as spm
     import tensorflow as tf
@@ -77,6 +79,7 @@ def compute_sentence_piece_proto(
     tf.Tensor([ 4 8 12 5 9 14 5 6 13 4 7 10 11 6 13], shape=(15,), dtype=int32)
     """
+    assert_tf_libs_installed("compute_sentence_piece_proto")
 
     if spm is None:
         raise ImportError(
diff --git a/keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py b/keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py
index 05e00eec95..3ba7fc4071 100644
--- a/keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py
+++ b/keras_hub/src/tokenizers/word_piece_tokenizer_trainer.py
@@ -1,5 +1,6 @@
 from keras_hub.src.api_export import keras_hub_export
 from keras_hub.src.tokenizers.word_piece_tokenizer import pretokenize
+from keras_hub.src.utils.tensor_utils import assert_tf_libs_installed
 
 try:
     import tensorflow as tf
@@ -117,6 +118,8 @@ def normalize_and_split(x):
     inputs.map(tokenizer.tokenize)
     ```
     """  # noqa: E501
+    assert_tf_libs_installed("compute_word_piece_vocabulary")
+
     # Read data files.
     if not isinstance(data, (list, tf.data.Dataset)):
         raise ValueError(
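
Illustrative note (not part of the diff above): a minimal sketch of how the new guard is expected to surface to callers, assuming `assert_tf_libs_installed` raises an `ImportError` naming the calling function when `tensorflow`/`tensorflow-text` are unavailable. The exact error text below is an assumption for illustration.

# Hypothetical sketch: calling a guarded trainer without the TF libraries
# installed should now fail fast with a function-specific ImportError,
# rather than failing later inside tf.data. Error wording is assumed.
from keras_hub.tokenizers import compute_word_piece_vocabulary

try:
    vocab = compute_word_piece_vocabulary(
        ["a quick brown fox"], vocabulary_size=20
    )
except ImportError as err:
    # Expected to mention "compute_word_piece_vocabulary" and the missing
    # tensorflow / tensorflow-text dependencies.
    print(err)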