From a56c6ad42fa47aa1415dccdc8fa24f431a7bcc72 Mon Sep 17 00:00:00 2001
From: Yulong Lin
Date: Thu, 16 May 2024 13:17:25 +0800
Subject: [PATCH] Fix docstring mistakes in bundler.py

The output dimensions for one of the examples are wrong, since the
batch size is 2, not 1. Further, there is no need to import
RobertaClassificationHead for that example, as it is accessed via
torchtext.models.RobertaClassificationHead.
---
 torchtext/models/roberta/bundler.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/torchtext/models/roberta/bundler.py b/torchtext/models/roberta/bundler.py
index a50c9cd75d..c27bb3f275 100644
--- a/torchtext/models/roberta/bundler.py
+++ b/torchtext/models/roberta/bundler.py
@@ -39,7 +39,6 @@ class RobertaBundle:
 
     Example - Pretrained large xlmr encoder attached to un-initialized classification head
         >>> import torch, torchtext
-        >>> from torchtext.models import RobertaClassificationHead
         >>> from torchtext.functional import to_tensor
         >>> xlmr_large = torchtext.models.XLMR_LARGE_ENCODER
         >>> classifier_head = torchtext.models.RobertaClassificationHead(num_classes=2, input_dim = 1024)
@@ -49,7 +48,7 @@ class RobertaBundle:
         >>> model_input = to_tensor(transform(input_batch), padding_value=1)
         >>> output = model(model_input)
         >>> output.shape
-        torch.Size([1, 2])
+        torch.Size([2, 2])
 
     Example - User-specified configuration and checkpoint
         >>> from torchtext.models import RobertaEncoderConf, RobertaBundle, RobertaClassificationHead
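
Note (not part of the patch): a minimal doctest-style sketch of why the
corrected shape is torch.Size([2, 2]). The classification head emits one
row per input sentence and one column per class, and the example batch
holds two sentences; the random features tensor below is only a stand-in
for the encoder output, not taken from the docstring.

    >>> import torch
    >>> from torchtext.models import RobertaClassificationHead
    >>> head = RobertaClassificationHead(num_classes=2, input_dim=1024)
    >>> # Stand-in encoder output: batch of 2, sequence length 6, hidden dim 1024
    >>> features = torch.randn(2, 6, 1024)
    >>> # The head classifies from the first token, giving [batch_size, num_classes]
    >>> head(features).shape
    torch.Size([2, 2])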