Starcoder2 model #28

Open · wants to merge 7 commits into base: hf_main
2 changes: 2 additions & 0 deletions docs/source/en/_toctree.yml
@@ -478,6 +478,8 @@
       title: Splinter
     - local: model_doc/squeezebert
       title: SqueezeBERT
+    - local: model_doc/starcoder2
+      title: Starcoder2
     - local: model_doc/stablelm
       title: StableLm
     - local: model_doc/switch_transformers
53 changes: 53 additions & 0 deletions docs/source/en/model_doc/starcoder2.md
@@ -0,0 +1,53 @@
<!--Copyright 2024 Mistral AI and The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Starcoder2

## Overview

The Starcoder2 model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
<INSERT SHORT SUMMARY HERE>

The abstract from the paper is the following:

*<INSERT PAPER ABSTRACT HERE>*

Tips:

<INSERT TIPS ABOUT MODEL HERE>

This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).
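Below is a minimal text-generation sketch. The checkpoint name is an assumption (this PR still carries placeholder
links), so substitute whichever Starcoder2 checkpoint is eventually published:

```python
# Minimal generation sketch; "bigcode/starcoder2-3b" is an assumed checkpoint
# name, since the PR does not pin an official one yet.
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "bigcode/starcoder2-3b"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```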


## Starcoder2Config

[[autodoc]] Starcoder2Config

## Starcoder2Model

[[autodoc]] Starcoder2Model
- forward

## Starcoder2ForCausalLM

[[autodoc]] Starcoder2ForCausalLM
- forward

## Starcoder2ForSequenceClassification

[[autodoc]] Starcoder2ForSequenceClassification
- forward
16 changes: 16 additions & 0 deletions src/transformers/__init__.py
@@ -806,6 +806,7 @@
         "SqueezeBertConfig",
         "SqueezeBertTokenizer",
     ],
+    "models.starcoder2": ["STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Starcoder2Config"],
     "models.stablelm": ["STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP", "StableLmConfig"],
     "models.swiftformer": [
         "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
@@ -3252,6 +3253,14 @@
             "SqueezeBertPreTrainedModel",
         ]
     )
+    _import_structure["models.starcoder2"].extend(
+        [
+            "Starcoder2ForCausalLM",
+            "Starcoder2ForSequenceClassification",
+            "Starcoder2Model",
+            "Starcoder2PreTrainedModel",
+        ]
+    )
     _import_structure["models.stablelm"].extend(
         [
             "StableLmForCausalLM",
@@ -5561,6 +5570,7 @@
         SqueezeBertConfig,
         SqueezeBertTokenizer,
     )
+    from .models.starcoder2 import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP, Starcoder2Config
     from .models.stablelm import STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP, StableLmConfig
     from .models.swiftformer import (
         SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
@@ -7673,6 +7683,12 @@
             SqueezeBertModule,
             SqueezeBertPreTrainedModel,
         )
+        from .models.starcoder2 import (
+            Starcoder2ForCausalLM,
+            Starcoder2ForSequenceClassification,
+            Starcoder2Model,
+            Starcoder2PreTrainedModel,
+        )
         from .models.stablelm import (
             StableLmForCausalLM,
             StableLmForSequenceClassification,
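Taken together, the hunks above register Starcoder2 in the package's lazy import machinery: the import-structure
dict, the `TYPE_CHECKING` imports, and the runtime imports. A quick smoke test of the resulting top-level exports
(a sketch, assuming this PR's branch is installed):

```python
# Smoke test for the top-level exports added above; assumes this PR's branch
# of transformers is installed. Class names are taken from the diff itself.
import transformers

for name in (
    "Starcoder2Config",
    "Starcoder2Model",
    "Starcoder2ForCausalLM",
    "Starcoder2ForSequenceClassification",
):
    assert hasattr(transformers, name), f"missing top-level export: {name}"
```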
1 change: 1 addition & 0 deletions src/transformers/models/__init__.py
@@ -202,6 +202,7 @@
     speecht5,
     splinter,
     squeezebert,
+    starcoder2,
     stablelm,
     swiftformer,
     swin,
3 changes: 3 additions & 0 deletions src/transformers/models/auto/configuration_auto.py
@@ -210,6 +210,7 @@
         ("speecht5", "SpeechT5Config"),
         ("splinter", "SplinterConfig"),
         ("squeezebert", "SqueezeBertConfig"),
+        ("starcoder2", "Starcoder2Config"),
         ("stablelm", "StableLmConfig"),
         ("swiftformer", "SwiftFormerConfig"),
         ("swin", "SwinConfig"),
@@ -433,6 +434,7 @@
         ("speecht5", "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP"),
         ("splinter", "SPLINTER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
         ("squeezebert", "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
+        ("starcoder2", "STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
         ("stablelm", "STABLELM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
         ("swiftformer", "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
         ("swin", "SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
@@ -685,6 +687,7 @@
         ("speecht5", "SpeechT5"),
         ("splinter", "Splinter"),
         ("squeezebert", "SqueezeBERT"),
+        ("starcoder2", "Starcoder2"),
         ("stablelm", "StableLm"),
         ("swiftformer", "SwiftFormer"),
         ("swin", "Swin Transformer"),
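With `CONFIG_MAPPING_NAMES`, the archive-map table, and `MODEL_NAMES_MAPPING` all carrying a `starcoder2` row,
`AutoConfig` can resolve the model type by name. A minimal sketch, assuming this branch is installed:

```python
# Sketch: AutoConfig dispatches on the "starcoder2" model_type registered above.
from transformers import AutoConfig, Starcoder2Config

config = AutoConfig.for_model("starcoder2")
assert isinstance(config, Starcoder2Config)
print(config.model_type)  # "starcoder2"
```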
3 changes: 3 additions & 0 deletions src/transformers/models/auto/modeling_auto.py
@@ -200,6 +200,7 @@
         ("speecht5", "SpeechT5Model"),
         ("splinter", "SplinterModel"),
         ("squeezebert", "SqueezeBertModel"),
+        ("starcoder2", "Starcoder2Model"),
         ("stablelm", "StableLmModel"),
         ("swiftformer", "SwiftFormerModel"),
         ("swin", "SwinModel"),
@@ -461,6 +462,7 @@
         ("roformer", "RoFormerForCausalLM"),
         ("rwkv", "RwkvForCausalLM"),
         ("speech_to_text_2", "Speech2Text2ForCausalLM"),
+        ("starcoder2", "Starcoder2ForCausalLM"),
         ("stablelm", "StableLmForCausalLM"),
         ("transfo-xl", "TransfoXLLMHeadModel"),
         ("trocr", "TrOCRForCausalLM"),
@@ -808,6 +810,7 @@
         ("roc_bert", "RoCBertForSequenceClassification"),
         ("roformer", "RoFormerForSequenceClassification"),
         ("squeezebert", "SqueezeBertForSequenceClassification"),
+        ("starcoder2", "Starcoder2ForSequenceClassification"),
         ("stablelm", "StableLmForSequenceClassification"),
         ("t5", "T5ForSequenceClassification"),
         ("tapas", "TapasForSequenceClassification"),
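These three mappings wire Starcoder2 into the base, causal-LM, and sequence-classification auto classes, so the
auto factories can pick the right head from a config alone. A sketch, assuming torch and this branch are available:

```python
# Sketch: AutoModelForCausalLM builds the Starcoder2 causal-LM head from a
# config alone, via the mappings above. Sizes are deliberately tiny so the
# example is cheap to run; they are illustrative, not real checkpoint values.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.for_model(
    "starcoder2",
    num_hidden_layers=2,
    hidden_size=256,
    intermediate_size=1024,
    num_attention_heads=8,
    num_key_value_heads=2,
)
model = AutoModelForCausalLM.from_config(config)
print(type(model).__name__)  # Starcoder2ForCausalLM
```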
1 change: 1 addition & 0 deletions src/transformers/models/auto/tokenization_auto.py
@@ -391,6 +391,7 @@
             "squeezebert",
             ("SqueezeBertTokenizer", "SqueezeBertTokenizerFast" if is_tokenizers_available() else None),
         ),
+        ("starcoder2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
         ("stablelm", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
         (
             "switch_transformers",
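Rather than shipping a new tokenizer, Starcoder2 reuses the GPT-2 tokenizer classes, so `AutoTokenizer` will
return a `GPT2TokenizerFast` for Starcoder2 checkpoints. A sketch (the checkpoint name is an assumption, as above):

```python
# Sketch: AutoTokenizer resolves Starcoder2 checkpoints to the GPT-2 tokenizer
# classes mapped above; "bigcode/starcoder2-3b" is an assumed checkpoint name.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder2-3b")
print(type(tokenizer).__name__)  # GPT2TokenizerFast when tokenizers is installed
```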
62 changes: 62 additions & 0 deletions src/transformers/models/starcoder2/__init__.py
@@ -0,0 +1,62 @@
# Copyright 2024 Starcoder2 AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_starcoder2": ["STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Starcoder2Config"],
}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_starcoder2"] = [
        "Starcoder2ForCausalLM",
        "Starcoder2Model",
        "Starcoder2PreTrainedModel",
        "Starcoder2ForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_starcoder2 import STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP, Starcoder2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_starcoder2 import (
            Starcoder2ForCausalLM,
            Starcoder2ForSequenceClassification,
            Starcoder2Model,
            Starcoder2PreTrainedModel,
        )


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
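This file follows the library-wide lazy-module pattern: at runtime the module object is swapped for a
`_LazyModule`, so config-only imports do not eagerly pull in the torch-dependent modeling file. Roughly, under
that assumption:

```python
# Sketch of what the lazy structure above buys us: importing the config class
# alone does not import modeling_starcoder2 (and therefore torch) eagerly.
from transformers.models.starcoder2 import Starcoder2Config

config = Starcoder2Config()  # fine without the modeling module loaded
# Touching a modeling class is what triggers the heavier import:
# from transformers.models.starcoder2 import Starcoder2Model
```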
165 changes: 165 additions & 0 deletions src/transformers/models/starcoder2/configuration_starcoder2.py
@@ -0,0 +1,165 @@
# coding=utf-8
# Copyright 2024 Starcoder2 AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Starcoder2 model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

STARCODER2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json",
}


class Starcoder2Config(PretrainedConfig):
    r"""
    # TODO: Update ckpt name
    This is the configuration class to store the configuration of a [`Starcoder2Model`]. It is used to instantiate a
    Starcoder2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of a Starcoder2 model.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49152):
            Vocabulary size of the Starcoder2 model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`Starcoder2Model`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 30):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 24):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf).
        mlp_type (`str`, *optional*, defaults to `"default"`):
            The type of MLP block to use.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.018042):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
            The type of normalization layer to use.
        norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        bos_token_id (`int`, *optional*, defaults to 50256):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 50256):
            The id of the "end-of-sequence" token.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If not specified, will default to `None` (no sliding window).
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        residual_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio applied to the residual connections.
        embedding_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio applied to the token embeddings.
        use_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias terms in the linear layers.

    ```python
    >>> from transformers import Starcoder2Model, Starcoder2Config

    >>> # Initializing a Starcoder2 style configuration
    >>> configuration = Starcoder2Config()

    >>> # Initializing a model from the Starcoder2 style configuration
    >>> model = Starcoder2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "starcoder2"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=49152,
        hidden_size=3072,
        intermediate_size=12288,
        num_hidden_layers=30,
        num_attention_heads=24,
        num_key_value_heads=2,
        # TODO: Infer from hidden_act?
        mlp_type="default",
        hidden_act="gelu_pytorch_tanh",
        max_position_embeddings=4096,
        # TODO: Check init method.
        initializer_range=0.018042,
        norm_type="layer_norm",
        norm_epsilon=1e-5,
        use_cache=True,
        # TODO: Wrong defaults?
        bos_token_id=50256,
        eos_token_id=50256,
        rope_theta=10000.0,
        sliding_window=None,
        attention_dropout=0.0,
        residual_dropout=0.0,
        embedding_dropout=0.0,
        # TODO: Implement
        use_bias: bool = True,
        # TODO: Other dropouts?
        # TODO: Allow normal pos embeddings for santacoder/starcoder1?
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        self.use_bias = use_bias

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.mlp_type = mlp_type
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.norm_type = norm_type
        self.norm_epsilon = norm_epsilon
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.residual_dropout = residual_dropout
        self.embedding_dropout = embedding_dropout

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
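With the defaults above, attention is grouped-query: 24 query heads share 2 key/value heads (groups of 12), and
the per-head dimension works out to hidden_size / num_attention_heads = 3072 / 24 = 128. A quick check, assuming
this branch is installed:

```python
# Quick check of the GQA geometry implied by the defaults above
# (assumes this PR's branch of transformers is installed).
from transformers import Starcoder2Config

config = Starcoder2Config()
print(config.hidden_size // config.num_attention_heads)          # 128 head_dim
print(config.num_attention_heads // config.num_key_value_heads)  # 12 query heads per KV head
```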