generated from fastai/nbdev_template
-
Notifications
You must be signed in to change notification settings - Fork 1.6k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
1f344c9
commit 9b4c4c1
Showing
5 changed files
with
197 additions
and
10 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,94 @@ | ||
# Copyright 2025 The HuggingFace Team. All rights reserved. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
|
||
from fastapi import FastAPI, HTTPException | ||
from pydantic import BaseModel | ||
from transformers import AutoModelForCausalLM | ||
import torch | ||
import argparse | ||
import uvicorn | ||
import argparse | ||
from trl import ModelConfig | ||
""" | ||
Usage | ||
python trl/models/remote_model_app.py --model_name deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B --port 8000 | ||
""" | ||
|
||
app = FastAPI() | ||
model = None | ||
|
||
class ForwardPassRequest(BaseModel):
    """JSON payload for the ``/forward/`` endpoint.

    Mirrors the tensor arguments of a causal-LM forward pass, serialized
    as nested Python lists so they can travel over JSON.
    """

    # Token ids, one inner list per sequence in the batch.
    input_ids: list[list[int]]
    # 1/0 attention mask; presumably the same (batch, seq_len) shape as
    # input_ids — TODO confirm against callers.
    attention_mask: list[list[int]]
    # Number of trailing positions whose logits the model should compute
    # (forwarded verbatim to the model's ``logits_to_keep`` kwarg).
    logits_to_keep: int
|
||
@app.post("/forward/")
async def forward_pass(request: ForwardPassRequest):
    """Run a no-grad forward pass on the loaded model and return its logits.

    Args:
        request: Batched token ids, attention mask, and the number of
            trailing positions whose logits should be returned.

    Returns:
        dict: ``{"logits": nested list}``, JSON-serializable.

    Raises:
        HTTPException: 503 if ``init_model`` has not been called yet.
    """
    # Fail with a clear 503 instead of an AttributeError on ``None``.
    if model is None:
        raise HTTPException(status_code=503, detail="Model is not initialized")

    device = model.device
    input_ids = torch.LongTensor(request.input_ids).to(device)
    attention_mask = torch.LongTensor(request.attention_mask).to(device)

    # Inference only — no autograd graph needed.
    with torch.no_grad():
        outputs = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            logits_to_keep=request.logits_to_keep,
        )

    # Move logits to CPU and convert to nested lists for JSON serialization.
    return {"logits": outputs.logits.cpu().tolist()}
|
||
@app.get("/health")
async def health_check():
    """Liveness probe for the server.

    Returns:
        dict: A dictionary indicating the server's health status.
    """
    status_report = {"status": "OK"}
    return status_report
|
||
def init_model(model_config: ModelConfig):
    """Load the causal LM described by ``model_config`` into the global ``model``.

    Args:
        model_config: TRL ``ModelConfig`` naming the checkpoint, revision,
            dtype, and attention implementation to load.

    Side effects:
        Assigns the loaded model to the module-level ``model`` global and
        moves it to CUDA when a GPU is available.
    """
    global model

    # Bug fix: this previously read the global ``model_args`` (defined only
    # under ``__main__``) instead of the function's own parameter, so calling
    # init_model from anywhere else raised NameError.
    torch_dtype = (
        model_config.torch_dtype
        if model_config.torch_dtype in ["auto", None]
        else getattr(torch, model_config.torch_dtype)
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_config.model_name_or_path,
        revision=model_config.model_revision,
        trust_remote_code=model_config.trust_remote_code,
        attn_implementation=model_config.attn_implementation,
        torch_dtype=torch_dtype,
    )

    if torch.cuda.is_available():
        model.to("cuda")
        print(f"Model '{model_config.model_name_or_path}' loaded on GPU")
    else:
        print(f"Model '{model_config.model_name_or_path}' loaded on CPU")
|
||
if __name__ == "__main__":
    # ModelConfig is already imported at module level; only TrlParser is new.
    from trl import TrlParser

    parser = TrlParser(ModelConfig)
    # parse_args_and_config returns a tuple of parsed dataclasses; we only
    # registered ModelConfig, so take the first element.
    model_args = parser.parse_args_and_config()[0]
    init_model(model_args)
    uvicorn.run(app)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,80 @@ | ||
# Copyright 2025 The HuggingFace Team. All rights reserved. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
import torch | ||
import requests | ||
from transformers.modeling_outputs import CausalLMOutputWithPast | ||
|
||
class RemoteModel:
    """Client-side proxy that forwards model calls to a remote inference server.

    The server is expected to expose the FastAPI app from
    ``trl/models/remote_model_app.py`` (routes ``/health`` and ``/forward/``).
    """

    def __init__(self, remote_model_url, timeout=None):
        """
        Args:
            remote_model_url (str): Base URL of the remote model server,
                e.g. ``"http://localhost:8000"``.
            timeout (float | None): Per-request timeout in seconds. ``None``
                (default) waits indefinitely, matching the previous behavior.

        Raises:
            Exception: If the server's health check does not return HTTP 200.
        """
        self.remote_model_url = remote_model_url
        self.timeout = timeout
        # Fail fast if the remote server is unreachable or unhealthy.
        health_check_url = f"{self.remote_model_url}/health"
        response = requests.get(health_check_url, timeout=self.timeout)
        if response.status_code != 200:
            raise Exception(f"Server health check failed: {response.text}")

    def __call__(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, logits_to_keep: int) -> CausalLMOutputWithPast:
        """
        Sends a request to the remote server to perform a forward pass.

        Args:
            input_ids (torch.Tensor): The input token IDs.
            attention_mask (torch.Tensor): The attention mask.
            logits_to_keep (int): The number of logits to keep.

        Returns:
            CausalLMOutputWithPast: Contains only the logits.
        """
        # Remember the caller's device so the logits come back on it.
        device = input_ids.device

        # Tensors -> nested lists for JSON serialization.
        request_body = {
            "input_ids": input_ids.tolist(),
            "attention_mask": attention_mask.tolist(),
            "logits_to_keep": logits_to_keep,
        }

        # Note the trailing slash: the server registers "/forward/". Posting
        # to "/forward" only worked via a 307 redirect (an extra round trip).
        response = requests.post(
            f"{self.remote_model_url}/forward/",
            json=request_body,
            timeout=self.timeout,
        )

        if response.status_code != 200:
            # Include the response body (not just the Response repr) so the
            # server-side error is actually visible to the caller.
            raise Exception(f"Error from server: {response.text}")

        logits_list = response.json()["logits"]
        logits = torch.tensor(logits_list).to(device)

        return CausalLMOutputWithPast(logits=logits)
|
||
if __name__ == "__main__":
    # Smoke test: point this script at a running remote_model_app server.
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--url", type=str, required=True)
    cli_args = arg_parser.parse_args()

    client = RemoteModel(cli_args.url)
    print(client.remote_model_url)

    demo_input_ids = torch.Tensor([[1, 2, 3]])
    demo_attention_mask = torch.Tensor([[1, 1, 1]])
    demo_logits_to_keep = 1
    print(client(demo_input_ids, demo_attention_mask, demo_logits_to_keep))
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters