From bf6de78caf5953dc278c99f9ebc438448c53be19 Mon Sep 17 00:00:00 2001 From: AssanaliAbu Date: Sun, 25 Jun 2023 09:03:26 +0600 Subject: [PATCH] initial commit --- autotests/helm/values.yaml | 4 +-- solution/helm/Dockerfile | 23 +++++++++++++++++ solution/helm/app.py | 47 ++++++++++++++++++++++++++++++++++ solution/helm/inference.py | 12 +++++++++ solution/helm/models.py | 22 ++++++++++++++++ solution/helm/requirements.txt | 6 +++++ 6 files changed, 112 insertions(+), 2 deletions(-) create mode 100644 solution/helm/Dockerfile create mode 100644 solution/helm/app.py create mode 100644 solution/helm/inference.py create mode 100644 solution/helm/models.py create mode 100644 solution/helm/requirements.txt diff --git a/autotests/helm/values.yaml b/autotests/helm/values.yaml index cda6a5e..79e56d6 100644 --- a/autotests/helm/values.yaml +++ b/autotests/helm/values.yaml @@ -25,8 +25,8 @@ global: activeDeadlineSeconds: 3600 # 1h env: - PARTICIPANT_NAME: - api_host: http://inca-smc-mlops-challenge-solution.default.svc.cluster.local/ + PARTICIPANT_NAME: AssanaliAbu + api_host: http://inca-smc-mlops-challenge-solution.default.svc.cluster.local/process # K6, do not edit! 
K6_PROMETHEUS_RW_SERVER_URL: http://kube-prometheus-stack-prometheus.monitoring.svc.cluster.local:9090/api/v1/write diff --git a/solution/helm/Dockerfile b/solution/helm/Dockerfile new file mode 100644 index 0000000..16b3351 --- /dev/null +++ b/solution/helm/Dockerfile @@ -0,0 +1,23 @@ +# Use a base image +FROM python:3.9-slim-buster + +# Set the working directory in the container +WORKDIR /app + +# Copy project files into the Docker image +COPY requirements.txt /app +COPY app.py /app +COPY inference.py /app +COPY models.py /app + +# Install dependencies +RUN pip install -r requirements.txt + +# Expose the port on which the application will run +EXPOSE 8000 + +# Set the command to run the application when the container starts +# CMD ["python", "app.py"] +CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"] + + diff --git a/solution/helm/app.py b/solution/helm/app.py new file mode 100644 index 0000000..30586c3 --- /dev/null +++ b/solution/helm/app.py @@ -0,0 +1,47 @@ +from fastapi import FastAPI, Request +from models import initialize_models, models +from inference import run_inference +import asyncio + +app = FastAPI() + +@app.on_event("startup") +async def startup_event(): + try: + await initialize_models() + except RuntimeError as e: + # Handle the initialization error and return an error response if needed + return {"error": str(e)} + + +@app.post("/process") +async def process_text(request: Request): + try: + data = await request.json() + text = data.strip('"') + + tasks = [] + for model_name in models.keys(): + task = asyncio.create_task(run_inference(model_name, text)) + tasks.append(task) + + results = await asyncio.gather(*tasks) + + response = {} + for model_name, result in zip(models.keys(), results): + response[model_name] = { + "score": result["score"], + "label": result["label"] + } + + return response + + except Exception as e: + # Handle any exceptions and return an error response + return {"error": str(e)} + + +if __name__ == 
"__main__": + import uvicorn + + uvicorn.run(app, host="localhost", port=8000) diff --git a/solution/helm/inference.py b/solution/helm/inference.py new file mode 100644 index 0000000..875993a --- /dev/null +++ b/solution/helm/inference.py @@ -0,0 +1,12 @@ +import asyncio +from models import initialized_models, models +from typing import Dict +from transformers import pipeline + +# Run inference on a specific model +async def run_inference(model_name: str, text: str) -> Dict[str, float]: + model = initialized_models[model_name] + output = await asyncio.to_thread(model, text) + score = output[0]['score'] + label = output[0]['label'] + return {"score": score, "label": label} diff --git a/solution/helm/models.py b/solution/helm/models.py new file mode 100644 index 0000000..1d4761a --- /dev/null +++ b/solution/helm/models.py @@ -0,0 +1,22 @@ +import asyncio +from transformers import pipeline + +# Define the models and their corresponding tasks +models = { + "cardiffnlp": ("sentiment-analysis", "cardiffnlp/twitter-xlm-roberta-base-sentiment"), + "ivanlau": ("text-classification", "ivanlau/language-detection-fine-tuned-on-xlm-roberta-base"), + "svalabs": ("text-classification", "svalabs/twitter-xlm-roberta-crypto-spam"), + "EIStakovskii": ("text-classification", "EIStakovskii/xlm_roberta_base_multilingual_toxicity_classifier_plus"), + "jy46604790": ("text-classification", "jy46604790/Fake-News-Bert-Detect") +} + +# Initialize the models +initialized_models = {} + +async def initialize_models(): + try: + for model_name, (task, model) in models.items(): + initialized_models[model_name] = pipeline(task, model=model) + except Exception as e: + # Handle any exceptions and raise an error + raise RuntimeError(f"Failed to initialize models: {str(e)}") diff --git a/solution/helm/requirements.txt b/solution/helm/requirements.txt new file mode 100644 index 0000000..6ecb742 --- /dev/null +++ b/solution/helm/requirements.txt @@ -0,0 +1,6 @@ +fastapi==0.68.0 +transformers==4.27.3 
+uvicorn==0.15.0 +# asyncio is part of the Python standard library; do not install the PyPI backport +torch==1.9.0 +