Skip to content
This repository has been archived by the owner on Jun 25, 2023. It is now read-only.

initial commit #43

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions autotests/helm/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ global:
activeDeadlineSeconds: 3600 # 1h

env:
PARTICIPANT_NAME: <REPLACE_WITH_USERNAME>
api_host: http://inca-smc-mlops-challenge-solution.default.svc.cluster.local/<REPLACE_WITH_ENDPOINT>
PARTICIPANT_NAME: AssanaliAbu
api_host: http://inca-smc-mlops-challenge-solution.default.svc.cluster.local/process

# K6, do not edit!
K6_PROMETHEUS_RW_SERVER_URL: http://kube-prometheus-stack-prometheus.monitoring.svc.cluster.local:9090/api/v1/write
Expand Down
23 changes: 23 additions & 0 deletions solution/helm/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Slim Python base image
FROM python:3.8-slim-buster

# Set the working directory in the container
WORKDIR /app

# Install dependencies first, in their own layer: this layer is cached
# and only rebuilt when requirements.txt changes, not on every code edit.
COPY requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code
COPY app.py inference.py models.py /app/

# Expose the port on which the application will run
EXPOSE 8000

# Run the API server when the container starts
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]


47 changes: 47 additions & 0 deletions solution/helm/app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
from fastapi import FastAPI, Request
from models import initialize_models, models
from inference import run_inference
import asyncio

# FastAPI application instance; served by uvicorn (see the container CMD).
app = FastAPI()

@app.on_event("startup")
async def startup_event():
    """Load every configured model pipeline before serving traffic.

    Raises:
        RuntimeError: propagated from ``initialize_models`` when any model
            fails to load.

    The original handler caught RuntimeError and returned an error dict,
    but FastAPI ignores return values of startup handlers, so the app
    would silently start with no models loaded and every request would
    fail. Letting the exception propagate makes the app fail fast instead.
    """
    await initialize_models()


@app.post("/process")
async def process_text(request: Request):
    """Run the request text through every registered model concurrently.

    Expects the request body to be a bare JSON string, e.g. ``"some text"``.

    Returns:
        ``{model_name: {"score": float, "label": str}}`` on success, or
        ``{"error": message}`` on failure (with HTTP 200, matching the
        original contract).
    """
    try:
        data = await request.json()
        # The body must be a JSON string. The original called
        # data.strip('"') unconditionally, so a JSON object/array body
        # raised AttributeError and surfaced as an opaque error message.
        if not isinstance(data, str):
            return {"error": "request body must be a JSON string"}
        text = data.strip('"')

        # Fan out one inference task per model and wait for all of them.
        tasks = [
            asyncio.create_task(run_inference(model_name, text))
            for model_name in models
        ]
        results = await asyncio.gather(*tasks)

        return {
            model_name: {"score": result["score"], "label": result["label"]}
            for model_name, result in zip(models.keys(), results)
        }

    except Exception as e:
        # NOTE(review): errors are returned with HTTP 200 to preserve the
        # original behavior; consider HTTPException for proper status codes.
        return {"error": str(e)}


if __name__ == "__main__":
    # Local development entry point; in the container, uvicorn is started
    # directly by the image CMD instead.
    import uvicorn

    uvicorn.run(app, host="localhost", port=8000)
12 changes: 12 additions & 0 deletions solution/helm/inference.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
import asyncio
from models import initialized_models, models
from typing import Dict
from transformers import pipeline

async def run_inference(model_name: str, text: str) -> Dict[str, float]:
    """Run *text* through the named pipeline without blocking the event loop.

    Args:
        model_name: key into the module-level ``initialized_models`` dict.
        text: input passed directly to the pipeline.

    Returns:
        A dict holding the top prediction's ``"score"`` and ``"label"``.
    """
    pipe = initialized_models[model_name]
    # transformers pipelines are synchronous; off-load to a worker thread.
    predictions = await asyncio.to_thread(pipe, text)
    top = predictions[0]
    return {"score": top["score"], "label": top["label"]}
22 changes: 22 additions & 0 deletions solution/helm/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import asyncio
from transformers import pipeline

# Registry of models to serve: short name -> (pipeline task, HF checkpoint).
# The short name is also the key used in the /process response.
models = {
    "cardiffnlp": ("sentiment-analysis", "cardiffnlp/twitter-xlm-roberta-base-sentiment"),
    "ivanlau": ("text-classification", "ivanlau/language-detection-fine-tuned-on-xlm-roberta-base"),
    "svalabs": ("text-classification", "svalabs/twitter-xlm-roberta-crypto-spam"),
    "EIStakovskii": ("text-classification", "EIStakovskii/xlm_roberta_base_multilingual_toxicity_classifier_plus"),
    "jy46604790": ("text-classification", "jy46604790/Fake-News-Bert-Detect")
}

# Loaded pipeline objects, populated by initialize_models() at startup;
# keyed by the same short names as `models`.
initialized_models = {}

async def initialize_models():
    """Build a transformers pipeline for each entry in ``models``.

    Populates the module-level ``initialized_models`` dict, keyed by the
    same short names as ``models``.

    Raises:
        RuntimeError: if any pipeline fails to load (original cause chained).
    """
    try:
        for model_name, (task, checkpoint) in models.items():
            # pipeline() downloads weights and is blocking; run it in a
            # worker thread so the event loop stays responsive during startup.
            initialized_models[model_name] = await asyncio.to_thread(
                pipeline, task, model=checkpoint
            )
    except Exception as e:
        # Chain the original exception so the real failure is visible.
        raise RuntimeError(f"Failed to initialize models: {str(e)}") from e
6 changes: 6 additions & 0 deletions solution/helm/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
fastapi==0.68.0
transformers==4.27.3
uvicorn==0.15.0
# NOTE: the PyPI "asyncio" package (formerly pinned here as asyncio>=3.4.3)
# is an obsolete Python 3.3 backport; asyncio ships with the standard
# library and installing the backport can shadow it and break Python 3.
torch==1.9.0