Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

feat: multi-agent AI XBlock #9

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions ai_eval/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,4 @@

from .shortanswer import ShortAnswerAIEvalXBlock
from .coding_ai_eval import CodingAIEvalXBlock
from .multiagent import MultiAgentAIEvalXBlock
46 changes: 5 additions & 41 deletions ai_eval/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,10 @@

from django.utils.translation import gettext_noop as _
from xblock.core import XBlock
from xblock.fields import String, Scope, Dict
from xblock.fields import String, Scope
from xblock.validation import ValidationMessage


from .llm import SupportedModels
from .llm import SupportedModels, get_llm_response

try:
from xblock.utils.studio_editable import StudioEditableXBlockMixin
Expand All @@ -28,9 +27,6 @@ class AIEvalXBlock(StudioEditableXBlockMixin, XBlock):
Base class for Xblocks with AI evaluation
"""

USER_KEY = "USER"
LLM_KEY = "LLM"

loader = ResourceLoader(__name__)

icon_class = "problem"
Expand Down Expand Up @@ -59,37 +55,8 @@ class AIEvalXBlock(StudioEditableXBlockMixin, XBlock):
default=SupportedModels.GPT4O.value,
)

evaluation_prompt = String(
display_name=_("Evaluation prompt"),
help=_(
"Enter the evaluation prompt given to the model."
" The question will be inserted right after it."
" The student's answer would then follow the question. Markdown format can be used."
),
default="You are a teacher. Evaluate the student's answer for the following question:",
multiline_editor=True,
scope=Scope.settings,
)
question = String(
display_name=_("Question"),
help=_(
"Enter the question you would like the students to answer."
" Markdown format can be used."
),
default="",
multiline_editor=True,
scope=Scope.settings,
)

messages = Dict(
help=_("Dictionary with chat messages"),
scope=Scope.user_state,
default={USER_KEY: [], LLM_KEY: []},
)
editable_fields = (
"display_name",
"evaluation_prompt",
"question",
"model",
"model_api_key",
"model_api_url",
Expand Down Expand Up @@ -138,9 +105,6 @@ def validate_field_data(self, validation, data):
)
)

if not data.question:
validation.add(
ValidationMessage(
ValidationMessage.ERROR, _("Question field is mandatory")
)
)
def get_llm_response(self, messages):
    """Query the configured LLM and return its response.

    Delegates to the module-level ``get_llm_response`` helper, supplying
    this block's configured model, API key, and API URL.

    Args:
        messages: Chat messages to send to the model (format defined by
            the ``.llm`` helper — presumably a list of role/content
            dicts; confirm against ``.llm.get_llm_response``).

    Returns:
        The helper's response value, unchanged.
    """
    return get_llm_response(
        self.model,
        self.model_api_key,
        messages,
        self.model_api_url,
    )
47 changes: 38 additions & 9 deletions ai_eval/coding_ai_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
from xblock.fields import Dict, Scope, String
from xblock.validation import ValidationMessage

from .llm import get_llm_response
from .base import AIEvalXBlock
from .utils import (
submit_code,
Expand Down Expand Up @@ -62,13 +61,42 @@ class CodingAIEvalXBlock(AIEvalXBlock):
default=LanguageLabels.Python,
Scope=Scope.settings,
)

evaluation_prompt = String(
display_name=_("Evaluation prompt"),
help=_(
"Enter the evaluation prompt given to the model."
" The question will be inserted right after it."
" The student's answer would then follow the question. Markdown format can be used."
),
default="You are a teacher. Evaluate the student's answer for the following question:",
multiline_editor=True,
scope=Scope.settings,
)

question = String(
display_name=_("Question"),
help=_(
"Enter the question you would like the students to answer."
" Markdown format can be used."
),
default="",
multiline_editor=True,
scope=Scope.settings,
)

messages = Dict(
help=_("Dictionary with messages"),
scope=Scope.user_state,
default={USER_RESPONSE: "", AI_EVALUATION: "", CODE_EXEC_RESULT: {}},
)

editable_fields = AIEvalXBlock.editable_fields + ("judge0_api_key", "language")
editable_fields = AIEvalXBlock.editable_fields + (
"question",
"evaluation_prompt",
"judge0_api_key",
"language",
)

def resource_string(self, path):
"""Handy helper for getting resources from our kit."""
Expand Down Expand Up @@ -134,6 +162,13 @@ def validate_field_data(self, validation, data):

super().validate_field_data(validation, data)

if not data.question:
validation.add(
ValidationMessage(
ValidationMessage.ERROR, _("Question field is mandatory")
)
)

if data.language != LanguageLabels.HTML_CSS and not data.judge0_api_key:
validation.add(
ValidationMessage(
Expand Down Expand Up @@ -185,13 +220,7 @@ def get_response(self, data, suffix=""): # pylint: disable=unused-argument
]

try:
response = get_llm_response(
self.model,
self.model_api_key,
messages,
self.model_api_url,
)

response = self.get_llm_response(messages)
except Exception as e:
traceback.print_exc()
logger.error(
Expand Down
Loading
Loading