Skip to content

Commit

Permalink
llms: Add fake package (#935)
Browse files Browse the repository at this point in the history
* feat: add fake package
  • Loading branch information
devalexandre authored Sep 13, 2024
1 parent 17d9e48 commit 862451a
Show file tree
Hide file tree
Showing 3 changed files with 289 additions and 0 deletions.
88 changes: 88 additions & 0 deletions docs/docs/modules/model_io/models/llms/Integrations/fake.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
---
sidebar_label: Fake LLM
---

# Fake LLM

## Overview

This documentation provides an overview of the `fake` package, which offers a simulated implementation of a Large Language Model (LLM) for testing purposes in Go applications.

## Installation

To use the `fake` package, import it into your Go project:

```bash
go get github.com/tmc/langchaingo
```



## Prerequisites
Ensure you have the Go programming language installed on your machine (version 1.15 or higher is recommended).

## Example Usage
Here is an example demonstrating how to use the fake package:


```go
package main

import (
"context"
"fmt"
"log"

"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/fake"
)

func main() {
// Creating a fake LLM with initial responses.
responses := []string{
"Hello!",
"How are you?",
"I'm fine, thanks.",
}
llm := fake.NewFakeLLM(responses)

// Calling the fake LLM with a prompt.
ctx := context.Background()
response, err := llm.Call(ctx, "Hi there!")
if err != nil {
fmt.Printf("Error calling LLM: %v\n", err)
} else {
fmt.Println("LLM Response:", response)
}

// Adding a new response and testing again.
llm.AddResponse("Goodbye!")
response, err = llm.Call(ctx, "See you later!")
if err != nil {
fmt.Printf("Error calling LLM: %v\n", err)
} else {
fmt.Println("LLM Response:", response)
}
}
```

# API Reference
`NewFakeLLM(responses []string) *LLM`

Creates a new instance of the fake LLM with the provided responses.

`LLM.Call(ctx context.Context, prompt string) (string, error)`

Simulates calling the model with a specific prompt and returns the next configured response, cycling back to the first response once all have been served.

`LLM.Reset()`

Resets the fake LLM, allowing responses to cycle through again.

`LLM.AddResponse(response string)`

Adds a new response to the list of possible responses of the fake LLM.

# Purpose

The fake package is designed to facilitate testing of applications that interact with large language models, without relying on real model implementations. It helps validate application logic and behavior in a controlled environment.
57 changes: 57 additions & 0 deletions llms/fake/fakellm.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
package fake

import (
"context"
"errors"

"github.com/tmc/langchaingo/llms"
)

// LLM is a fake model implementation that serves a fixed list of canned
// responses in order, cycling back to the start once they are exhausted.
// It is intended purely for testing code that depends on an LLM.
type LLM struct {
	responses []string // canned responses, served in order
	index     int      // position of the next response to serve
}

// NewFakeLLM builds a fake LLM that will serve the given responses in order.
func NewFakeLLM(responses []string) *LLM {
	// index starts at its zero value, pointing at the first response.
	return &LLM{responses: responses}
}

// GenerateContent returns the next canned response as a single-choice
// content response, wrapping back to the first response after the last
// one has been served. It errors if no responses were configured.
func (f *LLM) GenerateContent(_ context.Context, _ []llms.MessageContent, _ ...llms.CallOption) (*llms.ContentResponse, error) {
	if len(f.responses) == 0 {
		return nil, errors.New("no responses configured")
	}
	// Wrap around once every configured response has been used.
	if f.index >= len(f.responses) {
		f.index = 0
	}
	choice := &llms.ContentChoice{Content: f.responses[f.index]}
	f.index++
	return &llms.ContentResponse{Choices: []*llms.ContentChoice{choice}}, nil
}

// Call sends a single human prompt to the fake model and returns the text
// of the first choice produced by GenerateContent.
func (f *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
	message := llms.MessageContent{
		Role:  llms.ChatMessageTypeHuman,
		Parts: []llms.ContentPart{llms.TextContent{Text: prompt}},
	}
	resp, err := f.GenerateContent(ctx, []llms.MessageContent{message}, options...)
	if err != nil {
		return "", err
	}
	if len(resp.Choices) < 1 {
		return "", errors.New("empty response from model")
	}
	return resp.Choices[0].Content, nil
}

// Reset rewinds the fake LLM so the next call serves the first response again.
func (f *LLM) Reset() {
	f.index = 0
}

// AddResponse appends a new response to the end of the list of canned
// responses; it will be served after the currently remaining responses.
func (f *LLM) AddResponse(response string) {
	f.responses = append(f.responses, response)
}
144 changes: 144 additions & 0 deletions llms/fake/fakellm_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
package fake

import (
"context"
"testing"

"github.com/tmc/langchaingo/chains"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/memory"
)

// TestFakeLLM_CallMethod verifies that Call serves the configured responses
// in order and wraps back to the first response after the last one.
func TestFakeLLM_CallMethod(t *testing.T) {
	t.Parallel()
	responses := setupResponses()
	fakeLLM := NewFakeLLM(responses)
	ctx := context.Background()

	// The fourth call must wrap around to the first response. Deriving the
	// expected value from the fixture keeps failure messages accurate (the
	// previous version hard-coded them and silently discarded Call errors).
	expected := []string{responses[0], responses[1], responses[2], responses[0]}
	for i, want := range expected {
		output, err := fakeLLM.Call(ctx, "Teste")
		if err != nil {
			t.Fatalf("call %d: unexpected error: %v", i+1, err)
		}
		if output != want {
			t.Errorf("call %d: expected %q, got %q", i+1, want, output)
		}
	}
}

// TestFakeLLM_GenerateContentMethod verifies that GenerateContent serves
// each configured response in order.
func TestFakeLLM_GenerateContentMethod(t *testing.T) {
	t.Parallel()
	responses := setupResponses()
	fakeLLM := NewFakeLLM(responses)
	ctx := context.Background()
	msg := llms.MessageContent{
		Role:  llms.ChatMessageTypeHuman,
		Parts: []llms.ContentPart{llms.TextContent{Text: "Teste"}},
	}

	// Loop over the fixture instead of copy-pasting three assertions: the
	// original third assertion reported "Expected 'Resposta 1'" while
	// checking responses[2]. Fatalf on error also prevents a nil-response
	// dereference when a call fails.
	for i, want := range responses {
		resp, err := fakeLLM.GenerateContent(ctx, []llms.MessageContent{msg})
		if err != nil {
			t.Fatalf("call %d: unexpected error: %v", i+1, err)
		}
		if len(resp.Choices) < 1 {
			t.Fatalf("call %d: expected at least one choice", i+1)
		}
		if resp.Choices[0].Content != want {
			t.Errorf("call %d: expected %q, got %q", i+1, want, resp.Choices[0].Content)
		}
	}
}

// TestFakeLLM_ResetMethod verifies that Reset rewinds the response cursor
// to the first response.
func TestFakeLLM_ResetMethod(t *testing.T) {
	t.Parallel()
	responses := setupResponses()
	fakeLLM := NewFakeLLM(responses)
	ctx := context.Background()

	// Advance past the first response first; resetting a freshly-created
	// LLM (index already 0) would not exercise Reset at all.
	if _, err := fakeLLM.Call(ctx, "Teste"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	fakeLLM.Reset()
	if output, err := fakeLLM.Call(ctx, "Teste"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	} else if output != responses[0] {
		t.Errorf("expected %q after Reset, got %q", responses[0], output)
	}
}

// TestFakeLLM_AddResponseMethod verifies that a response appended with
// AddResponse is served after the original responses are exhausted.
func TestFakeLLM_AddResponseMethod(t *testing.T) {
	t.Parallel()
	responses := setupResponses()
	fakeLLM := NewFakeLLM(responses)
	ctx := context.Background()

	fakeLLM.AddResponse("Resposta 4")
	fakeLLM.Reset()

	// Drain the three original responses so the appended one comes next.
	for range responses {
		if _, err := fakeLLM.Call(ctx, "Teste"); err != nil {
			t.Errorf("Unexpected error: %v", err)
		}
	}

	if output, _ := fakeLLM.Call(ctx, "Teste"); output != "Resposta 4" {
		t.Errorf("Expected 'Resposta 4', got '%s'", output)
	}
}

// TestFakeLLM_WithChain verifies the fake LLM works as the model inside a
// conversation chain with buffered memory.
func TestFakeLLM_WithChain(t *testing.T) {
	t.Parallel()
	responses := setupResponses()
	fakeLLM := NewFakeLLM(responses)
	ctx := context.Background()

	// Append a fourth response and advance the cursor so the chain's single
	// call receives exactly that response.
	fakeLLM.AddResponse("My name is Alexandre")
	NextToResponse(fakeLLM, 4)

	conversation := chains.NewConversation(fakeLLM, memory.NewConversationBuffer())
	out, err := chains.Run(ctx, conversation, "What's my name? How many times did I ask this?")
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}
	if out != "My name is Alexandre" {
		t.Errorf("Expected 'My name is Alexandre', got '%s'", out)
	}
}

// setupResponses returns the canned responses shared by the tests.
func setupResponses() []string {
	return []string{"Resposta 1", "Resposta 2", "Resposta 3"}
}

// NextToResponse advances the fake LLM so that its next call returns the
// n-th configured response; it does so by making n-1 throwaway calls.
func NextToResponse(fakeLLM *LLM, n int) {
	ctx := context.Background()
	for calls := 0; calls < n-1; calls++ {
		if _, err := fakeLLM.Call(ctx, "Teste"); err != nil {
			panic(err)
		}
	}
}

0 comments on commit 862451a

Please sign in to comment.