Add characterization tests for basic langchain text responses and stream data. #676

Merged · 14 commits · Oct 26, 2023

1 change: 1 addition & 0 deletions packages/core/package.json
@@ -86,6 +86,7 @@
"eslint": "^7.32.0",
"eslint-config-vercel-ai": "workspace:*",
"jest": "29.2.1",
"langchain": "0.0.172",
"ts-jest": "29.0.3",
"tsup": "^6.7.0",
"typescript": "5.1.3"
145 changes: 145 additions & 0 deletions packages/core/streams/langchain-stream.test.ts
@@ -0,0 +1,145 @@
import { createClient } from '../tests/utils/mock-client';
import { setup } from '../tests/utils/mock-service';

describe('LangchainStream', () => {
let server: ReturnType<typeof setup>;
beforeAll(() => {
server = setup(3031);
});
afterAll(() => {
server.teardown();
});

jest.mock('uuid', () => {
let count = 0;
return {
v4: () => {
return `uuid-${count++}`;
},
};
});

const { LangChainStream, StreamingTextResponse, experimental_StreamData } =
require('.') as typeof import('.');
const { ChatOpenAI } =
require('langchain/chat_models/openai') as typeof import('langchain/chat_models/openai');
const { HumanMessage } =
require('langchain/schema') as typeof import('langchain/schema');

function readAllChunks(response: Response) {
return createClient(response).readAll();
}

it('should be able to parse SSE and receive the streamed response', async () => {
const { stream, handlers } = LangChainStream();

const llm = new ChatOpenAI({
streaming: true,
openAIApiKey: 'fake',
configuration: {
baseURL: server.api,
defaultHeaders: {
'x-mock-service': 'openai',
'x-mock-type': 'chat',
'x-flush-delay': '5',
},
},
});

llm.call([new HumanMessage('hello')], {}, [handlers]).catch(console.error);

const response = new StreamingTextResponse(stream);

expect(await readAllChunks(response)).toEqual([
'',
Review comment (Collaborator, author): With Langchain, there is an empty text chunk at the beginning and the end. Is this desired?

Reply (Member): No, but it shouldn't be a problem and not worth investigating until we finalize the complex mode API imo.

'Hello',
',',
' world',
'.',
'',
]);
});
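
If those empty chunks ever needed to be dropped, a small transform in front of the response would do it. A minimal sketch, assuming the stream is available as decoded text chunks (dropEmptyTextChunks is a hypothetical helper, not part of this PR or the SDK):

// Hypothetical helper, not part of this PR: filters out the empty
// text chunks LangChain emits at the start and end of a completion.
// Assumes a stream of decoded string chunks; the SDK's byte stream
// would need to be decoded first.
function dropEmptyTextChunks(
  source: ReadableStream<string>,
): ReadableStream<string> {
  return source.pipeThrough(
    new TransformStream<string, string>({
      transform(chunk, controller) {
        // Forward only chunks that carry actual text.
        if (chunk.length > 0) {
          controller.enqueue(chunk);
        }
      },
    }),
  );
}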

describe('StreamData protocol', () => {
it('should send text', async () => {
const data = new experimental_StreamData();

const { stream, handlers } = LangChainStream({
onFinal() {
data.close();
},
experimental_streamData: true,
});

const llm = new ChatOpenAI({
streaming: true,
openAIApiKey: 'fake',
configuration: {
baseURL: server.api,
defaultHeaders: {
'x-mock-service': 'openai',
'x-mock-type': 'chat',
'x-flush-delay': '5',
},
},
});

llm
.call([new HumanMessage('hello')], {}, [handlers])
.catch(console.error);

const response = new StreamingTextResponse(stream, {}, data);

expect(await readAllChunks(response)).toEqual([
'0:""\n',
'0:"Hello"\n',
'0:","\n',
'0:" world"\n',
'0:"."\n',
'0:""\n',
]);
});

it('should send text and data', async () => {
const data = new experimental_StreamData();

data.append({ t1: 'v1' });

const { stream, handlers } = LangChainStream({
onFinal() {
data.close();
},
experimental_streamData: true,
});

const llm = new ChatOpenAI({
streaming: true,
openAIApiKey: 'fake',
configuration: {
baseURL: server.api,
defaultHeaders: {
'x-mock-service': 'openai',
'x-mock-type': 'chat',
'x-flush-delay': '5',
},
},
});

llm
.call([new HumanMessage('hello')], {}, [handlers])
.catch(console.error);

const response = new StreamingTextResponse(stream, {}, data);

expect(await readAllChunks(response)).toEqual([
'2:"[{\\"t1\\":\\"v1\\"}]"\n',
'0:""\n',
'0:"Hello"\n',
'0:","\n',
'0:" world"\n',
'0:"."\n',
'0:""\n',
]);
});
});
});
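
The framing asserted in these tests is the experimental stream data protocol: each frame is a numeric type prefix, a colon, a JSON payload, and a trailing newline, with prefix 0 carrying a text delta and prefix 2 carrying the JSON-encoded array appended via experimental_StreamData. A minimal sketch of a frame parser, purely to illustrate the format the expectations above encode (parseFrame is a hypothetical helper, not part of the SDK):

// Hypothetical illustration of the frame format asserted above.
// A frame looks like 0:"Hello"\n (text delta) or
// 2:"[{\"t1\":\"v1\"}]"\n (stream data payload).
function parseFrame(frame: string): { kind: 'text' | 'data'; value: unknown } {
  const colon = frame.indexOf(':');
  const prefix = frame.slice(0, colon);
  // JSON.parse tolerates the trailing newline.
  const payload = JSON.parse(frame.slice(colon + 1));
  if (prefix === '0') {
    return { kind: 'text', value: payload }; // e.g. "Hello"
  }
  if (prefix === '2') {
    // Data frames are double-encoded: a JSON string containing JSON.
    return { kind: 'data', value: JSON.parse(payload as string) }; // e.g. [{ t1: 'v1' }]
  }
  throw new Error(`unknown frame prefix: ${prefix}`);
}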
16 changes: 11 additions & 5 deletions packages/core/tests/utils/mock-service.ts
@@ -10,6 +10,7 @@ async function flushDataToResponse(
res: ServerResponse,
chunks: { value: object }[],
suffix?: string,
delayInMs = 5,
) {
let resolve = () => {};
let waitForDrain = new Promise<void>(res => (resolve = res));
@@ -26,7 +27,7 @@
await waitForDrain;
}

await new Promise(r => setTimeout(r, 100));
await new Promise(r => setTimeout(r, delayInMs));
}
if (suffix) {
const data = `data: ${suffix}\n\n`;
@@ -36,12 +37,15 @@
res.end();
}

export const setup = () => {
export const setup = (port = 3030) => {
let recentFlushed: any[] = [];

const server = createServer((req, res) => {
const service = req.headers['x-mock-service'] || 'openai';
const type = req.headers['x-mock-type'] || 'chat' || 'func_call';
const flushDelayHeader = req.headers['x-flush-delay'];
const flushDelayInMs =
flushDelayHeader === undefined ? undefined : +flushDelayHeader;

switch (type) {
case 'func_call':
@@ -74,6 +78,7 @@ export const setup = () => {
),
),
'[DONE]',
flushDelayInMs,
);
break;
default:
@@ -105,6 +110,7 @@ export const setup = () => {
),
),
'[DONE]',
flushDelayInMs,
);
break;
default:
@@ -116,11 +122,11 @@
}
});

server.listen(3030);
server.listen(port);

return {
port: 3030,
api: 'http://localhost:3030',
port,
api: `http://localhost:${port}`,
teardown: () => {
server.close();
},
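
For reference, this is roughly how the tests above drive the reworked mock service: each suite can claim its own port, and the x-flush-delay header overrides the pause between flushed chunks that used to be hard-coded at 100 ms. A short usage sketch assembled from the code in this PR (the import path depends on where the test file lives):

import { setup } from './mock-service';

// Run the mock on a non-default port so parallel suites don't collide
// (setup() without arguments still listens on 3030).
const server = setup(3031);

// Ask the mock to flush a chunk every 5 ms via the new header; when it
// is absent, flushDataToResponse falls back to its default (5 ms here).
const response = await fetch(server.api, {
  headers: {
    'x-mock-service': 'openai',
    'x-mock-type': 'chat',
    'x-flush-delay': '5',
  },
});

// ...consume response.body, then shut the server down.
server.teardown();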