Commit 7f58d9b

Add characterization tests for basic langchain text responses and stream data. (#676)

lgrammel authored Oct 26, 2023
1 parent 162c859
Showing 4 changed files with 522 additions and 26 deletions.
1 change: 1 addition & 0 deletions packages/core/package.json
@@ -86,6 +86,7 @@
"eslint": "^7.32.0",
"eslint-config-vercel-ai": "workspace:*",
"jest": "29.2.1",
"langchain": "0.0.172",
"ts-jest": "29.0.3",
"tsup": "^6.7.0",
"typescript": "5.1.3"
145 changes: 145 additions & 0 deletions packages/core/streams/langchain-stream.test.ts
@@ -0,0 +1,145 @@
import { createClient } from '../tests/utils/mock-client';
import { setup } from '../tests/utils/mock-service';

describe('LangchainStream', () => {
  let server: ReturnType<typeof setup>;
  beforeAll(() => {
    server = setup(3031);
  });
  afterAll(() => {
    server.teardown();
  });

  jest.mock('uuid', () => {
    let count = 0;
    return {
      v4: () => {
        return `uuid-${count++}`;
      },
    };
  });

  const { LangChainStream, StreamingTextResponse, experimental_StreamData } =
    require('.') as typeof import('.');
  const { ChatOpenAI } =
    require('langchain/chat_models/openai') as typeof import('langchain/chat_models/openai');
  const { HumanMessage } =
    require('langchain/schema') as typeof import('langchain/schema');

  function readAllChunks(response: Response) {
    return createClient(response).readAll();
  }

  it('should be able to parse SSE and receive the streamed response', async () => {
    const { stream, handlers } = LangChainStream();

    const llm = new ChatOpenAI({
      streaming: true,
      openAIApiKey: 'fake',
      configuration: {
        baseURL: server.api,
        defaultHeaders: {
          'x-mock-service': 'openai',
          'x-mock-type': 'chat',
          'x-flush-delay': '5',
        },
      },
    });

    llm.call([new HumanMessage('hello')], {}, [handlers]).catch(console.error);

    const response = new StreamingTextResponse(stream);

    expect(await readAllChunks(response)).toEqual([
      '',
      'Hello',
      ',',
      ' world',
      '.',
      '',
    ]);
  });

  describe('StreamData protocol', () => {
    it('should send text', async () => {
      const data = new experimental_StreamData();

      const { stream, handlers } = LangChainStream({
        onFinal() {
          data.close();
        },
        experimental_streamData: true,
      });

      const llm = new ChatOpenAI({
        streaming: true,
        openAIApiKey: 'fake',
        configuration: {
          baseURL: server.api,
          defaultHeaders: {
            'x-mock-service': 'openai',
            'x-mock-type': 'chat',
            'x-flush-delay': '5',
          },
        },
      });

      llm
        .call([new HumanMessage('hello')], {}, [handlers])
        .catch(console.error);

      const response = new StreamingTextResponse(stream, {}, data);

      expect(await readAllChunks(response)).toEqual([
        '0:""\n',
        '0:"Hello"\n',
        '0:","\n',
        '0:" world"\n',
        '0:"."\n',
        '0:""\n',
      ]);
    });

    it('should send text and data', async () => {
      const data = new experimental_StreamData();

      data.append({ t1: 'v1' });

      const { stream, handlers } = LangChainStream({
        onFinal() {
          data.close();
        },
        experimental_streamData: true,
      });

      const llm = new ChatOpenAI({
        streaming: true,
        openAIApiKey: 'fake',
        configuration: {
          baseURL: server.api,
          defaultHeaders: {
            'x-mock-service': 'openai',
            'x-mock-type': 'chat',
            'x-flush-delay': '5',
          },
        },
      });

      llm
        .call([new HumanMessage('hello')], {}, [handlers])
        .catch(console.error);

      const response = new StreamingTextResponse(stream, {}, data);

      expect(await readAllChunks(response)).toEqual([
        '2:"[{\\"t1\\":\\"v1\\"}]"\n',
        '0:""\n',
        '0:"Hello"\n',
        '0:","\n',
        '0:" world"\n',
        '0:"."\n',
        '0:""\n',
      ]);
    });
  });
});
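
The `0:` and `2:` prefixes in the expected chunks are the experimental stream-data framing: each chunk is a `TYPE:JSON` line terminated by `\n`, where type `0` carries a text delta and type `2` carries the data appended via `experimental_StreamData`, serialized as a JSON string of a JSON array (hence the escaped quotes in the `2:` chunk).

For context, the wiring these tests characterize is the same pattern an application route handler would use. A minimal sketch, assuming the package's public entry point (published as `ai`) and the same LangChain imports exercised above; the route shape and request body are hypothetical:

    import { LangChainStream, StreamingTextResponse } from 'ai';
    import { ChatOpenAI } from 'langchain/chat_models/openai';
    import { HumanMessage } from 'langchain/schema';

    export async function POST(req: Request) {
      const { prompt } = await req.json();

      // handlers are LangChain callbacks that forward streamed tokens into `stream`.
      const { stream, handlers } = LangChainStream();

      const llm = new ChatOpenAI({ streaming: true });

      // Fire and forget: the response streams while the call runs;
      // errors are logged rather than awaited, as in the tests above.
      llm.call([new HumanMessage(prompt)], {}, [handlers]).catch(console.error);

      return new StreamingTextResponse(stream);
    }
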
16 changes: 11 additions & 5 deletions packages/core/tests/utils/mock-service.ts
@@ -10,6 +10,7 @@ async function flushDataToResponse(
   res: ServerResponse,
   chunks: { value: object }[],
   suffix?: string,
+  delayInMs = 5,
 ) {
   let resolve = () => {};
   let waitForDrain = new Promise<void>(res => (resolve = res));
@@ -26,7 +27,7 @@
       await waitForDrain;
     }

-    await new Promise(r => setTimeout(r, 100));
+    await new Promise(r => setTimeout(r, delayInMs));
   }
   if (suffix) {
     const data = `data: ${suffix}\n\n`;
@@ -36,12 +37,15 @@
   res.end();
 }

-export const setup = () => {
+export const setup = (port = 3030) => {
   let recentFlushed: any[] = [];

   const server = createServer((req, res) => {
     const service = req.headers['x-mock-service'] || 'openai';
     const type = req.headers['x-mock-type'] || 'chat';
+    const flushDelayHeader = req.headers['x-flush-delay'];
+    const flushDelayInMs =
+      flushDelayHeader === undefined ? undefined : +flushDelayHeader;

     switch (type) {
       case 'func_call':
@@ -74,6 +78,7 @@ export const setup = () => {
            ),
          ),
          '[DONE]',
+          flushDelayInMs,
        );
        break;
      default:
@@ -105,6 +110,7 @@ export const setup = () => {
            ),
          ),
          '[DONE]',
+          flushDelayInMs,
        );
        break;
      default:
@@ -116,11 +122,11 @@
     }
   });

-  server.listen(3030);
+  server.listen(port);

   return {
-    port: 3030,
-    api: 'http://localhost:3030',
+    port,
+    api: `http://localhost:${port}`,
     teardown: () => {
       server.close();
     },
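
The parameterized port is what lets the new test file run its own mock instance on 3031 without colliding with other suites using the default 3030. A minimal standalone usage sketch of the helper, with header values taken from the tests above (port 4040 is an arbitrary example):

    import { setup } from './mock-service';

    // Start the mock OpenAI-compatible SSE server on a custom port.
    const server = setup(4040);
    console.log(server.api); // http://localhost:4040

    // Per-request behavior is selected via headers, as in the tests:
    //   'x-mock-service': 'openai'  which service to imitate
    //   'x-mock-type':    'chat'    stream a chat completion (vs. 'func_call')
    //   'x-flush-delay':  '5'       milliseconds between flushed SSE chunks

    // ...issue requests against server.api, then shut down:
    server.teardown();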
