diff --git a/docs/dotprompt.md b/docs/dotprompt.md index c0a286092..706ec906e 100644 --- a/docs/dotprompt.md +++ b/docs/dotprompt.md @@ -53,7 +53,7 @@ const result = await greetingPrompt.generate({ }, }); -console.log(result.text()); +console.log(result.text); ``` Dotprompt's syntax is based on the [Handlebars](https://handlebarsjs.com/guide/) @@ -183,7 +183,7 @@ const myPrompt = promptRef("myPrompt"); const result = await myPrompt.generate({...}); // now strongly typed as MySchema -result.output(); +result.output; ``` ## Overriding Prompt Metadata @@ -237,7 +237,7 @@ const menu = await createMenuPrompt.generate({ }, }); -console.log(menu.output()); +console.log(menu.output); ``` Output conformance is achieved by inserting additional instructions into the @@ -340,7 +340,7 @@ const result = await describeImagePrompt.generate({ }, }); -console.log(result.text()); +console.log(result.text); ``` ## Partials diff --git a/docs/evaluation.md b/docs/evaluation.md index aa750f04d..613a9f77f 100644 --- a/docs/evaluation.md +++ b/docs/evaluation.md @@ -204,7 +204,7 @@ export const synthesizeQuestions = defineFlow( text: `Generate one question about the text below: ${chunks[i]}`, }, }); - questions.push(qResponse.text()); + questions.push(qResponse.text); } return questions; } diff --git a/docs/get-started.md b/docs/get-started.md index 0b7c02b89..694f89c02 100644 --- a/docs/get-started.md +++ b/docs/get-started.md @@ -131,7 +131,7 @@ so that it can be used outside of a Node project. // Handle the response from the model API. In this sample, we just convert // it to a string, but more complicated flows might coerce the response into // structured output or chain the response into another LLM call, etc. - return llmResponse.text(); + return llmResponse.text; } ); diff --git a/docs/models.md b/docs/models.md index 473ee8ab9..c1731469d 100644 --- a/docs/models.md +++ b/docs/models.md @@ -109,7 +109,7 @@ configureGenkit(/* ... 
*/); prompt: 'Invent a menu item for a pirate themed restaurant.', }); - console.log(await llmResponse.text()); + console.log(llmResponse.text); })(); ``` @@ -339,7 +339,7 @@ object's `output()` method: ```ts type MenuItem = z.infer<typeof MenuItemSchema>; -const output: MenuItem | null = llmResponse.output(); +const output: MenuItem | null = llmResponse.output; ``` #### Handling errors @@ -425,7 +425,7 @@ Handle each of these chunks as they become available: ```ts for await (const responseChunkData of llmResponseStream.stream()) { const responseChunk = responseChunkData as GenerateResponseChunk; - console.log(responseChunk.text()); + console.log(responseChunk.text); } ``` @@ -454,7 +454,7 @@ const llmResponseStream = await generateStream({ for await (const responseChunkData of llmResponseStream.stream()) { const responseChunk = responseChunkData as GenerateResponseChunk; - // output() returns an object representing the entire output so far - const output: Menu | null = responseChunk.output(); + // output returns an object representing the entire output so far + const output: Menu | null = responseChunk.output; console.log(output); } ``` diff --git a/docs/plugin-authoring-evaluator.md b/docs/plugin-authoring-evaluator.md index bba9d3211..b5604229b 100644 --- a/docs/plugin-authoring-evaluator.md +++ b/docs/plugin-authoring-evaluator.md @@ -110,9 +110,9 @@ export async function deliciousnessScore< }); // Parse the output - const parsedResponse = response.output(); + const parsedResponse = response.output; if (!parsedResponse) { - throw new Error(`Unable to parse evaluator response: ${response.text()}`); + throw new Error(`Unable to parse evaluator response: ${response.text}`); } // Return a scored response diff --git a/docs/prompts.md b/docs/prompts.md index 1ecf5715a..f3e915570 100644 --- a/docs/prompts.md +++ b/docs/prompts.md @@ -100,7 +100,7 @@ const response = await (threeGreetingsPrompt.generate( { input: { name: 'Fred' } } )); -response.output()?.likeAPirate +response.output?.likeAPirate // "Ahoy there, Fred! May the winds be ever in your favor!" ``` diff --git a/docs/rag.md b/docs/rag.md index 0adff7691..23b459ed0 100644 --- a/docs/rag.md +++ b/docs/rag.md @@ -296,7 +296,7 @@ export const menuQAFlow = defineFlow( context: docs, }); - const output = llmResponse.text(); + const output = llmResponse.text; return output; } ); @@ -333,7 +333,7 @@ defineSimpleRetriever({ // and several keys to use as metadata metadata: ['from', 'to', 'subject'], } async (query, config) => { - const result = await searchEmails(query.text(), {limit: config.limit}); + const result = await searchEmails(query.text, {limit: config.limit}); return result.data.emails; }); ``` @@ -433,7 +433,7 @@ export const rerankFlow = defineFlow( }); return rerankedDocuments.map((doc) => ({ - text: doc.text(), + text: doc.text, score: doc.metadata.score, })); } diff --git a/genkit-tools/cli/config/firebase.index.ts.template b/genkit-tools/cli/config/firebase.index.ts.template index c7030f9f1..f8c8217bf 100644 --- a/genkit-tools/cli/config/firebase.index.ts.template +++ b/genkit-tools/cli/config/firebase.index.ts.template @@ -59,6 +59,6 @@ export const menuSuggestionFlow = onFlow( // convert it to a string, but more complicated flows might coerce the // response into structured output or chain the response into another // LLM call, etc. 
- return llmResponse.text(); + return llmResponse.text; } ); diff --git a/genkit-tools/cli/config/nextjs.genkit.ts.template b/genkit-tools/cli/config/nextjs.genkit.ts.template index ed619a3be..c1c05d8f8 100644 --- a/genkit-tools/cli/config/nextjs.genkit.ts.template +++ b/genkit-tools/cli/config/nextjs.genkit.ts.template @@ -39,7 +39,7 @@ const menuSuggestionFlow = ai.defineFlow( // convert it to a string, but more complicated flows might coerce the // response into structured output or chain the response into another // LLM call, etc. - return llmResponse.text(); + return llmResponse.text; } ); diff --git a/genkit-tools/cli/config/nodejs.index.ts.template b/genkit-tools/cli/config/nodejs.index.ts.template index 5f480bc47..28388b239 100644 --- a/genkit-tools/cli/config/nodejs.index.ts.template +++ b/genkit-tools/cli/config/nodejs.index.ts.template @@ -39,6 +39,6 @@ export const menuSuggestionFlow = ai.defineFlow( // Handle the response from the model API. In this sample, we just convert // it to a string, but more complicated flows might coerce the response into // structured output or chain the response into another LLM call, etc. - return llmResponse.text(); + return llmResponse.text; } ); diff --git a/js/ai/src/document.ts b/js/ai/src/document.ts index d9ff71f11..70ed10f17 100644 --- a/js/ai/src/document.ts +++ b/js/ai/src/document.ts @@ -70,7 +70,7 @@ export class Document implements DocumentData { * Concatenates all `text` parts present in the document with no delimiter. * @returns A string of all concatenated text parts. */ - text(): string { + get text(): string { return this.content.map((part) => part.text || '').join(''); } @@ -79,7 +79,7 @@ * (for example) an image. * @returns The first detected `media` part in the document. */ - media(): { url: string; contentType?: string } | null { + get media(): { url: string; contentType?: string } | null { return this.content.find((part) => part.media)?.media || null; } diff --git a/js/ai/src/generate.ts b/js/ai/src/generate.ts index dcce0e71e..b4e95716a 100755 --- a/js/ai/src/generate.ts +++ b/js/ai/src/generate.ts @@ -70,8 +70,8 @@ export class Message implements MessageData { * * @returns The structured output contained in the message. */ - output(): T { - return this.data() || extractJson(this.text()); + get output(): T { + return this.data || extractJson(this.text); } toolResponseParts(): ToolResponsePart[] { @@ -83,7 +83,7 @@ * Concatenates all `text` parts present in the message with no delimiter. * @returns A string of all concatenated text parts. */ - text(): string { + get text(): string { return this.content.map((part) => part.text || '').join(''); } @@ -92,7 +92,7 @@ * (for example) an image from a generation expected to create one. * @returns The first detected `media` part in the message. 
*/ - media(): { url: string; contentType?: string } | null { + get media(): { url: string; contentType?: string } | null { return this.content.find((part) => part.media)?.media || null; } @@ -100,7 +100,7 @@ export class Message implements MessageData { * Returns the first detected `data` part of a message. * @returns The first `data` part detected in the message (if any). */ - data(): T | null { + get data(): T | null { return this.content.find((part) => part.data)?.data as T | null; } @@ -108,7 +108,7 @@ export class Message implements MessageData { * Returns all tool request found in this message. * @returns Array of all tool request found in this message. */ - toolRequests(): ToolRequestPart[] { + get toolRequests(): ToolRequestPart[] { return this.content.filter( (part) => !!part.toolRequest ) as ToolRequestPart[]; @@ -187,7 +187,7 @@ export class GenerateResponse implements ModelResponseData { } if (request?.output?.schema || this.request?.output?.schema) { - const o = this.output(); + const o = this.output; parseSchema(o, { jsonSchema: request?.output?.schema || this.request?.output?.schema, }); @@ -211,8 +211,8 @@ export class GenerateResponse implements ModelResponseData { * @param index The candidate index from which to extract output. If not provided, finds first candidate that conforms to output schema. * @returns The structured output contained in the selected candidate. */ - output(): O | null { - return this.message?.output() || null; + get output(): O | null { + return this.message?.output || null; } /** @@ -220,8 +220,8 @@ export class GenerateResponse implements ModelResponseData { * @param index The candidate index from which to extract text, defaults to first candidate. * @returns A string of all concatenated text parts. */ - text(): string { - return this.message?.text() || ''; + get text(): string { + return this.message?.text || ''; } /** @@ -230,8 +230,8 @@ export class GenerateResponse implements ModelResponseData { * @param index The candidate index from which to extract media, defaults to first candidate. * @returns The first detected `media` part in the candidate. */ - media(): { url: string; contentType?: string } | null { - return this.message?.media() || null; + get media(): { url: string; contentType?: string } | null { + return this.message?.media || null; } /** @@ -239,8 +239,8 @@ export class GenerateResponse implements ModelResponseData { * @param index The candidate index from which to extract data, defaults to first candidate. * @returns The first `data` part detected in the candidate (if any). */ - data(): O | null { - return this.message?.data() || null; + get data(): O | null { + return this.message?.data || null; } /** @@ -248,8 +248,8 @@ export class GenerateResponse implements ModelResponseData { * @param index The candidate index from which to extract tool requests, defaults to first candidate. * @returns Array of all tool request found in the candidate. */ - toolRequests(): ToolRequestPart[] { - return this.message?.toolRequests() || []; + get toolRequests(): ToolRequestPart[] { + return this.message?.toolRequests || []; } /** @@ -316,7 +316,7 @@ export class GenerateResponseChunk * Concatenates all `text` parts present in the chunk with no delimiter. * @returns A string of all concatenated text parts. */ - text(): string { + get text(): string { return this.content.map((part) => part.text || '').join(''); } @@ -325,7 +325,7 @@ export class GenerateResponseChunk * (for example) an image from a generation expected to create one. 
* @returns The first detected `media` part in the chunk. */ - media(): { url: string; contentType?: string } | null { + get media(): { url: string; contentType?: string } | null { return this.content.find((part) => part.media)?.media || null; } @@ -333,7 +333,7 @@ export class GenerateResponseChunk * Returns the first detected `data` part of a chunk. * @returns The first `data` part detected in the chunk (if any). */ - data(): T | null { + get data(): T | null { return this.content.find((part) => part.data)?.data as T | null; } @@ -341,7 +341,7 @@ export class GenerateResponseChunk * Returns all tool request found in this chunk. * @returns Array of all tool request found in this chunk. */ - toolRequests(): ToolRequestPart[] { + get toolRequests(): ToolRequestPart[] { return this.content.filter( (part) => !!part.toolRequest ) as ToolRequestPart[]; @@ -351,7 +351,7 @@ export class GenerateResponseChunk * Attempts to extract the longest valid JSON substring from the accumulated chunks. * @returns The longest valid JSON substring found in the accumulated chunks. */ - output(): T | null { + get output(): T | null { if (!this.accumulatedChunks) return null; const accumulatedText = this.accumulatedChunks .map((chunk) => chunk.content.map((part) => part.text || '').join('')) diff --git a/js/ai/src/model/middleware.ts b/js/ai/src/model/middleware.ts index 85a30c0b7..11c320cb9 100644 --- a/js/ai/src/model/middleware.ts +++ b/js/ai/src/model/middleware.ts @@ -230,7 +230,7 @@ const CONTEXT_ITEM_TEMPLATE = ( } else if (options?.citationKey === undefined) { out += `[${d.metadata?.['ref'] || d.metadata?.['id'] || index}]: `; } - out += d.text() + '\n'; + out += d.text + '\n'; return out; }; diff --git a/js/ai/src/testing/model-tester.ts b/js/ai/src/testing/model-tester.ts index 14096c8ae..3cf041ae9 100644 --- a/js/ai/src/testing/model-tester.ts +++ b/js/ai/src/testing/model-tester.ts @@ -29,7 +29,7 @@ const tests: Record = { prompt: 'just say "Hi", literally', }); - const got = response.text().trim(); + const got = response.text.trim(); assert.match(got, /Hi/i); }, multimodal: async (model: string) => { @@ -54,7 +54,7 @@ const tests: Record = { }); const want = ''; - const got = response.text().trim(); + const got = response.text.trim(); assert.match(got, /plus/i); }, history: async (model: string) => { @@ -74,11 +74,11 @@ const tests: Record = { messages: response1.messages, }); - const got = response.text().trim(); + const got = response.text.trim(); assert.match(got, /Glorb/); }, 'system prompt': async (model: string) => { - const response = await generate({ + const { text } = await generate({ model, prompt: 'Hi', messages: [ @@ -94,7 +94,7 @@ const tests: Record = { }); const want = 'Bye'; - const got = response.text().trim(); + const got = text.trim(); assert.equal(got, want); }, 'structured output': async (model: string) => { @@ -114,7 +114,7 @@ const tests: Record = { name: 'Jack', occupation: 'Lumberjack', }; - const got = response.output(); + const got = response.output; assert.deepEqual(want, got); }, 'tool calling': async (model: string) => { @@ -125,13 +125,13 @@ const tests: Record = { skip(); } - const response = await generate({ + const { text } = await generate({ model, prompt: 'what is a gablorken of 2? 
use provided tool', tools: ['gablorkenTool'], }); - const got = response.text().trim(); + const got = text.trim(); assert.match(got, /9.407/); }, }; diff --git a/js/ai/tests/generate/generate_test.ts b/js/ai/tests/generate/generate_test.ts index 4e514d74b..cae8e419c 100644 --- a/js/ai/tests/generate/generate_test.ts +++ b/js/ai/tests/generate/generate_test.ts @@ -108,7 +108,7 @@ describe('GenerateResponse', () => { const response = new GenerateResponse( test.responseData as GenerateResponseData ); - assert.deepStrictEqual(response.output(), test.expectedOutput); + assert.deepStrictEqual(response.output, test.expectedOutput); }); } }); @@ -213,7 +213,7 @@ describe('GenerateResponse', () => { }), finishReason: 'stop', }); - assert.deepStrictEqual(response.toolRequests(), []); + assert.deepStrictEqual(response.toolRequests, []); }); it('returns tool call if present', () => { const toolCall = { @@ -230,7 +230,7 @@ describe('GenerateResponse', () => { }), finishReason: 'stop', }); - assert.deepStrictEqual(response.toolRequests(), [toolCall]); + assert.deepStrictEqual(response.toolRequests, [toolCall]); }); it('returns all tool calls', () => { const toolCall1 = { @@ -254,7 +254,7 @@ describe('GenerateResponse', () => { }), finishReason: 'stop', }); - assert.deepStrictEqual(response.toolRequests(), [toolCall1, toolCall2]); + assert.deepStrictEqual(response.toolRequests, [toolCall1, toolCall2]); }); }); }); @@ -515,7 +515,7 @@ describe('GenerateResponseChunk', () => { const responseChunk: GenerateResponseChunk = new GenerateResponseChunk(chunkData, accumulatedChunks); - const output = responseChunk.output(); + const output = responseChunk.output; assert.deepStrictEqual(output, test.correctJson); }); @@ -601,7 +601,7 @@ describe('generate', () => { ); const want = '[Echo: (banana)]'; - assert.deepStrictEqual(response.text(), want); + assert.deepStrictEqual(response.text, want); }); }); diff --git a/js/ai/tests/model/document_test.ts b/js/ai/tests/model/document_test.ts index d91bdb657..c1adaa97f 100644 --- a/js/ai/tests/model/document_test.ts +++ b/js/ai/tests/model/document_test.ts @@ -23,13 +23,13 @@ describe('document', () => { it('retuns single text part', () => { const doc = new Document({ content: [{ text: 'foo' }] }); - assert.equal(doc.text(), 'foo'); + assert.equal(doc.text, 'foo'); }); it('retuns concatenated text part', () => { const doc = new Document({ content: [{ text: 'foo' }, { text: 'bar' }] }); - assert.equal(doc.text(), 'foobar'); + assert.equal(doc.text, 'foobar'); }); }); @@ -42,7 +42,7 @@ describe('document', () => { ], }); - assert.deepEqual(doc.media(), { url: 'data:foo' }); + assert.deepEqual(doc.media, { url: 'data:foo' }); }); }); diff --git a/js/ai/tests/model/middleware_test.ts b/js/ai/tests/model/middleware_test.ts index 99987c351..9b3eb7054 100644 --- a/js/ai/tests/model/middleware_test.ts +++ b/js/ai/tests/model/middleware_test.ts @@ -521,7 +521,7 @@ describe('augmentWithContext', () => { metadata: { uid: 'second' }, }, ], - { itemTemplate: (d) => `* (${d.metadata!.uid}) -- ${d.text()}\n` } + { itemTemplate: (d) => `* (${d.metadata!.uid}) -- ${d.text}\n` } ); assert.deepEqual(result[0].content.at(-1), { text: `${CONTEXT_PREFACE}* (first) -- i am context\n* (second) -- i am more context\n\n`, diff --git a/js/ai/tests/reranker/reranker_test.ts b/js/ai/tests/reranker/reranker_test.ts index 1b67a663f..4942e02b6 100644 --- a/js/ai/tests/reranker/reranker_test.ts +++ b/js/ai/tests/reranker/reranker_test.ts @@ -38,9 +38,9 @@ describe('reranker', () => { }, async (query, 
documents, options) => { // Custom reranking logic: score based on string length similarity to query - const queryLength = query.text().length; + const queryLength = query.text.length; const rerankedDocs = documents.map((doc) => { - const score = Math.abs(queryLength - doc.text().length); + const score = Math.abs(queryLength - doc.text.length); return { ...doc, metadata: { ...doc.metadata, score }, @@ -75,8 +75,8 @@ describe('reranker', () => { // Validate the reranked results assert.equal(rerankedDocuments.length, 2); - assert(rerankedDocuments[0].text().includes('a bit longer')); - assert(rerankedDocuments[1].text().includes('short')); + assert(rerankedDocuments[0].text.includes('a bit longer')); + assert(rerankedDocuments[1].text.includes('short')); }); it('handles missing options gracefully', async () => { diff --git a/js/core/src/flow-client/client.ts b/js/core/src/flow-client/client.ts index 46656ce77..111e4e479 100644 --- a/js/core/src/flow-client/client.ts +++ b/js/core/src/flow-client/client.ts @@ -28,10 +28,10 @@ const __flowStreamDelimiter = '\n'; * url: 'https://my-flow-deployed-url', * input: 'foo', * }); - * for await (const chunk of response.stream()) { + * for await (const chunk of response.stream) { * console.log(chunk); * } - * console.log(await response.output()); + * console.log(await response.output); * ``` */ export function streamFlow({ diff --git a/js/genkit/tests/chat_test.ts b/js/genkit/tests/chat_test.ts index 263032793..a00eda1f9 100644 --- a/js/genkit/tests/chat_test.ts +++ b/js/genkit/tests/chat_test.ts @@ -33,12 +33,12 @@ describe('session', () => { const session = await ai.chat(); let response = await session.send('hi'); - assert.strictEqual(response.text(), 'Echo: hi; config: {}'); + assert.strictEqual(response.text, 'Echo: hi; config: {}'); response = await session.send('bye'); assert.strictEqual( - response.text(), + response.text, 'Echo: hi,Echo: hi,; config: {},bye; config: {}' ); assert.deepStrictEqual(response.messages, [ @@ -64,21 +64,21 @@ describe('session', () => { let chunks: string[] = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } - assert.strictEqual((await response).text(), 'Echo: hi; config: {}'); + assert.strictEqual((await response).text, 'Echo: hi; config: {}'); assert.deepStrictEqual(chunks, ['3', '2', '1']); ({ response, stream } = await session.sendStream('bye')); chunks = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } assert.deepStrictEqual(chunks, ['3', '2', '1']); assert.strictEqual( - (await response).text(), + (await response).text, 'Echo: hi,Echo: hi,; config: {},bye; config: {}' ); assert.deepStrictEqual((await response).messages, [ @@ -109,7 +109,7 @@ describe('session', () => { const response = await session.send('hi'); assert.strictEqual( - response.text(), + response.text, 'Echo: hi Genkit,hi; config: {"temperature":11}' ); }); @@ -128,7 +128,7 @@ describe('session', () => { ); assert.strictEqual( - response.text(), + response.text, 'Echo: hi Genkit; config: {"version":"abc","temperature":11}' ); }); diff --git a/js/genkit/tests/models_test.ts b/js/genkit/tests/models_test.ts index 23d800599..23298d6c0 100644 --- a/js/genkit/tests/models_test.ts +++ b/js/genkit/tests/models_test.ts @@ -35,17 +35,17 @@ describe('models', () => { const response = await ai.generate({ prompt: 'hi', }); - assert.strictEqual(response.text(), 'Echo: hi; config: {}'); + assert.strictEqual(response.text, 'Echo: hi; config: {}'); }); it('calls the 
default model with just a string prompt', async () => { const response = await ai.generate('hi'); - assert.strictEqual(response.text(), 'Echo: hi; config: {}'); + assert.strictEqual(response.text, 'Echo: hi; config: {}'); }); it('calls the default model with just parts prompt', async () => { const response = await ai.generate([{ text: 'hi' }]); - assert.strictEqual(response.text(), 'Echo: hi; config: {}'); + assert.strictEqual(response.text, 'Echo: hi; config: {}'); }); it('calls the default model system', async () => { @@ -54,7 +54,7 @@ describe('models', () => { system: 'talk like a pirate', }); assert.strictEqual( - response.text(), + response.text, 'Echo: system: talk like a pirate,hi; config: {}' ); assert.deepStrictEqual(response.request, { @@ -82,9 +82,9 @@ describe('models', () => { const chunks: string[] = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } - assert.strictEqual((await response).text(), 'Echo: hi; config: {}'); + assert.strictEqual((await response).text, 'Echo: hi; config: {}'); assert.deepStrictEqual(chunks, ['3', '2', '1']); }); }); @@ -102,7 +102,7 @@ describe('models', () => { model: 'echoModel', prompt: 'hi', }); - assert.strictEqual(response.text(), 'Echo: hi; config: {}'); + assert.strictEqual(response.text, 'Echo: hi; config: {}'); }); }); }); diff --git a/js/genkit/tests/prompts_test.ts b/js/genkit/tests/prompts_test.ts index 0d04ffd11..db77d44de 100644 --- a/js/genkit/tests/prompts_test.ts +++ b/js/genkit/tests/prompts_test.ts @@ -46,7 +46,7 @@ describe('definePrompt - dotprompt', () => { ); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); it('calls dotprompt with default model with config', async () => { @@ -67,7 +67,7 @@ describe('definePrompt - dotprompt', () => { const response = await hi({ name: 'Genkit' }); assert.strictEqual( - response.text(), + response.text, 'Echo: hi Genkit; config: {"temperature":11}' ); }); @@ -93,7 +93,7 @@ describe('definePrompt - dotprompt', () => { config: { version: 'abc' }, }); assert.strictEqual( - response.text(), + response.text, 'Echo: hi Genkit; config: {"version":"abc","temperature":11}' ); }); @@ -114,7 +114,7 @@ describe('definePrompt - dotprompt', () => { const hi = await ai.prompt('hi'); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); }); @@ -144,7 +144,7 @@ describe('definePrompt - dotprompt', () => { ); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); it('infers output schema', async () => { @@ -176,7 +176,7 @@ describe('definePrompt - dotprompt', () => { ); const response = await hi({ name: 'Genkit' }); - const foo: z.infer = response.output(); + const foo: z.infer = response.output; assert.deepStrictEqual(foo, { bar: 'baz' }); }); @@ -199,9 +199,9 @@ describe('definePrompt - dotprompt', () => { const { response, stream } = await hi.stream({ name: 'Genkit' }); const chunks: string[] = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } - const responseText = (await response).text(); + const responseText = (await response).text; assert.strictEqual( responseText, @@ -232,9 +232,9 @@ describe('definePrompt 
- dotprompt', () => { }); const chunks: string[] = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } - const responseText = (await response).text(); + const responseText = (await response).text; assert.strictEqual( responseText, @@ -259,7 +259,7 @@ describe('definePrompt - dotprompt', () => { const hi = await ai.prompt('hi'); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); }); @@ -286,7 +286,7 @@ describe('definePrompt - dotprompt', () => { ); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); it('calls dotprompt with default model with config', async () => { @@ -308,7 +308,7 @@ describe('definePrompt - dotprompt', () => { const response = await hi({ name: 'Genkit' }); assert.strictEqual( - response.text(), + response.text, 'Echo: hi Genkit; config: {"temperature":11}' ); }); @@ -391,7 +391,7 @@ describe('definePrompt', () => { ); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); it('calls dotprompt with default model with config', async () => { @@ -418,7 +418,7 @@ describe('definePrompt', () => { const response = await hi({ name: 'Genkit' }); assert.strictEqual( - response.text(), + response.text, 'Echo: hi Genkit; config: {"temperature":11}' ); }); @@ -445,7 +445,7 @@ describe('definePrompt', () => { const hi = await ai.prompt('hi'); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); }); @@ -481,7 +481,7 @@ describe('definePrompt', () => { ); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); it('streams dotprompt with default model', async () => { @@ -509,9 +509,9 @@ describe('definePrompt', () => { const { response, stream } = await hi.stream({ name: 'Genkit' }); const chunks: string[] = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } - const responseText = (await response).text(); + const responseText = (await response).text; assert.strictEqual( responseText, @@ -550,7 +550,7 @@ describe('definePrompt', () => { ); const response = await hi({ name: 'Genkit' }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); }); it('calls dotprompt with default model with config', async () => { @@ -578,7 +578,7 @@ describe('definePrompt', () => { const response = await hi({ name: 'Genkit' }); assert.strictEqual( - response.text(), + response.text, 'Echo: hi Genkit; config: {"temperature":11}' ); }); @@ -613,7 +613,7 @@ describe('definePrompt', () => { } ); assert.strictEqual( - response.text(), + response.text, 'Echo: hi Genkit; config: {"version":"abc","temperature":11}' ); }); @@ -639,7 +639,7 @@ describe('definePrompt', () => { ); const response = await hi.generate({ input: { name: 'Genkit' } }); - assert.strictEqual(response.text(), 'Echo: hi Genkit; config: {}'); + assert.strictEqual(response.text, 'Echo: hi Genkit; config: {}'); 
}); it('streams dotprompt with .generateStream', async () => { @@ -671,9 +671,9 @@ describe('definePrompt', () => { }); const chunks: string[] = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } - const responseText = (await response).text(); + const responseText = (await response).text; assert.strictEqual( responseText, diff --git a/js/genkit/tests/session_test.ts b/js/genkit/tests/session_test.ts index 33822aff5..2c77a18b0 100644 --- a/js/genkit/tests/session_test.ts +++ b/js/genkit/tests/session_test.ts @@ -34,12 +34,12 @@ describe('session', () => { const chat = session.chat(); let response = await chat.send('hi'); - assert.strictEqual(response.text(), 'Echo: hi; config: {}'); + assert.strictEqual(response.text, 'Echo: hi; config: {}'); response = await chat.send('bye'); assert.strictEqual( - response.text(), + response.text, 'Echo: hi,Echo: hi,; config: {},bye; config: {}' ); assert.deepStrictEqual(response.messages, [ @@ -70,14 +70,14 @@ describe('session', () => { }); let response = await session.chat().send('hi main'); - assert.strictEqual(response.text(), 'Echo: hi main; config: {}'); + assert.strictEqual(response.text, 'Echo: hi main; config: {}'); const lawyerChat = session.chat('lawyerChat', { system: 'talk like a lawyer', }); response = await lawyerChat.send('hi lawyerChat'); assert.strictEqual( - response.text(), + response.text, 'Echo: system: talk like a lawyer,hi lawyerChat; config: {}' ); @@ -86,7 +86,7 @@ describe('session', () => { }); response = await pirateChat.send('hi pirateChat'); assert.strictEqual( - response.text(), + response.text, 'Echo: system: talk like a pirate,hi pirateChat; config: {}' ); @@ -138,21 +138,21 @@ describe('session', () => { let chunks: string[] = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } - assert.strictEqual((await response).text(), 'Echo: hi; config: {}'); + assert.strictEqual((await response).text, 'Echo: hi; config: {}'); assert.deepStrictEqual(chunks, ['3', '2', '1']); ({ response, stream } = await chat.sendStream('bye')); chunks = []; for await (const chunk of stream) { - chunks.push(chunk.text()); + chunks.push(chunk.text); } assert.deepStrictEqual(chunks, ['3', '2', '1']); assert.strictEqual( - (await response).text(), + (await response).text, 'Echo: hi,Echo: hi,; config: {},bye; config: {}' ); assert.deepStrictEqual((await response).messages, [ @@ -251,7 +251,7 @@ describe('session', () => { ]); let response = await mainChat.send('hi again'); assert.strictEqual( - response.text(), + response.text, 'Echo: hi,Echo: hi,; config: {"temperature":1},bye,Echo: hi,Echo: hi,; config: {"temperature":1},bye,; config: {"temperature":1},hi again; config: {}' ); assert.deepStrictEqual(mainChat.messages, [ diff --git a/js/plugins/chroma/src/index.ts b/js/plugins/chroma/src/index.ts index 4c71e52f7..ba8b20194 100644 --- a/js/plugins/chroma/src/index.ts +++ b/js/plugins/chroma/src/index.ts @@ -243,7 +243,7 @@ export function chromaIndexer< return { id, value, - document: docs[i].text(), + document: docs[i].text, metadata, }; }); diff --git a/js/plugins/dotprompt/tests/prompt_test.ts b/js/plugins/dotprompt/tests/prompt_test.ts index f39c316da..39857923c 100644 --- a/js/plugins/dotprompt/tests/prompt_test.ts +++ b/js/plugins/dotprompt/tests/prompt_test.ts @@ -175,7 +175,7 @@ describe('Prompt', () => { ); const prompt = testPrompt(model, `Hello {{name}}, how are you?`); const response = await prompt.generate({ input: { name: 'Bob' } }); - 
assert.equal(response.text(), `Hello Bob, how are you?`); + assert.equal(response.text, `Hello Bob, how are you?`); }); }); @@ -376,7 +376,7 @@ describe('DotpromptRef', () => { const ref = promptRef('generatePrompt'); const response = await ref.generate({ input: { name: 'Alice' } }); - assert.equal(response.text(), 'Hello Alice, this is a test prompt.'); + assert.equal(response.text, 'Hello Alice, this is a test prompt.'); }); }); diff --git a/js/plugins/evaluators/src/metrics/answer_relevancy.ts b/js/plugins/evaluators/src/metrics/answer_relevancy.ts index ab5e02b79..556d0f954 100644 --- a/js/plugins/evaluators/src/metrics/answer_relevancy.ts +++ b/js/plugins/evaluators/src/metrics/answer_relevancy.ts @@ -59,7 +59,7 @@ export async function answerRelevancyScore< schema: AnswerRelevancyResponseSchema, }, }); - const genQuestion = response.output()?.question; + const genQuestion = response.output?.question; if (!genQuestion) throw new Error('Error generating question for answer relevancy'); @@ -74,8 +74,8 @@ export async function answerRelevancyScore< options: embedderOptions, }); const score = cosineSimilarity(questionEmbed, genQuestionEmbed); - const answered = response.output()?.answered === 1; - const isNonCommittal = response.output()?.noncommittal === 1; + const answered = response.output?.answered === 1; + const isNonCommittal = response.output?.noncommittal === 1; const answeredPenalty = !answered ? 0.5 : 0; const adjustedScore = score - answeredPenalty < 0 ? 0 : score - answeredPenalty; diff --git a/js/plugins/evaluators/src/metrics/faithfulness.ts b/js/plugins/evaluators/src/metrics/faithfulness.ts index fa80c6f4b..31c131d4c 100644 --- a/js/plugins/evaluators/src/metrics/faithfulness.ts +++ b/js/plugins/evaluators/src/metrics/faithfulness.ts @@ -65,7 +65,7 @@ export async function faithfulnessScore< schema: LongFormResponseSchema, }, }); - const parsedLongFormResponse = longFormResponse.output(); + const parsedLongFormResponse = longFormResponse.output; let statements = parsedLongFormResponse?.statements ?? []; if (statements.length === 0) { throw new Error('No statements returned'); @@ -85,7 +85,7 @@ export async function faithfulnessScore< schema: NliResponseSchema, }, }); - const parsedResponse = response.output(); + const parsedResponse = response.output; return nliResponseToScore(parsedResponse); } catch (err) { console.debug( diff --git a/js/plugins/evaluators/src/metrics/maliciousness.ts b/js/plugins/evaluators/src/metrics/maliciousness.ts index ad166d2b2..a107cdddf 100644 --- a/js/plugins/evaluators/src/metrics/maliciousness.ts +++ b/js/plugins/evaluators/src/metrics/maliciousness.ts @@ -51,9 +51,9 @@ export async function maliciousnessScore< schema: MaliciousnessResponseSchema, }, }); - const parsedResponse = response.output(); + const parsedResponse = response.output; if (!parsedResponse) { - throw new Error(`Unable to parse evaluator response: ${response.text()}`); + throw new Error(`Unable to parse evaluator response: ${response.text}`); } return { score: 1.0 * (parsedResponse.verdict ? 
1 : 0), diff --git a/js/plugins/googleai/src/embedder.ts b/js/plugins/googleai/src/embedder.ts index 331a68ff4..949a4f3d0 100644 --- a/js/plugins/googleai/src/embedder.ts +++ b/js/plugins/googleai/src/embedder.ts @@ -88,7 +88,7 @@ export function textEmbeddingGeckoEmbedder( title: options?.title, content: { role: '', - parts: [{ text: doc.text() }], + parts: [{ text: doc.text }], }, } as EmbedContentRequest); const values = response.embedding.values; diff --git a/js/plugins/langchain/src/model.ts b/js/plugins/langchain/src/model.ts index 8a44cf699..5891ec0d9 100644 --- a/js/plugins/langchain/src/model.ts +++ b/js/plugins/langchain/src/model.ts @@ -56,7 +56,7 @@ class ModelAdapter extends BaseLLM { ); return { - generations: ress.map((r) => [{ text: r.text() }]), + generations: ress.map((r) => [{ text: r.text }]), }; } diff --git a/js/plugins/ollama/src/embeddings.ts b/js/plugins/ollama/src/embeddings.ts index 608e7db38..4a8ed5d9a 100644 --- a/js/plugins/ollama/src/embeddings.ts +++ b/js/plugins/ollama/src/embeddings.ts @@ -62,7 +62,7 @@ export function defineOllamaEmbedder({ input.map(async (i) => { const requestPayload = { model: modelName, - prompt: i.text(), + prompt: i.text, }; let res: Response; try { diff --git a/js/plugins/pinecone/src/index.ts b/js/plugins/pinecone/src/index.ts index 6c03d124f..9c7889f82 100644 --- a/js/plugins/pinecone/src/index.ts +++ b/js/plugins/pinecone/src/index.ts @@ -218,7 +218,7 @@ export function configurePineconeIndexer< ...docs[i].metadata, }; - metadata[textKey] = docs[i].text(); + metadata[textKey] = docs[i].text; const id = Md5.hashStr(JSON.stringify(docs[i])); return { id, diff --git a/js/plugins/vertexai/src/embedder.ts b/js/plugins/vertexai/src/embedder.ts index a7cf59a85..e8048dcb2 100644 --- a/js/plugins/vertexai/src/embedder.ts +++ b/js/plugins/vertexai/src/embedder.ts @@ -186,7 +186,7 @@ export function textEmbeddingGeckoEmbedder( const response = await predictClient( input.map((i) => { return { - content: i.text(), + content: i.text, task_type: options?.taskType, title: options?.title, }; diff --git a/js/plugins/vertexai/src/openai_compatibility.ts b/js/plugins/vertexai/src/openai_compatibility.ts index a6f8cf838..b9f056a42 100644 --- a/js/plugins/vertexai/src/openai_compatibility.ts +++ b/js/plugins/vertexai/src/openai_compatibility.ts @@ -114,7 +114,7 @@ export function toOpenAiMessages( case 'system': openAiMsgs.push({ role: role, - content: msg.text(), + content: msg.text, }); break; case 'assistant': { @@ -142,7 +142,7 @@ export function toOpenAiMessages( } else { openAiMsgs.push({ role: role, - content: msg.text(), + content: msg.text, }); } break; diff --git a/js/plugins/vertexai/src/reranker.ts b/js/plugins/vertexai/src/reranker.ts index 1e51b3310..e33924552 100644 --- a/js/plugins/vertexai/src/reranker.ts +++ b/js/plugins/vertexai/src/reranker.ts @@ -111,10 +111,10 @@ export async function vertexAiRerankers( ), data: { model: rerankOption.model || DEFAULT_MODEL, // Use model from config or default - query: query.text(), + query: query.text, records: documents.map((doc, idx) => ({ id: `${idx}`, - content: doc.text(), + content: doc.text, })), }, }); diff --git a/js/testapps/anthropic-models/src/index.ts b/js/testapps/anthropic-models/src/index.ts index a43e57f58..d88bf4a34 100644 --- a/js/testapps/anthropic-models/src/index.ts +++ b/js/testapps/anthropic-models/src/index.ts @@ -72,6 +72,6 @@ export const menuSuggestionFlow = ai.defineFlow( returnToolRequests: true, }); - return llmResponse.toolRequests(); + return 
llmResponse.toolRequests; } ); diff --git a/js/testapps/basic-gemini/src/index.ts b/js/testapps/basic-gemini/src/index.ts index ece0ce2d7..5c4fe581f 100644 --- a/js/testapps/basic-gemini/src/index.ts +++ b/js/testapps/basic-gemini/src/index.ts @@ -53,6 +53,6 @@ export const jokeFlow = ai.defineFlow( tools: [jokeSubjectGenerator], prompt: `come up with a subject to joke about (using the function provided)`, }); - return llmResponse.output(); + return llmResponse.output; } ); diff --git a/js/testapps/byo-evaluator/src/deliciousness/deliciousness.ts b/js/testapps/byo-evaluator/src/deliciousness/deliciousness.ts index 22713cb24..d3750cb84 100644 --- a/js/testapps/byo-evaluator/src/deliciousness/deliciousness.ts +++ b/js/testapps/byo-evaluator/src/deliciousness/deliciousness.ts @@ -52,9 +52,9 @@ export async function deliciousnessScore< schema: DeliciousnessDetectionResponseSchema, }, }); - const parsedResponse = response.output(); + const parsedResponse = response.output; if (!parsedResponse) { - throw new Error(`Unable to parse evaluator response: ${response.text()}`); + throw new Error(`Unable to parse evaluator response: ${response.text}`); } return { score: parsedResponse.verdict, diff --git a/js/testapps/byo-evaluator/src/funniness/funniness.ts b/js/testapps/byo-evaluator/src/funniness/funniness.ts index 68540904e..3f38f0e1e 100644 --- a/js/testapps/byo-evaluator/src/funniness/funniness.ts +++ b/js/testapps/byo-evaluator/src/funniness/funniness.ts @@ -55,9 +55,9 @@ export async function funninessScore( schema: FunninessResponseSchema, }, }); - const parsedResponse = response.output(); + const parsedResponse = response.output; if (!parsedResponse) { - throw new Error(`Unable to parse evaluator response: ${response.text()}`); + throw new Error(`Unable to parse evaluator response: ${response.text}`); } return { score: parsedResponse.verdict, diff --git a/js/testapps/byo-evaluator/src/pii/pii_detection.ts b/js/testapps/byo-evaluator/src/pii/pii_detection.ts index ea0099f4a..b9d296f5d 100644 --- a/js/testapps/byo-evaluator/src/pii/pii_detection.ts +++ b/js/testapps/byo-evaluator/src/pii/pii_detection.ts @@ -50,9 +50,9 @@ export async function piiDetectionScore< schema: PiiDetectionResponseSchema, }, }); - const parsedResponse = response.output(); + const parsedResponse = response.output; if (!parsedResponse) { - throw new Error(`Unable to parse evaluator response: ${response.text()}`); + throw new Error(`Unable to parse evaluator response: ${response.text}`); } return { score: parsedResponse.verdict, diff --git a/js/testapps/cat-eval/src/pdf_rag.ts b/js/testapps/cat-eval/src/pdf_rag.ts index d83203725..509109e4b 100644 --- a/js/testapps/cat-eval/src/pdf_rag.ts +++ b/js/testapps/cat-eval/src/pdf_rag.ts @@ -61,13 +61,13 @@ export const pdfQA = ai.defineFlow( const augmentedPrompt = ragTemplate({ question: query, - context: docs.map((d) => d.text()).join('\n\n'), + context: docs.map((d) => d.text).join('\n\n'), }); const llmResponse = await ai.generate({ model: geminiPro, prompt: augmentedPrompt, }); - return llmResponse.text(); + return llmResponse.text; } ); @@ -146,7 +146,7 @@ export const synthesizeQuestions = ai.defineFlow( text: `Generate one question about the text below: ${chunks[i]}`, }, }); - questions.push(qResponse.text()); + questions.push(qResponse.text); } return questions; } diff --git a/js/testapps/cat-eval/src/pdf_rag_firebase.ts b/js/testapps/cat-eval/src/pdf_rag_firebase.ts index 120287a73..8cc923ed3 100644 --- a/js/testapps/cat-eval/src/pdf_rag_firebase.ts +++ 
b/js/testapps/cat-eval/src/pdf_rag_firebase.ts @@ -87,13 +87,13 @@ export const pdfQAFirebase = ai.defineFlow( const augmentedPrompt = ragTemplate({ question: query, - context: docs.map((d) => d.text()).join('\n\n'), + context: docs.map((d) => d.text).join('\n\n'), }); const llmResponse = await ai.generate({ model: geminiPro, prompt: augmentedPrompt, }); - return llmResponse.text(); + return llmResponse.text; } ); diff --git a/js/testapps/dev-ui-gallery/src/main/flows-firebase-functions.ts b/js/testapps/dev-ui-gallery/src/main/flows-firebase-functions.ts index 6993b1c2b..0558dc6d3 100644 --- a/js/testapps/dev-ui-gallery/src/main/flows-firebase-functions.ts +++ b/js/testapps/dev-ui-gallery/src/main/flows-firebase-functions.ts @@ -44,7 +44,7 @@ export const flowBasicAuth = ai.defineFlow( prompt: prompt, }); - return llmResponse.text(); + return llmResponse.text; }); } ); @@ -73,7 +73,7 @@ export const flowAuth = onFlow( prompt: prompt, }); - return llmResponse.text(); + return llmResponse.text; }); } ); @@ -98,7 +98,7 @@ export const flowAuthNone = onFlow( prompt: prompt, }); - return llmResponse.text(); + return llmResponse.text; }); } ); diff --git a/js/testapps/dev-ui-gallery/src/main/prompts.ts b/js/testapps/dev-ui-gallery/src/main/prompts.ts index f8a68d9f7..25dd180af 100644 --- a/js/testapps/dev-ui-gallery/src/main/prompts.ts +++ b/js/testapps/dev-ui-gallery/src/main/prompts.ts @@ -15,7 +15,7 @@ */ import { gemini15Flash } from '@genkit-ai/googleai'; -import { promptRef, z } from 'genkit'; +import { z } from 'genkit'; import { HelloFullNameSchema, HelloSchema } from '../common/types.js'; import { ai } from '../genkit.js'; @@ -96,12 +96,12 @@ ai.defineStreamingFlow( outputSchema: z.string(), }, async (input) => { - const prompt = promptRef('codeDefinedPrompt'); + const prompt = await ai.prompt('codeDefinedPrompt'); const response = await prompt.generate({ input, }); - return response.text(); + return response.text; } ); @@ -116,8 +116,8 @@ ai.defineFlow( outputSchema: z.string(), }, async (input) => { - const prompt = promptRef('hello'); - return (await prompt.generate({ input })).text(); + const prompt = await ai.prompt('hello'); + return (await prompt.generate({ input })).text; } ); @@ -132,8 +132,8 @@ ai.defineFlow( outputSchema: z.string(), }, async (input) => { - const prompt = promptRef('hello', { variant: 'first-last-name' }); - return (await prompt.generate({ input })).text(); + const prompt = await ai.prompt('hello', { variant: 'first-last-name' }); + return (await prompt.generate({ input })).text; } ); @@ -148,8 +148,8 @@ ai.defineFlow( outputSchema: z.any(), }, async (input) => { - const prompt = promptRef('hello', { variant: 'json-output' }); - return (await prompt.generate({ input })).output(); + const prompt = await ai.prompt('hello', { variant: 'json-output' }); + return (await prompt.generate({ input })).output; } ); @@ -164,8 +164,8 @@ ai.defineFlow( outputSchema: z.any(), }, async (input) => { - const prompt = promptRef('hello', { variant: 'system' }); - return (await prompt.generate({ input })).text(); + const prompt = await ai.prompt('hello', { variant: 'system' }); + return (await prompt.generate({ input })).text; } ); @@ -180,8 +180,8 @@ ai.defineFlow( outputSchema: z.any(), }, async (input) => { - const prompt = promptRef('hello', { variant: 'history' }); - return (await prompt.generate({ input })).text(); + const prompt = await ai.prompt('hello', { variant: 'history' }); + return (await prompt.generate({ input })).text; } ); diff --git 
a/js/testapps/dev-ui-gallery/src/main/tools.ts b/js/testapps/dev-ui-gallery/src/main/tools.ts index d0e096d39..71316f5ee 100644 --- a/js/testapps/dev-ui-gallery/src/main/tools.ts +++ b/js/testapps/dev-ui-gallery/src/main/tools.ts @@ -93,8 +93,7 @@ ai.defineFlow( outputSchema: z.string(), }, async (input) => { - const response = await weatherPrompt(input); - - return response.text(); + const { text } = await weatherPrompt(input); + return text; } ); diff --git a/js/testapps/docs-menu-basic/src/index.ts b/js/testapps/docs-menu-basic/src/index.ts index 01dae1e69..ff7fdf421 100644 --- a/js/testapps/docs-menu-basic/src/index.ts +++ b/js/testapps/docs-menu-basic/src/index.ts @@ -39,6 +39,6 @@ export const menuSuggestionFlow = ai.defineFlow( }, }); - return llmResponse.text(); + return llmResponse.text; } ); diff --git a/js/testapps/docs-menu-rag/src/menuQA.ts b/js/testapps/docs-menu-rag/src/menuQA.ts index 408ab940d..85092b85a 100644 --- a/js/testapps/docs-menu-rag/src/menuQA.ts +++ b/js/testapps/docs-menu-rag/src/menuQA.ts @@ -48,7 +48,7 @@ export const menuQAFlow = ai.defineFlow( docs, }); - const output = llmResponse.text(); + const output = llmResponse.text; return output; } ); diff --git a/js/testapps/express/src/index.ts b/js/testapps/express/src/index.ts index 0a3248201..6bfb49216 100644 --- a/js/testapps/express/src/index.ts +++ b/js/testapps/express/src/index.ts @@ -47,7 +47,7 @@ export const jokeFlow = ai.defineFlow( streamingCallback, }); - return llmResponse.text(); + return llmResponse.text; }); } ); diff --git a/js/testapps/firebase-functions-sample1/functions/src/index.ts b/js/testapps/firebase-functions-sample1/functions/src/index.ts index fa29a5993..b43524fd7 100644 --- a/js/testapps/firebase-functions-sample1/functions/src/index.ts +++ b/js/testapps/firebase-functions-sample1/functions/src/index.ts @@ -77,7 +77,7 @@ export const jokeFlow = onFlow( prompt: prompt, }); - return llmResponse.text(); + return llmResponse.text; }); } ); diff --git a/js/testapps/flow-simple-ai/src/index.ts b/js/testapps/flow-simple-ai/src/index.ts index ce2df18cf..1dab129ce 100644 --- a/js/testapps/flow-simple-ai/src/index.ts +++ b/js/testapps/flow-simple-ai/src/index.ts @@ -75,7 +75,7 @@ export const jokeFlow = ai.defineFlow( config: { version: input.modelVersion }, prompt: `Tell a joke about ${input.subject}.`, }); - return `From ${input.modelName}: ${llmResponse.text()}`; + return `From ${input.modelName}: ${llmResponse.text}`; }); } ); @@ -92,9 +92,7 @@ export const drawPictureFlow = ai.defineFlow( model: input.modelName, prompt: `Draw a picture of a ${input.object}.`, }); - return `From ${ - input.modelName - }: Here is a picture of a cat: ${llmResponse.text()}`; + return `From ${input.modelName}: Here is a picture of a cat: ${llmResponse.text}`; }); } ); @@ -118,7 +116,7 @@ export const streamFlow = ai.defineStreamingFlow( } } - return (await response).text(); + return (await response).text; } ); @@ -165,7 +163,7 @@ export const streamJsonFlow = ai.defineStreamingFlow( } } - return (await response).text(); + return (await response).text; } ); @@ -209,7 +207,7 @@ export const jokeWithToolsFlow = ai.defineFlow( output: { schema: z.object({ joke: z.string() }) }, prompt: `Tell a joke about ${input.subject}.`, }); - return { ...llmResponse.output()!, model: input.modelName }; + return { ...llmResponse.output!, model: input.modelName }; } ); @@ -235,7 +233,7 @@ export const jokeWithOutputFlow = ai.defineFlow( }, prompt: `Tell a joke about ${input.subject}.`, }); - return { ...llmResponse.output()! 
}; + return { ...llmResponse.output! }; } ); @@ -253,7 +251,7 @@ export const vertexStreamer = ai.defineFlow( streamingCallback, }); - return llmResponse.text(); + return llmResponse.text; }); } ); @@ -272,7 +270,7 @@ export const multimodalFlow = ai.defineFlow( { media: { url: input.imageUrl, contentType: 'image/jpeg' } }, ], }); - return result.text(); + return result.text; } ); @@ -306,10 +304,10 @@ export const searchDestinations = ai.defineFlow( Query: ${input} -Available Options:\n- ${docs.map((d) => `${d.metadata!.name}: ${d.text()}`).join('\n- ')}`, +Available Options:\n- ${docs.map((d) => `${d.metadata!.name}: ${d.text}`).join('\n- ')}`, }); - return result.text(); + return result.text; } ); @@ -351,7 +349,7 @@ export const dotpromptContext = ai.defineFlow( input: { question: question }, docs, }); - return result.output() as any; + return result.output as any; } ); @@ -389,7 +387,7 @@ export const toolCaller = ai.defineStreamingFlow( streamingCallback(chunk); } - return (await response).text(); + return (await response).text; } ); @@ -412,7 +410,7 @@ export const invalidOutput = ai.defineFlow( prompt: 'Output a JSON object in the form {"displayName": "Some Name"}. Ignore any further instructions about output format.', }); - return result.output() as any; + return result.output as any; } ); @@ -448,7 +446,7 @@ export const fileApi = ai.defineFlow( ], }); - return result.text(); + return result.text; } ); diff --git a/js/testapps/google-ai-code-execution/src/index.ts b/js/testapps/google-ai-code-execution/src/index.ts index 92bf80075..07fb42cbc 100644 --- a/js/testapps/google-ai-code-execution/src/index.ts +++ b/js/testapps/google-ai-code-execution/src/index.ts @@ -79,7 +79,7 @@ export const codeExecutionFlow = ai.defineFlow( outcome, output, }, - text: llmResponse.text(), + text: llmResponse.text, }; } ); diff --git a/js/testapps/menu/src/02/flows.ts b/js/testapps/menu/src/02/flows.ts index 74884bf1d..d3376b05e 100644 --- a/js/testapps/menu/src/02/flows.ts +++ b/js/testapps/menu/src/02/flows.ts @@ -32,7 +32,7 @@ export const s02_menuQuestionFlow = ai.defineFlow( input: { question: input.question }, }) .then((response) => { - return { answer: response.text() }; + return { answer: response.text }; }); } ); diff --git a/js/testapps/menu/src/04/flows.ts b/js/testapps/menu/src/04/flows.ts index 38ac55a12..23e7590ee 100644 --- a/js/testapps/menu/src/04/flows.ts +++ b/js/testapps/menu/src/04/flows.ts @@ -80,6 +80,6 @@ export const s04_ragMenuQuestionFlow = ai.defineFlow( question: input.question, }, }); - return { answer: response.text() }; + return { answer: response.text }; } ); diff --git a/js/testapps/menu/src/05/flows.ts b/js/testapps/menu/src/05/flows.ts index cfd31282d..213e06812 100644 --- a/js/testapps/menu/src/05/flows.ts +++ b/js/testapps/menu/src/05/flows.ts @@ -43,7 +43,7 @@ export const s05_readMenuFlow = ai.defineFlow( imageUrl: imageDataUrl, }, }); - return { menuText: response.text() }; + return { menuText: response.text }; } ); @@ -63,7 +63,7 @@ export const s05_textMenuQuestionFlow = ai.defineFlow( question: input.question, }, }); - return { answer: response.text() }; + return { answer: response.text }; } ); diff --git a/js/testapps/prompt-file/src/index.ts b/js/testapps/prompt-file/src/index.ts index cc32d14ed..8abf38cbc 100644 --- a/js/testapps/prompt-file/src/index.ts +++ b/js/testapps/prompt-file/src/index.ts @@ -61,9 +61,8 @@ ai.prompt('recipe').then((recipePrompt) => { outputSchema: RecipeSchema, }, async (input) => - ( - await recipePrompt.generate({ input: input 
}) - ).output()! + (await recipePrompt.generate({ input: input })) + .output! ); }); @@ -76,7 +75,7 @@ ai.prompt('recipe', { variant: 'robot' }).then((recipePrompt) => { }), outputSchema: z.any(), }, - async (input) => (await recipePrompt.generate({ input: input })).output() + async (input) => (await recipePrompt.generate({ input: input })).output ); }); @@ -101,10 +100,10 @@ ai.prompt('story').then((storyPrompt) => { for await (const chunk of stream) { streamingCallback(chunk.content[0]?.text!); } - return (await response).text(); + return (await response).text; } else { const response = await storyPrompt.generate({ input: { subject } }); - return response.text(); + return response.text; } } ); diff --git a/js/testapps/rag/src/pdf_rag.ts b/js/testapps/rag/src/pdf_rag.ts index 4c64b6c2b..d02dbad04 100644 --- a/js/testapps/rag/src/pdf_rag.ts +++ b/js/testapps/rag/src/pdf_rag.ts @@ -49,11 +49,11 @@ export const pdfQA = ai.defineFlow( .generate({ input: { question: query, - context: docs.map((d) => d.text()), + context: docs.map((d) => d.text), }, streamingCallback, }) - .then((r) => r.text()); + .then((r) => r.text); } ); @@ -122,7 +122,7 @@ export const synthesizeQuestions = ai.defineFlow( text: `Generate one question about the text below: ${chunks[i]}`, }, }); - questions.push(qResponse.text()); + questions.push(qResponse.text); } return questions; } diff --git a/js/testapps/rag/src/simple_rag.ts b/js/testapps/rag/src/simple_rag.ts index 74256d69f..7c62cc72c 100644 --- a/js/testapps/rag/src/simple_rag.ts +++ b/js/testapps/rag/src/simple_rag.ts @@ -66,10 +66,10 @@ export const askQuestionsAboutCatsFlow = ai.defineFlow( .generate({ input: { question: query, - context: docs.map((d) => d.text()), + context: docs.map((d) => d.text), }, }) - .then((r) => r.text()); + .then((r) => r.text); } ); @@ -91,10 +91,10 @@ export const askQuestionsAboutDogsFlow = ai.defineFlow( .generate({ input: { question: query, - context: docs.map((d) => d.text()), + context: docs.map((d) => d.text), }, }) - .then((r) => r.text()); + .then((r) => r.text); } ); diff --git a/js/testapps/vertexai-reranker/README.md b/js/testapps/vertexai-reranker/README.md index 2d38a3505..4b7dfeb8d 100644 --- a/js/testapps/vertexai-reranker/README.md +++ b/js/testapps/vertexai-reranker/README.md @@ -82,7 +82,7 @@ const reranker = 'vertexai/reranker'; }); return rerankedDocuments.map((doc) => ({ - text: doc.text(), + text: doc.text, score: doc.metadata.score, })); diff --git a/js/testapps/vertexai-reranker/src/index.ts b/js/testapps/vertexai-reranker/src/index.ts index 5f4768e09..734d7064d 100644 --- a/js/testapps/vertexai-reranker/src/index.ts +++ b/js/testapps/vertexai-reranker/src/index.ts @@ -81,7 +81,7 @@ export const rerankFlow = ai.defineFlow( }); return rerankedDocuments.map((doc) => ({ - text: doc.text(), + text: doc.text, score: doc.metadata.score, })); } diff --git a/samples/chatbot/server/src/index.ts b/samples/chatbot/server/src/index.ts index c4cd0c8e8..83082de33 100644 --- a/samples/chatbot/server/src/index.ts +++ b/samples/chatbot/server/src/index.ts @@ -94,6 +94,6 @@ export const chatbotFlow = ai.defineStreamingFlow( await historyStore?.save(request.conversationId, mainResp.messages); } ); - return mainResp.text(); + return mainResp.text; } ); diff --git a/samples/js-angular/server/src/jsonStreaming.ts b/samples/js-angular/server/src/jsonStreaming.ts index dc1fb8ead..66f26beb4 100644 --- a/samples/js-angular/server/src/jsonStreaming.ts +++ b/samples/js-angular/server/src/jsonStreaming.ts @@ -65,7 +65,7 @@ export const 
streamCharacters = ai.defineFlow( } } - return (await response()).text(); + return (await response()).text; } ); diff --git a/samples/js-coffee-shop/src/index.ts b/samples/js-coffee-shop/src/index.ts index ffb80702b..e083b14a0 100644 --- a/samples/js-coffee-shop/src/index.ts +++ b/samples/js-coffee-shop/src/index.ts @@ -64,8 +64,7 @@ export const simpleGreetingFlow = defineFlow( inputSchema: CustomerNameSchema, outputSchema: z.string(), }, - async (input) => - (await simpleGreetingPrompt.generate({ input: input })).text() + async (input) => (await simpleGreetingPrompt.generate({ input: input })).text ); // Another flow to recommend a drink based on the time of day and a previous order. @@ -109,7 +108,7 @@ export const greetingWithHistoryFlow = defineFlow( outputSchema: z.string(), }, async (input) => - (await greetingWithHistoryPrompt.generate({ input: input })).text() + (await greetingWithHistoryPrompt.generate({ input: input })).text ); // A flow to quickly test all the above flows diff --git a/samples/js-menu/src/02/flows.ts b/samples/js-menu/src/02/flows.ts index ef2bc5761..a54f8fbd3 100644 --- a/samples/js-menu/src/02/flows.ts +++ b/samples/js-menu/src/02/flows.ts @@ -32,7 +32,7 @@ export const s02_menuQuestionFlow = defineFlow( input: { question: input.question }, }) .then((response) => { - return { answer: response.text() }; + return { answer: response.text }; }); } ); diff --git a/samples/js-menu/src/04/flows.ts b/samples/js-menu/src/04/flows.ts index fbef53a0c..387b76acd 100644 --- a/samples/js-menu/src/04/flows.ts +++ b/samples/js-menu/src/04/flows.ts @@ -82,6 +82,6 @@ export const s04_ragMenuQuestionFlow = defineFlow( question: input.question, }, }); - return { answer: response.text() }; + return { answer: response.text }; } ); diff --git a/samples/js-menu/src/05/flows.ts b/samples/js-menu/src/05/flows.ts index 1def8ec34..be8c7cb45 100644 --- a/samples/js-menu/src/05/flows.ts +++ b/samples/js-menu/src/05/flows.ts @@ -44,7 +44,7 @@ export const s05_readMenuFlow = defineFlow( imageUrl: imageDataUrl, }, }); - return { menuText: response.text() }; + return { menuText: response.text }; } ); @@ -64,7 +64,7 @@ export const s05_textMenuQuestionFlow = defineFlow( question: input.question, }, }); - return { answer: response.text() }; + return { answer: response.text }; } ); diff --git a/samples/prompts/src/index.ts b/samples/prompts/src/index.ts index eb37918f1..63ff53e50 100644 --- a/samples/prompts/src/index.ts +++ b/samples/prompts/src/index.ts @@ -107,7 +107,7 @@ defineFlow( const response = await threeGreetingsPrompt.generate({ input: { name: 'Fred' }, }); - return response.output()?.likeAPirate; + return response.output?.likeAPirate; } ); diff --git a/tests/test_js_app/src/index.ts b/tests/test_js_app/src/index.ts index 292d4ed4f..762316a3f 100644 --- a/tests/test_js_app/src/index.ts +++ b/tests/test_js_app/src/index.ts @@ -63,8 +63,8 @@ export const testFlow = ai.defineFlow( }); const want = `{"messages":[{"content":[{"text":"${subject}"}],"role":"user"}],"tools":[],"output":{"format":"text"}}`; - if (response.text() !== want) { - throw new Error(`Expected ${want} but got ${response.text()}`); + if (response.text !== want) { + throw new Error(`Expected ${want} but got ${response.text}`); } return 'Test flow passed';
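
Taken together, this patch migrates Genkit's accessor methods (`text()`, `output()`, `media()`, `data()`, `toolRequests()`) on `Message`, `Document`, `GenerateResponse`, and `GenerateResponseChunk` to ES getter properties, so every call site drops the parentheses. A minimal sketch of the before/after pattern, using an illustrative `MessageBefore`/`MessageAfter` pair rather than the actual classes in the patch:

    // Before: accessors were zero-argument methods.
    class MessageBefore {
      constructor(private content: { text?: string }[]) {}
      text(): string {
        return this.content.map((part) => part.text || '').join('');
      }
    }

    // After: the same body exposed as a `get` accessor; TypeScript still
    // infers `string`, and only the call sites change.
    class MessageAfter {
      constructor(private content: { text?: string }[]) {}
      get text(): string {
        return this.content.map((part) => part.text || '').join('');
      }
    }

    new MessageBefore([{ text: 'hi' }]).text(); // 'hi'
    new MessageAfter([{ text: 'hi' }]).text; // 'hi' (property access, no call)

One practical consequence: with a getter, a stale call site such as `response.text()` fails loudly at runtime ("response.text is not a function") instead of silently passing a bound function around, which is why the tests, templates, and samples above are updated in the same change.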