Skip to content

Commit 41d5736

Browse files
authored
ai/core: export language model types (#1463)
1 parent b4c68ec commit 41d5736

File tree

12 files changed

+119
-59
lines changed

12 files changed

+119
-59
lines changed

.changeset/afraid-panthers-sing.md

+6
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
'@ai-sdk/provider': patch
3+
'ai': patch
4+
---
5+
6+
ai/core: re-expose language model types.

packages/core/core/generate-object/generate-object.ts

+15-20
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,4 @@
1-
import {
2-
LanguageModelV1,
3-
LanguageModelV1CallWarning,
4-
LanguageModelV1FinishReason,
5-
LanguageModelV1LogProbs,
6-
NoTextGeneratedError,
7-
} from '@ai-sdk/provider';
1+
import { NoObjectGeneratedError } from '@ai-sdk/provider';
82
import { safeParseJSON } from '@ai-sdk/provider-utils';
93
import { z } from 'zod';
104
import { TokenUsage, calculateTokenUsage } from '../generate-text/token-usage';
@@ -13,6 +7,7 @@ import { convertToLanguageModelPrompt } from '../prompt/convert-to-language-mode
137
import { getValidatedPrompt } from '../prompt/get-validated-prompt';
148
import { prepareCallSettings } from '../prompt/prepare-call-settings';
159
import { Prompt } from '../prompt/prompt';
10+
import { CallWarning, FinishReason, LanguageModel, LogProbs } from '../types';
1611
import { convertZodToJSONSchema } from '../util/convert-zod-to-json-schema';
1712
import { retryWithExponentialBackoff } from '../util/retry-with-exponential-backoff';
1813
import { injectJsonSchemaIntoSystem } from './inject-json-schema-into-system';
@@ -68,7 +63,7 @@ export async function experimental_generateObject<T>({
6863
/**
6964
The language model to use.
7065
*/
71-
model: LanguageModelV1;
66+
model: LanguageModel;
7267

7368
/**
7469
The schema of the object that the model should generate.
@@ -91,11 +86,11 @@ Default and recommended: 'auto' (best mode for the model).
9186
}
9287

9388
let result: string;
94-
let finishReason: LanguageModelV1FinishReason;
89+
let finishReason: FinishReason;
9590
let usage: Parameters<typeof calculateTokenUsage>[0];
96-
let warnings: LanguageModelV1CallWarning[] | undefined;
91+
let warnings: CallWarning[] | undefined;
9792
let rawResponse: { headers?: Record<string, string> } | undefined;
98-
let logprobs: LanguageModelV1LogProbs | undefined;
93+
let logprobs: LogProbs | undefined;
9994

10095
switch (mode) {
10196
case 'json': {
@@ -116,7 +111,7 @@ Default and recommended: 'auto' (best mode for the model).
116111
});
117112

118113
if (generateResult.text === undefined) {
119-
throw new NoTextGeneratedError();
114+
throw new NoObjectGeneratedError();
120115
}
121116

122117
result = generateResult.text;
@@ -147,7 +142,7 @@ Default and recommended: 'auto' (best mode for the model).
147142
);
148143

149144
if (generateResult.text === undefined) {
150-
throw new NoTextGeneratedError();
145+
throw new NoObjectGeneratedError();
151146
}
152147

153148
result = generateResult.text;
@@ -188,7 +183,7 @@ Default and recommended: 'auto' (best mode for the model).
188183
const functionArgs = generateResult.toolCalls?.[0]?.args;
189184

190185
if (functionArgs === undefined) {
191-
throw new NoTextGeneratedError();
186+
throw new NoObjectGeneratedError();
192187
}
193188

194189
result = functionArgs;
@@ -239,7 +234,7 @@ The generated object (typed according to the schema).
239234
/**
240235
The reason why the generation finished.
241236
*/
242-
readonly finishReason: LanguageModelV1FinishReason;
237+
readonly finishReason: FinishReason;
243238

244239
/**
245240
The token usage of the generated text.
@@ -249,7 +244,7 @@ The token usage of the generated text.
249244
/**
250245
Warnings from the model provider (e.g. unsupported settings)
251246
*/
252-
readonly warnings: LanguageModelV1CallWarning[] | undefined;
247+
readonly warnings: CallWarning[] | undefined;
253248

254249
/**
255250
Optional raw response data.
@@ -265,17 +260,17 @@ Response headers.
265260
Logprobs for the completion.
266261
`undefined` if the mode does not support logprobs or if was not enabled
267262
*/
268-
readonly logprobs: LanguageModelV1LogProbs | undefined;
263+
readonly logprobs: LogProbs | undefined;
269264

270265
constructor(options: {
271266
object: T;
272-
finishReason: LanguageModelV1FinishReason;
267+
finishReason: FinishReason;
273268
usage: TokenUsage;
274-
warnings: LanguageModelV1CallWarning[] | undefined;
269+
warnings: CallWarning[] | undefined;
275270
rawResponse?: {
276271
headers?: Record<string, string>;
277272
};
278-
logprobs: LanguageModelV1LogProbs | undefined;
273+
logprobs: LogProbs | undefined;
279274
}) {
280275
this.object = options.object;
281276
this.finishReason = options.finishReason;

packages/core/core/generate-object/stream-object.ts

+7-10
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,15 @@
11
import {
2-
LanguageModelV1,
32
LanguageModelV1CallOptions,
4-
LanguageModelV1CallWarning,
5-
LanguageModelV1FinishReason,
6-
LanguageModelV1LogProbs,
73
LanguageModelV1StreamPart,
84
} from '@ai-sdk/provider';
95
import { z } from 'zod';
6+
import { calculateTokenUsage } from '../generate-text/token-usage';
107
import { CallSettings } from '../prompt/call-settings';
118
import { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';
129
import { getValidatedPrompt } from '../prompt/get-validated-prompt';
1310
import { prepareCallSettings } from '../prompt/prepare-call-settings';
1411
import { Prompt } from '../prompt/prompt';
12+
import { CallWarning, FinishReason, LanguageModel, LogProbs } from '../types';
1513
import {
1614
AsyncIterableStream,
1715
createAsyncIterableStream,
@@ -22,7 +20,6 @@ import { isDeepEqualData } from '../util/is-deep-equal-data';
2220
import { parsePartialJson } from '../util/parse-partial-json';
2321
import { retryWithExponentialBackoff } from '../util/retry-with-exponential-backoff';
2422
import { injectJsonSchemaIntoSystem } from './inject-json-schema-into-system';
25-
import { calculateTokenUsage } from '../generate-text/token-usage';
2623

2724
/**
2825
Generate a structured, typed object for a given prompt and schema using a language model.
@@ -75,7 +72,7 @@ export async function experimental_streamObject<T>({
7572
/**
7673
The language model to use.
7774
*/
78-
model: LanguageModelV1;
75+
model: LanguageModel;
7976

8077
/**
8178
The schema of the object that the model should generate.
@@ -231,8 +228,8 @@ export type ObjectStreamPartInput =
231228
}
232229
| {
233230
type: 'finish';
234-
finishReason: LanguageModelV1FinishReason;
235-
logprobs?: LanguageModelV1LogProbs;
231+
finishReason: FinishReason;
232+
logprobs?: LogProbs;
236233
usage: {
237234
promptTokens: number;
238235
completionTokens: number;
@@ -258,7 +255,7 @@ export class StreamObjectResult<T> {
258255
/**
259256
Warnings from the model provider (e.g. unsupported settings)
260257
*/
261-
readonly warnings: LanguageModelV1CallWarning[] | undefined;
258+
readonly warnings: CallWarning[] | undefined;
262259

263260
/**
264261
Optional raw response data.
@@ -276,7 +273,7 @@ Response headers.
276273
rawResponse,
277274
}: {
278275
stream: ReadableStream<string | ObjectStreamPartInput>;
279-
warnings: LanguageModelV1CallWarning[] | undefined;
276+
warnings: CallWarning[] | undefined;
280277
rawResponse?: {
281278
headers?: Record<string, string>;
282279
};

packages/core/core/generate-text/generate-text.ts

+8-13
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,10 @@
1-
import {
2-
LanguageModelV1,
3-
LanguageModelV1CallWarning,
4-
LanguageModelV1FinishReason,
5-
LanguageModelV1LogProbs,
6-
} from '@ai-sdk/provider';
71
import { CallSettings } from '../prompt/call-settings';
82
import { convertToLanguageModelPrompt } from '../prompt/convert-to-language-model-prompt';
93
import { getValidatedPrompt } from '../prompt/get-validated-prompt';
104
import { prepareCallSettings } from '../prompt/prepare-call-settings';
115
import { Prompt } from '../prompt/prompt';
126
import { ExperimentalTool } from '../tool/tool';
7+
import { CallWarning, FinishReason, LanguageModel, LogProbs } from '../types';
138
import { convertZodToJSONSchema } from '../util/convert-zod-to-json-schema';
149
import { retryWithExponentialBackoff } from '../util/retry-with-exponential-backoff';
1510
import { TokenUsage, calculateTokenUsage } from './token-usage';
@@ -66,7 +61,7 @@ export async function experimental_generateText<
6661
/**
6762
The language model to use.
6863
*/
69-
model: LanguageModelV1;
64+
model: LanguageModel;
7065

7166
/**
7267
The tools that the model can call. The model needs to support calling tools.
@@ -177,7 +172,7 @@ The results of the tool calls.
177172
/**
178173
The reason why the generation finished.
179174
*/
180-
readonly finishReason: LanguageModelV1FinishReason;
175+
readonly finishReason: FinishReason;
181176

182177
/**
183178
The token usage of the generated text.
@@ -187,7 +182,7 @@ The token usage of the generated text.
187182
/**
188183
Warnings from the model provider (e.g. unsupported settings)
189184
*/
190-
readonly warnings: LanguageModelV1CallWarning[] | undefined;
185+
readonly warnings: CallWarning[] | undefined;
191186

192187
/**
193188
Optional raw response data.
@@ -203,19 +198,19 @@ Response headers.
203198
Logprobs for the completion.
204199
`undefined` if the mode does not support logprobs or if was not enabled
205200
*/
206-
readonly logprobs: LanguageModelV1LogProbs | undefined;
201+
readonly logprobs: LogProbs | undefined;
207202

208203
constructor(options: {
209204
text: string;
210205
toolCalls: ToToolCallArray<TOOLS>;
211206
toolResults: ToToolResultArray<TOOLS>;
212-
finishReason: LanguageModelV1FinishReason;
207+
finishReason: FinishReason;
213208
usage: TokenUsage;
214-
warnings: LanguageModelV1CallWarning[] | undefined;
209+
warnings: CallWarning[] | undefined;
215210
rawResponse?: {
216211
headers?: Record<string, string>;
217212
};
218-
logprobs: LanguageModelV1LogProbs | undefined;
213+
logprobs: LogProbs | undefined;
219214
}) {
220215
this.text = options.text;
221216
this.toolCalls = options.toolCalls;

packages/core/core/generate-text/stream-text.ts

+6-11
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,3 @@
1-
import {
2-
LanguageModelV1,
3-
LanguageModelV1CallWarning,
4-
LanguageModelV1FinishReason,
5-
LanguageModelV1LogProbs,
6-
} from '@ai-sdk/provider';
71
import { ServerResponse } from 'node:http';
82
import {
93
AIStreamCallbacksAndOptions,
@@ -17,6 +11,7 @@ import { getValidatedPrompt } from '../prompt/get-validated-prompt';
1711
import { prepareCallSettings } from '../prompt/prepare-call-settings';
1812
import { Prompt } from '../prompt/prompt';
1913
import { ExperimentalTool } from '../tool';
14+
import { CallWarning, FinishReason, LanguageModel, LogProbs } from '../types';
2015
import {
2116
AsyncIterableStream,
2217
createAsyncIterableStream,
@@ -77,7 +72,7 @@ export async function experimental_streamText<
7772
/**
7873
The language model to use.
7974
*/
80-
model: LanguageModelV1;
75+
model: LanguageModel;
8176

8277
/**
8378
The tools that the model can call. The model needs to support calling tools.
@@ -134,8 +129,8 @@ export type TextStreamPart<TOOLS extends Record<string, ExperimentalTool>> =
134129
} & ToToolResult<TOOLS>)
135130
| {
136131
type: 'finish';
137-
finishReason: LanguageModelV1FinishReason;
138-
logprobs?: LanguageModelV1LogProbs;
132+
finishReason: FinishReason;
133+
logprobs?: LogProbs;
139134
usage: {
140135
promptTokens: number;
141136
completionTokens: number;
@@ -152,7 +147,7 @@ export class StreamTextResult<TOOLS extends Record<string, ExperimentalTool>> {
152147
/**
153148
Warnings from the model provider (e.g. unsupported settings)
154149
*/
155-
readonly warnings: LanguageModelV1CallWarning[] | undefined;
150+
readonly warnings: CallWarning[] | undefined;
156151

157152
/**
158153
Optional raw response data.
@@ -170,7 +165,7 @@ Response headers.
170165
rawResponse,
171166
}: {
172167
stream: ReadableStream<TextStreamPart<TOOLS>>;
173-
warnings: LanguageModelV1CallWarning[] | undefined;
168+
warnings: CallWarning[] | undefined;
174169
rawResponse?: {
175170
headers?: Record<string, string>;
176171
};

packages/core/core/index.ts

+1
Original file line numberDiff line numberDiff line change
@@ -2,4 +2,5 @@ export * from './generate-object';
22
export * from './generate-text';
33
export * from './prompt';
44
export * from './tool';
5+
export * from './types';
56
export * from './util/deep-partial';

packages/core/core/types/errors.ts

+18
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
export {
2+
APICallError,
3+
EmptyResponseBodyError,
4+
InvalidArgumentError,
5+
InvalidDataContentError,
6+
InvalidPromptError,
7+
InvalidResponseDataError,
8+
InvalidToolArgumentsError,
9+
JSONParseError,
10+
LoadAPIKeyError,
11+
NoObjectGeneratedError,
12+
NoSuchToolError,
13+
RetryError,
14+
ToolCallParseError,
15+
TypeValidationError,
16+
UnsupportedFunctionalityError,
17+
UnsupportedJSONSchemaError,
18+
} from '@ai-sdk/provider';

packages/core/core/types/index.ts

+2
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
export * from './errors';
2+
export * from './language-model';
packages/core/core/types/language-model.ts

+35
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
import {
2+
LanguageModelV1,
3+
LanguageModelV1CallWarning,
4+
LanguageModelV1FinishReason,
5+
LanguageModelV1LogProbs,
6+
} from '@ai-sdk/provider';
7+
8+
/**
9+
Language model that is used by the AI SDK Core functions.
10+
*/
11+
export type LanguageModel = LanguageModelV1;
12+
13+
/**
14+
Reason why a language model finished generating a response.
15+
16+
Can be one of the following:
17+
- `stop`: model generated stop sequence
18+
- `length`: model generated maximum number of tokens
19+
- `content-filter`: content filter violation stopped the model
20+
- `tool-calls`: model triggered tool calls
21+
- `error`: model stopped because of an error
22+
- `other`: model stopped for other reasons
23+
*/
24+
export type FinishReason = LanguageModelV1FinishReason;
25+
26+
/**
27+
Log probabilities for each token and its top log probabilities.
28+
*/
29+
export type LogProbs = LanguageModelV1LogProbs;
30+
31+
/**
32+
Warning from the model provider for this call. The call will proceed, but e.g.
33+
some settings might not be supported, which can lead to suboptimal results.
34+
*/
35+
export type CallWarning = LanguageModelV1CallWarning;

packages/provider/src/errors/no-object-generated-error.ts

+7-5
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,16 @@
1-
export class NoTextGeneratedError extends Error {
1+
export class NoObjectGeneratedError extends Error {
22
readonly cause: unknown;
33

44
constructor() {
5-
super(`No text generated.`);
5+
super(`No object generated.`);
66

7-
this.name = 'AI_NoTextGeneratedError';
7+
this.name = 'AI_NoObjectGeneratedError';
88
}
99

10-
static isNoTextGeneratedError(error: unknown): error is NoTextGeneratedError {
11-
return error instanceof Error && error.name === 'AI_NoTextGeneratedError';
10+
static isNoTextGeneratedError(
11+
error: unknown,
12+
): error is NoObjectGeneratedError {
13+
return error instanceof Error && error.name === 'AI_NoObjectGeneratedError';
1214
}
1315

1416
toJSON() {

0 commit comments

Comments
 (0)