1
0
Fork 0

Add error handling

This commit is contained in:
Pabloader 2026-04-12 10:51:35 +00:00
parent 52060c1170
commit 2a711a6dea
2 changed files with 19 additions and 5 deletions

View File

@ -201,6 +201,9 @@ export const ChatPanel = ({ visible }: { visible: boolean }) => {
if (abortControllerRef.current?.signal.aborted) { if (abortControllerRef.current?.signal.aborted) {
break; break;
} }
if ('error' in chunk) {
throw new Error(chunk.error);
}
const delta = chunk.choices[0]?.delta; const delta = chunk.choices[0]?.delta;
if (delta?.tool_calls) { if (delta?.tool_calls) {

View File

@ -114,6 +114,10 @@ namespace LLM {
choices: ChatCompletionChunkChoice[]; choices: ChatCompletionChunkChoice[];
} }
export interface ChatCompletionError {
error: string;
}
export interface ModelInfo { export interface ModelInfo {
id: string; id: string;
object: 'model'; object: 'model';
@ -246,19 +250,22 @@ namespace LLM {
return request<ModelsResponse>(connection, '/v1/models'); return request<ModelsResponse>(connection, '/v1/models');
} }
export async function countTokens(connection: Connection, body: CountTokensRequest): Promise<CountTokensResponse> { export async function countTokens(connection: Connection, body: CountTokensRequest) {
return request<CountTokensResponse>(connection, '/v1/responses/input_tokens', 'POST', body); return request<CountTokensResponse>(connection, '/v1/responses/input_tokens', 'POST', body);
} }
export async function* generateStream(connection: Connection, config: ChatCompletionRequest): AsyncGenerator<ChatCompletionChunk> { export async function* generateStream(connection: Connection, config: ChatCompletionRequest) {
yield* streamRequest<ChatCompletionChunk>(connection, '/v1/chat/completions', 'POST', { yield* streamRequest<ChatCompletionChunk | ChatCompletionError>(connection, '/v1/chat/completions', 'POST', {
...config, ...config,
stream: true, stream: true,
}); });
} }
export async function generate(connection: Connection, config: ChatCompletionRequest): Promise<ChatCompletionResponse> { export async function generate(connection: Connection, config: ChatCompletionRequest) {
return request<ChatCompletionResponse>(connection, '/v1/chat/completions', 'POST', config); return request<ChatCompletionResponse| ChatCompletionError>(connection, '/v1/chat/completions', 'POST', {
...config,
stream: false,
});
} }
const SUMMARIZATION_PROMPT = `Summarize the following text concisely while preserving key information and meaning. {level} const SUMMARIZATION_PROMPT = `Summarize the following text concisely while preserving key information and meaning. {level}
@ -291,6 +298,10 @@ Provide a clear and coherent summary:`;
max_tokens: 500, max_tokens: 500,
}); });
if ('error' in response) {
throw new Error(response.error);
}
return response.choices[0]?.message.content ?? ''; return response.choices[0]?.message.content ?? '';
} }
} }