From 2a711a6deadcb5fa8d037866eeb93f8485bbaa4f Mon Sep 17 00:00:00 2001 From: Pabloader Date: Sun, 12 Apr 2026 10:51:35 +0000 Subject: [PATCH] Add error handling --- .../storywriter/components/chat-sidebar.tsx | 3 +++ src/games/storywriter/utils/llm.ts | 21 ++++++++++++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/games/storywriter/components/chat-sidebar.tsx b/src/games/storywriter/components/chat-sidebar.tsx index fa1180a..4e77243 100644 --- a/src/games/storywriter/components/chat-sidebar.tsx +++ b/src/games/storywriter/components/chat-sidebar.tsx @@ -201,6 +201,9 @@ export const ChatPanel = ({ visible }: { visible: boolean }) => { if (abortControllerRef.current?.signal.aborted) { break; } + if ('error' in chunk) { + throw new Error(chunk.error); + } const delta = chunk.choices[0]?.delta; if (delta?.tool_calls) { diff --git a/src/games/storywriter/utils/llm.ts b/src/games/storywriter/utils/llm.ts index 7c09684..ac6b349 100644 --- a/src/games/storywriter/utils/llm.ts +++ b/src/games/storywriter/utils/llm.ts @@ -114,6 +114,10 @@ namespace LLM { choices: ChatCompletionChunkChoice[]; } + export interface ChatCompletionError { + error: string; + } + export interface ModelInfo { id: string; object: 'model'; @@ -246,19 +250,22 @@ namespace LLM { return request(connection, '/v1/models'); } - export async function countTokens(connection: Connection, body: CountTokensRequest): Promise { + export async function countTokens(connection: Connection, body: CountTokensRequest) { return request(connection, '/v1/responses/input_tokens', 'POST', body); } - export async function* generateStream(connection: Connection, config: ChatCompletionRequest): AsyncGenerator { - yield* streamRequest(connection, '/v1/chat/completions', 'POST', { + export async function* generateStream(connection: Connection, config: ChatCompletionRequest) { + yield* streamRequest(connection, '/v1/chat/completions', 'POST', { ...config, stream: true, }); } - export async function 
generate(connection: Connection, config: ChatCompletionRequest): Promise { - return request(connection, '/v1/chat/completions', 'POST', config); + export async function generate(connection: Connection, config: ChatCompletionRequest) { + return request(connection, '/v1/chat/completions', 'POST', { + ...config, + stream: false, + }); } const SUMMARIZATION_PROMPT = `Summarize the following text concisely while preserving key information and meaning. {level} @@ -291,6 +298,10 @@ Provide a clear and coherent summary:`; max_tokens: 500, }); + if ('error' in response) { + throw new Error(response.error); + } + return response.choices[0]?.message.content ?? ''; } }