1
0
Fork 0

Compare commits

..

No commits in common. "d32f675db88561ba6d8ea978bf7aca4557ad9fad" and "7f3e628954041703b84eee99ee1ce517b3b2ba05" have entirely different histories.

4 changed files with 39 additions and 114 deletions

View File

@ -5,7 +5,6 @@ import { useState, useRef, useEffect, useMemo, useCallback } from "preact/hooks"
import LLM from "../utils/llm";
import { highlight } from "../utils/highlight";
import Prompt from "../utils/prompt";
import { Tools } from "../utils/tools";
import clsx from "clsx";
export const ChatSidebar = () => {
@ -106,19 +105,20 @@ export const ChatSidebar = () => {
if (tool_calls) {
const toolMessages: ChatMessage[] = [];
for (const tool of tool_calls) {
const content = await Tools.executeTool(appState, tool);
const message: ChatMessage = {
id: crypto.randomUUID(),
role: 'tool',
content,
tool_call_id: tool.id,
};
dispatch({
type: 'ADD_CHAT_MESSAGE',
storyId: currentStory.id,
message,
});
toolMessages.push(message);
if (tool.function.name === 'test') {
const message: ChatMessage = {
id: crypto.randomUUID(),
role: 'tool',
content: `Test successful, received: ${JSON.stringify(tool.function.arguments)}`,
tool_call_id: tool.id,
};
dispatch({
type: 'ADD_CHAT_MESSAGE',
storyId: currentStory.id,
message,
});
toolMessages.push(message);
}
}
return sendMessage([...newMessages, assistantMessage, ...toolMessages]);

View File

@ -41,30 +41,7 @@ export const SettingsModal = ({ onClose }: Props) => {
const modelsData = useQuery(fetchModels, connectionToFetch);
const isLoadingModels = connectionToFetch != null && modelsData == undefined;
const groupedModels = useMemo(() => {
  // Copy before sorting: Array.prototype.sort mutates in place, and
  // modelsData is the array owned by useQuery's cache — sorting it
  // directly would silently reorder shared state.
  const sorted = [...(modelsData ?? [])].sort((a, b) => {
    // Sort by tool support first (true before false)
    if (a.support_tools !== b.support_tools) {
      return a.support_tools ? -1 : 1;
    }
    // Then by max context (bigger first, undefined treated as 0)
    const aContext = a.max_context ?? 0;
    const bContext = b.max_context ?? 0;
    if (aContext !== bContext) {
      return bContext - aContext;
    }
    // Then by name (alphabetically)
    return a.id.localeCompare(b.id);
  });
  // Group by context size
  const groups = Map.groupBy(sorted, m => m.max_context ?? 0);
  // Convert to array of { context, models } sorted by context size (bigger first)
  return Array.from(groups.entries())
    .sort((a, b) => b[0] - a[0])
    .map(([context, models]) => ({ context, models }));
}, [modelsData]);
const models = modelsData ?? [];
const handleBlur = () => {
if (url && apiKey) {
@ -142,21 +119,15 @@ export const SettingsModal = ({ onClose }: Props) => {
{connectionToTest ? (
isLoadingModels ? (
<p>Loading models...</p>
) : groupedModels.length > 0 ? (
) : models.length > 0 ? (
<select
value={selectedModel}
onChange={setSelectedModel}
class={styles.select}
>
<option value="">Select a model</option>
{groupedModels.map(({ context, models }) => (
<optgroup key={context} label={`${context} context`}>
{models.map(m => (
<option key={m.id} value={m.id}>
{m.support_tools ? '🔧 ' : ''}{m.id} {m.max_length ? `(len: ${m.max_length})` : ''}
</option>
))}
</optgroup>
{models.map(m => (
<option key={m.id} value={m.id}>{m.id}</option>
))}
</select>
) : (

View File

@ -1,8 +1,27 @@
import LLM from "./llm";
import type { AppState } from "../contexts/state";
import { Tools } from "./tools";
namespace Prompt {
// Tool declarations advertised to the LLM (OpenAI-style function-calling
// schema). This is a data literal sent on the wire verbatim — keep the key
// order and contents exactly as the API expects.
// NOTE(review): only the single hard-coded 'test' tool is declared here;
// presumably its execution is handled where tool_calls are processed —
// confirm the declaration and the handler stay in sync.
const tools: LLM.Tool[] = [
{
type: 'function',
function: {
name: 'test',
description: 'A simple test function',
parameters: {
type: 'object',
properties: {
// Single required argument echoed back by the test handler.
message: {
type: 'string',
description: 'The test message',
},
},
required: ['message'],
},
},
},
];
export function compilePrompt(state: AppState, newMessages: LLM.ChatMessage[] = []): LLM.ChatCompletionRequest | null {
const { currentStory, model } = state;
@ -21,7 +40,7 @@ namespace Prompt {
return {
model,
messages,
tools: Tools.getTools(),
tools,
// TODO banned_tokens
};
}

View File

@ -1,65 +0,0 @@
import { formatError } from "@common/errors";
import type { AppState } from "../contexts/state";
import LLM from "./llm";
export namespace Tools {
  // Registry entry: the JSON-schema metadata advertised to the LLM plus the
  // runtime handler invoked when the model calls the tool.
  interface Tool {
    description: string;
    parameters: LLM.ToolObjectParameter;
    handler(args: string | Record<string, any>, appState: AppState): unknown;
  }

  const TOOLS: Record<string, Tool> = {
    'test': {
      handler: async (args) => (
        `Test successful, received: ${JSON.stringify(args)}`
      ),
      description: 'A simple test function',
      parameters: {
        type: 'object',
        properties: {
          message: {
            type: 'string',
            description: 'The test message',
          },
        },
        required: ['message'],
      },
    }
  };

  /**
   * Compile the registry into the tool list sent with a chat-completion
   * request (one `{ type: 'function', function: {...} }` entry per tool).
   */
  export function getTools(): LLM.Tool[] {
    return Object.entries(TOOLS).map(([key, tool]) => {
      return {
        type: 'function',
        function: {
          name: key,
          description: tool.description,
          parameters: tool.parameters,
        },
      };
    });
  }

  /**
   * Run the tool named in `toolCall` and return its result as plain text
   * suitable for a `role: 'tool'` chat message.
   *
   * Never throws: an unknown tool name or a handler failure is reported as a
   * human-readable string instead.
   */
  export async function executeTool(appState: AppState, toolCall: LLM.ToolCall): Promise<string> {
    const { function: fn } = toolCall;
    let args = fn.arguments;
    // Best effort: models usually send arguments as a JSON string. If parsing
    // fails, fall through and hand the raw string to the handler unchanged.
    try {
      if (typeof fn.arguments === 'string') {
        args = JSON.parse(fn.arguments);
      }
    } catch { }
    const handler = TOOLS[fn.name]?.handler;
    if (!handler) {
      return `Unknown tool: ${fn.name}`;
    }
    try {
      const result = await handler(args, appState);
      // Fix: the previous code JSON.stringify'd every result, so a handler
      // returning a string (like 'test' above) came back quote-wrapped
      // ("\"Test successful…\""), and a handler returning undefined made
      // JSON.stringify return undefined at runtime, breaking the
      // Promise<string> contract. Pass strings through verbatim and
      // stringify everything else, defaulting to '' for no result.
      if (typeof result === 'string') {
        return result;
      }
      return JSON.stringify(result) ?? '';
    } catch (err) {
      return formatError(err, 'Error executing tool');
    }
  }
}