Compare commits
2 Commits
7f3e628954...d32f675db8
| Author | SHA1 | Date |
|---|---|---|
|  | d32f675db8 |  |
|  | e72cd513e5 |  |
@@ -5,6 +5,7 @@ import { useState, useRef, useEffect, useMemo, useCallback } from "preact/hooks";
 import LLM from "../utils/llm";
 import { highlight } from "../utils/highlight";
 import Prompt from "../utils/prompt";
+import { Tools } from "../utils/tools";
 import clsx from "clsx";

 export const ChatSidebar = () => {
@@ -105,11 +106,11 @@ export const ChatSidebar = () => {
     if (tool_calls) {
       const toolMessages: ChatMessage[] = [];
       for (const tool of tool_calls) {
-        if (tool.function.name === 'test') {
+        const content = await Tools.executeTool(appState, tool);
         const message: ChatMessage = {
           id: crypto.randomUUID(),
           role: 'tool',
-          content: `Test successful, received: ${JSON.stringify(tool.function.arguments)}`,
+          content,
           tool_call_id: tool.id,
         };
         dispatch({
@@ -119,7 +120,6 @@ export const ChatSidebar = () => {
         });
         toolMessages.push(message);
-        }
       }

       return sendMessage([...newMessages, assistantMessage, ...toolMessages]);
     }
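Taken together, the two hunks above drop the hard-coded `'test'` branch and route every tool call through the shared registry. A minimal sketch of the resulting flow — `ChatMessage`, the `dispatch` action shape, and the `appState` type are stand-ins I've assumed here (the diff elides the dispatch payload); only `Tools.executeTool` and the tool-message fields come from the commit:

```ts
type ChatMessage = {
  id: string;
  role: 'user' | 'assistant' | 'tool';
  content: string;
  tool_call_id?: string;
};

type ToolCall = { id: string; function: { name: string; arguments: string } };

// Assumed signature, matching how the sidebar calls it in the diff.
declare const Tools: {
  executeTool(appState: unknown, call: ToolCall): Promise<string>;
};

async function handleToolCalls(
  appState: unknown,
  tool_calls: ToolCall[],
  dispatch: (action: { type: string; message: ChatMessage }) => void, // action shape assumed
): Promise<ChatMessage[]> {
  const toolMessages: ChatMessage[] = [];
  for (const tool of tool_calls) {
    // Every call is dispatched through the registry instead of checking for 'test'.
    const content = await Tools.executeTool(appState, tool);
    const message: ChatMessage = {
      id: crypto.randomUUID(),
      role: 'tool',
      content,
      tool_call_id: tool.id,
    };
    dispatch({ type: 'ADD_MESSAGE', message }); // hypothetical action name
    toolMessages.push(message);
  }
  return toolMessages;
}
```

The collected tool messages are then appended after the assistant message and resent, as in `return sendMessage([...newMessages, assistantMessage, ...toolMessages]);`.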
@@ -41,7 +41,30 @@ export const SettingsModal = ({ onClose }: Props) => {
   const modelsData = useQuery(fetchModels, connectionToFetch);

   const isLoadingModels = connectionToFetch != null && modelsData == undefined;
-  const models = modelsData ?? [];
+  const groupedModels = useMemo(() => {
+    const sorted = (modelsData ?? []).sort((a, b) => {
+      // Sort by tool support first (true before false)
+      if (a.support_tools !== b.support_tools) {
+        return a.support_tools ? -1 : 1;
+      }
+      // Then by max context (bigger first, undefined treated as 0)
+      const aContext = a.max_context ?? 0;
+      const bContext = b.max_context ?? 0;
+      if (aContext !== bContext) {
+        return bContext - aContext;
+      }
+      // Then by name (alphabetically)
+      return a.id.localeCompare(b.id);
+    });
+
+    // Group by context size
+    const groups = Map.groupBy(sorted, m => m.max_context ?? 0);
+
+    // Convert to array sorted by context size (bigger first)
+    return Array.from(groups.entries())
+      .sort((a, b) => b[0] - a[0])
+      .map(([context, models]) => ({ context, models }));
+  }, [modelsData]);

   const handleBlur = () => {
     if (url && apiKey) {
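The new `groupedModels` memo sorts the model list and then buckets it by context size. A standalone sketch of the same logic, with the model shape inferred from the fields the diff touches (`id`, `support_tools`, `max_context`) — an assumption, not the project's actual type:

```ts
interface Model {
  id: string;
  support_tools?: boolean;
  max_context?: number;
}

function groupModels(models: Model[]) {
  const sorted = [...models].sort((a, b) => {
    // Tool-capable models first, then larger context, then alphabetical id.
    if (a.support_tools !== b.support_tools) return a.support_tools ? -1 : 1;
    const diff = (b.max_context ?? 0) - (a.max_context ?? 0);
    if (diff !== 0) return diff;
    return a.id.localeCompare(b.id);
  });

  // Bucket by context size; insertion order inside each bucket is preserved.
  const groups = Map.groupBy(sorted, m => m.max_context ?? 0);
  return Array.from(groups.entries())
    .sort((a, b) => b[0] - a[0])
    .map(([context, models]) => ({ context, models }));
}

// Example: two context buckets, tool-capable models first within each.
console.log(groupModels([
  { id: 'b-model', max_context: 8192 },
  { id: 'a-model', max_context: 8192, support_tools: true },
  { id: 'small', max_context: 4096 },
]));
// → [{ context: 8192, models: [a-model, b-model] }, { context: 4096, models: [small] }]
```

Because the sort runs before the grouping, tool-capable models lead within every context group. Note that `Map.groupBy` is an ES2024 addition (Node 21+, current browsers); older runtimes would need a `reduce`-based fallback.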
@@ -119,15 +142,21 @@ export const SettingsModal = ({ onClose }: Props) => {
       {connectionToTest ? (
         isLoadingModels ? (
           <p>Loading models...</p>
-        ) : models.length > 0 ? (
+        ) : groupedModels.length > 0 ? (
           <select
             value={selectedModel}
             onChange={setSelectedModel}
             class={styles.select}
           >
             <option value="">Select a model</option>
+            {groupedModels.map(({ context, models }) => (
+              <optgroup key={context} label={`${context} context`}>
             {models.map(m => (
-              <option key={m.id} value={m.id}>{m.id}</option>
+              <option key={m.id} value={m.id}>
+                {m.support_tools ? '🔧 ' : ''}{m.id} {m.max_length ? `(len: ${m.max_length})` : ''}
+              </option>
             ))}
+              </optgroup>
+            ))}
           </select>
         ) : (
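The option label in the new markup concatenates a wrench marker for tool-capable models, the model id, and an optional length hint. A tiny sketch of that expression as a plain function, using the field names assumed above (`max_length` appears only in this hunk):

```ts
interface ModelLabelFields {
  id: string;
  support_tools?: boolean;
  max_length?: number;
}

// Mirrors the inline JSX expression from the diff.
function optionLabel(m: ModelLabelFields): string {
  return `${m.support_tools ? '🔧 ' : ''}${m.id} ${m.max_length ? `(len: ${m.max_length})` : ''}`;
}

optionLabel({ id: 'a-model', support_tools: true, max_length: 4096 });
// → "🔧 a-model (len: 4096)"
```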
@@ -1,27 +1,8 @@
 import LLM from "./llm";
 import type { AppState } from "../contexts/state";
+import { Tools } from "./tools";

 namespace Prompt {

-  const tools: LLM.Tool[] = [
-    {
-      type: 'function',
-      function: {
-        name: 'test',
-        description: 'A simple test function',
-        parameters: {
-          type: 'object',
-          properties: {
-            message: {
-              type: 'string',
-              description: 'The test message',
-            },
-          },
-          required: ['message'],
-        },
-      },
-    },
-  ];
-
   export function compilePrompt(state: AppState, newMessages: LLM.ChatMessage[] = []): LLM.ChatCompletionRequest | null {
     const { currentStory, model } = state;
@@ -40,7 +21,7 @@ namespace Prompt {
     return {
       model,
      messages,
-      tools,
+      tools: Tools.getTools(),
      // TODO banned_tokens
     };
   }
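With this change the prompt module no longer owns any tool definitions; the request simply embeds whatever the registry exposes. For the single built-in `'test'` tool, `Tools.getTools()` yields the same OpenAI-style function descriptor that used to be hard-coded here:

```ts
import { Tools } from "./tools";

// What compilePrompt now attaches to the request.
const tools = Tools.getTools();
// [
//   {
//     type: 'function',
//     function: {
//       name: 'test',
//       description: 'A simple test function',
//       parameters: {
//         type: 'object',
//         properties: { message: { type: 'string', description: 'The test message' } },
//         required: ['message'],
//       },
//     },
//   },
// ]
```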
@@ -0,0 +1,65 @@
+import { formatError } from "@common/errors";
+import type { AppState } from "../contexts/state";
+import LLM from "./llm";
+
+export namespace Tools {
+  interface Tool {
+    description: string;
+    parameters: LLM.ToolObjectParameter;
+    handler(args: string | Record<string, any>, appState: AppState): unknown;
+  }
+
+  const TOOLS: Record<string, Tool> = {
+    'test': {
+      handler: async (args) => (
+        `Test successful, received: ${JSON.stringify(args)}`
+      ),
+      description: 'A simple test function',
+      parameters: {
+        type: 'object',
+        properties: {
+          message: {
+            type: 'string',
+            description: 'The test message',
+          },
+        },
+        required: ['message'],
+      },
+    }
+  };
+
+  export function getTools(): LLM.Tool[] {
+    return Object.entries(TOOLS).map(([key, tool]) => {
+      return {
+        type: 'function',
+        function: {
+          name: key,
+          description: tool.description,
+          parameters: tool.parameters,
+        },
+      };
+    });
+  }
+
+  export async function executeTool(appState: AppState, toolCall: LLM.ToolCall): Promise<string> {
+    const { function: fn } = toolCall;
+    let args = fn.arguments;
+    try {
+      if (typeof fn.arguments === 'string') {
+        args = JSON.parse(fn.arguments);
+      }
+    } catch { }
+
+    const handler = TOOLS[fn.name]?.handler;
+    if (!handler) {
+      return `Unknown tool: ${fn.name}`;
+    }
+
+    try {
+      const result = await handler(args, appState);
+      return JSON.stringify(result);
+    } catch (err) {
+      return formatError(err, 'Error executing tool');
+    }
+  }
+}
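The new module centralizes tool handling: `getTools()` turns the `TOOLS` map into the request's tool list, and `executeTool()` parses the call's JSON arguments, dispatches to the matching handler, and returns the stringified result (or an error message). Extending it is a matter of adding one registry entry. A hypothetical example — the `'current_time'` name, its (empty) parameters, and the handler body are invented for illustration; only the entry shape follows the `Tool` interface from the diff:

```ts
// Hypothetical new registry entry, shaped like the Tool interface above.
const currentTimeTool = {
  description: 'Returns the current time as an ISO 8601 string',
  parameters: {
    type: 'object' as const,
    properties: {},
    required: [] as string[],
  },
  handler: async () => new Date().toISOString(),
};
// Added to TOOLS as `'current_time': currentTimeTool`, it is automatically
// advertised by Tools.getTools() and dispatched by Tools.executeTool();
// unknown names still fall through to the "Unknown tool:" message.
```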