Tokens counter
commit ca4f87db4b
parent b2b84fa819
@@ -125,6 +125,18 @@
   border-top: 1px solid var(--border);
 }
 
+.optionsRow {
+  display: flex;
+  justify-content: space-between;
+  align-items: center;
+  gap: 8px;
+}
+
+.tokenCounter {
+  font-size: 11px;
+  color: var(--text-muted);
+}
+
 .toggleContainer {
   display: flex;
   align-items: center;
@@ -44,6 +44,7 @@ export const ChatSidebar = () => {
   const [input, setInput] = useInputState('');
   const [isLoading, setIsLoading] = useState(false);
   const [isCollapsed, setCollapsed] = useState(false);
+  const [tokenCount, setTokenCount] = useState<{ taken: number; total: number } | null>(null);
 
   const [error, setError] = useState<string | null>(null);
   const messagesRef = useRef<HTMLDivElement>(null);
@@ -73,6 +74,43 @@ export const ChatSidebar = () => {
     };
   }, []);
 
+  useEffect(() => {
+    if (!currentStory || !connection || !model) {
+      setTokenCount(null);
+      return;
+    }
+
+    const countTokens = async () => {
+      try {
+        const messages: LLM.ChatMessage[] = [];
+
+        if (input.trim()) {
+          messages.push({ role: 'user', content: input.trim() });
+        }
+
+        const chatRequest = Prompt.compilePrompt(appState, messages);
+        const countRequest: LLM.CountTokensRequest = {
+          model: model.id,
+          input: chatRequest?.messages ?? [],
+          tools: chatRequest?.tools,
+          enable_thinking: chatRequest?.enable_thinking,
+        };
+
+        const response = await LLM.countTokens(connection, countRequest);
+
+        setTokenCount({
+          taken: response.input_tokens,
+          total: model.max_context ?? response.input_tokens,
+        });
+      } catch {
+        setTokenCount(null);
+      }
+    };
+
+    const timeoutId = setTimeout(countTokens, 300);
+    return () => clearTimeout(timeoutId);
+  }, [currentStory, connection, model, input, currentStory?.chatMessages.length]);
+
   const sendMessage = useCallback(async (newMessages: ChatMessage[]) => {
     if (!currentStory || !connection || !model) return;
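The effect above debounces counting: each dependency change arms a 300 ms timer, and the effect cleanup clears it, so a burst of keystrokes produces a single countTokens call. Below is a standalone sketch of the same pattern, assuming Preact (the component uses class props and preact/hooks-style hooks); the estimateTokens callback is a hypothetical stand-in for the LLM.countTokens round trip:

import { useEffect, useState } from 'preact/hooks';

// Debounce an async estimate: schedule it, cancel on any dependency change.
export function useDebouncedTokenCount(
  text: string,
  estimateTokens: (text: string) => Promise<number>, // hypothetical stand-in
): number | null {
  const [count, setCount] = useState<number | null>(null);

  useEffect(() => {
    let cancelled = false;
    const id = setTimeout(async () => {
      try {
        const n = await estimateTokens(text);
        if (!cancelled) setCount(n); // drop results that arrive after cleanup
      } catch {
        if (!cancelled) setCount(null);
      }
    }, 300);
    return () => {
      cancelled = true; // guards against a stale response after the timer fired
      clearTimeout(id); // cancels a pending, not-yet-fired timer
    };
  }, [text, estimateTokens]);

  return count;
}

The cancelled flag covers the window that clearTimeout cannot: once the timer has fired, only the flag stops a late response from overwriting newer state.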
@@ -277,7 +315,7 @@ export const ChatSidebar = () => {
         )}
         {currentStory && (
           <div class={styles.inputContainer}>
-            {model?.support_thinking &&
+            <div class={styles.optionsRow}>
               <label class={styles.toggleContainer}>
                 <input
                   type="checkbox"
@@ -290,7 +328,12 @@ export const ChatSidebar = () => {
                 />
                 <span>Enable thinking</span>
               </label>
-            }
+              {tokenCount && (
+                <div class={styles.tokenCounter}>
+                  {tokenCount.taken} / {tokenCount.total} tokens
+                </div>
+              )}
+            </div>
             <textarea
               class={styles.input}
               value={input}
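Read together, the two JSX hunks replace the bare support_thinking conditional with an options row that holds the thinking toggle and, when a count is available, the counter. Pieced together from the diff alone (attributes the hunks do not show are elided as comments), the container now reads roughly:

<div class={styles.inputContainer}>
  <div class={styles.optionsRow}>
    <label class={styles.toggleContainer}>
      <input
        type="checkbox"
        /* checked/onChange not shown in the diff */
      />
      <span>Enable thinking</span>
    </label>
    {tokenCount && (
      <div class={styles.tokenCounter}>
        {tokenCount.taken} / {tokenCount.total} tokens
      </div>
    )}
  </div>
  <textarea
    class={styles.input}
    value={input}
    /* remaining props not shown in the diff */
  />
</div>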
@@ -153,11 +153,22 @@ namespace LLM {
     data: ModelInfo[];
   }
 
-  export interface CountTokensRequest {
+  interface CountTokensRequestString {
     model: string;
-    input: string | ChatMessage[];
+    input: string;
   }
 
+  interface CountTokensRequestMessages {
+    model: string;
+    input: LLM.ChatMessage[];
+    tools?: LLM.Tool[];
+    add_generation_prompt?: boolean;
+    enable_thinking?: boolean;
+  }
+
+  export type CountTokensRequest = CountTokensRequestString | CountTokensRequestMessages;
+
   export interface CountTokensResponse {
     object: 'response.input_tokens';
     input_tokens: number;
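The request type is now a structural union: TypeScript picks the variant from the shape of input, so string and chat-message requests each keep only the fields that apply to them. A hypothetical illustration (the model id is invented; the message shape matches the ChatMessage usage above):

// String variant (CountTokensRequestString): raw text only.
const byText: LLM.CountTokensRequest = {
  model: 'example-model',
  input: 'Once upon a time',
};

// Message variant (CountTokensRequestMessages): the full chat request,
// including template flags, so the count matches what generation will consume.
const byMessages: LLM.CountTokensRequest = {
  model: 'example-model',
  input: [{ role: 'user', content: 'Once upon a time' }],
  add_generation_prompt: true,
  enable_thinking: false,
};

Splitting the old input: string | ChatMessage[] field into two interfaces keeps tools, add_generation_prompt, and enable_thinking off the string form, where they have no meaning.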