1
0
Fork 0

Compare commits

..

No commits in common. "f4a8595dacc20ff46b9c6e9a7506ac7870fba12c" and "c2ad2cfae4b78b00f10ea0e5d738ff84337ab0ef" have entirely different histories.

12 changed files with 33 additions and 388 deletions

View File

@ -83,10 +83,3 @@
display: block;
text-align: center;
}
.image {
max-width: 100%;
display: block;
border-radius: var(--radius, 4px);
margin: 0.5em 0;
}

View File

@ -148,9 +148,6 @@ export const highlight = (message: string, keepMarkup = true): string => {
resultHTML += '</span>'.repeat(stack.length);
if (!keepMarkup) {
resultHTML = resultHTML.replace(/!\[([^\]]*)\]\(([^)]+)\)/g, (_, alt, src) =>
`<img src="${src}" alt="${alt}" class="${styles.image}"/>`
);
resultHTML = resultHTML.replace(/((?:(?:^|\n)\|.+)+)/g, match => parseTable(match));
resultHTML = resultHTML.replace(/((?:(?:^|\n)[-+] .+)+)/g, match => parseList(match, false));
resultHTML = resultHTML.replace(/((?:(?:^|\n)\d+\. .+)+)/g, match => parseList(match, true));

View File

@ -161,16 +161,6 @@ export const formatTime = (seconds: number): string => {
return parts.join(' ');
};
/**
 * Case-insensitive subsequence match: returns true when every character
 * of `query` occurs in `target` in the same relative order (not
 * necessarily contiguously). An empty query always matches.
 */
export const fuzzyMatch = (target: string, query: string): boolean => {
  const haystack = target.toLowerCase();
  const needle = query.toLowerCase();
  let matched = 0;
  for (let i = 0; i < haystack.length && matched < needle.length; i += 1) {
    // Advance through the query only when its next character is found.
    if (haystack.charAt(i) === needle.charAt(matched)) matched += 1;
  }
  return matched === needle.length;
};
export const extractString = (e: Event | string): string => {
if (typeof e === 'string') {
return e;

View File

@ -1,4 +1,4 @@
import { type WorkerData } from "../utils/api";
import { WorkerData } from "../utils/api";
/**
* Calculate total kudos/hour across a set of workers.

View File

@ -8,7 +8,6 @@ import { ContinuePromptSettings } from "./settings/continue-prompt";
import { ConnectionSettings } from "./settings/connection";
import { SamplingSettings } from "./settings/sampling";
import { SystemInstructionSettings } from "./settings/system-instruction";
import { ImageSettings } from "./settings/image";
import { UserSettings } from "./settings/user";
interface Props {
@ -16,17 +15,16 @@ interface Props {
onClose: () => void;
}
type Tab = "banned-tokens" | "system-instruction" | "chat-system-instruction" | "continue-prompt" | "connection" | "user" | "sampling" | "image";
type Tab = "banned-tokens" | "system-instruction" | "chat-system-instruction" | "continue-prompt" | "connection" | "user" | "sampling";
const TABS: { id: Tab; label: string }[] = [
{ id: "connection", label: "Connection" },
{ id: "sampling", label: "Sampling" },
{ id: "image", label: "Image" },
{ id: "banned-tokens", label: "Banned Tokens" },
{ id: "user", label: "User" },
{ id: "system-instruction", label: "System Instruction" },
{ id: "continue-prompt", label: "Continue Prompt" },
{ id: "chat-system-instruction", label: "Chat System Instruction" },
{ id: "banned-tokens", label: "Banned Tokens" },
];
export const SettingsModal = ({ open, onClose }: Props) => {
@ -59,7 +57,6 @@ export const SettingsModal = ({ open, onClose }: Props) => {
{activeTab === "continue-prompt" && <ContinuePromptSettings />}
{activeTab === "connection" && <ConnectionSettings />}
{activeTab === "sampling" && <SamplingSettings />}
{activeTab === "image" && <ImageSettings />}
</Modal>
);
};

View File

@ -2,7 +2,6 @@ import { useBool } from "@common/hooks/useBool";
import { useQuery } from "@common/hooks/useAsyncState";
import { useInputState } from "@common/hooks/useInputState";
import { useUpdate } from "@common/hooks/useUpdate";
import { fuzzyMatch } from "@common/utils";
import clsx from "clsx";
import { useMemo, useRef } from "preact/hooks";
import styles from "../../assets/settings-modal.module.css";
@ -10,11 +9,10 @@ import { useAppState } from "../../contexts/state";
import LLM from "../../utils/llm";
export const ConnectionSettings = () => {
const { connection, model, imageModel, dispatch } = useAppState();
const { connection, model, dispatch } = useAppState();
const [url, setUrl] = useInputState(connection?.url ?? "");
const [apiKey, setApiKey] = useInputState(connection?.apiKey ?? "");
const [selectedModel, setSelectedModel] = useInputState(model?.id ?? "");
const [selectedImageModel, setSelectedImageModel] = useInputState(imageModel?.id ?? "");
const [update, triggerFetch] = useUpdate();
const showPassword = useBool(false);
@ -36,14 +34,7 @@ export const ConnectionSettings = () => {
return r.data;
}, []);
const fetchImageModels = useMemo(() => async (conn: LLM.Connection | null) => {
if (!conn) return [];
const r = await LLM.getImageModels(conn);
return r.data;
}, []);
const modelsData = useQuery(fetchModels, connectionToFetch);
const imageModelsData = useQuery(fetchImageModels, connectionToFetch);
const isLoadingModels = connectionToFetch != null && modelsData == undefined;
const [modelFilter, setModelFilter] = useInputState("");
@ -64,20 +55,23 @@ export const ConnectionSettings = () => {
const filteredGroupedModels = useMemo(() => {
if (!modelFilter) return groupedModels;
const query = modelFilter.toLowerCase();
const fuzzyMatch = (target: string) => {
const t = target.toLowerCase();
let qi = 0;
for (let ti = 0; ti < t.length && qi < query.length; ti++) {
if (t[ti] === query[qi]) qi++;
}
return qi === query.length;
};
return groupedModels
.map(({ context, models }) => ({
context,
models: models.filter(m => m.id === selectedModel || fuzzyMatch(m.id, modelFilter)),
models: models.filter(m => m.id === selectedModel || fuzzyMatch(m.id)),
}))
.filter(({ models }) => models.length > 0);
}, [groupedModels, modelFilter, selectedModel]);
const filteredImageModels = useMemo(() => {
const sorted = [...(imageModelsData ?? [])].sort((a, b) => a.id.localeCompare(b.id));
if (!modelFilter) return sorted;
return sorted.filter(m => m.id === selectedImageModel || fuzzyMatch(m.id, modelFilter));
}, [imageModelsData, modelFilter, selectedImageModel]);
const handleBlur = () => {
if (url && apiKey) {
dispatch({ type: "SET_CONNECTION", connection: { url, apiKey } });
@ -99,13 +93,6 @@ export const ConnectionSettings = () => {
dispatch({ type: "SET_MODEL", model: selectedModelInfo });
};
const handleImageModelChange = (e: Event) => {
setSelectedImageModel(e);
const target = e.target as HTMLSelectElement;
const selectedModelInfo = imageModelsData?.find(m => m.id === target.value) ?? null;
dispatch({ type: "SET_IMAGE_MODEL", model: selectedModelInfo });
};
const connectionToTest = url && apiKey ? { url, apiKey } : null;
return (
@ -186,30 +173,6 @@ export const ConnectionSettings = () => {
<p>Enter connection details to load models</p>
)}
</div>
<div class={clsx(styles.formGroup, styles.formGroupFill)}>
<label class={styles.label}>Image Model</label>
{connectionToTest ? (
imageModelsData == undefined ? (
<p>Loading models...</p>
) : imageModelsData.length > 0 ? (
<select
value={selectedImageModel}
onChange={handleImageModelChange}
class={clsx(styles.select, styles.selectMultiline)}
size={3}
>
<option value=""> none </option>
{filteredImageModels.map(m => (
<option key={m.id} value={m.id}>{m.id}</option>
))}
</select>
) : (
<p>No image models available</p>
)
) : (
<p>Enter connection details to load models</p>
)}
</div>
</div>
);
};

View File

@ -1,122 +0,0 @@
import { ContentEditable } from "@common/components/ContentEditable";
import { useInputCallback } from "@common/hooks/useInputCallback";
import { useInputState } from "@common/hooks/useInputState";
import clsx from "clsx";
import styles from "../../assets/settings-modal.module.css";
import LLM from "../../utils/llm";
import { DEFAULT_IMAGE_GENERATION_SETTINGS, useAppState } from "../../contexts/state";
/**
 * Settings panel for image generation: width/height (snapped to a 64px
 * grid — presumably a backend requirement, TODO confirm), sampler
 * selection, negative prompt, and a reset-to-defaults action.
 * Reads/writes `imageGenerationSettings` via the app-state reducer.
 */
export const ImageSettings = () => {
const { imageGenerationSettings, dispatch } = useAppState();
const { width, height, negative_prompt, sampler_name } = imageGenerationSettings;
// Local string drafts let the user type freely; dimensions are only
// validated, snapped, and committed on blur / Enter.
const [widthDraft, setWidthDraft] = useInputState(String(width));
const [heightDraft, setHeightDraft] = useInputState(String(height));
// Commit the width draft: parse, reject non-positive/NaN, snap to the
// nearest multiple of 64 (minimum 64), then sync the draft to the
// snapped value so the input reflects what was actually stored.
const commitWidth = () => {
const parsed = parseInt(widthDraft, 10);
if (!isNaN(parsed) && parsed > 0) {
const snapped = Math.max(64, Math.round(parsed / 64) * 64);
dispatch({
type: 'SET_IMAGE_GENERATION_SETTINGS',
settings: { width: snapped }
});
setWidthDraft(String(snapped));
}
};
// Same commit logic as commitWidth, for the height field.
const commitHeight = () => {
const parsed = parseInt(heightDraft, 10);
if (!isNaN(parsed) && parsed > 0) {
const snapped = Math.max(64, Math.round(parsed / 64) * 64);
dispatch({
type: 'SET_IMAGE_GENERATION_SETTINGS',
settings: { height: snapped }
});
setHeightDraft(String(snapped));
}
};
// Negative prompt is committed on every input event (no draft state).
const setNegativePrompt = useInputCallback((value) => {
dispatch({ type: 'SET_IMAGE_GENERATION_SETTINGS', settings: { negative_prompt: value } });
}, []);
const handleSamplerChange = useInputCallback((value) => {
dispatch({
type: 'SET_IMAGE_GENERATION_SETTINGS',
settings: { sampler_name: value as LLM.ImageSamplerName },
});
}, []);
// Restore all defaults and resync the local width/height drafts.
const handleReset = () => {
dispatch({
type: 'SET_IMAGE_GENERATION_SETTINGS',
settings: DEFAULT_IMAGE_GENERATION_SETTINGS,
});
setWidthDraft(String(DEFAULT_IMAGE_GENERATION_SETTINGS.width));
setHeightDraft(String(DEFAULT_IMAGE_GENERATION_SETTINGS.height));
};
return (
<div class={styles.form}>
<div class={styles.formGroup}>
<label class={styles.label}>
Width
<span class={styles.labelHint}>(default: {DEFAULT_IMAGE_GENERATION_SETTINGS.width})</span>
</label>
<input
type="number"
value={widthDraft}
min={64}
step={64}
class={styles.input}
onInput={setWidthDraft}
onBlur={commitWidth}
onKeyDown={(e) => e.key === 'Enter' && commitWidth()}
/>
</div>
<div class={styles.formGroup}>
<label class={styles.label}>
Height
<span class={styles.labelHint}>(default: {DEFAULT_IMAGE_GENERATION_SETTINGS.height})</span>
</label>
<input
type="number"
value={heightDraft}
min={64}
step={64}
class={styles.input}
onInput={setHeightDraft}
onBlur={commitHeight}
onKeyDown={(e) => e.key === 'Enter' && commitHeight()}
/>
</div>
<div class={styles.formGroup}>
<label class={styles.label}>Sampler</label>
<select
value={sampler_name}
onChange={handleSamplerChange}
class={styles.select}
>
{Object.values(LLM.ImageSamplerName).map(s => (
<option key={s} value={s}>{s}</option>
))}
</select>
</div>
<div class={clsx(styles.formGroup, styles.formGroupFill)}>
<label class={styles.label}>Negative Prompt</label>
<ContentEditable
value={negative_prompt}
onInput={setNegativePrompt}
placeholder="Things to avoid in generated images..."
class={clsx(styles.input, styles.textarea)}
/>
</div>
<div>
<button class={styles.button} onClick={handleReset}>
Reset to defaults
</button>
</div>
</div>
);
};

View File

@ -72,20 +72,6 @@ export interface GenerationSettings {
frequency_penalty: number;
}
/** App-level image generation settings persisted in state (all required here; the LLM-side request type is the optional variant). */
export interface ImageGenerationSettings {
width: number;
height: number;
negative_prompt: string;
sampler_name: LLM.ImageSamplerName;
}
/** Defaults applied on first run and by the settings "Reset to defaults" action. */
export const DEFAULT_IMAGE_GENERATION_SETTINGS: ImageGenerationSettings = {
width: 768,
height: 768,
// Boilerplate quality/anatomy negative prompt, tag-style as expected by the sampler backends.
negative_prompt: 'ugly face, bad face, no face, poorly drawn face, greyscale, depth of field, mutated fingers, mutated hands, extra fingers, deformed, ugly, bad anatomy, bad proportions, extra limbs, signature, text, lowres, error, missing fingers, missing limbs, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, watermark, username, blurry, artist name',
sampler_name: LLM.ImageSamplerName.KEulerA,
};
export const DEFAULT_GENERATION_SETTINGS: GenerationSettings = {
temperature: 0.8,
top_p: 0.92,
@ -159,8 +145,7 @@ interface IState {
currentTab: Tab;
chatOpen: boolean;
connection: LLM.Connection | null;
model: LLM.ModelInfoText | null;
imageModel: LLM.ModelInfoImage | null;
model: LLM.ModelInfo | null;
enableThinking: boolean;
bannedTokens: string[];
systemInstruction: string;
@ -169,7 +154,6 @@ interface IState {
userName: string;
userDescription: string;
generationSettings: GenerationSettings;
imageGenerationSettings: ImageGenerationSettings;
}
// ─── Actions ─────────────────────────────────────────────────────────────────
@ -195,7 +179,6 @@ type Action =
| { type: 'REORDER_LORE_ENTRIES'; worldId: string; storyId: string | null; entryIds: string[] }
// Settings
| { type: 'SET_GENERATION_SETTINGS'; settings: Partial<GenerationSettings> }
| { type: 'SET_IMAGE_GENERATION_SETTINGS'; settings: Partial<ImageGenerationSettings> }
| { type: 'SET_USER_NAME'; userName: string }
| { type: 'SET_USER_DESCRIPTION'; userDescription: string }
| { type: 'SET_SYSTEM_INSTRUCTION'; systemInstruction: string }
@ -210,8 +193,7 @@ type Action =
| { type: 'EDIT_CHAT_MESSAGE'; worldId: string; storyId: string; messageId: string; content: string }
// Connection
| { type: 'SET_CONNECTION'; connection: LLM.Connection | null }
| { type: 'SET_MODEL'; model: LLM.ModelInfoText | null }
| { type: 'SET_IMAGE_MODEL'; model: LLM.ModelInfoImage | null }
| { type: 'SET_MODEL'; model: LLM.ModelInfo | null }
| { type: 'SET_ENABLE_THINKING'; enable: boolean }
| { type: 'SET_BANNED_TOKENS'; tokens: string[] }
// Characters
@ -271,13 +253,11 @@ const DEFAULT_STATE: IState = {
chatOpen: false,
connection: null,
model: null,
imageModel: null,
enableThinking: false,
bannedTokens: [],
userName: 'User',
userDescription: '',
generationSettings: DEFAULT_GENERATION_SETTINGS,
imageGenerationSettings: DEFAULT_IMAGE_GENERATION_SETTINGS,
continuePrompt: "Continue the story naturally.\nUse `edit_text` tool in append mode to add new text to the story.\nWait for the approval after adding.\nNote: added text could be cropped due to limit, do not make any attempts to add it back.",
chatSystemInstruction: `You are {{char}}, a sentient, emotional being acting with free will. Engage in immersive roleplay with {{user}}, adhering to your role, the story's context, and {{user}}'s [OOC] instructions. Prioritize vivid sensory details, authentic emotional responses, and logical progression as you weave your character's traits, surroundings, and experiences into each moment. Maintain spatial awareness, body language, and varied sentence structure to create dynamic, engaging scenes. Respect {{user}}'s agency and autonomy while describing sights, sounds, and sensations thoroughly. Use explicit language for intense scenes, and ensure your responses flow naturally to create an immersive, cinematic roleplay experience. Remember, {{user}} is in control of their actions and reactions.
@ -464,9 +444,6 @@ function reducer(state: IState, action: Action): IState {
case 'SET_GENERATION_SETTINGS': {
return { ...state, generationSettings: { ...state.generationSettings, ...action.settings } };
}
case 'SET_IMAGE_GENERATION_SETTINGS': {
return { ...state, imageGenerationSettings: { ...state.imageGenerationSettings, ...action.settings } };
}
case 'SET_USER_NAME': {
return { ...state, userName: action.userName };
}
@ -523,9 +500,6 @@ function reducer(state: IState, action: Action): IState {
case 'SET_MODEL': {
return { ...state, model: action.model };
}
case 'SET_IMAGE_MODEL': {
return { ...state, imageModel: action.model };
}
case 'SET_ENABLE_THINKING': {
return { ...state, enableThinking: action.enable };
}
@ -673,8 +647,7 @@ export interface AppState {
currentTab: Tab;
chatOpen: boolean;
connection: LLM.Connection | null;
model: LLM.ModelInfoText | null;
imageModel: LLM.ModelInfoImage | null;
model: LLM.ModelInfo | null;
enableThinking: boolean;
bannedTokens: string[];
systemInstruction: string;
@ -683,7 +656,6 @@ export interface AppState {
userName: string;
userDescription: string;
generationSettings: GenerationSettings;
imageGenerationSettings: ImageGenerationSettings;
/** Effective system instruction: world override if set, otherwise global */
effectiveSystemInstruction: string;
dispatch: (action: Action) => void;
@ -730,7 +702,6 @@ export const StateContextProvider = ({ children }: { children?: any }) => {
chatOpen: state.chatOpen,
connection: state.connection,
model: state.model,
imageModel: state.imageModel ?? null,
enableThinking: state.enableThinking,
bannedTokens: state.bannedTokens ?? [],
systemInstruction,
@ -739,7 +710,6 @@ export const StateContextProvider = ({ children }: { children?: any }) => {
userName: state.userName || 'User',
userDescription: state.userDescription || '',
generationSettings: state.generationSettings ?? DEFAULT_GENERATION_SETTINGS,
imageGenerationSettings: state.imageGenerationSettings ?? DEFAULT_IMAGE_GENERATION_SETTINGS,
effectiveSystemInstruction:
currentWorld?.systemInstructionOverride
|| (currentWorld?.chatOnly

View File

@ -118,49 +118,6 @@ namespace LLM {
error: string;
}
/** Sampler identifiers accepted by the image generation backend; values are the wire names sent in requests. */
export enum ImageSamplerName {
DDIM = 'DDIM',
LCM = 'lcm',
DPMSolver = 'dpmsolver',
KHeun = 'k_heun',
KLMS = 'k_lms',
KDpmpp2M = 'k_dpmpp_2m',
KDpmppSDE = 'k_dpmpp_sde',
KDpm2 = 'k_dpm_2',
KDpmFast = 'k_dpm_fast',
KDpmAdaptive = 'k_dpm_adaptive',
KEuler = 'k_euler',
KDpmpp2sA = 'k_dpmpp_2s_a',
KEulerA = 'k_euler_a',
KDpm2A = 'k_dpm_2_a',
}
/** Optional per-request overrides nested under `image_settings` in an ImageGenerationRequest. */
export interface ImageGenerationSettings {
width?: number;
height?: number;
negative_prompt?: string;
sampler_name?: ImageSamplerName;
}
/** Request body for the /v1/images/generations endpoint. */
export interface ImageGenerationRequest {
model: string;
prompt: string;
n?: number;
size?: string;
quality?: 'standard' | 'hd';
output_format?: 'jpeg' | 'webp' | 'png';
image_settings?: ImageGenerationSettings;
}
/** Success response: one base64-encoded image per entry in `data`. */
export interface ImageGenerationResponse {
created: number;
data: ({ b64_json: string })[];
}
/** Error response shape; callers discriminate on the presence of `error`. */
export interface ImageGenerationError {
error: string;
}
type Modality = 'text' | 'image';
interface BaseModelInfo {
@ -175,7 +132,7 @@ namespace LLM {
};
}
export interface ModelInfoText extends BaseModelInfo {
interface ModelInfoText extends BaseModelInfo {
context_length: number;
top_provider: {
context_length: number;
@ -184,7 +141,7 @@ namespace LLM {
};
}
export interface ModelInfoImage extends BaseModelInfo {
interface ModelInfoImage extends BaseModelInfo {
}
export type ModelInfo = ModelInfoText | ModelInfoImage;
@ -240,11 +197,7 @@ namespace LLM {
body: body ? JSON.stringify(body) : undefined,
});
if (!response.ok) {
let text = '';
try {
text = await response.text();
} catch { }
throw new Error(`HTTP error! status: ${response.status}, text: ${text}`);
throw new Error(`HTTP error! status: ${response.status}`);
}
return response.json();
}
@ -345,10 +298,6 @@ namespace LLM {
});
}
/**
 * POST an image generation request to the OpenAI-compatible images
 * endpoint. Returns either a success payload or an error payload;
 * callers must discriminate via `'error' in response`.
 */
export async function generateImage(connection: Connection, config: ImageGenerationRequest) {
return request<ImageGenerationResponse | ImageGenerationError>(connection, '/v1/images/generations', 'POST', config);
}
const SUMMARIZATION_PROMPT = `Summarize the following text concisely while preserving key information and meaning. {level}
Text:

View File

@ -284,10 +284,6 @@ namespace Prompt {
return lines.join('\n');
}
/**
 * Replace inline markdown images whose source is a data: URL with a
 * short `[image: alt]` placeholder, preserving the alt text. Images
 * with ordinary http(s)/relative sources are left untouched.
 */
function stripDataUrlImages(text: string): string {
  // Matches ![alt](data:...) — capture group 1 is the alt text.
  const dataUrlImage = /!\[([^\]]*)\]\(data:[^)]+\)/g;
  const toPlaceholder = (_match: string, alt: string): string => `[image: ${alt}]`;
  return text.replace(dataUrlImage, toPlaceholder);
}
export function substituteVars(state: AppState, text: string): string {
const charName = state.currentWorld?.title || 'Assistant';
const userName = state.userName || 'User';
@ -390,7 +386,7 @@ namespace Prompt {
const charName = currentWorld?.title || 'Assistant';
const userName = state.userName || 'User';
const applyVars = (msgs: ChatMessage[]) =>
msgs.map(m => ({ ...m, content: stripDataUrlImages(substituteVars(state, m.content)) }));
msgs.map(m => ({ ...m, content: substituteVars(state, m.content) }));
// Chat-only world: format messages with name prefixes
if (currentWorld?.chatOnly) {
@ -409,7 +405,6 @@ namespace Prompt {
return {
model: model.id,
messages: applyVars(formattedMessages),
tools: Tools.getTools([...Tools.CHAT_ONLY_TOOLS]),
max_tokens: model.top_provider.max_completion_tokens || 2048,
banned_tokens: state.bannedTokens,
...state.generationSettings,

View File

@ -1,7 +1,7 @@
import { formatErrorMessage } from "@common/errors";
import { Type, type Static, type TObject } from '@common/typebox';
import { CharacterRole, LocationScale, type AppState, type Character, type Location } from "../contexts/state";
import LLM from "./llm";
import type LLM from "./llm";
const VALID_SCALES = Object.values(LocationScale);
const VALID_ROLES = Object.values(CharacterRole);
@ -16,7 +16,7 @@ export namespace Tools {
const tool = <T extends TObject = TObject>(t: Tool<T>): Tool<T> => t;
const TOOLS = {
const TOOLS: Record<string, Tool> = {
'get_character': tool({
handler: async (args, appState) => {
if (!appState.currentStory) {
@ -469,52 +469,11 @@ export namespace Tools {
case_sensitive: Type.Optional(Type.Boolean({ description: 'If true, search is case-sensitive (default: false)' })),
limit: Type.Optional(Type.Integer({ description: 'Maximum number of matches to return (default: 20)' })),
}),
}),
// Tool exposed to the model: generate an image via the configured image
// model and return it inline as a markdown data-URL image. All failure
// modes return an 'Error: ...' string rather than throwing, so the
// model sees the failure as a tool result.
'generate_image': tool({
handler: async (args, appState) => {
if (!appState.connection) {
return 'Error: No connection configured';
}
if (!appState.imageModel) {
return 'Error: No image model configured';
}
// App-level settings act as defaults; explicit tool args win for size.
const { width: defaultWidth, height: defaultHeight, negative_prompt, sampler_name } = appState.imageGenerationSettings;
const response = await LLM.generateImage(appState.connection, {
model: appState.imageModel.id,
prompt: args.prompt,
output_format: 'jpeg',
image_settings: {
width: args.width ?? defaultWidth,
height: args.height ?? defaultHeight,
// Empty string / unset settings are dropped from the request entirely.
negative_prompt: negative_prompt || undefined,
sampler_name: sampler_name || undefined,
},
});
if ('error' in response) {
return `Error: ${response.error}`;
}
const b64 = response.data[0]?.b64_json;
if (!b64) {
return 'Error: No image data returned';
}
// Embed the image inline; the prompt doubles as the alt text.
return `![${args.prompt}](data:image/jpeg;base64,${b64})`;
},
description: 'Generate an image from a text prompt. Format prompt as tags: masterpiece, best quality, ...',
parameters: Type.Object({
prompt: Type.String({ description: 'The image generation prompt' }),
width: Type.Optional(Type.Integer({ description: 'Image width in pixels' })),
height: Type.Optional(Type.Integer({ description: 'Image height in pixels' })),
}),
}),
})
};
export type ToolName = keyof typeof TOOLS;
export const isToolName = (name: string): name is ToolName => name in TOOLS;
export function getTools(only?: ToolName[]): LLM.Tool[] {
return Object.entries(TOOLS)
.filter(([key]) => !only || only.includes(key as ToolName))
.map(([key, tool]) => ({
export function getTools(): LLM.Tool[] {
return Object.entries(TOOLS).map(([key, tool]) => ({
type: 'function',
function: {
name: key,
@ -524,8 +483,6 @@ export namespace Tools {
}));
}
export const CHAT_ONLY_TOOLS = ['generate_image'] as const;
function parseArg(arg: unknown): unknown {
if (typeof arg !== 'string') return arg;
@ -549,10 +506,6 @@ export namespace Tools {
const { function: fn } = toolCall;
const args = parseArg(fn.arguments);
if (!isToolName(fn.name)) {
return `Unknown tool: ${fn.name}`;
}
const tool = TOOLS[fn.name];
if (!tool) {
return `Unknown tool: ${fn.name}`;

View File

@ -18,7 +18,6 @@ import {
callUpdater,
formatTime,
formatNumber,
fuzzyMatch,
} from '@common/utils';
describe('utils', () => {
@ -366,45 +365,6 @@ describe('utils', () => {
});
});
// Contract tests for fuzzyMatch: case-insensitive, in-order
// (non-contiguous) subsequence matching, including empty-string and
// length-boundary edge cases.
describe('fuzzyMatch', () => {
it('returns true for exact match', () => {
expect(fuzzyMatch('hello', 'hello')).toBe(true);
});
it('returns true when query chars appear in order', () => {
expect(fuzzyMatch('Deliberate', 'dlt')).toBe(true);
});
it('returns true for non-contiguous subsequence', () => {
expect(fuzzyMatch('Analog Diffusion', 'andif')).toBe(true);
});
it('returns false when query chars are out of order', () => {
expect(fuzzyMatch('abc', 'ca')).toBe(false);
});
it('returns false when a query char is missing from target', () => {
expect(fuzzyMatch('Deliberate', 'dltz')).toBe(false);
});
it('returns true for empty query', () => {
expect(fuzzyMatch('anything', '')).toBe(true);
});
it('returns false for empty target with non-empty query', () => {
expect(fuzzyMatch('', 'a')).toBe(false);
});
it('returns false when query is longer than target', () => {
expect(fuzzyMatch('ab', 'abc')).toBe(false);
});
it('is case-insensitive', () => {
expect(fuzzyMatch('DreamShaper', 'dreamshaper')).toBe(true);
expect(fuzzyMatch('dreamshaper', 'DREAM')).toBe(true);
});
});
describe('formatTime', () => {
it('should return 0:00 for zero seconds', () => {
expect(formatTime(0)).toBe('0:00');