Fix crash in settings, optimize model loading
parent f4a8595dac
commit cc8f99084b
@@ -119,7 +119,7 @@ export const ContentEditable = ({
      ref={ref}
      {...props}
      contentEditable
-     data-placeholder={value.replaceAll('\n', '').length ? undefined : placeholder}
+     data-placeholder={value?.replaceAll('\n', '').length ? undefined : placeholder}
      class={clsx(styles.root, autoLines && styles.autoLines, externalClass)}
      onInput={handleInput}
    />
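Note on the crash: `value.replaceAll('\n', '')` throws a TypeError whenever `value` is undefined or null, which is what took the settings screen down. With the optional chain the whole expression short-circuits to undefined, so the placeholder is shown instead of crashing. A minimal standalone sketch of the two behaviors (the helper name and types here are illustrative, not from the diff):

    // Sketch only: how the old and new expressions differ when value is missing.
    const placeholder = 'Type here...';
    const dataPlaceholder = (value: string | undefined) =>
      value?.replaceAll('\n', '').length ? undefined : placeholder;

    dataPlaceholder(undefined); // -> 'Type here...' (the old, unchained form threw here)
    dataPlaceholder('hi\n');    // -> undefined (content present, so no placeholder)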
@@ -32,24 +32,19 @@ export const ConnectionSettings = () => {

  const fetchModels = useMemo(() => async (conn: LLM.Connection | null) => {
    if (!conn) return [];
-   const r = await LLM.getTextModels(conn);
-   return r.data;
- }, []);
-
- const fetchImageModels = useMemo(() => async (conn: LLM.Connection | null) => {
-   if (!conn) return [];
-   const r = await LLM.getImageModels(conn);
+   const r = await LLM.getModels(conn);
    return r.data;
  }, []);

  const modelsData = useQuery(fetchModels, connectionToFetch);
- const imageModelsData = useQuery(fetchImageModels, connectionToFetch);
- const isLoadingModels = connectionToFetch != null && modelsData == undefined;
+ const textModelsData = useMemo(() => modelsData?.filter(LLM.isTextModel), [modelsData]);
+ const imageModelsData = useMemo(() => modelsData?.filter(LLM.isImageModel), [modelsData]);
+ const isLoadingModels = connectionToFetch != null && textModelsData == undefined;

  const [modelFilter, setModelFilter] = useInputState("");

  const groupedModels = useMemo(() => {
-   const sorted = (modelsData ?? []).sort((a, b) => {
+   const sorted = (textModelsData ?? []).sort((a, b) => {
      const aWeight = Number(a.supported_parameters.includes('tools')) * 2 + Number(a.supported_parameters.includes('reasoning'));
      const bWeight = Number(b.supported_parameters.includes('tools')) * 2 + Number(b.supported_parameters.includes('reasoning'));
      if (aWeight !== bWeight) return bWeight - aWeight;
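This is the "optimize model loading" half: previously `getTextModels` and `getImageModels` each issued their own request to `/v1/models`, so the settings screen fetched the same list twice. Now a single `getModels` call fetches once and both views are derived locally with the exported type guards. Note that `isLoadingModels` keys off `textModelsData`, which stays undefined exactly while the one fetch is in flight and becomes an array (possibly empty) as soon as it resolves. A reduced sketch of the pattern (`loadModels` is illustrative, not code from the diff):

    // The shape of the change: fetch the list once, narrow it twice locally.
    async function loadModels(conn: LLM.Connection) {
      const all = (await LLM.getModels(conn)).data;  // single GET /v1/models
      return {
        text: all.filter(LLM.isTextModel),           // ModelInfoText[]
        image: all.filter(LLM.isImageModel),         // ModelInfoImage[]
      };
    }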
@@ -60,7 +55,7 @@ export const ConnectionSettings = () => {
    return Array.from(groups.entries())
      .sort((a, b) => b[0] - a[0])
      .map(([context, models]) => ({ context, models }));
- }, [modelsData]);
+ }, [textModelsData]);

  const filteredGroupedModels = useMemo(() => {
    if (!modelFilter) return groupedModels;
@@ -95,7 +90,7 @@ export const ConnectionSettings = () => {
  const handleModelChange = (e: Event) => {
    setSelectedModel(e);
    const target = e.target as HTMLSelectElement;
-   const selectedModelInfo = modelsData?.find(m => m.id === target.value) ?? null;
+   const selectedModelInfo = textModelsData?.find(m => m.id === target.value) ?? null;
    dispatch({ type: "SET_MODEL", model: selectedModelInfo });
  };

@@ -105,7 +105,8 @@ export const ImageSettings = () => {
        </div>
        <div class={clsx(styles.formGroup, styles.formGroupFill)}>
          <label class={styles.label}>Negative Prompt</label>
-         <ContentEditable
+         <textarea
+           rows={3}
            value={negative_prompt}
            onInput={setNegativePrompt}
            placeholder="Things to avoid in generated images..."
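Replacing `ContentEditable` with a native `<textarea>` here is the belt-and-braces half of the crash fix: the built-in `placeholder` attribute covers the empty state and `rows={3}` fixes the height, so this field no longer exercises the `data-placeholder` code path at all.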
@@ -189,8 +189,8 @@ namespace LLM {

  export type ModelInfo = ModelInfoText | ModelInfoImage;

- const isTextModel = (model: ModelInfo): model is ModelInfoText => ('context_length' in model);
- const isImageModel = (model: ModelInfo): model is ModelInfoImage => Boolean(
+ export const isTextModel = (model: ModelInfo): model is ModelInfoText => ('context_length' in model);
+ export const isImageModel = (model: ModelInfo): model is ModelInfoImage => Boolean(
    !isTextModel(model) &&
    model.architecture &&
    (model.architecture.output_modalities).includes('image')
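Exporting the guards is what enables the client-side filtering above. Because each guard is a TypeScript type predicate (`model is ModelInfoText`), `Array.prototype.filter` returns the narrowed array type with no casts. A self-contained illustration with simplified stand-in types (not the real `ModelInfo` shapes):

    // Simplified stand-ins; the real ModelInfoText/ModelInfoImage have more fields.
    type TextModel = { id: string; context_length: number };
    type ImageModel = { id: string; architecture: { output_modalities: string[] } };
    type Model = TextModel | ImageModel;

    const isText = (m: Model): m is TextModel => 'context_length' in m;

    const models: Model[] = [
      { id: 'a', context_length: 8192 },
      { id: 'b', architecture: { output_modalities: ['image'] } },
    ];

    const textModels = models.filter(isText); // typed TextModel[], no `as` needed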
@@ -313,18 +313,8 @@ namespace LLM {
    return e != null && typeof e === 'object' && 'data' in e && typeof e.data === 'string';
  }

- export async function getTextModels(connection: Connection): Promise<ModelsResponse<ModelInfoText>> {
-   const response = await request<ModelsResponse>(connection, '/v1/models');
-
-   response.data = response.data.filter(isTextModel);
-   return response as ModelsResponse<ModelInfoText>;
- }
-
- export async function getImageModels(connection: Connection): Promise<ModelsResponse<ModelInfoImage>> {
-   const response = await request<ModelsResponse>(connection, '/v1/models');
-
-   response.data = response.data.filter(isImageModel);
-   return response as ModelsResponse<ModelInfoImage>;
+ export async function getModels(connection: Connection): Promise<ModelsResponse<ModelInfo>> {
+   return request<ModelsResponse>(connection, '/v1/models');
  }

  export async function countTokens(connection: Connection, body: CountTokensRequest) {
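The removed helpers each mutated `response.data` in place and then asserted the new element type with `as`, which meant two identical GETs per settings view plus a cast the compiler could not check. Returning the unfiltered union from a single `getModels` and narrowing at the call sites with the now-exported guards keeps one request and zero assertions.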