Mirror of https://github.com/tiddly-gittly/TidGi-Desktop.git, synced 2025-12-15 15:10:31 -08:00
feat: UI and button to test load model in tidgi
This commit is contained in:
parent 4b3c421231
commit 328cfe0a95
9 changed files with 177 additions and 59 deletions
@@ -385,7 +385,13 @@
   "TimeoutDuration": "Timeout Duration",
   "UpdateTimeoutDuration": "Unit: seconds. If a language model has not responded within this duration, the conversation is terminated automatically and waiting stops, since this usually means the model has stopped running. The value must be greater than 0 and not too small, otherwise output cannot complete properly.",
   "OpenModelFolder": "Open Model Folder",
-  "OpenModelFolderDescription": "The inference program looks for models in this folder. Put your downloaded models here (or create a soft link to it from the command line)"
+  "OpenModelFolderDescription": "The inference program looks for models in this folder. Put your downloaded models here (or create a soft link to it from the command line)",
+  "ModelLoaded": "Model Loaded",
+  "ModelNotLoaded": "Model Not Loaded",
+  "LoadModel": "Load Model",
+  "UnLoadModel": "Unload Model",
+  "UnLoadModelDescription": "Click to unload the model and release its memory; it will need to be reloaded before the next use, which takes extra time",
+  "LoadModelDescription": "Click to try loading the model, either to check that it is usable or to preload it for use within TiddlyWiki. Note that the parameters here may differ from those provided by the plugin inside TiddlyWiki, which can lead to inconsistent behavior."
 },
 "RunOnBackground": "Run On Background",
 "RunOnBackgroundDetail": "When the window is closed, keep running in the background without exiting, so the window can be restored quickly the next time the app is opened.",
@@ -474,7 +480,8 @@
   "OpenThisPath": "Open This Path",
   "ModelNotExistDescription": "Tried to load the model from the path you provided, but no model actually exists at that location",
   "GenerationTimeout": "Generation Timeout, Terminated",
-  "ModelLoadingProgress": "Model Loading"
+  "ModelLoadingProgress": "Model Loading",
+  "ModelDisposed": "Model Disposed, Terminated (need to reload the language model)"
 },
 "Description": "Description",
 "Tags": "Tags",
@@ -422,7 +422,13 @@
   "UpdateTimeoutDuration": "单位(秒)。如果一个语言模型这么久了还没回应,将自动终止对话停止等待,因为这基本上意味着它停止运行了。需要大于0,且不能太小,不然无法正常输出。",
   "TimeoutDuration": "超时时长",
   "OpenModelFolder": "打开模型文件夹",
-  "OpenModelFolderDescription": "推理程序将在这个文件夹里寻找模型,请将你下载的模型放到这里(或使用命令行创建软链接到这里)"
+  "OpenModelFolderDescription": "推理程序将在这个文件夹里寻找模型,请将你下载的模型放到这里(或使用命令行创建软链接到这里)",
+  "ModelLoaded": "模型已加载",
+  "ModelNotLoaded": "模型未加载",
+  "LoadModel": "加载模型",
+  "UnLoadModel": "卸载模型",
+  "UnLoadModelDescription": "点击卸载模型释放内存,但再次使用时需要重新加载,会消耗额外时间",
+  "LoadModelDescription": "点击尝试加载模型,可用于检测模型是否可以正常使用。或预加载供太微内使用,但此处的参数可能和太微内插件提供的参数不一样,导致行为不一致。"
 },
 "RunOnBackground": "保持后台运行",
 "RunOnBackgroundDetail": "在窗口关闭时不退出,继续保持后台运行。再次打开应用时快速还原窗口。",
@@ -480,8 +486,8 @@
   "ModelNotExistDescription": "尝试使用你给的这个路径加载模型,但在这个位置其实没有所需要的模型",
   "OpenThisPath": "打开该位置",
   "GenerationTimeout": "模型生成超时,已中止。",
-  "ModalDisposed": "模型已卸载,生成中止,需要重新加载模型。",
-  "ModelLoadingProgress": "模型加载进度"
+  "ModelLoadingProgress": "模型加载进度",
+  "ModelDisposed": "模型已卸载,生成中止(需要重新加载语言模型)"
 },
 "Help": {
   "Alternatives": "其它源",
@@ -1,11 +1,13 @@
-import React from 'react';
+import React, { useCallback } from 'react';
 import { useTranslation } from 'react-i18next';
 
 import ChevronRightIcon from '@mui/icons-material/ChevronRight';
-import { Divider, List, ListItemButton } from '@mui/material';
+import { Box, Button, Divider, LinearProgress, List, ListItemButton, Tooltip } from '@mui/material';
 
 import { ListItem, ListItemText } from '@/components/ListItem';
 import { usePromiseValue } from '@/helpers/useServiceValue';
+import { useLoadModelObservable, useModelLoadedObservable, useModelLoadProgressObservable } from '@services/languageModel/hooks';
+import { LanguageModelRunner } from '@services/languageModel/interface';
 import { usePreferenceObservable } from '@services/preferences/hooks';
 import { ListItemVertical, Paper, SectionTitle, TextField } from '../PreferenceComponents';
 import type { ISectionProps } from '../useSections';
@@ -18,6 +20,7 @@ export function LanguageModel(props: Partial<ISectionProps>): JSX.Element {
     async () => await Promise.all([window.service.context.get('LANGUAGE_MODEL_FOLDER')]),
     [undefined],
   )!;
+  const modelLoaded = useModelLoadedObservable();
 
   return (
     <>
@@ -48,23 +51,30 @@ export function LanguageModel(props: Partial<ISectionProps>): JSX.Element {
         <ListItemVertical>
           <ListItemText primary={t('Preference.LanguageModel.DefaultModel')} />
           <ListItemText secondary={t('Preference.LanguageModel.DefaultModelDescription')} />
-          {Object.keys(preference.languageModel.defaultModel).map(key => (
-            <TextField
-              fullWidth
-              key={key}
-              label={key}
-              onChange={async (event: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>) => {
-                await window.service.preference.set('languageModel', {
-                  ...preference.languageModel,
-                  defaultModel: {
-                    ...preference.languageModel.defaultModel,
-                    [key]: event.target.value,
-                  },
-                });
-              }}
-              value={preference.languageModel.defaultModel[key as keyof typeof preference.languageModel.defaultModel]}
-            />
-          ))}
+          {Object.keys(preference.languageModel.defaultModel).map(key => {
+            const runner = key as LanguageModelRunner;
+            const modelFileName = preference.languageModel.defaultModel[runner];
+            const modelPath = `${LANGUAGE_MODEL_FOLDER}/${modelFileName}`;
+            return (
+              <Box display='flex' key={key} width='100%'>
+                <TextField
+                  fullWidth
+                  label={modelPath}
+                  onChange={async (event: React.ChangeEvent<HTMLInputElement | HTMLTextAreaElement>) => {
+                    await window.service.preference.set('languageModel', {
+                      ...preference.languageModel,
+                      defaultModel: {
+                        ...preference.languageModel.defaultModel,
+                        [key]: event.target.value,
+                      },
+                    });
+                  }}
+                  value={modelFileName}
+                />
+                <ModelLoadProgressBar runner={runner} modelLoaded={modelLoaded?.[runner] === true} modelPath={modelPath} />
+              </Box>
+            );
+          })}
         </ListItemVertical>
         <Divider />
         <ListItem>
@@ -88,3 +98,49 @@ export function LanguageModel(props: Partial<ISectionProps>): JSX.Element {
     </>
   );
 }
+
+function ModelLoadProgressBar({ runner, modelLoaded, modelPath }: { modelLoaded: boolean; modelPath: string; runner: LanguageModelRunner }) {
+  const { t } = useTranslation();
+  const modelLoadProgress = useModelLoadProgressObservable();
+  const progress = modelLoadProgress?.[runner] ?? 0;
+  const [loadModel] = useLoadModelObservable();
+  const unloadModel = useCallback(async (runner: LanguageModelRunner) => {
+    await window.service.languageModel.unloadLanguageModel(runner);
+  }, []);
+  return (
+    <Box display='flex' flexDirection='column' justifyContent='flex-end' alignItems='center' width='10em'>
+      {progress > 0 && progress < 1 && <LinearProgress variant='determinate' value={progress} />}
+      <Box display='flex'>
+        {modelLoaded
+          ? (
+            <Tooltip title={`${t('Preference.LanguageModel.ModelLoaded')} ${t('Preference.LanguageModel.UnLoadModelDescription')}`}>
+              <Button
+                onClick={async () => {
+                  await unloadModel(runner);
+                }}
+              >
+                {t('Preference.LanguageModel.UnLoadModel')}
+              </Button>
+            </Tooltip>
+          )
+          : (
+            <Tooltip title={`${t('Preference.LanguageModel.ModelNotLoaded')} ${t('Preference.LanguageModel.LoadModelDescription')}`}>
+              <Button
+                onClick={() => {
+                  loadModel(runner, {
+                    loadModelOnly: true,
+                    modelName: runner,
+                    id: 'tidgi-preference-page-load-model',
+                    completionOptions: { prompt: '-' },
+                    loadConfig: { modelPath },
+                  });
+                }}
+              >
+                {t('Preference.LanguageModel.LoadModel')}
+              </Button>
+            </Tooltip>
+          )}
+      </Box>
+    </Box>
+  );
+}
src/services/languageModel/hooks.ts (new file, 25 lines)
@@ -0,0 +1,25 @@
+import useObservable from 'beautiful-react-hooks/useObservable';
+import { useCallback, useState } from 'react';
+import { ILanguageModelAPIResponse, IRunLLAmaOptions, LanguageModelRunner } from './interface';
+
+export function useLoadModelObservable() {
+  const [value, valueSetter] = useState<ILanguageModelAPIResponse | undefined>();
+  const loadModel = useCallback((runner: LanguageModelRunner, options: IRunLLAmaOptions) => {
+    window.observables.languageModel.runLanguageModel$(runner, options).subscribe({
+      next: valueSetter,
+    });
+  }, []);
+  return [loadModel, value] as const;
+}
+
+export function useModelLoadProgressObservable() {
+  const [value, valueSetter] = useState<Record<LanguageModelRunner, number> | undefined>();
+  useObservable(window.observables.languageModel.modelLoadProgress$, valueSetter);
+  return value;
+}
+
+export function useModelLoadedObservable() {
+  const [value, valueSetter] = useState<Record<LanguageModelRunner, boolean> | undefined>();
+  useObservable(window.observables.languageModel.modelLoaded$, valueSetter);
+  return value;
+}
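For reference, a minimal sketch of how these hooks could be consumed from a renderer component. The component name, conversation id, and model path below are illustrative, not part of this commit, and the `window.observables.languageModel` bridge must already be wired up:

```tsx
import React from 'react';

import { useLoadModelObservable, useModelLoadedObservable, useModelLoadProgressObservable } from '@services/languageModel/hooks';
import { LanguageModelRunner } from '@services/languageModel/interface';

// Illustrative consumer; mirrors what ModelLoadProgressBar does in the preferences page.
export function LlamaLoadStatus(): JSX.Element {
  const loaded = useModelLoadedObservable()?.[LanguageModelRunner.llamaCpp] === true;
  const progress = useModelLoadProgressObservable()?.[LanguageModelRunner.llamaCpp] ?? 0;
  const [loadModel] = useLoadModelObservable();
  return (
    <button
      disabled={loaded}
      onClick={() => {
        loadModel(LanguageModelRunner.llamaCpp, {
          id: 'example-load', // illustrative conversation id
          loadModelOnly: true, // load and report progress, without generating text
          completionOptions: { prompt: '-' }, // required by the type, unused in this mode
          loadConfig: { modelPath: '/path/to/model.gguf' }, // illustrative path
        });
      }}
    >
      {loaded ? 'loaded' : `load (${Math.round(progress * 100)}%)`}
    </button>
  );
}
```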
@@ -19,7 +19,7 @@ import { IPreferenceService } from '@services/preferences/interface';
 import serviceIdentifier from '@services/serviceIdentifier';
 import { IWindowService } from '@services/windows/interface';
 import { WindowNames } from '@services/windows/WindowProperties';
-import { ILanguageModelAPIResponse, ILanguageModelService, IRunLLAmaOptions, LanguageModelRunner } from './interface';
+import { ILanguageModelAPIResponse, ILanguageModelService, ILanguageModelWorkerResponse, IRunLLAmaOptions, LanguageModelRunner } from './interface';
 import { LLMWorker } from './llmWorker/index';
 
 @injectable()
@@ -35,24 +35,24 @@ export class LanguageModel implements ILanguageModelService {
 
   private llmWorker?: ModuleThread<LLMWorker>;
 
-  private modalLoaded: Record<LanguageModelRunner, boolean> = {
+  private modelLoaded: Record<LanguageModelRunner, boolean> = {
     [LanguageModelRunner.llamaCpp]: false,
   };
 
-  public modalLoaded$ = new BehaviorSubject<Record<LanguageModelRunner, boolean>>(this.modalLoaded);
-  public updateModalLoaded(update: Partial<Record<LanguageModelRunner, boolean>>): void {
-    this.modalLoaded = { ...this.modalLoaded, ...update };
-    this.modalLoaded$.next(this.modalLoaded);
+  public modelLoaded$ = new BehaviorSubject<Record<LanguageModelRunner, boolean>>(this.modelLoaded);
+  public updateModelLoaded(update: Partial<Record<LanguageModelRunner, boolean>>): void {
+    this.modelLoaded = { ...this.modelLoaded, ...update };
+    this.modelLoaded$.next(this.modelLoaded);
   }
 
-  private modalLoadProgress: Record<LanguageModelRunner, number> = {
+  private modelLoadProgress: Record<LanguageModelRunner, number> = {
     [LanguageModelRunner.llamaCpp]: 0,
   };
 
-  public modalLoadProgress$ = new BehaviorSubject<Record<LanguageModelRunner, number>>(this.modalLoadProgress);
-  public updateModalLoadProgress(update: Partial<Record<LanguageModelRunner, number>>): void {
-    this.modalLoadProgress = { ...this.modalLoadProgress, ...update };
-    this.modalLoadProgress$.next(this.modalLoadProgress);
+  public modelLoadProgress$ = new BehaviorSubject<Record<LanguageModelRunner, number>>(this.modelLoadProgress);
+  public updateModelLoadProgress(update: Partial<Record<LanguageModelRunner, number>>): void {
+    this.modelLoadProgress = { ...this.modelLoadProgress, ...update };
+    this.modelLoadProgress$.next(this.modelLoadProgress);
   }
 
   private async initWorker(): Promise<void> {
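The service keeps each piece of state in a plain record mirrored into a `BehaviorSubject`, so a subscriber that attaches late (for example, a preferences window opened after the model already finished loading) still receives the latest snapshot immediately. A standalone sketch of the same pattern, with illustrative names:

```ts
import { BehaviorSubject } from 'rxjs';

// Minimal sketch of the record + BehaviorSubject mirror pattern used by the service.
let state: Record<string, boolean> = { llamaCpp: false };
const state$ = new BehaviorSubject<Record<string, boolean>>(state);

function updateState(update: Partial<Record<string, boolean>>): void {
  // Replace the record immutably, then push the new snapshot to all subscribers.
  state = { ...state, ...update };
  state$.next(state);
}

updateState({ llamaCpp: true });
// A subscriber attached *after* the update still sees the latest snapshot:
state$.subscribe((snapshot) => {
  console.log(snapshot.llamaCpp); // logs: true
});
```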
@@ -140,11 +140,26 @@ export class LanguageModel implements ILanguageModelService {
       return;
     }
     let observable;
-    const texts = { timeout: i18n.t('LanguageModel.GenerationTimeout'), disposed: i18n.t('LanguageModel.ModalDisposed') };
-    switch (runner) {
-      case LanguageModelRunner.llamaCpp: {
-        observable = worker.runLLama({ completionOptions, loadConfig: { ...config, modelPath }, conversationID }, texts);
-        break;
-      }
-    }
+    if (options.loadModelOnly === true) {
+      observable = new Observable<ILanguageModelWorkerResponse>((subscriber) => {
+        async function loadLLamaIIFE() {
+          switch (runner) {
+            case LanguageModelRunner.llamaCpp: {
+              await worker.loadLLama({ ...config, modelPath }, conversationID, subscriber);
+              break;
+            }
+          }
+        }
+        void loadLLamaIIFE();
+      });
+    } else {
+      // load and run model
+      const texts = { timeout: i18n.t('LanguageModel.GenerationTimeout'), disposed: i18n.t('LanguageModel.ModelDisposed') };
+      switch (runner) {
+        case LanguageModelRunner.llamaCpp: {
+          observable = worker.runLLama({ completionOptions, loadConfig: { ...config, modelPath }, conversationID }, texts);
+          break;
+        }
+      }
+    }
     observable?.subscribe({
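Wrapping the async worker call in `new Observable` with a `void`-ed async IIFE is the usual way to bridge a promise-based API into an RxJS stream without tripping the no-misused-promises lint rule. A self-contained sketch of the pattern, where `doAsyncWork` is a stand-in for the real worker call:

```ts
import { Observable } from 'rxjs';

// Sketch: bridge an async operation into an Observable; errors are forwarded
// to the subscriber instead of being swallowed by an unhandled promise.
function fromAsyncWork(doAsyncWork: () => Promise<string>): Observable<string> {
  return new Observable<string>((subscriber) => {
    async function iife() {
      try {
        subscriber.next(await doAsyncWork());
        subscriber.complete();
      } catch (error) {
        subscriber.error(error);
      }
    }
    // `void` marks the promise as intentionally not awaited.
    void iife();
  });
}

fromAsyncWork(async () => 'loaded').subscribe({ next: console.log }); // logs: loaded
```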
@@ -156,9 +171,9 @@ export class LanguageModel implements ILanguageModelService {
           case 'progress': {
             const { percentage, id } = result;
             if (percentage === 1) {
-              this.updateModalLoaded({ [runner]: true });
+              this.updateModelLoaded({ [runner]: true });
             }
-            this.updateModalLoadProgress({ [runner]: percentage });
+            this.updateModelLoadProgress({ [runner]: percentage });
             if (id === conversationID) {
               subscriber.next(result);
             }
@@ -207,6 +222,6 @@ export class LanguageModel implements ILanguageModelService {
         break;
       }
     }
-    this.updateModalLoaded({ [runner]: false });
+    this.updateModelLoaded({ [runner]: false });
   }
 }
@@ -55,6 +55,11 @@ export interface ILLMResultPart extends ILLMResultBase {
 export interface IRunLLAmaOptions extends ILLMResultBase {
   completionOptions: Partial<LLamaChatPromptOptions> & { prompt: string };
   loadConfig: Partial<LlamaModelOptions> & Pick<LlamaModelOptions, 'modelPath'>;
+  /**
+   * Load the model to test that it is loadable, or preload it to speed up later use (with `autoDisposeSequence: false`),
+   * without generating any text.
+   */
+  loadModelOnly?: boolean;
+  modelName?: string;
 }
 
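A sketch of how `IRunLLAmaOptions` might be filled in for the two modes the new flag distinguishes; the ids and path here are illustrative:

```ts
import type { IRunLLAmaOptions } from '@services/languageModel/interface';

// Test-load / preload only: the worker loads the model and reports progress; no text is generated.
const testLoadOptions: IRunLLAmaOptions = {
  id: 'example-test-load', // illustrative conversation id
  loadModelOnly: true,
  completionOptions: { prompt: '-' }, // required by the type, unused in this mode
  loadConfig: { modelPath: '/path/to/models/example.gguf' }, // illustrative path
};

// Normal generation: the prompt is actually completed; the model is lazily loaded first if needed.
const generateOptions: IRunLLAmaOptions = {
  id: 'example-generation',
  completionOptions: { prompt: 'Hello' },
  loadConfig: { modelPath: '/path/to/models/example.gguf' },
};
```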
@@ -73,6 +78,8 @@
    * Abort a chat response generation.
    */
   abortLanguageModel(runner: LanguageModelRunner, id: string): Promise<void>;
+  modelLoadProgress$: Observable<Record<LanguageModelRunner, number>>;
+  modelLoaded$: Observable<Record<LanguageModelRunner, boolean>>;
   /**
    * Generate text based on options (including prompt).
    */
@@ -86,6 +93,8 @@ export const LanguageModelServiceIPCDescriptor = {
   channel: LanguageModelChannel.name,
   properties: {
     abortLanguageModel: ProxyPropertyType.Function,
+    modelLoaded$: ProxyPropertyType.Value$,
+    modelLoadProgress$: ProxyPropertyType.Value$,
     runLanguageModel$: ProxyPropertyType.Function$,
     unloadLanguageModel: ProxyPropertyType.Function,
   },
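Assuming the usual electron-ipc-cat semantics (`Value$` proxies a long-lived observable value, `Function$` proxies a function that returns an observable per call), the renderer-facing surface these entries imply looks roughly like the sketch below. The interface name is illustrative; the actual proxy is generated from the descriptor:

```ts
import type { Observable } from 'rxjs';

import type { ILanguageModelAPIResponse, IRunLLAmaOptions, LanguageModelRunner } from '@services/languageModel/interface';

// Illustrative: the shape consumed by the hooks in src/services/languageModel/hooks.ts.
interface ILanguageModelObservables {
  /** ProxyPropertyType.Value$: a long-lived observable value. */
  modelLoaded$: Observable<Record<LanguageModelRunner, boolean>>;
  modelLoadProgress$: Observable<Record<LanguageModelRunner, number>>;
  /** ProxyPropertyType.Function$: a function returning a fresh observable per call. */
  runLanguageModel$(runner: LanguageModelRunner, options: IRunLLAmaOptions): Observable<ILanguageModelAPIResponse>;
}
```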
@@ -1,8 +1,8 @@
 /* eslint-disable @typescript-eslint/no-misused-promises */
 import 'source-map-support/register';
 import { expose } from 'threads/worker';
-import { abortLLama, loadLLamaAndModal, runLLama, unloadLLama } from './llamaCpp';
+import { abortLLama, loadLLamaAndModel, runLLama, unloadLLama } from './llamaCpp';
 
-const llmWorker = { loadLLama: loadLLamaAndModal, unloadLLama, runLLama, abortLLama };
+const llmWorker = { loadLLama: loadLLamaAndModel, unloadLLama, runLLama, abortLLama };
 export type LLMWorker = typeof llmWorker;
 expose(llmWorker);
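The worker API is exposed through threads.js, and the service side consumes it as a `ModuleThread<LLMWorker>`. A hedged sketch of the spawning side; the worker path and function name are illustrative:

```ts
import { ModuleThread, spawn, Thread, Worker } from 'threads';

import type { LLMWorker } from './llmWorker';

// Illustrative: spawn the worker thread and call its exposed, fully typed methods.
async function withLLMWorker(): Promise<void> {
  const worker: ModuleThread<LLMWorker> = await spawn<LLMWorker>(new Worker('./llmWorker')); // illustrative path
  await worker.unloadLLama(); // every exposed method is available as an async proxy
  await Thread.terminate(worker);
}
```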
@@ -5,10 +5,10 @@ import { ILanguageModelWorkerResponse, IRunLLAmaOptions } from '../interface';
 import { DEFAULT_TIMEOUT_DURATION } from './constants';
 
 let llamaInstance: undefined | Llama;
-let modalInstance: undefined | LlamaModel;
+let modelInstance: undefined | LlamaModel;
 let contextInstance: undefined | LlamaContext;
 let contextSequenceInstance: undefined | LlamaContextSequence;
-export async function loadLLamaAndModal(
+export async function loadLLamaAndModel(
   loadConfigOverwrite: Partial<LlamaModelOptions> & Pick<LlamaModelOptions, 'modelPath'>,
   conversationID: string,
   subscriber: Subscriber<ILanguageModelWorkerResponse>,
@@ -25,7 +25,7 @@ export async function loadLLamaAndModal(
       subscriber.next({ message, ...loggerCommonMeta });
     },
   });
-  subscriber.next({ message: 'prepared to load modal', ...loggerCommonMeta, meta: { ...loggerCommonMeta.meta, loadConfigOverwrite } });
+  subscriber.next({ message: 'prepared to load model', ...loggerCommonMeta, meta: { ...loggerCommonMeta.meta, loadConfigOverwrite } });
   const onLoadProgress = debounce((percentage: number) => {
     subscriber.next({
       type: 'progress',
@@ -37,9 +37,9 @@ export async function loadLLamaAndModal(
       onLoadProgress,
       ...loadConfigOverwrite,
     };
-    modalInstance = await llamaInstance.loadModel(loadConfig);
+    modelInstance = await llamaInstance.loadModel(loadConfig);
     subscriber.next({ message: 'instance loaded', ...loggerCommonMeta });
-    return modalInstance;
+    return modelInstance;
   } catch (error) {
     await unloadLLama();
     throw error;
@@ -48,11 +48,11 @@ export async function loadLLamaAndModal(
 export async function unloadLLama() {
   await contextInstance?.dispose();
   contextSequenceInstance?.dispose();
-  await modalInstance?.dispose();
+  await modelInstance?.dispose();
   await llamaInstance?.dispose();
   contextSequenceInstance = undefined;
   llamaInstance = undefined;
-  modalInstance = undefined;
+  modelInstance = undefined;
 }
 const runnerAbortControllers = new Map<string, AbortController>();
 export function runLLama(
@@ -69,8 +69,8 @@ export function runLLama(
   return new Observable<ILanguageModelWorkerResponse>((subscriber) => {
     void (async function runLLamaObservableIIFE() {
       try {
-        if (modalInstance === undefined) {
-          modalInstance = await loadLLamaAndModal(loadConfig, conversationID, subscriber);
+        if (modelInstance === undefined) {
+          modelInstance = await loadLLamaAndModel(loadConfig, conversationID, subscriber);
         }
       } catch (error) {
         subscriber.error(error);
@@ -92,8 +92,8 @@ export function runLLama(
         subscriber.next({ message: 'ready to createCompletion', ...loggerCommonMeta });
         runnerAbortControllers.set(conversationID, abortController);
         if (contextInstance === undefined) {
-          contextInstance = await modalInstance.createContext({
-            contextSize: Math.min(4096, modalInstance.trainContextSize),
+          contextInstance = await modelInstance.createContext({
+            contextSize: Math.min(4096, modelInstance.trainContextSize),
           });
         }
         if (contextSequenceInstance === undefined) {
@@ -107,7 +107,7 @@ export function runLLama(
           ...completionOptions,
           signal: abortController.signal,
           onToken: (tokens) => {
-            if (modalInstance === undefined) {
+            if (modelInstance === undefined) {
               abortController.abort();
               runnerAbortControllers.delete(conversationID);
               subscriber.next({ type: 'result', token: texts.disposed, id: conversationID });
@@ -115,7 +115,7 @@ export function runLLama(
               return;
             }
             updateTimeout();
-            subscriber.next({ type: 'result', token: modalInstance.detokenize(tokens), id: conversationID });
+            subscriber.next({ type: 'result', token: modelInstance.detokenize(tokens), id: conversationID });
           },
         });
         // completed
@@ -4,6 +4,6 @@ import { IPreferences } from './interface';
 
 export function usePreferenceObservable(): IPreferences | undefined {
   const [preference, preferenceSetter] = useState<IPreferences | undefined>();
-  useObservable(window.observables.preference.preference$, preferenceSetter as any);
+  useObservable(window.observables.preference.preference$, preferenceSetter);
   return preference;
 }