diff --git a/README.md b/README.md
index 98a85be..25a3a28 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,7 @@ You can now use this module in any renderer. By default, `@electron/llm` auto-in
 ```
 // First, load the model
 await window.electronAi.create({
-  modelPath: "/full/path/to/model.gguf"
+  modelAlias: "Meta-Llama-3-8B-Instruct.Q4_K_M.gguf"
 })
 
 // Then, talk to it
diff --git a/__tests__/preload.test.ts b/__tests__/preload.test.ts
index de8fdd9..9d093b9 100644
--- a/__tests__/preload.test.ts
+++ b/__tests__/preload.test.ts
@@ -59,6 +59,16 @@ describe('Preload Interface', () => {
     );
   });
 
+  it('prompt should invoke without options', async () => {
+    const input = 'Test prompt';
+    await (globalThis as any).electronAi.prompt(input);
+    expect(ipcRenderer.invoke).toHaveBeenCalledWith(
+      IpcRendererMessage.ELECTRON_LLM_PROMPT,
+      input,
+      undefined,
+    );
+  });
+
   it('promptStreaming should invoke with correct params', async () => {
     const input = 'Test prompt for streaming';
diff --git a/src/interfaces.ts b/src/interfaces.ts
index 29116b2..143193b 100644
--- a/src/interfaces.ts
+++ b/src/interfaces.ts
@@ -50,7 +50,7 @@ export interface InternalLanguageModelPromptOptions
 export type AiProcessModelCreateData = InternalLanguageModelCreateOptions;
 
 export interface AiProcessSendPromptData {
-  options: LanguageModelPromptOptions;
+  options?: LanguageModelPromptOptions;
   stream?: boolean;
   input: string;
 }
diff --git a/src/main/register-ai-handlers.ts b/src/main/register-ai-handlers.ts
index bfe12c8..2e151f5 100644
--- a/src/main/register-ai-handlers.ts
+++ b/src/main/register-ai-handlers.ts
@@ -99,7 +99,7 @@ export function registerAiHandlers({
 
   ipcMain.handle(
     IpcRendererMessage.ELECTRON_LLM_PROMPT,
-    async (_event, input: string, options: LanguageModelPromptOptions) => {
+    async (_event, input: string, options?: LanguageModelPromptOptions) => {
       if (!aiProcess) {
         throw new Error(
           'AI model process not started. Please do so with `electronAi.create()`',
@@ -132,7 +132,7 @@
       const timeoutPromise = new Promise((_, reject) => {
         setTimeout(
           () => reject(new Error('Prompt response timed out.')),
-          options.timeout || 20000,
+          options?.timeout || 20000,
         );
       });
 
@@ -142,7 +142,7 @@
 
   ipcMain.on(
     IpcRendererMessage.ELECTRON_LLM_PROMPT_STREAMING_REQUEST,
-    (event, input: string, options: LanguageModelPromptOptions) => {
+    (event, input: string, options?: LanguageModelPromptOptions) => {
      if (!aiProcess) {
         event.sender.send(
           'ELECTRON_LLM_PROMPT_STREAMING_ERROR',
diff --git a/src/utility/call-ai-model-entry-point.ts b/src/utility/call-ai-model-entry-point.ts
index 17bc110..4beec67 100644
--- a/src/utility/call-ai-model-entry-point.ts
+++ b/src/utility/call-ai-model-entry-point.ts
@@ -48,9 +48,9 @@ async function generateResponse(message: PromptMessage) {
     return;
   }
 
-  const options = abortSignalManager.getWithSignalFromPromptOptions(
-    data.options,
-  );
+  const options =
+    data.options &&
+    abortSignalManager.getWithSignalFromPromptOptions(data.options);
 
   try {
     // Format the prompt payload correctly for the language model
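
Reviewer note: the sketch below shows how the renderer-facing API reads once `options` is optional end to end. It is a minimal sketch, not new API surface: it assumes the preload bridge has exposed `window.electronAi`, and the model alias and the `timeout` option are both taken from the hunks above.

```
// Load a model first (alias reused from the README example above).
await window.electronAi.create({
  modelAlias: "Meta-Llama-3-8B-Instruct.Q4_K_M.gguf",
});

// New in this change: the bare call. The preload forwards
// (input, undefined) over IPC — exactly what the added test asserts —
// and the main-process handler falls back to the default 20000 ms
// timeout via `options?.timeout`.
const reply = await window.electronAi.prompt("Summarize this document.");

// Passing options still works exactly as before.
const slowReply = await window.electronAi.prompt("Same question, more time.", {
  timeout: 30000,
});
```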