Mirror of https://github.com/tiddly-gittly/TidGi-Desktop.git, synced 2025-12-05 18:20:39 -08:00
* docs: deps
* Update dependencies and type usage for AI features: upgraded multiple dependencies in package.json and pnpm-lock.yaml, including @ai-sdk, @mui, react, and others, for improved compatibility and performance. Changed type usage from CoreMessage to ModelMessage in mockOpenAI.test.ts to align with the updated ai package. No functional changes to application logic.
* feat: i18n
* feat: test oauth login and use PKCE
* fix: use ollama-ai-provider-v2
* test: github and mock oauth2 login
* test: gitea login
* Refactor context menu cleanup and error message: moved context menu cleanup for the OAuth window into a single closed-event handler in the Authentication service. Simplified error message formatting in ContextService for missing keys.
* lint: AI fix
* Add tsx as a dev dependency and update scripts: replaced 'pnpm dlx tsx' with the direct 'tsx' command in development and test scripts for improved reliability. Added 'tsx' to devDependencies in package.json.
111 lines
4.5 KiB
TypeScript
import { createAnthropic } from '@ai-sdk/anthropic';
import { createDeepSeek } from '@ai-sdk/deepseek';
import { createOpenAI } from '@ai-sdk/openai';
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { logger } from '@services/libs/log';
import { ModelMessage, streamText } from 'ai';
import { createOllama } from 'ollama-ai-provider-v2';

import { getFormattedContent } from '@/pages/ChatTabContent/components/types';
import { AiAPIConfig } from '@services/agentInstance/promptConcat/promptConcatSchema';
import { AuthenticationError, MissingAPIKeyError, MissingBaseURLError, parseProviderError } from './errors';
import type { AIProviderConfig } from './interface';

type AIStreamResult = ReturnType<typeof streamText>;
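
/**
 * Create an AI SDK provider client for the given provider configuration.
 * The optional providerClass decouples the wire protocol (for example
 * `openAICompatible`) from the user-facing provider name, falling back to
 * the provider name itself when no class is set.
 */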
export function createProviderClient(providerConfig: { provider: string; providerClass?: string; baseURL?: string }, apiKey?: string) {
  // Check providerClass first; fall back to a name-based lookup when it is not set
  const providerClass = providerConfig.providerClass || providerConfig.provider;

  switch (providerClass) {
    case 'openai':
      return createOpenAI({ apiKey });
    case 'openAICompatible':
      // OpenAI-compatible servers are self-hosted or third-party, so the endpoint must be explicit
      if (!providerConfig.baseURL) {
        throw new MissingBaseURLError(providerConfig.provider);
      }
      return createOpenAICompatible({
        name: providerConfig.provider,
        apiKey,
        baseURL: providerConfig.baseURL,
      });
    case 'deepseek':
      return createDeepSeek({ apiKey });
    case 'anthropic':
      return createAnthropic({ apiKey });
    case 'ollama':
      if (!providerConfig.baseURL) {
        throw new MissingBaseURLError(providerConfig.provider);
      }
      return createOllama({
        baseURL: providerConfig.baseURL,
      });
    default:
      throw new Error(`Unsupported AI provider: ${providerConfig.provider}`);
  }
}
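
// Example (illustrative; the endpoint, key, and model id are made-up values):
// resolving an OpenAI-compatible gateway and picking a model from it.
//
// const client = createProviderClient(
//   { provider: 'my-gateway', providerClass: 'openAICompatible', baseURL: 'https://llm.example.com/v1' },
//   'sk-example',
// );
// const providerModel = client('some-model-id');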
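
/**
 * Stream a chat completion from the configured provider.
 * Resolves the provider client, separates the system prompt from the chat
 * history, and returns the live streamText result; provider failures are
 * normalized into the error types from './errors'.
 */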
export function streamFromProvider(
  config: AiAPIConfig,
  messages: Array<ModelMessage>,
  signal: AbortSignal,
  providerConfig?: AIProviderConfig,
): AIStreamResult {
  const provider = config.api.provider;
  const model = config.api.model;
  const modelParameters = config.modelParameters || {};
  const { temperature = 0.7, systemPrompt: fallbackSystemPrompt = 'You are a helpful assistant.' } = modelParameters;

  logger.info(`Using AI provider: ${provider}, model: ${model}`);

  try {
    // Ollama and local OpenAI-compatible servers don't require an API key
    const isOllama = providerConfig?.providerClass === 'ollama';
    const isLocalOpenAICompatible = providerConfig?.providerClass === 'openAICompatible' &&
      providerConfig.baseURL !== undefined &&
      (providerConfig.baseURL.includes('localhost') || providerConfig.baseURL.includes('127.0.0.1'));

    if (providerConfig === undefined || (!providerConfig.apiKey && !isOllama && !isLocalOpenAICompatible)) {
      throw new MissingAPIKeyError(provider);
    }

    const client = createProviderClient(
      providerConfig,
      providerConfig.apiKey,
    );

    // Extract the system message from messages if present, otherwise use the fallback
    const systemMessage = messages.find(message => message.role === 'system');
    const systemPrompt = (systemMessage ? getFormattedContent(systemMessage.content) : undefined) || fallbackSystemPrompt;

    // Filter out system messages from the messages array since we're handling them separately
    const nonSystemMessages = messages.filter(message => message.role !== 'system');

    // Ensure we have at least one message to avoid AI library errors
    const finalMessages: Array<ModelMessage> = nonSystemMessages.length > 0 ? nonSystemMessages : [{ role: 'user' as const, content: 'Hi' }];

    const providerModel = client(model);
    return streamText({
      model: providerModel,
      system: systemPrompt,
      messages: finalMessages,
      temperature,
      abortSignal: signal,
    });
  } catch (error) {
    if (!error) {
      throw new Error(`${provider} error: Unknown error`);
    }
    // Map common HTTP status codes embedded in the message to friendlier errors
    const message = error instanceof Error ? error.message : String(error);
    if (message.includes('401')) {
      throw new AuthenticationError(provider);
    } else if (message.includes('404')) {
      throw new Error(`${provider} error: Model "${model}" not found`);
    } else if (message.includes('429')) {
      throw new Error(`${provider} error: Too many requests. Reduce request frequency or check API limits.`);
    }
    logger.error(`${provider} streaming error:`, error);
    // Try to parse the error into a more specific type if possible
    throw parseProviderError(error instanceof Error ? error : new Error(message), provider);
  }
}
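
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of this module's API): how a caller
// might consume the returned stream. `config`, `providerConfig`, and
// `messages` are assumed to be valid values of the types imported above;
// `textStream` is the async iterable exposed by the ai package's streamText
// result, and `renderChunk` is a hypothetical UI callback.
//
// const controller = new AbortController();
// const result = streamFromProvider(config, messages, controller.signal, providerConfig);
// for await (const chunk of result.textStream) {
//   renderChunk(chunk);
// }
// ---------------------------------------------------------------------------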