// TidGi-Desktop/features/stepDefinitions/agent.ts
import { After, DataTable, Given, Then, When } from '@cucumber/cucumber';
import { AIGlobalSettings, AIProviderConfig } from '@services/externalAPI/interface';
import type { IWorkspace } from '@services/workspaces/interface';
import { backOff } from 'exponential-backoff';
import fs from 'fs-extra';
import { isEqual, omit } from 'lodash';
import path from 'path';
import type { ISettingFile } from '../../src/services/database/interface';
import { MockOpenAIServer } from '../supports/mockOpenAI';
import { getSettingsPath } from '../supports/paths';
import { PLAYWRIGHT_SHORT_TIMEOUT } from '../supports/timeouts';
import type { ApplicationWorld } from './application';
// Backoff configuration for retries
const BACKOFF_OPTIONS = {
  numOfAttempts: 10,
  startingDelay: 200,
  timeMultiple: 1.5,
};
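// With these settings, retries are spread over roughly 15 seconds in total
// (delays of 200ms * 1.5^n for n = 0..8 sum to ~15s) before backOff gives up.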
/**
 * Generate a deterministic embedding vector based on a semantic tag.
 * This lets tests control similarity without writing full 384-dim vectors.
 *
 * Strategy:
 * - Similar tags (note1, note1-similar) -> similar vectors (high similarity)
 * - Different tags (note1, note2) -> different vectors (medium similarity)
 * - Unrelated tags (note1, unrelated) -> very different vectors (low similarity)
 */
function generateSemanticEmbedding(tag: string): number[] {
  const vector: number[] = [];
  // Parse tag to determine semantic relationship
  // Format: "note1", "note2", "query-note1", "unrelated"
  const baseTag = tag.replace(/-similar$/, '').replace(/^query-/, '');
  const isSimilar = tag.includes('-similar');
  const isQuery = tag.startsWith('query-');
  const isUnrelated = tag === 'unrelated';
  // Derive a stable seed from the base tag
  const seed = Array.from(baseTag).reduce((hash, char) => {
    return ((hash << 5) - hash) + char.charCodeAt(0);
  }, 0);
  for (let dimension = 0; dimension < 384; dimension++) {
    const x = Math.sin((seed + dimension) * 0.1) * 10000;
    let value = x - Math.floor(x); // pseudo-random in [0, 1)
    // Adjust vector based on semantic relationship
    if (isUnrelated) {
      // Flip direction so 'unrelated' points away from everything else
      value = -value;
    } else if (isSimilar || isQuery) {
      // Very similar (>95% similarity) - add small noise
      value = value + (Math.sin(dimension * 0.01) * 0.05);
    }
    // Map [0, 1) to [-1, 1); the 'unrelated' branch intentionally lands below -1,
    // pushing those vectors even further from the rest
    vector.push(value * 2 - 1);
  }
  return vector;
}
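// Illustrative check (cosineSimilarity is a hypothetical helper, not part of this file):
//   const a = generateSemanticEmbedding('note1');
//   const b = generateSemanticEmbedding('note1-similar');
//   // cosineSimilarity(a, b) would be high (>0.95): the vectors differ only by small noise.
//   // cosineSimilarity(a, generateSemanticEmbedding('unrelated')) would be low:
//   // every component of the 'unrelated' vector has its sign flipped.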
// Shape of a single mock rule accepted by MockOpenAIServer (chat response and/or embedding)
type MockOpenAIRule = { response: string; stream?: boolean; embedding?: number[] };
// Helper function to start mock OpenAI server and update settings
async function startMockOpenAIServerAndUpdateSettings(
  world: ApplicationWorld,
  rules: MockOpenAIRule[],
): Promise<void> {
  // Use dynamic port (0) to allow parallel test execution
  world.mockOpenAIServer = new MockOpenAIServer(0, rules);
  world.providerConfig = createProviderConfig();
  await world.mockOpenAIServer.start();
  // Update provider config with actual mock server URL
  world.providerConfig.baseURL = `${world.mockOpenAIServer.baseUrl}/v1`;
  // Update AI settings in settings.json with the correct baseURL
  const settingsPath = getSettingsPath(world);
  if (fs.existsSync(settingsPath)) {
    const settings = fs.readJsonSync(settingsPath) as ISettingFile;
    if (settings.aiSettings?.providers?.[0]) {
      settings.aiSettings.providers[0].baseURL = world.providerConfig.baseURL;
      fs.writeJsonSync(settingsPath, settings, { spaces: 2 });
    }
  }
}
// Agent-specific Given steps
/**
 * Parse mock OpenAI rules from a Cucumber data table (response | stream | embedding tag).
 * Shared by the two rule-accepting steps below to avoid duplicating the parsing loop.
 */
function parseMockRules(dataTable: DataTable | undefined): MockOpenAIRule[] {
  const rules: MockOpenAIRule[] = [];
  if (dataTable && typeof dataTable.raw === 'function') {
    const rows = dataTable.raw();
    // Skip header row
    for (let index = 1; index < rows.length; index++) {
      const row = rows[index];
      const response = (row[0] ?? '').trim();
      const stream = (row[1] ?? '').trim().toLowerCase() === 'true';
      const embeddingTag = (row[2] ?? '').trim();
      // Generate embedding from semantic tag if provided
      const embedding = embeddingTag ? generateSemanticEmbedding(embeddingTag) : undefined;
      // Include rules with a response OR an embedding — MockOpenAIServer separates them into chatRules vs embeddingRules internally
      if (response || embedding) rules.push({ response, stream, embedding });
    }
  }
  return rules;
}
/**
 * Start the mock OpenAI server without any rules.
 * Rules can be added later using the "I add mock OpenAI responses" step.
 */
Given('I have started the mock OpenAI server without rules', function(this: ApplicationWorld, done: (error?: Error) => void) {
  startMockOpenAIServerAndUpdateSettings(this, [])
    .then(() => {
      done();
    })
    .catch((error: unknown) => {
      done(error as Error);
    });
});
/**
 * Start the mock OpenAI server with predefined rules from a data table.
 * This is the legacy method used when rules are known upfront.
 */
Given('I have started the mock OpenAI server', function(this: ApplicationWorld, dataTable: DataTable | undefined, done: (error?: Error) => void) {
  try {
    startMockOpenAIServerAndUpdateSettings(this, parseMockRules(dataTable))
      .then(() => {
        done();
      })
      .catch((error: unknown) => {
        done(error as Error);
      });
  } catch (error) {
    done(error as Error);
  }
});
/**
 * Add new responses to an already-running mock OpenAI server.
 * This allows scenarios to configure server responses after the application has started.
 */
Given('I add mock OpenAI responses:', function(this: ApplicationWorld, dataTable: DataTable | undefined) {
  if (!this.mockOpenAIServer) {
    throw new Error('Mock OpenAI server is not running. Use "I have started the mock OpenAI server" first.');
  }
  this.mockOpenAIServer.addRules(parseMockRules(dataTable));
});
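// Illustrative usage in a .feature file (header text is arbitrary — the header row
// is skipped and only column order matters to the parser):
//   Given I add mock OpenAI responses:
//     | response     | stream | embedding |
//     | Hello there! | true   |           |
//     |              |        | note1     |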
// Mock OpenAI server cleanup - for scenarios using mock OpenAI
After({ tags: '@mockOpenAI' }, async function(this: ApplicationWorld) {
  // Stop mock OpenAI server with timeout protection
  if (this.mockOpenAIServer) {
    try {
      await Promise.race([
        this.mockOpenAIServer.stop(),
        new Promise<void>((resolve) => setTimeout(resolve, 2000)),
      ]);
    } catch {
      // Ignore errors during cleanup
    } finally {
      this.mockOpenAIServer = undefined;
    }
  }
});
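// The 2-second Promise.race cap above means a wedged mock server is abandoned
// rather than allowed to stall the remaining scenarios.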
// Only keep agent-specific steps that can't use generic ones
Then('I should see {int} messages in chat history', async function(this: ApplicationWorld, expectedCount: number) {
  const currentWindow = this.currentWindow || this.mainWindow;
  if (!currentWindow) {
    throw new Error('No current window is available');
  }
  const messageSelector = '[data-testid="message-bubble"]';
  await backOff(
    async () => {
      // Wait for at least one message to exist
      await currentWindow.waitForSelector(messageSelector, { timeout: PLAYWRIGHT_SHORT_TIMEOUT });
      // Count current messages
      const currentCount = await currentWindow.locator(messageSelector).count();
      if (currentCount === expectedCount) {
        return; // Success
      }
      if (currentCount > expectedCount) {
        throw new Error(`Expected ${expectedCount} messages but found ${currentCount} (too many)`);
      }
      // Not enough messages yet, throw to trigger retry
      throw new Error(`Expected ${expectedCount} messages but found ${currentCount}`);
    },
    BACKOFF_OPTIONS,
  ).catch(async (error: unknown) => {
    // Include the final count in the error message when possible; skip it if the
    // window is already gone and the locator itself fails
    let finalCount: number | undefined;
    try {
      finalCount = await currentWindow.locator(messageSelector).count();
    } catch {
      // Window may already be closed
    }
    const foundText = finalCount === undefined ? '' : ` Found ${finalCount}.`;
    throw new Error(`Could not find expected ${expectedCount} messages.${foundText} Error: ${(error as Error).message}`);
  });
});
Then('the last AI request should contain system prompt {string}', async function(this: ApplicationWorld, expectedPrompt: string) {
  if (!this.mockOpenAIServer) {
    throw new Error('Mock OpenAI server is not running');
  }
  const lastRequest = this.mockOpenAIServer.getLastRequest();
  if (!lastRequest) {
    throw new Error('No AI request has been made yet');
  }
  // Find system message in the request
  const systemMessage = lastRequest.messages.find(message => message.role === 'system');
  if (!systemMessage) {
    throw new Error('No system message found in the AI request');
  }
  if (!systemMessage.content || !systemMessage.content.includes(expectedPrompt)) {
    throw new Error(`Expected system prompt to contain "${expectedPrompt}", but got: "${systemMessage.content}"`);
  }
});
Then('the last AI request system prompt should not contain {string}', async function(this: ApplicationWorld, unexpectedText: string) {
  if (!this.mockOpenAIServer) {
    throw new Error('Mock OpenAI server is not running');
  }
  const lastRequest = this.mockOpenAIServer.getLastRequest();
  if (!lastRequest) {
    throw new Error('No AI request has been made yet');
  }
  const systemMessage = lastRequest.messages.find(message => message.role === 'system');
  if (!systemMessage) {
    // No system message means it definitely doesn't contain the text
    return;
  }
  if (systemMessage.content && systemMessage.content.includes(unexpectedText)) {
    throw new Error(`Expected system prompt NOT to contain "${unexpectedText}", but it was found in: "${systemMessage.content.substring(0, 300)}..."`);
  }
});
Then('the last AI request should have {int} messages', async function(this: ApplicationWorld, expectedCount: number) {
  if (!this.mockOpenAIServer) {
    throw new Error('Mock OpenAI server is not running');
  }
  const lastRequest = this.mockOpenAIServer.getLastRequest();
  if (!lastRequest) {
    throw new Error('No AI request has been made yet');
  }
  const actualCount = lastRequest.messages.length;
  if (actualCount !== expectedCount) {
    throw new Error(`Expected ${expectedCount} messages in the AI request, but got ${actualCount}`);
  }
});
Then('the last AI request user message should contain {string}', async function(this: ApplicationWorld, expectedText: string) {
  if (!this.mockOpenAIServer) {
    throw new Error('Mock OpenAI server is not running');
  }
  const lastRequest = this.mockOpenAIServer.getLastRequest();
  if (!lastRequest) {
    throw new Error('No AI request has been made yet');
  }
  // Find the last user message in the request
  const userMessages = lastRequest.messages.filter(message => message.role === 'user');
  if (userMessages.length === 0) {
    throw new Error('No user message found in the AI request');
  }
  const lastUserMessage = userMessages[userMessages.length - 1];
  const content = lastUserMessage.content ?? '';
  // Accept both a literal "\n" in the expectation and a real newline in the content
  const normalizedExpectedText = expectedText.replaceAll('\\n', '\n');
  const contentHasExpectedText = content.includes(expectedText) || content.includes(normalizedExpectedText);
  if (!contentHasExpectedText) {
    throw new Error(`Expected user message to contain "${expectedText}", but got: "${content}"`);
  }
});
Then('the last AI request user message should not contain {string}', async function(this: ApplicationWorld, unexpectedText: string) {
  if (!this.mockOpenAIServer) {
    throw new Error('Mock OpenAI server is not running');
  }
  const lastRequest = this.mockOpenAIServer.getLastRequest();
  if (!lastRequest) {
    throw new Error('No AI request has been made yet');
  }
  // Find the last user message in the request
  const userMessages = lastRequest.messages.filter(message => message.role === 'user');
  if (userMessages.length === 0) {
    throw new Error('No user message found in the AI request');
  }
  const lastUserMessage = userMessages[userMessages.length - 1];
  if (lastUserMessage.content && lastUserMessage.content.includes(unexpectedText)) {
    throw new Error(`Expected user message NOT to contain "${unexpectedText}", but it was found in: "${lastUserMessage.content.substring(0, 200)}..."`);
  }
});
// Factory function to create a scenario-specific provider config.
// Returns a new object each time to avoid state pollution between scenarios.
function createProviderConfig(): AIProviderConfig {
  return {
    provider: 'TestProvider',
    baseURL: 'http://127.0.0.1:0/v1', // Will be updated with actual port when mock server starts
    models: [
      { name: 'test-model', features: ['language'] },
      { name: 'test-embedding-model', features: ['language', 'embedding'] },
      { name: 'test-speech-model', features: ['speech'] },
    ],
    providerClass: 'openAICompatible',
    isPreset: false,
    enabled: true,
  };
}
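// Example (port number is illustrative): once the mock server binds a real port such
// as 54321, startMockOpenAIServerAndUpdateSettings rewrites baseURL to
// 'http://127.0.0.1:54321/v1'.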
const desiredModelParameters = { temperature: 0.7, systemPrompt: 'You are a helpful assistant.', topP: 0.95 };
// Step to remove AI settings for testing config errors
Given('I remove test ai settings', function(this: ApplicationWorld) {
  const settingsPath = path.resolve(process.cwd(), 'test-artifacts', this.scenarioSlug, 'userData-test', 'settings', 'settings.json');
  if (fs.existsSync(settingsPath)) {
    const existing = fs.readJsonSync(settingsPath) as ISettingFile;
    // Remove aiSettings but keep other settings
    const { aiSettings: _removed, ...rest } = existing;
    fs.writeJsonSync(settingsPath, rest, { spaces: 2 });
  }
});
Given('I ensure test ai settings exists', function(this: ApplicationWorld) {
  const settingsPath = path.resolve(process.cwd(), 'test-artifacts', this.scenarioSlug, 'userData-test', 'settings', 'settings.json');
  const parsed = fs.readJsonSync(settingsPath) as Record<string, unknown>;
  const actual = (parsed.aiSettings as Record<string, unknown> | undefined) || null;
  if (!actual) {
    throw new Error('aiSettings not found in settings file');
  }
  const actualProviders = (actual.providers as Array<Record<string, unknown>>) || [];
  // If providerConfig is set (from mock server), use it; otherwise create expected config
  // and use actual baseURL from settings (for UI-configured scenarios)
  let providerConfig: AIProviderConfig;
  const providerName = 'TestProvider';
  const existingProvider = actualProviders.find(p => p.provider === providerName) as AIProviderConfig | undefined;
  if (this.providerConfig) {
    // Use the mock server's providerConfig
    providerConfig = this.providerConfig;
  } else if (existingProvider) {
    // For UI-configured scenarios: build expected config using actual baseURL
    providerConfig = createProviderConfig();
    providerConfig.baseURL = existingProvider.baseURL ?? providerConfig.baseURL;
  } else {
    providerConfig = createProviderConfig();
  }
  // Build expected aiSettings from providerConfig and compare with actual
  const modelName = providerConfig.models[0]?.name;
  // Check TestProvider exists (reuse the lookup from above)
  const testProvider = existingProvider;
  if (!testProvider) {
    console.error('TestProvider not found in actual providers:', JSON.stringify(actualProviders, null, 2));
    throw new Error('TestProvider not found in aiSettings');
  }
  // Verify TestProvider configuration
  if (!isEqual(testProvider, providerConfig)) {
    console.error('TestProvider config mismatch. expected:', JSON.stringify(providerConfig, null, 2));
    console.error('TestProvider config actual:', JSON.stringify(testProvider, null, 2));
    throw new Error('TestProvider configuration does not match expected');
  }
  // Check ComfyUI provider exists
  const comfyuiProvider = actualProviders.find(p => p.provider === 'comfyui');
  if (!comfyuiProvider) {
    console.error('ComfyUI provider not found in actual providers:', JSON.stringify(actualProviders, null, 2));
    throw new Error('ComfyUI provider not found in aiSettings');
  }
  // Verify ComfyUI has test-flux model with workflow path
  const comfyuiModels = (comfyuiProvider.models as Array<Record<string, unknown>>) || [];
  const testFluxModel = comfyuiModels.find(m => m.name === 'test-flux');
  if (!testFluxModel) {
    console.error('test-flux model not found in ComfyUI models:', JSON.stringify(comfyuiModels, null, 2));
    throw new Error('test-flux model not found in ComfyUI provider');
  }
  // Verify workflow path
  const parameters = testFluxModel.parameters as Record<string, unknown> | undefined;
  if (!parameters || parameters.workflowPath !== 'C:/test/mock/workflow.json') {
    console.error('Workflow path mismatch. expected: C:/test/mock/workflow.json, actual:', parameters?.workflowPath);
    throw new Error('Workflow path not correctly saved');
  }
  // Verify default config
  const defaultConfig = actual.defaultConfig as Record<string, unknown>;
  const defaultModel = defaultConfig.default as Record<string, unknown>;
  if (defaultModel?.provider !== providerName || defaultModel?.model !== modelName) {
    console.error('Default config mismatch. expected provider:', providerName, 'model:', modelName);
    console.error('actual defaultModel:', JSON.stringify(defaultModel, null, 2));
    throw new Error('Default configuration does not match expected');
  }
});
// Version without a data table, for simple cases
Given('I add test ai settings', async function(this: ApplicationWorld) {
  const settingsPath = path.resolve(process.cwd(), 'test-artifacts', this.scenarioSlug, 'userData-test', 'settings', 'settings.json');
  let existing = {} as ISettingFile;
  if (fs.existsSync(settingsPath)) {
    existing = fs.readJsonSync(settingsPath) as ISettingFile;
  } else {
    fs.ensureDirSync(path.dirname(settingsPath));
  }
  // Initialize scenario-specific providerConfig if not set
  if (!this.providerConfig) {
    this.providerConfig = createProviderConfig();
  }
  const providerConfig = this.providerConfig;
  const modelsArray = providerConfig.models;
  const modelName = modelsArray[0]?.name;
  const embeddingModelName = modelsArray[1]?.name;
  const speechModelName = modelsArray[2]?.name;
  const newAi: AIGlobalSettings = {
    providers: [providerConfig],
    defaultConfig: {
      default: {
        provider: providerConfig.provider,
        model: modelName,
      },
      embedding: {
        provider: providerConfig.provider,
        model: embeddingModelName,
      },
      speech: {
        provider: providerConfig.provider,
        model: speechModelName,
      },
      modelParameters: desiredModelParameters,
    },
  };
  const newPreferences = existing.preferences || {};
  fs.writeJsonSync(settingsPath, { ...existing, aiSettings: newAi, preferences: newPreferences } as ISettingFile, { spaces: 2 });
});
// Version with a data table, for advanced configuration
Given('I add test ai settings:', async function(this: ApplicationWorld, dataTable: DataTable) {
  const settingsPath = path.resolve(process.cwd(), 'test-artifacts', this.scenarioSlug, 'userData-test', 'settings', 'settings.json');
  let existing = {} as ISettingFile;
  if (fs.existsSync(settingsPath)) {
    existing = fs.readJsonSync(settingsPath) as ISettingFile;
  } else {
    fs.ensureDirSync(path.dirname(settingsPath));
  }
  // Initialize scenario-specific providerConfig if not set
  if (!this.providerConfig) {
    this.providerConfig = createProviderConfig();
  }
  const providerConfig = this.providerConfig;
  const modelsArray = providerConfig.models;
  const modelName = modelsArray[0]?.name;
  const embeddingModelName = modelsArray[1]?.name;
  const speechModelName = modelsArray[2]?.name;
  // Parse options from the data table
  let freeModel: string | undefined;
  let aiGenerateBackupTitle: boolean | undefined;
  let aiGenerateBackupTitleTimeout: number | undefined;
  if (dataTable && typeof dataTable.raw === 'function') {
    const rows = dataTable.raw();
    // Process all rows as key-value pairs (no header row)
    for (let index = 0; index < rows.length; index++) {
      const row = rows[index];
      const key = (row[0] ?? '').trim();
      const value = (row[1] ?? '').trim();
      if (key === 'freeModel') {
        // If value is 'true', enable freeModel using the same model as the main model
        if (value === 'true') {
          freeModel = modelName;
        }
      } else if (key === 'aiGenerateBackupTitle') {
        aiGenerateBackupTitle = value === 'true';
      } else if (key === 'aiGenerateBackupTitleTimeout') {
        aiGenerateBackupTitleTimeout = Number.parseInt(value, 10);
      }
    }
  }
  const newAi: AIGlobalSettings = {
    providers: [providerConfig],
    defaultConfig: {
      default: {
        provider: providerConfig.provider,
        model: modelName,
      },
      embedding: {
        provider: providerConfig.provider,
        model: embeddingModelName,
      },
      speech: {
        provider: providerConfig.provider,
        model: speechModelName,
      },
      ...(freeModel
        ? {
          free: {
            provider: providerConfig.provider,
            model: freeModel,
          },
        }
        : {}),
      modelParameters: desiredModelParameters,
    },
  };
  const newPreferences = {
    ...(existing.preferences || {}),
    ...(aiGenerateBackupTitle !== undefined ? { aiGenerateBackupTitle } : {}),
    ...(aiGenerateBackupTitleTimeout !== undefined ? { aiGenerateBackupTitleTimeout } : {}),
  };
  fs.writeJsonSync(settingsPath, { ...existing, aiSettings: newAi, preferences: newPreferences } as ISettingFile, { spaces: 2 });
});
async function clearAISettings(scenarioRoot?: string) {
  const root = scenarioRoot || process.cwd();
  const settingsPath = path.resolve(root, 'userData-test', 'settings', 'settings.json');
  if (!(await fs.pathExists(settingsPath))) return;
  const parsed = await fs.readJson(settingsPath) as ISettingFile;
  const cleaned = omit(parsed, ['aiSettings']);
  await fs.writeJson(settingsPath, cleaned, { spaces: 2 });
}
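// clearAISettings is exported at the bottom of this file so hooks in other step
// definition files can reset AI settings between scenarios (call sites assumed).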
// Step to send the "ask AI with selection" IPC message
When('I send ask AI with selection message with text {string} and workspace {string}', async function(this: ApplicationWorld, selectionText: string, workspaceName: string) {
  const currentWindow = await this.getWindow('main');
  if (!currentWindow) {
    throw new Error('Main window not found');
  }
  // Get the workspace ID from the workspace name
  const workspaceId = await currentWindow.evaluate(async (name: string): Promise<string | undefined> => {
    // Use a narrow type view of window.service to avoid coupling to preload internals.
    const windowWithService = window as unknown as { service: { workspace: { getWorkspacesAsList: () => Promise<IWorkspace[]> } } };
    const workspaces = await windowWithService.service.workspace.getWorkspacesAsList();
    const workspace = workspaces.find((ws) => ws.name === name);
    return workspace?.id;
  }, workspaceName);
  if (!workspaceId) {
    throw new Error(`Workspace with name "${workspaceName}" not found`);
  }
  // Send an IPC message to trigger "Talk with AI" through the main process.
  // Use app.evaluate to access the Electron main-process API.
  if (!this.app) {
    throw new Error('Electron app not found');
  }
  const sendResult = await this.app.evaluate(async ({ BrowserWindow }, { text, wsId }: { text: string; wsId: string }) => {
    // The first window is always the main window in TidGi
    const allWindows = BrowserWindow.getAllWindows();
    const mainWindow = allWindows[0];
    if (!mainWindow) {
      return { success: false, error: 'No windows found', windowCount: allWindows.length };
    }
    const data = {
      selectionText: text,
      wikiUrl: `tidgi://${wsId}`,
      workspaceId: wsId,
    };
    // Send the IPC message to the renderer
    mainWindow.webContents.send('ask-ai-with-selection', data);
    return { success: true };
  }, { text: selectionText, wsId: workspaceId });
  if (!sendResult.success) {
    throw new Error(`Failed to send IPC message: ${sendResult.error || 'Unknown error'}`);
  }
  // Small delay to ensure the IPC message is processed (cross-process communication needs time)
  await new Promise(resolve => setTimeout(resolve, 200));
});
export { clearAISettings };