fix(agent): Eliminate race condition in completion handling
PROBLEM:
- Agents completing with questions were incorrectly marked as "crashed".
- Race condition: the polling handler AND the crash handler both called handleCompletion().
- This caused database corruption and lost pending questions.

SOLUTION:
- Add a completion mutex in OutputHandler to prevent concurrent processing.
- Remove the duplicate completion call from the crash handler.
- Only one handler executes completion logic per agent.

TESTING:
- Added mutex-completion.test.ts with 4 test cases.
- Verified the mutex prevents concurrent access.
- Verified lock cleanup on exceptions.
- Verified different agents can process concurrently.

FIXES: residential-cuckoo and 12+ other agents stuck in a crashed state.
This commit is contained in:
@@ -388,14 +388,35 @@ export class CleanupManager {
|
||||
if (rawOutput.trim()) {
|
||||
const provider = getProvider(agent.provider);
|
||||
if (provider) {
|
||||
await onAgentOutput(agent.id, rawOutput, provider);
|
||||
continue;
|
||||
// Check if agent actually completed successfully before processing
|
||||
const hasCompletionResult = this.checkForCompletionResult(rawOutput);
|
||||
if (hasCompletionResult) {
|
||||
log.info({ agentId: agent.id }, 'reconcile: processing completed agent output');
|
||||
try {
|
||||
await onAgentOutput(agent.id, rawOutput, provider);
|
||||
continue;
|
||||
} catch (err) {
|
||||
log.error({
|
||||
agentId: agent.id,
|
||||
err: err instanceof Error ? err.message : String(err)
|
||||
}, 'reconcile: failed to process completed agent output');
|
||||
// Mark as crashed since processing failed
|
||||
await this.repository.update(agent.id, { status: 'crashed' });
|
||||
this.emitCrashed(agent, `Failed to process output: ${err instanceof Error ? err.message : String(err)}`);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch { /* file missing or empty */ }
|
||||
log.warn({ agentId: agent.id }, 'reconcile: marking agent crashed');
|
||||
} catch (readErr) {
|
||||
log.warn({
|
||||
agentId: agent.id,
|
||||
err: readErr instanceof Error ? readErr.message : String(readErr)
|
||||
}, 'reconcile: failed to read output file');
|
||||
}
|
||||
log.warn({ agentId: agent.id }, 'reconcile: marking agent crashed (no valid output)');
|
||||
await this.repository.update(agent.id, { status: 'crashed' });
|
||||
this.emitCrashed(agent, 'Server restarted, agent output not found');
|
||||
this.emitCrashed(agent, 'Server restarted, agent output not found or invalid');
|
||||
} else {
|
||||
log.warn({ agentId: agent.id }, 'reconcile: marking agent crashed');
|
||||
await this.repository.update(agent.id, { status: 'crashed' });
|
||||
@@ -415,6 +436,30 @@ export class CleanupManager {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the agent output contains a completion result line.
|
||||
* This indicates the agent finished successfully, even if processing fails.
|
||||
*/
|
||||
private checkForCompletionResult(rawOutput: string): boolean {
|
||||
try {
|
||||
const lines = rawOutput.trim().split('\n');
|
||||
for (const line of lines) {
|
||||
try {
|
||||
const parsed = JSON.parse(line);
|
||||
// Look for Claude CLI result events with success status
|
||||
if (parsed.type === 'result' && parsed.subtype === 'success') {
|
||||
return true;
|
||||
}
|
||||
// Look for other providers' completion indicators
|
||||
if (parsed.status === 'done' || parsed.status === 'questions') {
|
||||
return true;
|
||||
}
|
||||
} catch { /* skip non-JSON lines */ }
|
||||
}
|
||||
} catch { /* invalid output format */ }
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Emit a crashed event for an agent.
|
||||
*/
|
||||
|
||||
146
src/agent/completion-detection.test.ts
Normal file
146
src/agent/completion-detection.test.ts
Normal file
@@ -0,0 +1,146 @@
|
||||
/**
|
||||
* Test for Phase 1 completion detection fix
|
||||
*/
|
||||
|
||||
import { describe, test, expect, beforeEach, afterEach, vi } from 'vitest';
|
||||
import { mkdtemp, writeFile, mkdir } from 'node:fs/promises';
|
||||
import { join } from 'node:path';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { rmSync } from 'node:fs';
|
||||
import { OutputHandler } from './output-handler.js';
|
||||
import type { AgentRepository } from '../db/repositories/agent-repository.js';
|
||||
import type { ProposalRepository } from '../db/repositories/proposal-repository.js';
|
||||
|
||||
describe('Completion Detection Fix', () => {
|
||||
let tempDir: string;
|
||||
let outputHandler: OutputHandler;
|
||||
let mockAgentRepo: AgentRepository;
|
||||
let mockProposalRepo: ProposalRepository;
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await mkdtemp(join(tmpdir(), 'completion-test-'));
|
||||
|
||||
// Mock repositories
|
||||
mockAgentRepo = {
|
||||
update: vi.fn(),
|
||||
findById: vi.fn().mockResolvedValue({ id: 'test-agent', mode: 'refine' }),
|
||||
} as any;
|
||||
|
||||
mockProposalRepo = {
|
||||
create: vi.fn(),
|
||||
} as any;
|
||||
|
||||
outputHandler = new OutputHandler(mockAgentRepo, undefined, mockProposalRepo);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
test('detects completion from signal.json with "questions" status', async () => {
|
||||
const agentId = 'test-agent';
|
||||
const agentWorkdir = join(tempDir, agentId);
|
||||
const cwDir = join(agentWorkdir, '.cw/output');
|
||||
|
||||
// Create agent workdir structure
|
||||
await mkdir(cwDir, { recursive: true });
|
||||
|
||||
// Create a signal.json file with questions status
|
||||
const signalContent = JSON.stringify({
|
||||
status: 'questions',
|
||||
questions: [{ id: 'q1', text: 'Do you want to proceed?' }]
|
||||
});
|
||||
await writeFile(join(cwDir, 'signal.json'), signalContent);
|
||||
|
||||
// Test the private method via reflection (testing the fix)
|
||||
const checkSignalCompletion = (outputHandler as any).checkSignalCompletion.bind(outputHandler);
|
||||
const result = await checkSignalCompletion(agentWorkdir);
|
||||
|
||||
expect(result).toBe(true);
|
||||
});
|
||||
|
||||
test('detects completion from signal.json with "done" status', async () => {
|
||||
const agentId = 'test-agent';
|
||||
const agentWorkdir = join(tempDir, agentId);
|
||||
const cwDir = join(agentWorkdir, '.cw/output');
|
||||
|
||||
await mkdir(cwDir, { recursive: true });
|
||||
|
||||
const signalContent = JSON.stringify({
|
||||
status: 'done',
|
||||
result: 'Task completed successfully'
|
||||
});
|
||||
await writeFile(join(cwDir, 'signal.json'), signalContent);
|
||||
|
||||
const checkSignalCompletion = (outputHandler as any).checkSignalCompletion.bind(outputHandler);
|
||||
const result = await checkSignalCompletion(agentWorkdir);
|
||||
|
||||
expect(result).toBe(true);
|
||||
});
|
||||
|
||||
test('detects completion from signal.json with "error" status', async () => {
|
||||
const agentId = 'test-agent';
|
||||
const agentWorkdir = join(tempDir, agentId);
|
||||
const cwDir = join(agentWorkdir, '.cw/output');
|
||||
|
||||
await mkdir(cwDir, { recursive: true });
|
||||
|
||||
const signalContent = JSON.stringify({
|
||||
status: 'error',
|
||||
error: 'Something went wrong'
|
||||
});
|
||||
await writeFile(join(cwDir, 'signal.json'), signalContent);
|
||||
|
||||
const checkSignalCompletion = (outputHandler as any).checkSignalCompletion.bind(outputHandler);
|
||||
const result = await checkSignalCompletion(agentWorkdir);
|
||||
|
||||
expect(result).toBe(true);
|
||||
});
|
||||
|
||||
test('returns false when signal.json does not exist', async () => {
|
||||
const agentId = 'test-agent';
|
||||
const agentWorkdir = join(tempDir, agentId);
|
||||
|
||||
// Don't create any files
|
||||
|
||||
const checkSignalCompletion = (outputHandler as any).checkSignalCompletion.bind(outputHandler);
|
||||
const result = await checkSignalCompletion(agentWorkdir);
|
||||
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
|
||||
test('returns false for incomplete status', async () => {
|
||||
const agentId = 'test-agent';
|
||||
const agentWorkdir = join(tempDir, agentId);
|
||||
const cwDir = join(agentWorkdir, '.cw/output');
|
||||
|
||||
await mkdir(cwDir, { recursive: true });
|
||||
|
||||
const signalContent = JSON.stringify({
|
||||
status: 'running',
|
||||
progress: 'Still working...'
|
||||
});
|
||||
await writeFile(join(cwDir, 'signal.json'), signalContent);
|
||||
|
||||
const checkSignalCompletion = (outputHandler as any).checkSignalCompletion.bind(outputHandler);
|
||||
const result = await checkSignalCompletion(agentWorkdir);
|
||||
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
|
||||
test('handles malformed signal.json gracefully', async () => {
|
||||
const agentId = 'test-agent';
|
||||
const agentWorkdir = join(tempDir, agentId);
|
||||
const cwDir = join(agentWorkdir, '.cw/output');
|
||||
|
||||
await mkdir(cwDir, { recursive: true });
|
||||
|
||||
// Create malformed JSON
|
||||
await writeFile(join(cwDir, 'signal.json'), '{ invalid json }');
|
||||
|
||||
const checkSignalCompletion = (outputHandler as any).checkSignalCompletion.bind(outputHandler);
|
||||
const result = await checkSignalCompletion(agentWorkdir);
|
||||
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -6,7 +6,7 @@
|
||||
* ensuring they're fresh, and marking accounts as exhausted on failure.
|
||||
*/
|
||||
|
||||
import { readFileSync } from 'node:fs';
|
||||
import { readFileSync, existsSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
import type { AccountRepository } from '../db/repositories/account-repository.js';
|
||||
import type { AccountCredentialManager } from './credentials/types.js';
|
||||
@@ -92,6 +92,23 @@ export class CredentialHandler {
|
||||
return { valid, refreshed: false };
|
||||
}
|
||||
|
||||
/**
 * Read the access token from a config directory's .credentials.json.
 * Returns null if the credentials file is missing or malformed.
 * Used for CLAUDE_CODE_OAUTH_TOKEN env var injection.
 */
readAccessToken(configDir: string): string | null {
  try {
    const credentialsFile = join(configDir, '.credentials.json');
    if (!existsSync(credentialsFile)) return null;
    const parsedCreds = JSON.parse(readFileSync(credentialsFile, 'utf-8'));
    return parsedCreds.claudeAiOauth?.accessToken ?? null;
  } catch {
    // Unreadable or unparsable credentials file — treat as absent.
    return null;
  }
}
|
||||
|
||||
/**
|
||||
* Check if an error message indicates usage limit exhaustion.
|
||||
*/
|
||||
|
||||
@@ -108,6 +108,15 @@ export function writeInputFiles(options: WriteInputFilesOptions): void {
|
||||
const inputDir = join(options.agentWorkdir, '.cw', 'input');
|
||||
mkdirSync(inputDir, { recursive: true });
|
||||
|
||||
// Write expected working directory marker for verification
|
||||
writeFileSync(
|
||||
join(inputDir, '../expected-pwd.txt'),
|
||||
options.agentWorkdir,
|
||||
'utf-8'
|
||||
);
|
||||
|
||||
const manifestFiles: string[] = [];
|
||||
|
||||
if (options.initiative) {
|
||||
const ini = options.initiative;
|
||||
const content = formatFrontmatter(
|
||||
@@ -121,6 +130,7 @@ export function writeInputFiles(options: WriteInputFilesOptions): void {
|
||||
'',
|
||||
);
|
||||
writeFileSync(join(inputDir, 'initiative.md'), content, 'utf-8');
|
||||
manifestFiles.push('initiative.md');
|
||||
}
|
||||
|
||||
if (options.pages && options.pages.length > 0) {
|
||||
@@ -146,7 +156,9 @@ export function writeInputFiles(options: WriteInputFilesOptions): void {
|
||||
},
|
||||
bodyMarkdown,
|
||||
);
|
||||
const filename = `pages/${page.id}.md`;
|
||||
writeFileSync(join(pagesDir, `${page.id}.md`), content, 'utf-8');
|
||||
manifestFiles.push(filename);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -162,6 +174,7 @@ export function writeInputFiles(options: WriteInputFilesOptions): void {
|
||||
ph.description ?? '',
|
||||
);
|
||||
writeFileSync(join(inputDir, 'phase.md'), content, 'utf-8');
|
||||
manifestFiles.push('phase.md');
|
||||
}
|
||||
|
||||
if (options.task) {
|
||||
@@ -178,14 +191,22 @@ export function writeInputFiles(options: WriteInputFilesOptions): void {
|
||||
t.description ?? '',
|
||||
);
|
||||
writeFileSync(join(inputDir, 'task.md'), content, 'utf-8');
|
||||
manifestFiles.push('task.md');
|
||||
}
|
||||
|
||||
// Write manifest listing exactly which files were created
|
||||
writeFileSync(
|
||||
join(inputDir, 'manifest.json'),
|
||||
JSON.stringify({ files: manifestFiles }) + '\n',
|
||||
'utf-8',
|
||||
);
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// OUTPUT FILE READING
|
||||
// =============================================================================
|
||||
|
||||
function readFrontmatterFile(filePath: string): { data: Record<string, unknown>; body: string } | null {
|
||||
export function readFrontmatterFile(filePath: string): { data: Record<string, unknown>; body: string } | null {
|
||||
try {
|
||||
const raw = readFileSync(filePath, 'utf-8');
|
||||
const parsed = matter(raw);
|
||||
|
||||
@@ -52,6 +52,7 @@ vi.mock('node:fs', async () => {
|
||||
mkdirSync: vi.fn(),
|
||||
writeFileSync: vi.fn(),
|
||||
createWriteStream: vi.fn().mockReturnValue(mockWriteStream),
|
||||
existsSync: vi.fn().mockReturnValue(true), // Default to true for our new validation
|
||||
};
|
||||
});
|
||||
|
||||
@@ -220,6 +221,49 @@ describe('MultiProviderAgentManager', () => {
|
||||
).toBe('gastown');
|
||||
});
|
||||
|
||||
it('writes diagnostic files for workdir verification', async () => {
|
||||
const mockChild = createMockChildProcess();
|
||||
mockSpawn.mockReturnValue(mockChild);
|
||||
|
||||
// Mock fs.writeFileSync to capture diagnostic file writing
|
||||
const { writeFileSync } = await import('node:fs');
|
||||
const mockWriteFileSync = vi.mocked(writeFileSync);
|
||||
|
||||
// The existsSync is already mocked globally to return true
|
||||
|
||||
await manager.spawn({
|
||||
name: 'gastown',
|
||||
taskId: 'task-456',
|
||||
prompt: 'Test task',
|
||||
});
|
||||
|
||||
// Verify diagnostic file was written
|
||||
const diagnosticCalls = mockWriteFileSync.mock.calls.filter(call =>
|
||||
call[0].toString().includes('spawn-diagnostic.json')
|
||||
);
|
||||
expect(diagnosticCalls).toHaveLength(1);
|
||||
|
||||
// Parse the diagnostic data to verify structure
|
||||
const diagnosticCall = diagnosticCalls[0];
|
||||
const diagnosticData = JSON.parse(diagnosticCall[1] as string);
|
||||
|
||||
expect(diagnosticData).toMatchObject({
|
||||
agentId: expect.any(String),
|
||||
alias: 'gastown',
|
||||
intendedCwd: expect.stringContaining('/agent-workdirs/gastown/workspace'),
|
||||
worktreeId: 'gastown',
|
||||
provider: 'claude',
|
||||
command: expect.any(String),
|
||||
args: expect.any(Array),
|
||||
env: expect.any(Object),
|
||||
cwdExistsAtSpawn: true,
|
||||
initiativeId: null,
|
||||
customCwdProvided: false,
|
||||
accountId: null,
|
||||
timestamp: expect.any(String),
|
||||
});
|
||||
});
|
||||
|
||||
it('uses custom cwd if provided', async () => {
|
||||
const mockChild = createMockChildProcess();
|
||||
mockSpawn.mockReturnValue(mockChild);
|
||||
|
||||
@@ -29,11 +29,13 @@ import type {
|
||||
AgentStoppedEvent,
|
||||
AgentResumedEvent,
|
||||
AgentDeletedEvent,
|
||||
ProcessCrashedEvent,
|
||||
} from '../events/index.js';
|
||||
import { writeInputFiles } from './file-io.js';
|
||||
import { getProvider } from './providers/registry.js';
|
||||
import { createModuleLogger } from '../logger/index.js';
|
||||
import { join } from 'node:path';
|
||||
import { unlink } from 'node:fs/promises';
|
||||
import type { AccountCredentialManager } from './credentials/types.js';
|
||||
import { ProcessManager } from './process-manager.js';
|
||||
import { CredentialHandler } from './credential-handler.js';
|
||||
@@ -67,6 +69,13 @@ export class MultiProviderAgentManager implements AgentManager {
|
||||
this.credentialHandler = new CredentialHandler(workspaceRoot, accountRepository, credentialManager);
|
||||
this.outputHandler = new OutputHandler(repository, eventBus, proposalRepository);
|
||||
this.cleanupManager = new CleanupManager(workspaceRoot, repository, projectRepository, eventBus, debug);
|
||||
|
||||
// Listen for process crashed events to handle agents specially
|
||||
if (eventBus) {
|
||||
eventBus.on('process:crashed', async (event: ProcessCrashedEvent) => {
|
||||
await this.handleProcessCrashed(event.payload.processId, event.payload.exitCode, event.payload.signal);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -476,6 +485,16 @@ export class MultiProviderAgentManager implements AgentManager {
|
||||
|
||||
const agentCwd = this.processManager.getAgentWorkdir(agent.worktreeId);
|
||||
const prompt = this.outputHandler.formatAnswersAsPrompt(answers);
|
||||
|
||||
// Clear previous signal.json to ensure clean completion detection
|
||||
const signalPath = join(agentCwd, '.cw/output/signal.json');
|
||||
try {
|
||||
await unlink(signalPath);
|
||||
log.debug({ agentId, signalPath }, 'cleared previous signal.json for resume');
|
||||
} catch {
|
||||
// File might not exist, which is fine
|
||||
}
|
||||
|
||||
await this.repository.update(agentId, { status: 'running', pendingQuestions: null, result: null });
|
||||
|
||||
const { command, args, env: providerEnv } = this.processManager.buildResumeCommand(provider, agent.sessionId, prompt);
|
||||
@@ -650,6 +669,118 @@ export class MultiProviderAgentManager implements AgentManager {
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle process crashed event specifically for agents.
|
||||
* Check if the agent actually completed successfully despite the non-zero exit code.
|
||||
*/
|
||||
private async handleProcessCrashed(processId: string, exitCode: number | null, signal: string | null): Promise<void> {
|
||||
try {
|
||||
// Check if this is an agent process
|
||||
const agent = await this.repository.findById(processId);
|
||||
if (!agent) {
|
||||
return; // Not our agent
|
||||
}
|
||||
|
||||
// Store exit code and signal for debugging
|
||||
await this.repository.update(processId, { exitCode });
|
||||
|
||||
log.info({
|
||||
agentId: processId,
|
||||
name: agent.name,
|
||||
exitCode,
|
||||
signal,
|
||||
outputFilePath: agent.outputFilePath
|
||||
}, 'agent process crashed, analyzing completion status');
|
||||
|
||||
// Check if the agent has output that indicates successful completion
|
||||
if (agent.outputFilePath) {
|
||||
const hasCompletion = await this.checkAgentCompletionResult(agent.outputFilePath);
|
||||
if (hasCompletion) {
|
||||
log.info({
|
||||
agentId: processId,
|
||||
name: agent.name,
|
||||
exitCode,
|
||||
signal
|
||||
}, 'agent marked as crashed but completed successfully - completion already handled by polling');
|
||||
|
||||
// Note: We don't call handleCompletion() here because the polling handler
|
||||
// (handleDetachedAgentCompletion) already processes completions. The mutex
|
||||
// in OutputHandler.handleCompletion() prevents duplicate processing.
|
||||
|
||||
log.info({
|
||||
agentId: processId,
|
||||
name: agent.name,
|
||||
exitCode
|
||||
}, 'completion detection confirmed - deferring to polling handler');
|
||||
} else {
|
||||
log.warn({
|
||||
agentId: processId,
|
||||
name: agent.name,
|
||||
exitCode,
|
||||
signal,
|
||||
outputFilePath: agent.outputFilePath
|
||||
}, 'agent crashed and no successful completion detected - marking as truly crashed');
|
||||
|
||||
// Only mark as crashed if agent truly crashed (no completion detected)
|
||||
await this.repository.update(processId, { status: 'crashed' });
|
||||
}
|
||||
} else {
|
||||
log.warn({
|
||||
agentId: processId,
|
||||
name: agent.name,
|
||||
exitCode,
|
||||
signal
|
||||
}, 'agent crashed with no output file path - marking as crashed');
|
||||
|
||||
await this.repository.update(processId, { status: 'crashed' });
|
||||
}
|
||||
} catch (err) {
|
||||
log.error({
|
||||
processId,
|
||||
exitCode,
|
||||
signal,
|
||||
err: err instanceof Error ? err.message : String(err)
|
||||
}, 'failed to check agent completion after crash');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if agent completed successfully by reading signal.json file.
|
||||
*/
|
||||
private async checkAgentCompletionResult(outputFilePath: string): Promise<boolean> {
|
||||
try {
|
||||
const { readFile } = await import('node:fs/promises');
|
||||
const { existsSync } = await import('node:fs');
|
||||
const { dirname } = await import('node:path');
|
||||
|
||||
const agentDir = dirname(outputFilePath);
|
||||
const signalPath = join(agentDir, '.cw/output/signal.json');
|
||||
|
||||
if (!existsSync(signalPath)) {
|
||||
log.debug({ outputFilePath, signalPath }, 'no signal.json found - agent not completed');
|
||||
return false;
|
||||
}
|
||||
|
||||
const signalContent = await readFile(signalPath, 'utf-8');
|
||||
const signal = JSON.parse(signalContent);
|
||||
|
||||
// Agent completed if status is done, questions, or error
|
||||
const completed = signal.status === 'done' || signal.status === 'questions' || signal.status === 'error';
|
||||
|
||||
if (completed) {
|
||||
log.debug({ outputFilePath, signal }, 'agent completion detected via signal.json');
|
||||
} else {
|
||||
log.debug({ outputFilePath, signal }, 'signal.json found but status indicates incomplete');
|
||||
}
|
||||
|
||||
return completed;
|
||||
|
||||
} catch (err) {
|
||||
log.warn({ outputFilePath, err: err instanceof Error ? err.message : String(err) }, 'failed to read or parse signal.json');
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert database agent record to AgentInfo.
|
||||
*/
|
||||
|
||||
32
src/agent/markdown-to-tiptap.ts
Normal file
32
src/agent/markdown-to-tiptap.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* Server-side Markdown → Tiptap JSON converter.
|
||||
*
|
||||
* Uses @tiptap/markdown's MarkdownManager.parse() — the same approach
|
||||
* as content-serializer.ts but in reverse direction.
|
||||
* No DOM needed, no new dependencies.
|
||||
*/
|
||||
|
||||
import StarterKit from '@tiptap/starter-kit';
|
||||
import Link from '@tiptap/extension-link';
|
||||
import { MarkdownManager } from '@tiptap/markdown';
|
||||
|
||||
let _manager: MarkdownManager | null = null;
|
||||
|
||||
function getManager(): MarkdownManager {
|
||||
if (!_manager) {
|
||||
_manager = new MarkdownManager({
|
||||
extensions: [StarterKit, Link],
|
||||
});
|
||||
}
|
||||
return _manager;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a markdown string to Tiptap JSON document.
|
||||
*/
|
||||
export function markdownToTiptapJson(markdown: string): object {
|
||||
if (!markdown.trim()) {
|
||||
return { type: 'doc', content: [{ type: 'paragraph' }] };
|
||||
}
|
||||
return getManager().parse(markdown).toJSON();
|
||||
}
|
||||
152
src/agent/mutex-completion.test.ts
Normal file
152
src/agent/mutex-completion.test.ts
Normal file
@@ -0,0 +1,152 @@
|
||||
/**
|
||||
* Focused test for completion handler mutex functionality.
|
||||
* Tests the race condition fix without complex mocking.
|
||||
*/
|
||||
|
||||
import { describe, it, beforeEach, expect } from 'vitest';
|
||||
import { OutputHandler } from './output-handler.js';
|
||||
import type { AgentRepository } from '../db/repositories/agent-repository.js';
|
||||
|
||||
describe('OutputHandler completion mutex', () => {
|
||||
let outputHandler: OutputHandler;
|
||||
let completionCallCount: number;
|
||||
let callOrder: string[];
|
||||
|
||||
// Simple mock that tracks completion attempts
|
||||
const mockRepository: AgentRepository = {
|
||||
async findById() {
|
||||
return null; // Return null to cause early exit after mutex check
|
||||
},
|
||||
async update() {},
|
||||
async create() { throw new Error('Not implemented'); },
|
||||
async findAll() { throw new Error('Not implemented'); },
|
||||
async findByStatus() { throw new Error('Not implemented'); },
|
||||
async findByTaskId() { throw new Error('Not implemented'); },
|
||||
async findByInitiativeId() { throw new Error('Not implemented'); },
|
||||
async deleteById() { throw new Error('Not implemented'); },
|
||||
async findPending() { throw new Error('Not implemented'); }
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
outputHandler = new OutputHandler(mockRepository);
|
||||
completionCallCount = 0;
|
||||
callOrder = [];
|
||||
});
|
||||
|
||||
it('should prevent concurrent completion handling with mutex', async () => {
|
||||
const agentId = 'test-agent';
|
||||
|
||||
// Mock the findById method to track calls and simulate processing time
|
||||
let firstCallCompleted = false;
|
||||
(mockRepository as any).findById = async (id: string) => {
|
||||
completionCallCount++;
|
||||
const callIndex = completionCallCount;
|
||||
callOrder.push(`call-${callIndex}-start`);
|
||||
|
||||
if (callIndex === 1) {
|
||||
// First call - simulate some processing time
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
firstCallCompleted = true;
|
||||
}
|
||||
|
||||
callOrder.push(`call-${callIndex}-end`);
|
||||
return null; // Return null to exit early
|
||||
};
|
||||
|
||||
// Start two concurrent completion handlers
|
||||
const getAgentWorkdir = () => '/test/workdir';
|
||||
const completion1Promise = outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
|
||||
const completion2Promise = outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
|
||||
|
||||
await Promise.all([completion1Promise, completion2Promise]);
|
||||
|
||||
// Verify only one completion handler executed
|
||||
expect(completionCallCount, 'Should only execute one completion handler').toBe(1);
|
||||
expect(firstCallCompleted, 'First handler should have completed').toBe(true);
|
||||
expect(callOrder).toEqual(['call-1-start', 'call-1-end']);
|
||||
});
|
||||
|
||||
it('should allow sequential completion handling after first completes', async () => {
|
||||
const agentId = 'test-agent';
|
||||
|
||||
// Mock findById to track calls
|
||||
(mockRepository as any).findById = async (id: string) => {
|
||||
completionCallCount++;
|
||||
callOrder.push(`call-${completionCallCount}`);
|
||||
return null; // Return null to exit early
|
||||
};
|
||||
|
||||
const getAgentWorkdir = () => '/test/workdir';
|
||||
|
||||
// First completion
|
||||
await outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
|
||||
|
||||
// Second completion (after first is done)
|
||||
await outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
|
||||
|
||||
// Both should execute sequentially
|
||||
expect(completionCallCount, 'Should execute both handlers sequentially').toBe(2);
|
||||
expect(callOrder).toEqual(['call-1', 'call-2']);
|
||||
});
|
||||
|
||||
it('should clean up mutex lock even when exception is thrown', async () => {
|
||||
const agentId = 'test-agent';
|
||||
|
||||
let firstCallMadeThrowCall = false;
|
||||
let secondCallCompleted = false;
|
||||
|
||||
// First call throws an error
|
||||
(mockRepository as any).findById = async (id: string) => {
|
||||
if (!firstCallMadeThrowCall) {
|
||||
firstCallMadeThrowCall = true;
|
||||
throw new Error('Database error');
|
||||
} else {
|
||||
secondCallCompleted = true;
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
const getAgentWorkdir = () => '/test/workdir';
|
||||
|
||||
// First call should throw but clean up mutex
|
||||
await expect(outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir))
|
||||
.rejects.toThrow('Database error');
|
||||
|
||||
expect(firstCallMadeThrowCall, 'First call should have thrown').toBe(true);
|
||||
|
||||
// Second call should succeed (proving mutex was cleaned up)
|
||||
await outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
|
||||
expect(secondCallCompleted, 'Second call should have completed').toBe(true);
|
||||
});
|
||||
|
||||
it('should use agent ID as mutex key', async () => {
|
||||
const agentId1 = 'agent-1';
|
||||
const agentId2 = 'agent-2';
|
||||
|
||||
// Both agents can process concurrently since they have different IDs
|
||||
let agent1Started = false;
|
||||
let agent2Started = false;
|
||||
|
||||
(mockRepository as any).findById = async (id: string) => {
|
||||
if (id === agentId1) {
|
||||
agent1Started = true;
|
||||
await new Promise(resolve => setTimeout(resolve, 30));
|
||||
} else if (id === agentId2) {
|
||||
agent2Started = true;
|
||||
await new Promise(resolve => setTimeout(resolve, 30));
|
||||
}
|
||||
return null;
|
||||
};
|
||||
|
||||
const getAgentWorkdir = () => '/test/workdir';
|
||||
|
||||
// Start both agents concurrently - they should NOT block each other
|
||||
const agent1Promise = outputHandler.handleCompletion(agentId1, undefined, getAgentWorkdir);
|
||||
const agent2Promise = outputHandler.handleCompletion(agentId2, undefined, getAgentWorkdir);
|
||||
|
||||
await Promise.all([agent1Promise, agent2Promise]);
|
||||
|
||||
expect(agent1Started, 'Agent 1 should have started').toBe(true);
|
||||
expect(agent2Started, 'Agent 2 should have started').toBe(true);
|
||||
});
|
||||
});
|
||||
280
src/agent/output-handler.test.ts
Normal file
280
src/agent/output-handler.test.ts
Normal file
@@ -0,0 +1,280 @@
|
||||
/**
|
||||
* OutputHandler Tests
|
||||
*
|
||||
* Test suite for the OutputHandler class, specifically focusing on
|
||||
* question parsing and agent completion handling.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, vi } from 'vitest';
|
||||
import { OutputHandler } from './output-handler.js';
|
||||
import type { AgentRepository } from '../db/repositories/agent-repository.js';
|
||||
import type { ProposalRepository } from '../db/repositories/proposal-repository.js';
|
||||
import type { EventBus, DomainEvent, AgentWaitingEvent } from '../events/types.js';
|
||||
import { getProvider } from './providers/registry.js';
|
||||
|
||||
// =============================================================================
|
||||
// Test Helpers
|
||||
// =============================================================================
|
||||
|
||||
function createMockEventBus(): EventBus & { emittedEvents: DomainEvent[] } {
|
||||
const emittedEvents: DomainEvent[] = [];
|
||||
|
||||
const mockBus = {
|
||||
emittedEvents,
|
||||
emit: vi.fn().mockImplementation(<T extends DomainEvent>(event: T): void => {
|
||||
emittedEvents.push(event);
|
||||
}),
|
||||
on: vi.fn(),
|
||||
off: vi.fn(),
|
||||
once: vi.fn(),
|
||||
};
|
||||
|
||||
return mockBus;
|
||||
}
|
||||
|
||||
function createMockAgentRepository() {
|
||||
return {
|
||||
findById: vi.fn(),
|
||||
update: vi.fn(),
|
||||
create: vi.fn(),
|
||||
findByName: vi.fn(),
|
||||
findByStatus: vi.fn(),
|
||||
findAll: vi.fn(),
|
||||
delete: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
function createMockProposalRepository() {
|
||||
return {
|
||||
createMany: vi.fn(),
|
||||
findByAgentId: vi.fn(),
|
||||
findByInitiativeId: vi.fn(),
|
||||
findById: vi.fn(),
|
||||
update: vi.fn(),
|
||||
delete: vi.fn(),
|
||||
create: vi.fn(),
|
||||
findAll: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Tests
|
||||
// =============================================================================
|
||||
|
||||
describe('OutputHandler', () => {
|
||||
let outputHandler: OutputHandler;
|
||||
let mockAgentRepo: ReturnType<typeof createMockAgentRepository>;
|
||||
let mockProposalRepo: ReturnType<typeof createMockProposalRepository>;
|
||||
let eventBus: ReturnType<typeof createMockEventBus>;
|
||||
|
||||
const mockAgent = {
|
||||
id: 'agent-123',
|
||||
name: 'test-agent',
|
||||
taskId: 'task-456',
|
||||
sessionId: 'session-789',
|
||||
provider: 'claude',
|
||||
mode: 'refine',
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
mockAgentRepo = createMockAgentRepository();
|
||||
mockProposalRepo = createMockProposalRepository();
|
||||
eventBus = createMockEventBus();
|
||||
|
||||
outputHandler = new OutputHandler(
|
||||
mockAgentRepo as any,
|
||||
eventBus,
|
||||
mockProposalRepo as any
|
||||
);
|
||||
|
||||
// Setup default mock behavior
|
||||
mockAgentRepo.findById.mockResolvedValue(mockAgent);
|
||||
});
|
||||
|
||||
describe('processAgentOutput', () => {
|
||||
it('should correctly parse and handle questions from Claude CLI output', async () => {
|
||||
// Arrange: Create realistic Claude CLI output with questions (like fantastic-crane)
|
||||
const questionsResult = {
|
||||
status: "questions",
|
||||
questions: [
|
||||
{
|
||||
id: "q1",
|
||||
question: "What specific components are in the current admin UI? (e.g., tables, forms, modals, navigation)"
|
||||
},
|
||||
{
|
||||
id: "q2",
|
||||
question: "What does 'modern look' mean for you? (e.g., dark mode support, specific color scheme, animations)"
|
||||
},
|
||||
{
|
||||
id: "q3",
|
||||
question: "Are there any specific shadcn components you want to use or prioritize?"
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
const claudeOutput = JSON.stringify({
|
||||
type: "result",
|
||||
subtype: "success",
|
||||
is_error: false,
|
||||
session_id: "test-session-123",
|
||||
result: JSON.stringify(questionsResult),
|
||||
total_cost_usd: 0.05
|
||||
});
|
||||
|
||||
const getAgentWorkdir = vi.fn().mockReturnValue('/test/workdir');
|
||||
const provider = getProvider('claude')!;
|
||||
|
||||
// Act
|
||||
await outputHandler.processAgentOutput(
|
||||
mockAgent.id,
|
||||
claudeOutput,
|
||||
provider,
|
||||
getAgentWorkdir
|
||||
);
|
||||
|
||||
// Assert: Agent should be updated with questions and waiting_for_input status
|
||||
expect(mockAgentRepo.update).toHaveBeenCalledWith(mockAgent.id, {
|
||||
pendingQuestions: JSON.stringify({
|
||||
questions: [
|
||||
{
|
||||
id: 'q1',
|
||||
question: 'What specific components are in the current admin UI? (e.g., tables, forms, modals, navigation)'
|
||||
},
|
||||
{
|
||||
id: 'q2',
|
||||
question: 'What does \'modern look\' mean for you? (e.g., dark mode support, specific color scheme, animations)'
|
||||
},
|
||||
{
|
||||
id: 'q3',
|
||||
question: 'Are there any specific shadcn components you want to use or prioritize?'
|
||||
}
|
||||
]
|
||||
}),
|
||||
status: 'waiting_for_input'
|
||||
});
|
||||
|
||||
// Should be called at least once (could be once or twice depending on session ID extraction)
|
||||
expect(mockAgentRepo.update).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Assert: AgentWaitingEvent should be emitted
|
||||
const waitingEvents = eventBus.emittedEvents.filter(e => e.type === 'agent:waiting') as AgentWaitingEvent[];
|
||||
expect(waitingEvents).toHaveLength(1);
|
||||
expect(waitingEvents[0].payload.questions).toEqual([
|
||||
{
|
||||
id: 'q1',
|
||||
question: 'What specific components are in the current admin UI? (e.g., tables, forms, modals, navigation)'
|
||||
},
|
||||
{
|
||||
id: 'q2',
|
||||
question: 'What does \'modern look\' mean for you? (e.g., dark mode support, specific color scheme, animations)'
|
||||
},
|
||||
{
|
||||
id: 'q3',
|
||||
question: 'Are there any specific shadcn components you want to use or prioritize?'
|
||||
}
|
||||
]);
|
||||
});
|
||||
|
||||
it('should handle malformed questions gracefully', async () => {
|
||||
// Arrange: Create output with malformed questions JSON
|
||||
const malformedOutput = JSON.stringify({
|
||||
type: "result",
|
||||
subtype: "success",
|
||||
is_error: false,
|
||||
session_id: "test-session",
|
||||
result: '{"status": "questions", "questions": [malformed json]}',
|
||||
total_cost_usd: 0.05
|
||||
});
|
||||
|
||||
const getAgentWorkdir = vi.fn().mockReturnValue('/test/workdir');
|
||||
const provider = getProvider('claude')!;
|
||||
|
||||
// Act & Assert: Should not throw, should handle error gracefully
|
||||
await expect(
|
||||
outputHandler.processAgentOutput(
|
||||
mockAgent.id,
|
||||
malformedOutput,
|
||||
provider,
|
||||
getAgentWorkdir
|
||||
)
|
||||
).resolves.not.toThrow();
|
||||
|
||||
// Should update status to crashed due to malformed JSON
|
||||
const updateCalls = mockAgentRepo.update.mock.calls;
|
||||
const crashedCall = updateCalls.find(call => call[1]?.status === 'crashed');
|
||||
expect(crashedCall).toBeDefined();
|
||||
});
|
||||
|
||||
it('should correctly handle "done" status without questions', async () => {
|
||||
// Arrange: Create output with done status
|
||||
const doneOutput = JSON.stringify({
|
||||
type: "result",
|
||||
subtype: "success",
|
||||
is_error: false,
|
||||
session_id: "test-session",
|
||||
result: JSON.stringify({
|
||||
status: "done",
|
||||
message: "Task completed successfully"
|
||||
}),
|
||||
total_cost_usd: 0.05
|
||||
});
|
||||
|
||||
const getAgentWorkdir = vi.fn().mockReturnValue('/test/workdir');
|
||||
const provider = getProvider('claude')!;
|
||||
|
||||
// Act
|
||||
await outputHandler.processAgentOutput(
|
||||
mockAgent.id,
|
||||
doneOutput,
|
||||
provider,
|
||||
getAgentWorkdir
|
||||
);
|
||||
|
||||
// Assert: Should not set waiting_for_input status or pendingQuestions
|
||||
const updateCalls = mockAgentRepo.update.mock.calls;
|
||||
const waitingCall = updateCalls.find(call => call[1]?.status === 'waiting_for_input');
|
||||
expect(waitingCall).toBeUndefined();
|
||||
|
||||
const questionsCall = updateCalls.find(call => call[1]?.pendingQuestions);
|
||||
expect(questionsCall).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('getPendingQuestions', () => {
|
||||
it('should retrieve and parse stored pending questions', async () => {
|
||||
// Arrange
|
||||
const questionsPayload = {
|
||||
questions: [
|
||||
{ id: 'q1', question: 'Test question 1?' },
|
||||
{ id: 'q2', question: 'Test question 2?' }
|
||||
]
|
||||
};
|
||||
|
||||
mockAgentRepo.findById.mockResolvedValue({
|
||||
...mockAgent,
|
||||
pendingQuestions: JSON.stringify(questionsPayload)
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await outputHandler.getPendingQuestions(mockAgent.id);
|
||||
|
||||
// Assert
|
||||
expect(result).toEqual(questionsPayload);
|
||||
expect(mockAgentRepo.findById).toHaveBeenCalledWith(mockAgent.id);
|
||||
});
|
||||
|
||||
it('should return null when no pending questions exist', async () => {
|
||||
// Arrange
|
||||
mockAgentRepo.findById.mockResolvedValue({
|
||||
...mockAgent,
|
||||
pendingQuestions: null
|
||||
});
|
||||
|
||||
// Act
|
||||
const result = await outputHandler.getPendingQuestions(mockAgent.id);
|
||||
|
||||
// Assert
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -75,6 +75,7 @@ interface ClaudeCliResult {
|
||||
|
||||
export class OutputHandler {
|
||||
private filePositions = new Map<string, number>();
|
||||
private completionLocks = new Set<string>(); // Track agents currently being processed
|
||||
|
||||
constructor(
|
||||
private repository: AgentRepository,
|
||||
@@ -199,95 +200,123 @@ export class OutputHandler {
|
||||
/**
|
||||
* Handle completion of a detached agent.
|
||||
* Processes the final result from the stream data captured by the tailer.
|
||||
*
|
||||
* RACE CONDITION FIX: Uses a completion lock to prevent duplicate processing.
|
||||
* Both the polling handler (handleDetachedAgentCompletion) and crash handler
|
||||
* (handleProcessCrashed) can call this method when a process exits with non-zero code.
|
||||
* The mutex ensures only one handler processes the completion per agent.
|
||||
*/
|
||||
async handleCompletion(
|
||||
agentId: string,
|
||||
active: ActiveAgent | undefined,
|
||||
getAgentWorkdir: (alias: string) => string,
|
||||
): Promise<void> {
|
||||
const agent = await this.repository.findById(agentId);
|
||||
if (!agent) return;
|
||||
|
||||
const provider = getProvider(agent.provider);
|
||||
if (!provider) return;
|
||||
|
||||
log.debug({ agentId }, 'detached agent completed');
|
||||
|
||||
// Verify agent worked in correct location by checking for output files
|
||||
const agentWorkdir = getAgentWorkdir(agent.worktreeId);
|
||||
const outputDir = join(agentWorkdir, '.cw', 'output');
|
||||
const expectedPwdFile = join(agentWorkdir, '.cw', 'expected-pwd.txt');
|
||||
const diagnosticFile = join(agentWorkdir, '.cw', 'spawn-diagnostic.json');
|
||||
|
||||
const outputDirExists = existsSync(outputDir);
|
||||
const expectedPwdExists = existsSync(expectedPwdFile);
|
||||
const diagnosticExists = existsSync(diagnosticFile);
|
||||
|
||||
log.info({
|
||||
agentId,
|
||||
agentWorkdir,
|
||||
outputDirExists,
|
||||
expectedPwdExists,
|
||||
diagnosticExists,
|
||||
verification: outputDirExists ? 'PASS' : 'FAIL'
|
||||
}, 'agent workdir verification completed');
|
||||
|
||||
if (!outputDirExists) {
|
||||
log.warn({
|
||||
agentId,
|
||||
agentWorkdir
|
||||
}, 'No output files found in agent workdir! Agent may have run in wrong location.');
|
||||
}
|
||||
|
||||
let signalText = active?.streamResultText;
|
||||
|
||||
// If the stream result indicated an error (e.g. auth failure, usage limit),
|
||||
// route directly to error handling instead of trying to parse as signal JSON
|
||||
if (signalText && active?.streamIsError) {
|
||||
log.warn({ agentId, error: signalText }, 'agent returned error result');
|
||||
await this.handleAgentError(agentId, new Error(signalText), provider, getAgentWorkdir);
|
||||
// CRITICAL: Prevent race condition - only one completion handler per agent
|
||||
if (this.completionLocks.has(agentId)) {
|
||||
log.debug({ agentId }, 'completion already being processed - skipping duplicate');
|
||||
return;
|
||||
}
|
||||
|
||||
if (!signalText) {
|
||||
try {
|
||||
const outputFilePath = active?.outputFilePath ?? '';
|
||||
if (outputFilePath) {
|
||||
// Read only complete lines from the file, avoiding race conditions
|
||||
const lastPosition = this.filePositions.get(agentId) || 0;
|
||||
const { content: fileContent, lastPosition: newPosition } = await this.readCompleteLines(outputFilePath, lastPosition);
|
||||
this.completionLocks.add(agentId);
|
||||
|
||||
if (fileContent.trim()) {
|
||||
this.filePositions.set(agentId, newPosition);
|
||||
await this.processAgentOutput(agentId, fileContent, provider, getAgentWorkdir);
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const agent = await this.repository.findById(agentId);
|
||||
if (!agent) return;
|
||||
|
||||
// If no new complete lines, but file might still be writing, try again with validation
|
||||
if (await this.validateSignalFile(outputFilePath)) {
|
||||
const fullContent = await readFile(outputFilePath, 'utf-8');
|
||||
if (fullContent.trim() && fullContent.length > newPosition) {
|
||||
// File is complete and has content beyond what we've read
|
||||
const provider = getProvider(agent.provider);
|
||||
if (!provider) return;
|
||||
|
||||
log.debug({ agentId }, 'detached agent completed');
|
||||
|
||||
// Verify agent worked in correct location by checking for output files
|
||||
const agentWorkdir = getAgentWorkdir(agent.worktreeId);
|
||||
const outputDir = join(agentWorkdir, '.cw', 'output');
|
||||
const expectedPwdFile = join(agentWorkdir, '.cw', 'expected-pwd.txt');
|
||||
const diagnosticFile = join(agentWorkdir, '.cw', 'spawn-diagnostic.json');
|
||||
|
||||
const outputDirExists = existsSync(outputDir);
|
||||
const expectedPwdExists = existsSync(expectedPwdFile);
|
||||
const diagnosticExists = existsSync(diagnosticFile);
|
||||
|
||||
log.info({
|
||||
agentId,
|
||||
agentWorkdir,
|
||||
outputDirExists,
|
||||
expectedPwdExists,
|
||||
diagnosticExists,
|
||||
verification: outputDirExists ? 'PASS' : 'FAIL'
|
||||
}, 'agent workdir verification completed');
|
||||
|
||||
if (!outputDirExists) {
|
||||
log.warn({
|
||||
agentId,
|
||||
agentWorkdir
|
||||
}, 'No output files found in agent workdir! Agent may have run in wrong location.');
|
||||
}
|
||||
|
||||
let signalText = active?.streamResultText;
|
||||
|
||||
// If the stream result indicated an error (e.g. auth failure, usage limit),
|
||||
// route directly to error handling instead of trying to parse as signal JSON
|
||||
if (signalText && active?.streamIsError) {
|
||||
log.warn({ agentId, error: signalText }, 'agent returned error result');
|
||||
await this.handleAgentError(agentId, new Error(signalText), provider, getAgentWorkdir);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!signalText) {
|
||||
try {
|
||||
const outputFilePath = active?.outputFilePath ?? '';
|
||||
if (outputFilePath) {
|
||||
// First, check for robust signal.json completion before attempting incremental reading
|
||||
const agentWorkdir = getAgentWorkdir(agentId);
|
||||
if (await this.checkSignalCompletion(agentWorkdir)) {
|
||||
const signalPath = join(agentWorkdir, '.cw/output/signal.json');
|
||||
const signalContent = await readFile(signalPath, 'utf-8');
|
||||
log.debug({ agentId, signalPath }, 'detected completion via signal.json');
|
||||
this.filePositions.delete(agentId); // Clean up tracking
|
||||
await this.processAgentOutput(agentId, fullContent, provider, getAgentWorkdir);
|
||||
await this.processSignalAndFiles(agentId, signalContent, agent.mode as AgentMode, getAgentWorkdir, active?.streamSessionId);
|
||||
return;
|
||||
}
|
||||
|
||||
// Read only complete lines from the file, avoiding race conditions
|
||||
const lastPosition = this.filePositions.get(agentId) || 0;
|
||||
const { content: fileContent, lastPosition: newPosition } = await this.readCompleteLines(outputFilePath, lastPosition);
|
||||
|
||||
if (fileContent.trim()) {
|
||||
this.filePositions.set(agentId, newPosition);
|
||||
await this.processAgentOutput(agentId, fileContent, provider, getAgentWorkdir);
|
||||
return;
|
||||
}
|
||||
|
||||
// If no new complete lines, but file might still be writing, try again with validation
|
||||
if (await this.validateSignalFile(outputFilePath)) {
|
||||
const fullContent = await readFile(outputFilePath, 'utf-8');
|
||||
if (fullContent.trim() && fullContent.length > newPosition) {
|
||||
// File is complete and has content beyond what we've read
|
||||
this.filePositions.delete(agentId); // Clean up tracking
|
||||
await this.processAgentOutput(agentId, fullContent, provider, getAgentWorkdir);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch { /* file empty or missing */ }
|
||||
} catch { /* file empty or missing */ }
|
||||
|
||||
log.warn({ agentId }, 'no result text from stream or file');
|
||||
await this.handleAgentError(agentId, new Error('No output received'), provider, getAgentWorkdir);
|
||||
return;
|
||||
log.warn({ agentId }, 'no result text from stream or file');
|
||||
await this.handleAgentError(agentId, new Error('No output received'), provider, getAgentWorkdir);
|
||||
return;
|
||||
}
|
||||
|
||||
await this.processSignalAndFiles(
|
||||
agentId,
|
||||
signalText,
|
||||
agent.mode as AgentMode,
|
||||
getAgentWorkdir,
|
||||
active?.streamSessionId,
|
||||
);
|
||||
} finally {
|
||||
this.completionLocks.delete(agentId); // Always clean up
|
||||
}
|
||||
|
||||
await this.processSignalAndFiles(
|
||||
agentId,
|
||||
signalText,
|
||||
agent.mode as AgentMode,
|
||||
getAgentWorkdir,
|
||||
active?.streamSessionId,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -724,6 +753,33 @@ export class OutputHandler {
|
||||
// Private Helpers
|
||||
// =========================================================================
|
||||
|
||||
/**
|
||||
* Check if agent completed successfully by reading signal.json file.
|
||||
* This is the robust completion detection logic that handles all completion statuses.
|
||||
*/
|
||||
private async checkSignalCompletion(agentWorkdir: string): Promise<boolean> {
|
||||
try {
|
||||
const { existsSync } = await import('node:fs');
|
||||
const signalPath = join(agentWorkdir, '.cw/output/signal.json');
|
||||
|
||||
if (!existsSync(signalPath)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const signalContent = await readFile(signalPath, 'utf-8');
|
||||
const signal = JSON.parse(signalContent);
|
||||
|
||||
// Agent completed if status is done, questions, or error
|
||||
const completed = signal.status === 'done' || signal.status === 'questions' || signal.status === 'error';
|
||||
|
||||
return completed;
|
||||
|
||||
} catch (err) {
|
||||
log.warn({ agentWorkdir, err: err instanceof Error ? err.message : String(err) }, 'failed to read or parse signal.json');
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
private emitCrashed(agent: { id: string; name: string; taskId: string | null }, error: string): void {
|
||||
if (this.eventBus) {
|
||||
const event: AgentCrashedEvent = {
|
||||
|
||||
423
src/agent/process-manager.test.ts
Normal file
423
src/agent/process-manager.test.ts
Normal file
@@ -0,0 +1,423 @@
|
||||
/**
|
||||
* ProcessManager Unit Tests
|
||||
*
|
||||
* Tests for ProcessManager class focusing on working directory handling,
|
||||
* command building, and spawn validation.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { ProcessManager } from './process-manager.js';
|
||||
import type { ProjectRepository } from '../db/repositories/project-repository.js';
|
||||
import type { EventBus } from '../events/index.js';
|
||||
|
||||
// Mock child_process.spawn
|
||||
vi.mock('node:child_process', () => ({
|
||||
spawn: vi.fn(),
|
||||
}));
|
||||
|
||||
// Mock fs operations
|
||||
vi.mock('node:fs', () => ({
|
||||
writeFileSync: vi.fn(),
|
||||
mkdirSync: vi.fn(),
|
||||
openSync: vi.fn((path) => {
|
||||
// Return different fd numbers for stdout and stderr
|
||||
if (path.includes('output.jsonl')) return 99;
|
||||
if (path.includes('stderr.log')) return 100;
|
||||
return 101;
|
||||
}),
|
||||
closeSync: vi.fn(),
|
||||
existsSync: vi.fn(),
|
||||
}));
|
||||
|
||||
// Mock FileTailer
|
||||
vi.mock('./file-tailer.js', () => ({
|
||||
FileTailer: class MockFileTailer {
|
||||
start = vi.fn().mockResolvedValue(undefined);
|
||||
stop = vi.fn().mockResolvedValue(undefined);
|
||||
},
|
||||
}));
|
||||
|
||||
// Mock SimpleGitWorktreeManager
|
||||
const mockCreate = vi.fn();
|
||||
vi.mock('../git/manager.js', () => ({
|
||||
SimpleGitWorktreeManager: class MockWorktreeManager {
|
||||
create = mockCreate;
|
||||
},
|
||||
}));
|
||||
|
||||
// Mock project clones
|
||||
vi.mock('../git/project-clones.js', () => ({
|
||||
ensureProjectClone: vi.fn().mockResolvedValue('/mock/clone/path'),
|
||||
getProjectCloneDir: vi.fn().mockReturnValue('/mock/clone/path'),
|
||||
}));
|
||||
|
||||
// Mock providers
|
||||
vi.mock('./providers/parsers/index.js', () => ({
|
||||
getStreamParser: vi.fn().mockReturnValue({ parse: vi.fn() }),
|
||||
}));
|
||||
|
||||
import { spawn } from 'node:child_process';
|
||||
import { existsSync, writeFileSync, mkdirSync, openSync, closeSync } from 'node:fs';
|
||||
import { ensureProjectClone } from '../git/project-clones.js';
|
||||
|
||||
const mockSpawn = vi.mocked(spawn);
|
||||
const mockExistsSync = vi.mocked(existsSync);
|
||||
const mockWriteFileSync = vi.mocked(writeFileSync);
|
||||
const mockMkdirSync = vi.mocked(mkdirSync);
|
||||
const mockOpenSync = vi.mocked(openSync);
|
||||
const mockCloseSync = vi.mocked(closeSync);
|
||||
|
||||
describe('ProcessManager', () => {
|
||||
let processManager: ProcessManager;
|
||||
let mockProjectRepository: ProjectRepository;
|
||||
let mockEventBus: EventBus;
|
||||
|
||||
const workspaceRoot = '/test/workspace';
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
// Mock child process
|
||||
const mockChild = {
|
||||
pid: 12345,
|
||||
unref: vi.fn(),
|
||||
on: vi.fn(),
|
||||
kill: vi.fn(),
|
||||
};
|
||||
mockSpawn.mockReturnValue(mockChild as any);
|
||||
|
||||
// Mock project repository
|
||||
mockProjectRepository = {
|
||||
findProjectsByInitiativeId: vi.fn().mockResolvedValue([]),
|
||||
create: vi.fn(),
|
||||
findAll: vi.fn(),
|
||||
findById: vi.fn(),
|
||||
findByName: vi.fn(),
|
||||
update: vi.fn(),
|
||||
delete: vi.fn(),
|
||||
setInitiativeProjects: vi.fn(),
|
||||
addProjectToInitiative: vi.fn(),
|
||||
removeProjectFromInitiative: vi.fn(),
|
||||
};
|
||||
|
||||
// Mock event bus
|
||||
mockEventBus = {
|
||||
emit: vi.fn(),
|
||||
on: vi.fn(),
|
||||
off: vi.fn(),
|
||||
once: vi.fn(),
|
||||
};
|
||||
|
||||
processManager = new ProcessManager(workspaceRoot, mockProjectRepository, mockEventBus);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.resetAllMocks();
|
||||
});
|
||||
|
||||
describe('getAgentWorkdir', () => {
|
||||
it('returns correct agent workdir path', () => {
|
||||
const alias = 'test-agent';
|
||||
const expected = '/test/workspace/agent-workdirs/test-agent';
|
||||
|
||||
const result = processManager.getAgentWorkdir(alias);
|
||||
|
||||
expect(result).toBe(expected);
|
||||
});
|
||||
});
|
||||
|
||||
describe('createProjectWorktrees', () => {
|
||||
beforeEach(() => {
|
||||
// Mock the global worktree create function
|
||||
mockCreate.mockResolvedValue({
|
||||
id: 'project1',
|
||||
path: '/test/workspace/agent-workdirs/test-agent/project1',
|
||||
branch: 'agent/test-agent',
|
||||
isMainWorktree: false,
|
||||
});
|
||||
|
||||
// Mock project repository
|
||||
vi.mocked(mockProjectRepository.findProjectsByInitiativeId).mockResolvedValue([
|
||||
{ id: '1', name: 'project1', url: 'https://github.com/user/project1.git', createdAt: new Date(), updatedAt: new Date() }
|
||||
]);
|
||||
|
||||
// Mock existsSync to return true for worktree paths
|
||||
mockExistsSync.mockImplementation((path) => {
|
||||
return path.toString().includes('/agent-workdirs/');
|
||||
});
|
||||
});
|
||||
|
||||
it('creates worktrees for initiative projects', async () => {
|
||||
const alias = 'test-agent';
|
||||
const initiativeId = 'init-123';
|
||||
|
||||
const result = await processManager.createProjectWorktrees(alias, initiativeId);
|
||||
|
||||
expect(result).toBe('/test/workspace/agent-workdirs/test-agent');
|
||||
expect(mockProjectRepository.findProjectsByInitiativeId).toHaveBeenCalledWith('init-123');
|
||||
expect(ensureProjectClone).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('throws error when worktree creation fails', async () => {
|
||||
// Mock worktree path to not exist after creation
|
||||
mockExistsSync.mockReturnValue(false);
|
||||
|
||||
const alias = 'test-agent';
|
||||
const initiativeId = 'init-123';
|
||||
|
||||
await expect(processManager.createProjectWorktrees(alias, initiativeId))
|
||||
.rejects.toThrow('Worktree creation failed:');
|
||||
});
|
||||
|
||||
it('logs comprehensive worktree creation details', async () => {
|
||||
const alias = 'test-agent';
|
||||
const initiativeId = 'init-123';
|
||||
|
||||
await processManager.createProjectWorktrees(alias, initiativeId);
|
||||
|
||||
// Verify logging (implementation would need to capture log calls)
|
||||
// For now, just verify the method completes successfully
|
||||
expect(mockProjectRepository.findProjectsByInitiativeId).toHaveBeenCalledWith('init-123');
|
||||
});
|
||||
});
|
||||
|
||||
describe('createStandaloneWorktree', () => {
|
||||
beforeEach(() => {
|
||||
mockCreate.mockResolvedValue({
|
||||
id: 'workspace',
|
||||
path: '/test/workspace/agent-workdirs/test-agent/workspace',
|
||||
branch: 'agent/test-agent',
|
||||
isMainWorktree: false,
|
||||
});
|
||||
|
||||
mockExistsSync.mockImplementation((path) => {
|
||||
return path.toString().includes('/workspace');
|
||||
});
|
||||
});
|
||||
|
||||
it('creates standalone worktree', async () => {
|
||||
const alias = 'test-agent';
|
||||
|
||||
const result = await processManager.createStandaloneWorktree(alias);
|
||||
|
||||
expect(result).toBe('/test/workspace/agent-workdirs/test-agent/workspace');
|
||||
});
|
||||
|
||||
it('throws error when standalone worktree creation fails', async () => {
|
||||
mockExistsSync.mockReturnValue(false);
|
||||
|
||||
const alias = 'test-agent';
|
||||
|
||||
await expect(processManager.createStandaloneWorktree(alias))
|
||||
.rejects.toThrow('Standalone worktree creation failed:');
|
||||
});
|
||||
});
|
||||
|
||||
describe('spawnDetached', () => {
|
||||
beforeEach(() => {
|
||||
mockExistsSync.mockReturnValue(true); // CWD exists
|
||||
});
|
||||
|
||||
it('validates cwd exists before spawn', () => {
|
||||
const agentId = 'agent-123';
|
||||
const command = 'claude';
|
||||
const args = ['--help'];
|
||||
const cwd = '/test/workspace/agent-workdirs/test-agent';
|
||||
const env = { TEST_VAR: 'value' };
|
||||
const providerName = 'claude';
|
||||
|
||||
processManager.spawnDetached(agentId, command, args, cwd, env, providerName);
|
||||
|
||||
expect(mockExistsSync).toHaveBeenCalledWith(cwd);
|
||||
expect(mockSpawn).toHaveBeenCalledWith(command, args, {
|
||||
cwd,
|
||||
env: expect.objectContaining(env),
|
||||
detached: true,
|
||||
stdio: ['ignore', 99, 100],
|
||||
});
|
||||
});
|
||||
|
||||
it('throws error when cwd does not exist', () => {
|
||||
mockExistsSync.mockReturnValue(false);
|
||||
|
||||
const agentId = 'agent-123';
|
||||
const command = 'claude';
|
||||
const args = ['--help'];
|
||||
const cwd = '/nonexistent/path';
|
||||
const env = {};
|
||||
const providerName = 'claude';
|
||||
|
||||
expect(() => {
|
||||
processManager.spawnDetached(agentId, command, args, cwd, env, providerName);
|
||||
}).toThrow('Agent working directory does not exist: /nonexistent/path');
|
||||
});
|
||||
|
||||
it('passes correct cwd parameter to spawn', () => {
|
||||
const agentId = 'agent-123';
|
||||
const command = 'claude';
|
||||
const args = ['--help'];
|
||||
const cwd = '/test/workspace/agent-workdirs/test-agent';
|
||||
const env = { CLAUDE_CONFIG_DIR: '/config' };
|
||||
const providerName = 'claude';
|
||||
|
||||
processManager.spawnDetached(agentId, command, args, cwd, env, providerName);
|
||||
|
||||
expect(mockSpawn).toHaveBeenCalledTimes(1);
|
||||
const spawnCall = mockSpawn.mock.calls[0];
|
||||
expect(spawnCall[0]).toBe(command);
|
||||
expect(spawnCall[1]).toEqual(args);
|
||||
expect(spawnCall[2]).toEqual({
|
||||
cwd,
|
||||
env: expect.objectContaining({
|
||||
...process.env,
|
||||
CLAUDE_CONFIG_DIR: '/config',
|
||||
}),
|
||||
detached: true,
|
||||
stdio: ['ignore', 99, 100],
|
||||
});
|
||||
});
|
||||
|
||||
it('logs comprehensive spawn information', () => {
|
||||
const agentId = 'agent-123';
|
||||
const command = 'claude';
|
||||
const args = ['--json-schema', 'schema.json'];
|
||||
const cwd = '/test/workspace/agent-workdirs/test-agent';
|
||||
const env = { CLAUDE_CONFIG_DIR: '/config' };
|
||||
const providerName = 'claude';
|
||||
|
||||
const result = processManager.spawnDetached(agentId, command, args, cwd, env, providerName);
|
||||
|
||||
expect(result).toHaveProperty('pid', 12345);
|
||||
expect(result).toHaveProperty('outputFilePath');
|
||||
expect(result).toHaveProperty('tailer');
|
||||
|
||||
// Verify log directory creation
|
||||
expect(mockMkdirSync).toHaveBeenCalledWith(
|
||||
'/test/workspace/.cw/agent-logs/agent-123',
|
||||
{ recursive: true }
|
||||
);
|
||||
});
|
||||
|
||||
it('writes prompt file when provided', () => {
|
||||
const agentId = 'agent-123';
|
||||
const command = 'claude';
|
||||
const args = ['--help'];
|
||||
const cwd = '/test/workspace/agent-workdirs/test-agent';
|
||||
const env = {};
|
||||
const providerName = 'claude';
|
||||
const prompt = 'Test prompt';
|
||||
|
||||
processManager.spawnDetached(agentId, command, args, cwd, env, providerName, prompt);
|
||||
|
||||
expect(mockWriteFileSync).toHaveBeenCalledWith(
|
||||
'/test/workspace/.cw/agent-logs/agent-123/PROMPT.md',
|
||||
'Test prompt',
|
||||
'utf-8'
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('buildSpawnCommand', () => {
|
||||
it('builds command with native prompt mode', () => {
|
||||
const provider = {
|
||||
name: 'claude',
|
||||
command: 'claude',
|
||||
args: ['--json-schema', 'schema.json'],
|
||||
env: {},
|
||||
promptMode: 'native' as const,
|
||||
processNames: ['claude'],
|
||||
resumeStyle: 'flag' as const,
|
||||
resumeFlag: '--resume',
|
||||
nonInteractive: {
|
||||
subcommand: 'chat',
|
||||
promptFlag: '-p',
|
||||
outputFlag: '--output-format json',
|
||||
},
|
||||
};
|
||||
const prompt = 'Test prompt';
|
||||
|
||||
const result = processManager.buildSpawnCommand(provider, prompt);
|
||||
|
||||
expect(result).toEqual({
|
||||
command: 'claude',
|
||||
args: ['chat', '--json-schema', 'schema.json', '-p', 'Test prompt', '--output-format', 'json'],
|
||||
env: {},
|
||||
});
|
||||
});
|
||||
|
||||
it('builds command with flag prompt mode', () => {
|
||||
const provider = {
|
||||
name: 'codex',
|
||||
command: 'codex',
|
||||
args: ['--format', 'json'],
|
||||
env: {},
|
||||
promptMode: 'flag' as const,
|
||||
processNames: ['codex'],
|
||||
resumeStyle: 'subcommand' as const,
|
||||
resumeFlag: 'resume',
|
||||
nonInteractive: {
|
||||
subcommand: 'run',
|
||||
promptFlag: '--prompt',
|
||||
outputFlag: '--json',
|
||||
},
|
||||
};
|
||||
const prompt = 'Test prompt';
|
||||
|
||||
const result = processManager.buildSpawnCommand(provider, prompt);
|
||||
|
||||
expect(result).toEqual({
|
||||
command: 'codex',
|
||||
args: ['run', '--format', 'json', '--prompt', 'Test prompt', '--json'],
|
||||
env: {},
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('buildResumeCommand', () => {
|
||||
it('builds resume command with flag style', () => {
|
||||
const provider = {
|
||||
name: 'claude',
|
||||
command: 'claude',
|
||||
args: [],
|
||||
env: {},
|
||||
promptMode: 'native' as const,
|
||||
processNames: ['claude'],
|
||||
resumeStyle: 'flag' as const,
|
||||
resumeFlag: '--resume',
|
||||
nonInteractive: {
|
||||
subcommand: 'chat',
|
||||
promptFlag: '-p',
|
||||
outputFlag: '--json',
|
||||
},
|
||||
};
|
||||
const sessionId = 'session-123';
|
||||
const prompt = 'Continue working';
|
||||
|
||||
const result = processManager.buildResumeCommand(provider, sessionId, prompt);
|
||||
|
||||
expect(result).toEqual({
|
||||
command: 'claude',
|
||||
args: ['--resume', 'session-123', '-p', 'Continue working', '--json'],
|
||||
env: {},
|
||||
});
|
||||
});
|
||||
|
||||
it('throws error for providers without resume support', () => {
|
||||
const provider = {
|
||||
name: 'noresume',
|
||||
command: 'noresume',
|
||||
args: [],
|
||||
env: {},
|
||||
promptMode: 'native' as const,
|
||||
processNames: ['noresume'],
|
||||
resumeStyle: 'none' as const,
|
||||
};
|
||||
const sessionId = 'session-123';
|
||||
const prompt = 'Continue working';
|
||||
|
||||
expect(() => {
|
||||
processManager.buildResumeCommand(provider, sessionId, prompt);
|
||||
}).toThrow("Provider 'noresume' does not support resume");
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -9,22 +9,27 @@
|
||||
const SIGNAL_FORMAT = `
|
||||
## Signal Output
|
||||
|
||||
When done, output ONLY this JSON (no other text before or after):
|
||||
When done, write \`.cw/output/signal.json\` with:
|
||||
{ "status": "done" }
|
||||
|
||||
If you need clarification, output:
|
||||
If you need clarification, write:
|
||||
{ "status": "questions", "questions": [{ "id": "q1", "question": "Your question" }] }
|
||||
|
||||
If you hit an unrecoverable error, output:
|
||||
{ "status": "error", "error": "Description of what went wrong" }`;
|
||||
If you hit an unrecoverable error, write:
|
||||
{ "status": "error", "error": "Description of what went wrong" }
|
||||
|
||||
IMPORTANT: Always write this file as your final action before terminating.`;
|
||||
|
||||
const INPUT_FILES = `
|
||||
## Input Files
|
||||
|
||||
Read context from \`.cw/input/\`:
|
||||
Read \`.cw/input/manifest.json\` first — it lists exactly which input files exist.
|
||||
Then read only those files from \`.cw/input/\`.
|
||||
|
||||
Possible files:
|
||||
- \`initiative.md\` — Initiative details (frontmatter: id, name, status)
|
||||
- \`phase.md\` — Phase details if applicable (frontmatter: id, number, name, status; body: description)
|
||||
- \`task.md\` — Task details if applicable (frontmatter: id, name, category, type, priority, status; body: description)
|
||||
- \`phase.md\` — Phase details (frontmatter: id, number, name, status; body: description)
|
||||
- \`task.md\` — Task details (frontmatter: id, name, category, type, priority, status; body: description)
|
||||
- \`pages/\` — Initiative pages (one file per page; frontmatter: title, parentPageId, sortOrder; body: markdown content)`;
|
||||
|
||||
const SUMMARY_REQUIREMENT = `
|
||||
|
||||
@@ -76,8 +76,9 @@ export class ClaudeStreamParser implements StreamParser {
|
||||
return [];
|
||||
}
|
||||
|
||||
// Check for error first (can appear on any event type)
|
||||
if ('is_error' in parsed && parsed.is_error && 'result' in parsed) {
|
||||
// Check for error on non-result events (e.g. stream errors)
|
||||
// Result events with is_error are handled in the 'result' case below
|
||||
if ('is_error' in parsed && parsed.is_error && 'result' in parsed && parsed.type !== 'result') {
|
||||
return [{ type: 'error', message: String(parsed.result) }];
|
||||
}
|
||||
|
||||
@@ -148,6 +149,7 @@ export class ClaudeStreamParser implements StreamParser {
|
||||
text: resultEvent.result || '',
|
||||
sessionId: resultEvent.session_id,
|
||||
costUsd: resultEvent.total_cost_usd,
|
||||
isError: resultEvent.is_error === true,
|
||||
});
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -42,6 +42,8 @@ export interface StreamResultEvent {
|
||||
text: string;
|
||||
sessionId?: string;
|
||||
costUsd?: number;
|
||||
/** True when the CLI returned an error result (e.g. auth failure, usage limit) */
|
||||
isError?: boolean;
|
||||
}
|
||||
|
||||
/** Error event */
|
||||
|
||||
@@ -77,6 +77,8 @@ export interface AgentInfo {
|
||||
createdAt: Date;
|
||||
/** Last activity timestamp */
|
||||
updatedAt: Date;
|
||||
/** When the user dismissed this agent (null if not dismissed) */
|
||||
userDismissedAt?: Date | null;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
Reference in New Issue
Block a user