refactor: Restructure monorepo to apps/server/ and apps/web/ layout

Move src/ → apps/server/ and packages/web/ → apps/web/ to adopt
standard monorepo conventions (apps/ for runnable apps, packages/
for reusable libraries). Update all config files, shared package
imports, test fixtures, and documentation to reflect new paths.

Key fixes:
- Update workspace config to ["apps/*", "packages/*"]
- Update tsconfig.json rootDir/include for apps/server/
- Add apps/web/** to vitest exclude list
- Update drizzle.config.ts schema path
- Fix ensure-schema.ts migration path detection (3 levels up in dev,
  2 levels up in dist)
- Fix tests/integration/cli-server.test.ts import paths
- Update packages/shared imports to apps/server/ paths
- Update all docs/ files with new paths
This commit is contained in:
Lukas May
2026-03-03 11:22:53 +01:00
parent 8c38d958ce
commit 34578d39c6
535 changed files with 75452 additions and 687 deletions

View File

@@ -0,0 +1,67 @@
import { existsSync, readFileSync } from 'node:fs';
import { join } from 'node:path';
import { homedir, platform } from 'node:os';
import { execa } from 'execa';
/**
 * Account material extracted from the local Claude Code installation.
 */
export interface ExtractedAccount {
  email: string; // oauthAccount.emailAddress from the Claude config
  accountUuid: string; // oauthAccount.accountUuid from the Claude config
  configJson: object; // full parsed Claude config (.claude.json) contents
  credentials: string; // raw credentials blob (Keychain entry or .credentials.json text)
}
/**
 * Resolve the Claude Code config path with fallback logic.
 * Primary: ~/.claude/.claude.json (if it exists and has oauthAccount)
 * Fallback: ~/.claude.json
 */
function getClaudeConfigPath(): string {
  const home = homedir();
  const primaryPath = join(home, '.claude', '.claude.json');
  if (existsSync(primaryPath)) {
    try {
      const parsed = JSON.parse(readFileSync(primaryPath, 'utf-8'));
      if (parsed.oauthAccount) {
        return primaryPath;
      }
    } catch {
      // Unreadable or invalid JSON — ignore and use the fallback location.
    }
  }
  return join(home, '.claude.json');
}
/**
 * Extract the currently logged-in Claude Code account from this machine:
 * account identity from the Claude config file plus the raw credentials blob
 * (macOS Keychain entry or, elsewhere, ~/.claude/.credentials.json).
 *
 * @throws Error when the config has no logged-in account.
 */
export async function extractCurrentClaudeAccount(): Promise<ExtractedAccount> {
  // Identity comes from the Claude config (with ccswitch-style fallback).
  const configPath = getClaudeConfigPath();
  const configJson = JSON.parse(readFileSync(configPath, 'utf-8'));

  const email = configJson.oauthAccount?.emailAddress;
  const accountUuid = configJson.oauthAccount?.accountUuid;
  if (!email || !accountUuid) {
    throw new Error('No Claude account found. Please log in with `claude` first.');
  }

  // Credential storage is platform-specific.
  let credentials: string;
  if (platform() === 'darwin') {
    // macOS: the CLI stores credentials in the login Keychain.
    const { stdout } = await execa('security', [
      'find-generic-password',
      '-s', 'Claude Code-credentials',
      '-w',
    ]);
    credentials = stdout;
  } else {
    // Linux (and others): plain file under ~/.claude/.
    credentials = readFileSync(join(homedir(), '.claude', '.credentials.json'), 'utf-8');
  }

  return { email, accountUuid, configJson, credentials };
}

View File

@@ -0,0 +1,5 @@
/**
 * Public barrel for the accounts module: account extraction from the local
 * Claude install, config-dir setup/path helpers, and health/usage checks.
 */
export { extractCurrentClaudeAccount, type ExtractedAccount } from './extractor.js';
export { setupAccountConfigDir } from './setup.js';
export { getAccountConfigDir } from './paths.js';
export { checkAccountHealth, ensureAccountCredentials } from './usage.js';
export type { AccountHealthResult, AccountUsage, UsageTier } from './usage.js';

View File

@@ -0,0 +1,5 @@
import { join } from 'node:path';
/**
 * Resolve the on-disk config directory for one account.
 * Layout: <workspaceRoot>/.cw/accounts/<accountId>
 */
export function getAccountConfigDir(workspaceRoot: string, accountId: string): string {
  const accountsRoot = join(workspaceRoot, '.cw', 'accounts');
  return join(accountsRoot, accountId);
}

View File

@@ -0,0 +1,15 @@
import { mkdirSync, writeFileSync } from 'node:fs';
import { join } from 'node:path';
/**
 * Materialize an account's config directory on disk: creates the directory
 * (and parents) and writes the Claude config plus raw credentials files that
 * agent subprocesses read.
 *
 * @param configDir - Target directory (created if missing)
 * @param extracted - Parsed config object and raw credentials text to persist
 */
export function setupAccountConfigDir(
  configDir: string,
  extracted: { configJson: object; credentials: string },
): void {
  mkdirSync(configDir, { recursive: true });
  // Pretty-print the config; credentials are written verbatim.
  const configBody = JSON.stringify(extracted.configJson, null, 2);
  writeFileSync(join(configDir, '.claude.json'), configBody);
  writeFileSync(join(configDir, '.credentials.json'), extracted.credentials);
}

View File

@@ -0,0 +1,374 @@
import { readFileSync, existsSync, writeFileSync, mkdirSync } from 'node:fs';
import { join, dirname } from 'node:path';
import type { Account } from '../../db/schema.js';
import type { AgentInfo } from '../types.js';
import type { AccountCredentialManager } from '../credentials/types.js';
import { createModuleLogger } from '../../logger/index.js';
import { getAccountConfigDir } from './paths.js';
import { setupAccountConfigDir } from './setup.js';
const log = createModuleLogger('account-usage');

// Anthropic OAuth usage endpoint (queried with a Bearer access token).
const USAGE_API_URL = 'https://api.anthropic.com/api/oauth/usage';
// OAuth token-refresh endpoint.
const TOKEN_REFRESH_URL = 'https://console.anthropic.com/v1/oauth/token';
// Public OAuth client id used for the refresh_token grant.
const OAUTH_CLIENT_ID = '9d1c250a-e61b-44d9-88ed-5944d1962f5e';
const TOKEN_REFRESH_BUFFER_MS = 300_000; // 5 minutes

/** Parsed `claudeAiOauth` section of a .credentials.json file. */
export interface OAuthCredentials {
  accessToken: string;
  refreshToken: string | null;
  expiresAt: number | null; // ms epoch, null for setup tokens
  subscriptionType: string | null;
  rateLimitTier: string | null;
}

/** One rate-limit window as reported by the usage API (snake_case fields mirror the API). */
export interface UsageTier {
  utilization: number; // assumed fraction/percent of the window used — TODO confirm units against API
  resets_at: string | null;
}

/** Aggregate usage across rate-limit windows; each tier is null when the API omits it. */
export interface AccountUsage {
  five_hour: UsageTier | null;
  seven_day: UsageTier | null;
  seven_day_sonnet: UsageTier | null;
  seven_day_opus: UsageTier | null;
  extra_usage: {
    is_enabled: boolean;
    monthly_limit: number | null;
    used_credits: number | null;
    utilization: number | null;
  } | null;
}

/** Full health report for one account; failures land in `error`, not exceptions. */
export interface AccountHealthResult {
  id: string;
  email: string;
  provider: string;
  credentialsValid: boolean; // credentials file/record was found and parseable
  tokenValid: boolean; // access token accepted (usage API reachable or refreshed)
  tokenExpiresAt: string | null; // ISO timestamp; null for setup tokens
  subscriptionType: string | null;
  error: string | null;
  usage: AccountUsage | null;
  isExhausted: boolean;
  exhaustedUntil: string | null;
  lastUsedAt: string | null;
  agentCount: number; // all agents bound to this account
  activeAgentCount: number; // agents currently running / waiting for input
}
/**
 * Load OAuth credentials from <configDir>/.credentials.json.
 * Returns null when the file is missing, unreadable, invalid JSON, or lacks
 * an access token.
 */
function readCredentials(configDir: string): OAuthCredentials | null {
  const credPath = join(configDir, '.credentials.json');
  if (!existsSync(credPath)) return null;
  try {
    const oauth = JSON.parse(readFileSync(credPath, 'utf-8')).claudeAiOauth;
    if (!oauth?.accessToken) return null;
    return {
      accessToken: oauth.accessToken,
      refreshToken: oauth.refreshToken ?? null,
      expiresAt: oauth.expiresAt ?? null,
      subscriptionType: oauth.subscriptionType ?? null,
      rateLimitTier: oauth.rateLimitTier ?? null,
    };
  } catch {
    return null;
  }
}
/**
 * True when the access token is expired or will expire within the refresh
 * buffer, so callers refresh proactively. Setup tokens without an expiry
 * (falsy expiresAt) are treated as non-expired.
 */
function isTokenExpired(credentials: OAuthCredentials): boolean {
  const { expiresAt } = credentials;
  if (!expiresAt) return false;
  const refreshDeadline = Date.now() + TOKEN_REFRESH_BUFFER_MS;
  return expiresAt < refreshDeadline;
}
/**
 * Write refreshed tokens back to <configDir>/.credentials.json.
 * Matches ccswitch's update_credentials_with_token() behavior: unrelated
 * top-level fields and existing claudeAiOauth entries are preserved, and the
 * file is written as compact JSON.
 *
 * @param expiresIn - Token lifetime in seconds (converted to a ms-epoch expiresAt)
 */
function writeCredentials(
  configDir: string,
  accessToken: string,
  refreshToken: string,
  expiresIn: number,
): void {
  const credPath = join(configDir, '.credentials.json');

  // Load whatever is on disk so fields we don't manage survive the rewrite.
  let existing: Record<string, unknown> = {};
  try {
    if (existsSync(credPath)) {
      existing = JSON.parse(readFileSync(credPath, 'utf-8'));
    }
  } catch {
    // Start fresh if can't read
  }

  // expires_in is seconds; stored expiresAt is a ms epoch (ccswitch convention).
  const expiresAt = Date.now() + expiresIn * 1000;

  existing.claudeAiOauth = {
    ...((existing.claudeAiOauth as Record<string, unknown>) ?? {}),
    accessToken,
    refreshToken,
    expiresAt,
  };

  mkdirSync(dirname(credPath), { recursive: true });
  // Write back (compact JSON for consistency with ccswitch)
  writeFileSync(credPath, JSON.stringify(existing));
  log.debug({ configDir }, 'credentials written after token refresh');
}
/**
 * Exchange a refresh token for a fresh access token via the Anthropic OAuth
 * token endpoint.
 *
 * @param refreshTokenStr - Current refresh token from .credentials.json
 * @returns The new token triple, or null on HTTP/network failure or when the
 *   response body is missing the expected fields (callers treat null as
 *   "refresh failed").
 */
async function refreshToken(
  refreshTokenStr: string,
): Promise<{ accessToken: string; refreshToken: string; expiresIn: number } | null> {
  try {
    const response = await fetch(TOKEN_REFRESH_URL, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        grant_type: 'refresh_token',
        refresh_token: refreshTokenStr,
        client_id: OAUTH_CLIENT_ID,
        scope: 'user:inference user:profile',
      }),
    });
    if (!response.ok) return null;
    const data = await response.json();
    // Guard against a 200 with an unexpected body: without this we could
    // hand `undefined` tokens to writeCredentials and persist them to disk.
    if (typeof data.access_token !== 'string' || typeof data.expires_in !== 'number') {
      return null;
    }
    return {
      accessToken: data.access_token,
      // OAuth servers may omit refresh_token on refresh (RFC 6749 §6); callers
      // fall back to the previous token via `refreshed.refreshToken || old`,
      // so an empty string keeps that behavior intact.
      refreshToken: typeof data.refresh_token === 'string' ? data.refresh_token : '',
      expiresIn: data.expires_in,
    };
  } catch {
    return null;
  }
}
/**
 * Result of a usage-API fetch. On failure, `status` is the HTTP status code,
 * or 0 for a network-level error (no response). The previous third union arm
 * (`status: 0`) was fully subsumed by `status: number` and has been removed.
 */
type FetchUsageResult =
  | { ok: true; usage: AccountUsage }
  | { ok: false; status: number; statusText: string };
async function fetchUsage(accessToken: string): Promise<FetchUsageResult> {
try {
const response = await fetch(USAGE_API_URL, {
method: 'GET',
headers: {
Authorization: `Bearer ${accessToken}`,
'anthropic-beta': 'oauth-2025-04-20',
'Content-Type': 'application/json',
},
});
if (!response.ok) {
return { ok: false, status: response.status, statusText: response.statusText };
}
const data = await response.json();
return {
ok: true,
usage: {
five_hour: data.five_hour ?? null,
seven_day: data.seven_day ?? null,
seven_day_sonnet: data.seven_day_sonnet ?? null,
seven_day_opus: data.seven_day_opus ?? null,
extra_usage: data.extra_usage ?? null,
},
};
} catch (err) {
return { ok: false, status: 0, statusText: err instanceof Error ? err.message : 'Network error' };
}
}
/**
 * Compute a full health report for one account: agent counts, credential
 * validity, token expiry/refresh status, and live usage from the usage API.
 * Never throws — every failure mode is reported via the returned `error`.
 *
 * @param account - DB account row (may carry serialized configJson/credentials)
 * @param agents - All known agents; filtered to this account's agents here
 * @param credentialManager - Optional manager that validates/refreshes creds;
 *   when absent, falls back to the direct file-based legacy path
 * @param workspaceRoot - Required to locate the on-disk account config dir
 */
export async function checkAccountHealth(
  account: Account,
  agents: AgentInfo[],
  credentialManager?: AccountCredentialManager,
  workspaceRoot?: string,
): Promise<AccountHealthResult> {
  const configDir = workspaceRoot ? getAccountConfigDir(workspaceRoot, account.id) : null;
  const accountAgents = agents.filter((a) => a.accountId === account.id);
  const activeAgents = accountAgents.filter(
    (a) => a.status === 'running' || a.status === 'waiting_for_input',
  );
  // Baseline result: validity flags false / fields null until proven otherwise.
  const base: AccountHealthResult = {
    id: account.id,
    email: account.email,
    provider: account.provider,
    credentialsValid: false,
    tokenValid: false,
    tokenExpiresAt: null,
    subscriptionType: null,
    error: null,
    usage: null,
    isExhausted: account.isExhausted,
    exhaustedUntil: account.exhaustedUntil?.toISOString() ?? null,
    lastUsedAt: account.lastUsedAt?.toISOString() ?? null,
    agentCount: accountAgents.length,
    activeAgentCount: activeAgents.length,
  };
  if (!configDir) {
    return { ...base, error: 'Cannot derive config dir: workspaceRoot not provided' };
  }
  // Ensure DB credentials are written to disk so file-based checks can find them
  if (account.configJson && account.credentials) {
    try {
      setupAccountConfigDir(configDir, {
        configJson: JSON.parse(account.configJson),
        credentials: account.credentials,
      });
    } catch (err) {
      // Best-effort sync: a failure here is logged but doesn't fail the check.
      log.warn({ accountId: account.id, err: err instanceof Error ? err.message : String(err) }, 'failed to sync DB credentials to disk');
    }
  }
  try {
    // Use credential manager if provided, otherwise fall back to direct functions
    let accessToken: string;
    let currentExpiresAt: number | null;
    let subscriptionType: string | null = null;
    if (credentialManager) {
      const result = await credentialManager.ensureValid(configDir, account.id);
      if (!result.valid || !result.credentials) {
        return {
          ...base,
          // credentials existed but failed validation vs. missing entirely
          credentialsValid: result.credentials !== null,
          error: result.error ?? 'Credentials validation failed',
        };
      }
      accessToken = result.credentials.accessToken;
      currentExpiresAt = result.credentials.expiresAt;
      subscriptionType = result.credentials.subscriptionType;
    } else {
      // Legacy path: direct function calls
      const credentials = readCredentials(configDir);
      if (!credentials) {
        return {
          ...base,
          error: 'Credentials file not found or unreadable',
        };
      }
      accessToken = credentials.accessToken;
      currentExpiresAt = credentials.expiresAt;
      subscriptionType = credentials.subscriptionType;
      if (isTokenExpired(credentials)) {
        if (!credentials.refreshToken) {
          log.warn({ accountId: account.id }, 'setup token expired, no refresh token');
          return {
            ...base,
            credentialsValid: true,
            error: 'Setup token expired, no refresh token available',
          };
        }
        log.info({ accountId: account.id, email: account.email }, 'token expired, refreshing');
        const refreshed = await refreshToken(credentials.refreshToken);
        if (!refreshed) {
          log.warn({ accountId: account.id }, 'token refresh failed');
          return {
            ...base,
            credentialsValid: true,
            error: 'Token expired and refresh failed',
          };
        }
        accessToken = refreshed.accessToken;
        // Persist the refreshed credentials back to disk
        const newRefreshToken = refreshed.refreshToken || credentials.refreshToken;
        writeCredentials(configDir, accessToken, newRefreshToken, refreshed.expiresIn);
        currentExpiresAt = Date.now() + (refreshed.expiresIn * 1000);
        log.info({ accountId: account.id, expiresIn: refreshed.expiresIn }, 'token refreshed and persisted');
      }
    }
    // No expiry recorded => setup token (see OAuthCredentials.expiresAt).
    const isSetupToken = !currentExpiresAt;
    const usageResult = await fetchUsage(accessToken);
    if (!usageResult.ok) {
      // status 0 means a network-level failure with no HTTP response.
      const statusDetail = usageResult.status > 0
        ? `HTTP ${usageResult.status} ${usageResult.statusText}`
        : usageResult.statusText;
      if (isSetupToken) {
        // Setup tokens often can't query the usage API — not a hard error
        return {
          ...base,
          credentialsValid: true,
          tokenValid: true,
          tokenExpiresAt: null,
          subscriptionType,
          error: `Usage API unavailable for setup token (${statusDetail}). Run \`claude\` with this account to complete OAuth setup.`,
        };
      }
      return {
        ...base,
        credentialsValid: true,
        error: `Usage API request failed: ${statusDetail}`,
      };
    }
    // Healthy path: token accepted and usage retrieved.
    return {
      ...base,
      credentialsValid: true,
      tokenValid: true,
      tokenExpiresAt: currentExpiresAt ? new Date(currentExpiresAt).toISOString() : null,
      subscriptionType,
      usage: usageResult.usage,
    };
  } catch (err) {
    // Catch-all so a single bad account can't break a health sweep.
    return {
      ...base,
      error: err instanceof Error ? err.message : String(err),
    };
  }
}
/**
* Ensure account credentials are valid and refreshed if needed.
* Call this before spawning an agent to ensure the credentials file
* has fresh tokens that the agent subprocess can use.
*
* Returns true if credentials are valid (or were successfully refreshed).
* Returns false if credentials are missing or refresh failed.
*
* @deprecated Use AccountCredentialManager.ensureValid() instead for event emission support.
*/
export async function ensureAccountCredentials(configDir: string): Promise<boolean> {
const credentials = readCredentials(configDir);
if (!credentials) {
log.warn({ configDir }, 'no credentials found');
return false;
}
if (!isTokenExpired(credentials)) {
log.debug({ configDir }, 'credentials valid, no refresh needed');
return true;
}
if (!credentials.refreshToken) {
log.error({ configDir }, 'setup token expired, no refresh token available');
return false;
}
log.info({ configDir }, 'credentials expired, refreshing before spawn');
const refreshed = await refreshToken(credentials.refreshToken);
if (!refreshed) {
log.error({ configDir }, 'failed to refresh credentials');
return false;
}
const newRefreshToken = refreshed.refreshToken || credentials.refreshToken;
writeCredentials(configDir, refreshed.accessToken, newRefreshToken, refreshed.expiresIn);
log.info({ configDir, expiresIn: refreshed.expiresIn }, 'credentials refreshed before spawn');
return true;
}

View File

@@ -0,0 +1,34 @@
/**
* Agent Alias Generator
*
* Generates unique funny aliases for agents using adjective-animal combinations.
* E.g., "jolly-penguin", "bold-eagle", "swift-otter".
*/
import { uniqueNamesGenerator, adjectives, animals } from 'unique-names-generator';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
// Upper bound on collision retries before giving up.
const MAX_RETRIES = 10;

/**
 * Generate a unique agent alias that doesn't collide with existing agent names.
 *
 * @param repository - Agent repository to check for name collisions
 * @returns A unique adjective-animal alias (e.g., "jolly-penguin")
 * @throws Error when no collision-free alias is found within MAX_RETRIES draws
 */
export async function generateUniqueAlias(repository: AgentRepository): Promise<string> {
  for (let attempt = 0; attempt < MAX_RETRIES; attempt++) {
    const candidate = uniqueNamesGenerator({
      dictionaries: [adjectives, animals],
      separator: '-',
      style: 'lowerCase',
    });
    const clash = await repository.findByName(candidate);
    if (clash) continue;
    return candidate;
  }
  throw new Error(`Failed to generate unique alias after ${MAX_RETRIES} attempts`);
}

View File

@@ -0,0 +1,526 @@
/**
* CleanupManager — Worktree, branch, and log cleanup for agents.
*
* Extracted from MultiProviderAgentManager. Handles all filesystem
* and git cleanup operations, plus orphan detection and reconciliation.
*/
import { promisify } from 'node:util';
import { execFile } from 'node:child_process';
import { readFile, readdir, rm, cp, mkdir } from 'node:fs/promises';
import { existsSync } from 'node:fs';
import { join } from 'node:path';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { ProjectRepository } from '../db/repositories/project-repository.js';
import type { EventBus, AgentCrashedEvent } from '../events/index.js';
import { createModuleLogger } from '../logger/index.js';
import { SimpleGitWorktreeManager } from '../git/manager.js';
import { getProjectCloneDir } from '../git/project-clones.js';
import { getStreamParser } from './providers/parsers/index.js';
import { FileTailer } from './file-tailer.js';
import { getProvider } from './providers/registry.js';
import type { StreamEvent } from './providers/parsers/index.js';
import type { SignalManager } from './lifecycle/signal-manager.js';
import { isPidAlive } from './process-manager.js';
const log = createModuleLogger('cleanup-manager');
// Promisified execFile so git commands below can be awaited.
const execFileAsync = promisify(execFile);
export class CleanupManager {
  /**
   * @param workspaceRoot - Absolute workspace root that all paths derive from
   * @param repository - Agent records; used for orphan detection and reconcile
   * @param projectRepository - Resolves initiative ids to their project clones
   * @param eventBus - Optional; agent:crashed events are emitted through it
   * @param debug - When true, workdirs/logs are archived before auto-cleanup
   * @param signalManager - Optional; reads .cw/output/signal.json on reconcile
   */
  constructor(
    private workspaceRoot: string,
    private repository: AgentRepository,
    private projectRepository: ProjectRepository,
    private eventBus?: EventBus,
    private debug: boolean = false,
    private signalManager?: SignalManager,
  ) {}

  /**
   * Resolve the agent's working directory path.
   */
  private getAgentWorkdir(alias: string): string {
    return join(this.workspaceRoot, 'agent-workdirs', alias);
  }

  /**
   * Resolve the actual working directory for an agent, probing for the
   * workspace/ subdirectory used by standalone agents.
   */
  private resolveAgentCwd(worktreeId: string): string {
    const base = this.getAgentWorkdir(worktreeId);
    const workspaceSub = join(base, 'workspace');
    // Standalone layout: .cw lives under workspace/, not at the workdir root.
    if (!existsSync(join(base, '.cw', 'output')) && existsSync(join(workspaceSub, '.cw'))) {
      return workspaceSub;
    }
    return base;
  }

  /**
   * Remove git worktrees for an agent.
   * Handles both initiative-linked (multi-project) and standalone agents.
   */
  async removeAgentWorktrees(alias: string, initiativeId: string | null): Promise<void> {
    const agentWorkdir = this.getAgentWorkdir(alias);
    // Probe for existence; a missing workdir means nothing to clean up.
    try {
      await readdir(agentWorkdir);
    } catch {
      return;
    }
    if (initiativeId) {
      // One worktree per project in the initiative; failures are per-project.
      const projects = await this.projectRepository.findProjectsByInitiativeId(initiativeId);
      for (const project of projects) {
        try {
          const clonePath = join(this.workspaceRoot, getProjectCloneDir(project.name, project.id));
          const wm = new SimpleGitWorktreeManager(clonePath, undefined, agentWorkdir);
          await wm.remove(project.name);
        } catch (err) {
          log.warn({ alias, project: project.name, err: err instanceof Error ? err.message : String(err) }, 'failed to remove project worktree');
        }
      }
    } else {
      // Standalone agent: single worktree named 'workspace' off the root repo.
      try {
        const wm = new SimpleGitWorktreeManager(this.workspaceRoot, undefined, agentWorkdir);
        await wm.remove('workspace');
      } catch (err) {
        log.warn({ alias, err: err instanceof Error ? err.message : String(err) }, 'failed to remove standalone worktree');
      }
    }
    // Remove the workdir itself, then let git forget stale worktree entries.
    await rm(agentWorkdir, { recursive: true, force: true });
    await this.pruneWorktrees(initiativeId);
  }

  /**
   * Delete agent/<alias> branches from all relevant repos.
   */
  async removeAgentBranches(alias: string, initiativeId: string | null): Promise<void> {
    const branchName = `agent/${alias}`;
    const repoPaths: string[] = [];
    if (initiativeId) {
      const projects = await this.projectRepository.findProjectsByInitiativeId(initiativeId);
      for (const project of projects) {
        repoPaths.push(join(this.workspaceRoot, getProjectCloneDir(project.name, project.id)));
      }
    } else {
      repoPaths.push(this.workspaceRoot);
    }
    for (const repoPath of repoPaths) {
      try {
        await execFileAsync('git', ['branch', '-D', branchName], { cwd: repoPath });
      } catch {
        // Branch may not exist
      }
    }
  }

  /**
   * Remove log directory for an agent.
   */
  async removeAgentLogs(agentName: string): Promise<void> {
    const logDir = join(this.workspaceRoot, '.cw', 'agent-logs', agentName);
    await rm(logDir, { recursive: true, force: true });
  }

  /**
   * Run git worktree prune on all relevant repos.
   */
  async pruneWorktrees(initiativeId: string | null): Promise<void> {
    const repoPaths: string[] = [];
    if (initiativeId) {
      const projects = await this.projectRepository.findProjectsByInitiativeId(initiativeId);
      for (const project of projects) {
        repoPaths.push(join(this.workspaceRoot, getProjectCloneDir(project.name, project.id)));
      }
    } else {
      repoPaths.push(this.workspaceRoot);
    }
    for (const repoPath of repoPaths) {
      try {
        await execFileAsync('git', ['worktree', 'prune'], { cwd: repoPath });
      } catch (err) {
        log.warn({ repoPath, err: err instanceof Error ? err.message : String(err) }, 'failed to prune worktrees');
      }
    }
  }

  /**
   * Clean up orphaned agent workdirs (directories with no matching DB agent).
   */
  async cleanupOrphanedWorkdirs(): Promise<void> {
    const workdirsPath = join(this.workspaceRoot, 'agent-workdirs');
    let entries: string[];
    try {
      entries = await readdir(workdirsPath);
    } catch {
      return;
    }
    // Any directory whose name is not a known agent alias is an orphan.
    const agents = await this.repository.findAll();
    const knownAliases = new Set(agents.map(a => a.name));
    for (const entry of entries) {
      if (!knownAliases.has(entry)) {
        log.info({ orphan: entry }, 'removing orphaned agent workdir');
        try {
          await rm(join(workdirsPath, entry), { recursive: true, force: true });
        } catch (err) {
          log.warn({ orphan: entry, err: err instanceof Error ? err.message : String(err) }, 'failed to remove orphaned workdir');
        }
      }
    }
    // Prune stale worktree metadata in the root repo and every project clone.
    try {
      await execFileAsync('git', ['worktree', 'prune'], { cwd: this.workspaceRoot });
    } catch { /* ignore */ }
    const reposPath = join(this.workspaceRoot, 'repos');
    try {
      const repoDirs = await readdir(reposPath);
      for (const repoDir of repoDirs) {
        try {
          await execFileAsync('git', ['worktree', 'prune'], { cwd: join(reposPath, repoDir) });
        } catch { /* ignore */ }
      }
    } catch { /* no repos dir */ }
  }

  /**
   * Clean up orphaned agent log directories (directories with no matching DB agent).
   */
  async cleanupOrphanedLogs(): Promise<void> {
    const logsPath = join(this.workspaceRoot, '.cw', 'agent-logs');
    let entries: string[];
    try {
      entries = await readdir(logsPath);
    } catch {
      return;
    }
    const agents = await this.repository.findAll();
    const knownNames = new Set(agents.map(a => a.name));
    for (const entry of entries) {
      if (!knownNames.has(entry)) {
        log.info({ orphan: entry }, 'removing orphaned agent log dir');
        try {
          await rm(join(logsPath, entry), { recursive: true, force: true });
        } catch (err) {
          log.warn({ orphan: entry, err: err instanceof Error ? err.message : String(err) }, 'failed to remove orphaned log dir');
        }
      }
    }
  }

  /**
   * Get the relative subdirectory names of dirty worktrees for an agent.
   * Returns an empty array if all worktrees are clean or the workdir doesn't exist.
   */
  async getDirtyWorktreePaths(alias: string, initiativeId: string | null): Promise<string[]> {
    const agentWorkdir = this.getAgentWorkdir(alias);
    try {
      await readdir(agentWorkdir);
    } catch {
      return [];
    }
    const worktreePaths: { absPath: string; name: string }[] = [];
    if (initiativeId) {
      const projects = await this.projectRepository.findProjectsByInitiativeId(initiativeId);
      for (const project of projects) {
        worktreePaths.push({ absPath: join(agentWorkdir, project.name), name: project.name });
      }
    } else {
      worktreePaths.push({ absPath: join(agentWorkdir, 'workspace'), name: 'workspace' });
    }
    const dirty: string[] = [];
    for (const { absPath, name } of worktreePaths) {
      try {
        const { stdout } = await execFileAsync('git', ['status', '--porcelain'], { cwd: absPath });
        if (stdout.trim().length > 0) dirty.push(name);
      } catch {
        // Conservative: if git status fails, treat the worktree as dirty.
        dirty.push(name);
      }
    }
    return dirty;
  }

  /**
   * Check if all project worktrees for an agent are clean (no uncommitted/untracked files).
   */
  async isWorkdirClean(alias: string, initiativeId: string | null): Promise<boolean> {
    const dirty = await this.getDirtyWorktreePaths(alias, initiativeId);
    if (dirty.length > 0) {
      log.info({ alias, dirtyWorktrees: dirty }, 'workdir has uncommitted changes');
    }
    return dirty.length === 0;
  }

  /**
   * Archive agent workdir and logs to .cw/debug/ before removal.
   */
  async archiveForDebug(alias: string, agentId: string): Promise<void> {
    const agentWorkdir = this.getAgentWorkdir(alias);
    const debugWorkdir = join(this.workspaceRoot, '.cw', 'debug', 'workdirs', alias);
    const logDir = join(this.workspaceRoot, '.cw', 'agent-logs', alias);
    const debugLogDir = join(this.workspaceRoot, '.cw', 'debug', 'agent-logs', alias);
    // Both copies are best-effort; archival failure never blocks cleanup.
    try {
      if (existsSync(agentWorkdir)) {
        await mkdir(join(this.workspaceRoot, '.cw', 'debug', 'workdirs'), { recursive: true });
        await cp(agentWorkdir, debugWorkdir, { recursive: true });
        log.debug({ alias, debugWorkdir }, 'archived workdir for debug');
      }
    } catch (err) {
      log.warn({ alias, err: err instanceof Error ? err.message : String(err) }, 'failed to archive workdir for debug');
    }
    try {
      if (existsSync(logDir)) {
        await mkdir(join(this.workspaceRoot, '.cw', 'debug', 'agent-logs'), { recursive: true });
        await cp(logDir, debugLogDir, { recursive: true });
        log.debug({ agentId, debugLogDir }, 'archived logs for debug');
      }
    } catch (err) {
      log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to archive logs for debug');
    }
  }

  /**
   * Auto-cleanup agent workdir after successful completion.
   * Removes worktrees and logs but preserves branches and DB record.
   */
  async autoCleanupAfterCompletion(
    agentId: string,
    alias: string,
    initiativeId: string | null,
  ): Promise<{ clean: boolean; removed: boolean }> {
    const agentWorkdir = this.getAgentWorkdir(alias);
    // Idempotent: if workdir is already gone, nothing to do
    if (!existsSync(agentWorkdir)) {
      return { clean: true, removed: true };
    }
    // Never remove uncommitted work; report it back instead.
    const clean = await this.isWorkdirClean(alias, initiativeId);
    if (!clean) {
      return { clean: false, removed: false };
    }
    if (this.debug) {
      await this.archiveForDebug(alias, agentId);
    }
    try {
      await this.removeAgentWorktrees(alias, initiativeId);
    } catch (err) {
      log.warn({ agentId, alias, err: err instanceof Error ? err.message : String(err) }, 'auto-cleanup: failed to remove worktrees');
    }
    try {
      await this.removeAgentLogs(alias);
    } catch (err) {
      log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'auto-cleanup: failed to remove logs');
    }
    log.info({ agentId, alias }, 'auto-cleanup: workdir and logs removed');
    return { clean: true, removed: true };
  }

  /**
   * Reconcile agent state after server restart.
   * Checks all agents in 'running' status:
   * - If PID is still alive: create FileTailer to resume streaming
   * - If PID is dead but output file exists: process the output
   * - Otherwise: mark as crashed
   *
   * @param activeAgents - Shared map from manager to register live agents
   * @param onStreamEvent - Callback for stream events from tailer
   * @param onAgentOutput - Callback to process raw agent output
   * @param pollForCompletion - Callback to start polling for completion
   */
  async reconcileAfterRestart(
    activeAgents: Map<string, {
      agentId: string;
      pid: number;
      tailer: FileTailer;
      outputFilePath: string;
      agentCwd?: string;
    }>,
    onStreamEvent: (agentId: string, event: StreamEvent) => void,
    onAgentOutput: (agentId: string, rawOutput: string, provider: NonNullable<ReturnType<typeof getProvider>>) => Promise<void>,
    pollForCompletion: (agentId: string, pid: number) => void,
    onRawContent?: (agentId: string, agentName: string, content: string) => void,
  ): Promise<void> {
    const runningAgents = await this.repository.findByStatus('running');
    log.info({ runningCount: runningAgents.length }, 'reconciling agents after restart');
    for (const agent of runningAgents) {
      const alive = agent.pid ? isPidAlive(agent.pid) : false;
      log.info({ agentId: agent.id, pid: agent.pid, alive }, 'reconcile: checking agent');
      if (alive && agent.outputFilePath) {
        // Case 1: process survived the restart — re-attach streaming + polling.
        log.debug({ agentId: agent.id, pid: agent.pid }, 'reconcile: resuming streaming for alive agent');
        const parser = getStreamParser(agent.provider);
        const tailer = new FileTailer({
          filePath: agent.outputFilePath,
          agentId: agent.id,
          parser,
          onEvent: (event) => onStreamEvent(agent.id, event),
          startFromBeginning: false,
          onRawContent: onRawContent
            ? (content) => onRawContent(agent.id, agent.name, content)
            : undefined,
        });
        tailer.start().catch((err) => {
          log.warn({ agentId: agent.id, err: err instanceof Error ? err.message : String(err) }, 'failed to start tailer during reconcile');
        });
        const pid = agent.pid!;
        // Resolve actual agent cwd — standalone agents run in workspace/ subdir
        const resolvedCwd = this.resolveAgentCwd(agent.worktreeId);
        activeAgents.set(agent.id, {
          agentId: agent.id,
          pid,
          tailer,
          outputFilePath: agent.outputFilePath,
          agentCwd: resolvedCwd,
        });
        pollForCompletion(agent.id, pid);
      } else if (agent.outputFilePath) {
        // Case 2: process died while we were down — try to salvage its output.
        // CRITICAL FIX: Check for signal.json completion FIRST before parsing raw output
        // Resolve actual agent cwd — standalone agents run in workspace/ subdir
        const agentWorkdir = this.resolveAgentCwd(agent.worktreeId);
        const hasValidSignal = this.signalManager ? await this.signalManager.readSignal(agentWorkdir) : null;
        if (hasValidSignal) {
          log.debug({ agentId: agent.id }, 'found valid signal.json, processing as completion');
          try {
            const signalFile = join(agentWorkdir, '.cw/output/signal.json');
            const signalContent = await readFile(signalFile, 'utf-8');
            const provider = getProvider(agent.provider);
            if (provider) {
              await onAgentOutput(agent.id, signalContent, provider);
              continue;
            }
          } catch (err) {
            log.error({
              agentId: agent.id,
              err: err instanceof Error ? err.message : String(err)
            }, 'reconcile: failed to process signal.json');
            // Fall through to raw output processing
          }
        }
        try {
          const rawOutput = await readFile(agent.outputFilePath, 'utf-8');
          if (rawOutput.trim()) {
            const provider = getProvider(agent.provider);
            if (provider) {
              // Check if agent actually completed successfully before processing
              const hasCompletionResult = this.checkForCompletionResult(rawOutput);
              if (hasCompletionResult) {
                log.info({ agentId: agent.id }, 'reconcile: processing completed agent output');
                try {
                  await onAgentOutput(agent.id, rawOutput, provider);
                  continue;
                } catch (err) {
                  log.error({
                    agentId: agent.id,
                    err: err instanceof Error ? err.message : String(err)
                  }, 'reconcile: failed to process completed agent output');
                  // Mark as crashed since processing failed
                  await this.repository.update(agent.id, { status: 'crashed' });
                  this.emitCrashed(agent, `Failed to process output: ${err instanceof Error ? err.message : String(err)}`);
                  continue;
                }
              }
            }
          }
        } catch (readErr) {
          log.warn({
            agentId: agent.id,
            err: readErr instanceof Error ? readErr.message : String(readErr)
          }, 'reconcile: failed to read output file');
        }
        log.warn({ agentId: agent.id }, 'reconcile: marking agent crashed (no valid output)');
        await this.repository.update(agent.id, { status: 'crashed' });
        this.emitCrashed(agent, 'Server restarted, agent output not found or invalid');
      } else {
        // Case 3: no output file at all — nothing to salvage.
        log.warn({ agentId: agent.id }, 'reconcile: marking agent crashed');
        await this.repository.update(agent.id, { status: 'crashed' });
        this.emitCrashed(agent, 'Server restarted while agent was running');
      }
    }
    // Final sweep: drop workdirs/logs that no longer belong to any agent.
    try {
      await this.cleanupOrphanedWorkdirs();
    } catch (err) {
      log.warn({ err: err instanceof Error ? err.message : String(err) }, 'orphaned workdir cleanup failed');
    }
    try {
      await this.cleanupOrphanedLogs();
    } catch (err) {
      log.warn({ err: err instanceof Error ? err.message : String(err) }, 'orphaned log cleanup failed');
    }
  }

  /**
   * Check if the agent output contains a completion result line.
   * This indicates the agent finished successfully, even if processing fails.
   */
  private checkForCompletionResult(rawOutput: string): boolean {
    try {
      const lines = rawOutput.trim().split('\n');
      for (const line of lines) {
        try {
          const parsed = JSON.parse(line);
          // Look for Claude CLI result events with success status
          if (parsed.type === 'result' && parsed.subtype === 'success') {
            return true;
          }
          // Look for other providers' completion indicators
          if (parsed.status === 'done' || parsed.status === 'questions') {
            return true;
          }
        } catch { /* skip non-JSON lines */ }
      }
    } catch { /* invalid output format */ }
    return false;
  }

  /**
   * Emit a crashed event for an agent.
   */
  private emitCrashed(agent: { id: string; name: string; taskId: string | null }, error: string): void {
    if (this.eventBus) {
      const event: AgentCrashedEvent = {
        type: 'agent:crashed',
        timestamp: new Date(),
        payload: {
          agentId: agent.id,
          name: agent.name,
          taskId: agent.taskId ?? '',
          error,
        },
      };
      this.eventBus.emit(event);
    }
  }
}

View File

@@ -0,0 +1,126 @@
/**
* Content Serializer
*
* Converts Tiptap JSON page tree into markdown for agent prompts.
* Uses @tiptap/markdown's MarkdownManager for standard node serialization,
* with custom handling only for pageLink nodes.
*/
import { Node, type JSONContent } from '@tiptap/core';
import StarterKit from '@tiptap/starter-kit';
import Link from '@tiptap/extension-link';
import { MarkdownManager } from '@tiptap/markdown';
/**
* Minimal page shape needed for serialization.
*/
export interface PageForSerialization {
  id: string;
  parentPageId: string | null; // null marks a root page
  title: string;
  content: string | null; // JSON string from Tiptap
  sortOrder: number; // ordering among siblings (ascending)
}
/**
* Server-side pageLink node — only needs schema definition + markdown rendering.
*/
const ServerPageLink = Node.create({
  name: 'pageLink',
  group: 'block',
  atom: true, // leaf node — no editable content of its own
  addAttributes() {
    return {
      pageId: { default: null },
    };
  },
  // Serializes as a wiki-style marker so agents can reference the page by id.
  renderMarkdown(node: JSONContent) {
    const pageId = (node.attrs?.pageId as string) ?? '';
    return `[[page:${pageId}]]\n\n`;
  },
});
let _manager: MarkdownManager | null = null;

/** Lazily build the module-level MarkdownManager singleton. */
function getManager(): MarkdownManager {
  _manager ??= new MarkdownManager({
    extensions: [StarterKit, Link, ServerPageLink],
  });
  return _manager;
}
/**
* Convert a Tiptap JSON document to markdown.
*/
/**
 * Convert a Tiptap JSON document to markdown.
 * Anything that is not a well-formed `doc` node serializes to ''.
 */
export function tiptapJsonToMarkdown(json: unknown): string {
  if (json == null || typeof json !== 'object') return '';
  const doc = json as JSONContent;
  const isDoc = doc.type === 'doc' && Array.isArray(doc.content);
  return isDoc ? getManager().serialize(doc).trim() : '';
}
/**
* Serialize an array of pages into a single markdown document.
* Pages are organized as a tree (root first, then children by sortOrder).
*
* Each page is marked with <!-- page:$id --> so the agent can reference them.
*/
/**
 * Serialize an array of pages into one markdown document.
 *
 * Pages render depth-first: roots (parentPageId === null) first, children
 * after their parent, siblings ordered by sortOrder. Each section begins
 * with a `<!-- page:$id -->` marker so the agent can reference pages.
 * Headings start at '##' for roots and deepen per level (capped at h6).
 */
export function serializePageTree(pages: PageForSerialization[]): string {
  if (pages.length === 0) return '';
  // Group pages under their parent id; the null key collects the roots.
  const byParent = new Map<string | null, PageForSerialization[]>();
  for (const page of pages) {
    const siblings = byParent.get(page.parentPageId);
    if (siblings) {
      siblings.push(page);
    } else {
      byParent.set(page.parentPageId, [page]);
    }
  }
  for (const siblings of byParent.values()) {
    siblings.sort((a, b) => a.sortOrder - b.sortOrder);
  }
  const sections: string[] = [];
  const emit = (page: PageForSerialization, depth: number): void => {
    const heading = '#'.repeat(Math.min(depth + 1, 6));
    let section = `<!-- page:${page.id} -->\n${heading} ${page.title}`;
    if (page.content) {
      try {
        const md = tiptapJsonToMarkdown(JSON.parse(page.content));
        if (md.trim()) {
          section += `\n\n${md}`;
        }
      } catch {
        // Invalid JSON — emit the heading only
      }
    }
    sections.push(section);
    for (const child of byParent.get(page.id) ?? []) {
      emit(child, depth + 1);
    }
  };
  for (const root of byParent.get(null) ?? []) {
    emit(root, 1);
  }
  return sections.join('\n\n');
}

View File

@@ -0,0 +1,208 @@
/**
* CredentialHandler — Account selection, credential management, and exhaustion handling.
*
* Extracted from MultiProviderAgentManager. Handles account lifecycle:
* selecting the next available account, writing credentials to disk,
* ensuring they're fresh, and marking accounts as exhausted on failure.
*/
import { readFileSync, existsSync } from 'node:fs';
import { join } from 'node:path';
import type { AccountRepository } from '../db/repositories/account-repository.js';
import type { AccountCredentialManager } from './credentials/types.js';
import type { Account } from '../db/schema.js';
import { ensureAccountCredentials } from './accounts/usage.js';
import { getAccountConfigDir } from './accounts/paths.js';
import { setupAccountConfigDir } from './accounts/setup.js';
import { createModuleLogger } from '../logger/index.js';
const log = createModuleLogger('credential-handler');
/** Default exhaustion duration: 5 hours */
const DEFAULT_EXHAUSTION_HOURS = 5;
export class CredentialHandler {
  // Repository and credential manager are optional: when absent, the
  // account-related methods degrade to no-ops returning null/false.
  constructor(
    private workspaceRoot: string,
    private accountRepository?: AccountRepository,
    private credentialManager?: AccountCredentialManager,
  ) {}

  /**
   * Select the next available account for a provider.
   * Clears expired exhaustion, returns least-recently-used non-exhausted account.
   * Returns null if no accounts are available.
   */
  async selectAccount(providerName: string): Promise<{ account: Account; accountId: string; configDir: string } | null> {
    if (!this.accountRepository) return null;
    await this.accountRepository.clearExpiredExhaustion();
    const account = await this.accountRepository.findNextAvailable(providerName);
    if (!account) return null;
    const configDir = getAccountConfigDir(this.workspaceRoot, account.id);
    // Touch lastUsed so subsequent selections rotate across accounts.
    await this.accountRepository.updateLastUsed(account.id);
    return { account, accountId: account.id, configDir };
  }

  /**
   * Write account credentials from DB to the convention-based config directory.
   * Must be called before ensureCredentials so the files exist on disk.
   */
  writeCredentialsToDisk(account: Account, configDir: string): void {
    if (account.configJson && account.credentials) {
      setupAccountConfigDir(configDir, {
        configJson: JSON.parse(account.configJson),
        credentials: account.credentials,
      });
      log.debug({ accountId: account.id, configDir }, 'wrote account credentials from DB to disk');
    } else {
      // Not fatal here — ensureCredentials will report invalid later.
      log.warn({ accountId: account.id }, 'account has no stored credentials in DB');
    }
  }

  /**
   * Read refreshed credentials from disk and persist back to DB.
   * Called after credential refresh to keep DB in sync.
   */
  async persistRefreshedCredentials(accountId: string, configDir: string): Promise<void> {
    if (!this.accountRepository) return;
    try {
      const credPath = join(configDir, '.credentials.json');
      const credentials = readFileSync(credPath, 'utf-8');
      // Stored as the raw file contents, mirroring how they were written.
      await this.accountRepository.updateCredentials(accountId, credentials);
      log.debug({ accountId }, 'persisted refreshed credentials back to DB');
    } catch (err) {
      // Best-effort: a failed persist leaves the DB stale but does not block the agent.
      log.warn({ accountId, err: err instanceof Error ? err.message : String(err) }, 'failed to persist refreshed credentials to DB');
    }
  }

  /**
   * Ensure credentials are valid before spawn/resume.
   * Uses credentialManager if available, otherwise falls back to legacy function.
   * Returns { valid, refreshed } so callers can persist refresh back to DB.
   */
  async ensureCredentials(configDir: string, accountId?: string): Promise<{ valid: boolean; refreshed: boolean }> {
    if (this.credentialManager) {
      const result = await this.credentialManager.ensureValid(configDir, accountId);
      return { valid: result.valid, refreshed: result.refreshed };
    }
    // Legacy path cannot report whether a refresh happened.
    const valid = await ensureAccountCredentials(configDir);
    return { valid, refreshed: false };
  }

  /**
   * Read the access token from a config directory's .credentials.json.
   * Returns null if credentials file is missing or malformed.
   * Used for CLAUDE_CODE_OAUTH_TOKEN env var injection.
   */
  readAccessToken(configDir: string): string | null {
    try {
      const credPath = join(configDir, '.credentials.json');
      if (!existsSync(credPath)) return null;
      const raw = readFileSync(credPath, 'utf-8');
      const parsed = JSON.parse(raw);
      return parsed.claudeAiOauth?.accessToken ?? null;
    } catch {
      return null;
    }
  }

  /**
   * Prepare process environment with account credentials.
   * Writes credentials to disk, ensures freshness, injects OAuth token.
   * Used by spawn, resumeForCommit, and resumeInternal.
   */
  async prepareProcessEnv(
    providerEnv: Record<string, string>,
    provider: { configDirEnv?: string },
    accountId: string | null,
  ): Promise<{ processEnv: Record<string, string>; accountConfigDir: string | null }> {
    const processEnv: Record<string, string> = { ...providerEnv };
    let accountConfigDir: string | null = null;
    // Account handling only applies when the provider exposes a config-dir
    // env var AND an account plus repository are available.
    if (accountId && provider.configDirEnv && this.accountRepository) {
      accountConfigDir = getAccountConfigDir(this.workspaceRoot, accountId);
      const account = await this.accountRepository.findById(accountId);
      if (account) {
        this.writeCredentialsToDisk(account, accountConfigDir);
      }
      processEnv[provider.configDirEnv] = accountConfigDir;
      const { valid, refreshed } = await this.ensureCredentials(accountConfigDir, accountId);
      if (!valid) {
        // NOTE(review): spawn proceeds even when refresh fails — the stale
        // token may still work; exhaustion handling catches hard failures.
        log.warn({ accountId }, 'failed to refresh credentials');
      }
      if (refreshed) {
        await this.persistRefreshedCredentials(accountId, accountConfigDir);
      }
      const accessToken = this.readAccessToken(accountConfigDir);
      if (accessToken) {
        processEnv['CLAUDE_CODE_OAUTH_TOKEN'] = accessToken;
        log.debug({ accountId }, 'CLAUDE_CODE_OAUTH_TOKEN injected');
      }
    }
    return { processEnv, accountConfigDir };
  }

  /**
   * Check if an error message indicates usage limit exhaustion.
   * Case-insensitive substring match against known provider phrasings.
   */
  isUsageLimitError(errorMessage: string): boolean {
    const patterns = [
      'usage limit',
      'rate limit',
      'quota exceeded',
      'too many requests',
      'capacity',
      'exhausted',
    ];
    const lower = errorMessage.toLowerCase();
    return patterns.some((p) => lower.includes(p));
  }

  /**
   * Handle account exhaustion: mark current account exhausted and find next available.
   * Returns the new account info if failover succeeded, null otherwise.
   * Does NOT re-spawn — the caller (manager) handles that.
   */
  async handleExhaustion(
    accountId: string,
    providerName: string,
  ): Promise<{ account: Account; accountId: string; configDir: string } | null> {
    if (!this.accountRepository) return null;
    log.warn({ accountId, provider: providerName }, 'account exhausted, attempting failover');
    // Mark current account as exhausted (sidelined for DEFAULT_EXHAUSTION_HOURS)
    const exhaustedUntil = new Date(Date.now() + DEFAULT_EXHAUSTION_HOURS * 60 * 60 * 1000);
    await this.accountRepository.markExhausted(accountId, exhaustedUntil);
    // Find next available account
    const nextAccount = await this.accountRepository.findNextAvailable(providerName);
    if (!nextAccount) {
      log.warn({ accountId }, 'account failover failed, no accounts available');
      return null;
    }
    log.info({ previousAccountId: accountId, newAccountId: nextAccount.id }, 'account failover successful');
    // Write credentials and ensure they're fresh
    const nextConfigDir = getAccountConfigDir(this.workspaceRoot, nextAccount.id);
    this.writeCredentialsToDisk(nextAccount, nextConfigDir);
    const { valid, refreshed } = await this.ensureCredentials(nextConfigDir, nextAccount.id);
    if (!valid) {
      // Unlike spawn, failover requires fresh credentials — bail out.
      log.warn({ newAccountId: nextAccount.id }, 'failed to refresh failover account credentials');
      return null;
    }
    if (refreshed) {
      await this.persistRefreshedCredentials(nextAccount.id, nextConfigDir);
    }
    await this.accountRepository.updateLastUsed(nextAccount.id);
    return { account: nextAccount, accountId: nextAccount.id, configDir: nextConfigDir };
  }
}

View File

@@ -0,0 +1,330 @@
/**
* Default Account Credential Manager
*
* File-based adapter implementing AccountCredentialManager port.
* Reads/writes credentials from ~/.cw/accounts/<uuid>/.credentials.json
* and emits events on credential state changes.
*/
import { readFileSync, existsSync, writeFileSync, mkdirSync } from 'node:fs';
import { join, dirname } from 'node:path';
import type { EventBus } from '../../events/index.js';
import type {
AccountCredentialManager,
OAuthCredentials,
RefreshResult,
CredentialValidationResult,
} from './types.js';
import type {
AccountCredentialsRefreshedEvent,
AccountCredentialsExpiredEvent,
AccountCredentialsValidatedEvent,
} from '../../events/types.js';
import { createModuleLogger } from '../../logger/index.js';
const log = createModuleLogger('credential-manager');
/** Anthropic OAuth token refresh endpoint */
const TOKEN_REFRESH_URL = 'https://console.anthropic.com/v1/oauth/token';
/** OAuth client ID for Claude CLI */
const OAUTH_CLIENT_ID = '9d1c250a-e61b-44d9-88ed-5944d1962f5e';
/** Buffer before expiry to trigger refresh (5 minutes) */
const TOKEN_REFRESH_BUFFER_MS = 300_000;
/**
* DefaultAccountCredentialManager - File-based credential management with event emission.
*
* Implements the AccountCredentialManager port for managing OAuth credentials
* stored in account config directories.
*/
export class DefaultAccountCredentialManager implements AccountCredentialManager {
  // eventBus is optional: when absent, all emit* helpers become no-ops.
  constructor(private eventBus?: EventBus) {}

  /**
   * Read credentials from a config directory.
   * Expects `.credentials.json` with a `claudeAiOauth` section; returns null
   * when the file is missing, unreadable, or lacks an access token.
   */
  read(configDir: string): OAuthCredentials | null {
    try {
      const credPath = join(configDir, '.credentials.json');
      if (!existsSync(credPath)) return null;
      const raw = readFileSync(credPath, 'utf-8');
      const parsed = JSON.parse(raw);
      const oauth = parsed.claudeAiOauth;
      if (!oauth || !oauth.accessToken) return null;
      return {
        accessToken: oauth.accessToken,
        refreshToken: oauth.refreshToken ?? null,
        expiresAt: oauth.expiresAt ?? null,
        subscriptionType: oauth.subscriptionType ?? null,
        rateLimitTier: oauth.rateLimitTier ?? null,
      };
    } catch {
      // Malformed JSON is treated the same as a missing file.
      return null;
    }
  }

  /**
   * Check if credentials are expired or about to expire.
   * Applies a TOKEN_REFRESH_BUFFER_MS (5 min) lead so tokens are refreshed
   * before they actually lapse mid-run.
   */
  isExpired(credentials: OAuthCredentials): boolean {
    if (!credentials.expiresAt) return false; // Setup tokens without expiry are treated as non-expired
    return credentials.expiresAt < Date.now() + TOKEN_REFRESH_BUFFER_MS;
  }

  /**
   * Refresh an access token using the refresh token.
   * Returns null on any HTTP or network failure (logged, never thrown).
   */
  async refresh(configDir: string, refreshToken: string): Promise<RefreshResult | null> {
    try {
      // NOTE(review): no timeout/AbortSignal on this fetch — a hung endpoint
      // would stall ensureValid indefinitely; consider AbortSignal.timeout.
      const response = await fetch(TOKEN_REFRESH_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          grant_type: 'refresh_token',
          refresh_token: refreshToken,
          client_id: OAUTH_CLIENT_ID,
          scope: 'user:inference user:profile',
        }),
      });
      if (!response.ok) {
        log.warn({ configDir, status: response.status }, 'token refresh failed');
        return null;
      }
      const data = await response.json();
      return {
        accessToken: data.access_token,
        refreshToken: data.refresh_token,
        expiresIn: data.expires_in,
      };
    } catch (err) {
      log.error({ configDir, err: err instanceof Error ? err.message : String(err) }, 'token refresh error');
      return null;
    }
  }

  /**
   * Write updated credentials to the config directory.
   * Preserves unrelated fields in the existing file and creates the
   * directory if needed. expiresIn is in seconds; stored expiresAt is ms.
   */
  write(
    configDir: string,
    accessToken: string,
    refreshToken: string,
    expiresIn: number,
  ): void {
    const credPath = join(configDir, '.credentials.json');
    // Read existing credentials to preserve other fields
    let existing: Record<string, unknown> = {};
    try {
      if (existsSync(credPath)) {
        existing = JSON.parse(readFileSync(credPath, 'utf-8'));
      }
    } catch {
      // Start fresh if can't read
    }
    // Calculate expiry in milliseconds
    const nowMs = Date.now();
    const expiresAt = nowMs + expiresIn * 1000;
    // Update claudeAiOauth section
    const claudeAiOauth = (existing.claudeAiOauth as Record<string, unknown>) ?? {};
    claudeAiOauth.accessToken = accessToken;
    claudeAiOauth.refreshToken = refreshToken;
    claudeAiOauth.expiresAt = expiresAt;
    existing.claudeAiOauth = claudeAiOauth;
    // Ensure directory exists
    mkdirSync(dirname(credPath), { recursive: true });
    // Write back (compact JSON for consistency)
    writeFileSync(credPath, JSON.stringify(existing));
    log.debug({ configDir }, 'credentials written after token refresh');
  }

  /**
   * Ensure credentials are valid, refreshing if needed.
   * Failure paths return credentials: null; success paths return the
   * freshly read credentials. Emits expired/refreshed/validated events.
   */
  async ensureValid(configDir: string, accountId?: string): Promise<CredentialValidationResult> {
    const credentials = this.read(configDir);
    if (!credentials) {
      log.warn({ configDir, accountId }, 'no credentials found');
      this.emitExpired(accountId, 'credentials_missing', 'Credentials file not found');
      return {
        valid: false,
        credentials: null,
        error: 'Credentials file not found',
        refreshed: false,
      };
    }
    if (!this.isExpired(credentials)) {
      log.debug({ configDir, accountId }, 'credentials valid, no refresh needed');
      this.emitValidated(accountId, true, credentials.expiresAt, false);
      return {
        valid: true,
        credentials,
        error: null,
        refreshed: false,
      };
    }
    // Credentials expired — attempt refresh if we have a refresh token
    if (!credentials.refreshToken) {
      log.warn({ configDir, accountId }, 'setup token expired, no refresh token available');
      this.emitExpired(accountId, 'token_expired', 'Setup token expired, no refresh token available');
      return {
        valid: false,
        credentials: null,
        error: 'Setup token expired, no refresh token available',
        refreshed: false,
      };
    }
    log.info({ configDir, accountId }, 'credentials expired, refreshing');
    const previousExpiresAt = credentials.expiresAt;
    const refreshed = await this.refresh(configDir, credentials.refreshToken);
    if (!refreshed) {
      log.error({ configDir, accountId }, 'failed to refresh credentials');
      this.emitExpired(accountId, 'refresh_failed', 'Token refresh failed');
      return {
        valid: false,
        credentials: null,
        error: 'Token refresh failed',
        refreshed: false,
      };
    }
    // Write refreshed credentials; keep the old refresh token if the
    // response omitted a rotated one.
    const newRefreshToken = refreshed.refreshToken || credentials.refreshToken;
    this.write(configDir, refreshed.accessToken, newRefreshToken, refreshed.expiresIn);
    const newExpiresAt = Date.now() + refreshed.expiresIn * 1000;
    log.info({ configDir, accountId, expiresIn: refreshed.expiresIn }, 'credentials refreshed');
    this.emitRefreshed(accountId, newExpiresAt, previousExpiresAt);
    this.emitValidated(accountId, true, newExpiresAt, true);
    // Read back updated credentials
    const updatedCredentials = this.read(configDir);
    return {
      valid: true,
      credentials: updatedCredentials,
      error: null,
      refreshed: true,
    };
  }

  /**
   * Validate credentials without attempting refresh.
   * Unlike ensureValid, the expired case still returns the credentials
   * object (with valid: false) so callers can inspect it.
   */
  async validate(configDir: string, accountId?: string): Promise<CredentialValidationResult> {
    const credentials = this.read(configDir);
    if (!credentials) {
      this.emitValidated(accountId, false, null, false);
      return {
        valid: false,
        credentials: null,
        error: 'Credentials file not found',
        refreshed: false,
      };
    }
    const expired = this.isExpired(credentials);
    this.emitValidated(accountId, !expired, credentials.expiresAt, false);
    if (expired) {
      return {
        valid: false,
        credentials,
        error: 'Token expired',
        refreshed: false,
      };
    }
    return {
      valid: true,
      credentials,
      error: null,
      refreshed: false,
    };
  }

  /**
   * Emit credentials refreshed event.
   */
  private emitRefreshed(
    accountId: string | undefined,
    expiresAt: number,
    previousExpiresAt: number | null,
  ): void {
    if (!this.eventBus) return;
    const event: AccountCredentialsRefreshedEvent = {
      type: 'account:credentials_refreshed',
      timestamp: new Date(),
      payload: {
        accountId: accountId ?? null,
        expiresAt,
        previousExpiresAt,
      },
    };
    this.eventBus.emit(event);
  }

  /**
   * Emit credentials expired event.
   */
  private emitExpired(
    accountId: string | undefined,
    reason: 'token_expired' | 'refresh_failed' | 'credentials_missing',
    error: string | null,
  ): void {
    if (!this.eventBus) return;
    const event: AccountCredentialsExpiredEvent = {
      type: 'account:credentials_expired',
      timestamp: new Date(),
      payload: {
        accountId: accountId ?? null,
        reason,
        error,
      },
    };
    this.eventBus.emit(event);
  }

  /**
   * Emit credentials validated event.
   */
  private emitValidated(
    accountId: string | undefined,
    valid: boolean,
    expiresAt: number | null,
    wasRefreshed: boolean,
  ): void {
    if (!this.eventBus) return;
    const event: AccountCredentialsValidatedEvent = {
      type: 'account:credentials_validated',
      timestamp: new Date(),
      payload: {
        accountId: accountId ?? null,
        valid,
        expiresAt,
        wasRefreshed,
      },
    };
    this.eventBus.emit(event);
  }
}

View File

@@ -0,0 +1,17 @@
/**
* Credentials Module - Public API
*
* Exports the AccountCredentialManager port interface and default adapter.
* All modules should import from this index file.
*/
// Port interface and types
export type {
AccountCredentialManager,
OAuthCredentials,
RefreshResult,
CredentialValidationResult,
} from './types.js';
// Adapter implementation
export { DefaultAccountCredentialManager } from './default-credential-manager.js';

View File

@@ -0,0 +1,98 @@
/**
* Account Credential Manager Types
*
* Port interface for managing OAuth credentials for agent accounts.
* The credential manager reads, validates, refreshes, and persists tokens,
* emitting events on state changes.
*/
/**
* OAuth credentials stored in the account's config directory.
*/
export interface OAuthCredentials {
  /** Bearer token presented to the API. */
  accessToken: string;
  /** Token used to mint a new access token; null for setup tokens. */
  refreshToken: string | null;
  /** Expiry time in milliseconds since epoch. Null for setup tokens with no expiry. */
  expiresAt: number | null;
  /** Copied verbatim from .credentials.json; not interpreted by the manager. */
  subscriptionType: string | null;
  /** Copied verbatim from .credentials.json; not interpreted by the manager. */
  rateLimitTier: string | null;
}
/**
* Result of a token refresh attempt.
*/
export interface RefreshResult {
  /** New bearer token returned by the refresh endpoint. */
  accessToken: string;
  /** Refresh token from the response; may be empty — callers fall back to the previous one. */
  refreshToken: string;
  /** Token lifetime in seconds */
  expiresIn: number;
}
/**
* Result of credential validation or ensureValid operation.
*/
export interface CredentialValidationResult {
  /** Whether credentials are currently valid and usable */
  valid: boolean;
  /**
   * Credentials read from disk. Note the implementations differ:
   * validate() returns the (expired) credentials with valid=false, while
   * ensureValid() returns null on every failure path.
   */
  credentials: OAuthCredentials | null;
  /** Error message if validation failed */
  error: string | null;
  /** Whether credentials were refreshed during this operation */
  refreshed: boolean;
}
/**
* Port interface for account credential management.
*
* Implementations:
* - DefaultAccountCredentialManager: File-based adapter using ~/.cw/accounts/<uuid>/.credentials.json
*/
export interface AccountCredentialManager {
  /**
   * Read credentials from a config directory.
   * Returns null if credentials file is missing or malformed.
   */
  read(configDir: string): OAuthCredentials | null;
  /**
   * Check if credentials are expired or about to expire.
   * Uses a buffer (default 5 minutes) to preemptively refresh.
   */
  isExpired(credentials: OAuthCredentials): boolean;
  /**
   * Refresh an access token using the refresh token.
   * Returns null if refresh fails.
   */
  refresh(configDir: string, refreshToken: string): Promise<RefreshResult | null>;
  /**
   * Write updated credentials to the config directory.
   * Preserves other fields in the credentials file.
   *
   * @param expiresIn - Token lifetime in seconds; implementations persist an absolute timestamp.
   */
  write(
    configDir: string,
    accessToken: string,
    refreshToken: string,
    expiresIn: number,
  ): void;
  /**
   * Ensure credentials are valid, refreshing if needed.
   * Emits events on refresh or expiration.
   *
   * @param configDir - Path to the account's config directory
   * @param accountId - Optional account ID for event payloads
   */
  ensureValid(configDir: string, accountId?: string): Promise<CredentialValidationResult>;
  /**
   * Validate credentials without attempting refresh.
   * Useful for health checks where you want to report state without side effects.
   *
   * @param configDir - Path to the account's config directory
   * @param accountId - Optional account ID for event payloads
   */
  validate(configDir: string, accountId?: string): Promise<CredentialValidationResult>;
}

View File

@@ -0,0 +1,341 @@
/**
* File-Based Agent I/O Tests
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { mkdirSync, writeFileSync, rmSync, existsSync } from 'node:fs';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
import { randomUUID } from 'crypto';
import {
writeInputFiles,
readSummary,
readPhaseFiles,
readTaskFiles,
readDecisionFiles,
readPageFiles,
generateId,
} from './file-io.js';
import type { Initiative, Phase, Task } from '../db/schema.js';
// Fresh temp directory per test; removed afterwards so runs stay isolated.
let testDir: string;
beforeEach(() => {
  testDir = join(tmpdir(), `cw-file-io-test-${randomUUID()}`);
  mkdirSync(testDir, { recursive: true });
});
afterEach(() => {
  rmSync(testDir, { recursive: true, force: true });
});
describe('generateId', () => {
  it('returns a non-empty string', () => {
    const id = generateId();
    expect(id).toBeTruthy();
    expect(typeof id).toBe('string');
  });
  it('returns unique values', () => {
    // 100 draws must never collide for a nanoid-sized id space.
    const ids = new Set(Array.from({ length: 100 }, () => generateId()));
    expect(ids.size).toBe(100);
  });
});
// writeInputFiles mirrors each provided context object into .cw/input/.
describe('writeInputFiles', () => {
  it('writes initiative.md with frontmatter', () => {
    const initiative: Initiative = {
      id: 'init-1',
      name: 'Test Initiative',
      status: 'active',
      mergeRequiresApproval: true,
      branch: 'cw/test-initiative',
      executionMode: 'review_per_phase',
      createdAt: new Date('2026-01-01'),
      updatedAt: new Date('2026-01-02'),
    };
    writeInputFiles({ agentWorkdir: testDir, initiative });
    const filePath = join(testDir, '.cw', 'input', 'initiative.md');
    expect(existsSync(filePath)).toBe(true);
  });
  it('writes phase.md with frontmatter', () => {
    // Cast: only the fields the serializer reads are provided.
    const phase = {
      id: 'phase-1',
      initiativeId: 'init-1',
      number: 1,
      name: 'Phase One',
      content: 'First phase',
      status: 'pending',
      createdAt: new Date(),
      updatedAt: new Date(),
    } as Phase;
    writeInputFiles({ agentWorkdir: testDir, phase });
    const filePath = join(testDir, '.cw', 'input', 'phase.md');
    expect(existsSync(filePath)).toBe(true);
  });
  it('writes task.md with frontmatter', () => {
    const task = {
      id: 'task-1',
      name: 'Test Task',
      description: 'Do the thing',
      category: 'execute',
      type: 'auto',
      priority: 'medium',
      status: 'pending',
      order: 1,
      createdAt: new Date(),
      updatedAt: new Date(),
    } as Task;
    writeInputFiles({ agentWorkdir: testDir, task });
    const filePath = join(testDir, '.cw', 'input', 'task.md');
    expect(existsSync(filePath)).toBe(true);
  });
  it('writes pages to pages/ subdirectory', () => {
    // One file per page, named by page id.
    writeInputFiles({
      agentWorkdir: testDir,
      pages: [
        { id: 'page-1', parentPageId: null, title: 'Root', content: null, sortOrder: 0 },
        { id: 'page-2', parentPageId: 'page-1', title: 'Child', content: null, sortOrder: 1 },
      ],
    });
    expect(existsSync(join(testDir, '.cw', 'input', 'pages', 'page-1.md'))).toBe(true);
    expect(existsSync(join(testDir, '.cw', 'input', 'pages', 'page-2.md'))).toBe(true);
  });
  it('handles empty options without error', () => {
    // Even with no context, the input directory itself is created.
    writeInputFiles({ agentWorkdir: testDir });
    expect(existsSync(join(testDir, '.cw', 'input'))).toBe(true);
  });
});
describe('readSummary', () => {
  it('reads SUMMARY.md with frontmatter', () => {
    const outputDir = join(testDir, '.cw', 'output');
    mkdirSync(outputDir, { recursive: true });
    // Fixture content starts at column 0: frontmatter delimiters must be at line start.
    writeFileSync(
      join(outputDir, 'SUMMARY.md'),
      `---
files_modified:
  - src/foo.ts
  - src/bar.ts
---
Task completed successfully. Refactored the module.
`,
      'utf-8',
    );
    const summary = readSummary(testDir);
    expect(summary).not.toBeNull();
    // Body is returned trimmed (no trailing newline).
    expect(summary!.body).toBe('Task completed successfully. Refactored the module.');
    expect(summary!.filesModified).toEqual(['src/foo.ts', 'src/bar.ts']);
  });
  it('returns null when SUMMARY.md does not exist', () => {
    const summary = readSummary(testDir);
    expect(summary).toBeNull();
  });
  it('handles SUMMARY.md without frontmatter', () => {
    const outputDir = join(testDir, '.cw', 'output');
    mkdirSync(outputDir, { recursive: true });
    writeFileSync(join(outputDir, 'SUMMARY.md'), 'Just plain text\n', 'utf-8');
    const summary = readSummary(testDir);
    expect(summary).not.toBeNull();
    expect(summary!.body).toBe('Just plain text');
    // No frontmatter means filesModified is absent, not [].
    expect(summary!.filesModified).toBeUndefined();
  });
  it('handles empty files_modified', () => {
    const outputDir = join(testDir, '.cw', 'output');
    mkdirSync(outputDir, { recursive: true });
    writeFileSync(
      join(outputDir, 'SUMMARY.md'),
      `---
files_modified: []
---
Done.
`,
      'utf-8',
    );
    const summary = readSummary(testDir);
    expect(summary!.filesModified).toEqual([]);
  });
});
describe('readPhaseFiles', () => {
  it('reads phase files from phases/ directory', () => {
    const phasesDir = join(testDir, '.cw', 'output', 'phases');
    mkdirSync(phasesDir, { recursive: true });
    writeFileSync(
      join(phasesDir, 'abc123.md'),
      `---
title: Database Schema
dependencies:
  - xyz789
---
Create the user tables and auth schema.
`,
      'utf-8',
    );
    const phases = readPhaseFiles(testDir);
    expect(phases).toHaveLength(1);
    // id is derived from the file name (without .md).
    expect(phases[0].id).toBe('abc123');
    expect(phases[0].title).toBe('Database Schema');
    expect(phases[0].dependencies).toEqual(['xyz789']);
    expect(phases[0].body).toBe('Create the user tables and auth schema.');
  });
  it('returns empty array when directory does not exist', () => {
    const phases = readPhaseFiles(testDir);
    expect(phases).toEqual([]);
  });
  it('handles phases with no dependencies', () => {
    const phasesDir = join(testDir, '.cw', 'output', 'phases');
    mkdirSync(phasesDir, { recursive: true });
    writeFileSync(
      join(phasesDir, 'p1.md'),
      `---
title: Foundation
---
Set up the base.
`,
      'utf-8',
    );
    const phases = readPhaseFiles(testDir);
    // Missing frontmatter key defaults to an empty list.
    expect(phases[0].dependencies).toEqual([]);
  });
});
describe('readTaskFiles', () => {
  it('reads task files from tasks/ directory', () => {
    const tasksDir = join(testDir, '.cw', 'output', 'tasks');
    mkdirSync(tasksDir, { recursive: true });
    writeFileSync(
      join(tasksDir, 'task-1.md'),
      `---
title: Implement login
category: execute
type: auto
dependencies:
  - task-0
---
Build the login form and submit handler.
`,
      'utf-8',
    );
    const tasks = readTaskFiles(testDir);
    expect(tasks).toHaveLength(1);
    expect(tasks[0].id).toBe('task-1');
    expect(tasks[0].title).toBe('Implement login');
    expect(tasks[0].category).toBe('execute');
    expect(tasks[0].type).toBe('auto');
    expect(tasks[0].dependencies).toEqual(['task-0']);
    expect(tasks[0].body).toBe('Build the login form and submit handler.');
  });
  it('defaults category and type when missing', () => {
    const tasksDir = join(testDir, '.cw', 'output', 'tasks');
    mkdirSync(tasksDir, { recursive: true });
    writeFileSync(join(tasksDir, 't1.md'), `---\ntitle: Minimal\n---\nDo it.\n`, 'utf-8');
    const tasks = readTaskFiles(testDir);
    // Omitted fields fall back to 'execute' / 'auto'.
    expect(tasks[0].category).toBe('execute');
    expect(tasks[0].type).toBe('auto');
  });
  it('returns empty array when directory does not exist', () => {
    expect(readTaskFiles(testDir)).toEqual([]);
  });
});
describe('readDecisionFiles', () => {
  it('reads decision files from decisions/ directory', () => {
    const decisionsDir = join(testDir, '.cw', 'output', 'decisions');
    mkdirSync(decisionsDir, { recursive: true });
    writeFileSync(
      join(decisionsDir, 'd1.md'),
      `---
topic: Authentication
decision: Use JWT
reason: Stateless and scalable
---
Additional context about the decision.
`,
      'utf-8',
    );
    const decisions = readDecisionFiles(testDir);
    expect(decisions).toHaveLength(1);
    // id is the file name without .md; remaining fields come from frontmatter.
    expect(decisions[0].id).toBe('d1');
    expect(decisions[0].topic).toBe('Authentication');
    expect(decisions[0].decision).toBe('Use JWT');
    expect(decisions[0].reason).toBe('Stateless and scalable');
    expect(decisions[0].body).toBe('Additional context about the decision.');
  });
  it('returns empty array when directory does not exist', () => {
    expect(readDecisionFiles(testDir)).toEqual([]);
  });
});
describe('readPageFiles', () => {
  it('reads page files from pages/ directory', () => {
    const pagesDir = join(testDir, '.cw', 'output', 'pages');
    mkdirSync(pagesDir, { recursive: true });
    // The blank line between heading and paragraph is significant: the body
    // assertion below expects '\n\n' between them.
    writeFileSync(
      join(pagesDir, 'page-abc.md'),
      `---
title: Architecture Overview
summary: Updated the overview section
---
# Architecture

New content for the page.
`,
      'utf-8',
    );
    const pages = readPageFiles(testDir);
    expect(pages).toHaveLength(1);
    expect(pages[0].pageId).toBe('page-abc');
    expect(pages[0].title).toBe('Architecture Overview');
    expect(pages[0].summary).toBe('Updated the overview section');
    expect(pages[0].body).toBe('# Architecture\n\nNew content for the page.');
  });
  it('returns empty array when directory does not exist', () => {
    expect(readPageFiles(testDir)).toEqual([]);
  });
  it('ignores non-.md files', () => {
    const pagesDir = join(testDir, '.cw', 'output', 'pages');
    mkdirSync(pagesDir, { recursive: true });
    writeFileSync(join(pagesDir, 'readme.txt'), 'not a page', 'utf-8');
    writeFileSync(join(pagesDir, 'page1.md'), '---\ntitle: Page 1\n---\nContent.\n', 'utf-8');
    const pages = readPageFiles(testDir);
    expect(pages).toHaveLength(1);
  });
});

View File

@@ -0,0 +1,376 @@
/**
* File-Based Agent I/O
*
* Writes context as input files before agent spawn and reads output files after completion.
* Uses YAML frontmatter (gray-matter) for structured metadata and markdown bodies.
*
* Input: .cw/input/ — written by system before spawn
* Output: .cw/output/ — written by agent during execution
*/
import { mkdirSync, writeFileSync, readdirSync, existsSync } from 'node:fs';
import { readFileSync } from 'node:fs';
import { join } from 'node:path';
import matter from 'gray-matter';
import { nanoid } from 'nanoid';
import { tiptapJsonToMarkdown } from './content-serializer.js';
import type { AgentInputContext } from './types.js';
// Re-export for convenience
export type { AgentInputContext } from './types.js';
// =============================================================================
// TYPES
// =============================================================================
/** Options for writeInputFiles(): the agent's input context plus the workdir to write under. */
export interface WriteInputFilesOptions extends AgentInputContext {
  // Absolute path of the agent's working directory; files go under <agentWorkdir>/.cw/input/.
  agentWorkdir: string;
}
/** Parsed .cw/output/SUMMARY.md: markdown body plus optional files_modified list from frontmatter. */
export interface ParsedSummary {
  body: string;
  filesModified?: string[];
}
/** Parsed phase file from .cw/output/phases/<id>.md; id comes from the filename. */
export interface ParsedPhaseFile {
  id: string;
  title: string;
  dependencies: string[];
  body: string;
}
/** Parsed task file from .cw/output/tasks/<id>.md; id comes from the filename. */
export interface ParsedTaskFile {
  id: string;
  title: string;
  category: string;
  type: string;
  dependencies: string[];
  body: string;
}
/** Parsed decision file from .cw/output/decisions/<id>.md; id comes from the filename. */
export interface ParsedDecisionFile {
  id: string;
  topic: string;
  decision: string;
  reason: string;
  body: string;
}
/** Parsed page file from .cw/output/pages/<pageId>.md; pageId comes from the filename. */
export interface ParsedPageFile {
  pageId: string;
  title: string;
  summary: string;
  body: string;
}
// =============================================================================
// ID GENERATION
// =============================================================================
/** Generate a unique, URL-safe identifier (via nanoid) for newly created entities. */
export function generateId(): string {
  return nanoid();
}
// =============================================================================
// INPUT FILE WRITING
// =============================================================================
/**
 * Serialize a flat metadata object (plus optional markdown body) into a
 * YAML-frontmatter document — the format readFrontmatterFile() parses back.
 *
 * Rules:
 * - undefined/null values are omitted entirely
 * - arrays render as YAML block sequences (`[]` when empty)
 * - Dates render as quoted ISO-8601 strings
 * - strings containing ':' or newlines are JSON-quoted (valid YAML
 *   double-quoted scalars); everything else is stringified as-is
 *
 * @param data flat key→value frontmatter map
 * @param body optional markdown body appended after the closing `---`
 * @returns the full document text, always ending with a trailing newline
 */
function formatFrontmatter(data: Record<string, unknown>, body: string = ''): string {
  // Shared scalar formatter so list items get the same quoting as top-level
  // values. Fix: items previously went through String() unquoted, so an item
  // containing ':' or a newline produced invalid YAML that gray-matter could
  // not parse back. (Dates in arrays now also serialize as quoted ISO strings.)
  const yamlScalar = (value: unknown): string => {
    if (value instanceof Date) return `"${value.toISOString()}"`;
    if (typeof value === 'string' && (value.includes('\n') || value.includes(':'))) {
      // JSON string escaping is a valid YAML double-quoted scalar.
      return JSON.stringify(value);
    }
    return String(value);
  };
  const lines: string[] = ['---'];
  for (const [key, value] of Object.entries(data)) {
    if (value === undefined || value === null) continue; // omit absent fields
    if (Array.isArray(value)) {
      if (value.length === 0) {
        lines.push(`${key}: []`);
      } else {
        lines.push(`${key}:`);
        for (const item of value) {
          lines.push(`  - ${yamlScalar(item)}`);
        }
      }
    } else {
      lines.push(`${key}: ${yamlScalar(value)}`);
    }
  }
  lines.push('---');
  if (body) {
    lines.push('');
    lines.push(body);
  }
  return lines.join('\n') + '\n';
}
/**
 * Write the agent's input context as files under <agentWorkdir>/.cw/input/.
 *
 * Layout produced (only for parts present in `options`):
 *   .cw/expected-pwd.txt          — the expected working directory (verification marker)
 *   .cw/input/initiative.md       — initiative metadata (frontmatter only)
 *   .cw/input/pages/<id>.md       — page metadata + markdown body (from Tiptap JSON)
 *   .cw/input/phase.md            — current phase metadata + body
 *   .cw/input/task.md             — current task metadata; body is its description
 *   .cw/input/context/phases/…    — read-only sibling phases
 *   .cw/input/context/tasks/…     — read-only sibling tasks
 *   .cw/input/manifest.json       — exact list of files written above
 *
 * Page/phase `content` is expected to be Tiptap JSON; unparseable content
 * yields an empty body rather than an error (best-effort conversion).
 */
export function writeInputFiles(options: WriteInputFilesOptions): void {
  const inputDir = join(options.agentWorkdir, '.cw', 'input');
  mkdirSync(inputDir, { recursive: true });
  // Write expected working directory marker for verification
  // (note: '../expected-pwd.txt' resolves to .cw/expected-pwd.txt, one level above input/).
  writeFileSync(
    join(inputDir, '../expected-pwd.txt'),
    options.agentWorkdir,
    'utf-8'
  );
  // Files written directly under input/ and input/pages/ are tracked here for the manifest.
  const manifestFiles: string[] = [];
  if (options.initiative) {
    const ini = options.initiative;
    // Frontmatter only — initiatives carry no markdown body.
    const content = formatFrontmatter(
      {
        id: ini.id,
        name: ini.name,
        status: ini.status,
        mergeRequiresApproval: ini.mergeRequiresApproval,
        branch: ini.branch,
      },
      '',
    );
    writeFileSync(join(inputDir, 'initiative.md'), content, 'utf-8');
    manifestFiles.push('initiative.md');
  }
  if (options.pages && options.pages.length > 0) {
    const pagesDir = join(inputDir, 'pages');
    mkdirSync(pagesDir, { recursive: true });
    for (const page of options.pages) {
      let bodyMarkdown = '';
      if (page.content) {
        try {
          // Page content is stored as Tiptap JSON; render it to markdown.
          const parsed = JSON.parse(page.content);
          bodyMarkdown = tiptapJsonToMarkdown(parsed);
        } catch {
          // Invalid JSON content — skip
        }
      }
      const content = formatFrontmatter(
        {
          title: page.title,
          parentPageId: page.parentPageId,
          sortOrder: page.sortOrder,
        },
        bodyMarkdown,
      );
      // Manifest entries are input/-relative paths.
      const filename = `pages/${page.id}.md`;
      writeFileSync(join(pagesDir, `${page.id}.md`), content, 'utf-8');
      manifestFiles.push(filename);
    }
  }
  if (options.phase) {
    const ph = options.phase;
    let bodyMarkdown = '';
    if (ph.content) {
      try {
        bodyMarkdown = tiptapJsonToMarkdown(JSON.parse(ph.content));
      } catch {
        // Invalid JSON content — skip
      }
    }
    const content = formatFrontmatter(
      {
        id: ph.id,
        name: ph.name,
        status: ph.status,
      },
      bodyMarkdown,
    );
    writeFileSync(join(inputDir, 'phase.md'), content, 'utf-8');
    manifestFiles.push('phase.md');
  }
  if (options.task) {
    const t = options.task;
    // Task body is the plain-text description (not Tiptap JSON).
    const content = formatFrontmatter(
      {
        id: t.id,
        name: t.name,
        category: t.category,
        type: t.type,
        priority: t.priority,
        status: t.status,
      },
      t.description ?? '',
    );
    writeFileSync(join(inputDir, 'task.md'), content, 'utf-8');
    manifestFiles.push('task.md');
  }
  // Write read-only context directories
  const contextFiles: string[] = [];
  if (options.phases && options.phases.length > 0) {
    const phasesDir = join(inputDir, 'context', 'phases');
    mkdirSync(phasesDir, { recursive: true });
    for (const ph of options.phases) {
      let bodyMarkdown = '';
      if (ph.content) {
        try {
          bodyMarkdown = tiptapJsonToMarkdown(JSON.parse(ph.content));
        } catch {
          // Invalid JSON content — skip
        }
      }
      const content = formatFrontmatter(
        {
          id: ph.id,
          name: ph.name,
          status: ph.status,
          dependsOn: ph.dependsOn ?? [],
        },
        bodyMarkdown,
      );
      const filename = `context/phases/${ph.id}.md`;
      writeFileSync(join(phasesDir, `${ph.id}.md`), content, 'utf-8');
      contextFiles.push(filename);
    }
  }
  if (options.tasks && options.tasks.length > 0) {
    const tasksDir = join(inputDir, 'context', 'tasks');
    mkdirSync(tasksDir, { recursive: true });
    for (const t of options.tasks) {
      const content = formatFrontmatter(
        {
          id: t.id,
          name: t.name,
          phaseId: t.phaseId,
          parentTaskId: t.parentTaskId,
          category: t.category,
          type: t.type,
          priority: t.priority,
          status: t.status,
        },
        t.description ?? '',
      );
      const filename = `context/tasks/${t.id}.md`;
      writeFileSync(join(tasksDir, `${t.id}.md`), content, 'utf-8');
      contextFiles.push(filename);
    }
  }
  // Write manifest listing exactly which files were created
  writeFileSync(
    join(inputDir, 'manifest.json'),
    JSON.stringify({
      files: manifestFiles,
      contextFiles,
      agentId: options.agentId ?? null,
      agentName: options.agentName ?? null,
    }) + '\n',
    'utf-8',
  );
}
// =============================================================================
// OUTPUT FILE READING
// =============================================================================
/**
 * Read and parse a single frontmatter file.
 *
 * @param filePath absolute path to the .md file
 * @returns frontmatter data plus trimmed markdown body, or null when the
 *          file is unreadable or its frontmatter cannot be parsed
 */
export function readFrontmatterFile(filePath: string): { data: Record<string, unknown>; body: string } | null {
  try {
    const { data, content } = matter(readFileSync(filePath, 'utf-8'));
    return { data: data as Record<string, unknown>, body: content.trim() };
  } catch {
    // Missing file or malformed frontmatter — treated the same as "no file".
    return null;
  }
}
/**
 * Read every .md file in a directory, parse its frontmatter, and map each one
 * through `mapper`. Files that fail to parse and mapper results of null are
 * skipped. Returns [] for a missing directory; a mid-iteration error yields
 * whatever was collected up to that point.
 */
function readFrontmatterDir<T>(
  dirPath: string,
  mapper: (data: Record<string, unknown>, body: string, filename: string) => T | null,
): T[] {
  if (!existsSync(dirPath)) return [];
  const collected: T[] = [];
  try {
    for (const name of readdirSync(dirPath)) {
      if (!name.endsWith('.md')) continue;
      const parsed = readFrontmatterFile(join(dirPath, name));
      if (parsed === null) continue;
      const item = mapper(parsed.data, parsed.body, name);
      if (item) collected.push(item);
    }
  } catch {
    // Directory read error — fall through with whatever was gathered.
  }
  return collected;
}
/**
 * Read the agent's .cw/output/SUMMARY.md.
 *
 * @returns the summary body and optional files_modified list from the
 *          frontmatter, or null when the file is absent or unparseable
 */
export function readSummary(agentWorkdir: string): ParsedSummary | null {
  const parsed = readFrontmatterFile(join(agentWorkdir, '.cw', 'output', 'SUMMARY.md'));
  if (parsed === null) return null;
  const raw = parsed.data.files_modified;
  const filesModified = Array.isArray(raw) ? raw.map(String) : undefined;
  return { body: parsed.body, filesModified };
}
/** Read all phase files written by the agent under .cw/output/phases/. */
export function readPhaseFiles(agentWorkdir: string): ParsedPhaseFile[] {
  const outputDir = join(agentWorkdir, '.cw', 'output', 'phases');
  return readFrontmatterDir<ParsedPhaseFile>(outputDir, (data, body, file) => ({
    // Phase id is the filename without the .md suffix.
    id: file.replace(/\.md$/, ''),
    title: String(data.title ?? ''),
    dependencies: Array.isArray(data.dependencies) ? data.dependencies.map(String) : [],
    body,
  }));
}
/** Read all task files written by the agent under .cw/output/tasks/. */
export function readTaskFiles(agentWorkdir: string): ParsedTaskFile[] {
  const outputDir = join(agentWorkdir, '.cw', 'output', 'tasks');
  return readFrontmatterDir<ParsedTaskFile>(outputDir, (data, body, file) => ({
    // Task id is the filename without the .md suffix.
    id: file.replace(/\.md$/, ''),
    title: String(data.title ?? ''),
    // Defaults mirror the frontmatter contract: category 'execute', type 'auto'.
    category: String(data.category ?? 'execute'),
    type: String(data.type ?? 'auto'),
    dependencies: Array.isArray(data.dependencies) ? data.dependencies.map(String) : [],
    body,
  }));
}
/** Read all decision files written by the agent under .cw/output/decisions/. */
export function readDecisionFiles(agentWorkdir: string): ParsedDecisionFile[] {
  const outputDir = join(agentWorkdir, '.cw', 'output', 'decisions');
  return readFrontmatterDir<ParsedDecisionFile>(outputDir, (data, body, file) => ({
    // Decision id is the filename without the .md suffix.
    id: file.replace(/\.md$/, ''),
    topic: String(data.topic ?? ''),
    decision: String(data.decision ?? ''),
    reason: String(data.reason ?? ''),
    body,
  }));
}
/** Read all page files written by the agent under .cw/output/pages/. */
export function readPageFiles(agentWorkdir: string): ParsedPageFile[] {
  const outputDir = join(agentWorkdir, '.cw', 'output', 'pages');
  return readFrontmatterDir<ParsedPageFile>(outputDir, (data, body, file) => ({
    // Page id is the filename without the .md suffix.
    pageId: file.replace(/\.md$/, ''),
    title: String(data.title ?? ''),
    summary: String(data.summary ?? ''),
    body,
  }));
}

View File

@@ -0,0 +1,257 @@
/**
* File Tailer
*
* Watches an output file and emits parsed events in real-time.
* Used for crash-resilient agent spawning where subprocesses write
* directly to files instead of using pipes.
*
* Uses fs.watch() for efficient change detection with a poll fallback
* since fs.watch isn't 100% reliable on all platforms.
*/
import { watch, type FSWatcher } from 'node:fs';
import { open, stat } from 'node:fs/promises';
import type { FileHandle } from 'node:fs/promises';
import type { StreamParser, StreamEvent } from './providers/stream-types.js';
import { createModuleLogger } from '../logger/index.js';
const log = createModuleLogger('file-tailer');
/** Poll interval for the fallback poller (ms) — catches changes fs.watch misses */
const POLL_INTERVAL_MS = 500;
/** Read buffer size (bytes) per read() call — 64 KiB; larger backlogs take multiple reads */
const READ_BUFFER_SIZE = 64 * 1024;
/** Configuration for a FileTailer instance. */
export interface FileTailerOptions {
  /** Path to the output file to watch */
  filePath: string;
  /** Agent ID for logging */
  agentId: string;
  /** Parser to convert lines to stream events */
  parser: StreamParser;
  /** Optional callback for each stream event */
  onEvent?: (event: StreamEvent) => void;
  /** If true, read from beginning of file; otherwise tail only new content (default: false) */
  startFromBeginning?: boolean;
  /** Callback for raw file content chunks (for DB persistence + event emission) */
  onRawContent?: (content: string) => void;
}
/**
 * FileTailer watches a file for changes and emits parsed stream events.
 *
 * Behavior:
 * - Uses fs.watch() for efficient change detection
 * - Falls back to polling every 500ms (fs.watch misses events sometimes)
 * - Reads new content incrementally, splits into lines
 * - Feeds each line to the parser, emits resulting events
 * - Handles partial lines at buffer boundaries
 * - stop() drains remaining content, flushes the partial line, and signals
 *   end-of-stream to the parser before closing the file handle
 */
export class FileTailer {
  /** Byte offset of the next read into the file. */
  private position = 0;
  private watcher: FSWatcher | null = null;
  private pollInterval: NodeJS.Timeout | null = null;
  private fileHandle: FileHandle | null = null;
  private stopped = false;
  /** Trailing bytes of the last read that did not end in a newline. */
  private partialLine = '';
  /** Re-entrancy guard: at most one readNewContent() runs at a time. */
  private reading = false;
  private readonly filePath: string;
  private readonly agentId: string;
  private readonly parser: StreamParser;
  private readonly onEvent?: (event: StreamEvent) => void;
  private readonly startFromBeginning: boolean;
  private readonly onRawContent?: (content: string) => void;
  constructor(options: FileTailerOptions) {
    this.filePath = options.filePath;
    this.agentId = options.agentId;
    this.parser = options.parser;
    this.onEvent = options.onEvent;
    this.startFromBeginning = options.startFromBeginning ?? false;
    this.onRawContent = options.onRawContent;
  }
  /**
   * Start watching the file for changes.
   * Initializes position, starts fs.watch, and begins poll fallback.
   * Errors during startup are logged, not thrown.
   */
  async start(): Promise<void> {
    if (this.stopped) return;
    log.debug({ filePath: this.filePath, agentId: this.agentId }, 'starting file tailer');
    try {
      // Open file for reading
      this.fileHandle = await open(this.filePath, 'r');
      // Set initial position: 0 to replay the whole file, else seek to end
      // so only content written after start() is emitted.
      if (this.startFromBeginning) {
        this.position = 0;
      } else {
        const stats = await stat(this.filePath);
        this.position = stats.size;
      }
      // Start fs.watch for efficient change detection
      this.watcher = watch(this.filePath, (eventType) => {
        if (eventType === 'change' && !this.stopped) {
          this.readNewContent().catch((err) => {
            log.warn({ err: err instanceof Error ? err.message : String(err), agentId: this.agentId }, 'error reading new content');
          });
        }
      });
      this.watcher.on('error', (err) => {
        log.warn({ err: err instanceof Error ? err.message : String(err), agentId: this.agentId }, 'watcher error');
      });
      // Start poll fallback (fs.watch misses events sometimes)
      this.pollInterval = setInterval(() => {
        if (!this.stopped) {
          this.readNewContent().catch((err) => {
            log.warn({ err: err instanceof Error ? err.message : String(err), agentId: this.agentId }, 'poll read error');
          });
        }
      }, POLL_INTERVAL_MS);
      // If starting from beginning, do initial read
      if (this.startFromBeginning) {
        await this.readNewContent();
      }
    } catch (err) {
      log.error({ err: err instanceof Error ? err.message : String(err), filePath: this.filePath }, 'failed to start file tailer');
    }
  }
  /**
   * Read all new content from the file since the last position.
   * Splits into lines, feeds complete lines to the parser, and keeps the
   * trailing partial line for the next read.
   *
   * @param force read even when `stopped` is set — used by stop() to drain
   *              remaining content. (Bug fix: stop() previously set `stopped`
   *              and then called this method, whose guard made the final
   *              drain a no-op, silently dropping trailing output.)
   */
  private async readNewContent(force = false): Promise<void> {
    if ((this.stopped && !force) || !this.fileHandle || this.reading) return;
    this.reading = true;
    const handle = this.fileHandle;
    try {
      // Loop until fully drained. The previous implementation scheduled the
      // continuation via un-awaited setImmediate, so callers (notably the
      // final drain in stop()) could not wait for backlogs > one buffer.
      // Each `await` below still yields to the event loop between chunks.
      for (;;) {
        const stats = await stat(this.filePath);
        if (stats.size <= this.position) return; // No new content
        const bytesToRead = stats.size - this.position;
        const buffer = Buffer.alloc(Math.min(bytesToRead, READ_BUFFER_SIZE));
        const { bytesRead } = await handle.read(buffer, 0, buffer.length, this.position);
        if (bytesRead === 0) return;
        this.position += bytesRead;
        // Fire raw content callback for DB persistence (before line splitting)
        const rawChunk = buffer.toString('utf-8', 0, bytesRead);
        if (this.onRawContent) {
          this.onRawContent(rawChunk);
        }
        // Prepend any partial line left over from the previous chunk.
        const content = this.partialLine + rawChunk;
        const lines = content.split('\n');
        // Last element is either empty (if content ended with \n) or a partial line
        this.partialLine = lines.pop() ?? '';
        for (const line of lines) {
          if (line.trim()) {
            this.processLine(line);
          }
        }
      }
    } finally {
      this.reading = false;
    }
  }
  /**
   * Process a single line through the parser and emit resulting events.
   */
  private processLine(line: string): void {
    const events = this.parser.parseLine(line);
    for (const event of events) {
      if (this.onEvent) {
        this.onEvent(event);
      }
    }
  }
  /**
   * Stop watching the file.
   * Closes the watcher and poll timer, drains any remaining content, flushes
   * the trailing partial line, signals end-of-stream to the parser, and
   * finally closes the file handle. Idempotent.
   */
  async stop(): Promise<void> {
    if (this.stopped) return;
    this.stopped = true;
    log.debug({ filePath: this.filePath, agentId: this.agentId }, 'stopping file tailer');
    // Close watcher and poll timer first so no new reads get scheduled.
    if (this.watcher) {
      this.watcher.close();
      this.watcher = null;
    }
    if (this.pollInterval) {
      clearInterval(this.pollInterval);
      this.pollInterval = null;
    }
    try {
      // Final drain — must pass force=true because `stopped` is already set
      // and readNewContent() would otherwise return immediately.
      await this.readNewContent(true);
      // Process any remaining partial line
      if (this.partialLine.trim()) {
        this.processLine(this.partialLine);
        this.partialLine = '';
      }
      // Signal end of stream to parser
      const endEvents = this.parser.end();
      for (const event of endEvents) {
        if (this.onEvent) {
          this.onEvent(event);
        }
      }
    } catch {
      // Ignore errors during cleanup
    }
    // Close file handle
    if (this.fileHandle) {
      try {
        await this.fileHandle.close();
      } catch {
        // Ignore close errors
      }
      this.fileHandle = null;
    }
  }
  /**
   * Check if the tailer has been stopped.
   */
  get isStopped(): boolean {
    return this.stopped;
  }
}

View File

@@ -0,0 +1,84 @@
/**
 * Agent Module - Public API
 *
 * Barrel file for the agent subsystem: the AgentManager port interface,
 * concrete adapter implementations, provider registry, prompt builders,
 * signal schema, file-based agent I/O, and lifecycle helper classes.
 * All consumers should import from this index file rather than reaching
 * into individual modules.
 */
// Port interface and types
export type {
  AgentStatus,
  SpawnAgentOptions,
  AgentInfo,
  AgentResult,
  AgentManager,
  AgentInputContext,
} from './types.js';
// Adapter implementations
export { MultiProviderAgentManager } from './manager.js';
/** @deprecated Use MultiProviderAgentManager instead */
export { MultiProviderAgentManager as ClaudeAgentManager } from './manager.js';
export { MockAgentManager, type MockAgentScenario } from './mock-manager.js';
// Provider registry
export {
  getProvider,
  listProviders,
  registerProvider,
  loadProvidersFromFile,
  PROVIDER_PRESETS,
} from './providers/index.js';
export type { AgentProviderConfig } from './providers/index.js';
// Agent prompts
export {
  buildDiscussPrompt,
  buildPlanPrompt,
  buildExecutePrompt,
  buildRefinePrompt,
  buildDetailPrompt,
} from './prompts/index.js';
// Schema
export { agentSignalSchema, agentSignalJsonSchema } from './schema.js';
export type { AgentSignal } from './schema.js';
// Backward compat: legacy names for the signal schema, kept for existing callers
export { agentOutputSchema, agentOutputJsonSchema } from './schema.js';
// File I/O (input/output files exchanged with agents under .cw/)
export {
  writeInputFiles,
  readSummary,
  readPhaseFiles,
  readTaskFiles,
  readDecisionFiles,
  readPageFiles,
  generateId,
} from './file-io.js';
export type {
  WriteInputFilesOptions,
  ParsedSummary,
  ParsedPhaseFile,
  ParsedTaskFile,
  ParsedDecisionFile,
  ParsedPageFile,
} from './file-io.js';
// Content serializer
export { serializePageTree, tiptapJsonToMarkdown } from './content-serializer.js';
export type { PageForSerialization } from './content-serializer.js';
// Alias generator
export { generateUniqueAlias } from './alias.js';
// File tailer for crash-resilient streaming
export { FileTailer } from './file-tailer.js';
export type { FileTailerOptions } from './file-tailer.js';
// Extracted manager helpers
export { ProcessManager } from './process-manager.js';
export { CredentialHandler } from './credential-handler.js';
export { OutputHandler } from './output-handler.js';
export type { ActiveAgent } from './output-handler.js';
export { CleanupManager } from './cleanup-manager.js';

View File

@@ -0,0 +1,108 @@
/**
* CleanupStrategy — Centralized cleanup logic based on debug mode and agent state.
*
* Determines when and how to clean up agent workdirs and resources.
* Supports archive mode for debugging vs. immediate cleanup for production.
*/
import { createModuleLogger } from '../../logger/index.js';
import type { CleanupManager } from '../cleanup-manager.js';
const log = createModuleLogger('cleanup-strategy');
/** What to do with an agent's workdir: delete it, archive it for debugging, or leave it alone. */
export type CleanupAction = 'remove' | 'archive' | 'preserve';
/** Minimal agent shape the cleanup strategy needs. */
export interface AgentInfo {
  id: string;
  name: string;
  status: string;
  initiativeId?: string | null;
  worktreeId: string;
}
/** Decides and performs cleanup for an agent's workdir and resources. */
export interface CleanupStrategy {
  /** Decide which CleanupAction applies, given the agent's status and debug mode. */
  shouldCleanup(agent: AgentInfo, isDebugMode: boolean): Promise<CleanupAction>;
  /** Carry out the previously decided action. */
  executeCleanup(agent: AgentInfo, action: CleanupAction): Promise<void>;
}
/**
 * Default cleanup policy:
 * - waiting_for_input / running agents are always preserved
 * - idle / completed / crashed agents are archived in debug mode, removed otherwise
 * - stopped agents are removed regardless of debug mode
 * - any other status is preserved
 */
export class DefaultCleanupStrategy implements CleanupStrategy {
  constructor(private cleanupManager: CleanupManager) {}
  /**
   * Determine what cleanup action should be taken for an agent.
   * Considers agent status and debug mode setting.
   */
  async shouldCleanup(agent: AgentInfo, isDebugMode: boolean): Promise<CleanupAction> {
    log.debug({
      agentId: agent.id,
      name: agent.name,
      status: agent.status,
      isDebugMode
    }, 'evaluating cleanup action for agent');
    switch (agent.status) {
      case 'waiting_for_input':
        // Never touch agents blocked on a user question.
        log.debug({ agentId: agent.id, status: agent.status }, 'preserving agent waiting for input');
        return 'preserve';
      case 'running':
        log.debug({ agentId: agent.id, status: agent.status }, 'preserving running agent');
        return 'preserve';
      case 'idle':
      case 'completed':
      case 'crashed':
        // Finished agents: keep an archive in debug mode, remove in production.
        if (isDebugMode) {
          log.debug({ agentId: agent.id, status: agent.status }, 'archiving agent in debug mode');
          return 'archive';
        }
        log.debug({ agentId: agent.id, status: agent.status }, 'removing agent in production mode');
        return 'remove';
      case 'stopped':
        // Explicitly stopped agents are cleaned up regardless of debug mode.
        log.debug({ agentId: agent.id, status: agent.status }, 'removing stopped agent');
        return 'remove';
      default:
        log.debug({ agentId: agent.id, status: agent.status }, 'preserving agent with unrecognized status');
        return 'preserve';
    }
  }
  /**
   * Execute the determined cleanup action.
   */
  async executeCleanup(agent: AgentInfo, action: CleanupAction): Promise<void> {
    log.debug({
      agentId: agent.id,
      name: agent.name,
      action
    }, 'executing cleanup action');
    if (action === 'remove') {
      await this.cleanupManager.removeAgentWorktrees(agent.name, agent.initiativeId ?? null);
      await this.cleanupManager.removeAgentBranches(agent.name, agent.initiativeId ?? null);
      await this.cleanupManager.removeAgentLogs(agent.id);
      log.info({ agentId: agent.id, name: agent.name }, 'agent workdir and resources removed');
    } else if (action === 'archive') {
      await this.cleanupManager.archiveForDebug(agent.worktreeId, agent.id);
      log.info({ agentId: agent.id, name: agent.name }, 'agent workdir archived for debugging');
    } else if (action === 'preserve') {
      log.debug({ agentId: agent.id, name: agent.name }, 'agent workdir preserved');
    } else {
      log.warn({ agentId: agent.id, action }, 'unknown cleanup action, preserving by default');
    }
  }
}

View File

@@ -0,0 +1,358 @@
/**
* AgentLifecycleController — Unified orchestrator for complete agent lifecycle.
*
* Replaces scattered lifecycle logic with comprehensive orchestration including:
* - Always clear signal.json before spawn/resume
* - Robust process completion waiting
* - Retry up to 3 times with comprehensive error handling
* - Auth/usage limit error detection with account switching
* - Missing signal recovery with instruction prompts
* - Debug mode archival vs production cleanup
*/
import { createModuleLogger } from '../../logger/index.js';
import type { AgentRepository } from '../../db/repositories/agent-repository.js';
import type { AccountRepository } from '../../db/repositories/account-repository.js';
import type { ProcessManager } from '../process-manager.js';
import type { CleanupManager } from '../cleanup-manager.js';
import type { SpawnAgentOptions } from '../types.js';
import type { SignalManager, SignalData } from './signal-manager.js';
import type { RetryPolicy, AgentError } from './retry-policy.js';
import { AgentExhaustedError, AgentFailureError } from './retry-policy.js';
import type { AgentErrorAnalyzer } from './error-analyzer.js';
import type { CleanupStrategy, AgentInfo } from './cleanup-strategy.js';
const log = createModuleLogger('lifecycle-controller');
/** Outcome of waiting for an agent process to finish. */
export interface CompletionResult {
  // True only when a valid signal.json was produced.
  success: boolean;
  // Parsed signal data when success is true.
  signal?: SignalData;
  // Failure cause when success is false.
  error?: Error;
  exitCode?: number | null;
  stderr?: string;
}
/** Input for resuming a paused agent with answers to its questions. */
export interface ResumeAgentOptions {
  agentId: string;
  answers: Record<string, string>;
}
/**
 * Orchestrates the full agent lifecycle: clears signal.json before each
 * spawn/resume, waits for completion, classifies failures via the error
 * analyzer, retries per the retry policy, switches accounts on usage limits,
 * and delegates post-completion cleanup to the cleanup strategy.
 */
export class AgentLifecycleController {
  constructor(
    private signalManager: SignalManager,
    private retryPolicy: RetryPolicy,
    private errorAnalyzer: AgentErrorAnalyzer,
    private processManager: ProcessManager,
    private repository: AgentRepository,
    // NOTE(review): cleanupManager is injected but not referenced in this
    // class (cleanup goes through cleanupStrategy) — confirm it is still needed.
    private cleanupManager: CleanupManager,
    private cleanupStrategy: CleanupStrategy,
    private accountRepository?: AccountRepository,
    private debug: boolean = false,
  ) {}
  /**
   * Execute spawn operation with comprehensive retry and error handling.
   * Always clears signal.json before starting and waits for process completion.
   */
  async spawnWithRetry(
    spawnFn: (options: SpawnAgentOptions) => Promise<AgentInfo>,
    options: SpawnAgentOptions
  ): Promise<AgentInfo> {
    log.info({
      taskId: options.taskId,
      provider: options.provider,
      initiativeId: options.initiativeId,
      mode: options.mode
    }, 'starting agent spawn with retry');
    return this.executeWithRetry('spawn', spawnFn, options);
  }
  /**
   * Execute resume operation with comprehensive retry and error handling.
   * Always clears signal.json before resuming and waits for process completion.
   */
  async resumeWithRetry(
    resumeFn: (agentId: string, answers: Record<string, string>) => Promise<void>,
    options: ResumeAgentOptions
  ): Promise<void> {
    log.info({
      agentId: options.agentId,
      answerKeys: Object.keys(options.answers)
    }, 'starting agent resume with retry');
    // resumeFn returns void, so re-read the agent row to feed the shared
    // retry orchestrator an AgentInfo.
    await this.executeWithRetry('resume', async () => {
      await resumeFn(options.agentId, options.answers);
      const agent = await this.repository.findById(options.agentId);
      if (!agent) throw new Error(`Agent '${options.agentId}' not found after resume`);
      return this.toAgentInfo(agent);
    }, options);
  }
  /**
   * Main retry orchestrator for spawn/resume operations.
   * Each attempt: run the operation, clear signal.json, wait for completion,
   * then either succeed, throw a terminal error, or delay and retry.
   *
   * @throws AgentExhaustedError when a usage limit forces an account switch
   * @throws AgentFailureError when the error is not retriable or attempts run out
   */
  private async executeWithRetry<T>(
    operation: 'spawn' | 'resume',
    operationFn: (options: T) => Promise<AgentInfo>,
    options: T
  ): Promise<AgentInfo> {
    for (let attempt = 1; attempt <= this.retryPolicy.maxAttempts; attempt++) {
      try {
        log.debug({ operation, attempt, maxAttempts: this.retryPolicy.maxAttempts }, 'starting attempt');
        // Execute operation
        const agent = await operationFn(options);
        const agentWorkdir = this.processManager.getAgentWorkdir(agent.worktreeId);
        // CRITICAL: Always clear signal.json before start — a stale signal
        // from a prior run would be mistaken for this run's completion.
        log.debug({ agentId: agent.id, agentWorkdir }, 'clearing signal.json before process start');
        await this.signalManager.clearSignal(agentWorkdir);
        // Wait for process completion with robust detection
        const result = await this.waitForCompletion(agent);
        if (result.success) {
          // Handle post-completion cleanup
          await this.handlePostCompletion(agent);
          log.info({
            agentId: agent.id,
            name: agent.name,
            attempt,
            operation
          }, 'agent lifecycle completed successfully');
          return agent;
        }
        // Analyze error and determine retry strategy
        const agentError = await this.errorAnalyzer.analyzeError(
          result.error || new Error('Unknown completion failure'),
          result.exitCode,
          result.stderr,
          agentWorkdir
        );
        // Persist error to DB if required
        if (agentError.shouldPersistToDB) {
          await this.persistError(agent.id, agentError);
        }
        // Usage limits are terminal for this controller: mark the account
        // exhausted and surface the condition to the caller.
        if (agentError.requiresAccountSwitch) {
          await this.handleAccountExhaustion(agent.id);
          throw new AgentExhaustedError(agentError.message, agentError);
        }
        // Check if should retry
        if (!this.retryPolicy.shouldRetry(agentError, attempt)) {
          log.warn({
            agentId: agent.id,
            errorType: agentError.type,
            attempt,
            maxAttempts: this.retryPolicy.maxAttempts
          }, 'max retry attempts reached or error not retriable');
          throw new AgentFailureError(agentError.message, agentError);
        }
        // Handle special retry cases
        if (agentError.type === 'missing_signal') {
          // TODO: retry with an instruction prompt telling the agent to
          // write signal.json; currently only logged.
          log.info({
            agentId: agent.id,
            attempt
          }, 'will retry with missing signal instruction (not yet implemented)');
        }
        // Wait before retry
        const delay = this.retryPolicy.getRetryDelay(attempt);
        log.info({
          agentId: agent.id,
          attempt,
          delay,
          errorType: agentError.type,
          errorMessage: agentError.message
        }, 'retrying after delay');
        await this.delay(delay);
      } catch (error) {
        if (error instanceof AgentExhaustedError || error instanceof AgentFailureError) {
          throw error; // Terminal classifications — never retried
        }
        if (attempt === this.retryPolicy.maxAttempts) {
          log.error({
            operation,
            attempt,
            error: error instanceof Error ? error.message : String(error)
          }, 'final attempt failed, giving up');
          throw error;
        }
        log.warn({
          operation,
          attempt,
          error: error instanceof Error ? error.message : String(error)
        }, 'attempt failed, will retry');
      }
    }
    throw new Error('Unexpected: retry loop completed without success or terminal error');
  }
  /**
   * Wait for process completion with robust signal detection.
   * Success means a valid signal.json appeared within the timeout.
   */
  private async waitForCompletion(agent: AgentInfo): Promise<CompletionResult> {
    const agentWorkdir = this.processManager.getAgentWorkdir(agent.worktreeId);
    log.debug({
      agentId: agent.id,
      name: agent.name,
      agentWorkdir
    }, 'waiting for process completion');
    // TODO: Implement waitForProcessCompletion in ProcessManager — currently
    // only the signal file is awaited (30s timeout), not process exit itself.
    const signal = await this.signalManager.waitForSignal(agentWorkdir, 30000);
    if (signal) {
      log.debug({
        agentId: agent.id,
        signalStatus: signal.status
      }, 'agent completed with valid signal');
      return { success: true, signal };
    }
    // No signal found - this is an error condition
    log.warn({
      agentId: agent.id,
      agentWorkdir
    }, 'process completed without valid signal.json');
    return {
      success: false,
      error: new Error('Process completed without valid signal.json'),
      exitCode: null // Would get from ProcessManager
    };
  }
  /**
   * Handle post-completion cleanup based on agent status and debug mode.
   * Cleanup failures are logged, never propagated.
   */
  private async handlePostCompletion(agent: AgentInfo): Promise<void> {
    // Only cleanup if agent is not waiting for user input
    if (agent.status === 'waiting_for_input') {
      log.debug({ agentId: agent.id }, 'agent waiting for input, skipping cleanup');
      return;
    }
    try {
      const cleanupAction = await this.cleanupStrategy.shouldCleanup(agent, this.debug);
      await this.cleanupStrategy.executeCleanup(agent, cleanupAction);
      log.debug({
        agentId: agent.id,
        name: agent.name,
        cleanupAction
      }, 'post-completion cleanup executed');
    } catch (error) {
      log.warn({
        agentId: agent.id,
        error: error instanceof Error ? error.message : String(error)
      }, 'post-completion cleanup failed');
    }
  }
  /**
   * Persist error details to database for debugging.
   * DB failures are logged and swallowed — persistence is best-effort.
   */
  private async persistError(agentId: string, error: AgentError): Promise<void> {
    try {
      // Full error details (type, transience, account-switch flag) are not yet
      // stored — that would need database schema updates. Until then only the
      // exit code is recorded. (Fix: removed an unused `errorData` local that
      // assembled those fields but was never written anywhere.)
      await this.repository.update(agentId, {
        exitCode: error.exitCode,
        updatedAt: new Date(),
      });
      log.debug({
        agentId,
        errorType: error.type,
        exitCode: error.exitCode
      }, 'error details persisted to database');
    } catch (dbError) {
      log.warn({
        agentId,
        error: dbError instanceof Error ? dbError.message : String(dbError)
      }, 'failed to persist error to database');
    }
  }
  /**
   * Handle account exhaustion by marking the agent's account as exhausted
   * for one hour. No-op when no account repository or account id is available.
   */
  private async handleAccountExhaustion(agentId: string): Promise<void> {
    if (!this.accountRepository) {
      log.debug({ agentId }, 'no account repository available for exhaustion handling');
      return;
    }
    try {
      const agent = await this.repository.findById(agentId);
      if (!agent?.accountId) {
        log.debug({ agentId }, 'agent has no account ID for exhaustion handling');
        return;
      }
      // Mark account as exhausted for 1 hour
      const exhaustedUntil = new Date(Date.now() + 60 * 60 * 1000);
      await this.accountRepository.markExhausted(agent.accountId, exhaustedUntil);
      log.info({
        agentId,
        accountId: agent.accountId,
        exhaustedUntil
      }, 'marked account as exhausted due to usage limits');
    } catch (error) {
      log.warn({
        agentId,
        error: error instanceof Error ? error.message : String(error)
      }, 'failed to mark account as exhausted');
    }
  }
  /**
   * Simple delay utility for retry backoff.
   */
  private delay(ms: number): Promise<void> {
    return new Promise(resolve => setTimeout(resolve, ms));
  }
  /**
   * Convert a database agent record to AgentInfo.
   * NOTE(review): parameter stays `any` because the repository row type is
   * not visible here — confirm the row always carries these five fields.
   */
  private toAgentInfo(agent: any): AgentInfo {
    return {
      id: agent.id,
      name: agent.name,
      status: agent.status,
      initiativeId: agent.initiativeId,
      worktreeId: agent.worktreeId,
    };
  }
}

View File

@@ -0,0 +1,214 @@
/**
* ErrorAnalyzer Tests — Verify error classification patterns.
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { AgentErrorAnalyzer } from './error-analyzer.js';
import type { SignalManager } from './signal-manager.js';
describe('AgentErrorAnalyzer', () => {
  let errorAnalyzer: AgentErrorAnalyzer;
  let mockSignalManager: SignalManager;
  beforeEach(() => {
    // Full SignalManager stub; individual tests override checkSignalExists
    // to drive the missing-signal classification path.
    mockSignalManager = {
      clearSignal: vi.fn(),
      checkSignalExists: vi.fn(),
      readSignal: vi.fn(),
      waitForSignal: vi.fn(),
      validateSignalFile: vi.fn(),
    };
    errorAnalyzer = new AgentErrorAnalyzer(mockSignalManager);
  });
  describe('analyzeError', () => {
    describe('auth failure detection', () => {
      it('should detect unauthorized errors', async () => {
        const error = new Error('Unauthorized access');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('auth_failure');
        expect(result.isTransient).toBe(true);
        expect(result.requiresAccountSwitch).toBe(false);
        expect(result.shouldPersistToDB).toBe(true);
      });
      it('should detect invalid token errors', async () => {
        const error = new Error('Invalid token provided');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('auth_failure');
        expect(result.isTransient).toBe(true);
      });
      it('should detect 401 errors', async () => {
        const error = new Error('HTTP 401 - Authentication failed');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('auth_failure');
      });
      it('should detect auth failures in stderr', async () => {
        const error = new Error('Process failed');
        const stderr = 'Error: Authentication failed - expired token';
        // stderr alone carries the matching text here.
        const result = await errorAnalyzer.analyzeError(error, null, stderr);
        expect(result.type).toBe('auth_failure');
      });
    });
    describe('usage limit detection', () => {
      it('should detect rate limit errors', async () => {
        const error = new Error('Rate limit exceeded');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('usage_limit');
        expect(result.isTransient).toBe(false);
        expect(result.requiresAccountSwitch).toBe(true);
        expect(result.shouldPersistToDB).toBe(true);
      });
      it('should detect quota exceeded errors', async () => {
        const error = new Error('Quota exceeded for this month');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('usage_limit');
      });
      it('should detect 429 errors', async () => {
        const error = new Error('HTTP 429 - Too many requests');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('usage_limit');
      });
      it('should detect usage limits in stderr', async () => {
        const error = new Error('Request failed');
        const stderr = 'API usage limit reached. Try again later.';
        const result = await errorAnalyzer.analyzeError(error, null, stderr);
        expect(result.type).toBe('usage_limit');
      });
    });
    describe('timeout detection', () => {
      it('should detect timeout errors', async () => {
        const error = new Error('Request timeout');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('timeout');
        expect(result.isTransient).toBe(true);
        expect(result.requiresAccountSwitch).toBe(false);
      });
      it('should detect timed out errors', async () => {
        const error = new Error('Connection timed out');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('timeout');
      });
    });
    describe('missing signal detection', () => {
      // missing_signal requires BOTH a clean exit (code 0) and an absent signal.json.
      it('should detect missing signal when process exits successfully', async () => {
        vi.mocked(mockSignalManager.checkSignalExists).mockResolvedValue(false);
        const error = new Error('No output');
        const result = await errorAnalyzer.analyzeError(error, 0, undefined, '/agent/workdir');
        expect(result.type).toBe('missing_signal');
        expect(result.isTransient).toBe(true);
        expect(result.requiresAccountSwitch).toBe(false);
        expect(result.shouldPersistToDB).toBe(false);
        expect(mockSignalManager.checkSignalExists).toHaveBeenCalledWith('/agent/workdir');
      });
      it('should not detect missing signal when signal exists', async () => {
        vi.mocked(mockSignalManager.checkSignalExists).mockResolvedValue(true);
        const error = new Error('No output');
        const result = await errorAnalyzer.analyzeError(error, 0, undefined, '/agent/workdir');
        expect(result.type).toBe('unknown');
      });
      it('should not detect missing signal for non-zero exit codes', async () => {
        const error = new Error('Process failed');
        const result = await errorAnalyzer.analyzeError(error, 1, undefined, '/agent/workdir');
        expect(result.type).toBe('process_crash');
      });
    });
    describe('process crash detection', () => {
      it('should detect crashes with non-zero exit code', async () => {
        const error = new Error('Process exited with code 1');
        const result = await errorAnalyzer.analyzeError(error, 1);
        expect(result.type).toBe('process_crash');
        expect(result.exitCode).toBe(1);
        expect(result.shouldPersistToDB).toBe(true);
      });
      it('should detect transient crashes based on exit code', async () => {
        const error = new Error('Process interrupted');
        const result = await errorAnalyzer.analyzeError(error, 130); // SIGINT
        expect(result.type).toBe('process_crash');
        expect(result.isTransient).toBe(true);
      });
      it('should detect signal-based crashes as transient', async () => {
        const error = new Error('Segmentation fault');
        const result = await errorAnalyzer.analyzeError(error, 139); // SIGSEGV (128+11, signal-based)
        expect(result.type).toBe('process_crash');
        expect(result.isTransient).toBe(true); // signal-based exit codes (128-255) are transient
      });
      it('should detect transient patterns in stderr', async () => {
        const error = new Error('Process failed');
        const stderr = 'Network error: connection refused';
        const result = await errorAnalyzer.analyzeError(error, 1, stderr);
        expect(result.type).toBe('process_crash');
        expect(result.isTransient).toBe(true);
      });
    });
    describe('unknown error handling', () => {
      it('should classify unrecognized errors as unknown', async () => {
        const error = new Error('Something very weird happened');
        const result = await errorAnalyzer.analyzeError(error);
        expect(result.type).toBe('unknown');
        expect(result.isTransient).toBe(false);
        expect(result.requiresAccountSwitch).toBe(false);
        expect(result.shouldPersistToDB).toBe(true);
      });
      it('should handle string errors', async () => {
        const result = await errorAnalyzer.analyzeError('String error message');
        expect(result.type).toBe('unknown');
        expect(result.message).toBe('String error message');
      });
    });
    describe('error context preservation', () => {
      it('should preserve original error object', async () => {
        const originalError = new Error('Test error');
        const result = await errorAnalyzer.analyzeError(originalError);
        expect(result.originalError).toBe(originalError);
      });
      it('should preserve exit code and signal', async () => {
        const error = new Error('Process failed');
        const result = await errorAnalyzer.analyzeError(error, 42, 'stderr output');
        expect(result.exitCode).toBe(42);
      });
    });
  });
});

View File

@@ -0,0 +1,233 @@
/**
* ErrorAnalyzer — Intelligent error classification and handling strategies.
*
* Analyzes various error conditions from agent processes and classifies them
* for appropriate retry and recovery strategies. Replaces scattered error
* handling with centralized, comprehensive error analysis.
*/
import { createModuleLogger } from '../../logger/index.js';
import type { SignalManager } from './signal-manager.js';
import type { AgentError, AgentErrorType } from './retry-policy.js';
const log = createModuleLogger('error-analyzer');
// Common error patterns for different providers.
// Each key is an AgentErrorType; matchesPattern() tests the regexes against
// the combined error message + stderr text, so a hit in either classifies
// the failure.
const ERROR_PATTERNS = {
  auth_failure: [
    /unauthorized/i,
    /invalid.*(token|key|credential)/i,
    /authentication.*failed/i,
    // NOTE(review): bare digit patterns like /401/ match those digits anywhere
    // in the text (e.g. inside an ID or byte count), not only HTTP status
    // codes — confirm this looseness is acceptable.
    /401/,
    /access.*denied/i,
    /invalid.*session/i,
    /expired.*token/i,
  ],
  usage_limit: [
    /rate.*(limit|exceeded)/i,
    /quota.*exceeded/i,
    /too.*many.*requests/i,
    /429/,
    /usage.*limit/i,
    /throttled/i,
    /credit.*insufficient/i,
    /api.*limit.*reached/i,
  ],
  timeout: [
    /timeout/i,
    /timed.*out/i,
    /deadline.*exceeded/i,
    /connection.*timeout/i,
    /read.*timeout/i,
  ],
  process_crash: [
    /segmentation.*fault/i,
    /core.*dumped/i,
    /fatal.*error/i,
    /killed/i,
    /aborted/i,
  ],
};
export class AgentErrorAnalyzer {
  constructor(private signalManager: SignalManager) {}

  /**
   * Analyze an error and classify it for retry strategy.
   * Combines multiple signals: error message, exit code, stderr, and workdir state.
   *
   * Classification precedence: auth failure → usage limit → timeout →
   * missing signal (clean exit but no signal.json) → process crash → unknown.
   *
   * @param error - Error object (or plain message) from the agent run.
   * @param exitCode - Process exit code when known; null/undefined when unavailable.
   * @param stderr - Captured stderr, matched against patterns alongside the message.
   * @param agentWorkdir - Agent workdir; probed for signal.json on clean exits.
   * @returns A classified AgentError carrying retry/persistence strategy flags.
   */
  async analyzeError(
    error: Error | string,
    exitCode?: number | null,
    stderr?: string,
    agentWorkdir?: string
  ): Promise<AgentError> {
    const errorMessage = error instanceof Error ? error.message : String(error);
    // Patterns are matched against message and stderr combined.
    const fullContext = [errorMessage, stderr].filter(Boolean).join(' ');
    log.debug({
      errorMessage,
      exitCode,
      hasStderr: !!stderr,
      hasWorkdir: !!agentWorkdir
    }, 'analyzing agent error');
    // Auth failures are treated as transient: tokens may refresh before a retry.
    if (this.matchesPattern(fullContext, ERROR_PATTERNS.auth_failure)) {
      return {
        type: 'auth_failure',
        message: errorMessage,
        isTransient: true,
        requiresAccountSwitch: false,
        shouldPersistToDB: true,
        exitCode,
        originalError: error instanceof Error ? error : undefined,
      };
    }
    // Usage limits are not retriable on the same account.
    if (this.matchesPattern(fullContext, ERROR_PATTERNS.usage_limit)) {
      return {
        type: 'usage_limit',
        message: errorMessage,
        isTransient: false,
        requiresAccountSwitch: true,
        shouldPersistToDB: true,
        exitCode,
        originalError: error instanceof Error ? error : undefined,
      };
    }
    // Timeouts are retriable without switching accounts.
    if (this.matchesPattern(fullContext, ERROR_PATTERNS.timeout)) {
      return {
        type: 'timeout',
        message: errorMessage,
        isTransient: true,
        requiresAccountSwitch: false,
        shouldPersistToDB: true,
        exitCode,
        originalError: error instanceof Error ? error : undefined,
      };
    }
    // Special case: process completed successfully but no signal.json
    if (agentWorkdir && exitCode === 0) {
      const hasSignal = await this.signalManager.checkSignalExists(agentWorkdir);
      if (!hasSignal) {
        log.debug({ agentWorkdir }, 'process completed successfully but no signal.json found');
        return {
          type: 'missing_signal',
          message: 'Process completed successfully but no signal.json was generated',
          isTransient: true,
          requiresAccountSwitch: false,
          shouldPersistToDB: false, // retried with an instruction; not persisted
          exitCode,
          originalError: error instanceof Error ? error : undefined,
        };
      }
    }
    // Crash: either a crash-like message or any non-zero exit code.
    if (this.matchesPattern(fullContext, ERROR_PATTERNS.process_crash) ||
        (exitCode !== null && exitCode !== 0 && exitCode !== undefined)) {
      // Determine if crash is transient based on exit code and patterns
      const isTransient = this.isTransientCrash(exitCode, stderr);
      return {
        type: 'process_crash',
        message: errorMessage,
        isTransient,
        requiresAccountSwitch: false,
        shouldPersistToDB: true,
        exitCode,
        originalError: error instanceof Error ? error : undefined,
      };
    }
    // Unknown error type. Note: previously this logged
    // `stderr?.substring(0, 200) + '...'`, which produced the literal string
    // "undefined..." when stderr was absent — fixed via truncateForLog().
    log.debug({
      errorMessage,
      exitCode,
      stderr: stderr === undefined ? undefined : this.truncateForLog(stderr, 200)
    }, 'error does not match known patterns, classifying as unknown');
    return {
      type: 'unknown',
      message: errorMessage,
      isTransient: false,
      requiresAccountSwitch: false,
      shouldPersistToDB: true,
      exitCode,
      originalError: error instanceof Error ? error : undefined,
    };
  }

  /**
   * Validate credentials with a brief test request using invalid token.
   * This helps distinguish between token expiry vs. account exhaustion.
   *
   * @returns Always true for now — provider-specific validation is not yet implemented.
   */
  async validateTokenWithInvalidRequest(accountId: string): Promise<boolean> {
    // User requirement: "brief check with invalid access token to determine behavior"
    // This would need integration with credential system and is provider-specific
    // For now, return true to indicate token appears valid
    log.debug({ accountId }, 'token validation requested (not yet implemented)');
    return true;
  }

  /**
   * Check if error message or stderr matches any of the given patterns.
   * Empty/missing text never matches.
   */
  private matchesPattern(text: string, patterns: RegExp[]): boolean {
    if (!text) return false;
    return patterns.some(pattern => pattern.test(text));
  }

  /**
   * Truncate text for structured log output, appending an ellipsis only
   * when the text was actually shortened.
   */
  private truncateForLog(text: string, maxLength: number): string {
    return text.length > maxLength ? `${text.substring(0, maxLength)}...` : text;
  }

  /**
   * Determine if a process crash is likely transient (can be retried).
   * Based on exit codes and stderr content.
   */
  private isTransientCrash(exitCode?: number | null, stderr?: string): boolean {
    // Exit codes that indicate transient failures
    const transientExitCodes = new Set([
      130, // SIGINT (interrupted)
      143, // SIGTERM (terminated)
      124, // timeout command
      1,   // Generic error (might be transient)
    ]);
    if (exitCode !== null && exitCode !== undefined) {
      if (transientExitCodes.has(exitCode)) {
        log.debug({ exitCode }, 'exit code indicates transient failure');
        return true;
      }
      // Signal-based exit codes (128 + signal number) are treated as transient.
      if (exitCode > 128 && exitCode < 256) {
        log.debug({ exitCode }, 'signal-based exit code may be transient');
        return true;
      }
    }
    // Check stderr for transient patterns
    if (stderr) {
      const transientPatterns = [
        /temporary/i,
        /network.*error/i,
        /connection.*refused/i,
        /service.*unavailable/i,
        /disk.*full/i,
        /out.*of.*memory/i,
      ];
      if (transientPatterns.some(pattern => pattern.test(stderr))) {
        log.debug({ stderr: this.truncateForLog(stderr, 100) }, 'stderr indicates transient failure');
        return true;
      }
    }
    log.debug({ exitCode, hasStderr: !!stderr }, 'crash appears non-transient');
    return false;
  }
}

View File

@@ -0,0 +1,58 @@
/**
* Lifecycle Factory — Wire up all lifecycle components with proper dependencies.
*
* Creates and configures the complete lifecycle management system with all
* dependencies properly injected. Provides simple entry point for integration.
*/
import { FileSystemSignalManager } from './signal-manager.js';
import { DefaultRetryPolicy } from './retry-policy.js';
import { AgentErrorAnalyzer } from './error-analyzer.js';
import { DefaultCleanupStrategy } from './cleanup-strategy.js';
import { AgentLifecycleController } from './controller.js';
import type { AgentRepository } from '../../db/repositories/agent-repository.js';
import type { AccountRepository } from '../../db/repositories/account-repository.js';
import type { ProcessManager } from '../process-manager.js';
import type { CleanupManager } from '../cleanup-manager.js';
/** Dependencies required to assemble an AgentLifecycleController. */
export interface LifecycleFactoryOptions {
  repository: AgentRepository;           // agent persistence
  processManager: ProcessManager;        // spawns/tracks agent processes
  cleanupManager: CleanupManager;        // performs workdir/resource cleanup
  accountRepository?: AccountRepository; // optional; enables account failover
  debug?: boolean;                       // defaults to false in the factory
}
/**
* Create a fully configured AgentLifecycleController with all dependencies.
*/
export function createLifecycleController(options: LifecycleFactoryOptions): AgentLifecycleController {
const {
repository,
processManager,
cleanupManager,
accountRepository,
debug = false
} = options;
// Create core components
const signalManager = new FileSystemSignalManager();
const retryPolicy = new DefaultRetryPolicy();
const errorAnalyzer = new AgentErrorAnalyzer(signalManager);
const cleanupStrategy = new DefaultCleanupStrategy(cleanupManager);
// Wire up the main controller
const lifecycleController = new AgentLifecycleController(
signalManager,
retryPolicy,
errorAnalyzer,
processManager,
repository,
cleanupManager,
cleanupStrategy,
accountRepository,
debug
);
return lifecycleController;
}

View File

@@ -0,0 +1,32 @@
/**
* Agent Lifecycle Management — Unified components for robust agent orchestration.
*
* Exports all lifecycle management components for comprehensive agent handling:
* - SignalManager: Atomic signal.json operations
* - RetryPolicy: Intelligent retry strategies
* - ErrorAnalyzer: Error classification and handling
* - CleanupStrategy: Debug vs production cleanup logic
* - AgentLifecycleController: Main orchestrator
*/
// Signal file lifecycle: atomic signal.json clear/read/wait operations.
export { FileSystemSignalManager, type SignalManager, type SignalData } from './signal-manager.js';
// Retry strategy contract, default policy, and terminal error classes.
export {
  DefaultRetryPolicy,
  type RetryPolicy,
  type AgentError,
  type AgentErrorType,
  AgentExhaustedError,
  AgentFailureError
} from './retry-policy.js';
// Error classification.
export { AgentErrorAnalyzer } from './error-analyzer.js';
// Cleanup decisions; AgentInfo is re-exported under an alias (LifecycleAgentInfo).
export {
  DefaultCleanupStrategy,
  type CleanupStrategy,
  type CleanupAction,
  type AgentInfo as LifecycleAgentInfo
} from './cleanup-strategy.js';
// Main orchestrator and its result/option types.
export {
  AgentLifecycleController,
  type CompletionResult,
  type ResumeAgentOptions
} from './controller.js';
View File

@@ -0,0 +1,59 @@
import { describe, it, expect } from 'vitest';
import { MISSING_SIGNAL_INSTRUCTION, addInstructionToPrompt } from './instructions.js';
describe('instructions', () => {
  describe('MISSING_SIGNAL_INSTRUCTION', () => {
    it('should contain key guidance about signal.json creation', () => {
      expect(MISSING_SIGNAL_INSTRUCTION).toContain('signal.json');
      expect(MISSING_SIGNAL_INSTRUCTION).toContain('.cw/output/signal.json');
      expect(MISSING_SIGNAL_INSTRUCTION).toContain('"status": "done"');
      expect(MISSING_SIGNAL_INSTRUCTION).toContain('"status": "questions"');
      expect(MISSING_SIGNAL_INSTRUCTION).toContain('"status": "error"');
    });
    it('should be a clear instruction for missing signal recovery', () => {
      expect(MISSING_SIGNAL_INSTRUCTION).toContain('IMPORTANT');
      expect(MISSING_SIGNAL_INSTRUCTION).toContain('previous execution completed');
      expect(MISSING_SIGNAL_INSTRUCTION).toContain('did not generate');
    });
  });
  describe('addInstructionToPrompt', () => {
    it('should add instruction to the beginning of the prompt', () => {
      const originalPrompt = 'Please help me with this task';
      const instruction = 'First, create a file called test.txt';
      const result = addInstructionToPrompt(originalPrompt, instruction);
      expect(result).toBe(`First, create a file called test.txt\n\nPlease help me with this task`);
    });
    it('should trim the instruction', () => {
      const originalPrompt = 'Please help me';
      const instruction = '  Important: Do this first  ';
      const result = addInstructionToPrompt(originalPrompt, instruction);
      expect(result).toBe(`Important: Do this first\n\nPlease help me`);
    });
    it('should handle empty original prompt', () => {
      const originalPrompt = '';
      const instruction = 'Create a signal.json file';
      const result = addInstructionToPrompt(originalPrompt, instruction);
      // The blank-line separator is always appended, even with no prompt.
      expect(result).toBe(`Create a signal.json file\n\n`);
    });
    it('should handle missing signal instruction with real prompt', () => {
      const originalPrompt = 'Fix the bug in the authentication system';
      const result = addInstructionToPrompt(originalPrompt, MISSING_SIGNAL_INSTRUCTION);
      expect(result).toContain('IMPORTANT: Your previous execution completed');
      expect(result).toContain('Fix the bug in the authentication system');
      // Instruction text must precede the original prompt.
      expect(result.indexOf('IMPORTANT')).toBeLessThan(result.indexOf('Fix the bug'));
    });
  });
});

View File

@@ -0,0 +1,28 @@
/**
 * Instructions for agent retry scenarios.
 *
 * MISSING_SIGNAL_INSTRUCTION is prepended (via addInstructionToPrompt) to the
 * retry prompt when a previous run exited cleanly but produced no
 * .cw/output/signal.json. The template below is a runtime string consumed by
 * the agent — do not reformat or translate its contents.
 */
export const MISSING_SIGNAL_INSTRUCTION = `
IMPORTANT: Your previous execution completed but did not generate the required signal.json file.
Please ensure you complete your task and create a signal.json file at .cw/output/signal.json with one of these formats:
For successful completion:
{"status": "done"}
For questions requiring user input:
{"status": "questions", "questions": [{"id": "q1", "question": "Your question here"}]}
For errors:
{"status": "error", "error": "Description of the error"}
Please retry your task and ensure the signal.json file is properly created.
`;
/**
 * Prepend an instruction to an existing prompt.
 *
 * The instruction is whitespace-trimmed and separated from the original
 * prompt by a blank line; the separator is emitted even when the original
 * prompt is empty.
 *
 * @param originalPrompt - The prompt to augment.
 * @param instruction - Text to place before the prompt.
 * @returns The combined prompt string.
 */
export function addInstructionToPrompt(originalPrompt: string, instruction: string): string {
  const header = instruction.trim();
  return [header, originalPrompt].join('\n\n');
}

View File

@@ -0,0 +1,146 @@
/**
* RetryPolicy Tests — Verify retry logic for different error types.
*/
import { describe, it, expect, beforeEach } from 'vitest';
import { DefaultRetryPolicy, type AgentError } from './retry-policy.js';
describe('DefaultRetryPolicy', () => {
  let retryPolicy: DefaultRetryPolicy;
  beforeEach(() => {
    retryPolicy = new DefaultRetryPolicy();
  });
  describe('configuration', () => {
    it('should have correct max attempts', () => {
      expect(retryPolicy.maxAttempts).toBe(3);
    });
    it('should have exponential backoff delays', () => {
      expect(retryPolicy.backoffMs).toEqual([1000, 2000, 4000]);
    });
  });
  describe('shouldRetry', () => {
    // Retriable types (auth_failure, missing_signal, timeout, transient crash)
    // stop retrying once attempt reaches maxAttempts (3).
    it('should retry auth failures', () => {
      const error: AgentError = {
        type: 'auth_failure',
        message: 'Unauthorized',
        isTransient: true,
        requiresAccountSwitch: false,
        shouldPersistToDB: true
      };
      expect(retryPolicy.shouldRetry(error, 1)).toBe(true);
      expect(retryPolicy.shouldRetry(error, 2)).toBe(true);
      expect(retryPolicy.shouldRetry(error, 3)).toBe(false); // At max attempts
    });
    it('should not retry usage limit errors', () => {
      const error: AgentError = {
        type: 'usage_limit',
        message: 'Rate limit exceeded',
        isTransient: false,
        requiresAccountSwitch: true,
        shouldPersistToDB: true
      };
      expect(retryPolicy.shouldRetry(error, 1)).toBe(false);
      expect(retryPolicy.shouldRetry(error, 2)).toBe(false);
    });
    it('should retry missing signal errors', () => {
      const error: AgentError = {
        type: 'missing_signal',
        message: 'No signal.json found',
        isTransient: true,
        requiresAccountSwitch: false,
        shouldPersistToDB: false
      };
      expect(retryPolicy.shouldRetry(error, 1)).toBe(true);
      expect(retryPolicy.shouldRetry(error, 2)).toBe(true);
      expect(retryPolicy.shouldRetry(error, 3)).toBe(false); // At max attempts
    });
    it('should retry transient process crashes', () => {
      const error: AgentError = {
        type: 'process_crash',
        message: 'Process died',
        isTransient: true,
        requiresAccountSwitch: false,
        shouldPersistToDB: true
      };
      expect(retryPolicy.shouldRetry(error, 1)).toBe(true);
      expect(retryPolicy.shouldRetry(error, 2)).toBe(true);
    });
    it('should not retry non-transient process crashes', () => {
      const error: AgentError = {
        type: 'process_crash',
        message: 'Segmentation fault',
        isTransient: false,
        requiresAccountSwitch: false,
        shouldPersistToDB: true
      };
      expect(retryPolicy.shouldRetry(error, 1)).toBe(false);
      expect(retryPolicy.shouldRetry(error, 2)).toBe(false);
    });
    it('should retry timeouts', () => {
      const error: AgentError = {
        type: 'timeout',
        message: 'Process timed out',
        isTransient: true,
        requiresAccountSwitch: false,
        shouldPersistToDB: true
      };
      expect(retryPolicy.shouldRetry(error, 1)).toBe(true);
      expect(retryPolicy.shouldRetry(error, 2)).toBe(true);
      expect(retryPolicy.shouldRetry(error, 3)).toBe(false); // At max attempts
    });
    it('should not retry unknown errors', () => {
      const error: AgentError = {
        type: 'unknown',
        message: 'Something weird happened',
        isTransient: false,
        requiresAccountSwitch: false,
        shouldPersistToDB: true
      };
      expect(retryPolicy.shouldRetry(error, 1)).toBe(false);
      expect(retryPolicy.shouldRetry(error, 2)).toBe(false);
    });
    it('should not retry when at max attempts regardless of error type', () => {
      const error: AgentError = {
        type: 'auth_failure',
        message: 'Unauthorized',
        isTransient: true,
        requiresAccountSwitch: false,
        shouldPersistToDB: true
      };
      expect(retryPolicy.shouldRetry(error, 3)).toBe(false);
      expect(retryPolicy.shouldRetry(error, 4)).toBe(false);
    });
  });
  describe('getRetryDelay', () => {
    it('should return correct delay for each attempt', () => {
      expect(retryPolicy.getRetryDelay(1)).toBe(1000);
      expect(retryPolicy.getRetryDelay(2)).toBe(2000);
      expect(retryPolicy.getRetryDelay(3)).toBe(4000);
    });
    it('should cap delay at maximum for high attempts', () => {
      expect(retryPolicy.getRetryDelay(4)).toBe(4000);
      expect(retryPolicy.getRetryDelay(10)).toBe(4000);
    });
  });
});

View File

@@ -0,0 +1,121 @@
/**
* RetryPolicy — Comprehensive retry logic with error-specific handling.
*
* Implements intelligent retry strategies for different types of agent failures.
* Replaces scattered retry logic with unified, configurable policies.
*/
import { createModuleLogger } from '../../logger/index.js';
const log = createModuleLogger('retry-policy');
/** Classified categories of agent failure; drives the retry strategy. */
export type AgentErrorType =
  | 'auth_failure'   // 401 errors, invalid tokens
  | 'usage_limit'    // Rate limiting, quota exceeded
  | 'missing_signal' // Process completed but no signal.json
  | 'process_crash'  // Process exited with error code
  | 'timeout'        // Process timed out
  | 'unknown';       // Unclassified errors
/** A classified agent error plus the handling-strategy flags derived from it. */
export interface AgentError {
  type: AgentErrorType;
  message: string;
  isTransient: boolean; // Can this error be resolved by retrying?
  requiresAccountSwitch: boolean; // Should we switch to next account?
  shouldPersistToDB: boolean; // Should this error be saved for debugging?
  exitCode?: number | null; // Process exit code, when known
  signal?: string | null;   // Terminating signal, when applicable
  originalError?: Error;    // Raw error preserved for diagnostics
}
/** Retry decision contract; see DefaultRetryPolicy for the default behavior. */
export interface RetryPolicy {
  readonly maxAttempts: number;  // hard cap on attempts
  readonly backoffMs: number[];  // delay schedule, indexed by attempt - 1
  shouldRetry(error: AgentError, attempt: number): boolean;
  getRetryDelay(attempt: number): number;
}
export class DefaultRetryPolicy implements RetryPolicy {
  /** Maximum attempts before a retriable error becomes terminal. */
  readonly maxAttempts = 3;
  /** Backoff schedule indexed by (attempt - 1). */
  readonly backoffMs = [1000, 2000, 4000]; // 1s, 2s, 4s exponential backoff

  /**
   * Decide whether a failed attempt should be retried.
   *
   * Retries auth failures, missing signals, timeouts, and transient crashes
   * up to maxAttempts; never retries usage limits (account switch needed)
   * or unknown errors.
   *
   * @param error - The classified error from the last attempt.
   * @param attempt - 1-based attempt number that just failed.
   */
  shouldRetry(error: AgentError, attempt: number): boolean {
    if (attempt >= this.maxAttempts) {
      log.debug({
        errorType: error.type,
        attempt,
        maxAttempts: this.maxAttempts
      }, 'max retry attempts reached');
      return false;
    }
    switch (error.type) {
      case 'auth_failure':
        // Retry auth failures - tokens might be refreshed
        log.debug({ attempt, errorType: error.type }, 'retrying auth failure');
        return true;
      case 'usage_limit':
        // Don't retry usage limits - need account switch
        log.debug({ attempt, errorType: error.type }, 'not retrying usage limit - requires account switch');
        return false;
      case 'missing_signal':
        // Retry missing signal - add instruction prompt
        log.debug({ attempt, errorType: error.type }, 'retrying missing signal with instruction');
        return true;
      case 'process_crash': {
        // Braces scope the const to this case; a lexical declaration in an
        // unbraced case leaks across the whole switch (no-case-declarations).
        const shouldRetryTransient = error.isTransient;
        log.debug({
          attempt,
          errorType: error.type,
          isTransient: error.isTransient,
          shouldRetry: shouldRetryTransient
        }, 'process crash retry decision');
        return shouldRetryTransient;
      }
      case 'timeout':
        // Retry timeouts up to max attempts
        log.debug({ attempt, errorType: error.type }, 'retrying timeout');
        return true;
      case 'unknown':
      default:
        // Don't retry unknown errors by default
        log.debug({ attempt, errorType: error.type }, 'not retrying unknown error');
        return false;
    }
  }

  /**
   * Delay before the given (1-based) attempt, clamped to the last
   * configured backoff for attempts beyond the schedule.
   */
  getRetryDelay(attempt: number): number {
    const index = Math.min(attempt - 1, this.backoffMs.length - 1);
    // `??` (not `||`) so a configured delay of 0 would be honored.
    const delay = this.backoffMs[index] ?? this.backoffMs[this.backoffMs.length - 1];
    log.debug({ attempt, delay }, 'retry delay calculated');
    return delay;
  }
}
/**
 * AgentExhaustedError - Special error indicating account needs switching.
 * When thrown, caller should attempt account failover rather than retry.
 */
export class AgentExhaustedError extends Error {
  /** The classified error that triggered exhaustion, when available. */
  readonly originalError?: AgentError;

  constructor(message: string, originalError?: AgentError) {
    super(message);
    this.name = 'AgentExhaustedError';
    this.originalError = originalError;
  }
}
/**
 * AgentFailureError - Terminal failure that cannot be retried.
 * Indicates all retry attempts have been exhausted or error is non-retriable.
 */
export class AgentFailureError extends Error {
  /** The classified error behind the terminal failure, when available. */
  readonly originalError?: AgentError;

  constructor(message: string, originalError?: AgentError) {
    super(message);
    this.name = 'AgentFailureError';
    this.originalError = originalError;
  }
}

View File

@@ -0,0 +1,180 @@
/**
* SignalManager Tests — Verify atomic signal.json operations.
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { mkdtemp, rm, writeFile, mkdir } from 'node:fs/promises';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
import { FileSystemSignalManager } from './signal-manager.js';
describe('FileSystemSignalManager', () => {
  let tempDir: string;
  let agentWorkdir: string;
  let signalManager: FileSystemSignalManager;
  beforeEach(async () => {
    // Fresh temp workdir with the expected .cw/output layout for each test.
    tempDir = await mkdtemp(join(tmpdir(), 'signal-manager-test-'));
    agentWorkdir = join(tempDir, 'agent-workdir');
    await mkdir(join(agentWorkdir, '.cw', 'output'), { recursive: true });
    signalManager = new FileSystemSignalManager();
  });
  afterEach(async () => {
    await rm(tempDir, { recursive: true, force: true });
  });
  describe('clearSignal', () => {
    it('should remove existing signal.json file', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      await writeFile(signalPath, JSON.stringify({ status: 'done' }));
      await signalManager.clearSignal(agentWorkdir);
      const exists = await signalManager.checkSignalExists(agentWorkdir);
      expect(exists).toBe(false);
    });
    it('should not throw if signal.json does not exist', async () => {
      await expect(signalManager.clearSignal(agentWorkdir)).resolves.not.toThrow();
    });
  });
  describe('checkSignalExists', () => {
    it('should return true when signal.json exists', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      await writeFile(signalPath, JSON.stringify({ status: 'done' }));
      const exists = await signalManager.checkSignalExists(agentWorkdir);
      expect(exists).toBe(true);
    });
    it('should return false when signal.json does not exist', async () => {
      const exists = await signalManager.checkSignalExists(agentWorkdir);
      expect(exists).toBe(false);
    });
  });
  describe('readSignal', () => {
    // readSignal returns null for every invalid case rather than throwing.
    it('should read valid done signal', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      const expectedSignal = { status: 'done' };
      await writeFile(signalPath, JSON.stringify(expectedSignal));
      const signal = await signalManager.readSignal(agentWorkdir);
      expect(signal).toEqual(expectedSignal);
    });
    it('should read valid questions signal', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      const expectedSignal = {
        status: 'questions',
        questions: [{ id: '1', question: 'What to do?' }]
      };
      await writeFile(signalPath, JSON.stringify(expectedSignal));
      const signal = await signalManager.readSignal(agentWorkdir);
      expect(signal).toEqual(expectedSignal);
    });
    it('should read valid error signal', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      const expectedSignal = { status: 'error', error: 'Something went wrong' };
      await writeFile(signalPath, JSON.stringify(expectedSignal));
      const signal = await signalManager.readSignal(agentWorkdir);
      expect(signal).toEqual(expectedSignal);
    });
    it('should return null for invalid JSON', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      await writeFile(signalPath, '{ invalid json');
      const signal = await signalManager.readSignal(agentWorkdir);
      expect(signal).toBeNull();
    });
    it('should return null for invalid status', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      await writeFile(signalPath, JSON.stringify({ status: 'invalid' }));
      const signal = await signalManager.readSignal(agentWorkdir);
      expect(signal).toBeNull();
    });
    it('should return null for empty file', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      await writeFile(signalPath, '');
      const signal = await signalManager.readSignal(agentWorkdir);
      expect(signal).toBeNull();
    });
    it('should return null when file does not exist', async () => {
      const signal = await signalManager.readSignal(agentWorkdir);
      expect(signal).toBeNull();
    });
  });
  describe('waitForSignal', () => {
    it('should return signal when file already exists', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      const expectedSignal = { status: 'done' };
      await writeFile(signalPath, JSON.stringify(expectedSignal));
      const signal = await signalManager.waitForSignal(agentWorkdir, 1000);
      expect(signal).toEqual(expectedSignal);
    });
    it('should wait for signal to appear', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      const expectedSignal = { status: 'done' };
      // Write signal after a delay (100ms, well inside the 1000ms timeout).
      setTimeout(async () => {
        await writeFile(signalPath, JSON.stringify(expectedSignal));
      }, 100);
      const signal = await signalManager.waitForSignal(agentWorkdir, 1000);
      expect(signal).toEqual(expectedSignal);
    });
    it('should timeout if signal never appears', async () => {
      const signal = await signalManager.waitForSignal(agentWorkdir, 100);
      expect(signal).toBeNull();
    });
  });
  describe('validateSignalFile', () => {
    it('should return true for complete valid JSON file', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      await writeFile(signalPath, JSON.stringify({ status: 'done' }));
      const isValid = await signalManager.validateSignalFile(signalPath);
      expect(isValid).toBe(true);
    });
    it('should return false for incomplete JSON', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      await writeFile(signalPath, '{ "status": "don');
      const isValid = await signalManager.validateSignalFile(signalPath);
      expect(isValid).toBe(false);
    });
    it('should return false for empty file', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      await writeFile(signalPath, '');
      const isValid = await signalManager.validateSignalFile(signalPath);
      expect(isValid).toBe(false);
    });
    it('should return false when file does not exist', async () => {
      const signalPath = join(agentWorkdir, '.cw/output/signal.json');
      const isValid = await signalManager.validateSignalFile(signalPath);
      expect(isValid).toBe(false);
    });
  });
});

View File

@@ -0,0 +1,178 @@
/**
* SignalManager — Centralized signal.json operations with atomic file handling.
*
* Provides robust signal.json management with proper error handling and atomic
* operations. Replaces scattered signal detection logic throughout the codebase.
*/
import { readFile, unlink, stat } from 'node:fs/promises';
import { existsSync } from 'node:fs';
import { join } from 'node:path';
import { createModuleLogger } from '../../logger/index.js';
const log = createModuleLogger('signal-manager');
/**
 * Parsed contents of an agent's `.cw/output/signal.json` file.
 */
export interface SignalData {
  // Completion state reported by the agent; readSignal() rejects any other value.
  status: 'done' | 'questions' | 'error';
  // Questions the agent needs answered — presumably only populated when
  // status is 'questions'; confirm against the agent output contract.
  questions?: Array<{
    id: string;
    question: string;
    options?: string[];
  }>;
  // Failure description — presumably only populated when status is 'error'.
  error?: string;
}
/**
 * Port for signal.json lifecycle operations on an agent workdir.
 */
export interface SignalManager {
  /** Delete signal.json if present; a missing file is not an error. */
  clearSignal(agentWorkdir: string): Promise<void>;
  /** True if signal.json currently exists under the workdir. */
  checkSignalExists(agentWorkdir: string): Promise<boolean>;
  /** Read and parse signal.json; null when absent, empty, or invalid. */
  readSignal(agentWorkdir: string): Promise<SignalData | null>;
  /** Poll until a valid signal appears or timeoutMs elapses (then null). */
  waitForSignal(agentWorkdir: string, timeoutMs: number): Promise<SignalData | null>;
  /** True when the file at signalPath is non-empty, complete, parseable JSON. */
  validateSignalFile(signalPath: string): Promise<boolean>;
}
/**
 * File-system backed SignalManager.
 *
 * All methods treat a missing or malformed signal.json as a soft condition
 * (null/false) rather than an error; only clearSignal propagates unexpected
 * I/O failures.
 */
export class FileSystemSignalManager implements SignalManager {
  /** Location of the signal file, relative to an agent workdir. */
  private static readonly SIGNAL_FILE = '.cw/output/signal.json';

  /** Absolute path of signal.json for the given workdir. */
  private signalPathFor(agentWorkdir: string): string {
    return join(agentWorkdir, FileSystemSignalManager.SIGNAL_FILE);
  }

  /**
   * Clear signal.json file atomically. Always called before spawn/resume.
   * This prevents race conditions in completion detection.
   *
   * @throws Any unlink failure other than ENOENT (a missing file already
   *   counts as "cleared").
   */
  async clearSignal(agentWorkdir: string): Promise<void> {
    const signalPath = this.signalPathFor(agentWorkdir);
    try {
      await unlink(signalPath);
      log.debug({ agentWorkdir, signalPath }, 'signal.json cleared successfully');
    } catch (error: unknown) {
      // `unknown` + narrowing instead of `any` to stay strict-mode clean.
      const code = (error as { code?: string } | null)?.code;
      if (code !== 'ENOENT') {
        const message = error instanceof Error ? error.message : String(error);
        log.warn({ agentWorkdir, signalPath, error: message }, 'failed to clear signal.json');
        throw error;
      }
      // File doesn't exist - that's fine, it's already "cleared"
      log.debug({ agentWorkdir, signalPath }, 'signal.json already absent (nothing to clear)');
    }
  }

  /**
   * Check if signal.json file exists.
   * Async only to satisfy the SignalManager interface; the check itself is
   * synchronous.
   */
  async checkSignalExists(agentWorkdir: string): Promise<boolean> {
    return existsSync(this.signalPathFor(agentWorkdir));
  }

  /**
   * Read and parse signal.json file with robust error handling.
   * Returns null if the file doesn't exist, is empty, fails to parse, or
   * carries an unrecognized status.
   */
  async readSignal(agentWorkdir: string): Promise<SignalData | null> {
    const signalPath = this.signalPathFor(agentWorkdir);
    try {
      if (!existsSync(signalPath)) {
        return null;
      }
      const content = await readFile(signalPath, 'utf-8');
      const trimmed = content.trim();
      if (!trimmed) {
        log.debug({ agentWorkdir, signalPath }, 'signal.json is empty');
        return null;
      }
      const signal = JSON.parse(trimmed) as SignalData;
      // Basic validation: status must be one of the known terminal states.
      if (!signal.status || !['done', 'questions', 'error'].includes(signal.status)) {
        log.warn({ agentWorkdir, signalPath, signal }, 'signal.json has invalid status');
        return null;
      }
      log.debug({ agentWorkdir, signalPath, status: signal.status }, 'signal.json read successfully');
      return signal;
    } catch (error) {
      log.warn({
        agentWorkdir,
        signalPath,
        error: error instanceof Error ? error.message : String(error)
      }, 'failed to read or parse signal.json');
      return null;
    }
  }

  /**
   * Wait for signal.json to appear and be valid, with exponential backoff
   * polling. Returns null if timeout is reached or signal is never valid.
   */
  async waitForSignal(agentWorkdir: string, timeoutMs: number): Promise<SignalData | null> {
    const startTime = Date.now();
    const signalPath = this.signalPathFor(agentWorkdir);
    let attempt = 0;
    log.debug({ agentWorkdir, timeoutMs }, 'waiting for signal.json to appear');
    while (Date.now() - startTime < timeoutMs) {
      const signal = await this.readSignal(agentWorkdir);
      if (signal) {
        log.debug({
          agentWorkdir,
          signalPath,
          status: signal.status,
          waitTime: Date.now() - startTime
        }, 'signal.json found and valid');
        return signal;
      }
      // Exponential backoff: 100ms, 200ms, 400ms, 800ms, then 1s max —
      // additionally capped to the remaining budget so the final sleep can
      // no longer overshoot the caller's deadline (the previous version
      // could sleep up to ~1s past timeoutMs).
      const remaining = timeoutMs - (Date.now() - startTime);
      const delay = Math.min(100 * 2 ** attempt, 1000, Math.max(remaining, 0));
      if (delay > 0) {
        await new Promise(resolve => setTimeout(resolve, delay));
      }
      attempt++;
    }
    log.debug({
      agentWorkdir,
      signalPath,
      timeoutMs,
      totalWaitTime: Date.now() - startTime
    }, 'timeout waiting for signal.json');
    return null;
  }

  /**
   * Validate that a signal file is complete and properly formatted.
   * Used to detect if file is still being written vs. truly missing/incomplete.
   */
  async validateSignalFile(signalPath: string): Promise<boolean> {
    try {
      if (!existsSync(signalPath)) {
        return false;
      }
      // Cheap size check before reading the whole file.
      const stats = await stat(signalPath);
      if (stats.size === 0) {
        return false;
      }
      const content = await readFile(signalPath, 'utf-8');
      const trimmed = content.trim();
      if (!trimmed) {
        return false;
      }
      // A file still being written typically lacks its closing brace/bracket.
      const endsCorrectly = trimmed.endsWith('}') || trimmed.endsWith(']');
      if (!endsCorrectly) {
        return false;
      }
      // Try to parse as JSON to ensure it's valid
      JSON.parse(trimmed);
      return true;
    } catch (error) {
      log.debug({ signalPath, error: error instanceof Error ? error.message : String(error) }, 'signal file validation failed');
      return false;
    }
  }
}

View File

@@ -0,0 +1,529 @@
/**
* MultiProviderAgentManager Tests
*
* Unit tests for the MultiProviderAgentManager adapter.
* Mocks child_process.spawn since we can't spawn real Claude CLI in tests.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { MultiProviderAgentManager } from './manager.js';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { ProjectRepository } from '../db/repositories/project-repository.js';
import { EventEmitterBus } from '../events/index.js';
import type { DomainEvent } from '../events/index.js';
// Mock child_process.spawn and execFile
// (execFile's callback is invoked immediately with empty stdout/stderr so
// helpers that shell out resolve without touching the real system;
// NOTE(review): `cb?: Function` is the broad Function type — a precise
// callback signature would be stricter).
vi.mock('node:child_process', () => ({
  spawn: vi.fn(),
  execFile: vi.fn((_cmd: string, _args: string[], _opts: unknown, cb?: Function) => {
    if (cb) cb(null, '', '');
  }),
}));
// Import spawn to get the mock
import { spawn } from 'node:child_process';
const mockSpawn = vi.mocked(spawn);
// Mock SimpleGitWorktreeManager so spawn doesn't need a real git repo
vi.mock('../git/manager.js', () => {
  return {
    SimpleGitWorktreeManager: class MockWorktreeManager {
      create = vi.fn().mockResolvedValue({ id: 'workspace', path: '/tmp/test-workspace/agent-workdirs/gastown/workspace', branch: 'agent/gastown' });
      get = vi.fn().mockResolvedValue(null);
      list = vi.fn().mockResolvedValue([]);
      remove = vi.fn().mockResolvedValue(undefined);
    },
  };
});
// Mock fs operations for file-based output
vi.mock('node:fs', async () => {
  const actual = await vi.importActual('node:fs');
  // Create a mock write stream
  const mockWriteStream = {
    write: vi.fn(),
    end: vi.fn(),
    on: vi.fn(),
  };
  return {
    ...actual,
    openSync: vi.fn().mockReturnValue(99),
    closeSync: vi.fn(),
    mkdirSync: vi.fn(),
    writeFileSync: vi.fn(),
    createWriteStream: vi.fn().mockReturnValue(mockWriteStream),
    existsSync: vi.fn().mockReturnValue(true), // Default to true for our new validation
  };
});
vi.mock('node:fs/promises', async () => {
  const actual = await vi.importActual('node:fs/promises');
  return {
    ...actual,
    readFile: vi.fn().mockResolvedValue(''),
    // readdir rejects so directory-scanning code paths behave as "no workdir".
    readdir: vi.fn().mockRejectedValue(new Error('ENOENT')),
    rm: vi.fn().mockResolvedValue(undefined),
  };
});
// Mock FileTailer to avoid actual file watching
vi.mock('./file-tailer.js', () => ({
  FileTailer: class MockFileTailer {
    start = vi.fn().mockResolvedValue(undefined);
    stop = vi.fn().mockResolvedValue(undefined);
    isStopped = false;
  },
}));
import type { ChildProcess } from 'node:child_process';
/**
 * Create a mock ChildProcess for detached spawning.
 * Stubs only the members spawnDetached actually touches
 * (pid, unref, on, kill); the process is spawned detached and unreferenced.
 */
function createMockChildProcess(options?: {
  pid?: number;
}) {
  const pid = options?.pid ?? 123;
  // Minimal stand-in; cast because a real ChildProcess has far more surface.
  const stub = {
    pid,
    unref: vi.fn(),
    on: vi.fn().mockReturnThis(),
    kill: vi.fn(),
  };
  return stub as unknown as ChildProcess;
}
describe('MultiProviderAgentManager', () => {
  let manager: MultiProviderAgentManager;
  let mockRepository: AgentRepository;
  let mockProjectRepository: ProjectRepository;
  let eventBus: EventEmitterBus;
  let capturedEvents: DomainEvent[];
  // Canonical agent row returned by the repository mocks below; individual
  // tests override findById/findByName to change its status or session.
  const mockAgent = {
    id: 'agent-123',
    name: 'gastown',
    taskId: 'task-456',
    initiativeId: null as string | null,
    sessionId: 'session-789',
    worktreeId: 'gastown',
    status: 'idle' as const,
    mode: 'execute' as const,
    provider: 'claude',
    accountId: null as string | null,
    pid: null as number | null,
    outputFilePath: null as string | null,
    result: null as string | null,
    pendingQuestions: null as string | null,
    createdAt: new Date(),
    updatedAt: new Date(),
  };
  beforeEach(() => {
    vi.clearAllMocks();
    capturedEvents = [];
    mockRepository = {
      create: vi.fn().mockResolvedValue(mockAgent),
      findById: vi.fn().mockResolvedValue(mockAgent),
      findByName: vi.fn().mockResolvedValue(null), // No duplicate by default
      findByTaskId: vi.fn().mockResolvedValue(mockAgent),
      findBySessionId: vi.fn().mockResolvedValue(mockAgent),
      findAll: vi.fn().mockResolvedValue([mockAgent]),
      findByStatus: vi.fn().mockResolvedValue([mockAgent]),
      update: vi.fn().mockResolvedValue(mockAgent),
      delete: vi.fn().mockResolvedValue(undefined),
    };
    mockProjectRepository = {
      create: vi.fn(),
      findById: vi.fn(),
      findByName: vi.fn(),
      findAll: vi.fn().mockResolvedValue([]),
      update: vi.fn(),
      delete: vi.fn(),
      addProjectToInitiative: vi.fn(),
      removeProjectFromInitiative: vi.fn(),
      findProjectsByInitiativeId: vi.fn().mockResolvedValue([]),
      setInitiativeProjects: vi.fn(),
    };
    eventBus = new EventEmitterBus();
    // Subscribe to all agent events
    eventBus.on('agent:spawned', (e) => capturedEvents.push(e));
    eventBus.on('agent:stopped', (e) => capturedEvents.push(e));
    eventBus.on('agent:crashed', (e) => capturedEvents.push(e));
    eventBus.on('agent:resumed', (e) => capturedEvents.push(e));
    eventBus.on('agent:waiting', (e) => capturedEvents.push(e));
    manager = new MultiProviderAgentManager(
      mockRepository,
      '/tmp/test-workspace',
      mockProjectRepository,
      undefined,
      eventBus
    );
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  describe('spawn', () => {
    it('creates agent record with provided name', async () => {
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      const result = await manager.spawn({
        name: 'gastown',
        taskId: 'task-456',
        prompt: 'Test task',
      });
      expect(mockRepository.create).toHaveBeenCalledWith(
        expect.objectContaining({ name: 'gastown' })
      );
      expect(result.name).toBe('gastown');
    });
    it('rejects duplicate agent names', async () => {
      // Simulate an existing agent with the requested name.
      mockRepository.findByName = vi.fn().mockResolvedValue(mockAgent);
      await expect(
        manager.spawn({
          name: 'gastown',
          taskId: 'task-456',
          prompt: 'Test',
        })
      ).rejects.toThrow("Agent with name 'gastown' already exists");
    });
    it('emits AgentSpawned event with name', async () => {
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      await manager.spawn({
        name: 'gastown',
        taskId: 'task-456',
        prompt: 'Test',
      });
      const spawnedEvent = capturedEvents.find(
        (e) => e.type === 'agent:spawned'
      );
      expect(spawnedEvent).toBeDefined();
      expect(
        (spawnedEvent as { payload: { name: string } }).payload.name
      ).toBe('gastown');
    });
    it('writes diagnostic files for workdir verification', async () => {
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      // Mock fs.writeFileSync to capture diagnostic file writing
      const { writeFileSync } = await import('node:fs');
      const mockWriteFileSync = vi.mocked(writeFileSync);
      // The existsSync is already mocked globally to return true
      await manager.spawn({
        name: 'gastown',
        taskId: 'task-456',
        prompt: 'Test task',
      });
      // Verify diagnostic file was written
      const diagnosticCalls = mockWriteFileSync.mock.calls.filter(call =>
        call[0].toString().includes('spawn-diagnostic.json')
      );
      expect(diagnosticCalls).toHaveLength(1);
      // Parse the diagnostic data to verify structure
      const diagnosticCall = diagnosticCalls[0];
      const diagnosticData = JSON.parse(diagnosticCall[1] as string);
      expect(diagnosticData).toMatchObject({
        agentId: expect.any(String),
        alias: 'gastown',
        intendedCwd: expect.stringContaining('/agent-workdirs/gastown/workspace'),
        worktreeId: 'gastown',
        provider: 'claude',
        command: expect.any(String),
        args: expect.any(Array),
        env: expect.any(Object),
        cwdExistsAtSpawn: true,
        initiativeId: null,
        customCwdProvided: false,
        accountId: null,
        timestamp: expect.any(String),
      });
    });
    it('uses custom cwd if provided', async () => {
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      await manager.spawn({
        name: 'chinatown',
        taskId: 'task-789',
        prompt: 'Test task',
        cwd: '/custom/path',
      });
      // Verify spawn was called with custom cwd
      expect(mockSpawn).toHaveBeenCalledWith(
        'claude',
        expect.arrayContaining(['-p', expect.stringContaining('Test task'), '--output-format', 'stream-json']),
        expect.objectContaining({ cwd: '/custom/path' })
      );
    });
  });
  describe('stop', () => {
    it('stops running agent and updates status', async () => {
      // When we call stop, it looks up the agent by ID
      // The repository mock returns mockAgent which has id 'agent-123'
      await manager.stop(mockAgent.id);
      expect(mockRepository.update).toHaveBeenCalledWith(
        mockAgent.id,
        { status: 'stopped', pendingQuestions: null }
      );
    });
    it('kills detached process if running', async () => {
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      // Spawn returns immediately since process is detached
      const spawned = await manager.spawn({
        name: 'gastown',
        taskId: 'task-456',
        prompt: 'Test',
      });
      // Now stop using the returned agent ID
      await manager.stop(spawned.id);
      // Verify status was updated (process.kill is called internally, not on the child object)
      expect(mockRepository.update).toHaveBeenCalledWith(
        spawned.id,
        { status: 'stopped', pendingQuestions: null }
      );
    });
    it('throws if agent not found', async () => {
      mockRepository.findById = vi.fn().mockResolvedValue(null);
      await expect(manager.stop('nonexistent')).rejects.toThrow(
        "Agent 'nonexistent' not found"
      );
    });
    it('emits AgentStopped event with user_requested reason', async () => {
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      const spawned = await manager.spawn({
        name: 'gastown',
        taskId: 'task-456',
        prompt: 'Test',
      });
      await manager.stop(spawned.id);
      const stoppedEvent = capturedEvents.find(
        (e) => e.type === 'agent:stopped'
      );
      expect(stoppedEvent).toBeDefined();
      expect(
        (stoppedEvent as { payload: { reason: string } }).payload.reason
      ).toBe('user_requested');
    });
  });
  describe('list', () => {
    it('returns all agents with names', async () => {
      const agents = await manager.list();
      expect(agents).toHaveLength(1);
      expect(agents[0].name).toBe('gastown');
    });
  });
  describe('get', () => {
    it('finds agent by id', async () => {
      const agent = await manager.get('agent-123');
      expect(mockRepository.findById).toHaveBeenCalledWith('agent-123');
      expect(agent?.id).toBe('agent-123');
    });
    it('returns null if agent not found', async () => {
      mockRepository.findById = vi.fn().mockResolvedValue(null);
      const agent = await manager.get('nonexistent');
      expect(agent).toBeNull();
    });
  });
  describe('getByName', () => {
    it('finds agent by name', async () => {
      mockRepository.findByName = vi.fn().mockResolvedValue(mockAgent);
      const agent = await manager.getByName('gastown');
      expect(mockRepository.findByName).toHaveBeenCalledWith('gastown');
      expect(agent?.name).toBe('gastown');
    });
    it('returns null if agent not found', async () => {
      mockRepository.findByName = vi.fn().mockResolvedValue(null);
      const agent = await manager.getByName('nonexistent');
      expect(agent).toBeNull();
    });
  });
  describe('resume', () => {
    it('resumes agent waiting for input with answers map', async () => {
      // Resume requires the agent to be in waiting_for_input state.
      mockRepository.findById = vi.fn().mockResolvedValue({
        ...mockAgent,
        status: 'waiting_for_input',
      });
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      await manager.resume(mockAgent.id, { q1: 'Answer one', q2: 'Answer two' });
      // Verify spawn was called with resume args
      expect(mockSpawn).toHaveBeenCalledWith(
        'claude',
        expect.arrayContaining([
          '--resume',
          'session-789',
          '--output-format',
          'stream-json',
        ]),
        expect.any(Object)
      );
    });
    it('rejects if agent not waiting for input', async () => {
      mockRepository.findById = vi.fn().mockResolvedValue({
        ...mockAgent,
        status: 'running',
      });
      await expect(manager.resume(mockAgent.id, { q1: 'Answer' })).rejects.toThrow(
        'not waiting for input'
      );
    });
    it('rejects if agent has no session', async () => {
      mockRepository.findById = vi.fn().mockResolvedValue({
        ...mockAgent,
        status: 'waiting_for_input',
        sessionId: null,
      });
      await expect(manager.resume(mockAgent.id, { q1: 'Answer' })).rejects.toThrow(
        'has no session to resume'
      );
    });
    it('emits AgentResumed event', async () => {
      mockRepository.findById = vi.fn().mockResolvedValue({
        ...mockAgent,
        status: 'waiting_for_input',
      });
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      await manager.resume(mockAgent.id, { q1: 'User answer' });
      const resumedEvent = capturedEvents.find(
        (e) => e.type === 'agent:resumed'
      );
      expect(resumedEvent).toBeDefined();
      expect(
        (resumedEvent as { payload: { sessionId: string } }).payload.sessionId
      ).toBe('session-789');
    });
  });
  describe('getResult', () => {
    it('returns null when agent has no result', async () => {
      const result = await manager.getResult('agent-123');
      expect(result).toBeNull();
    });
  });
  describe('delete', () => {
    it('deletes agent and clears active state', async () => {
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      // Spawn an agent first
      const spawned = await manager.spawn({
        name: 'gastown',
        taskId: 'task-456',
        prompt: 'Test',
      });
      // Delete the agent
      await manager.delete(spawned.id);
      // Verify DB record was deleted
      expect(mockRepository.delete).toHaveBeenCalledWith(spawned.id);
    });
    it('emits agent:deleted event', async () => {
      const mockChild = createMockChildProcess();
      mockSpawn.mockReturnValue(mockChild);
      eventBus.on('agent:deleted', (e) => capturedEvents.push(e));
      const spawned = await manager.spawn({
        name: 'gastown',
        taskId: 'task-456',
        prompt: 'Test',
      });
      await manager.delete(spawned.id);
      const deletedEvent = capturedEvents.find(
        (e) => e.type === 'agent:deleted'
      );
      expect(deletedEvent).toBeDefined();
      expect(
        (deletedEvent as { payload: { name: string } }).payload.name
      ).toBe('gastown');
    });
    it('throws if agent not found', async () => {
      mockRepository.findById = vi.fn().mockResolvedValue(null);
      await expect(manager.delete('nonexistent')).rejects.toThrow(
        "Agent 'nonexistent' not found"
      );
    });
    it('handles missing workdir gracefully', async () => {
      // Agent exists in DB but has no active state and workdir doesn't exist
      // The delete should succeed (best-effort cleanup)
      await manager.delete(mockAgent.id);
      expect(mockRepository.delete).toHaveBeenCalledWith(mockAgent.id);
    });
  });
});

View File

@@ -0,0 +1,948 @@
/**
* Multi-Provider Agent Manager — Orchestrator
*
* Implementation of AgentManager port supporting multiple CLI providers.
* Delegates to extracted helpers:
* - ProcessManager: subprocess spawn/kill/poll, worktree creation, command building
* - CredentialHandler: account selection, credential write/refresh, exhaustion handling
* - OutputHandler: stream events, signal parsing, file reading, result capture
* - CleanupManager: worktree/branch/log removal, orphan cleanup, reconciliation
*/
import type {
AgentManager,
AgentInfo,
SpawnAgentOptions,
AgentResult,
AgentStatus,
AgentMode,
PendingQuestions,
} from './types.js';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { AccountRepository } from '../db/repositories/account-repository.js';
import type { ProjectRepository } from '../db/repositories/project-repository.js';
import type { ChangeSetRepository } from '../db/repositories/change-set-repository.js';
import type { PhaseRepository } from '../db/repositories/phase-repository.js';
import type { TaskRepository } from '../db/repositories/task-repository.js';
import type { PageRepository } from '../db/repositories/page-repository.js';
import type { LogChunkRepository } from '../db/repositories/log-chunk-repository.js';
import { generateUniqueAlias } from './alias.js';
import type {
EventBus,
AgentSpawnedEvent,
AgentStoppedEvent,
AgentResumedEvent,
AgentDeletedEvent,
ProcessCrashedEvent,
} from '../events/index.js';
import { writeInputFiles } from './file-io.js';
import { buildWorkspaceLayout, buildInterAgentCommunication } from './prompts/index.js';
import { getProvider } from './providers/registry.js';
import { createModuleLogger } from '../logger/index.js';
import { join } from 'node:path';
import { unlink, readFile } from 'node:fs/promises';
import { existsSync, writeFileSync } from 'node:fs';
import type { AccountCredentialManager } from './credentials/types.js';
import { ProcessManager } from './process-manager.js';
import { CredentialHandler } from './credential-handler.js';
import { OutputHandler, type ActiveAgent } from './output-handler.js';
import { CleanupManager } from './cleanup-manager.js';
import { createLifecycleController } from './lifecycle/factory.js';
import type { AgentLifecycleController } from './lifecycle/controller.js';
import { AgentExhaustedError, AgentFailureError } from './lifecycle/retry-policy.js';
import { FileSystemSignalManager } from './lifecycle/signal-manager.js';
import type { SignalManager } from './lifecycle/signal-manager.js';
const log = createModuleLogger('agent-manager');
export class MultiProviderAgentManager implements AgentManager {
private static readonly MAX_COMMIT_RETRIES = 1;
private activeAgents: Map<string, ActiveAgent> = new Map();
private commitRetryCount: Map<string, number> = new Map();
private processManager: ProcessManager;
private credentialHandler: CredentialHandler;
private outputHandler: OutputHandler;
private cleanupManager: CleanupManager;
private lifecycleController: AgentLifecycleController;
private signalManager: SignalManager;
/**
 * Wire up the manager and its collaborators.
 *
 * Only `repository`, `workspaceRoot`, and `projectRepository` are required;
 * the optional repositories/services gate individual features (e.g. log-chunk
 * persistence is skipped entirely when `logChunkRepository` is absent).
 *
 * @param processManagerOverride - Test seam: inject a pre-built
 *   ProcessManager instead of constructing one from workspaceRoot.
 */
constructor(
  private repository: AgentRepository,
  private workspaceRoot: string,
  private projectRepository: ProjectRepository,
  private accountRepository?: AccountRepository,
  private eventBus?: EventBus,
  private credentialManager?: AccountCredentialManager,
  private changeSetRepository?: ChangeSetRepository,
  private phaseRepository?: PhaseRepository,
  private taskRepository?: TaskRepository,
  private pageRepository?: PageRepository,
  private logChunkRepository?: LogChunkRepository,
  private debug: boolean = false,
  processManagerOverride?: ProcessManager,
) {
  // signalManager is created first because the output and cleanup handlers
  // both take it as a dependency.
  this.signalManager = new FileSystemSignalManager();
  this.processManager = processManagerOverride ?? new ProcessManager(workspaceRoot, projectRepository);
  this.credentialHandler = new CredentialHandler(workspaceRoot, accountRepository, credentialManager);
  this.outputHandler = new OutputHandler(repository, eventBus, changeSetRepository, phaseRepository, taskRepository, pageRepository, this.signalManager);
  this.cleanupManager = new CleanupManager(workspaceRoot, repository, projectRepository, eventBus, debug, this.signalManager);
  this.lifecycleController = createLifecycleController({
    repository,
    processManager: this.processManager,
    cleanupManager: this.cleanupManager,
    accountRepository,
    debug,
  });
  // Listen for process crashed events to handle agents specially
  if (eventBus) {
    eventBus.on('process:crashed', async (event: ProcessCrashedEvent) => {
      await this.handleProcessCrashed(event.payload.processId, event.payload.exitCode, event.payload.signal);
    });
  }
}
/**
 * Drop every piece of in-memory bookkeeping held for an agent:
 * stops the completion-poll timer (if any) and forgets the
 * activeAgents / commitRetryCount entries.
 */
private cleanupAgentState(agentId: string): void {
  this.activeAgents.get(agentId)?.cancelPoll?.();
  this.activeAgents.delete(agentId);
  this.commitRetryCount.delete(agentId);
}
/**
 * Build a fire-and-forget persister for raw output chunks.
 *
 * The returned callback stores each chunk via the log-chunk repository and,
 * once persisted, re-broadcasts it as an 'agent:output' event on the bus.
 * Returns undefined when no logChunkRepository is configured.
 */
private createLogChunkCallback(
  agentId: string,
  agentName: string,
  sessionNumber: number,
): ((content: string) => void) | undefined {
  const chunkRepo = this.logChunkRepository;
  if (!chunkRepo) return undefined;
  return (content) => {
    const persisted = chunkRepo.insertChunk({ agentId, agentName, sessionNumber, content });
    // Fire-and-forget: failures are logged, never propagated to the caller.
    void persisted
      .then(() => {
        this.eventBus?.emit({
          type: 'agent:output' as const,
          timestamp: new Date(),
          payload: { agentId, stream: 'stdout', data: content },
        });
      })
      .catch((err) =>
        log.warn(
          { agentId, err: err instanceof Error ? err.message : String(err) },
          'failed to persist log chunk',
        ),
      );
  };
}
/**
* Spawn a new agent using the unified lifecycle controller.
* Features comprehensive retry, error handling, and cleanup.
*/
async spawnWithLifecycle(options: SpawnAgentOptions): Promise<AgentInfo> {
log.info({
taskId: options.taskId,
provider: options.provider,
initiativeId: options.initiativeId,
mode: options.mode
}, 'spawning agent with unified lifecycle management');
let spawnedAgent: AgentInfo | undefined;
await this.lifecycleController.spawnWithRetry(
async (opts) => {
const agent = await this.spawnInternal(opts);
spawnedAgent = agent;
return { id: agent.id, name: agent.name, status: agent.status, initiativeId: agent.initiativeId, worktreeId: agent.worktreeId };
},
options
);
return spawnedAgent!;
}
/**
 * Spawn a new agent to work on a task (legacy method).
 * Consider using spawnWithLifecycle for better error handling: this path
 * delegates straight to spawnInternal with no retry wrapper.
 */
async spawn(options: SpawnAgentOptions): Promise<AgentInfo> {
  return this.spawnInternal(options);
}
/**
 * Internal spawn implementation without lifecycle management.
 * Used by both legacy spawn() and new lifecycle-managed spawn.
 *
 * Pipeline (order matters — later steps consume earlier results):
 * account selection → worktree creation → agent record → input files →
 * command build → env prep → detached spawn → diagnostics → event →
 * completion polling.
 *
 * @throws Error for an unknown provider or a duplicate agent name.
 */
private async spawnInternal(options: SpawnAgentOptions): Promise<AgentInfo> {
  const { taskId, cwd, mode = 'execute', provider: providerName = 'claude', initiativeId, baseBranch, branchName } = options;
  let { prompt } = options;
  log.info({ taskId, provider: providerName, initiativeId, mode, baseBranch, branchName }, 'spawn requested');
  const provider = getProvider(providerName);
  if (!provider) {
    throw new Error(`Unknown provider: '${providerName}'. Available: claude, codex, gemini, cursor, auggie, amp, opencode`);
  }
  // Generate or validate name
  let name: string;
  if (options.name) {
    name = options.name;
    const existing = await this.repository.findByName(name);
    if (existing) {
      throw new Error(`Agent with name '${name}' already exists`);
    }
  } else {
    name = await generateUniqueAlias(this.repository);
  }
  const alias = name;
  log.debug({ alias }, 'alias generated');
  // 1. Account selection
  let accountId: string | null = null;
  let accountConfigDir: string | null = null;
  const accountResult = await this.credentialHandler.selectAccount(providerName);
  if (accountResult) {
    accountId = accountResult.accountId;
    accountConfigDir = accountResult.configDir;
    this.credentialHandler.writeCredentialsToDisk(accountResult.account, accountConfigDir);
    const { valid, refreshed } = await this.credentialHandler.ensureCredentials(accountConfigDir, accountId);
    if (!valid) {
      // Deliberately best-effort: spawn proceeds even with stale credentials.
      log.warn({ alias, accountId }, 'failed to refresh account credentials, proceeding anyway');
    }
    if (refreshed) {
      await this.credentialHandler.persistRefreshedCredentials(accountId, accountConfigDir);
    }
  }
  if (accountId) {
    log.info({ alias, accountId }, 'account selected');
  } else {
    log.debug('no accounts available, spawning without account');
  }
  // 2. Create isolated worktrees
  let agentCwd: string;
  if (initiativeId) {
    log.debug({ alias, initiativeId, baseBranch, branchName }, 'creating initiative-based worktrees');
    agentCwd = await this.processManager.createProjectWorktrees(alias, initiativeId, baseBranch, branchName);
    // Log projects linked to the initiative
    const projects = await this.projectRepository.findProjectsByInitiativeId(initiativeId);
    log.info({
      alias,
      initiativeId,
      projectCount: projects.length,
      projects: projects.map(p => ({ name: p.name, url: p.url })),
      agentCwd
    }, 'initiative-based agent workdir created');
  } else {
    log.debug({ alias }, 'creating standalone worktree');
    agentCwd = await this.processManager.createStandaloneWorktree(alias);
    log.info({ alias, agentCwd }, 'standalone agent workdir created');
  }
  // Verify the final agentCwd exists (logged only — spawn proceeds either way)
  const cwdVerified = existsSync(agentCwd);
  log.info({
    alias,
    agentCwd,
    cwdVerified,
    initiativeBasedAgent: !!initiativeId
  }, 'agent workdir setup completed');
  // 2b. Append workspace layout to prompt now that worktrees exist
  const workspaceSection = buildWorkspaceLayout(agentCwd);
  if (workspaceSection) {
    prompt = prompt + workspaceSection;
  }
  // 3. Create agent record
  const agent = await this.repository.create({
    name: alias,
    taskId: taskId ?? null,
    initiativeId: initiativeId ?? null,
    sessionId: null,
    worktreeId: alias,
    status: 'running',
    mode,
    provider: providerName,
    accountId,
  });
  const agentId = agent.id;
  // 3a. Append inter-agent communication instructions with actual agent ID
  prompt = prompt + buildInterAgentCommunication(agentId);
  // 3b. Write input files (after agent creation so we can include agentId/agentName)
  if (options.inputContext) {
    writeInputFiles({ agentWorkdir: agentCwd, ...options.inputContext, agentId, agentName: alias });
    log.debug({ alias }, 'input files written');
  }
  // 4. Build spawn command
  const { command, args, env: providerEnv } = this.processManager.buildSpawnCommand(provider, prompt);
  const finalCwd = cwd ?? agentCwd;
  log.info({
    agentId,
    alias,
    command,
    args: args.join(' '),
    finalCwd,
    customCwdProvided: !!cwd,
    providerEnv: Object.keys(providerEnv)
  }, 'spawn command built');
  // 5. Prepare process environment with credentials
  const { processEnv } = await this.credentialHandler.prepareProcessEnv(providerEnv, provider, accountId);
  log.debug({
    agentId,
    finalProcessEnv: Object.keys(processEnv),
    hasAccountConfig: !!accountId,
    hasOAuthToken: !!processEnv['CLAUDE_CODE_OAUTH_TOKEN'],
  }, 'process environment prepared');
  // 6. Spawn detached subprocess
  const { pid, outputFilePath, tailer } = this.processManager.spawnDetached(
    agentId, alias, command, args, cwd ?? agentCwd, processEnv, providerName, prompt,
    (event) => this.outputHandler.handleStreamEvent(agentId, event, this.activeAgents.get(agentId)),
    this.createLogChunkCallback(agentId, alias, 1),
  );
  await this.repository.update(agentId, { pid, outputFilePath });
  // Write spawn diagnostic file for post-execution verification
  // NOTE(review): `env: processEnv` serializes the full child environment
  // (potentially including credential material) into the workdir — confirm
  // this is acceptable, or redact sensitive keys.
  const diagnostic = {
    timestamp: new Date().toISOString(),
    agentId,
    alias,
    intendedCwd: finalCwd,
    worktreeId: agent.worktreeId,
    provider: providerName,
    command,
    args,
    env: processEnv,
    cwdExistsAtSpawn: existsSync(finalCwd),
    initiativeId: initiativeId || null,
    customCwdProvided: !!cwd,
    accountId: accountId || null,
  };
  // NOTE(review): assumes `<finalCwd>/.cw` already exists here — verify,
  // since writeFileSync does not create parent directories.
  writeFileSync(
    join(finalCwd, '.cw', 'spawn-diagnostic.json'),
    JSON.stringify(diagnostic, null, 2),
    'utf-8'
  );
  const activeEntry: ActiveAgent = { agentId, pid, tailer, outputFilePath, agentCwd: finalCwd };
  this.activeAgents.set(agentId, activeEntry);
  log.info({ agentId, alias, pid, diagnosticWritten: true }, 'detached subprocess started with diagnostic');
  // Emit spawned event
  if (this.eventBus) {
    const event: AgentSpawnedEvent = {
      type: 'agent:spawned',
      timestamp: new Date(),
      payload: { agentId, name: alias, taskId: taskId ?? null, worktreeId: alias, provider: providerName },
    };
    this.eventBus.emit(event);
  }
  // Start polling for completion
  const { cancel } = this.processManager.pollForCompletion(
    agentId, pid,
    () => this.handleDetachedAgentCompletion(agentId),
    () => this.activeAgents.get(agentId)?.tailer,
  );
  activeEntry.cancelPoll = cancel;
  return this.toAgentInfo(agent);
}
/**
 * Handle completion of a detached agent.
 *
 * Order matters: output/result capture runs first, then credentials are
 * synced back to the DB, then in-memory state is dropped, and finally the
 * workdir is auto-cleaned (which may resume the agent to commit leftovers).
 */
private async handleDetachedAgentCompletion(agentId: string): Promise<void> {
  // Completion is only processed while the agent is still tracked as active;
  // this guards against double-firing.
  if (!this.activeAgents.has(agentId)) return;
  const active = this.activeAgents.get(agentId);
  await this.outputHandler.handleCompletion(
    agentId,
    active,
    (alias) => this.processManager.getAgentWorkdir(alias),
  );
  // Sync credentials back to DB if the agent had an account
  await this.syncCredentialsPostCompletion(agentId);
  this.cleanupAgentState(agentId);
  // Auto-cleanup workdir after completion
  await this.tryAutoCleanup(agentId);
}
/**
 * Attempt auto-cleanup of agent workdir after completion.
 * If dirty and retries remain, resumes the agent to commit changes.
 *
 * Best-effort by design: any failure is logged and the retry counter
 * cleared, never propagated to the completion path.
 */
private async tryAutoCleanup(agentId: string): Promise<void> {
  try {
    const agent = await this.repository.findById(agentId);
    // Only idle agents are cleaned — anything else is still in flight.
    if (!agent || agent.status !== 'idle') return;
    const { clean, removed } = await this.cleanupManager.autoCleanupAfterCompletion(
      agentId, agent.name, agent.initiativeId,
    );
    if (removed) {
      this.commitRetryCount.delete(agentId);
      log.info({ agentId, alias: agent.name }, 'auto-cleanup completed');
      return;
    }
    if (!clean) {
      // Workdir has uncommitted changes: give the agent a bounded number of
      // chances (MAX_COMMIT_RETRIES) to commit them via a resumed session.
      const retries = this.commitRetryCount.get(agentId) ?? 0;
      if (retries < MultiProviderAgentManager.MAX_COMMIT_RETRIES) {
        this.commitRetryCount.set(agentId, retries + 1);
        const resumed = await this.resumeForCommit(agentId);
        if (resumed) {
          log.info({ agentId, alias: agent.name, retry: retries + 1 }, 'resumed agent to commit uncommitted changes');
          return;
        }
      }
      // Out of retries (or resume not possible): leave the workdir in place
      // so no work is lost.
      log.warn({ agentId, alias: agent.name }, 'agent workdir has uncommitted changes after max retries, leaving in place');
      this.commitRetryCount.delete(agentId);
    }
  } catch (err) {
    log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'auto-cleanup failed');
    this.commitRetryCount.delete(agentId);
  }
}
/**
 * Resume an agent's session with a prompt to commit uncommitted changes.
 * Returns false if the agent can't be resumed (no session, provider doesn't
 * support resume, or all worktrees are already clean).
 *
 * On success the agent is flipped back to 'running', a fresh detached
 * subprocess is spawned with the commit prompt, and completion polling is
 * re-armed so handleDetachedAgentCompletion fires again afterwards.
 */
private async resumeForCommit(agentId: string): Promise<boolean> {
const agent = await this.repository.findById(agentId);
if (!agent?.sessionId) return false;
const provider = getProvider(agent.provider);
if (!provider || provider.resumeStyle === 'none') return false;
// Check which specific worktrees are dirty — skip resume if all clean
const dirtyPaths = await this.cleanupManager.getDirtyWorktreePaths(agent.name, agent.initiativeId);
if (dirtyPaths.length === 0) return false;
const dirtyList = dirtyPaths.map(p => `- \`${p}/\``).join('\n');
const commitPrompt =
'You have uncommitted changes in the following project directories:\n' +
dirtyList + '\n\n' +
'For each directory listed above, `cd` into it, then run `git add -A && git commit -m "<message>"` ' +
'with an appropriate commit message describing the work. Do not make any other changes.';
// Mark running and clear stale questions/result before relaunching.
await this.repository.update(agentId, { status: 'running', pendingQuestions: null, result: null });
const agentCwd = this.processManager.getAgentWorkdir(agent.worktreeId);
const { command, args, env: providerEnv } = this.processManager.buildResumeCommand(provider, agent.sessionId, commitPrompt);
const { processEnv } = await this.credentialHandler.prepareProcessEnv(providerEnv, provider, agent.accountId);
// Tear down the previous poll and tailer so the new process owns output.
const prevActive = this.activeAgents.get(agentId);
prevActive?.cancelPoll?.();
if (prevActive?.tailer) {
await prevActive.tailer.stop();
}
// Determine session number for commit retry
let commitSessionNumber = 1;
if (this.logChunkRepository) {
commitSessionNumber = (await this.logChunkRepository.getSessionCount(agentId)) + 1;
}
const { pid, outputFilePath, tailer } = this.processManager.spawnDetached(
agentId, agent.name, command, args, agentCwd, processEnv, provider.name, commitPrompt,
(event) => this.outputHandler.handleStreamEvent(agentId, event, this.activeAgents.get(agentId)),
this.createLogChunkCallback(agentId, agent.name, commitSessionNumber),
);
await this.repository.update(agentId, { pid, outputFilePath });
const commitActiveEntry: ActiveAgent = { agentId, pid, tailer, outputFilePath };
this.activeAgents.set(agentId, commitActiveEntry);
// Re-arm completion polling for the new subprocess.
const { cancel: commitCancel } = this.processManager.pollForCompletion(
agentId, pid,
() => this.handleDetachedAgentCompletion(agentId),
() => this.activeAgents.get(agentId)?.tailer,
);
commitActiveEntry.cancelPoll = commitCancel;
return true;
}
/**
 * Push credentials from the agent's per-account config dir back into the DB
 * after a run. The subprocess may have refreshed tokens mid-session; writing
 * them back keeps the DB current so the next spawn uses fresh tokens.
 * Best-effort: failures are logged, never thrown. No-op without an
 * accountRepository or when the agent has no account.
 */
private async syncCredentialsPostCompletion(agentId: string): Promise<void> {
if (!this.accountRepository) return;
try {
const agent = await this.repository.findById(agentId);
const accountId = agent?.accountId;
if (!accountId) return;
// NOTE(review): dynamic import — presumably avoids a static cycle with
// accounts/paths; confirm before converting to a top-level import.
const { getAccountConfigDir } = await import('./accounts/paths.js');
const configDir = getAccountConfigDir(this.workspaceRoot, accountId);
await this.credentialHandler.persistRefreshedCredentials(accountId, configDir);
log.debug({ agentId, accountId }, 'post-completion credential sync done');
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
log.warn({ agentId, err: message }, 'post-completion credential sync failed');
}
}
/**
 * Stop a running agent: SIGTERM the subprocess, halt its output tailer,
 * clear in-memory tracking, persist refreshed credentials, then mark the
 * DB record 'stopped' and emit agent:stopped (reason: user_requested).
 * @throws Error when no agent with the given id exists.
 */
async stop(agentId: string): Promise<void> {
const agent = await this.repository.findById(agentId);
if (!agent) throw new Error(`Agent '${agentId}' not found`);
log.info({ agentId, name: agent.name }, 'stopping agent');
const running = this.activeAgents.get(agentId);
if (running) {
// The process may already have exited; a failed kill is harmless.
try { process.kill(running.pid, 'SIGTERM'); } catch { /* already exited */ }
await running.tailer.stop();
}
this.cleanupAgentState(agentId);
// Sync credentials before marking stopped
await this.syncCredentialsPostCompletion(agentId);
await this.repository.update(agentId, { status: 'stopped', pendingQuestions: null });
if (this.eventBus) {
const stoppedEvent: AgentStoppedEvent = {
type: 'agent:stopped',
timestamp: new Date(),
payload: { agentId, name: agent.name, taskId: agent.taskId ?? '', reason: 'user_requested' },
};
this.eventBus.emit(stoppedEvent);
}
}
/**
 * List every agent in the repository as an AgentInfo snapshot.
 */
async list(): Promise<AgentInfo[]> {
const rows = await this.repository.findAll();
return rows.map((row) => this.toAgentInfo(row));
}
/**
* Get a specific agent by ID.
*/
async get(agentId: string): Promise<AgentInfo | null> {
const agent = await this.repository.findById(agentId);
return agent ? this.toAgentInfo(agent) : null;
}
/**
* Get a specific agent by name.
*/
async getByName(name: string): Promise<AgentInfo | null> {
const agent = await this.repository.findByName(name);
return agent ? this.toAgentInfo(agent) : null;
}
/**
 * Resume an agent through the unified lifecycle controller, which wraps
 * the raw resume in retry, error handling, and cleanup. Prefer this over
 * the legacy resume().
 */
async resumeWithLifecycle(agentId: string, answers: Record<string, string>): Promise<void> {
log.info(
{ agentId, answerKeys: Object.keys(answers) },
'resuming agent with unified lifecycle management',
);
// The controller may rewrite the answers between retries, so delegate
// each attempt back to resumeInternal with whatever it hands us.
await this.lifecycleController.resumeWithRetry(
(id, modifiedAnswers) => this.resumeInternal(id, modifiedAnswers),
{ agentId, answers },
);
}
/**
 * Resume an agent that's waiting for input (legacy path, no retry/cleanup
 * wrapping). Consider resumeWithLifecycle for better error handling.
 */
async resume(agentId: string, answers: Record<string, string>): Promise<void> {
await this.resumeInternal(agentId, answers);
}
/**
 * Internal resume implementation without lifecycle management.
 * Used by both legacy resume() and new lifecycle-managed resume.
 *
 * Requires the agent to be in 'waiting_for_input' with a stored sessionId
 * and a provider that supports resume. Formats the answers as a prompt,
 * relaunches a detached subprocess on the existing session, and re-arms
 * completion polling.
 * @throws Error when the agent is missing, not waiting, has no session,
 *         or its provider is unknown / cannot resume.
 */
private async resumeInternal(agentId: string, answers: Record<string, string>): Promise<void> {
const agent = await this.repository.findById(agentId);
if (!agent) throw new Error(`Agent '${agentId}' not found`);
if (agent.status !== 'waiting_for_input') {
throw new Error(`Agent '${agent.name}' is not waiting for input (status: ${agent.status})`);
}
if (!agent.sessionId) {
throw new Error(`Agent '${agent.name}' has no session to resume`);
}
log.info({ agentId, sessionId: agent.sessionId, provider: agent.provider }, 'resuming agent');
const provider = getProvider(agent.provider);
if (!provider) throw new Error(`Unknown provider: '${agent.provider}'`);
if (provider.resumeStyle === 'none') {
throw new Error(`Provider '${provider.name}' does not support resume`);
}
const agentCwd = this.processManager.getAgentWorkdir(agent.worktreeId);
const prompt = this.outputHandler.formatAnswersAsPrompt(answers);
// Clear previous signal.json to ensure clean completion detection
const signalPath = join(agentCwd, '.cw/output/signal.json');
try {
await unlink(signalPath);
log.debug({ agentId, signalPath }, 'cleared previous signal.json for resume');
} catch {
// File might not exist, which is fine
}
// Flip to running and wipe stale questions/result before the relaunch.
await this.repository.update(agentId, { status: 'running', pendingQuestions: null, result: null });
const { command, args, env: providerEnv } = this.processManager.buildResumeCommand(provider, agent.sessionId, prompt);
log.debug({ command, args: args.join(' ') }, 'resume command built');
// Prepare process environment with credentials
const { processEnv } = await this.credentialHandler.prepareProcessEnv(providerEnv, provider, agent.accountId);
// Stop previous tailer and cancel previous poll
const prevActive = this.activeAgents.get(agentId);
prevActive?.cancelPoll?.();
if (prevActive?.tailer) {
await prevActive.tailer.stop();
}
// Determine session number for this resume
let resumeSessionNumber = 1;
if (this.logChunkRepository) {
resumeSessionNumber = (await this.logChunkRepository.getSessionCount(agentId)) + 1;
}
const { pid, outputFilePath, tailer } = this.processManager.spawnDetached(
agentId, agent.name, command, args, agentCwd, processEnv, provider.name, prompt,
(event) => this.outputHandler.handleStreamEvent(agentId, event, this.activeAgents.get(agentId)),
this.createLogChunkCallback(agentId, agent.name, resumeSessionNumber),
);
await this.repository.update(agentId, { pid, outputFilePath });
const resumeActiveEntry: ActiveAgent = { agentId, pid, tailer, outputFilePath };
this.activeAgents.set(agentId, resumeActiveEntry);
log.info({ agentId, pid }, 'resume detached subprocess started');
if (this.eventBus) {
const event: AgentResumedEvent = {
type: 'agent:resumed',
timestamp: new Date(),
payload: { agentId, name: agent.name, taskId: agent.taskId ?? '', sessionId: agent.sessionId },
};
this.eventBus.emit(event);
}
// Re-arm completion polling for the freshly spawned subprocess.
const { cancel: resumeCancel } = this.processManager.pollForCompletion(
agentId, pid,
() => this.handleDetachedAgentCompletion(agentId),
() => this.activeAgents.get(agentId)?.tailer,
);
resumeActiveEntry.cancelPoll = resumeCancel;
}
/**
 * Get the result of an agent's work; null when none is available yet.
 */
async getResult(agentId: string): Promise<AgentResult | null> {
const active = this.activeAgents.get(agentId);
return this.outputHandler.getResult(agentId, active);
}
/**
 * Get pending questions for an agent waiting for input; null when none.
 */
async getPendingQuestions(agentId: string): Promise<PendingQuestions | null> {
const active = this.activeAgents.get(agentId);
return this.outputHandler.getPendingQuestions(agentId, active);
}
/**
 * Delete an agent and clean up all associated resources.
 *
 * Teardown order: kill the subprocess and stop the tailer, clear in-memory
 * state, best-effort removal of worktrees/branches/logs (each failure is
 * logged but does not abort), delete DB log chunks and the agent record,
 * then emit agent:deleted.
 * @throws Error when no agent with the given id exists.
 */
async delete(agentId: string): Promise<void> {
const agent = await this.repository.findById(agentId);
if (!agent) throw new Error(`Agent '${agentId}' not found`);
log.info({ agentId, name: agent.name }, 'deleting agent');
// 1. Kill process, stop tailer, clear all in-memory state
const active = this.activeAgents.get(agentId);
if (active) {
try { process.kill(active.pid, 'SIGTERM'); } catch { /* already exited */ }
await active.tailer.stop();
}
this.cleanupAgentState(agentId);
// 2. Best-effort cleanup of on-disk artifacts; never abort the delete
try { await this.cleanupManager.removeAgentWorktrees(agent.name, agent.initiativeId); }
catch (err) { log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to remove worktrees'); }
try { await this.cleanupManager.removeAgentBranches(agent.name, agent.initiativeId); }
catch (err) { log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to remove branches'); }
try { await this.cleanupManager.removeAgentLogs(agent.name); }
catch (err) { log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to remove logs'); }
// 3b. Delete log chunks from DB
if (this.logChunkRepository) {
try { await this.logChunkRepository.deleteByAgentId(agentId); }
catch (err) { log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to delete log chunks'); }
}
// 4. Delete DB record
await this.repository.delete(agentId);
// 5. Emit deleted event
if (this.eventBus) {
const event: AgentDeletedEvent = {
type: 'agent:deleted',
timestamp: new Date(),
payload: { agentId, name: agent.name },
};
this.eventBus.emit(event);
}
log.info({ agentId, name: agent.name }, 'agent deleted');
}
/**
 * Dismiss an agent from view: drop its in-memory state and stamp
 * userDismissedAt on the DB record. The record itself is retained.
 * @throws Error when no agent with the given id exists.
 */
async dismiss(agentId: string): Promise<void> {
const agent = await this.repository.findById(agentId);
if (!agent) throw new Error(`Agent '${agentId}' not found`);
log.info({ agentId, name: agent.name }, 'dismissing agent');
this.cleanupAgentState(agentId);
await this.repository.update(agentId, {
userDismissedAt: new Date(),
updatedAt: new Date(),
});
log.info({ agentId, name: agent.name }, 'agent dismissed');
}
/**
 * Clean up orphaned agent workdirs (delegates to the cleanup manager).
 */
async cleanupOrphanedWorkdirs(): Promise<void> {
await this.cleanupManager.cleanupOrphanedWorkdirs();
}
/**
 * Clean up orphaned agent log directories (delegates to the cleanup manager).
 */
async cleanupOrphanedLogs(): Promise<void> {
await this.cleanupManager.cleanupOrphanedLogs();
}
/**
 * Reconcile agent state after server restart.
 *
 * Delegates to the cleanup manager, wiring in callbacks for stream events,
 * raw-output processing, re-arming completion polling on surviving PIDs,
 * and (when a log-chunk repository exists) persisting recovered output.
 */
async reconcileAfterRestart(): Promise<void> {
// Capture in a local so the closures below see a narrowed, stable reference.
const reconcileLogChunkRepo = this.logChunkRepository;
await this.cleanupManager.reconcileAfterRestart(
this.activeAgents,
(agentId, event) => this.outputHandler.handleStreamEvent(agentId, event, this.activeAgents.get(agentId)),
(agentId, rawOutput, provider) => this.outputHandler.processAgentOutput(agentId, rawOutput, provider, (alias) => this.processManager.getAgentWorkdir(alias)),
(agentId, pid) => {
// Re-arm completion polling for a process that survived the restart.
const { cancel } = this.processManager.pollForCompletion(
agentId, pid,
() => this.handleDetachedAgentCompletion(agentId),
() => this.activeAgents.get(agentId)?.tailer,
);
const active = this.activeAgents.get(agentId);
if (active) active.cancelPoll = cancel;
},
reconcileLogChunkRepo
? (agentId, agentName, content) => {
// Determine session number asynchronously — use fire-and-forget
reconcileLogChunkRepo.getSessionCount(agentId).then(count => {
return reconcileLogChunkRepo.insertChunk({
agentId,
agentName,
sessionNumber: count + 1,
content,
});
}).catch(err => log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to persist log chunk during reconciliation'));
}
: undefined,
);
}
/**
 * Handle process crashed event specifically for agents.
 * Check if the agent actually completed successfully despite the non-zero
 * exit code (some providers exit non-zero even after writing a valid
 * completion signal). Only marks the agent 'crashed' when no completion
 * signal is found; otherwise defers to the polling completion handler.
 */
private async handleProcessCrashed(processId: string, exitCode: number | null, signal: string | null): Promise<void> {
try {
// Check if this is an agent process
const agent = await this.repository.findById(processId);
if (!agent) {
return; // Not our agent
}
// Store exit code and signal for debugging
await this.repository.update(processId, { exitCode });
log.info({
agentId: processId,
name: agent.name,
exitCode,
signal,
outputFilePath: agent.outputFilePath
}, 'agent process crashed, analyzing completion status');
// Check if the agent has output that indicates successful completion
if (agent.outputFilePath) {
const hasCompletion = await this.checkAgentCompletionResult(agent.worktreeId);
if (hasCompletion) {
log.info({
agentId: processId,
name: agent.name,
exitCode,
signal
}, 'agent marked as crashed but completed successfully - completion already handled by polling');
// Note: We don't call handleCompletion() here because the polling handler
// (handleDetachedAgentCompletion) already processes completions. The mutex
// in OutputHandler.handleCompletion() prevents duplicate processing.
// NOTE(review): this second log.info largely duplicates the one above —
// consider merging them.
log.info({
agentId: processId,
name: agent.name,
exitCode
}, 'completion detection confirmed - deferring to polling handler');
} else {
log.warn({
agentId: processId,
name: agent.name,
exitCode,
signal,
outputFilePath: agent.outputFilePath
}, 'agent crashed and no successful completion detected - marking as truly crashed');
// Only mark as crashed if agent truly crashed (no completion detected)
await this.repository.update(processId, { status: 'crashed' });
}
} else {
// No output file recorded at all — nothing to inspect, treat as crash.
log.warn({
agentId: processId,
name: agent.name,
exitCode,
signal
}, 'agent crashed with no output file path - marking as crashed');
await this.repository.update(processId, { status: 'crashed' });
}
} catch (err) {
log.error({
processId,
exitCode,
signal,
err: err instanceof Error ? err.message : String(err)
}, 'failed to check agent completion after crash');
}
}
/**
 * Decide whether an agent finished by inspecting its .cw/output/signal.json.
 * Standalone agents keep .cw inside a workspace/ subdirectory, so that
 * location is probed as a fallback. Completion means a signal status of
 * 'done', 'questions', or 'error'. Returns false when the file is missing,
 * unreadable, or reports any other status.
 */
private async checkAgentCompletionResult(worktreeId: string): Promise<boolean> {
try {
let workdir = this.processManager.getAgentWorkdir(worktreeId);
const nested = join(workdir, 'workspace');
const hasTopLevelOutput = existsSync(join(workdir, '.cw', 'output'));
if (!hasTopLevelOutput && existsSync(join(nested, '.cw'))) {
workdir = nested;
}
const signalPath = join(workdir, '.cw/output/signal.json');
if (!existsSync(signalPath)) {
log.debug({ worktreeId, signalPath }, 'no signal.json found - agent not completed');
return false;
}
const signal = JSON.parse(await readFile(signalPath, 'utf-8'));
const finished = ['done', 'questions', 'error'].includes(signal.status);
if (finished) {
log.debug({ worktreeId, signal }, 'agent completion detected via signal.json');
} else {
log.debug({ worktreeId, signal }, 'signal.json found but status indicates incomplete');
}
return finished;
} catch (err) {
log.warn({ worktreeId, err: err instanceof Error ? err.message : String(err) }, 'failed to read or parse signal.json');
return false;
}
}
/**
 * Convert a database agent record to the public AgentInfo shape.
 * Pure mapping: casts status/mode to their enum-like union types and
 * normalizes a missing taskId to the empty string.
 */
private toAgentInfo(agent: {
id: string;
name: string;
taskId: string | null;
initiativeId: string | null;
sessionId: string | null;
worktreeId: string;
status: string;
mode: string;
provider: string;
accountId: string | null;
createdAt: Date;
updatedAt: Date;
userDismissedAt?: Date | null;
}): AgentInfo {
const { id, name, initiativeId, sessionId, worktreeId, provider, accountId, createdAt, updatedAt, userDismissedAt } = agent;
return {
id,
name,
taskId: agent.taskId ?? '',
initiativeId,
sessionId,
worktreeId,
status: agent.status as AgentStatus,
mode: agent.mode as AgentMode,
provider,
accountId,
createdAt,
updatedAt,
userDismissedAt,
};
}
}

View File

@@ -0,0 +1,32 @@
/**
* Server-side Markdown → Tiptap JSON converter.
*
* Uses @tiptap/markdown's MarkdownManager.parse() — the same approach
* as content-serializer.ts but in reverse direction.
* No DOM needed, no new dependencies.
*/
import StarterKit from '@tiptap/starter-kit';
import Link from '@tiptap/extension-link';
import { MarkdownManager } from '@tiptap/markdown';
// Module-level cache: the MarkdownManager is built once and reused.
let _manager: MarkdownManager | null = null;
/**
 * Lazily construct and cache the shared MarkdownManager used for parsing.
 */
function getManager(): MarkdownManager {
_manager ??= new MarkdownManager({ extensions: [StarterKit, Link] });
return _manager;
}
/**
 * Convert a markdown string to a Tiptap JSON document.
 * Blank or whitespace-only input yields a minimal empty document
 * (a doc with a single empty paragraph).
 */
export function markdownToTiptapJson(markdown: string): object {
if (markdown.trim() === '') {
return { type: 'doc', content: [{ type: 'paragraph' }] };
}
return getManager().parse(markdown);
}

View File

@@ -0,0 +1,906 @@
/**
* MockAgentManager Tests
*
* Comprehensive test suite for the MockAgentManager adapter covering
* all scenario types: success, crash, waiting_for_input.
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { MockAgentManager, type MockAgentScenario } from './mock-manager.js';
import type { EventBus, DomainEvent, AgentStoppedEvent } from '../events/types.js';
// =============================================================================
// Test Helpers
// =============================================================================
/**
 * Build an EventBus stub whose emit() records every event into the exposed
 * `emittedEvents` array for later inspection; the listener-registration
 * methods (on/off/once) are vi.fn() spies.
 */
function createMockEventBus(): EventBus & { emittedEvents: DomainEvent[] } {
const emittedEvents: DomainEvent[] = [];
function emit<T extends DomainEvent>(event: T): void {
emittedEvents.push(event);
}
return { emittedEvents, emit, on: vi.fn(), off: vi.fn(), once: vi.fn() };
}
// =============================================================================
// Tests
// =============================================================================
describe('MockAgentManager', () => {
let manager: MockAgentManager;
let eventBus: ReturnType<typeof createMockEventBus>;
beforeEach(() => {
vi.useFakeTimers();
eventBus = createMockEventBus();
manager = new MockAgentManager({ eventBus });
});
afterEach(() => {
manager.clear();
vi.useRealTimers();
});
// ===========================================================================
// spawn() with default scenario (immediate success)
// ===========================================================================
describe('spawn with default scenario', () => {
// Default scenario: the agent succeeds on the next (fake) timer tick.
it('should create agent with running status', async () => {
const agent = await manager.spawn({
name: 'test-agent',
taskId: 'task-1',
prompt: 'Do something',
});
expect(agent.name).toBe('test-agent');
expect(agent.taskId).toBe('task-1');
expect(agent.status).toBe('running');
expect(agent.id).toBeDefined();
expect(agent.sessionId).toBeDefined();
expect(agent.worktreeId).toBeDefined();
});
it('should emit agent:spawned event', async () => {
await manager.spawn({
name: 'spawned-test',
taskId: 'task-1',
prompt: 'Do something',
});
expect(eventBus.emittedEvents.length).toBeGreaterThanOrEqual(1);
const spawnedEvent = eventBus.emittedEvents.find((e) => e.type === 'agent:spawned');
expect(spawnedEvent).toBeDefined();
expect((spawnedEvent as any).payload.name).toBe('spawned-test');
expect((spawnedEvent as any).payload.taskId).toBe('task-1');
});
it('should complete with success after timer fires', async () => {
const agent = await manager.spawn({
name: 'success-test',
taskId: 'task-1',
prompt: 'Do something',
});
// Timer hasn't fired yet
expect(agent.status).toBe('running');
// Advance timers (delay 0 still requires a tick under fake timers)
await vi.advanceTimersByTimeAsync(0);
// Check status changed
const updated = await manager.get(agent.id);
expect(updated?.status).toBe('idle');
// Check result available
const result = await manager.getResult(agent.id);
expect(result).not.toBeNull();
expect(result?.success).toBe(true);
expect(result?.message).toBe('Task completed successfully');
});
it('should emit agent:stopped event on success completion', async () => {
await manager.spawn({
name: 'stop-event-test',
taskId: 'task-1',
prompt: 'Do something',
});
await vi.advanceTimersByTimeAsync(0);
const stoppedEvent = eventBus.emittedEvents.find((e) => e.type === 'agent:stopped');
expect(stoppedEvent).toBeDefined();
expect((stoppedEvent as any).payload.reason).toBe('task_complete');
});
});
// ===========================================================================
// spawn() with configured delay
// ===========================================================================
describe('spawn with configured delay', () => {
// A scenario delay keeps the agent 'running' until fake time passes it.
it('should not complete before delay expires', async () => {
manager.setScenario('delayed-agent', {
status: 'done',
delay: 100,
result: 'Delayed completion',
});
const agent = await manager.spawn({
name: 'delayed-agent',
taskId: 'task-1',
prompt: 'Do something slowly',
});
// Advance by less than delay
await vi.advanceTimersByTimeAsync(50);
const updated = await manager.get(agent.id);
expect(updated?.status).toBe('running');
});
it('should complete after delay expires', async () => {
manager.setScenario('delayed-agent', {
status: 'done',
delay: 100,
result: 'Delayed completion',
});
const agent = await manager.spawn({
name: 'delayed-agent',
taskId: 'task-1',
prompt: 'Do something slowly',
});
// Advance past delay
await vi.advanceTimersByTimeAsync(100);
const updated = await manager.get(agent.id);
expect(updated?.status).toBe('idle');
const result = await manager.getResult(agent.id);
expect(result?.message).toBe('Delayed completion');
});
});
// ===========================================================================
// spawn() with crash scenario
// ===========================================================================
describe('spawn with error scenario', () => {
// Error scenario: agent ends 'crashed', result carries the error message.
it('should emit agent:crashed and set result.success=false', async () => {
manager.setScenario('crash-agent', {
status: 'error',
delay: 0,
error: 'Something went terribly wrong',
});
const agent = await manager.spawn({
name: 'crash-agent',
taskId: 'task-1',
prompt: 'Do something risky',
});
await vi.advanceTimersByTimeAsync(0);
// Check status
const updated = await manager.get(agent.id);
expect(updated?.status).toBe('crashed');
// Check result
const result = await manager.getResult(agent.id);
expect(result?.success).toBe(false);
expect(result?.message).toBe('Something went terribly wrong');
// Check event
const crashedEvent = eventBus.emittedEvents.find((e) => e.type === 'agent:crashed');
expect(crashedEvent).toBeDefined();
expect((crashedEvent as any).payload.error).toBe('Something went terribly wrong');
});
});
// ===========================================================================
// spawn() with question scenario
// ===========================================================================
describe('spawn with questions scenario', () => {
// Questions scenario: agent pauses in 'waiting_for_input' with its questions.
it('should emit agent:waiting and set status to waiting_for_input', async () => {
manager.setScenario('waiting-agent', {
status: 'questions',
delay: 0,
questions: [{ id: 'q1', question: 'Should I continue?' }],
});
const agent = await manager.spawn({
name: 'waiting-agent',
taskId: 'task-1',
prompt: 'Ask a question',
});
await vi.advanceTimersByTimeAsync(0);
// Check status
const updated = await manager.get(agent.id);
expect(updated?.status).toBe('waiting_for_input');
// Check event
const waitingEvent = eventBus.emittedEvents.find((e) => e.type === 'agent:waiting');
expect(waitingEvent).toBeDefined();
expect((waitingEvent as any).payload.questions[0].question).toBe('Should I continue?');
});
});
// ===========================================================================
// resume() after waiting_for_input
// ===========================================================================
describe('resume after questions', () => {
// resume() is only legal from 'waiting_for_input'; it re-runs the scenario.
it('should emit agent:resumed and continue with scenario', async () => {
manager.setScenario('resume-agent', {
status: 'questions',
delay: 0,
questions: [{ id: 'q1', question: 'Need your input' }],
});
const agent = await manager.spawn({
name: 'resume-agent',
taskId: 'task-1',
prompt: 'Start working',
});
// Let agent reach waiting state
await vi.advanceTimersByTimeAsync(0);
const waitingAgent = await manager.get(agent.id);
expect(waitingAgent?.status).toBe('waiting_for_input');
// Resume the agent with answers map
await manager.resume(agent.id, { q1: 'Continue with this input' });
// Check agent:resumed event emitted
const resumedEvent = eventBus.emittedEvents.find((e) => e.type === 'agent:resumed');
expect(resumedEvent).toBeDefined();
expect((resumedEvent as any).payload.agentId).toBe(agent.id);
expect((resumedEvent as any).payload.sessionId).toBe(agent.sessionId);
// Status should be running again
const runningAgent = await manager.get(agent.id);
expect(runningAgent?.status).toBe('running');
// Let it complete
await vi.advanceTimersByTimeAsync(0);
const completedAgent = await manager.get(agent.id);
expect(completedAgent?.status).toBe('idle');
const result = await manager.getResult(agent.id);
expect(result?.success).toBe(true);
});
it('should throw if agent not waiting for input', async () => {
const agent = await manager.spawn({
name: 'not-waiting',
taskId: 'task-1',
prompt: 'Work',
});
await expect(manager.resume(agent.id, { q1: 'input' })).rejects.toThrow(
'is not waiting for input'
);
});
it('should throw if agent not found', async () => {
await expect(manager.resume('non-existent-id', { q1: 'input' })).rejects.toThrow(
'not found'
);
});
});
// ===========================================================================
// stop() kills scheduled completion
// ===========================================================================
describe('stop', () => {
// stop() must cancel the pending scenario timer so completion never fires.
it('should cancel scheduled completion and emit agent:stopped', async () => {
manager.setScenario('stoppable-agent', {
status: 'done',
delay: 1000,
result: 'Should not see this',
});
const agent = await manager.spawn({
name: 'stoppable-agent',
taskId: 'task-1',
prompt: 'Long running task',
});
// Stop before completion
await manager.stop(agent.id);
// Check status
const updated = await manager.get(agent.id);
expect(updated?.status).toBe('stopped');
// Check event
const stoppedEvent = eventBus.emittedEvents.find(
(e) => e.type === 'agent:stopped' && (e as any).payload.reason === 'user_requested'
);
expect(stoppedEvent).toBeDefined();
// Advance time - should not complete now
await vi.advanceTimersByTimeAsync(1000);
const stillStopped = await manager.get(agent.id);
expect(stillStopped?.status).toBe('stopped');
});
it('should throw if agent not found', async () => {
await expect(manager.stop('non-existent-id')).rejects.toThrow('not found');
});
});
// ===========================================================================
// list() returns all agents with correct status
// ===========================================================================
describe('list', () => {
// list() returns every spawned agent; empty array when none exist.
it('should return all agents', async () => {
await manager.spawn({ name: 'agent-1', taskId: 't1', prompt: 'p1' });
await manager.spawn({ name: 'agent-2', taskId: 't2', prompt: 'p2' });
await manager.spawn({ name: 'agent-3', taskId: 't3', prompt: 'p3' });
const agents = await manager.list();
expect(agents.length).toBe(3);
expect(agents.map((a) => a.name).sort()).toEqual(['agent-1', 'agent-2', 'agent-3']);
});
it('should return empty array when no agents', async () => {
const agents = await manager.list();
expect(agents).toEqual([]);
});
});
// ===========================================================================
// get() and getByName() lookups
// ===========================================================================
describe('get and getByName', () => {
// Both lookups return the agent when found and null for unknown keys.
it('get should return agent by ID', async () => {
const spawned = await manager.spawn({
name: 'get-test',
taskId: 't1',
prompt: 'p1',
});
const found = await manager.get(spawned.id);
expect(found).not.toBeNull();
expect(found?.name).toBe('get-test');
});
it('get should return null for unknown ID', async () => {
const found = await manager.get('unknown-id');
expect(found).toBeNull();
});
it('getByName should return agent by name', async () => {
await manager.spawn({ name: 'named-agent', taskId: 't1', prompt: 'p1' });
const found = await manager.getByName('named-agent');
expect(found).not.toBeNull();
expect(found?.name).toBe('named-agent');
});
it('getByName should return null for unknown name', async () => {
const found = await manager.getByName('unknown-name');
expect(found).toBeNull();
});
});
// ===========================================================================
// setScenario() overrides for specific agent names
// ===========================================================================
describe('setScenario overrides', () => {
// Scenario overrides are keyed by agent NAME and apply only to that name.
it('should use scenario override for specific agent name', async () => {
// Set error scenario for one agent
manager.setScenario('crasher', {
status: 'error',
delay: 0,
error: 'Intentional crash',
});
// Spawn two agents - one with override, one with default
const crasher = await manager.spawn({
name: 'crasher',
taskId: 't1',
prompt: 'p1',
});
const normal = await manager.spawn({
name: 'normal',
taskId: 't2',
prompt: 'p2',
});
await vi.advanceTimersByTimeAsync(0);
// Crasher should have crashed
const crasherUpdated = await manager.get(crasher.id);
expect(crasherUpdated?.status).toBe('crashed');
// Normal should have succeeded
const normalUpdated = await manager.get(normal.id);
expect(normalUpdated?.status).toBe('idle');
});
it('should allow clearing scenario override', async () => {
manager.setScenario('flip-flop', {
status: 'error',
delay: 0,
error: 'Crash for test',
});
// First spawn crashes
const first = await manager.spawn({
name: 'flip-flop',
taskId: 't1',
prompt: 'p1',
});
await vi.advanceTimersByTimeAsync(0);
expect((await manager.get(first.id))?.status).toBe('crashed');
// Clear scenario and remove agent (clear() frees the duplicate name)
manager.clearScenario('flip-flop');
manager.clear();
// Second spawn succeeds (default scenario)
const second = await manager.spawn({
name: 'flip-flop',
taskId: 't2',
prompt: 'p2',
});
await vi.advanceTimersByTimeAsync(0);
expect((await manager.get(second.id))?.status).toBe('idle');
});
});
// ===========================================================================
// Event emission order verification
// ===========================================================================
// Every lifecycle must begin with agent:spawned; the terminal/waiting event
// follows once the scheduled completion has run.
describe('event emission order', () => {
  // Index of the first event of a given type in the emitted sequence,
  // or -1 when that type was never emitted.
  const firstIndexOf = (type: string): number =>
    eventBus.emittedEvents.findIndex((e) => e.type === type);

  it('should emit spawned before completion events', async () => {
    await manager.spawn({ name: 'order-test', taskId: 't1', prompt: 'p1' });
    await vi.advanceTimersByTimeAsync(0);
    expect(firstIndexOf('agent:spawned')).toBeLessThan(firstIndexOf('agent:stopped'));
  });

  it('should emit spawned before crashed', async () => {
    manager.setScenario('crash-order', { status: 'error', delay: 0, error: 'Crash' });
    await manager.spawn({ name: 'crash-order', taskId: 't1', prompt: 'p1' });
    await vi.advanceTimersByTimeAsync(0);
    expect(firstIndexOf('agent:spawned')).toBeLessThan(firstIndexOf('agent:crashed'));
  });

  it('should emit spawned before waiting', async () => {
    manager.setScenario('wait-order', {
      status: 'questions',
      delay: 0,
      questions: [{ id: 'q1', question: 'Test question' }],
    });
    await manager.spawn({ name: 'wait-order', taskId: 't1', prompt: 'p1' });
    await vi.advanceTimersByTimeAsync(0);
    expect(firstIndexOf('agent:spawned')).toBeLessThan(firstIndexOf('agent:waiting'));
  });
});
// ===========================================================================
// Name uniqueness validation
// ===========================================================================
describe('name uniqueness', () => {
  it('should throw when spawning agent with duplicate name', async () => {
    // First spawn claims the name.
    await manager.spawn({ name: 'unique-name', taskId: 't1', prompt: 'p1' });
    // A second spawn under the same name must be rejected with the
    // manager's uniqueness error.
    const duplicateSpawn = manager.spawn({ name: 'unique-name', taskId: 't2', prompt: 'p2' });
    await expect(duplicateSpawn).rejects.toThrow(
      "Agent with name 'unique-name' already exists"
    );
  });
});
// ===========================================================================
// Constructor options
// ===========================================================================
describe('constructor options', () => {
  it('should work without eventBus', async () => {
    // A manager constructed with no options must still spawn agents;
    // lifecycle events are simply not emitted.
    const silentManager = new MockAgentManager();
    const spawned = await silentManager.spawn({
      name: 'no-events',
      taskId: 't1',
      prompt: 'p1',
    });
    expect(spawned.name).toBe('no-events');
    silentManager.clear();
  });

  it('should use provided default scenario', async () => {
    // When no per-name override exists, the constructor-supplied default
    // scenario drives every spawned agent.
    const crashByDefault: MockAgentScenario = {
      status: 'error',
      delay: 0,
      error: 'Default crash',
    };
    const crashingManager = new MockAgentManager({
      eventBus,
      defaultScenario: crashByDefault,
    });
    const spawned = await crashingManager.spawn({
      name: 'custom-default',
      taskId: 't1',
      prompt: 'p1',
    });
    await vi.advanceTimersByTimeAsync(0);
    expect((await crashingManager.get(spawned.id))?.status).toBe('crashed');
    crashingManager.clear();
  });
});
// ===========================================================================
// clear() cleanup
// ===========================================================================
describe('clear', () => {
  it('should remove all agents and cancel pending timers', async () => {
    // One agent with a long-delayed completion (its timer must be
    // cancelled by clear()), one agent on the default scenario.
    manager.setScenario('pending', { status: 'done', delay: 1000 });
    await manager.spawn({ name: 'pending', taskId: 't1', prompt: 'p1' });
    await manager.spawn({ name: 'another', taskId: 't2', prompt: 'p2' });
    expect(await manager.list()).toHaveLength(2);
    manager.clear();
    expect(await manager.list()).toHaveLength(0);
  });
});
// ===========================================================================
// Agent modes (execute, discuss, plan)
// ===========================================================================
// The agent:stopped reason is derived from the agent's mode
// (discuss -> context_complete, plan -> plan_complete).
describe('agent modes', () => {
it('should spawn agent with default execute mode', async () => {
const agent = await manager.spawn({
name: 'exec-agent',
taskId: 't1',
prompt: 'test',
});
expect(agent.mode).toBe('execute');
});
it('should spawn agent in discuss mode', async () => {
manager.setScenario('discuss-agent', {
status: 'done',
delay: 0,
result: 'Auth discussion complete',
});
const agent = await manager.spawn({
name: 'discuss-agent',
taskId: 't1',
prompt: 'discuss auth',
mode: 'discuss',
});
expect(agent.mode).toBe('discuss');
});
it('should spawn agent in plan mode', async () => {
manager.setScenario('plan-agent', {
status: 'done',
delay: 0,
result: 'Plan complete',
});
const agent = await manager.spawn({
name: 'plan-agent',
taskId: 't1',
prompt: 'plan work',
mode: 'plan',
});
expect(agent.mode).toBe('plan');
});
it('should emit stopped event with context_complete reason for discuss mode', async () => {
manager.setScenario('discuss-done', {
status: 'done',
delay: 0,
result: 'Done',
});
await manager.spawn({
name: 'discuss-done',
taskId: 't1',
prompt: 'test',
mode: 'discuss',
});
// Flush the scheduled completion so agent:stopped is emitted.
await vi.runAllTimersAsync();
const stopped = eventBus.emittedEvents.find((e) => e.type === 'agent:stopped') as AgentStoppedEvent | undefined;
expect(stopped?.payload.reason).toBe('context_complete');
});
it('should emit stopped event with plan_complete reason for plan mode', async () => {
manager.setScenario('plan-done', {
status: 'done',
delay: 0,
result: 'Plan complete',
});
await manager.spawn({
name: 'plan-done',
taskId: 't1',
prompt: 'test',
mode: 'plan',
});
await vi.runAllTimersAsync();
const stopped = eventBus.emittedEvents.find((e) => e.type === 'agent:stopped') as AgentStoppedEvent | undefined;
expect(stopped?.payload.reason).toBe('plan_complete');
});
});
// ===========================================================================
// Detail mode (phase to tasks)
// ===========================================================================
describe('detail mode', () => {
it('should spawn agent in detail mode', async () => {
const agent = await manager.spawn({
name: 'detailer',
taskId: 'plan-1',
prompt: 'Detail this phase',
mode: 'detail',
});
expect(agent.mode).toBe('detail');
});
it('should complete with detail_complete reason in detail mode', async () => {
manager.setScenario('detailer', {
status: 'done',
result: 'Detail complete',
});
await manager.spawn({ name: 'detailer', taskId: 'plan-1', prompt: 'test', mode: 'detail' });
await vi.advanceTimersByTimeAsync(100);
// Verify agent:stopped event with detail_complete reason (derived from mode)
const stoppedEvent = eventBus.emittedEvents.find((e) => e.type === 'agent:stopped') as AgentStoppedEvent | undefined;
expect(stoppedEvent).toBeDefined();
expect(stoppedEvent?.payload.reason).toBe('detail_complete');
});
it('should pause on questions in detail mode', async () => {
manager.setScenario('detailer', {
status: 'questions',
questions: [{ id: 'q1', question: 'How many tasks?' }],
});
await manager.spawn({ name: 'detailer', taskId: 'plan-1', prompt: 'test', mode: 'detail' });
await vi.advanceTimersByTimeAsync(100);
// Verify agent pauses for questions
// NOTE(review): despite its name, this variable holds the agent:waiting
// event, not a stopped event.
const stoppedEvent = eventBus.emittedEvents.find((e) => e.type === 'agent:waiting');
expect(stoppedEvent).toBeDefined();
// Check agent status
const agent = await manager.getByName('detailer');
expect(agent?.status).toBe('waiting_for_input');
});
it('should set result message for detail mode', async () => {
manager.setScenario('detailer', {
status: 'done',
result: 'Detail complete',
});
const agent = await manager.spawn({ name: 'detailer', taskId: 'plan-1', prompt: 'test', mode: 'detail' });
await vi.runAllTimersAsync();
// getResult is only populated after completion; the message echoes the
// scenario's `result` string.
const result = await manager.getResult(agent.id);
expect(result?.success).toBe(true);
expect(result?.message).toBe('Detail complete');
});
});
// ===========================================================================
// Structured question data (new schema tests)
// ===========================================================================
// Questions may carry optional `options` (choice lists) and `multiSelect`
// flags; questions without `options` are free-form text prompts.
describe('structured questions data', () => {
it('emits agent:waiting with structured questions data', async () => {
manager.setScenario('test-agent', {
status: 'questions',
questions: [
{
id: 'q1',
question: 'Which database?',
options: [
{ label: 'PostgreSQL', description: 'Full-featured' },
{ label: 'SQLite', description: 'Lightweight' },
],
multiSelect: false,
},
],
});
await manager.spawn({ name: 'test-agent', taskId: 'task-1', prompt: 'test' });
await vi.runAllTimersAsync();
// The full structured payload must survive into the emitted event.
const events = eventBus.emittedEvents.filter((e) => e.type === 'agent:waiting');
expect(events.length).toBe(1);
expect((events[0] as any).payload.questions).toHaveLength(1);
expect((events[0] as any).payload.questions[0].options).toHaveLength(2);
expect((events[0] as any).payload.questions[0].options[0].label).toBe('PostgreSQL');
expect((events[0] as any).payload.questions[0].multiSelect).toBe(false);
});
it('stores pending questions for retrieval', async () => {
manager.setScenario('test-agent', {
status: 'questions',
questions: [
{
id: 'q1',
question: 'Which database?',
options: [{ label: 'PostgreSQL' }],
},
],
});
const agent = await manager.spawn({ name: 'test-agent', taskId: 'task-1', prompt: 'test' });
await vi.runAllTimersAsync();
// Pending questions are queryable while the agent waits for input.
const pending = await manager.getPendingQuestions(agent.id);
expect(pending?.questions[0].question).toBe('Which database?');
expect(pending?.questions[0].options).toHaveLength(1);
expect(pending?.questions[0].options?.[0].label).toBe('PostgreSQL');
});
it('clears pending questions after resume', async () => {
manager.setScenario('resume-test', {
status: 'questions',
questions: [
{
id: 'q1',
question: 'Need your input',
options: [{ label: 'Option A' }, { label: 'Option B' }],
},
],
});
const agent = await manager.spawn({ name: 'resume-test', taskId: 'task-1', prompt: 'test' });
await vi.runAllTimersAsync();
// Verify questions are pending
const pendingBefore = await manager.getPendingQuestions(agent.id);
expect(pendingBefore).not.toBeNull();
expect(pendingBefore?.questions[0].question).toBe('Need your input');
// Resume the agent with answers map
await manager.resume(agent.id, { q1: 'Option A' });
// Pending questions should be cleared
const pendingAfter = await manager.getPendingQuestions(agent.id);
expect(pendingAfter).toBeNull();
});
it('returns null for non-existent agent pending questions', async () => {
const pending = await manager.getPendingQuestions('non-existent-id');
expect(pending).toBeNull();
});
it('returns null for agent not in waiting state', async () => {
const agent = await manager.spawn({ name: 'running-agent', taskId: 'task-1', prompt: 'test' });
// Agent is running, not waiting
const pending = await manager.getPendingQuestions(agent.id);
expect(pending).toBeNull();
});
it('handles multiple questions in single scenario', async () => {
// End-to-end flow: three questions (two with options, one free-form),
// waiting state, event payload, retrieval, resume, completion, cleanup.
manager.setScenario('multi-q-agent', {
status: 'questions',
questions: [
{
id: 'q1',
question: 'Which database should we use?',
options: [
{ label: 'PostgreSQL', description: 'Full-featured relational DB' },
{ label: 'SQLite', description: 'Lightweight embedded DB' },
],
},
{
id: 'q2',
question: 'Which ORM do you prefer?',
options: [
{ label: 'Drizzle', description: 'TypeScript-first ORM' },
{ label: 'Prisma', description: 'Popular Node.js ORM' },
],
},
{
id: 'q3',
question: 'Any additional notes?',
// No options - free-form text question
},
],
});
const agent = await manager.spawn({ name: 'multi-q-agent', taskId: 'task-1', prompt: 'test' });
await vi.runAllTimersAsync();
// Check status
const updated = await manager.get(agent.id);
expect(updated?.status).toBe('waiting_for_input');
// Check event has all questions
const waitingEvent = eventBus.emittedEvents.find((e) => e.type === 'agent:waiting');
expect(waitingEvent).toBeDefined();
expect((waitingEvent as any).payload.questions).toHaveLength(3);
expect((waitingEvent as any).payload.questions[0].id).toBe('q1');
expect((waitingEvent as any).payload.questions[1].id).toBe('q2');
expect((waitingEvent as any).payload.questions[2].id).toBe('q3');
// Check pending questions retrieval
const pending = await manager.getPendingQuestions(agent.id);
expect(pending?.questions).toHaveLength(3);
expect(pending?.questions[0].question).toBe('Which database should we use?');
expect(pending?.questions[1].question).toBe('Which ORM do you prefer?');
expect(pending?.questions[2].question).toBe('Any additional notes?');
expect(pending?.questions[2].options).toBeUndefined();
// Resume with answers to all questions
await manager.resume(agent.id, { q1: 'PostgreSQL', q2: 'Drizzle', q3: 'Use WAL mode' });
await vi.runAllTimersAsync();
// Agent should complete
const completed = await manager.get(agent.id);
expect(completed?.status).toBe('idle');
// Pending questions should be cleared
const clearedPending = await manager.getPendingQuestions(agent.id);
expect(clearedPending).toBeNull();
});
});
});

View File

@@ -0,0 +1,487 @@
/**
* Mock Agent Manager Adapter
*
* Implementation of AgentManager port for test scenarios.
* Simulates configurable agent behaviors (success, crash, waiting_for_input)
* without spawning real Claude agents.
*/
import { randomUUID } from 'crypto';
import type {
AgentManager,
AgentInfo,
AgentMode,
SpawnAgentOptions,
AgentResult,
AgentStatus,
PendingQuestions,
QuestionItem,
} from './types.js';
import type {
EventBus,
AgentSpawnedEvent,
AgentStoppedEvent,
AgentCrashedEvent,
AgentResumedEvent,
AgentDeletedEvent,
AgentWaitingEvent,
} from '../events/index.js';
/**
 * Scenario configuration for mock agent behavior.
 * Matches the simplified agent signal schema: done, questions, or error.
 * Mode-specific stopped reasons are derived from the agent's mode.
 */
export type MockAgentScenario =
| {
status: 'done';
// Success message reported via getResult(); a generic default applies when omitted.
result?: string;
// File paths reported as modified in the AgentResult.
filesModified?: string[];
// Milliseconds before completion fires (default 0; still async via setTimeout).
delay?: number;
}
| {
status: 'questions';
// Questions the agent pauses on; drives waiting_for_input + agent:waiting.
questions: QuestionItem[];
delay?: number;
}
| {
status: 'error';
// Error message; agent transitions to 'crashed' and agent:crashed is emitted.
error: string;
delay?: number;
};
/**
 * Internal agent record with scenario and timer tracking.
 */
interface MockAgentRecord {
// Publicly visible agent state returned by list()/get()/getByName().
info: AgentInfo;
// Scenario chosen at spawn time (override or default).
scenario: MockAgentScenario;
// Populated only after the agent completes or crashes.
result?: AgentResult;
// Set while status is 'waiting_for_input'; cleared on resume().
pendingQuestions?: PendingQuestions;
// Handle for the scheduled completion, so stop()/delete()/clear() can cancel it.
completionTimer?: ReturnType<typeof setTimeout>;
}
/**
 * Default scenario: immediate success with generic message.
 * Used whenever no per-name override and no constructor default is supplied.
 */
const DEFAULT_SCENARIO: MockAgentScenario = {
status: 'done',
result: 'Task completed successfully',
filesModified: [],
delay: 0,
};
/**
 * MockAgentManager - Adapter implementing AgentManager port for testing.
 *
 * Enables E2E testing of dispatch/coordination flows without spawning
 * real Claude agents. Simulates configurable agent behaviors and
 * emits proper lifecycle events.
 */
export class MockAgentManager implements AgentManager {
  /** Live agent records keyed by agent id. */
  private agents: Map<string, MockAgentRecord> = new Map();
  /** Per-agent-name scenario overrides; take precedence over the default. */
  private scenarioOverrides: Map<string, MockAgentScenario> = new Map();
  /** Scenario applied when no override exists for a spawned name. */
  private defaultScenario: MockAgentScenario;
  /** Optional event bus; when absent, no lifecycle events are emitted. */
  private eventBus?: EventBus;

  constructor(options?: { eventBus?: EventBus; defaultScenario?: MockAgentScenario }) {
    this.eventBus = options?.eventBus;
    this.defaultScenario = options?.defaultScenario ?? DEFAULT_SCENARIO;
  }

  /**
   * Set scenario override for a specific agent name.
   * When spawn() is called with this name, the override takes precedence.
   */
  setScenario(agentName: string, scenario: MockAgentScenario): void {
    this.scenarioOverrides.set(agentName, scenario);
  }

  /**
   * Clear scenario override for a specific agent name.
   */
  clearScenario(agentName: string): void {
    this.scenarioOverrides.delete(agentName);
  }

  /**
   * Spawn a new mock agent.
   *
   * Creates agent record in internal Map, schedules completion based on scenario.
   * Completion happens async via setTimeout (even if delay=0).
   *
   * @throws Error when an agent with the same name already exists (any status).
   */
  async spawn(options: SpawnAgentOptions): Promise<AgentInfo> {
    const { taskId } = options;
    const name = options.name ?? `agent-${taskId?.slice(0, 6) ?? 'noTask'}`;

    // Enforce name uniqueness across all known agents, regardless of status.
    for (const record of this.agents.values()) {
      if (record.info.name === name) {
        throw new Error(`Agent with name '${name}' already exists`);
      }
    }

    const agentId = randomUUID();
    const sessionId = randomUUID();
    const worktreeId = randomUUID();
    const now = new Date();

    // Determine scenario (per-name override takes precedence over the default).
    const scenario = this.scenarioOverrides.get(name) ?? this.defaultScenario;

    const info: AgentInfo = {
      id: agentId,
      // `name` is always defined at this point (fallback applied above), so
      // the former `name ?? \`mock-...\`` branch was dead code and is removed.
      name,
      taskId: taskId ?? null,
      initiativeId: options.initiativeId ?? null,
      sessionId,
      worktreeId,
      status: 'running',
      mode: options.mode ?? 'execute',
      provider: options.provider ?? 'claude',
      accountId: null,
      createdAt: now,
      updatedAt: now,
    };

    const record: MockAgentRecord = {
      info,
      scenario,
    };
    this.agents.set(agentId, record);

    // Emit spawned event
    if (this.eventBus) {
      const event: AgentSpawnedEvent = {
        type: 'agent:spawned',
        timestamp: new Date(),
        payload: {
          agentId,
          name,
          taskId: taskId ?? null,
          worktreeId,
          provider: options.provider ?? 'claude',
        },
      };
      this.eventBus.emit(event);
    }

    // Schedule completion async (even with delay=0, uses setTimeout for async behavior)
    this.scheduleCompletion(agentId, scenario);
    return info;
  }

  /**
   * Schedule agent completion based on scenario.
   * The timer handle is recorded on the agent record so stop()/delete()/clear()
   * can cancel a pending completion.
   */
  private scheduleCompletion(agentId: string, scenario: MockAgentScenario): void {
    const delay = scenario.delay ?? 0;
    const timer = setTimeout(() => {
      this.completeAgent(agentId, scenario);
    }, delay);
    // setTimeout callbacks never run synchronously, so the record is always
    // updated before the timer can fire.
    const record = this.agents.get(agentId);
    if (record) {
      record.completionTimer = timer;
    }
  }

  /**
   * Map agent mode to stopped event reason.
   */
  private getStoppedReason(mode: AgentMode): AgentStoppedEvent['payload']['reason'] {
    switch (mode) {
      case 'discuss': return 'context_complete';
      case 'plan': return 'plan_complete';
      case 'detail': return 'detail_complete';
      case 'refine': return 'refine_complete';
      default: return 'task_complete';
    }
  }

  /**
   * Complete agent based on scenario status.
   * - 'done'      -> status 'idle', result recorded, agent:stopped emitted
   * - 'error'     -> status 'crashed', failed result recorded, agent:crashed emitted
   * - 'questions' -> status 'waiting_for_input', pending questions stored,
   *                  agent:waiting emitted
   */
  private completeAgent(agentId: string, scenario: MockAgentScenario): void {
    const record = this.agents.get(agentId);
    if (!record) return;
    // The timer has fired; drop the stale handle so later stop()/delete()
    // calls do not clearTimeout() an already-consumed timer.
    record.completionTimer = undefined;
    const { info } = record;
    switch (scenario.status) {
      case 'done':
        record.result = {
          success: true,
          message: scenario.result ?? 'Task completed successfully',
          filesModified: scenario.filesModified,
        };
        record.info.status = 'idle';
        record.info.updatedAt = new Date();
        if (this.eventBus) {
          // Stopped reason is derived from the agent's mode (e.g. discuss ->
          // context_complete), not from the scenario.
          const reason = this.getStoppedReason(info.mode);
          const event: AgentStoppedEvent = {
            type: 'agent:stopped',
            timestamp: new Date(),
            payload: {
              agentId,
              name: info.name,
              taskId: info.taskId,
              reason,
            },
          };
          this.eventBus.emit(event);
        }
        break;
      case 'error':
        record.result = {
          success: false,
          message: scenario.error,
        };
        record.info.status = 'crashed';
        record.info.updatedAt = new Date();
        if (this.eventBus) {
          const event: AgentCrashedEvent = {
            type: 'agent:crashed',
            timestamp: new Date(),
            payload: {
              agentId,
              name: info.name,
              taskId: info.taskId,
              error: scenario.error,
            },
          };
          this.eventBus.emit(event);
        }
        break;
      case 'questions':
        record.info.status = 'waiting_for_input';
        record.info.updatedAt = new Date();
        record.pendingQuestions = {
          questions: scenario.questions,
        };
        if (this.eventBus) {
          const event: AgentWaitingEvent = {
            type: 'agent:waiting',
            timestamp: new Date(),
            payload: {
              agentId,
              name: info.name,
              taskId: info.taskId,
              sessionId: info.sessionId ?? '',
              questions: scenario.questions,
            },
          };
          this.eventBus.emit(event);
        }
        break;
    }
  }

  /**
   * Stop a running agent.
   *
   * Cancels scheduled completion, marks agent stopped, emits agent:stopped
   * event with reason 'user_requested'.
   *
   * @throws Error when the agent id is unknown.
   */
  async stop(agentId: string): Promise<void> {
    const record = this.agents.get(agentId);
    if (!record) {
      throw new Error(`Agent '${agentId}' not found`);
    }
    // Cancel any pending completion
    if (record.completionTimer) {
      clearTimeout(record.completionTimer);
      record.completionTimer = undefined;
    }
    record.info.status = 'stopped';
    record.info.updatedAt = new Date();
    if (this.eventBus) {
      const event: AgentStoppedEvent = {
        type: 'agent:stopped',
        timestamp: new Date(),
        payload: {
          agentId,
          name: record.info.name,
          taskId: record.info.taskId,
          reason: 'user_requested',
        },
      };
      this.eventBus.emit(event);
    }
  }

  /**
   * Delete an agent and clean up.
   * Removes from internal map and emits agent:deleted event.
   *
   * @throws Error when the agent id is unknown.
   */
  async delete(agentId: string): Promise<void> {
    const record = this.agents.get(agentId);
    if (!record) {
      throw new Error(`Agent '${agentId}' not found`);
    }
    // Cancel any pending completion
    if (record.completionTimer) {
      clearTimeout(record.completionTimer);
      record.completionTimer = undefined;
    }
    const name = record.info.name;
    this.agents.delete(agentId);
    if (this.eventBus) {
      const event: AgentDeletedEvent = {
        type: 'agent:deleted',
        timestamp: new Date(),
        payload: {
          agentId,
          name,
        },
      };
      this.eventBus.emit(event);
    }
  }

  /**
   * List all agents with their current status.
   */
  async list(): Promise<AgentInfo[]> {
    return Array.from(this.agents.values()).map((record) => record.info);
  }

  /**
   * Get a specific agent by ID, or null when unknown.
   */
  async get(agentId: string): Promise<AgentInfo | null> {
    const record = this.agents.get(agentId);
    return record ? record.info : null;
  }

  /**
   * Get a specific agent by name, or null when no agent has that name.
   */
  async getByName(name: string): Promise<AgentInfo | null> {
    for (const record of this.agents.values()) {
      if (record.info.name === name) {
        return record.info;
      }
    }
    return null;
  }

  /**
   * Resume an agent that's waiting for input.
   *
   * Re-runs the scenario for the resumed agent. Emits agent:resumed event.
   * Agent must be in 'waiting_for_input' status.
   *
   * @param agentId - Agent to resume
   * @param _answers - Map of question ID to user's answer. The mock does not
   *   inspect the answers; it always completes successfully after resume.
   * @throws Error when the agent is unknown, not waiting, or has no session.
   */
  async resume(agentId: string, _answers: Record<string, string>): Promise<void> {
    const record = this.agents.get(agentId);
    if (!record) {
      throw new Error(`Agent '${agentId}' not found`);
    }
    if (record.info.status !== 'waiting_for_input') {
      throw new Error(
        `Agent '${record.info.name}' is not waiting for input (status: ${record.info.status})`
      );
    }
    if (!record.info.sessionId) {
      throw new Error(`Agent '${record.info.name}' has no session to resume`);
    }
    // Update status to running, clear pending questions
    record.info.status = 'running';
    record.info.updatedAt = new Date();
    record.pendingQuestions = undefined;
    // Emit resumed event
    if (this.eventBus) {
      const event: AgentResumedEvent = {
        type: 'agent:resumed',
        timestamp: new Date(),
        payload: {
          agentId,
          name: record.info.name,
          taskId: record.info.taskId,
          sessionId: record.info.sessionId,
        },
      };
      this.eventBus.emit(event);
    }
    // Re-run scenario (after resume, typically completes successfully)
    // For testing, we use a new scenario that defaults to success.
    // Extract filesModified from original scenario if it was a 'done' type.
    const originalFilesModified =
      record.scenario.status === 'done' ? record.scenario.filesModified : undefined;
    const resumeScenario: MockAgentScenario = {
      status: 'done',
      delay: record.scenario.delay ?? 0,
      result: 'Resumed and completed successfully',
      filesModified: originalFilesModified,
    };
    this.scheduleCompletion(agentId, resumeScenario);
  }

  /**
   * Get the result of an agent's work.
   *
   * Only available after agent completes or crashes; null otherwise.
   */
  async getResult(agentId: string): Promise<AgentResult | null> {
    const record = this.agents.get(agentId);
    return record?.result ?? null;
  }

  /**
   * Get pending questions for an agent waiting for input.
   * Null when the agent is unknown or not currently waiting.
   */
  async getPendingQuestions(agentId: string): Promise<PendingQuestions | null> {
    const record = this.agents.get(agentId);
    return record?.pendingQuestions ?? null;
  }

  /**
   * Dismiss an agent.
   * Mock implementation just marks the agent as dismissed.
   *
   * @throws Error when the agent id is unknown.
   */
  async dismiss(agentId: string): Promise<void> {
    const record = this.agents.get(agentId);
    if (!record) {
      throw new Error(`Agent '${agentId}' not found`);
    }
    const now = new Date();
    record.info.userDismissedAt = now;
    record.info.updatedAt = now;
  }

  /**
   * Clear all agents and pending timers.
   * Useful for test cleanup.
   */
  clear(): void {
    for (const record of this.agents.values()) {
      if (record.completionTimer) {
        clearTimeout(record.completionTimer);
      }
    }
    this.agents.clear();
    this.scenarioOverrides.clear();
  }
}

View File

@@ -0,0 +1,174 @@
/**
* Focused test for completion handler mutex functionality.
* Tests the race condition fix without complex mocking.
*/
import { describe, it, beforeEach, expect } from 'vitest';
import { OutputHandler } from './output-handler.js';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
// Suite exercising the per-agent completion mutex in OutputHandler:
// concurrent handleCompletion() calls for the same agent must collapse to
// one execution, while different agents proceed independently.
describe('OutputHandler completion mutex', () => {
let outputHandler: OutputHandler;
let completionCallCount: number;
let callOrder: string[];
// Default agent for update return value
const defaultAgent = {
id: 'test-agent',
name: 'test-agent',
taskId: null,
provider: 'claude',
mode: 'execute' as const,
status: 'idle' as const,
worktreeId: 'test-worktree',
outputFilePath: null,
sessionId: null,
result: null,
pendingQuestions: null,
initiativeId: null,
accountId: null,
userDismissedAt: null,
pid: null,
exitCode: null,
createdAt: new Date(),
updatedAt: new Date(),
};
// Simple mock that tracks completion attempts.
// findById returns null so handleCompletion exits early right after the
// mutex check — the tests only observe whether findById ran at all.
// Individual tests monkey-patch findById below.
const mockRepository: AgentRepository = {
async findById() {
return null; // Return null to cause early exit after mutex check
},
async update(_id: string, data: any) { return { ...defaultAgent, ...data }; },
async create() { throw new Error('Not implemented'); },
async findAll() { throw new Error('Not implemented'); },
async findByStatus() { throw new Error('Not implemented'); },
async findByTaskId() { throw new Error('Not implemented'); },
async findByName() { throw new Error('Not implemented'); },
async findBySessionId() { throw new Error('Not implemented'); },
async delete() { throw new Error('Not implemented'); }
};
beforeEach(() => {
outputHandler = new OutputHandler(mockRepository);
completionCallCount = 0;
callOrder = [];
});
it('should prevent concurrent completion handling with mutex', async () => {
const agentId = 'test-agent';
// Mock the findById method to track calls and simulate processing time
let firstCallCompleted = false;
(mockRepository as any).findById = async (id: string) => {
completionCallCount++;
const callIndex = completionCallCount;
callOrder.push(`call-${callIndex}-start`);
if (callIndex === 1) {
// First call - simulate some processing time (real timers; keeps the
// mutex held long enough for the second call to hit it)
await new Promise(resolve => setTimeout(resolve, 50));
firstCallCompleted = true;
}
callOrder.push(`call-${callIndex}-end`);
return null; // Return null to exit early
};
// Start two concurrent completion handlers
const getAgentWorkdir = () => '/test/workdir';
const completion1Promise = outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
const completion2Promise = outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
await Promise.all([completion1Promise, completion2Promise]);
// Verify only one completion handler executed
expect(completionCallCount, 'Should only execute one completion handler').toBe(1);
expect(firstCallCompleted, 'First handler should have completed').toBe(true);
expect(callOrder).toEqual(['call-1-start', 'call-1-end']);
});
it('should allow sequential completion handling after first completes', async () => {
const agentId = 'test-agent';
// Mock findById to track calls
(mockRepository as any).findById = async (id: string) => {
completionCallCount++;
callOrder.push(`call-${completionCallCount}`);
return null; // Return null to exit early
};
const getAgentWorkdir = () => '/test/workdir';
// First completion
await outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
// Second completion (after first is done)
await outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
// Both should execute sequentially
expect(completionCallCount, 'Should execute both handlers sequentially').toBe(2);
expect(callOrder).toEqual(['call-1', 'call-2']);
});
it('should clean up mutex lock even when exception is thrown', async () => {
const agentId = 'test-agent';
let firstCallMadeThrowCall = false;
let secondCallCompleted = false;
// First call throws an error
(mockRepository as any).findById = async (id: string) => {
if (!firstCallMadeThrowCall) {
firstCallMadeThrowCall = true;
throw new Error('Database error');
} else {
secondCallCompleted = true;
return null;
}
};
const getAgentWorkdir = () => '/test/workdir';
// First call should throw but clean up mutex
await expect(outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir))
.rejects.toThrow('Database error');
expect(firstCallMadeThrowCall, 'First call should have thrown').toBe(true);
// Second call should succeed (proving mutex was cleaned up)
await outputHandler.handleCompletion(agentId, undefined, getAgentWorkdir);
expect(secondCallCompleted, 'Second call should have completed').toBe(true);
});
it('should use agent ID as mutex key', async () => {
const agentId1 = 'agent-1';
const agentId2 = 'agent-2';
// Both agents can process concurrently since they have different IDs
let agent1Started = false;
let agent2Started = false;
(mockRepository as any).findById = async (id: string) => {
if (id === agentId1) {
agent1Started = true;
await new Promise(resolve => setTimeout(resolve, 30));
} else if (id === agentId2) {
agent2Started = true;
await new Promise(resolve => setTimeout(resolve, 30));
}
return null;
};
const getAgentWorkdir = () => '/test/workdir';
// Start both agents concurrently - they should NOT block each other
const agent1Promise = outputHandler.handleCompletion(agentId1, undefined, getAgentWorkdir);
const agent2Promise = outputHandler.handleCompletion(agentId2, undefined, getAgentWorkdir);
await Promise.all([agent1Promise, agent2Promise]);
expect(agent1Started, 'Agent 1 should have started').toBe(true);
expect(agent2Started, 'Agent 2 should have started').toBe(true);
});
});

View File

@@ -0,0 +1,337 @@
/**
* OutputHandler Tests
*
* Test suite for the OutputHandler class, specifically focusing on
* question parsing and agent completion handling.
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { OutputHandler } from './output-handler.js';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { EventBus, DomainEvent, AgentWaitingEvent } from '../events/types.js';
import { getProvider } from './providers/registry.js';
// =============================================================================
// Test Helpers
// =============================================================================
/**
 * Build an EventBus test double that records every emitted event in the
 * exposed `emittedEvents` array. All bus methods are vi.fn() spies.
 */
function createMockEventBus(): EventBus & { emittedEvents: DomainEvent[] } {
  const emittedEvents: DomainEvent[] = [];
  const emit = vi.fn().mockImplementation(<T extends DomainEvent>(event: T): void => {
    emittedEvents.push(event);
  });
  return {
    emittedEvents,
    emit,
    on: vi.fn(),
    off: vi.fn(),
    once: vi.fn(),
  };
}
/**
 * Build an AgentRepository test double where every method is an independent
 * vi.fn() spy, so tests can stub return values and inspect calls per-method.
 */
function createMockAgentRepository() {
  return {
    create: vi.fn(),
    findById: vi.fn(),
    findAll: vi.fn(),
    findByName: vi.fn(),
    findByStatus: vi.fn(),
    update: vi.fn(),
    delete: vi.fn(),
  };
}
// =============================================================================
// Tests
// =============================================================================
describe('OutputHandler', () => {
  let outputHandler: OutputHandler;
  let mockAgentRepo: ReturnType<typeof createMockAgentRepository>;
  let eventBus: ReturnType<typeof createMockEventBus>;
  // Shared fixture: minimal agent row as the repository would return it.
  const mockAgent = {
    id: 'agent-123',
    name: 'test-agent',
    taskId: 'task-456',
    sessionId: 'session-789',
    provider: 'claude',
    mode: 'refine',
  };
  beforeEach(() => {
    mockAgentRepo = createMockAgentRepository();
    eventBus = createMockEventBus();
    outputHandler = new OutputHandler(
      mockAgentRepo as any,
      eventBus,
    );
    // Setup default mock behavior
    mockAgentRepo.findById.mockResolvedValue(mockAgent);
  });
  describe('processAgentOutput', () => {
    it('should correctly parse and handle questions from Claude CLI output', async () => {
      // Arrange: Create realistic Claude CLI output with questions (like fantastic-crane)
      const questionsResult = {
        status: "questions",
        questions: [
          {
            id: "q1",
            question: "What specific components are in the current admin UI? (e.g., tables, forms, modals, navigation)"
          },
          {
            id: "q2",
            question: "What does 'modern look' mean for you? (e.g., dark mode support, specific color scheme, animations)"
          },
          {
            id: "q3",
            question: "Are there any specific shadcn components you want to use or prioritize?"
          }
        ]
      };
      // The CLI wraps the agent signal JSON as a string inside `result`.
      const claudeOutput = JSON.stringify({
        type: "result",
        subtype: "success",
        is_error: false,
        session_id: "test-session-123",
        result: JSON.stringify(questionsResult),
        total_cost_usd: 0.05
      });
      const getAgentWorkdir = vi.fn().mockReturnValue('/test/workdir');
      const provider = getProvider('claude')!;
      // Act
      await outputHandler.processAgentOutput(
        mockAgent.id,
        claudeOutput,
        provider,
        getAgentWorkdir
      );
      // Assert: Agent should be updated with questions and waiting_for_input status
      expect(mockAgentRepo.update).toHaveBeenCalledWith(mockAgent.id, {
        pendingQuestions: JSON.stringify({
          questions: [
            {
              id: 'q1',
              question: 'What specific components are in the current admin UI? (e.g., tables, forms, modals, navigation)'
            },
            {
              id: 'q2',
              question: 'What does \'modern look\' mean for you? (e.g., dark mode support, specific color scheme, animations)'
            },
            {
              id: 'q3',
              question: 'Are there any specific shadcn components you want to use or prioritize?'
            }
          ]
        }),
        status: 'waiting_for_input'
      });
      // Exactly one update call expected: pendingQuestions and status are
      // written together in a single repository.update call.
      expect(mockAgentRepo.update).toHaveBeenCalledTimes(1);
      // Assert: AgentWaitingEvent should be emitted
      const waitingEvents = eventBus.emittedEvents.filter(e => e.type === 'agent:waiting') as AgentWaitingEvent[];
      expect(waitingEvents).toHaveLength(1);
      expect(waitingEvents[0].payload.questions).toEqual([
        {
          id: 'q1',
          question: 'What specific components are in the current admin UI? (e.g., tables, forms, modals, navigation)'
        },
        {
          id: 'q2',
          question: 'What does \'modern look\' mean for you? (e.g., dark mode support, specific color scheme, animations)'
        },
        {
          id: 'q3',
          question: 'Are there any specific shadcn components you want to use or prioritize?'
        }
      ]);
    });
    it('should handle malformed questions gracefully', async () => {
      // Arrange: Create output with malformed questions JSON
      const malformedOutput = JSON.stringify({
        type: "result",
        subtype: "success",
        is_error: false,
        session_id: "test-session",
        result: '{"status": "questions", "questions": [malformed json]}',
        total_cost_usd: 0.05
      });
      const getAgentWorkdir = vi.fn().mockReturnValue('/test/workdir');
      const provider = getProvider('claude')!;
      // Act & Assert: Should not throw, should handle error gracefully
      await expect(
        outputHandler.processAgentOutput(
          mockAgent.id,
          malformedOutput,
          provider,
          getAgentWorkdir
        )
      ).resolves.not.toThrow();
      // Should update status to crashed due to malformed JSON
      const updateCalls = mockAgentRepo.update.mock.calls;
      const crashedCall = updateCalls.find(call => call[1]?.status === 'crashed');
      expect(crashedCall).toBeDefined();
    });
    it('should correctly handle "done" status without questions', async () => {
      // Arrange: Create output with done status
      const doneOutput = JSON.stringify({
        type: "result",
        subtype: "success",
        is_error: false,
        session_id: "test-session",
        result: JSON.stringify({
          status: "done",
          message: "Task completed successfully"
        }),
        total_cost_usd: 0.05
      });
      const getAgentWorkdir = vi.fn().mockReturnValue('/test/workdir');
      const provider = getProvider('claude')!;
      // Act
      await outputHandler.processAgentOutput(
        mockAgent.id,
        doneOutput,
        provider,
        getAgentWorkdir
      );
      // Assert: Should not set waiting_for_input status or pendingQuestions
      const updateCalls = mockAgentRepo.update.mock.calls;
      const waitingCall = updateCalls.find(call => call[1]?.status === 'waiting_for_input');
      expect(waitingCall).toBeUndefined();
      const questionsCall = updateCalls.find(call => call[1]?.pendingQuestions);
      expect(questionsCall).toBeUndefined();
    });
  });
  describe('getPendingQuestions', () => {
    it('should retrieve and parse stored pending questions', async () => {
      // Arrange: questions are persisted as a JSON string on the agent row.
      const questionsPayload = {
        questions: [
          { id: 'q1', question: 'Test question 1?' },
          { id: 'q2', question: 'Test question 2?' }
        ]
      };
      mockAgentRepo.findById.mockResolvedValue({
        ...mockAgent,
        pendingQuestions: JSON.stringify(questionsPayload)
      });
      // Act
      const result = await outputHandler.getPendingQuestions(mockAgent.id);
      // Assert
      expect(result).toEqual(questionsPayload);
      expect(mockAgentRepo.findById).toHaveBeenCalledWith(mockAgent.id);
    });
    it('should return null when no pending questions exist', async () => {
      // Arrange
      mockAgentRepo.findById.mockResolvedValue({
        ...mockAgent,
        pendingQuestions: null
      });
      // Act
      const result = await outputHandler.getPendingQuestions(mockAgent.id);
      // Assert
      expect(result).toBeNull();
    });
  });
  // =============================================================================
  // formatAnswersAsPrompt Tests
  // =============================================================================
  describe('formatAnswersAsPrompt', () => {
    it('should format normal answers correctly', () => {
      const answers = {
        'q1': 'The admin UI has tables and forms',
        'q2': 'Modern means dark mode and clean aesthetics'
      };
      const result = outputHandler.formatAnswersAsPrompt(answers);
      expect(result).toBe(
        'Here are my answers to your questions:\n' +
        '[q1]: The admin UI has tables and forms\n' +
        '[q2]: Modern means dark mode and clean aesthetics'
      );
    });
    it('should handle instruction-enhanced answers for retry scenarios', () => {
      // The special __instruction__ key is hoisted above the answers block.
      const answers = {
        'q1': 'Fix the authentication bug',
        '__instruction__': 'IMPORTANT: Create a signal.json file when done'
      };
      const result = outputHandler.formatAnswersAsPrompt(answers);
      expect(result).toBe(
        'IMPORTANT: Create a signal.json file when done\n\n' +
        'Here are my answers to your questions:\n' +
        '[q1]: Fix the authentication bug'
      );
    });
    it('should handle instruction with whitespace correctly', () => {
      const answers = {
        'q1': 'Complete the task',
        '__instruction__': '  \n  Some instruction with whitespace  \n  '
      };
      const result = outputHandler.formatAnswersAsPrompt(answers);
      expect(result).toBe(
        'Some instruction with whitespace\n\n' +
        'Here are my answers to your questions:\n' +
        '[q1]: Complete the task'
      );
    });
    it('should work with only instruction and no real answers', () => {
      const answers = {
        '__instruction__': 'Retry with this instruction'
      };
      const result = outputHandler.formatAnswersAsPrompt(answers);
      expect(result).toBe(
        'Retry with this instruction\n\n' +
        'Here are my answers to your questions:\n'
      );
    });
    it('should work with empty answers object', () => {
      const answers = {};
      const result = outputHandler.formatAnswersAsPrompt(answers);
      expect(result).toBe(
        'Here are my answers to your questions:\n'
      );
    });
  });
});

View File

@@ -0,0 +1,928 @@
/**
* OutputHandler — Stream event processing, signal parsing, file reading, result capture.
*
* Extracted from MultiProviderAgentManager. Processes all output from agent
* subprocesses: stream events, agent signals, output files, and result/question
* retrieval.
*/
import { readFile } from 'node:fs/promises';
import { existsSync } from 'node:fs';
import { join } from 'node:path';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { ChangeSetRepository, CreateChangeSetEntryData } from '../db/repositories/change-set-repository.js';
import type { PhaseRepository } from '../db/repositories/phase-repository.js';
import type { TaskRepository } from '../db/repositories/task-repository.js';
import type { PageRepository } from '../db/repositories/page-repository.js';
import type {
EventBus,
AgentStoppedEvent,
AgentCrashedEvent,
AgentWaitingEvent,
} from '../events/index.js';
import type {
AgentResult,
AgentMode,
PendingQuestions,
QuestionItem,
} from './types.js';
import type { StreamEvent } from './providers/parsers/index.js';
import type { AgentProviderConfig } from './providers/types.js';
import { agentSignalSchema } from './schema.js';
import {
readSummary,
readPhaseFiles,
readTaskFiles,
readDecisionFiles,
readPageFiles,
readFrontmatterFile,
} from './file-io.js';
import { getProvider } from './providers/registry.js';
import { markdownToTiptapJson } from './markdown-to-tiptap.js';
import type { SignalManager } from './lifecycle/signal-manager.js';
import { createModuleLogger } from '../logger/index.js';
const log = createModuleLogger('output-handler');
/**
 * Tracks an active agent with its PID and file tailer.
 */
export interface ActiveAgent {
  agentId: string;
  /** OS process ID of the spawned agent subprocess. */
  pid: number;
  tailer: import('./file-tailer.js').FileTailer;
  /** Path of the JSONL file the agent process writes its output to. */
  outputFilePath: string;
  /** Actual working directory the agent process runs in (may differ from getAgentWorkdir for standalone agents) */
  agentCwd?: string;
  /** In-memory final result, when already captured (checked before the DB copy). */
  result?: AgentResult;
  /** In-memory pending questions, when already captured (checked before the DB copy). */
  pendingQuestions?: PendingQuestions;
  /** Result text captured from the stream 'result' event. */
  streamResultText?: string;
  /** Session ID captured from the stream ('init' or 'result' event). */
  streamSessionId?: string;
  /** Cost reported by the stream 'result' event, in USD. */
  streamCostUsd?: number;
  /** True when the stream result indicated an error (e.g. auth failure) */
  streamIsError?: boolean;
  /** Cancel handle for polling timer — call to stop polling on cleanup */
  cancelPoll?: () => void;
}
/**
 * Result structure from Claude CLI with --output-format json.
 */
interface ClaudeCliResult {
  type: 'result';
  subtype: 'success' | 'error';
  /** True for CLI-level failures (auth errors, usage limits, …). */
  is_error: boolean;
  session_id: string;
  /** Result payload as text; for signal output this is itself a JSON string. */
  result: string;
  /** Pre-parsed signal when the CLI emitted structured output (preferred over `result`). */
  structured_output?: unknown;
  total_cost_usd?: number;
}
export class OutputHandler {
  // Per-agent read cursor into the agent's output file (see readCompleteLines).
  private filePositions = new Map<string, number>();
  private completionLocks = new Set<string>(); // Track agents currently being processed
  /**
   * @param repository          Agent persistence (required).
   * @param eventBus            Optional bus for lifecycle events.
   * @param changeSetRepository Optional; enables change-set recording.
   * @param phaseRepository     Optional; required for 'plan' mode writes.
   * @param taskRepository      Optional; required for 'detail' mode writes.
   * @param pageRepository      Optional; required for 'refine' mode writes.
   * @param signalManager       Optional; preferred signal.json reader when set.
   */
  constructor(
    private repository: AgentRepository,
    private eventBus?: EventBus,
    private changeSetRepository?: ChangeSetRepository,
    private phaseRepository?: PhaseRepository,
    private taskRepository?: TaskRepository,
    private pageRepository?: PageRepository,
    private signalManager?: SignalManager,
  ) {}
/**
* Validate that a signal file is complete and properly formatted.
*/
private async validateSignalFile(filePath: string): Promise<boolean> {
try {
const content = await readFile(filePath, 'utf-8');
const trimmed = content.trim();
if (!trimmed) return false;
// Check if JSON is complete (ends with } or ])
const endsCorrectly = trimmed.endsWith('}') || trimmed.endsWith(']');
if (!endsCorrectly) return false;
// Try to parse as JSON to ensure it's valid
JSON.parse(trimmed);
return true;
} catch {
return false;
}
}
/**
* Read complete lines from a file, avoiding partial lines that might still be writing.
* This eliminates race conditions when agents are still writing output.
*/
private async readCompleteLines(filePath: string, fromPosition: number = 0): Promise<{ content: string; lastPosition: number }> {
try {
const content = await readFile(filePath, 'utf-8');
if (fromPosition >= content.length) {
return { content: '', lastPosition: fromPosition };
}
// Get content from our last read position
const newContent = content.slice(fromPosition);
// Split into lines
const lines = newContent.split('\n');
// If file doesn't end with newline, last element is potentially incomplete
// Only process complete lines (all but the last, unless file ends with \n)
const hasTrailingNewline = newContent.endsWith('\n');
const completeLines = hasTrailingNewline ? lines : lines.slice(0, -1);
// Calculate new position (only count complete lines)
const completeLinesContent = completeLines.join('\n') + (completeLines.length > 0 && hasTrailingNewline ? '\n' : '');
const newPosition = fromPosition + Buffer.byteLength(completeLinesContent, 'utf-8');
return {
content: completeLinesContent,
lastPosition: newPosition
};
} catch (err) {
log.debug({ filePath, err: err instanceof Error ? err.message : String(err) }, 'failed to read output file lines');
return { content: '', lastPosition: fromPosition };
}
}
/**
* Handle a standardized stream event from a parser.
*/
handleStreamEvent(
agentId: string,
event: StreamEvent,
active: ActiveAgent | undefined,
): void {
switch (event.type) {
case 'init':
if (active && event.sessionId) {
active.streamSessionId = event.sessionId;
this.repository.update(agentId, { sessionId: event.sessionId }).catch((err) => {
log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to update session ID');
});
}
break;
case 'text_delta':
// Text deltas are now streamed via DB log chunks + EventBus in manager.createLogChunkCallback
break;
case 'tool_use_start':
log.debug({ agentId, tool: event.name, toolId: event.id }, 'tool use started');
break;
case 'result':
if (active) {
active.streamResultText = event.text;
active.streamCostUsd = event.costUsd;
active.streamIsError = event.isError === true;
if (!active.streamSessionId && event.sessionId) {
active.streamSessionId = event.sessionId;
}
}
break;
case 'error':
log.error({ agentId, error: event.message }, 'stream error event');
break;
case 'turn_end':
log.debug({ agentId, stopReason: event.stopReason }, 'turn ended');
break;
}
}
  /**
   * Handle completion of a detached agent.
   * Processes the final result from the stream data captured by the tailer.
   *
   * Resolution order: stream error → signal.json → incremental file read →
   * validated full file read → error ("No output received").
   *
   * RACE CONDITION FIX: Uses a completion lock to prevent duplicate processing.
   * Both the polling handler (handleDetachedAgentCompletion) and crash handler
   * (handleProcessCrashed) can call this method when a process exits with non-zero code.
   * The mutex ensures only one handler processes the completion per agent.
   */
  async handleCompletion(
    agentId: string,
    active: ActiveAgent | undefined,
    getAgentWorkdir: (alias: string) => string,
  ): Promise<void> {
    // CRITICAL: Prevent race condition - only one completion handler per agent
    if (this.completionLocks.has(agentId)) {
      log.debug({ agentId }, 'completion already being processed - skipping duplicate');
      return;
    }
    this.completionLocks.add(agentId);
    try {
      const agent = await this.repository.findById(agentId);
      if (!agent) return;
      const provider = getProvider(agent.provider);
      if (!provider) return;
      log.debug({ agentId }, 'detached agent completed');
      // Resolve actual agent working directory — standalone agents run in a
      // "workspace/" subdirectory inside getAgentWorkdir, so prefer agentCwd
      // recorded at spawn time when available.
      const agentWorkdir = active?.agentCwd ?? getAgentWorkdir(agent.worktreeId);
      const outputDir = join(agentWorkdir, '.cw', 'output');
      const expectedPwdFile = join(agentWorkdir, '.cw', 'expected-pwd.txt');
      const diagnosticFile = join(agentWorkdir, '.cw', 'spawn-diagnostic.json');
      const outputDirExists = existsSync(outputDir);
      const expectedPwdExists = existsSync(expectedPwdFile);
      const diagnosticExists = existsSync(diagnosticFile);
      // Diagnostic logging only — a failed verification does not abort processing.
      log.info({
        agentId,
        agentWorkdir,
        outputDirExists,
        expectedPwdExists,
        diagnosticExists,
        verification: outputDirExists ? 'PASS' : 'FAIL'
      }, 'agent workdir verification completed');
      if (!outputDirExists) {
        log.warn({
          agentId,
          agentWorkdir
        }, 'No output files found in agent workdir! Agent may have run in wrong location.');
      }
      let signalText = active?.streamResultText;
      // If the stream result indicated an error (e.g. auth failure, usage limit),
      // route directly to error handling instead of trying to parse as signal JSON
      if (signalText && active?.streamIsError) {
        log.warn({ agentId, error: signalText }, 'agent returned error result');
        await this.handleAgentError(agentId, new Error(signalText), provider, getAgentWorkdir);
        return;
      }
      if (!signalText) {
        try {
          const outputFilePath = active?.outputFilePath ?? '';
          if (outputFilePath) {
            // First, check for robust signal.json completion before attempting incremental reading
            log.debug({ agentId, worktreeId: agent.worktreeId, agentWorkdir }, 'checking signal completion');
            // readSignalCompletion returns the raw JSON string (or null) —
            // used here only as a truthy "signal exists and is valid" flag.
            const hasSignalCompletion = await this.readSignalCompletion(agentWorkdir);
            log.debug({ agentId, agentWorkdir, hasSignalCompletion }, 'signal completion check result');
            if (hasSignalCompletion) {
              const signalPath = join(agentWorkdir, '.cw/output/signal.json');
              const signalContent = await readFile(signalPath, 'utf-8');
              log.debug({ agentId, signalPath }, 'detected completion via signal.json, processing');
              await this.processSignalAndFiles(agentId, signalContent, agent.mode as AgentMode, getAgentWorkdir, active?.streamSessionId);
              return;
            } else {
              log.debug({ agentId, agentWorkdir }, 'no signal completion found, proceeding with raw output');
            }
            // Read only complete lines from the file, avoiding race conditions
            const lastPosition = this.filePositions.get(agentId) || 0;
            const { content: fileContent, lastPosition: newPosition } = await this.readCompleteLines(outputFilePath, lastPosition);
            if (fileContent.trim()) {
              this.filePositions.set(agentId, newPosition);
              await this.processAgentOutput(agentId, fileContent, provider, getAgentWorkdir);
              return;
            }
            // If no new complete lines, but file might still be writing, try again with validation
            if (await this.validateSignalFile(outputFilePath)) {
              const fullContent = await readFile(outputFilePath, 'utf-8');
              if (fullContent.trim() && fullContent.length > newPosition) {
                // File is complete and has content beyond what we've read
                await this.processAgentOutput(agentId, fullContent, provider, getAgentWorkdir);
                return;
              }
            }
          }
        } catch { /* file empty or missing */ }
        log.debug({ agentId }, 'no result from stream or file, marking as error');
        await this.handleAgentError(agentId, new Error('No output received'), provider, getAgentWorkdir);
        return;
      }
      // Check for signal.json file first, then fall back to stream text
      if (await this.readSignalCompletion(agentWorkdir)) {
        const signalPath = join(agentWorkdir, '.cw/output/signal.json');
        const signalContent = await readFile(signalPath, 'utf-8');
        log.debug({ agentId, signalPath }, 'using signal.json content for completion');
        await this.processSignalAndFiles(agentId, signalContent, agent.mode as AgentMode, getAgentWorkdir, active?.streamSessionId);
      } else {
        log.debug({ agentId }, 'using stream text for completion (no signal.json found)');
        await this.processSignalAndFiles(
          agentId,
          signalText,
          agent.mode as AgentMode,
          getAgentWorkdir,
          active?.streamSessionId,
        );
      }
    } finally {
      // Always release the lock and reset the read cursor for this agent.
      this.completionLocks.delete(agentId);
      this.filePositions.delete(agentId);
    }
  }
  /**
   * Process agent signal JSON and read output files.
   * Universal handler for all providers and modes.
   *
   * Two-step validation (JSON parse, then Zod schema) so failures can be
   * logged distinctly; either failure marks the agent 'crashed'.
   *
   * @param signalText Raw signal JSON text ({status: 'done'|'questions'|'error', ...}).
   * @param sessionId  Session ID from the stream, forwarded to question handling.
   */
  async processSignalAndFiles(
    agentId: string,
    signalText: string,
    mode: AgentMode,
    getAgentWorkdir: (alias: string) => string,
    sessionId?: string,
  ): Promise<void> {
    const agent = await this.repository.findById(agentId);
    if (!agent) return;
    let signal;
    let parsed;
    // Step 1: JSON parsing
    try {
      parsed = JSON.parse(signalText.trim());
      log.debug({ agentId }, 'signal JSON parsing successful');
    } catch (jsonError) {
      log.error({
        agentId,
        signalText: signalText.trim(),
        error: jsonError instanceof Error ? jsonError.message : String(jsonError),
        stack: jsonError instanceof Error ? jsonError.stack : undefined
      }, 'signal JSON parsing failed');
      await this.repository.update(agentId, { status: 'crashed' });
      this.emitCrashed(agent, 'Failed to parse agent signal JSON');
      return;
    }
    // Step 2: Schema validation
    try {
      signal = agentSignalSchema.parse(parsed);
      log.debug({ agentId, signalStatus: signal.status }, 'signal schema validation passed');
    } catch (schemaError) {
      log.error({
        agentId,
        signalText: signalText.trim(),
        parsed,
        error: schemaError instanceof Error ? schemaError.message : String(schemaError),
        stack: schemaError instanceof Error ? schemaError.stack : undefined
      }, 'signal schema validation failed');
      await this.repository.update(agentId, { status: 'crashed' });
      this.emitCrashed(agent, 'Failed to validate agent signal schema');
      return;
    }
    // Dispatch on the validated signal status.
    switch (signal.status) {
      case 'done':
        await this.processOutputFiles(agentId, agent, mode, getAgentWorkdir);
        break;
      case 'questions':
        await this.handleQuestions(agentId, agent, signal.questions, sessionId);
        break;
      case 'error':
        await this.handleSignalError(agentId, agent, signal.error);
        break;
    }
  }
  /**
   * Process output files from agent workdir after successful completion.
   * Performs direct writes to entities and records change sets.
   *
   * Per mode (writes happen only when the matching repository AND an
   * initiativeId are available; otherwise the raw file contents are
   * serialized into the result message instead):
   *  - plan:    create phases, then phase dependencies
   *  - detail:  create tasks (deduped by name within the phase)
   *  - discuss: decisions are only echoed into the result message
   *  - refine:  update existing pages
   * Finally persists the result, sets status 'idle', and emits agent:stopped.
   */
  private async processOutputFiles(
    agentId: string,
    agent: { id: string; name: string; taskId: string | null; worktreeId: string; mode: string; initiativeId?: string | null },
    mode: AgentMode,
    getAgentWorkdir: (alias: string) => string,
  ): Promise<void> {
    const agentWorkdir = getAgentWorkdir(agent.worktreeId);
    const summary = readSummary(agentWorkdir);
    const initiativeId = agent.initiativeId;
    const canWriteChangeSets = this.changeSetRepository && initiativeId;
    let resultMessage = summary?.body ?? 'Task completed';
    switch (mode) {
      case 'plan': {
        const phases = readPhaseFiles(agentWorkdir);
        if (canWriteChangeSets && this.phaseRepository && phases.length > 0) {
          const entries: CreateChangeSetEntryData[] = [];
          // First pass: create phases
          for (const [i, p] of phases.entries()) {
            try {
              const tiptapContent = p.body ? JSON.stringify(markdownToTiptapJson(p.body)) : undefined;
              const created = await this.phaseRepository.create({
                id: p.id ?? undefined,
                initiativeId,
                name: p.title,
                content: tiptapContent,
              });
              entries.push({
                entityType: 'phase',
                entityId: created.id,
                action: 'create',
                newState: JSON.stringify(created),
                sortOrder: i,
              });
              this.eventBus?.emit({
                type: 'phase:started' as const,
                timestamp: new Date(),
                payload: { phaseId: created.id, initiativeId },
              });
            } catch (err) {
              // Best-effort: a failed phase is logged and skipped, not fatal.
              log.warn({ agentId, phase: p.title, err: err instanceof Error ? err.message : String(err) }, 'failed to create phase');
            }
          }
          // Second pass: create phase dependencies
          let depSortOrder = entries.length;
          for (const p of phases) {
            const phaseId = p.id;
            if (!phaseId || !Array.isArray(p.dependencies)) continue;
            for (const depFileId of p.dependencies) {
              try {
                await this.phaseRepository.createDependency(phaseId, depFileId);
                entries.push({
                  entityType: 'phase_dependency',
                  entityId: `${phaseId}:${depFileId}`,
                  action: 'create',
                  newState: JSON.stringify({ phaseId, dependsOnPhaseId: depFileId }),
                  sortOrder: depSortOrder++,
                });
              } catch (err) {
                log.warn({ agentId, phaseId, depFileId, err: err instanceof Error ? err.message : String(err) }, 'failed to create phase dependency');
              }
            }
          }
          if (entries.length > 0) {
            try {
              const cs = await this.changeSetRepository!.createWithEntries({
                agentId,
                agentName: agent.name,
                initiativeId,
                mode: 'plan',
                summary: summary?.body ?? `Created ${phases.length} phases`,
              }, entries);
              this.eventBus?.emit({
                type: 'changeset:created' as const,
                timestamp: new Date(),
                payload: { changeSetId: cs.id, initiativeId, agentId, mode: 'plan', entryCount: entries.length },
              });
            } catch (err) {
              log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to record change set after successful writes');
            }
          }
          resultMessage = summary?.body ?? `${phases.length} phases created`;
        } else {
          resultMessage = JSON.stringify({ summary: summary?.body, phases });
        }
        break;
      }
      case 'detail': {
        const tasks = readTaskFiles(agentWorkdir);
        if (canWriteChangeSets && this.taskRepository && tasks.length > 0) {
          // Phase context comes from the agent's input file, if provided.
          const phaseInput = readFrontmatterFile(join(agentWorkdir, '.cw', 'input', 'phase.md'));
          const phaseId = (phaseInput?.data?.id as string) ?? null;
          const entries: CreateChangeSetEntryData[] = [];
          // Load existing tasks for dedup — prevents duplicates when multiple agents finish concurrently
          const existingTasks = phaseId ? await this.taskRepository.findByPhaseId(phaseId) : [];
          const existingNames = new Set(existingTasks.map(t => t.name));
          for (const [i, t] of tasks.entries()) {
            if (existingNames.has(t.title)) {
              log.info({ agentId, task: t.title, phaseId }, 'skipped duplicate task');
              continue;
            }
            try {
              const created = await this.taskRepository.create({
                initiativeId,
                phaseId,
                parentTaskId: agent.taskId ?? null,
                name: t.title,
                description: t.body ?? undefined,
                category: (t.category as any) ?? 'execute',
                type: (t.type as any) ?? 'auto',
              });
              existingNames.add(t.title); // prevent dupes within same agent output
              entries.push({
                entityType: 'task',
                entityId: created.id,
                action: 'create',
                newState: JSON.stringify(created),
                sortOrder: i,
              });
              // NOTE(review): emits 'task:completed' for a freshly *created*
              // task — confirm downstream consumers expect this for detail mode.
              this.eventBus?.emit({
                type: 'task:completed' as const,
                timestamp: new Date(),
                payload: { taskId: created.id, agentId, success: true, message: 'Task created by detail' },
              });
            } catch (err) {
              log.warn({ agentId, task: t.title, err: err instanceof Error ? err.message : String(err) }, 'failed to create task');
            }
          }
          if (entries.length > 0) {
            try {
              const cs = await this.changeSetRepository!.createWithEntries({
                agentId,
                agentName: agent.name,
                initiativeId,
                mode: 'detail',
                summary: summary?.body ?? `Created ${tasks.length} tasks`,
              }, entries);
              this.eventBus?.emit({
                type: 'changeset:created' as const,
                timestamp: new Date(),
                payload: { changeSetId: cs.id, initiativeId, agentId, mode: 'detail', entryCount: entries.length },
              });
            } catch (err) {
              log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to record change set after successful writes');
            }
          }
          resultMessage = summary?.body ?? `${tasks.length} tasks created`;
        } else {
          resultMessage = JSON.stringify({ summary: summary?.body, tasks });
        }
        break;
      }
      case 'discuss': {
        // Discuss mode performs no entity writes — decisions are surfaced
        // via the result message only.
        const decisions = readDecisionFiles(agentWorkdir);
        resultMessage = JSON.stringify({ summary: summary?.body, decisions });
        break;
      }
      case 'refine': {
        const pages = readPageFiles(agentWorkdir);
        if (canWriteChangeSets && this.pageRepository && pages.length > 0) {
          const entries: CreateChangeSetEntryData[] = [];
          for (const [i, p] of pages.entries()) {
            try {
              if (!p.pageId) continue;
              const existing = await this.pageRepository.findById(p.pageId);
              if (!existing) {
                log.warn({ agentId, pageId: p.pageId }, 'page not found for refine update');
                continue;
              }
              // Snapshot prior state so the change set supports rollback/diff.
              const previousState = JSON.stringify(existing);
              const tiptapJson = markdownToTiptapJson(p.body || '');
              await this.pageRepository.update(p.pageId, {
                content: JSON.stringify(tiptapJson),
                title: p.title,
              });
              const updated = await this.pageRepository.findById(p.pageId);
              entries.push({
                entityType: 'page',
                entityId: p.pageId,
                action: 'update',
                previousState,
                newState: JSON.stringify(updated),
                sortOrder: i,
              });
              this.eventBus?.emit({
                type: 'page:updated' as const,
                timestamp: new Date(),
                payload: { pageId: p.pageId, initiativeId, title: p.title },
              });
            } catch (err) {
              log.warn({ agentId, pageId: p.pageId, err: err instanceof Error ? err.message : String(err) }, 'failed to update page');
            }
          }
          if (entries.length > 0) {
            try {
              const cs = await this.changeSetRepository!.createWithEntries({
                agentId,
                agentName: agent.name,
                initiativeId,
                mode: 'refine',
                summary: summary?.body ?? `Updated ${entries.length} pages`,
              }, entries);
              this.eventBus?.emit({
                type: 'changeset:created' as const,
                timestamp: new Date(),
                payload: { changeSetId: cs.id, initiativeId, agentId, mode: 'refine', entryCount: entries.length },
              });
            } catch (err) {
              log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to record change set after successful writes');
            }
          }
          resultMessage = summary?.body ?? `${entries.length} pages updated`;
        } else {
          resultMessage = JSON.stringify({ summary: summary?.body, pages });
        }
        break;
      }
    }
    // Persist the success result and mark the agent idle again.
    const resultPayload: AgentResult = {
      success: true,
      message: resultMessage,
      filesModified: summary?.filesModified,
    };
    await this.repository.update(agentId, { result: JSON.stringify(resultPayload), status: 'idle' });
    const reason = this.getStoppedReason(mode);
    if (this.eventBus) {
      const event: AgentStoppedEvent = {
        type: 'agent:stopped',
        timestamp: new Date(),
        payload: {
          agentId,
          name: agent.name,
          taskId: agent.taskId ?? '',
          reason,
        },
      };
      this.eventBus.emit(event);
    }
    // NOTE: redundant trailing return — the method is void.
    return;
  }
/**
* Handle questions signal from agent.
*/
async handleQuestions(
agentId: string,
agent: { id: string; name: string; taskId: string | null; sessionId: string | null },
questions: QuestionItem[],
sessionId?: string,
): Promise<void> {
const questionsPayload: PendingQuestions = { questions };
await this.repository.update(agentId, { pendingQuestions: JSON.stringify(questionsPayload), status: 'waiting_for_input' });
if (this.eventBus) {
const event: AgentWaitingEvent = {
type: 'agent:waiting',
timestamp: new Date(),
payload: {
agentId,
name: agent.name,
taskId: agent.taskId ?? '',
sessionId: sessionId ?? agent.sessionId ?? '',
questions,
},
};
this.eventBus.emit(event);
}
}
/**
* Handle error signal from agent.
*/
async handleSignalError(
agentId: string,
agent: { id: string; name: string; taskId: string | null },
error: string,
): Promise<void> {
const errorResult: AgentResult = { success: false, message: error };
await this.repository.update(agentId, {
result: JSON.stringify(errorResult),
status: 'crashed'
});
this.emitCrashed(agent, error);
}
/**
* Map agent mode to stopped event reason.
*/
getStoppedReason(mode: AgentMode): AgentStoppedEvent['payload']['reason'] {
switch (mode) {
case 'discuss': return 'context_complete';
case 'plan': return 'plan_complete';
case 'detail': return 'detail_complete';
case 'refine': return 'refine_complete';
default: return 'task_complete';
}
}
  /**
   * Process raw output from an agent (from file or direct).
   *
   * Extracts the session ID per the provider's extraction config, then —
   * for the Claude provider — locates the final 'result' JSONL event and
   * unwraps its embedded signal JSON; other providers' output is treated
   * as the signal text directly.
   */
  async processAgentOutput(
    agentId: string,
    rawOutput: string,
    provider: AgentProviderConfig,
    getAgentWorkdir: (alias: string) => string,
  ): Promise<void> {
    const agent = await this.repository.findById(agentId);
    if (!agent) return;
    // Extract session ID using provider's extraction config
    let sessionId: string | null = null;
    if (provider.sessionId) {
      const outputLines = rawOutput.trim().split('\n');
      if (provider.sessionId.extractFrom === 'result') {
        // Find the result line in JSONL output
        for (const line of outputLines) {
          try {
            const parsed = JSON.parse(line);
            if (parsed.type === 'result' || parsed[provider.sessionId.field]) {
              sessionId = parsed[provider.sessionId.field] ?? null;
              if (sessionId) break;
            }
          } catch { /* intentional: skip non-JSON JSONL lines */ }
        }
      } else if (provider.sessionId.extractFrom === 'event') {
        // Last matching event wins (no break — later events overwrite).
        for (const line of outputLines) {
          try {
            const event = JSON.parse(line);
            if (event.type === provider.sessionId.eventType) {
              sessionId = event[provider.sessionId.field] ?? null;
            }
          } catch { /* intentional: skip non-JSON JSONL lines */ }
        }
      }
    }
    if (sessionId) {
      await this.repository.update(agentId, { sessionId });
    }
    log.debug({ agentId, provider: provider.name, hasSessionId: !!sessionId }, 'processing agent output');
    if (provider.name === 'claude') {
      // rawOutput may be a single JSON object or multi-line JSONL — find the result line
      let cliResult: ClaudeCliResult | null = null;
      const lines = rawOutput.trim().split('\n');
      for (const line of lines) {
        try {
          const parsed = JSON.parse(line);
          if (parsed.type === 'result') {
            cliResult = parsed;
          }
        } catch { /* intentional: skip non-JSON JSONL lines */ }
      }
      if (!cliResult) {
        log.error({ agentId }, 'no result event found in agent output');
        await this.handleAgentError(agentId, new Error('No result event in output'), provider, getAgentWorkdir);
        return;
      }
      // Handle error results (auth failure, usage limits, etc.)
      if (cliResult.is_error) {
        log.warn({ agentId, error: cliResult.result }, 'agent returned error result from file');
        await this.handleAgentError(agentId, new Error(cliResult.result), provider, getAgentWorkdir);
        return;
      }
      let signalText: string;
      try {
        // Prefer structured output when present; otherwise the `result`
        // string is itself the signal JSON.
        const signal = cliResult.structured_output ?? JSON.parse(cliResult.result);
        signalText = JSON.stringify(signal);
      } catch (parseErr) {
        log.error({ agentId, err: parseErr instanceof Error ? parseErr.message : String(parseErr) }, 'failed to parse agent signal from result');
        await this.handleAgentError(agentId, new Error('Failed to parse agent signal'), provider, getAgentWorkdir);
        return;
      }
      await this.processSignalAndFiles(agentId, signalText, agent.mode as AgentMode, getAgentWorkdir, sessionId ?? undefined);
    } else {
      // Non-Claude providers emit the signal JSON directly as their output.
      await this.processSignalAndFiles(agentId, rawOutput, agent.mode as AgentMode, getAgentWorkdir, sessionId ?? undefined);
    }
  }
  /**
   * Handle agent errors: persist a crashed status with a failure result and
   * emit an 'agent:crashed' event.
   *
   * NOTE(review): a previous doc claimed this "returns true if error was an
   * exhaustion error (caller should attempt failover)", but the method
   * returns Promise<void> and performs no exhaustion detection here —
   * confirm whether failover/exhaustion handling moved elsewhere.
   */
  async handleAgentError(
    agentId: string,
    error: unknown,
    provider: AgentProviderConfig,
    _getAgentWorkdir: (alias: string) => string,
  ): Promise<void> {
    const errorMessage = error instanceof Error ? error.message : String(error);
    const agent = await this.repository.findById(agentId);
    if (!agent) return;
    log.error({ agentId, err: errorMessage }, 'agent error');
    const errorResult: AgentResult = {
      success: false,
      message: errorMessage,
    };
    await this.repository.update(agentId, {
      status: 'crashed',
      result: JSON.stringify(errorResult)
    });
    if (this.eventBus) {
      const event: AgentCrashedEvent = {
        type: 'agent:crashed',
        timestamp: new Date(),
        payload: {
          agentId,
          name: agent.name,
          taskId: agent.taskId ?? '',
          error: errorMessage,
        },
      };
      this.eventBus.emit(event);
    }
  }
/**
* Format answers map as structured prompt.
* Handles special __instruction__ key for retry scenarios.
*/
formatAnswersAsPrompt(answers: Record<string, string>): string {
const instruction = answers['__instruction__'];
const realAnswers = { ...answers };
delete realAnswers['__instruction__'];
const lines = Object.entries(realAnswers).map(
([questionId, answer]) => `[${questionId}]: ${answer}`,
);
const basePrompt = `Here are my answers to your questions:\n${lines.join('\n')}`;
return instruction ? `${instruction.trim()}\n\n${basePrompt}` : basePrompt;
}
/**
* Get the result of an agent's work.
*/
async getResult(agentId: string, active?: ActiveAgent): Promise<AgentResult | null> {
if (active?.result) return active.result;
const agent = await this.repository.findById(agentId);
return agent?.result ? JSON.parse(agent.result) : null;
}
/**
* Get pending questions for an agent waiting for input.
*/
async getPendingQuestions(agentId: string, active?: ActiveAgent): Promise<PendingQuestions | null> {
if (active?.pendingQuestions) return active.pendingQuestions;
const agent = await this.repository.findById(agentId);
return agent?.pendingQuestions ? JSON.parse(agent.pendingQuestions) : null;
}
// =========================================================================
// Private Helpers
// =========================================================================
/**
* Read signal.json and return its content if the agent completed successfully.
* Uses SignalManager for atomic read-and-validate when available.
* Returns the raw JSON string on success, null if missing/invalid.
*/
private async readSignalCompletion(agentWorkdir: string): Promise<string | null> {
// Prefer SignalManager (unified implementation with proper validation)
if (this.signalManager) {
const signal = await this.signalManager.readSignal(agentWorkdir);
return signal ? JSON.stringify(signal) : null;
}
// Fallback: inline read (for tests that don't inject SignalManager)
try {
const signalPath = join(agentWorkdir, '.cw/output/signal.json');
if (!existsSync(signalPath)) return null;
const signalContent = await readFile(signalPath, 'utf-8');
const signal = JSON.parse(signalContent);
if (signal.status === 'done' || signal.status === 'questions' || signal.status === 'error') {
return signalContent;
}
return null;
} catch (err) {
log.debug({ agentWorkdir, err: err instanceof Error ? err.message : String(err) }, 'failed to read or parse signal.json');
return null;
}
}
private emitCrashed(agent: { id: string; name: string; taskId: string | null }, error: string): void {
if (this.eventBus) {
const event: AgentCrashedEvent = {
type: 'agent:crashed',
timestamp: new Date(),
payload: {
agentId: agent.id,
name: agent.name,
taskId: agent.taskId ?? '',
error,
},
};
this.eventBus.emit(event);
}
}
}

View File

@@ -0,0 +1,386 @@
/**
* ProcessManager Unit Tests
*
* Tests for ProcessManager class focusing on working directory handling,
* command building, and spawn validation.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ProcessManager } from './process-manager.js';
import type { ProjectRepository } from '../db/repositories/project-repository.js';
// ---------------------------------------------------------------------------
// Module mocks. NOTE: vitest hoists vi.mock() calls above all imports, so the
// import statements further down resolve to these mocked modules.
// ---------------------------------------------------------------------------
// Mock child_process.spawn
vi.mock('node:child_process', () => ({
  spawn: vi.fn(),
}));
// Mock fs operations
vi.mock('node:fs', () => ({
  writeFileSync: vi.fn(),
  mkdirSync: vi.fn(),
  openSync: vi.fn((path) => {
    // Return different fd numbers for stdout and stderr
    // (spawnDetached wires these into the child's stdio array).
    if (path.includes('output.jsonl')) return 99;
    if (path.includes('stderr.log')) return 100;
    return 101;
  }),
  closeSync: vi.fn(),
  existsSync: vi.fn(),
}));
// Mock FileTailer
vi.mock('./file-tailer.js', () => ({
  FileTailer: class MockFileTailer {
    start = vi.fn().mockResolvedValue(undefined);
    stop = vi.fn().mockResolvedValue(undefined);
  },
}));
// Mock SimpleGitWorktreeManager
// mockCreate is shared so individual tests can program worktree results.
const mockCreate = vi.fn();
vi.mock('../git/manager.js', () => ({
  SimpleGitWorktreeManager: class MockWorktreeManager {
    create = mockCreate;
  },
}));
// Mock project clones
vi.mock('../git/project-clones.js', () => ({
  ensureProjectClone: vi.fn().mockResolvedValue('/mock/clone/path'),
  getProjectCloneDir: vi.fn().mockReturnValue('/mock/clone/path'),
}));
// Mock providers
vi.mock('./providers/parsers/index.js', () => ({
  getStreamParser: vi.fn().mockReturnValue({ parse: vi.fn() }),
}));
// These imports resolve to the mocked modules declared above.
import { spawn } from 'node:child_process';
import { existsSync, writeFileSync, mkdirSync, openSync, closeSync } from 'node:fs';
import { ensureProjectClone } from '../git/project-clones.js';
// Typed handles to the mocked functions for use in assertions.
const mockSpawn = vi.mocked(spawn);
const mockExistsSync = vi.mocked(existsSync);
const mockWriteFileSync = vi.mocked(writeFileSync);
const mockMkdirSync = vi.mocked(mkdirSync);
const mockOpenSync = vi.mocked(openSync);
const mockCloseSync = vi.mocked(closeSync);
describe('ProcessManager', () => {
  let processManager: ProcessManager;
  let mockProjectRepository: ProjectRepository;
  const workspaceRoot = '/test/workspace';
  beforeEach(() => {
    vi.clearAllMocks();
    // Mock child process
    const mockChild = {
      pid: 12345,
      unref: vi.fn(),
      on: vi.fn(),
      kill: vi.fn(),
    };
    mockSpawn.mockReturnValue(mockChild as any);
    // Mock project repository
    mockProjectRepository = {
      findProjectsByInitiativeId: vi.fn().mockResolvedValue([]),
      create: vi.fn(),
      findAll: vi.fn(),
      findById: vi.fn(),
      findByName: vi.fn(),
      update: vi.fn(),
      delete: vi.fn(),
      setInitiativeProjects: vi.fn(),
      addProjectToInitiative: vi.fn(),
      removeProjectFromInitiative: vi.fn(),
    };
    processManager = new ProcessManager(workspaceRoot, mockProjectRepository);
  });
  afterEach(() => {
    vi.resetAllMocks();
  });
  // Path resolution: workdir is always <workspaceRoot>/agent-workdirs/<alias>.
  describe('getAgentWorkdir', () => {
    it('returns correct agent workdir path', () => {
      const alias = 'test-agent';
      const expected = '/test/workspace/agent-workdirs/test-agent';
      const result = processManager.getAgentWorkdir(alias);
      expect(result).toBe(expected);
    });
  });
  // Worktree creation for initiative-linked projects (clone + worktree per project).
  describe('createProjectWorktrees', () => {
    beforeEach(() => {
      // Mock the global worktree create function
      mockCreate.mockResolvedValue({
        id: 'project1',
        path: '/test/workspace/agent-workdirs/test-agent/project1',
        branch: 'agent/test-agent',
        isMainWorktree: false,
      });
      // Mock project repository
      vi.mocked(mockProjectRepository.findProjectsByInitiativeId).mockResolvedValue([
        { id: '1', name: 'project1', url: 'https://github.com/user/project1.git', defaultBranch: 'main', createdAt: new Date(), updatedAt: new Date() }
      ]);
      // Mock existsSync to return true for worktree paths
      mockExistsSync.mockImplementation((path) => {
        return path.toString().includes('/agent-workdirs/');
      });
    });
    it('creates worktrees for initiative projects', async () => {
      const alias = 'test-agent';
      const initiativeId = 'init-123';
      const result = await processManager.createProjectWorktrees(alias, initiativeId);
      expect(result).toBe('/test/workspace/agent-workdirs/test-agent');
      expect(mockProjectRepository.findProjectsByInitiativeId).toHaveBeenCalledWith('init-123');
      expect(ensureProjectClone).toHaveBeenCalled();
    });
    it('throws error when worktree creation fails', async () => {
      // Mock worktree path to not exist after creation
      mockExistsSync.mockReturnValue(false);
      const alias = 'test-agent';
      const initiativeId = 'init-123';
      await expect(processManager.createProjectWorktrees(alias, initiativeId))
        .rejects.toThrow('Worktree creation failed:');
    });
  });
  // Fallback worktree for agents with no linked projects.
  describe('createStandaloneWorktree', () => {
    beforeEach(() => {
      mockCreate.mockResolvedValue({
        id: 'workspace',
        path: '/test/workspace/agent-workdirs/test-agent/workspace',
        branch: 'agent/test-agent',
        isMainWorktree: false,
      });
      mockExistsSync.mockImplementation((path) => {
        return path.toString().includes('/workspace');
      });
    });
    it('creates standalone worktree', async () => {
      const alias = 'test-agent';
      const result = await processManager.createStandaloneWorktree(alias);
      expect(result).toBe('/test/workspace/agent-workdirs/test-agent/workspace');
    });
    it('throws error when standalone worktree creation fails', async () => {
      mockExistsSync.mockReturnValue(false);
      const alias = 'test-agent';
      await expect(processManager.createStandaloneWorktree(alias))
        .rejects.toThrow('Standalone worktree creation failed:');
    });
  });
  // Detached subprocess spawning: cwd validation, env merge, fd redirection.
  describe('spawnDetached', () => {
    beforeEach(() => {
      mockExistsSync.mockReturnValue(true); // CWD exists
    });
    it('validates cwd exists before spawn', () => {
      const agentId = 'agent-123';
      const agentName = 'test-agent';
      const command = 'claude';
      const args = ['--help'];
      const cwd = '/test/workspace/agent-workdirs/test-agent';
      const env = { TEST_VAR: 'value' };
      const providerName = 'claude';
      processManager.spawnDetached(agentId, agentName, command, args, cwd, env, providerName);
      expect(mockExistsSync).toHaveBeenCalledWith(cwd);
      // fds 99/100 come from the openSync mock (output.jsonl / stderr.log).
      expect(mockSpawn).toHaveBeenCalledWith(command, args, {
        cwd,
        env: expect.objectContaining(env),
        detached: true,
        stdio: ['ignore', 99, 100],
      });
    });
    it('throws error when cwd does not exist', () => {
      mockExistsSync.mockReturnValue(false);
      const agentId = 'agent-123';
      const agentName = 'test-agent';
      const command = 'claude';
      const args = ['--help'];
      const cwd = '/nonexistent/path';
      const env = {};
      const providerName = 'claude';
      expect(() => {
        processManager.spawnDetached(agentId, agentName, command, args, cwd, env, providerName);
      }).toThrow('Agent working directory does not exist: /nonexistent/path');
    });
    it('passes correct cwd parameter to spawn', () => {
      const agentId = 'agent-123';
      const agentName = 'test-agent';
      const command = 'claude';
      const args = ['--help'];
      const cwd = '/test/workspace/agent-workdirs/test-agent';
      const env = { CLAUDE_CONFIG_DIR: '/config' };
      const providerName = 'claude';
      processManager.spawnDetached(agentId, agentName, command, args, cwd, env, providerName);
      expect(mockSpawn).toHaveBeenCalledTimes(1);
      const spawnCall = mockSpawn.mock.calls[0];
      expect(spawnCall[0]).toBe(command);
      expect(spawnCall[1]).toEqual(args);
      // Caller env must be merged on top of the inherited process.env.
      expect(spawnCall[2]).toEqual({
        cwd,
        env: expect.objectContaining({
          ...process.env,
          CLAUDE_CONFIG_DIR: '/config',
        }),
        detached: true,
        stdio: ['ignore', 99, 100],
      });
    });
    it('writes prompt file when provided', () => {
      const agentId = 'agent-123';
      const agentName = 'test-agent';
      const command = 'claude';
      const args = ['--help'];
      const cwd = '/test/workspace/agent-workdirs/test-agent';
      const env = {};
      const providerName = 'claude';
      const prompt = 'Test prompt';
      processManager.spawnDetached(agentId, agentName, command, args, cwd, env, providerName, prompt);
      expect(mockWriteFileSync).toHaveBeenCalledWith(
        '/test/workspace/.cw/agent-logs/test-agent/PROMPT.md',
        'Test prompt',
        'utf-8'
      );
    });
  });
  // Provider-specific command assembly for fresh (non-resume) runs.
  describe('buildSpawnCommand', () => {
    it('builds command with native prompt mode', () => {
      const provider = {
        name: 'claude',
        command: 'claude',
        args: ['--json-schema', 'schema.json'],
        env: {},
        promptMode: 'native' as const,
        processNames: ['claude'],
        resumeStyle: 'flag' as const,
        resumeFlag: '--resume',
        nonInteractive: {
          subcommand: 'chat',
          promptFlag: '-p',
          outputFlag: '--output-format json',
        },
      };
      const prompt = 'Test prompt';
      const result = processManager.buildSpawnCommand(provider, prompt);
      // Note: multi-word outputFlag is split into separate argv entries.
      expect(result).toEqual({
        command: 'claude',
        args: ['chat', '--json-schema', 'schema.json', '-p', 'Test prompt', '--output-format', 'json'],
        env: {},
      });
    });
    it('builds command with flag prompt mode', () => {
      const provider = {
        name: 'codex',
        command: 'codex',
        args: ['--format', 'json'],
        env: {},
        promptMode: 'flag' as const,
        processNames: ['codex'],
        resumeStyle: 'subcommand' as const,
        resumeFlag: 'resume',
        nonInteractive: {
          subcommand: 'run',
          promptFlag: '--prompt',
          outputFlag: '--json',
        },
      };
      const prompt = 'Test prompt';
      const result = processManager.buildSpawnCommand(provider, prompt);
      expect(result).toEqual({
        command: 'codex',
        args: ['run', '--format', 'json', '--prompt', 'Test prompt', '--json'],
        env: {},
      });
    });
  });
  // Provider-specific command assembly for session resume.
  describe('buildResumeCommand', () => {
    it('builds resume command with flag style', () => {
      const provider = {
        name: 'claude',
        command: 'claude',
        args: [],
        env: {},
        promptMode: 'native' as const,
        processNames: ['claude'],
        resumeStyle: 'flag' as const,
        resumeFlag: '--resume',
        nonInteractive: {
          subcommand: 'chat',
          promptFlag: '-p',
          outputFlag: '--json',
        },
      };
      const sessionId = 'session-123';
      const prompt = 'Continue working';
      const result = processManager.buildResumeCommand(provider, sessionId, prompt);
      expect(result).toEqual({
        command: 'claude',
        args: ['--resume', 'session-123', '-p', 'Continue working', '--json'],
        env: {},
      });
    });
    it('throws error for providers without resume support', () => {
      const provider = {
        name: 'noresume',
        command: 'noresume',
        args: [],
        env: {},
        promptMode: 'native' as const,
        processNames: ['noresume'],
        resumeStyle: 'none' as const,
      };
      const sessionId = 'session-123';
      const prompt = 'Continue working';
      expect(() => {
        processManager.buildResumeCommand(provider, sessionId, prompt);
      }).toThrow("Provider 'noresume' does not support resume");
    });
  });
});

View File

@@ -0,0 +1,394 @@
/**
* ProcessManager — Subprocess lifecycle, worktree creation, command building.
*
* Extracted from MultiProviderAgentManager. Manages the spawning of detached
* subprocesses, worktree creation per project, and provider-specific command
* construction.
*/
import { spawn } from 'node:child_process';
import { writeFileSync, mkdirSync, openSync, closeSync, existsSync } from 'node:fs';
import { join } from 'node:path';
import type { ProjectRepository } from '../db/repositories/project-repository.js';
import type { AgentProviderConfig } from './providers/types.js';
import type { StreamEvent } from './providers/parsers/index.js';
import { getStreamParser } from './providers/parsers/index.js';
import { SimpleGitWorktreeManager } from '../git/manager.js';
import { ensureProjectClone, getProjectCloneDir } from '../git/project-clones.js';
import { FileTailer } from './file-tailer.js';
import { createModuleLogger } from '../logger/index.js';
const log = createModuleLogger('process-manager');
/**
 * Check if a process with the given PID is still alive.
 *
 * Sending signal 0 performs existence and permission checks without actually
 * delivering a signal. An EPERM error means the process exists but belongs to
 * a user we may not signal — it is still alive, so treat it as such rather
 * than misreporting it as dead.
 */
export function isPidAlive(pid: number): boolean {
  try {
    process.kill(pid, 0);
    return true;
  } catch (err) {
    // ESRCH (and anything else) => not alive; EPERM => alive but not ours.
    return (err as { code?: string }).code === 'EPERM';
  }
}
export class ProcessManager {
  constructor(
    private workspaceRoot: string,
    private projectRepository: ProjectRepository,
  ) {}
  /**
   * Resolve the agent's working directory path
   * (`<workspaceRoot>/agent-workdirs/<alias>`).
   */
  getAgentWorkdir(alias: string): string {
    return join(this.workspaceRoot, 'agent-workdirs', alias);
  }
  /**
   * Create worktrees for all projects linked to an initiative.
   * Returns the base agent workdir path.
   *
   * @param alias - Agent alias; determines workdir and default branch name.
   * @param initiativeId - Initiative whose linked projects get worktrees.
   * @param baseBranch - Optional base branch override (defaults to each
   *   project's defaultBranch).
   * @param branchName - Optional branch name override (defaults to
   *   `agent/<alias>`).
   * @throws Error when a created worktree path does not exist on disk.
   */
  async createProjectWorktrees(
    alias: string,
    initiativeId: string,
    baseBranch?: string,
    branchName?: string,
  ): Promise<string> {
    const projects = await this.projectRepository.findProjectsByInitiativeId(initiativeId);
    const agentWorkdir = this.getAgentWorkdir(alias);
    log.debug({
      alias,
      initiativeId,
      projectCount: projects.length,
      agentWorkdir,
      baseBranch
    }, 'creating project worktrees');
    // No linked projects — fall back to standalone worktree so the agent
    // always has a git-backed working directory.
    if (projects.length === 0) {
      log.info({ alias, initiativeId }, 'initiative has no linked projects, falling back to standalone worktree');
      return this.createStandaloneWorktree(alias);
    }
    for (const project of projects) {
      const clonePath = await ensureProjectClone(project, this.workspaceRoot);
      const worktreeManager = new SimpleGitWorktreeManager(clonePath, undefined, agentWorkdir);
      const effectiveBaseBranch = baseBranch ?? project.defaultBranch;
      const worktree = await worktreeManager.create(project.name, branchName ?? `agent/${alias}`, effectiveBaseBranch);
      const worktreePath = worktree.path;
      const pathExists = existsSync(worktreePath);
      log.debug({
        alias,
        agentWorkdir,
        projectName: project.name,
        worktreePath,
        pathExists
      }, 'worktree created');
      // Defensive check: git may report success while the path is missing
      // (e.g. stale worktree metadata); fail loudly instead of spawning an
      // agent into a nonexistent directory.
      if (!pathExists) {
        log.error({ worktreePath }, 'Worktree path does not exist after creation!');
        throw new Error(`Worktree creation failed: ${worktreePath}`);
      }
    }
    return agentWorkdir;
  }
  /**
   * Fallback: create a single "workspace" worktree for standalone agents.
   *
   * @throws Error when the created worktree path does not exist on disk.
   */
  async createStandaloneWorktree(alias: string): Promise<string> {
    const agentWorkdir = this.getAgentWorkdir(alias);
    const worktreeManager = new SimpleGitWorktreeManager(this.workspaceRoot, undefined, agentWorkdir);
    log.debug({ alias, agentWorkdir }, 'creating standalone worktree');
    const worktree = await worktreeManager.create('workspace', `agent/${alias}`);
    const worktreePath = worktree.path;
    const pathExists = existsSync(worktreePath);
    log.debug({
      alias,
      agentWorkdir,
      worktreePath,
      pathExists
    }, 'standalone worktree created');
    if (!pathExists) {
      log.error({ worktreePath }, 'Standalone worktree path does not exist after creation!');
      throw new Error(`Standalone worktree creation failed: ${worktreePath}`);
    }
    return worktree.path;
  }
  /**
   * Build the spawn command for a given provider configuration.
   *
   * Argument order: [subcommand?] ...provider.args [promptFlag prompt] [outputFlag...].
   * Multi-word outputFlag values are split into separate argv entries.
   */
  buildSpawnCommand(
    provider: AgentProviderConfig,
    prompt: string,
  ): { command: string; args: string[]; env: Record<string, string> } {
    const args = [...provider.args];
    const env: Record<string, string> = { ...provider.env };
    if (provider.nonInteractive?.subcommand) {
      args.unshift(provider.nonInteractive.subcommand);
    }
    if (provider.promptMode === 'native') {
      // 'native' providers take the prompt via the conventional -p flag.
      args.push('-p', prompt);
    } else if (provider.promptMode === 'flag' && provider.nonInteractive?.promptFlag) {
      args.push(provider.nonInteractive.promptFlag, prompt);
    }
    if (provider.nonInteractive?.outputFlag) {
      args.push(...provider.nonInteractive.outputFlag.split(' '));
    }
    return { command: provider.command, args, env };
  }
  /**
   * Build the resume command for a given provider configuration.
   *
   * @throws Error when the provider's resumeStyle is 'none'.
   */
  buildResumeCommand(
    provider: AgentProviderConfig,
    sessionId: string,
    prompt: string,
  ): { command: string; args: string[]; env: Record<string, string> } {
    const args = [...provider.args];
    const env: Record<string, string> = { ...provider.env };
    switch (provider.resumeStyle) {
      case 'flag':
        args.push(provider.resumeFlag!, sessionId);
        break;
      case 'subcommand':
        if (provider.nonInteractive?.subcommand) {
          args.unshift(provider.nonInteractive.subcommand);
        }
        args.push(provider.resumeFlag!, sessionId);
        break;
      case 'none':
        throw new Error(`Provider '${provider.name}' does not support resume`);
    }
    if (provider.promptMode === 'native') {
      args.push('-p', prompt);
    } else if (provider.promptMode === 'flag' && provider.nonInteractive?.promptFlag) {
      args.push(provider.nonInteractive.promptFlag, prompt);
    }
    if (provider.nonInteractive?.outputFlag) {
      args.push(...provider.nonInteractive.outputFlag.split(' '));
    }
    return { command: provider.command, args, env };
  }
  /**
   * Extract session ID from CLI output based on provider config.
   *
   * Supports two extraction modes: a single JSON result object, or a JSONL
   * event stream where one event of a configured type carries the session id.
   * Returns null on any parse failure or when no id is found.
   */
  extractSessionId(
    provider: AgentProviderConfig,
    output: string,
  ): string | null {
    if (!provider.sessionId) return null;
    try {
      if (provider.sessionId.extractFrom === 'result') {
        const parsed = JSON.parse(output);
        return parsed[provider.sessionId.field] ?? null;
      }
      if (provider.sessionId.extractFrom === 'event') {
        const lines = output.trim().split('\n');
        for (const line of lines) {
          try {
            const event = JSON.parse(line);
            if (event.type === provider.sessionId.eventType) {
              return event[provider.sessionId.field] ?? null;
            }
          } catch {
            // Skip non-JSON lines (JSONL streams may contain stray output)
          }
        }
      }
    } catch {
      // Parse failure — treat as "no session id available"
    }
    return null;
  }
  /**
   * Spawn a detached subprocess with file redirection for crash resilience.
   * The subprocess writes directly to files and survives server crashes.
   * A FileTailer watches the output file and emits events in real-time.
   *
   * @param onEvent - Callback for stream events from the tailer
   * @throws Error when the working directory does not exist.
   */
  spawnDetached(
    agentId: string,
    agentName: string,
    command: string,
    args: string[],
    cwd: string,
    env: Record<string, string>,
    providerName: string,
    prompt?: string,
    onEvent?: (event: StreamEvent) => void,
    onRawContent?: (content: string) => void,
  ): { pid: number; outputFilePath: string; tailer: FileTailer } {
    // Pre-spawn validation and logging
    const cwdExists = existsSync(cwd);
    const commandWithArgs = [command, ...args].join(' ');
    // Log environment variables that might affect working directory
    const environmentInfo = {
      PWD: process.env.PWD,
      HOME: process.env.HOME,
      CLAUDE_CONFIG_DIR: env.CLAUDE_CONFIG_DIR,
      CW_CONFIG_DIR: env.CW_CONFIG_DIR
    };
    log.info({
      agentId,
      cwd,
      cwdExists,
      commandWithArgs,
      providerName,
      environmentInfo
    }, 'spawning detached process with workdir validation');
    if (!cwdExists) {
      log.error({ cwd }, 'CWD does not exist before spawn!');
      throw new Error(`Agent working directory does not exist: ${cwd}`);
    }
    const logDir = join(this.workspaceRoot, '.cw', 'agent-logs', agentName);
    mkdirSync(logDir, { recursive: true });
    const outputFilePath = join(logDir, 'output.jsonl');
    const stderrFilePath = join(logDir, 'stderr.log');
    if (prompt) {
      writeFileSync(join(logDir, 'PROMPT.md'), prompt, 'utf-8');
    }
    const stdoutFd = openSync(outputFilePath, 'w');
    const stderrFd = openSync(stderrFilePath, 'w');
    let child;
    try {
      child = spawn(command, args, {
        cwd,
        env: { ...process.env, ...env },
        detached: true,
        stdio: ['ignore', stdoutFd, stderrFd],
      });
    } finally {
      // Close our copies of the fds even if spawn throws synchronously —
      // the child holds its own duplicates once spawned.
      closeSync(stdoutFd);
      closeSync(stderrFd);
    }
    child.unref();
    const pid = child.pid!;
    log.info({
      agentId,
      pid,
      command,
      args: args.join(' '),
      cwd,
      spawnSuccess: true
    }, 'spawned detached process successfully');
    const parser = getStreamParser(providerName);
    const tailer = new FileTailer({
      filePath: outputFilePath,
      agentId,
      parser,
      onEvent: onEvent ?? (() => {}),
      startFromBeginning: true,
      onRawContent,
    });
    tailer.start().catch((err) => {
      log.warn({ agentId, err: err instanceof Error ? err.message : String(err) }, 'failed to start tailer');
    });
    return { pid, outputFilePath, tailer };
  }
  /**
   * Poll for process completion by checking if PID is still alive.
   * When the process exits, calls onComplete callback.
   * Returns a cancel handle to stop polling (e.g. on agent cleanup or re-resume).
   *
   * @param onComplete - Called when the process is no longer alive
   * @param getTailer - Function to get the current tailer for final flush
   */
  pollForCompletion(
    agentId: string,
    pid: number,
    onComplete: () => Promise<void>,
    getTailer: () => FileTailer | undefined,
  ): { cancel: () => void } {
    let cancelled = false;
    const check = async () => {
      if (cancelled) return;
      if (!isPidAlive(pid)) {
        const tailer = getTailer();
        if (tailer) {
          // Short grace period so the tailer can flush trailing output.
          await new Promise((resolve) => setTimeout(resolve, 500));
          await tailer.stop();
        }
        if (!cancelled) await onComplete();
        return;
      }
      if (!cancelled) setTimeout(schedule, 1000);
    };
    // Catch (rather than drop) rejections from onComplete / tailer.stop —
    // the bare `check()` calls previously produced unhandled promise
    // rejections on any failure in the completion path.
    const schedule = () => {
      check().catch((err) => {
        log.error({ agentId, err: err instanceof Error ? err.message : String(err) }, 'completion poll failed');
      });
    };
    schedule();
    return { cancel: () => { cancelled = true; } };
  }
  /**
   * Wait for a process to complete with Promise-based API.
   * Returns when the process is no longer alive.
   *
   * @param timeoutMs - Maximum wait before rejecting (default 5 minutes).
   */
  async waitForProcessCompletion(pid: number, timeoutMs: number = 300000): Promise<{ exitCode: number | null }> {
    return new Promise((resolve, reject) => {
      const startTime = Date.now();
      const check = () => {
        if (!isPidAlive(pid)) {
          // Process has exited, try to get exit code
          // Note: Getting exact exit code from detached process is limited
          resolve({ exitCode: null });
          return;
        }
        if (Date.now() - startTime > timeoutMs) {
          reject(new Error(`Process ${pid} did not complete within ${timeoutMs}ms`));
          return;
        }
        setTimeout(check, 1000);
      };
      check();
    });
  }
  /**
   * Get the exit code of a completed process.
   * Limited implementation since we use detached processes.
   */
  async getExitCode(_pid: number): Promise<number | null> {
    // For detached processes, we can't easily get the exit code
    // This would need to be enhanced with process tracking
    return null;
  }
}

View File

@@ -0,0 +1,96 @@
/**
* Detail mode prompt — break a phase into executable tasks.
*/
import { CONTEXT_MANAGEMENT, ID_GENERATION, INPUT_FILES, SIGNAL_FORMAT } from './shared.js';
/**
 * Build the DETAIL-mode system prompt: instructs an Architect agent to break
 * a single phase into small, test-specified, file-scoped executable tasks
 * written to `.cw/output/tasks/{id}.md`.
 *
 * The result is static — shared fragments (INPUT_FILES, ID_GENERATION, etc.)
 * are interpolated once at call time.
 */
export function buildDetailPrompt(): string {
  return `<role>
You are an Architect agent in DETAIL mode. Break the phase into executable tasks. You do NOT write code.
</role>
${INPUT_FILES}
<output_format>
Write one file per task to \`.cw/output/tasks/{id}.md\`:
- Frontmatter: \`title\`, \`category\` (execute|research|discuss|plan|detail|refine|verify|merge|review), \`type\` (auto|checkpoint:human-verify|checkpoint:decision|checkpoint:human-action), \`dependencies\` (list of task IDs)
- Body: Detailed task description
</output_format>
${ID_GENERATION}
${SIGNAL_FORMAT}
<task_body_requirements>
Every task body must include:
1. **Files to create or modify** — specific paths (e.g., \`src/db/schema.ts\`, \`src/api/routes/users.ts\`)
2. **Expected behavior** — concrete examples, inputs/outputs, edge cases
3. **Test specification** — for every execute-category task:
   - Test file path (e.g., \`src/api/validators/user.test.ts\`)
   - Test scenarios (happy path, error cases, edge cases)
   - Run command (e.g., \`npm test -- src/api/validators/user.test.ts\`)
   Non-execute tasks may omit this.
4. **Verification command** — exact command to confirm completion
<examples>
<example label="bad">
Title: Add user validation
Body: Add validation to the user model. Make sure all fields are validated properly.
</example>
<example label="good">
Title: Add Zod validation schema for user creation
Body: Create \`src/api/validators/user.ts\` — Zod schema for CreateUserInput:
- email: valid format, lowercase, max 255 chars
- name: 1-100 chars, trimmed
- password: min 8 chars, uppercase + number required
Test file: \`src/api/validators/user.test.ts\`
Tests: valid input passes, missing fields rejected, invalid email rejected,
weak password rejected, whitespace-only name rejected
Files: src/api/validators/user.ts (create), user.test.ts (create)
Verify: \`npm test -- src/api/validators/user.test.ts\`
</example>
</examples>
</task_body_requirements>
<file_ownership>
Parallel tasks must not modify the same files. Include a file list per task:
\`\`\`
Files: src/db/schema/users.ts (create), src/db/migrations/001_users.sql (create)
\`\`\`
If two tasks touch the same file or one needs the other's output, add a dependency.
</file_ownership>
<task_sizing>
- **<150 lines, 1-3 files**: Sweet spot
- **150-300 lines, 4-5 files**: Only for mechanical/boilerplate work with precise specs
- **300+ lines or 5+ files**: Split it
- **<20 lines**: Merge with a related task
- **1 sentence description**: Too vague — add detail or merge
</task_sizing>
<checkpoint_tasks>
- \`checkpoint:human-verify\`: Visual changes, migrations, API contracts
- \`checkpoint:decision\`: Architecture choices affecting multiple phases
- \`checkpoint:human-action\`: External setup (DNS, credentials, third-party config)
~90% of tasks should be \`auto\`.
</checkpoint_tasks>
<existing_context>
- Read ALL \`context/tasks/\` files before generating output
- Only create tasks for THIS phase (\`phase.md\`)
- Do not duplicate work that exists in context/tasks/ (even under different names)
- Use pages as requirements source
</existing_context>
${CONTEXT_MANAGEMENT}
<definition_of_done>
Before signal.json "done":
- [ ] Every execute task has test file path + run command
- [ ] Every task has a file ownership list
- [ ] No parallel tasks share files
- [ ] Every task is executable without clarifying questions
- [ ] Tasks sized within ~20-300 lines changed
- [ ] No duplicates with existing context tasks
</definition_of_done>`;
}

View File

@@ -0,0 +1,78 @@
/**
* Discuss mode prompt — clarifying questions and decision capture.
*/
import { ID_GENERATION, INPUT_FILES, SIGNAL_FORMAT } from './shared.js';
/**
 * Build the DISCUSS-mode system prompt: instructs an Architect agent to turn
 * user intent into documented decisions (written to
 * `.cw/output/decisions/{id}.md`) and to ask only high-value clarifying
 * questions the codebase cannot answer.
 */
export function buildDiscussPrompt(): string {
  return `<role>
You are an Architect agent in the Codewalk multi-agent system operating in DISCUSS mode.
Transform user intent into clear, documented decisions. You do NOT write code — you capture decisions.
</role>
${INPUT_FILES}
<output_format>
Write decisions to \`.cw/output/decisions/{id}.md\`:
- Frontmatter: \`topic\`, \`decision\`, \`reason\`
- Body: Additional context or rationale
</output_format>
${ID_GENERATION}
${SIGNAL_FORMAT}
<analysis_method>
Work backward from the goal before asking anything:
1. **Observable outcome**: What will the user see/do when this is done?
2. **Artifacts needed**: What code, config, or infra produces that outcome?
3. **Wiring**: How do the artifacts connect (data flow, API contracts, events)?
4. **Failure points**: What can go wrong? Edge cases?
Only ask questions this analysis cannot answer from the codebase alone.
</analysis_method>
<question_quality>
Every question must explain what depends on the answer.
<examples>
<example label="bad">
"How should we handle errors?"
</example>
<example label="good">
"The current API returns HTTP 500 for all errors. Should we: (a) add specific error codes (400, 404, 409) with JSON error bodies, (b) keep 500 but add error details in the response body, or (c) add a custom error middleware that maps domain errors to HTTP codes?"
</example>
</examples>
</question_quality>
<decision_quality>
Include: what, why, rejected alternatives. For behavioral decisions, add verification criteria.
<examples>
<example label="bad">
"We'll use a database for storage"
</example>
<example label="good">
"Use SQLite via better-sqlite3 with drizzle-orm. Schema in src/db/schema.ts, migrations via drizzle-kit. Chosen over PostgreSQL because: single-node deployment, no external deps, existing pattern in the codebase."
</example>
</examples>
</decision_quality>
<question_categories>
- **User Journeys**: Workflows, success/failure paths, edge cases
- **Technical Constraints**: Patterns to follow, things to avoid
- **Data & Validation**: Structures, rules, constraints
- **Integration Points**: External systems, APIs, error handling
- **Testability**: Acceptance criteria, test strategies
Don't ask what the codebase already answers. If the project uses a framework, don't ask which framework to use.
</question_categories>
<rules>
- Ask 2-4 questions at a time, not more
</rules>
<definition_of_done>
- Every decision includes what, why, and rejected alternatives
- Behavioral decisions include verification criteria
- No questions the codebase already answers
</definition_of_done>`;
}

View File

@@ -0,0 +1,81 @@
/**
* Execute mode prompt — standard worker agent.
*/
import {
CONTEXT_MANAGEMENT,
DEVIATION_RULES,
GIT_WORKFLOW,
INPUT_FILES,
PROGRESS_TRACKING,
SESSION_STARTUP,
SIGNAL_FORMAT,
TEST_INTEGRITY,
} from './shared.js';
/**
 * Build the EXECUTE-mode system prompt for a Worker agent: a
 * RED-GREEN-REFACTOR execution protocol with scope, git, and test-integrity
 * rules.
 *
 * @param taskDescription - Optional inline task text; when supplied it is
 *   surfaced in a <task> section (the full structured task still lives in
 *   `.cw/input/task.md`).
 */
export function buildExecutePrompt(taskDescription?: string): string {
  // Only emit the <task> section when an inline description was provided.
  const taskSection = taskDescription
    ? `
<task>
${taskDescription}
Read \`.cw/input/task.md\` for the full structured task with metadata, priority, and dependencies.
</task>`
    : '';
  return `<role>
You are a Worker agent in the Codewalk multi-agent system. Execute the assigned coding task using RED-GREEN-REFACTOR.
</role>
${taskSection}
${INPUT_FILES}
${SIGNAL_FORMAT}
${SESSION_STARTUP}
<execution_protocol>
Follow these steps in order. Signal done only after the Definition of Done checklist passes.
1. **Startup**: Verify environment per Session Startup. If baseline tests fail, signal error.
2. **Read & orient**: Read all input files. Run \`git log --oneline -10\` to check recent changes.
3. **Write failing tests (RED)**: Write tests for the expected behavior. Run them — they must fail. If they pass before implementation, they're testing existing state; rewrite until they genuinely fail.
4. **Implement (GREEN)**: Minimum code to pass tests. Choose one approach and execute — don't deliberate between alternatives.
5. **Verify green**: Run the full relevant test suite. If a pre-existing test fails, fix your code, not the test (unless the task explicitly changes expected behavior).
6. **Commit**: Stage specific files, commit with a descriptive message, update progress file.
7. **Iterate**: For multi-part tasks, repeat 3-6 per part. Each cycle produces a commit.
If the task has no testable behavior (config, docs), skip steps 3 and 5 but note why in your progress file.
</execution_protocol>
${TEST_INTEGRITY}
<anti_patterns>
- **Mega-commits**: Commit after each logical unit, not one giant commit at the end.
- **Silent reinterpretation**: Task says X, do X. Don't substitute Y because you think it's better.
- **Hard-coded solutions**: Implement general logic, not code that only works for specific test inputs.
</anti_patterns>
<scope_rules>
- Do exactly what the task says — no unrelated fixes, refactors, or improvements. Other agents may own those files.
- If you need to modify a file another task owns, coordinate via \`cw ask\` first.
- Touching 7+ files? You're probably overscoping. Re-read the task.
</scope_rules>
${DEVIATION_RULES}
${GIT_WORKFLOW}
${PROGRESS_TRACKING}
${CONTEXT_MANAGEMENT}
<definition_of_done>
Before writing signal.json with status "done":
- [ ] All tests pass (full relevant suite)
- [ ] No uncommitted changes
- [ ] Progress file updated
- [ ] Implemented exactly what the task asked — no more, no less
If any item fails, fix it. If unfixable, signal "error" explaining what's wrong.
</definition_of_done>`;
}

View File

@@ -0,0 +1,14 @@
/**
 * Agent Prompts — per-mode prompt builders and shared instructions.
 *
 * Each agent type lives in its own file. Shared instructions (signal format,
 * input files, ID generation) are in shared.ts.
 */
// Shared prompt fragments plus the inter-agent communication section builder.
export { SIGNAL_FORMAT, INPUT_FILES, ID_GENERATION, CONTEXT_MANAGEMENT, DEVIATION_RULES, GIT_WORKFLOW, TEST_INTEGRITY, SESSION_STARTUP, PROGRESS_TRACKING, buildInterAgentCommunication } from './shared.js';
// One prompt builder per agent operation mode.
export { buildExecutePrompt } from './execute.js';
export { buildDiscussPrompt } from './discuss.js';
export { buildPlanPrompt } from './plan.js';
export { buildDetailPrompt } from './detail.js';
export { buildRefinePrompt } from './refine.js';
// Workspace section describing the agent's working directory (appended to prompts).
export { buildWorkspaceLayout } from './workspace.js';

View File

@@ -0,0 +1,96 @@
/**
* Plan mode prompt — plan initiative into phases.
*/
import { CONTEXT_MANAGEMENT, ID_GENERATION, INPUT_FILES, SIGNAL_FORMAT } from './shared.js';
/**
 * Build the system prompt for an Architect agent in PLAN mode.
 *
 * The agent is instructed to split an initiative into phase files under
 * `.cw/output/phases/{id}.md`, maximizing parallelism while keeping
 * parallel phases from touching the same files. Interpolates the shared
 * instruction fragments (INPUT_FILES, ID_GENERATION, SIGNAL_FORMAT,
 * CONTEXT_MANAGEMENT) — the template content below is the exact prompt
 * text emitted, so do not reflow or reindent it.
 *
 * @returns The complete PLAN-mode prompt text.
 */
export function buildPlanPrompt(): string {
  return `<role>
You are an Architect agent in PLAN mode. Plan the initiative into phases. You do NOT write code.
</role>
${INPUT_FILES}
<output_format>
Write one file per phase to \`.cw/output/phases/{id}.md\`:
- Frontmatter: \`title\`, \`dependencies\` (list of phase IDs this depends on)
- Body: what gets built, specific enough for a detail agent to break into tasks without clarifying questions
</output_format>
${ID_GENERATION}
${SIGNAL_FORMAT}
<phase_design>
- Single concern, independently deliverable, testable
- Foundation phases first; minimize cross-phase dependencies
- 2-5 tasks each. Action-oriented names (what gets built, not how)
- Tests are part of every phase, not a separate phase
<examples>
<example label="bad">
Phase 1: Database → Phase 2: API → Phase 3: Frontend → Phase 4: Tests
</example>
<example label="good">
Phase 1: Database + schema tests → Phase 2: API + endpoint tests → Phase 3: Frontend + component tests
</example>
</examples>
</phase_design>
<dependencies>
Maximize parallelism. If your plan is fully serial, reconsider.
<examples>
<example label="good">
\`\`\`
Wave 1 (parallel): "Database schema", "API skeleton"
Wave 2 (parallel): "User endpoints" (depends: API skeleton, DB schema), "Auth middleware" (depends: API skeleton)
Wave 3: "Integration tests" (depends: User endpoints, Auth middleware)
\`\`\`
</example>
<example label="bad">
\`\`\`
Phase 1 → Phase 2 → Phase 3 → Phase 4 (fully serial, no parallelism)
\`\`\`
</example>
</examples>
</dependencies>
<file_ownership>
Parallel phases MUST NOT modify the same files.
<examples>
<example label="bad">
Phase A "Add user model" and Phase B "Add product model" both modify \`schema.ts\` and \`index.ts\`
</example>
<example label="good">
Phase A creates \`user-schema.ts\`, Phase B creates \`product-schema.ts\`, Phase C "Wire models into index" depends on both
</example>
</examples>
</file_ownership>
<specificity>
Each phase must pass: **"Could a detail agent break this into tasks without clarifying questions?"**
<examples>
<example label="bad">
"Set up the backend" — what backend? What framework? What endpoints?
</example>
<example label="good">
"Create Express API server with health check endpoint at /api/health, CORS configured for localhost:3000, error handling middleware returning JSON errors"
</example>
</examples>
</specificity>
<existing_context>
- Account for existing phases/tasks — don't plan work already covered
- Always generate new phase IDs — never reuse existing ones
</existing_context>
${CONTEXT_MANAGEMENT}
<definition_of_done>
- [ ] Every phase has explicit dependencies (or explicitly none)
- [ ] Parallel phases do not modify the same files
- [ ] Each phase specific enough for detail agent — no clarifying questions needed
- [ ] Tests included in each phase, not trailing
- [ ] Existing work accounted for
</definition_of_done>`;
}

View File

@@ -0,0 +1,42 @@
/**
* Refine mode prompt — review and propose edits to initiative pages.
*/
import { INPUT_FILES, SIGNAL_FORMAT } from './shared.js';
/**
 * Build the system prompt for an Architect agent in REFINE mode.
 *
 * The agent reviews initiative pages for ambiguity, gaps, contradictions,
 * unverifiable requirements, and missing edge cases, writing full-page
 * replacements to `.cw/output/pages/{pageId}.md`. The template content is
 * the exact prompt text emitted — do not reflow or reindent it.
 *
 * @returns The complete REFINE-mode prompt text.
 */
export function buildRefinePrompt(): string {
  return `<role>
You are an Architect agent reviewing initiative pages. You do NOT write code.
</role>
${INPUT_FILES}
${SIGNAL_FORMAT}
<output_format>
Write one file per modified page to \`.cw/output/pages/{pageId}.md\`:
- Frontmatter: \`title\`, \`summary\` (what changed and why)
- Body: Full replacement markdown content for the page
</output_format>
<improvement_priorities>
1. **Ambiguity**: Requirements interpretable multiple ways → make specific
2. **Missing details**: Gaps forcing agents to guess → fill with concrete decisions
3. **Contradictions**: Conflicting statements → resolve
4. **Unverifiable requirements**: "Make it fast" → add testable criteria. Better: "Response time under 200ms". Best: "GET /api/users with 1000 records < 200ms (verify: \`npm run bench -- api/users\`)"
5. **Missing edge cases**: Happy path only → add error/empty/boundary scenarios. E.g. "When cart is empty and user clicks checkout → show 'Your cart is empty', disable payment button"
Ignore style, grammar, formatting unless they cause genuine ambiguity. Rough but precise beats polished but vague.
If all pages are already clear, signal done with no output files.
</improvement_priorities>
<rules>
- Ask 2-4 questions if you need clarification
- Preserve [[page:\$id|title]] cross-references
- Only reference page IDs that exist in .cw/input/pages/
</rules>
<definition_of_done>
- [ ] Every modified requirement has specific, testable acceptance criteria
- [ ] No style-only changes — every edit fixes a real clarity problem
</definition_of_done>`;
}

View File

@@ -0,0 +1,124 @@
/**
* Shared prompt instructions reused across agent types.
* Each constant is wrapped in a descriptive XML tag for unambiguous
* first-order / second-order delimiter separation per Anthropic best practices.
*/
/** Signal-file contract: every agent ends its run by writing signal.json. */
export const SIGNAL_FORMAT = `
<signal_format>
As your final action, write \`.cw/output/signal.json\`:
- Done: \`{ "status": "done" }\`
- Need clarification: \`{ "status": "questions", "questions": [{ "id": "q1", "question": "..." }] }\`
- Unrecoverable error: \`{ "status": "error", "error": "..." }\`
</signal_format>`;
/** Layout of .cw/input/: assignment files plus optional read-only context files. */
export const INPUT_FILES = `
<input_files>
Read \`.cw/input/manifest.json\` first, then read listed files from \`.cw/input/\`.
**Assignment Files**
- \`initiative.md\` — frontmatter: id, name, status
- \`phase.md\` — frontmatter: id, name, status; body: description
- \`task.md\` — frontmatter: id, name, category, type, priority, status; body: description
- \`pages/\` — one per page; frontmatter: title, parentPageId, sortOrder; body: markdown
**Context Files (read-only)**
Present when \`contextFiles\` exists in manifest:
- \`context/phases/\` — frontmatter: id, name, status, dependsOn; body: description
- \`context/tasks/\` — frontmatter: id, name, phaseId, parentTaskId, category, type, priority, status; body: description
Do not duplicate or contradict context file content in your output.
</input_files>`;
/** How agents mint unique IDs for new entities (via the \`cw id\` CLI). */
export const ID_GENERATION = `
<id_generation>
When creating new entities (phases, tasks, decisions), generate a unique ID by running:
\`\`\`
cw id
\`\`\`
Use the output as the filename (e.g., \`{id}.md\`).
</id_generation>`;
/** Escalation ladder for deviations, from silent typo fixes up to hard stops. */
export const DEVIATION_RULES = `
<deviation_rules>
1. **Typo in assigned files** → Fix silently
2. **Bug in files you're modifying** → Fix if < 10 lines, otherwise note and move on
3. **Missing dependency** → Check context files for another agent's work; \`cw ask\` if yes, create if within scope
4. **Architectural mismatch** → STOP. Signal "questions" with what you found vs. what the task assumes
5. **Ambiguous requirement** → STOP. Signal "questions" with the ambiguity and 2-3 concrete options
6. **Task wrong or impossible** → STOP. Signal "error" explaining why
Never silently reinterpret a task.
</deviation_rules>`;
/** Git hygiene rules for agents working in isolated worktrees. */
export const GIT_WORKFLOW = `
<git_workflow>
You are in an isolated git worktree. Other agents work in parallel on separate branches.
- Stage specific files with \`git add <file>\`, not \`git add .\`
- Never force-push
- Run \`git status\` before committing
</git_workflow>`;
/** Guidance on parallel tool use and keeping the progress file current. */
export const CONTEXT_MANAGEMENT = `
<context_management>
When reading multiple files or running independent commands, execute them in parallel rather than sequentially. After each commit, update your progress file (see Progress Tracking).
</context_management>`;
/** Hard rules preventing agents from gaming or weakening the test suite. */
export const TEST_INTEGRITY = `
<test_integrity>
1. **Never mirror implementation logic in assertions.** Hardcode expected values from requirements, don't recalculate them.
2. **Never modify existing test assertions to make them pass.** If a test expects X and your code produces Y, fix your code. Exception: your task explicitly changes expected behavior.
3. **Never skip or disable tests.** No \`it.skip()\`, \`.todo()\`, or commenting out. If unfixable, signal error.
4. **Each test must be independent.** No shared mutable state, no order dependence.
5. **Run the full relevant test suite**, not just your new tests.
</test_integrity>`;
/** Required checks at session start before any work begins. */
export const SESSION_STARTUP = `
<session_startup>
1. \`pwd\` — confirm working directory
2. \`git status\` — check for unexpected state
3. Run test suite — establish green baseline. If already failing, signal "error". Don't build on a broken foundation.
4. Read \`.cw/input/manifest.json\` and all listed input files
</session_startup>`;
/** Progress-file format; survives context compaction between sessions. */
export const PROGRESS_TRACKING = `
<progress_tracking>
Update \`.cw/output/progress.md\` after each commit:
\`\`\`markdown
## Current Status
[What you just completed]
## Next Steps
[What you're working on next]
## Blockers
[Any issues or questions — empty if none]
\`\`\`
Survives context compaction — read this first if your context is refreshed.
</progress_tracking>`;
/**
 * Build the inter-agent communication section for a specific agent.
 *
 * Interpolates the agent's own ID into the \`cw listen\` / \`cw ask\`
 * command examples so the agent can copy them verbatim.
 *
 * @param agentId - This agent's unique ID (used in the CLI examples).
 * @returns Prompt fragment describing the cw messaging commands.
 */
export function buildInterAgentCommunication(agentId: string): string {
  return `
<inter_agent_communication>
Your agent ID: **${agentId}**
**CLI Commands**
- \`cw listen --agent-id ${agentId}\` — Waits for incoming question. Prints JSON (\`{ conversationId, fromAgentId, question, phaseId, taskId }\`) and exits.
- \`cw ask "<question>" --from ${agentId} --agent-id <TARGET>\` — Blocks until answered. Target with one of: \`--agent-id <id>\`, \`--task-id <id>\`, \`--phase-id <id>\`.
- \`cw answer "<answer>" --conversation-id <ID>\` — Answer a pending question.
**Usage Pattern**
Run \`cw listen > "$file" &\` at session start. Check periodically. On question: answer, restart listener. Before signal.json: kill listener, clean up.
**When to Communicate**
- Need interface/schema/API contract info from another agent
- About to modify a shared resource
- Have a dependency on another agent's work
- Don't ask questions you can answer by reading the codebase
</inter_agent_communication>`;
}

View File

@@ -0,0 +1,40 @@
/**
* Workspace layout section describing the agent's working directory.
*/
import { readdirSync } from 'node:fs';
import { join } from 'node:path';
/**
 * Build the <workspace> prompt section describing the agent's working
 * directory and the project worktree directories inside it.
 *
 * Lists every subdirectory of `agentCwd` except the `.cw` control directory.
 *
 * @param agentCwd - Absolute path to the agent's isolated worktree root.
 * @returns Prompt fragment, or '' when agentCwd cannot be read (e.g. it
 *   does not exist yet) so callers can append the result unconditionally.
 */
export function buildWorkspaceLayout(agentCwd: string): string {
  let entries: string[];
  try {
    entries = readdirSync(agentCwd, { withFileTypes: true })
      .filter(d => d.isDirectory() && d.name !== '.cw')
      .map(d => d.name);
  } catch {
    // Unreadable or missing directory: omit the section entirely.
    return '';
  }
  if (entries.length === 0) {
    return `
<workspace>
Your working directory is: ${agentCwd}
This is an isolated git worktree. Other agents may be working in parallel on separate branches — do not assume you have exclusive access to the repository.
</workspace>`;
  }
  // Fix: separate the directory name from its absolute path. Previously the
  // two were concatenated with no delimiter, rendering as "- `src/`/abs/src".
  const lines = entries.map(
    name => `- \`${name}/\` — ${join(agentCwd, name)}`
  );
  return `
<workspace>
Your working directory is: ${agentCwd}
This is an isolated git worktree. Other agents may be working in parallel on separate branches — do not assume you have exclusive access to the repository.
The following project directories contain the source code (git worktrees):
${lines.join('\n')}
</workspace>`;
}

View File

@@ -0,0 +1,40 @@
/**
* Agent Providers Module - Public API
*
* Re-exports provider types, presets, and registry functions.
*/
// Provider configuration types.
export type {
  AgentProviderConfig,
  StructuredOutputConfig,
  SessionIdConfig,
  NonInteractiveConfig,
} from './types.js';
// Built-in presets (claude, codex, gemini, cursor, auggie, amp, opencode).
export { PROVIDER_PRESETS } from './presets.js';
// Registry: lookup, listing, registration, and file-based overrides.
export {
  getProvider,
  listProviders,
  registerProvider,
  loadProvidersFromFile,
} from './registry.js';
// Stream parsing
export type {
  StreamEvent,
  StreamParser,
  StreamInitEvent,
  StreamTextDeltaEvent,
  StreamToolUseStartEvent,
  StreamToolResultEvent,
  StreamTurnEndEvent,
  StreamResultEvent,
  StreamErrorEvent,
} from './stream-types.js';
export {
  getStreamParser,
  ClaudeStreamParser,
  GenericStreamParser,
} from './parsers/index.js';

View File

@@ -0,0 +1,167 @@
/**
* Claude Code Stream Parser
*
* Parses Claude Code CLI `--output-format stream-json` NDJSON output
* into standardized StreamEvents.
*
* Key line types handled:
* - system (subtype=init): session_id
* - stream_event (content_block_delta, text_delta): delta.text
* - stream_event (content_block_start, tool_use): content_block.name, .id
* - stream_event (message_delta): delta.stop_reason
* - result: result, session_id, total_cost_usd
* - any with is_error: true: error message
*/
import type { StreamEvent, StreamParser } from '../stream-types.js';
/** `system` line; carries the session ID when subtype is `init`. */
interface ClaudeSystemEvent {
  type: 'system';
  subtype?: string;
  session_id?: string;
}
/** Wrapped streaming event (`content_block_delta`, `content_block_start`, `message_delta`, …). */
interface ClaudeStreamEvent {
  type: 'stream_event';
  event?: {
    type: string;
    index?: number;
    delta?: {
      type?: string;
      text?: string;
      stop_reason?: string;
    };
    content_block?: {
      type?: string;
      id?: string;
      name?: string;
    };
  };
}
/** Complete assistant message containing text and tool_use content blocks. */
interface ClaudeAssistantEvent {
  type: 'assistant';
  message?: {
    content?: Array<{
      type: string;
      text?: string;
      id?: string;
      name?: string;
    }>;
  };
}
/** Terminal `result` line: final text, session ID, cost, and error flag. */
interface ClaudeResultEvent {
  type: 'result';
  result?: string;
  session_id?: string;
  total_cost_usd?: number;
  is_error?: boolean;
}
// Catch-all last variant keeps parseLine total over unrecognized line types.
type ClaudeEvent = ClaudeSystemEvent | ClaudeStreamEvent | ClaudeAssistantEvent | ClaudeResultEvent | { type: string; is_error?: boolean; result?: string };
/**
 * Parses Claude Code CLI `--output-format stream-json` NDJSON lines into
 * standardized StreamEvents. Stateless: end() emits nothing because the
 * CLI itself emits a terminal `result` record.
 */
export class ClaudeStreamParser implements StreamParser {
  readonly provider = 'claude';

  parseLine(line: string): StreamEvent[] {
    const raw = line.trim();
    if (raw.length === 0) return [];

    let msg: ClaudeEvent;
    try {
      msg = JSON.parse(raw);
    } catch {
      // Non-JSON noise on the stream is silently ignored.
      return [];
    }

    // Stream-level errors surface immediately; terminal `result` records
    // carrying is_error are handled in the result branch instead.
    if ('is_error' in msg && msg.is_error && 'result' in msg && msg.type !== 'result') {
      return [{ type: 'error', message: String(msg.result) }];
    }

    if (msg.type === 'system') {
      return this.fromSystem(msg as ClaudeSystemEvent);
    }
    if (msg.type === 'stream_event') {
      return this.fromStreamEvent(msg as ClaudeStreamEvent);
    }
    if (msg.type === 'assistant') {
      return this.fromAssistant(msg as ClaudeAssistantEvent);
    }
    if (msg.type === 'result') {
      const record = msg as ClaudeResultEvent;
      return [{
        type: 'result',
        text: record.result || '',
        sessionId: record.session_id,
        costUsd: record.total_cost_usd,
        isError: record.is_error === true,
      }];
    }
    // message_start, content_block_stop, message_stop, user: intentionally ignored.
    return [];
  }

  end(): StreamEvent[] {
    // The terminal state already arrived via the `result` record.
    return [];
  }

  /** `system` init line → init event carrying the session ID. */
  private fromSystem(sys: ClaudeSystemEvent): StreamEvent[] {
    if (sys.subtype === 'init' && sys.session_id) {
      return [{ type: 'init', sessionId: sys.session_id }];
    }
    return [];
  }

  /** Granular wrapped events → text deltas, tool-use starts, turn ends. */
  private fromStreamEvent(wrapper: ClaudeStreamEvent): StreamEvent[] {
    const inner = wrapper.event;
    if (!inner) return [];
    if (inner.type === 'content_block_delta' && inner.delta?.type === 'text_delta' && inner.delta.text) {
      return [{ type: 'text_delta', text: inner.delta.text }];
    }
    if (inner.type === 'content_block_start' && inner.content_block?.type === 'tool_use') {
      return [{
        type: 'tool_use_start',
        name: inner.content_block.name || 'unknown',
        id: inner.content_block.id || '',
      }];
    }
    if (inner.type === 'message_delta' && inner.delta?.stop_reason) {
      return [{ type: 'turn_end', stopReason: inner.delta.stop_reason }];
    }
    return [];
  }

  /**
   * Complete assistant messages (newer CLI behavior, replacing granular
   * stream_event deltas) → one event per text/tool_use content block.
   */
  private fromAssistant(assistant: ClaudeAssistantEvent): StreamEvent[] {
    const blocks = assistant.message?.content;
    if (!Array.isArray(blocks)) return [];
    const events: StreamEvent[] = [];
    for (const block of blocks) {
      if (block.type === 'text' && block.text) {
        events.push({ type: 'text_delta', text: block.text });
      } else if (block.type === 'tool_use' && block.name) {
        events.push({ type: 'tool_use_start', name: block.name, id: block.id || '' });
      }
    }
    return events;
  }
}

View File

@@ -0,0 +1,32 @@
/**
* Generic Fallback Stream Parser
*
* For providers without a dedicated parser. Treats each line as text output.
* Accumulates all output and emits a final result event on stream end.
*/
import type { StreamEvent, StreamParser } from '../stream-types.js';
/**
 * Fallback parser for providers without a dedicated one. Each non-empty
 * line is echoed as a text delta; end() flushes everything seen so far as
 * a single result event. Stateful — use one instance per stream.
 */
export class GenericStreamParser implements StreamParser {
  readonly provider = 'generic';

  /** Lines collected so far; consumed and reset by end(). */
  private buffer: string[] = [];

  parseLine(line: string): StreamEvent[] {
    if (!line) return [];
    this.buffer.push(line);
    return [{ type: 'text_delta', text: `${line}\n` }];
  }

  end(): StreamEvent[] {
    const combined = this.buffer.join('\n');
    this.buffer = [];
    return combined ? [{ type: 'result', text: combined }] : [];
  }
}

View File

@@ -0,0 +1,31 @@
/**
* Stream Parser Registry
*
* Factory function to get the appropriate stream parser for a provider.
*/
import type { StreamParser } from '../stream-types.js';
import { ClaudeStreamParser } from './claude.js';
import { GenericStreamParser } from './generic.js';
/** Provider name → parser constructor; extend when adding dedicated parsers. */
const parserRegistry: Record<string, new () => StreamParser> = {
  claude: ClaudeStreamParser,
};

/**
 * Get a stream parser for the given provider.
 * Returns the provider-specific parser when one is registered, otherwise
 * the generic line-based fallback.
 */
export function getStreamParser(providerName: string): StreamParser {
  const ParserCtor = parserRegistry[providerName];
  return ParserCtor ? new ParserCtor() : new GenericStreamParser();
}
// Re-export types and parsers for direct access
export type { StreamParser, StreamEvent } from '../stream-types.js';
export { ClaudeStreamParser } from './claude.js';
export { GenericStreamParser } from './generic.js';

View File

@@ -0,0 +1,145 @@
/**
* Built-in Agent Provider Presets
*
* Data-driven configuration for all supported agent CLI providers.
* Ported from reference/gastown/internal/config/agents.go builtinPresets.
*/
import type { AgentProviderConfig } from './types.js';
export const PROVIDER_PRESETS: Record<string, AgentProviderConfig> = {
  // Claude Code: native prompt handling, flag-based resume, stream-json output.
  claude: {
    name: 'claude',
    command: 'claude',
    args: ['--dangerously-skip-permissions', '--verbose'],
    processNames: ['node', 'claude'],
    configDirEnv: 'CLAUDE_CONFIG_DIR',
    resumeFlag: '--resume',
    resumeStyle: 'flag',
    promptMode: 'native',
    // No structuredOutput - schema enforcement via prompt text + validation
    sessionId: {
      extractFrom: 'event',
      field: 'session_id',
      eventType: 'system',
    },
    nonInteractive: {
      outputFlag: '--output-format stream-json',
    },
  },
  // Codex: resume is a subcommand; non-interactive runs via `codex exec --json`.
  codex: {
    name: 'codex',
    command: 'codex',
    args: ['--full-auto'],
    processNames: ['codex'],
    resumeFlag: 'resume',
    resumeStyle: 'subcommand',
    promptMode: 'native',
    structuredOutput: {
      flag: '--output-schema',
      schemaMode: 'file',
      outputFormat: 'jsonl',
    },
    sessionId: {
      extractFrom: 'event',
      field: 'thread_id',
      eventType: 'thread.started',
    },
    nonInteractive: {
      subcommand: 'exec',
      outputFlag: '--json',
    },
  },
  // Gemini: prompt passed via -p flag; session ID read from the final result.
  gemini: {
    name: 'gemini',
    command: 'gemini',
    args: ['--sandbox=off'],
    processNames: ['gemini'],
    resumeFlag: '--resume',
    resumeStyle: 'flag',
    promptMode: 'flag',
    structuredOutput: {
      flag: '--output-format',
      schemaMode: 'none',
      outputFormat: 'json',
    },
    sessionId: {
      extractFrom: 'result',
      field: 'session_id',
    },
    nonInteractive: {
      promptFlag: '-p',
      outputFlag: '--output-format json',
    },
  },
  // Cursor: no resume support; prompt via -p, JSON output.
  cursor: {
    name: 'cursor',
    command: 'cursor-agent',
    args: ['-f'],
    processNames: ['cursor-agent'],
    resumeStyle: 'none',
    promptMode: 'flag',
    structuredOutput: {
      flag: '--output-format',
      schemaMode: 'none',
      outputFormat: 'json',
    },
    nonInteractive: {
      promptFlag: '-p',
      outputFlag: '--output-format json',
    },
  },
  // Auggie: minimal preset — no resume, no structured output or session ID.
  auggie: {
    name: 'auggie',
    command: 'aug',
    args: ['--allow-indexing'],
    processNames: ['aug'],
    resumeStyle: 'none',
    promptMode: 'flag',
    nonInteractive: {
      promptFlag: '-p',
    },
  },
  // Amp: thread-based resume via --thread; session ID from the result payload.
  amp: {
    name: 'amp',
    command: 'amp',
    args: ['--allow-all'],
    processNames: ['amp'],
    resumeFlag: '--thread',
    resumeStyle: 'flag',
    promptMode: 'flag',
    sessionId: {
      extractFrom: 'result',
      field: 'thread_id',
    },
    nonInteractive: {
      promptFlag: '-p',
      outputFlag: '--json',
    },
  },
  // opencode: permissions granted via env var; non-interactive via `opencode run`.
  opencode: {
    name: 'opencode',
    command: 'opencode',
    args: [],
    env: { OPENCODE_PERMISSION: '{"*":"allow"}' },
    processNames: ['opencode', 'node', 'bun'],
    resumeStyle: 'none',
    promptMode: 'flag',
    structuredOutput: {
      flag: '--format',
      schemaMode: 'none',
      outputFormat: 'json',
    },
    nonInteractive: {
      subcommand: 'run',
      promptFlag: '-p',
      outputFlag: '--format json',
    },
  },
};

View File

@@ -0,0 +1,50 @@
/**
* Agent Provider Registry
*
* In-memory registry of agent provider configurations.
* Pre-populated with built-in presets, extensible via registerProvider()
* or loadProvidersFromFile() for custom/override configs.
*/
import { readFileSync } from 'node:fs';
import type { AgentProviderConfig } from './types.js';
import { PROVIDER_PRESETS } from './presets.js';
// Registry seeded with the built-in presets; mutated only through
// registerProvider() and loadProvidersFromFile().
const providers = new Map<string, AgentProviderConfig>(
  Object.entries(PROVIDER_PRESETS),
);

/**
 * Look up a provider configuration by name.
 * @returns The config, or null when no provider with that name is registered.
 */
export function getProvider(name: string): AgentProviderConfig | null {
  return providers.get(name) ?? null;
}

/** Names of all registered providers. */
export function listProviders(): string[] {
  return [...providers.keys()];
}

/** Register a new provider, or override an existing one with the same name. */
export function registerProvider(config: AgentProviderConfig): void {
  providers.set(config.name, config);
}

/**
 * Merge provider configurations from a JSON file into the registry.
 *
 * The file must map provider names to AgentProviderConfig objects; the map
 * key wins over any `name` field inside the config. Existing providers with
 * matching names are overridden.
 *
 * @param path - Path to the JSON file.
 * @throws If the file cannot be read or does not contain valid JSON.
 */
export function loadProvidersFromFile(path: string): void {
  const parsed = JSON.parse(readFileSync(path, 'utf-8')) as Record<string, AgentProviderConfig>;
  for (const [name, config] of Object.entries(parsed)) {
    providers.set(name, { ...config, name });
  }
}

View File

@@ -0,0 +1,79 @@
/**
* Stream Event Types and Parser Interface
*
* Standardized events emitted by all provider stream parsers.
* Each provider's NDJSON output is normalized to these common events.
*/
/** Initialization event - emitted at stream start, may contain session ID */
export interface StreamInitEvent {
  type: 'init';
  sessionId?: string;
}
/** Text delta - chunk of assistant text output */
export interface StreamTextDeltaEvent {
  type: 'text_delta';
  text: string;
}
/** Tool use started - agent is calling a tool */
export interface StreamToolUseStartEvent {
  type: 'tool_use_start';
  name: string;
  /** Provider-assigned tool-call ID; may be '' when the provider omits it */
  id: string;
}
/** Tool result received */
export interface StreamToolResultEvent {
  type: 'tool_result';
  id: string;
}
/** Turn ended - assistant stopped responding */
export interface StreamTurnEndEvent {
  type: 'turn_end';
  stopReason: string;
}
/** Final result - emitted at stream end with complete output */
export interface StreamResultEvent {
  type: 'result';
  text: string;
  sessionId?: string;
  costUsd?: number;
  /** True when the CLI returned an error result (e.g. auth failure, usage limit) */
  isError?: boolean;
}
/** Error event */
export interface StreamErrorEvent {
  type: 'error';
  message: string;
}
/** Union of all stream event types (discriminated on `type`) */
export type StreamEvent =
  | StreamInitEvent
  | StreamTextDeltaEvent
  | StreamToolUseStartEvent
  | StreamToolResultEvent
  | StreamTurnEndEvent
  | StreamResultEvent
  | StreamErrorEvent;
/**
 * Stream Parser Interface
 *
 * Implementations parse provider-specific NDJSON into standardized events.
 * Implementations may buffer per-stream state between parseLine() calls
 * (flushed by end()), so use a fresh parser instance for each stream.
 */
export interface StreamParser {
  /** Provider name this parser handles */
  readonly provider: string;
  /** Parse a single NDJSON line into zero or more standardized events */
  parseLine(line: string): StreamEvent[];
  /** Signal end of stream - allows parser to emit final events */
  end(): StreamEvent[];
}

View File

@@ -0,0 +1,61 @@
/**
* Agent Provider Configuration Types
*
* Data-driven configuration for multi-provider agent spawning.
* Each provider (Claude, Codex, Gemini, etc.) has a config that describes
* how to invoke its CLI, pass prompts, extract session IDs, and resume.
*/
export interface StructuredOutputConfig {
  /** CLI flag for structured output (e.g. "--json-schema", "--output-schema") */
  flag: string;
  /** How to pass the schema: inline JSON string, file path, or not supported */
  schemaMode: 'inline' | 'file' | 'none';
  /** Format of CLI output: single JSON object, JSONL stream, or raw text */
  outputFormat: 'json' | 'jsonl' | 'text';
}
export interface SessionIdConfig {
  /** Where to find the session ID in CLI output */
  extractFrom: 'result' | 'event';
  /** Field name containing the session ID */
  field: string;
  /** For JSONL: which event type contains the session ID */
  eventType?: string;
}
export interface NonInteractiveConfig {
  /** Subcommand for non-interactive mode (e.g. "exec" for codex, "run" for opencode) */
  subcommand?: string;
  /** Flag to pass the prompt (e.g. "-p" for gemini/cursor) */
  promptFlag?: string;
  /** Flag(s) for JSON output (e.g. "--json", "--output-format json") */
  outputFlag?: string;
}
/** Full description of one agent CLI provider; see PROVIDER_PRESETS for the built-ins. */
export interface AgentProviderConfig {
  /** Provider name identifier */
  name: string;
  /** CLI binary command */
  command: string;
  /** Default autonomous-mode args */
  args: string[];
  /** Extra environment variables to set */
  env?: Record<string, string>;
  /** Process names for detection (ps matching) */
  processNames: string[];
  /** Env var name for config dir isolation (e.g. "CLAUDE_CONFIG_DIR") */
  configDirEnv?: string;
  /** Flag or subcommand for resume (e.g. "--resume", "resume") */
  resumeFlag?: string;
  /** How resume works: flag-based, subcommand-based, or unsupported */
  resumeStyle: 'flag' | 'subcommand' | 'none';
  /** How prompts are passed: native (-p built-in), flag (use nonInteractive.promptFlag), or none */
  promptMode: 'native' | 'flag' | 'none';
  /** Structured output configuration */
  structuredOutput?: StructuredOutputConfig;
  /** Session ID extraction configuration */
  sessionId?: SessionIdConfig;
  /** Non-interactive mode configuration */
  nonInteractive?: NonInteractiveConfig;
}

View File

@@ -0,0 +1,97 @@
/**
* Agent Signal Schema
*
* Agents communicate via a trivial JSON signal: done, questions, or error.
* All structured output is file-based (see file-io.ts).
*/
import { z } from 'zod';
// =============================================================================
// SHARED SCHEMAS
// =============================================================================
// One selectable option for a multiple-choice question.
const optionSchema = z.object({
  label: z.string(),
  description: z.string().optional(),
});
// A single question an agent asks; `id` lets answers be matched back.
export const questionItemSchema = z.object({
  id: z.string(),
  question: z.string(),
  options: z.array(optionSchema).optional(),
  multiSelect: z.boolean().optional(),
});
export type QuestionItem = z.infer<typeof questionItemSchema>;
// =============================================================================
// UNIVERSAL SIGNAL SCHEMA
// =============================================================================
// Discriminated on `status`: done | questions | error.
export const agentSignalSchema = z.discriminatedUnion('status', [
  z.object({ status: z.literal('done') }),
  z.object({ status: z.literal('questions'), questions: z.array(questionItemSchema) }),
  z.object({ status: z.literal('error'), error: z.string() }),
]);
export type AgentSignal = z.infer<typeof agentSignalSchema>;
// Plain JSON Schema mirror of agentSignalSchema, for tools that take a schema
// file/flag rather than a zod validator. Keep in sync with the zod schema above.
export const agentSignalJsonSchema = {
  type: 'object',
  oneOf: [
    {
      properties: {
        status: { const: 'done' },
      },
      required: ['status'],
    },
    {
      properties: {
        status: { const: 'questions' },
        questions: {
          type: 'array',
          items: {
            type: 'object',
            properties: {
              id: { type: 'string' },
              question: { type: 'string' },
              options: {
                type: 'array',
                items: {
                  type: 'object',
                  properties: {
                    label: { type: 'string' },
                    description: { type: 'string' },
                  },
                  required: ['label'],
                },
              },
              multiSelect: { type: 'boolean' },
            },
            required: ['id', 'question'],
          },
        },
      },
      required: ['status', 'questions'],
    },
    {
      properties: {
        status: { const: 'error' },
        error: { type: 'string' },
      },
      required: ['status', 'error'],
    },
  ],
};
// =============================================================================
// BACKWARD COMPATIBILITY
// =============================================================================
/** @deprecated Use agentSignalSchema */
export const agentOutputSchema = agentSignalSchema;
/** @deprecated Use AgentSignal */
export type AgentOutput = AgentSignal;
/** @deprecated Use agentSignalJsonSchema */
export const agentOutputJsonSchema = agentSignalJsonSchema;

240
apps/server/agent/types.ts Normal file
View File

@@ -0,0 +1,240 @@
/**
* Agent Module Types
*
* Port interface for agent lifecycle management.
* AgentManager is the PORT. Implementations are ADAPTERS.
*/
export type AgentStatus = 'idle' | 'running' | 'waiting_for_input' | 'stopped' | 'crashed';
/**
* Agent operation mode.
*
* - execute: Standard task execution (default)
* - discuss: Gather context through questions, output decisions
* - plan: Plan initiative into phases
* - detail: Detail phase into individual tasks
*/
export type AgentMode = 'execute' | 'discuss' | 'plan' | 'detail' | 'refine';
/**
* Context data written as input files in agent workdir before spawn.
*/
export interface AgentInputContext {
initiative?: import('../db/schema.js').Initiative;
pages?: import('./content-serializer.js').PageForSerialization[];
phase?: import('../db/schema.js').Phase;
task?: import('../db/schema.js').Task;
/** All phases for the initiative (read-only context for agents) */
phases?: Array<import('../db/schema.js').Phase & { dependsOn?: string[] }>;
/** All tasks for the initiative (read-only context for agents) */
tasks?: import('../db/schema.js').Task[];
/** Agent ID for inter-agent communication */
agentId?: string;
/** Agent name for inter-agent communication */
agentName?: string;
}
/**
* Options for spawning a new agent
*/
export interface SpawnAgentOptions {
/** Human-readable name/alias for the agent (auto-generated if omitted) */
name?: string;
/** Task ID to assign to agent (optional for architect modes) */
taskId?: string | null;
/** Initial prompt/instruction for the agent */
prompt: string;
/** Optional working directory (defaults to worktree path) */
cwd?: string;
/** Agent operation mode (defaults to 'execute') */
mode?: AgentMode;
/** Provider name (defaults to 'claude') */
provider?: string;
/** Initiative ID — when set, worktrees are created for all linked projects */
initiativeId?: string;
/** Phase ID — used by dispatch for branch-aware spawning */
phaseId?: string;
/** Base branch for worktree creation (defaults to 'main') */
baseBranch?: string;
/** Explicit branch name for worktree (overrides 'agent/<alias>') */
branchName?: string;
/** Context data to write as input files in agent workdir */
inputContext?: AgentInputContext;
}
/**
* Represents a Claude agent instance
*/
export interface AgentInfo {
/** Unique identifier for this agent */
id: string;
/** Human-readable alias for the agent (e.g. 'jolly-penguin') */
name: string;
/** Task this agent is working on (null for architect agents) */
taskId: string | null;
/** Initiative this agent is linked to (null if standalone) */
initiativeId: string | null;
/** CLI session ID for resumption (null until first run completes) */
sessionId: string | null;
/** Agent alias / worktree key (deterministic path: agent-workdirs/<alias>/) */
worktreeId: string;
/** Current status (waiting_for_input = paused on AskUserQuestion) */
status: AgentStatus;
/** Current operation mode */
mode: AgentMode;
/** Provider name (e.g. 'claude', 'codex', 'gemini') */
provider: string;
/** Account ID used for this agent (null if no account management) */
accountId: string | null;
/** When the agent was created */
createdAt: Date;
/** Last activity timestamp */
updatedAt: Date;
/** When the user dismissed this agent (null if not dismissed) */
userDismissedAt?: Date | null;
}
/**
 * Result from agent execution
 *
 * Returned by AgentManager.getResult() once an agent has completed
 * or stopped (null while still running).
 */
export interface AgentResult {
  /** Whether the task completed successfully */
  success: boolean;
  /** Result message or error description */
  message: string;
  /** Files modified during execution (absent when not tracked) */
  filesModified?: string[];
}
/**
 * Individual question item with unique ID for answer matching
 *
 * The id is the key used in the answers map passed to
 * AgentManager.resume().
 */
export interface QuestionItem {
  /** Unique identifier for matching answers */
  id: string;
  /** The question being asked */
  question: string;
  /** Optional predefined options for the question */
  options?: Array<{ label: string; description?: string }>;
  /** Whether multiple options can be selected */
  multiSelect?: boolean;
}
/**
 * Pending questions when agent is waiting for input
 *
 * Returned by AgentManager.getPendingQuestions() while an agent is in
 * 'waiting_for_input' status.
 */
export interface PendingQuestions {
  /** Array of questions the agent is asking */
  questions: QuestionItem[];
}
/**
 * AgentManager Port Interface
 *
 * Manages Claude agent lifecycle - spawn, stop, list, resume.
 *
 * Covers requirements:
 * - AGENT-01: Spawn new agent with task assignment
 * - AGENT-02: Stop running agent
 * - AGENT-03: List all agents with status
 * - AGENT-04: Resume agent session
 * - AGENT-05: Background mode (implementation detail)
 */
export interface AgentManager {
  /**
   * Spawn a new agent to work on a task.
   *
   * Creates isolated worktree, starts Claude SDK session,
   * and begins executing the prompt.
   *
   * @param options - Spawn configuration
   * @returns Agent info with session ID for later resumption
   */
  spawn(options: SpawnAgentOptions): Promise<AgentInfo>;
  /**
   * Stop a running agent.
   *
   * Gracefully stops the agent's work. Worktree is preserved
   * for potential resumption.
   *
   * @param agentId - Agent to stop
   */
  stop(agentId: string): Promise<void>;
  /**
   * List all agents with their current status.
   *
   * @returns Array of all agents
   */
  list(): Promise<AgentInfo[]>;
  /**
   * Get a specific agent by ID.
   *
   * @param agentId - Agent ID
   * @returns Agent if found, null otherwise
   */
  get(agentId: string): Promise<AgentInfo | null>;
  /**
   * Get a specific agent by name.
   *
   * NOTE(review): returns a single agent, which presumes names are
   * unique — confirm the repository enforces that.
   *
   * @param name - Agent name (human-readable)
   * @returns Agent if found, null otherwise
   */
  getByName(name: string): Promise<AgentInfo | null>;
  /**
   * Resume an agent that's waiting for input.
   *
   * Used when agent paused on questions and user provides responses.
   * Uses stored session ID to continue with full context.
   * Agent must be in 'waiting_for_input' status.
   *
   * @param agentId - Agent to resume
   * @param answers - Map of question ID (see QuestionItem.id) to user's answer
   */
  resume(agentId: string, answers: Record<string, string>): Promise<void>;
  /**
   * Get the result of an agent's work.
   *
   * Only available after agent completes or stops.
   *
   * @param agentId - Agent ID
   * @returns Result if available, null if agent still running
   */
  getResult(agentId: string): Promise<AgentResult | null>;
  /**
   * Get pending questions for an agent waiting for input.
   *
   * Only available when agent status is 'waiting_for_input'.
   *
   * @param agentId - Agent ID
   * @returns Pending questions if available, null otherwise
   */
  getPendingQuestions(agentId: string): Promise<PendingQuestions | null>;
  /**
   * Delete an agent and clean up all associated resources.
   *
   * Tears down worktrees, branches, logs, and removes the DB record.
   * If the agent is still running, kills the process first.
   * Destructive and irreversible — prefer dismiss() to merely hide
   * an agent while keeping its record and worktree.
   *
   * @param agentId - Agent to delete
   */
  delete(agentId: string): Promise<void>;
  /**
   * Dismiss an agent.
   *
   * Marks the agent as dismissed by the user, which excludes it from
   * active agent queries. The agent record and worktree are preserved.
   *
   * @param agentId - Agent to dismiss
   */
  dismiss(agentId: string): Promise<void>;
}

31
apps/server/bin/cw.ts Normal file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env node
/**
 * Codewalk District CLI Entry Point
 *
 * Users can install globally via:
 * - npm link (during development)
 * - npm i -g codewalk-district (for release)
 *
 * Installs last-resort error handlers so any error that escapes the
 * normal flow is logged before the process exits non-zero.
 */
import { runCli } from '../cli/index.js';
import { logger } from '../logger/index.js';

// Handle uncaught errors gracefully: log, print, exit with failure.
process.on('uncaughtException', (err) => {
  logger.fatal({ err }, 'uncaught exception');
  console.error('Fatal error:', err.message);
  process.exit(1);
});

process.on('unhandledRejection', (cause) => {
  logger.fatal({ err: cause }, 'unhandled rejection');
  console.error('Unhandled promise rejection:', cause);
  process.exit(1);
});

// Run the CLI; surface any rejection it produces and exit non-zero.
runCli().catch((err) => {
  logger.fatal({ err }, 'CLI fatal error');
  console.error('CLI error:', err.message);
  process.exit(1);
});

1500
apps/server/cli/index.ts Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,57 @@
/**
* tRPC Client for CLI
*
* Type-safe client for communicating with the coordination server.
* Uses splitLink to route subscriptions to SSE and queries/mutations to HTTP batch.
*/
import { createTRPCClient, httpBatchLink, splitLink, httpSubscriptionLink } from '@trpc/client';
import type { AppRouter } from '../trpc/index.js';
/** Default server port */
const DEFAULT_PORT = 3847;
/** Default server host (loopback only) */
const DEFAULT_HOST = '127.0.0.1';
/**
 * Type-safe tRPC client for the coordination server.
 *
 * Derived from the server's AppRouter so procedure names and
 * input/output types stay in sync with the server definition.
 */
export type TrpcClient = ReturnType<typeof createTRPCClient<AppRouter>>;
/**
 * Creates a tRPC client for the coordination server.
 *
 * Subscriptions are routed over SSE (httpSubscriptionLink); queries
 * and mutations go through the batching HTTP link (httpBatchLink).
 *
 * @param port - Server port (default: 3847)
 * @param host - Server host (default: 127.0.0.1)
 * @returns Type-safe tRPC client
 */
export function createTrpcClient(
  port: number = DEFAULT_PORT,
  host: string = DEFAULT_HOST
): TrpcClient {
  const endpoint = `http://${host}:${port}/trpc`;

  // Route by operation type: subscriptions → SSE, everything else → HTTP batch.
  const transport = splitLink({
    condition: (op) => op.type === 'subscription',
    true: httpSubscriptionLink({ url: endpoint }),
    false: httpBatchLink({ url: endpoint }),
  });

  return createTRPCClient<AppRouter>({ links: [transport] });
}
/**
 * Creates a tRPC client using environment variables or defaults.
 *
 * Uses CW_PORT and CW_HOST environment variables if available,
 * falling back to defaults (127.0.0.1:3847).
 *
 * Fix: a non-numeric or out-of-range CW_PORT (e.g. "abc") previously
 * produced NaN and a malformed URL ("http://host:NaN/trpc"); such
 * values now fall back to the default port instead.
 *
 * @returns Type-safe tRPC client
 */
export function createDefaultTrpcClient(): TrpcClient {
  const rawPort = process.env.CW_PORT;
  const parsedPort = rawPort !== undefined ? Number.parseInt(rawPort, 10) : NaN;
  // Only accept a valid TCP port number; anything else uses the default.
  const port =
    Number.isInteger(parsedPort) && parsedPort > 0 && parsedPort <= 65535
      ? parsedPort
      : DEFAULT_PORT;
  const host = process.env.CW_HOST ?? DEFAULT_HOST;
  return createTrpcClient(port, host);
}

View File

@@ -0,0 +1,66 @@
/**
* .cwrc File Operations
*
* Find, read, and write the .cwrc configuration file.
* The file's presence marks the workspace root directory.
*/
import { readFileSync, writeFileSync, existsSync } from 'node:fs';
import { join, dirname, parse } from 'node:path';
import type { CwConfig } from './types.js';
/** Name of the config file; its presence also marks the workspace root. */
const CWRC_FILENAME = '.cwrc';
/**
 * Locate the workspace root by walking parent directories from
 * `startDir` until a .cwrc file is found.
 *
 * @param startDir - Directory to start searching from (default: cwd)
 * @returns Absolute path of the directory containing .cwrc, or null
 *          when the filesystem root is reached without a match.
 */
export function findWorkspaceRoot(startDir: string = process.cwd()): string | null {
  for (let dir = startDir; ; ) {
    if (existsSync(join(dir, CWRC_FILENAME))) {
      return dir;
    }
    const parent = dirname(dir);
    if (parent === dir) {
      return null; // hit the filesystem root without finding .cwrc
    }
    dir = parent;
  }
}
/**
 * Load the .cwrc config from `dir`.
 *
 * @param dir - Directory expected to contain the .cwrc file
 * @returns Parsed config, or null when no .cwrc exists in `dir`.
 * @throws SyntaxError if the file contains malformed JSON.
 */
export function readCwrc(dir: string): CwConfig | null {
  const filePath = join(dir, CWRC_FILENAME);
  return existsSync(filePath)
    ? (JSON.parse(readFileSync(filePath, 'utf-8')) as CwConfig)
    : null;
}
/**
 * Serialize `config` as pretty-printed JSON (with trailing newline)
 * and write it to `<dir>/.cwrc`, overwriting any existing file.
 */
export function writeCwrc(dir: string, config: CwConfig): void {
  const body = `${JSON.stringify(config, null, 2)}\n`;
  writeFileSync(join(dir, CWRC_FILENAME), body, 'utf-8');
}
/**
 * Create a default .cwrc config.
 *
 * @returns A minimal config containing only the current schema version.
 */
export function defaultCwConfig(): CwConfig {
  // Only the schema version is mandatory; future defaults get added here.
  return { version: 1 };
}

View File

@@ -0,0 +1,13 @@
/**
 * Configuration Module
 *
 * Public API for .cwrc workspace configuration: root discovery plus
 * read/write/default helpers. Import from this barrel rather than
 * from the individual implementation files.
 */
export type { CwConfig } from './types.js';
export {
  findWorkspaceRoot,
  readCwrc,
  writeCwrc,
  defaultCwConfig,
} from './cwrc.js';

View File

@@ -0,0 +1,15 @@
/**
 * .cwrc Configuration Types
 *
 * Defines the shape of the .cwrc configuration file.
 * Add new top-level keys here as the config grows.
 */
/**
 * Root configuration file schema.
 * Lives at the workspace root as `.cwrc`.
 *
 * The file's mere presence marks the workspace root for
 * findWorkspaceRoot(), independent of its contents.
 */
export interface CwConfig {
  /** Schema version for forward compatibility (currently always 1) */
  version: 1;
}

266
apps/server/container.ts Normal file
View File

@@ -0,0 +1,266 @@
/**
* Dependency Container
*
* Factory functions for creating the full dependency graph.
* Keeps startServer() thin and makes repo wiring reusable by the test harness.
*/
import type { DrizzleDatabase } from './db/index.js';
import {
createDatabase,
ensureSchema,
DrizzleInitiativeRepository,
DrizzlePhaseRepository,
DrizzleTaskRepository,
DrizzleMessageRepository,
DrizzleAgentRepository,
DrizzlePageRepository,
DrizzleProjectRepository,
DrizzleAccountRepository,
DrizzleChangeSetRepository,
DrizzleLogChunkRepository,
DrizzleConversationRepository,
} from './db/index.js';
import type { InitiativeRepository } from './db/repositories/initiative-repository.js';
import type { PhaseRepository } from './db/repositories/phase-repository.js';
import type { TaskRepository } from './db/repositories/task-repository.js';
import type { MessageRepository } from './db/repositories/message-repository.js';
import type { AgentRepository } from './db/repositories/agent-repository.js';
import type { PageRepository } from './db/repositories/page-repository.js';
import type { ProjectRepository } from './db/repositories/project-repository.js';
import type { AccountRepository } from './db/repositories/account-repository.js';
import type { ChangeSetRepository } from './db/repositories/change-set-repository.js';
import type { LogChunkRepository } from './db/repositories/log-chunk-repository.js';
import type { ConversationRepository } from './db/repositories/conversation-repository.js';
import type { EventBus } from './events/index.js';
import { createEventBus } from './events/index.js';
import { ProcessManager, ProcessRegistry } from './process/index.js';
import { LogManager } from './logging/index.js';
import { MultiProviderAgentManager } from './agent/index.js';
import { DefaultAccountCredentialManager } from './agent/credentials/index.js';
import type { AccountCredentialManager } from './agent/credentials/types.js';
import { DefaultDispatchManager } from './dispatch/manager.js';
import { DefaultPhaseDispatchManager } from './dispatch/phase-manager.js';
import type { DispatchManager, PhaseDispatchManager } from './dispatch/types.js';
import { SimpleGitBranchManager } from './git/simple-git-branch-manager.js';
import type { BranchManager } from './git/branch-manager.js';
import { ExecutionOrchestrator } from './execution/orchestrator.js';
import { DefaultConflictResolutionService } from './coordination/conflict-resolution-service.js';
import { PreviewManager } from './preview/index.js';
import { findWorkspaceRoot } from './config/index.js';
import { createModuleLogger } from './logger/index.js';
import type { ServerContextDeps } from './server/index.js';
// =============================================================================
// Repositories
// =============================================================================
/**
 * All 11 repository ports.
 *
 * Fields are typed as the port interfaces, not the concrete Drizzle
 * classes, so implementations can be swapped (e.g. in tests).
 */
export interface Repositories {
  initiativeRepository: InitiativeRepository;
  phaseRepository: PhaseRepository;
  taskRepository: TaskRepository;
  messageRepository: MessageRepository;
  agentRepository: AgentRepository;
  pageRepository: PageRepository;
  projectRepository: ProjectRepository;
  accountRepository: AccountRepository;
  changeSetRepository: ChangeSetRepository;
  logChunkRepository: LogChunkRepository;
  conversationRepository: ConversationRepository;
}
/**
 * Create all 11 Drizzle repository adapters from a database instance.
 * Reusable by both the production server and the test harness.
 *
 * @param db - Open Drizzle database handle shared by all adapters
 * @returns The full set of repository ports backed by Drizzle
 */
export function createRepositories(db: DrizzleDatabase): Repositories {
  const repositories: Repositories = {
    initiativeRepository: new DrizzleInitiativeRepository(db),
    phaseRepository: new DrizzlePhaseRepository(db),
    taskRepository: new DrizzleTaskRepository(db),
    messageRepository: new DrizzleMessageRepository(db),
    agentRepository: new DrizzleAgentRepository(db),
    pageRepository: new DrizzlePageRepository(db),
    projectRepository: new DrizzleProjectRepository(db),
    accountRepository: new DrizzleAccountRepository(db),
    changeSetRepository: new DrizzleChangeSetRepository(db),
    logChunkRepository: new DrizzleLogChunkRepository(db),
    conversationRepository: new DrizzleConversationRepository(db),
  };
  return repositories;
}
// =============================================================================
// Container
// =============================================================================
/**
 * Full dependency graph for the coordination server.
 *
 * Extends Repositories so the 11 repository ports are available
 * directly alongside the managers and infrastructure services.
 */
export interface Container extends Repositories {
  db: DrizzleDatabase;
  eventBus: EventBus;
  processManager: ProcessManager;
  logManager: LogManager;
  /** Workspace root (directory containing .cwrc, falling back to cwd) */
  workspaceRoot: string;
  credentialManager: AccountCredentialManager;
  agentManager: MultiProviderAgentManager;
  dispatchManager: DispatchManager;
  phaseDispatchManager: PhaseDispatchManager;
  branchManager: BranchManager;
  executionOrchestrator: ExecutionOrchestrator;
  previewManager: PreviewManager;
  /** Extract the subset of deps that CoordinationServer needs. */
  toContextDeps(): ServerContextDeps;
}
/**
 * Options for container creation.
 */
export interface ContainerOptions {
  /** Debug flag passed through to the agent manager (default: false) */
  debug?: boolean;
}
/**
 * Create the full dependency container.
 *
 * Wires: ProcessRegistry → EventBus → ProcessManager → LogManager →
 * Database → Repositories → CredentialManager → AgentManager.
 * Runs ensureSchema() and reconcileAfterRestart() before returning.
 *
 * Construction order matters: the schema must exist before repositories
 * are used, and agent reconciliation must complete before the
 * execution orchestrator is started.
 *
 * @param options - Optional debug flag forwarded to the agent manager
 * @returns Fully wired container, ready to serve requests
 */
export async function createContainer(options?: ContainerOptions): Promise<Container> {
  const log = createModuleLogger('container');
  // Infrastructure
  const registry = new ProcessRegistry();
  const eventBus = createEventBus();
  const processManager = new ProcessManager(registry, eventBus);
  const logManager = new LogManager();
  // Database (schema must be ensured before any repository call)
  const db = createDatabase();
  ensureSchema(db);
  log.info('database initialized');
  // Repositories
  const repos = createRepositories(db);
  log.info('repositories created');
  // Workspace root (falls back to cwd when no .cwrc marker is found)
  const workspaceRoot = findWorkspaceRoot(process.cwd()) ?? process.cwd();
  log.info({ workspaceRoot }, 'workspace root resolved');
  // Credential manager
  const credentialManager = new DefaultAccountCredentialManager(eventBus);
  log.info('credential manager created');
  // Agent manager (positional constructor — argument order is significant)
  const agentManager = new MultiProviderAgentManager(
    repos.agentRepository,
    workspaceRoot,
    repos.projectRepository,
    repos.accountRepository,
    eventBus,
    credentialManager,
    repos.changeSetRepository,
    repos.phaseRepository,
    repos.taskRepository,
    repos.pageRepository,
    repos.logChunkRepository,
    options?.debug ?? false,
  );
  log.info('agent manager created');
  // Reconcile agent state from any previous server session
  await agentManager.reconcileAfterRestart();
  log.info('agent reconciliation complete');
  // Branch manager
  const branchManager = new SimpleGitBranchManager();
  log.info('branch manager created');
  // Dispatch managers (phase dispatch wraps the task-level dispatcher)
  const dispatchManager = new DefaultDispatchManager(
    repos.taskRepository,
    repos.messageRepository,
    agentManager,
    eventBus,
    repos.initiativeRepository,
    repos.phaseRepository,
  );
  const phaseDispatchManager = new DefaultPhaseDispatchManager(
    repos.phaseRepository,
    repos.taskRepository,
    dispatchManager,
    eventBus,
    repos.initiativeRepository,
    repos.projectRepository,
    branchManager,
    workspaceRoot,
  );
  log.info('dispatch managers created');
  // Conflict resolution service (for orchestrator)
  const conflictResolutionService = new DefaultConflictResolutionService(
    repos.taskRepository,
    repos.agentRepository,
    repos.messageRepository,
    eventBus,
  );
  // Execution orchestrator — started immediately after construction
  const executionOrchestrator = new ExecutionOrchestrator(
    branchManager,
    repos.phaseRepository,
    repos.taskRepository,
    repos.initiativeRepository,
    repos.projectRepository,
    phaseDispatchManager,
    conflictResolutionService,
    eventBus,
    workspaceRoot,
  );
  executionOrchestrator.start();
  log.info('execution orchestrator started');
  // Preview manager
  const previewManager = new PreviewManager(
    repos.projectRepository,
    eventBus,
    workspaceRoot,
  );
  log.info('preview manager created');
  return {
    db,
    eventBus,
    processManager,
    logManager,
    workspaceRoot,
    credentialManager,
    agentManager,
    dispatchManager,
    phaseDispatchManager,
    branchManager,
    executionOrchestrator,
    previewManager,
    ...repos,
    // Narrow the container to what CoordinationServer actually consumes.
    toContextDeps(): ServerContextDeps {
      return {
        agentManager,
        credentialManager,
        dispatchManager,
        phaseDispatchManager,
        branchManager,
        executionOrchestrator,
        previewManager,
        workspaceRoot,
        ...repos,
      };
    },
  };
}

View File

@@ -0,0 +1,371 @@
/**
* ConflictResolutionService Tests
*
* Tests for the conflict resolution service that handles merge conflicts
* by creating resolution tasks, updating statuses, and notifying agents.
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { DefaultConflictResolutionService } from './conflict-resolution-service.js';
import { DrizzleTaskRepository } from '../db/repositories/drizzle/task.js';
import { DrizzleAgentRepository } from '../db/repositories/drizzle/agent.js';
import { DrizzleMessageRepository } from '../db/repositories/drizzle/message.js';
import { DrizzlePhaseRepository } from '../db/repositories/drizzle/phase.js';
import { DrizzleInitiativeRepository } from '../db/repositories/drizzle/initiative.js';
import { createTestDatabase } from '../db/repositories/drizzle/test-helpers.js';
import type { DrizzleDatabase } from '../db/index.js';
import type { EventBus, DomainEvent } from '../events/types.js';
import type { TaskRepository } from '../db/repositories/task-repository.js';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { MessageRepository } from '../db/repositories/message-repository.js';
// =============================================================================
// Test Helpers
// =============================================================================
/**
 * Build an EventBus test double that records every emitted event in
 * `emittedEvents` while stubbing the subscription methods with vi.fn().
 */
function createMockEventBus(): EventBus & { emittedEvents: DomainEvent[] } {
  const recorded: DomainEvent[] = [];
  return {
    emittedEvents: recorded,
    emit<T extends DomainEvent>(event: T): void {
      recorded.push(event);
    },
    on: vi.fn(),
    off: vi.fn(),
    once: vi.fn(),
  };
}
// =============================================================================
// Tests
// =============================================================================
// Integration-style tests: real in-memory DB + Drizzle repositories,
// with only the EventBus mocked.
describe('DefaultConflictResolutionService', () => {
  let db: DrizzleDatabase;
  let taskRepository: TaskRepository;
  let agentRepository: AgentRepository;
  let messageRepository: MessageRepository;
  let eventBus: EventBus & { emittedEvents: DomainEvent[] };
  let service: DefaultConflictResolutionService;
  // IDs of the initiative/phase hierarchy created fresh for every test
  let testPhaseId: string;
  let testInitiativeId: string;
  beforeEach(async () => {
    // Set up test database
    db = createTestDatabase();
    taskRepository = new DrizzleTaskRepository(db);
    agentRepository = new DrizzleAgentRepository(db);
    messageRepository = new DrizzleMessageRepository(db);
    // Create required hierarchy for tasks
    const initiativeRepo = new DrizzleInitiativeRepository(db);
    const phaseRepo = new DrizzlePhaseRepository(db);
    const initiative = await initiativeRepo.create({
      name: 'Test Initiative',
    });
    testInitiativeId = initiative.id;
    const phase = await phaseRepo.create({
      initiativeId: initiative.id,
      name: 'Test Phase',
    });
    testPhaseId = phase.id;
    // Create mocks
    eventBus = createMockEventBus();
    // Create service
    service = new DefaultConflictResolutionService(
      taskRepository,
      agentRepository,
      messageRepository,
      eventBus
    );
  });
  // ===========================================================================
  // handleConflict() Tests
  // ===========================================================================
  describe('handleConflict', () => {
    it('should create conflict resolution task with correct properties', async () => {
      // Create original task
      const originalTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'Original Task',
        description: 'Original task description',
        priority: 'medium',
        order: 1,
      });
      // Create agent for task (binding unused; the record must exist for
      // handleConflict's agent lookup to succeed)
      const agent = await agentRepository.create({
        name: 'agent-test',
        taskId: originalTask.id,
        worktreeId: 'wt-test',
      });
      const conflicts = ['src/file1.ts', 'src/file2.ts'];
      await service.handleConflict(originalTask.id, conflicts);
      // Check resolution task was created
      const tasks = await taskRepository.findByPhaseId(testPhaseId);
      const resolutionTask = tasks.find(t => t.name.startsWith('Resolve conflicts:'));
      expect(resolutionTask).toBeDefined();
      expect(resolutionTask!.name).toBe('Resolve conflicts: Original Task');
      expect(resolutionTask!.priority).toBe('high');
      expect(resolutionTask!.type).toBe('auto');
      expect(resolutionTask!.status).toBe('pending');
      // Resolution task is ordered immediately after the original
      expect(resolutionTask!.order).toBe(originalTask.order + 1);
      expect(resolutionTask!.phaseId).toBe(testPhaseId);
      expect(resolutionTask!.initiativeId).toBe(testInitiativeId);
      expect(resolutionTask!.parentTaskId).toBe(originalTask.parentTaskId);
      // Check description contains conflict files
      expect(resolutionTask!.description).toContain('src/file1.ts');
      expect(resolutionTask!.description).toContain('src/file2.ts');
      expect(resolutionTask!.description).toContain('Original Task');
    });
    it('should update original task status to blocked', async () => {
      const originalTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'Task To Block',
        status: 'in_progress',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-block',
        taskId: originalTask.id,
        worktreeId: 'wt-block',
      });
      await service.handleConflict(originalTask.id, ['conflict.ts']);
      // Check original task is blocked
      const updatedTask = await taskRepository.findById(originalTask.id);
      expect(updatedTask!.status).toBe('blocked');
    });
    it('should create message to agent about conflict', async () => {
      const originalTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'Message Task',
        order: 1,
      });
      const agent = await agentRepository.create({
        name: 'agent-msg',
        taskId: originalTask.id,
        worktreeId: 'wt-msg',
      });
      const conflicts = ['conflict.ts'];
      await service.handleConflict(originalTask.id, conflicts);
      // Check message was created (service addresses it to the task's agent)
      const messages = await messageRepository.findByRecipient('agent', agent.id);
      expect(messages.length).toBe(1);
      expect(messages[0].recipientType).toBe('agent');
      expect(messages[0].recipientId).toBe(agent.id);
      expect(messages[0].senderType).toBe('user');
      expect(messages[0].type).toBe('info');
      expect(messages[0].requiresResponse).toBe(false);
      // Check message content
      expect(messages[0].content).toContain('Merge conflict detected');
      expect(messages[0].content).toContain('Message Task');
      expect(messages[0].content).toContain('conflict.ts');
      expect(messages[0].content).toContain('Resolve conflicts: Message Task');
    });
    it('should emit TaskQueuedEvent for resolution task', async () => {
      const originalTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'Event Task',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-event',
        taskId: originalTask.id,
        worktreeId: 'wt-event',
      });
      await service.handleConflict(originalTask.id, ['event.ts']);
      // Check TaskQueuedEvent was emitted
      expect(eventBus.emittedEvents.length).toBe(1);
      expect(eventBus.emittedEvents[0].type).toBe('task:queued');
      // Cast to any to reach the event-specific payload shape
      const event = eventBus.emittedEvents[0] as any;
      expect(event.payload.priority).toBe('high');
      expect(event.payload.dependsOn).toEqual([]);
      // Check taskId matches the created resolution task
      const tasks = await taskRepository.findByPhaseId(testPhaseId);
      const resolutionTask = tasks.find(t => t.name.startsWith('Resolve conflicts:'));
      expect(event.payload.taskId).toBe(resolutionTask!.id);
    });
    it('should work without messageRepository', async () => {
      // Create service without messageRepository
      const serviceNoMsg = new DefaultConflictResolutionService(
        taskRepository,
        agentRepository,
        undefined, // No message repository
        eventBus
      );
      const originalTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'No Message Task',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-no-msg',
        taskId: originalTask.id,
        worktreeId: 'wt-no-msg',
      });
      // Should not throw and should still create task
      await expect(serviceNoMsg.handleConflict(originalTask.id, ['test.ts']))
        .resolves.not.toThrow();
      // Check resolution task was still created
      const tasks = await taskRepository.findByPhaseId(testPhaseId);
      const resolutionTask = tasks.find(t => t.name.startsWith('Resolve conflicts:'));
      expect(resolutionTask).toBeDefined();
    });
    it('should work without eventBus', async () => {
      // Create service without eventBus
      const serviceNoEvents = new DefaultConflictResolutionService(
        taskRepository,
        agentRepository,
        messageRepository,
        undefined // No event bus
      );
      const originalTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'No Events Task',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-no-events',
        taskId: originalTask.id,
        worktreeId: 'wt-no-events',
      });
      // Should not throw and should still create task
      await expect(serviceNoEvents.handleConflict(originalTask.id, ['test.ts']))
        .resolves.not.toThrow();
      // Check resolution task was still created
      const tasks = await taskRepository.findByPhaseId(testPhaseId);
      const resolutionTask = tasks.find(t => t.name.startsWith('Resolve conflicts:'));
      expect(resolutionTask).toBeDefined();
    });
    it('should throw error when task not found', async () => {
      await expect(service.handleConflict('non-existent-id', ['test.ts']))
        .rejects.toThrow('Original task not found: non-existent-id');
    });
    it('should throw error when no agent found for task', async () => {
      // Task deliberately created with no agent assigned
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'Orphan Task',
        order: 1,
      });
      await expect(service.handleConflict(task.id, ['test.ts']))
        .rejects.toThrow(`No agent found for task: ${task.id}`);
    });
    it('should handle multiple conflict files correctly', async () => {
      const originalTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'Multi-Conflict Task',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-multi',
        taskId: originalTask.id,
        worktreeId: 'wt-multi',
      });
      const conflicts = [
        'src/components/Header.tsx',
        'src/utils/helpers.ts',
        'package.json',
        'README.md'
      ];
      await service.handleConflict(originalTask.id, conflicts);
      // Check all conflict files are in the description
      const tasks = await taskRepository.findByPhaseId(testPhaseId);
      const resolutionTask = tasks.find(t => t.name.startsWith('Resolve conflicts:'));
      expect(resolutionTask!.description).toContain('src/components/Header.tsx');
      expect(resolutionTask!.description).toContain('src/utils/helpers.ts');
      expect(resolutionTask!.description).toContain('package.json');
      expect(resolutionTask!.description).toContain('README.md');
    });
    it('should preserve parentTaskId from original task', async () => {
      // Create parent task first
      const parentTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        name: 'Parent Task',
        order: 1,
      });
      // Create child task
      const childTask = await taskRepository.create({
        phaseId: testPhaseId,
        initiativeId: testInitiativeId,
        parentTaskId: parentTask.id,
        name: 'Child Task',
        order: 2,
      });
      await agentRepository.create({
        name: 'agent-child',
        taskId: childTask.id,
        worktreeId: 'wt-child',
      });
      await service.handleConflict(childTask.id, ['conflict.ts']);
      // Check resolution task has same parentTaskId
      const tasks = await taskRepository.findByPhaseId(testPhaseId);
      const resolutionTask = tasks.find(t => t.name.startsWith('Resolve conflicts:'));
      expect(resolutionTask!.parentTaskId).toBe(parentTask.id);
    });
  });
});

View File

@@ -0,0 +1,159 @@
/**
* ConflictResolutionService
*
* Service responsible for handling merge conflicts by:
* - Creating conflict resolution tasks
* - Updating original task status
* - Notifying agents via messages
* - Emitting appropriate events
*
* This service is used by the CoordinationManager when merge conflicts occur.
*/
import type { EventBus, TaskQueuedEvent } from '../events/index.js';
import type { TaskRepository } from '../db/repositories/task-repository.js';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { MessageRepository } from '../db/repositories/message-repository.js';
// =============================================================================
// ConflictResolutionService Interface (Port)
// =============================================================================
/**
 * Branch context for merge conflicts from the branch hierarchy.
 */
export interface MergeContext {
  /** Branch whose changes are being merged in */
  sourceBranch: string;
  /** Branch receiving the merge */
  targetBranch: string;
}
/**
 * Service interface for handling merge conflicts.
 * This is the PORT - implementations are ADAPTERS.
 */
export interface ConflictResolutionService {
  /**
   * Handle a merge conflict by creating resolution task and notifying agent.
   *
   * @param taskId - ID of the task that conflicted
   * @param conflicts - List of conflicting file paths
   * @param mergeContext - Optional branch context for branch hierarchy merges
   */
  handleConflict(taskId: string, conflicts: string[], mergeContext?: MergeContext): Promise<void>;
}
// =============================================================================
// DefaultConflictResolutionService Implementation (Adapter)
// =============================================================================
/**
 * Default implementation of ConflictResolutionService.
 *
 * Creates conflict resolution tasks, updates task statuses, sends messages
 * to agents, and emits events when merge conflicts occur.
 *
 * messageRepository and eventBus are optional collaborators: when absent,
 * the corresponding notification/event step is skipped.
 */
export class DefaultConflictResolutionService implements ConflictResolutionService {
  constructor(
    private taskRepository: TaskRepository,
    private agentRepository: AgentRepository,
    private messageRepository?: MessageRepository,
    private eventBus?: EventBus
  ) {}
  /**
   * Handle a merge conflict.
   * Creates a conflict-resolution task and notifies the agent via message.
   *
   * Steps, in order:
   *  1. Load the conflicted task and its assigned agent (throws if either is missing).
   *  2. Create a high-priority 'auto' resolution task in the same phase/initiative.
   *  3. Mark the original task as blocked.
   *  4. Optionally message the agent and emit a task:queued event.
   *
   * @param taskId - ID of the task whose merge conflicted
   * @param conflicts - Paths of the conflicting files
   * @param mergeContext - Optional source/target branches for hierarchy merges
   * @throws Error when the task or its agent cannot be found
   */
  async handleConflict(taskId: string, conflicts: string[], mergeContext?: MergeContext): Promise<void> {
    // Get original task for context
    const originalTask = await this.taskRepository.findById(taskId);
    if (!originalTask) {
      throw new Error(`Original task not found: ${taskId}`);
    }
    // Get agent that was working on the task
    const agent = await this.agentRepository.findByTaskId(taskId);
    if (!agent) {
      throw new Error(`No agent found for task: ${taskId}`);
    }
    // Build conflict description; the instructions differ depending on
    // whether we know the branches involved (mergeContext present).
    const descriptionLines = [
      'Merge conflicts detected. Resolve conflicts in the following files:',
      '',
      ...conflicts.map((f) => `- ${f}`),
      '',
      `Original task: ${originalTask.name}`,
      '',
    ];
    if (mergeContext) {
      descriptionLines.push(
        `Resolve merge conflicts between branch "${mergeContext.sourceBranch}" and "${mergeContext.targetBranch}".`,
        `Run: git merge ${mergeContext.sourceBranch} --no-edit`,
        'Resolve all conflicts, then: git add . && git commit',
      );
    } else {
      descriptionLines.push(
        'Instructions: Resolve merge conflicts in the listed files, then mark task complete.',
      );
    }
    const conflictDescription = descriptionLines.join('\n');
    // Create new conflict-resolution task, ordered directly after the
    // original and inheriting its phase/initiative/parent linkage.
    const conflictTask = await this.taskRepository.create({
      parentTaskId: originalTask.parentTaskId,
      phaseId: originalTask.phaseId,
      initiativeId: originalTask.initiativeId,
      name: `Resolve conflicts: ${originalTask.name}`,
      description: conflictDescription,
      category: mergeContext ? 'merge' : 'execute',
      type: 'auto',
      priority: 'high',
      status: 'pending',
      order: originalTask.order + 1,
    });
    // Update original task status to blocked
    await this.taskRepository.update(taskId, { status: 'blocked' });
    // Create message to agent if messageRepository is configured
    if (this.messageRepository) {
      const messageContent = [
        `Merge conflict detected for task: ${originalTask.name}`,
        '',
        'Conflicting files:',
        ...conflicts.map((f) => `- ${f}`),
        '',
        `A new task has been created to resolve these conflicts: ${conflictTask.name}`,
        '',
        'Please resolve the merge conflicts in the listed files and mark the resolution task as complete.',
      ].join('\n');
      await this.messageRepository.create({
        senderType: 'user', // System-generated messages appear as from user
        senderId: null,
        recipientType: 'agent',
        recipientId: agent.id,
        type: 'info',
        content: messageContent,
        requiresResponse: false,
      });
    }
    // Emit TaskQueuedEvent for the new conflict-resolution task
    if (this.eventBus) {
      const event: TaskQueuedEvent = {
        type: 'task:queued',
        timestamp: new Date(),
        payload: {
          taskId: conflictTask.id,
          priority: 'high',
          dependsOn: [],
        },
      };
      this.eventBus.emit(event);
    }
  }
}

View File

@@ -0,0 +1,28 @@
/**
 * Coordination Module - Public API
 *
 * Exports the CoordinationManager port interface and related types.
 * All modules should import from this index file.
 *
 * Port-Adapter Pattern:
 * - CoordinationManager is the PORT (interface contract)
 * - Implementations (e.g., DefaultCoordinationManager) are ADAPTERS
 * - Consumers depend only on the port interface
 * - Adapters can be swapped without changing consumer code
 *
 * This enables:
 * - Testing with mock/in-memory implementations
 * - Future swapping to persistent/distributed implementations
 * - Clean separation between domain logic and infrastructure
 */
// Port interfaces (what consumers depend on)
export type { CoordinationManager } from './types.js';
export type { ConflictResolutionService } from './conflict-resolution-service.js';
// Domain types
export type { MergeQueueItem, MergeStatus, MergeResult } from './types.js';
// Adapters (concrete implementations of the ports above)
export { DefaultCoordinationManager } from './manager.js';
export { DefaultConflictResolutionService } from './conflict-resolution-service.js';

View File

@@ -0,0 +1,693 @@
/**
* DefaultCoordinationManager Tests
*
* Tests for the CoordinationManager adapter with dependency-ordered
* merging and conflict handling.
*/
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { DefaultCoordinationManager } from './manager.js';
import { DrizzleTaskRepository } from '../db/repositories/drizzle/task.js';
import { DrizzleAgentRepository } from '../db/repositories/drizzle/agent.js';
import { DrizzleMessageRepository } from '../db/repositories/drizzle/message.js';
import { DrizzlePhaseRepository } from '../db/repositories/drizzle/phase.js';
import { DrizzleInitiativeRepository } from '../db/repositories/drizzle/initiative.js';
import { createTestDatabase } from '../db/repositories/drizzle/test-helpers.js';
import type { DrizzleDatabase } from '../db/index.js';
import type { EventBus, DomainEvent } from '../events/types.js';
import type { WorktreeManager, MergeResult as GitMergeResult } from '../git/types.js';
import type { TaskRepository } from '../db/repositories/task-repository.js';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { MessageRepository } from '../db/repositories/message-repository.js';
import type { ConflictResolutionService } from './conflict-resolution-service.js';
// =============================================================================
// Test Helpers
// =============================================================================
/**
 * Build an EventBus test double that records every emitted event on an
 * `emittedEvents` array for later inspection by assertions.
 */
function createMockEventBus(): EventBus & { emittedEvents: DomainEvent[] } {
  const recorded: DomainEvent[] = [];
  const bus = {
    emittedEvents: recorded,
    emit<T extends DomainEvent>(event: T): void {
      recorded.push(event);
    },
    on: vi.fn(),
    off: vi.fn(),
    once: vi.fn(),
  };
  return bus;
}
/**
 * Build a WorktreeManager test double whose merge() result can be
 * configured per worktree ID; unconfigured worktrees merge successfully.
 */
function createMockWorktreeManager(
  mergeResults: Map<string, GitMergeResult> = new Map()
): WorktreeManager {
  const merge = vi.fn().mockImplementation(async (worktreeId: string) => {
    const configured = mergeResults.get(worktreeId);
    if (configured) {
      return configured;
    }
    // Default: successful merge
    return { success: true, message: 'Merged successfully' };
  });
  return {
    create: vi.fn(),
    remove: vi.fn(),
    list: vi.fn().mockResolvedValue([]),
    get: vi.fn(),
    diff: vi.fn(),
    merge,
  };
}
/**
 * Build a ConflictResolutionService test double with a spied handleConflict.
 */
function createMockConflictResolutionService(): ConflictResolutionService {
  const handleConflict = vi.fn();
  return { handleConflict };
}
// =============================================================================
// Tests
// =============================================================================
describe('DefaultCoordinationManager', () => {
  // Shared fixtures; rebuilt for every test by beforeEach below.
  let db: DrizzleDatabase;
  let taskRepository: TaskRepository;
  let agentRepository: AgentRepository;
  let messageRepository: MessageRepository;
  let eventBus: EventBus & { emittedEvents: DomainEvent[] };
  let worktreeManager: WorktreeManager;
  // NOTE(review): declared but never assigned in the tests below —
  // presumably reserved for conflict-service injection cases; confirm.
  let conflictResolutionService: ConflictResolutionService;
  let manager: DefaultCoordinationManager;
  let testPhaseId: string;
  beforeEach(async () => {
    // Set up test database
    db = createTestDatabase();
    taskRepository = new DrizzleTaskRepository(db);
    agentRepository = new DrizzleAgentRepository(db);
    messageRepository = new DrizzleMessageRepository(db);
    // Create required hierarchy for tasks
    const initiativeRepo = new DrizzleInitiativeRepository(db);
    const phaseRepo = new DrizzlePhaseRepository(db);
    const initiative = await initiativeRepo.create({
      name: 'Test Initiative',
    });
    const phase = await phaseRepo.create({
      initiativeId: initiative.id,
      name: 'Test Phase',
    });
    testPhaseId = phase.id;
    // Create mocks
    eventBus = createMockEventBus();
    worktreeManager = createMockWorktreeManager();
    // Create coordination manager
    manager = new DefaultCoordinationManager(
      worktreeManager,
      taskRepository,
      agentRepository,
      messageRepository,
      eventBus
    );
  });
  // ===========================================================================
  // queueMerge() Tests
  // ===========================================================================
  describe('queueMerge', () => {
    it('should add task to queue and emit MergeQueuedEvent', async () => {
      // Create task
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Test Task',
        priority: 'high',
        order: 1,
      });
      // Create agent for task (queueMerge resolves agent/worktree from it)
      const agent = await agentRepository.create({
        name: 'agent-test',
        taskId: task.id,
        worktreeId: 'worktree-123',
      });
      await manager.queueMerge(task.id);
      const state = await manager.getQueueState();
      expect(state.queued.length).toBe(1);
      expect(state.queued[0].taskId).toBe(task.id);
      expect(state.queued[0].agentId).toBe(agent.id);
      expect(state.queued[0].worktreeId).toBe('worktree-123');
      expect(state.queued[0].priority).toBe('high');
      // Check event was emitted
      expect(eventBus.emittedEvents.length).toBe(1);
      expect(eventBus.emittedEvents[0].type).toBe('merge:queued');
      expect((eventBus.emittedEvents[0] as any).payload.taskId).toBe(task.id);
    });
    it('should throw error when task not found', async () => {
      await expect(manager.queueMerge('non-existent-id')).rejects.toThrow(
        'Task not found'
      );
    });
    it('should throw error when no agent assigned to task', async () => {
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Orphan Task',
        order: 1,
      });
      await expect(manager.queueMerge(task.id)).rejects.toThrow(
        'No agent found for task'
      );
    });
  });
  // ===========================================================================
  // getNextMergeable() Tests
  // ===========================================================================
  describe('getNextMergeable', () => {
    it('should return null when queue is empty', async () => {
      const next = await manager.getNextMergeable();
      expect(next).toBeNull();
    });
    it('should return item when all dependencies merged', async () => {
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Mergeable Task',
        priority: 'medium',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-merge',
        taskId: task.id,
        worktreeId: 'worktree-merge',
      });
      await manager.queueMerge(task.id);
      const next = await manager.getNextMergeable();
      expect(next).not.toBeNull();
      expect(next!.taskId).toBe(task.id);
    });
    it('should respect priority ordering (high > medium > low)', async () => {
      // Create tasks in different priority order
      const lowTask = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Low Priority',
        priority: 'low',
        order: 1,
      });
      const highTask = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'High Priority',
        priority: 'high',
        order: 2,
      });
      const mediumTask = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Medium Priority',
        priority: 'medium',
        order: 3,
      });
      // Create agents for all tasks
      await agentRepository.create({
        name: 'agent-low',
        taskId: lowTask.id,
        worktreeId: 'wt-low',
      });
      await agentRepository.create({
        name: 'agent-high',
        taskId: highTask.id,
        worktreeId: 'wt-high',
      });
      await agentRepository.create({
        name: 'agent-medium',
        taskId: mediumTask.id,
        worktreeId: 'wt-medium',
      });
      // Queue in wrong order (low, high, medium)
      await manager.queueMerge(lowTask.id);
      await manager.queueMerge(highTask.id);
      await manager.queueMerge(mediumTask.id);
      // Should get high priority first
      const next = await manager.getNextMergeable();
      expect(next).not.toBeNull();
      expect(next!.taskId).toBe(highTask.id);
      expect(next!.priority).toBe('high');
    });
    it('should order by queuedAt within same priority (oldest first)', async () => {
      const task1 = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'First Task',
        priority: 'medium',
        order: 1,
      });
      const task2 = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Second Task',
        priority: 'medium',
        order: 2,
      });
      await agentRepository.create({
        name: 'agent-1',
        taskId: task1.id,
        worktreeId: 'wt-1',
      });
      await agentRepository.create({
        name: 'agent-2',
        taskId: task2.id,
        worktreeId: 'wt-2',
      });
      // Queue first task, wait, then queue second
      // (the delay guarantees distinct queuedAt timestamps)
      await manager.queueMerge(task1.id);
      await new Promise((resolve) => setTimeout(resolve, 10));
      await manager.queueMerge(task2.id);
      // Should get the first queued task
      const next = await manager.getNextMergeable();
      expect(next).not.toBeNull();
      expect(next!.taskId).toBe(task1.id);
    });
  });
  // ===========================================================================
  // processMerges() Tests - Success Path
  // ===========================================================================
  describe('processMerges - success path', () => {
    it('should complete clean merges and emit MergeCompletedEvent', async () => {
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Mergeable Task',
        priority: 'high',
        order: 1,
      });
      const agent = await agentRepository.create({
        name: 'agent-merge',
        taskId: task.id,
        worktreeId: 'worktree-success',
      });
      await manager.queueMerge(task.id);
      const results = await manager.processMerges('main');
      expect(results.length).toBe(1);
      expect(results[0].taskId).toBe(task.id);
      expect(results[0].success).toBe(true);
      // Check state updated
      const state = await manager.getQueueState();
      expect(state.queued.length).toBe(0);
      expect(state.merged).toContain(task.id);
      // Check events emitted: queued, started, completed
      const eventTypes = eventBus.emittedEvents.map((e) => e.type);
      expect(eventTypes).toContain('merge:queued');
      expect(eventTypes).toContain('merge:started');
      expect(eventTypes).toContain('merge:completed');
    });
    it('should process multiple tasks in priority order', async () => {
      const lowTask = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Low Priority',
        priority: 'low',
        order: 1,
      });
      const highTask = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'High Priority',
        priority: 'high',
        order: 2,
      });
      await agentRepository.create({
        name: 'agent-low',
        taskId: lowTask.id,
        worktreeId: 'wt-low',
      });
      await agentRepository.create({
        name: 'agent-high',
        taskId: highTask.id,
        worktreeId: 'wt-high',
      });
      await manager.queueMerge(lowTask.id);
      await manager.queueMerge(highTask.id);
      const results = await manager.processMerges('main');
      // Should process high priority first
      expect(results.length).toBe(2);
      expect(results[0].taskId).toBe(highTask.id);
      expect(results[1].taskId).toBe(lowTask.id);
    });
  });
  // ===========================================================================
  // processMerges() Tests - Conflict Handling
  // ===========================================================================
  describe('processMerges - conflict handling', () => {
    it('should detect conflicts and emit MergeConflictedEvent', async () => {
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Conflicting Task',
        priority: 'high',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-conflict',
        taskId: task.id,
        worktreeId: 'wt-conflict',
      });
      // Configure merge to fail with conflicts
      const mergeResults = new Map<string, GitMergeResult>();
      mergeResults.set('wt-conflict', {
        success: false,
        conflicts: ['file1.ts', 'file2.ts'],
        message: 'Merge conflicts detected',
      });
      // Rebuild the manager with the conflicting worktree mock
      worktreeManager = createMockWorktreeManager(mergeResults);
      manager = new DefaultCoordinationManager(
        worktreeManager,
        taskRepository,
        agentRepository,
        messageRepository,
        eventBus
      );
      await manager.queueMerge(task.id);
      const results = await manager.processMerges('main');
      expect(results.length).toBe(1);
      expect(results[0].taskId).toBe(task.id);
      expect(results[0].success).toBe(false);
      expect(results[0].conflicts).toEqual(['file1.ts', 'file2.ts']);
      // Check state updated
      const state = await manager.getQueueState();
      expect(state.conflicted.length).toBe(1);
      expect(state.conflicted[0].taskId).toBe(task.id);
      expect(state.conflicted[0].conflicts).toEqual(['file1.ts', 'file2.ts']);
      // Check MergeConflictedEvent emitted
      const conflictEvent = eventBus.emittedEvents.find(
        (e) => e.type === 'merge:conflicted'
      );
      expect(conflictEvent).toBeDefined();
      expect((conflictEvent as any).payload.conflictingFiles).toEqual([
        'file1.ts',
        'file2.ts',
      ]);
    });
    it('should create resolution task on conflict', async () => {
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Original Task',
        priority: 'medium',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-conflict',
        taskId: task.id,
        worktreeId: 'wt-conflict',
      });
      // Configure merge to fail
      const mergeResults = new Map<string, GitMergeResult>();
      mergeResults.set('wt-conflict', {
        success: false,
        conflicts: ['src/index.ts'],
        message: 'Merge conflicts detected',
      });
      worktreeManager = createMockWorktreeManager(mergeResults);
      manager = new DefaultCoordinationManager(
        worktreeManager,
        taskRepository,
        agentRepository,
        messageRepository,
        eventBus
      );
      await manager.queueMerge(task.id);
      await manager.processMerges('main');
      // Check new task was created
      const tasks = await taskRepository.findByPhaseId(testPhaseId);
      const conflictTask = tasks.find((t) =>
        t.name.startsWith('Resolve conflicts:')
      );
      expect(conflictTask).toBeDefined();
      expect(conflictTask!.name).toBe('Resolve conflicts: Original Task');
      expect(conflictTask!.priority).toBe('high');
      expect(conflictTask!.description).toContain('src/index.ts');
      // Check original task blocked
      const updatedOriginal = await taskRepository.findById(task.id);
      expect(updatedOriginal!.status).toBe('blocked');
      // Check TaskQueuedEvent emitted for conflict task
      const queuedEvent = eventBus.emittedEvents.find(
        (e) =>
          e.type === 'task:queued' &&
          (e as any).payload.taskId === conflictTask!.id
      );
      expect(queuedEvent).toBeDefined();
    });
    it('should create message to agent on conflict', async () => {
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Task with Message',
        priority: 'medium',
        order: 1,
      });
      const agent = await agentRepository.create({
        name: 'agent-msg',
        taskId: task.id,
        worktreeId: 'wt-msg',
      });
      // Configure merge to fail
      const mergeResults = new Map<string, GitMergeResult>();
      mergeResults.set('wt-msg', {
        success: false,
        conflicts: ['conflict.ts'],
        message: 'Merge conflicts detected',
      });
      worktreeManager = createMockWorktreeManager(mergeResults);
      manager = new DefaultCoordinationManager(
        worktreeManager,
        taskRepository,
        agentRepository,
        messageRepository,
        eventBus
      );
      await manager.queueMerge(task.id);
      await manager.processMerges('main');
      // Check message was created
      const messages = await messageRepository.findByRecipient('agent', agent.id);
      expect(messages.length).toBe(1);
      expect(messages[0].recipientType).toBe('agent');
      expect(messages[0].recipientId).toBe(agent.id);
      expect(messages[0].senderType).toBe('user');
      expect(messages[0].content).toContain('Merge conflict detected');
      expect(messages[0].content).toContain('conflict.ts');
    });
  });
  // ===========================================================================
  // getQueueState() Tests
  // ===========================================================================
  describe('getQueueState', () => {
    it('should return correct counts for all states', async () => {
      // Create tasks
      const task1 = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Queued Task',
        priority: 'high',
        order: 1,
      });
      const task2 = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Conflict Task',
        priority: 'medium',
        order: 2,
      });
      await agentRepository.create({
        name: 'agent-1',
        taskId: task1.id,
        worktreeId: 'wt-1',
      });
      await agentRepository.create({
        name: 'agent-2',
        taskId: task2.id,
        worktreeId: 'wt-2',
      });
      // Configure task2 to conflict
      const mergeResults = new Map<string, GitMergeResult>();
      mergeResults.set('wt-2', {
        success: false,
        conflicts: ['test.ts'],
        message: 'Conflict',
      });
      worktreeManager = createMockWorktreeManager(mergeResults);
      manager = new DefaultCoordinationManager(
        worktreeManager,
        taskRepository,
        agentRepository,
        messageRepository,
        eventBus
      );
      // Queue both, process task2 (will conflict), leave task1 queued
      await manager.queueMerge(task2.id);
      await manager.processMerges('main');
      await manager.queueMerge(task1.id);
      const state = await manager.getQueueState();
      // task1 should be queued, task2 should be conflicted
      expect(state.queued.length).toBe(1);
      expect(state.queued[0].taskId).toBe(task1.id);
      expect(state.inProgress.length).toBe(0);
      expect(state.merged.length).toBe(0);
      expect(state.conflicted.length).toBe(1);
      expect(state.conflicted[0].taskId).toBe(task2.id);
    });
  });
  // ===========================================================================
  // handleConflict() Tests
  // ===========================================================================
  describe('handleConflict', () => {
    it('should throw error when task not found', async () => {
      await expect(
        manager.handleConflict('non-existent', ['file.ts'])
      ).rejects.toThrow('Original task not found');
    });
    it('should throw error when no agent for task', async () => {
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Orphan Task',
        order: 1,
      });
      await expect(manager.handleConflict(task.id, ['file.ts'])).rejects.toThrow(
        'No agent found for task'
      );
    });
  });
  // ===========================================================================
  // Error Handling Tests
  // ===========================================================================
  describe('error handling', () => {
    it('should throw when TaskRepository not configured', async () => {
      const managerNoRepo = new DefaultCoordinationManager(
        worktreeManager,
        undefined, // No task repo
        agentRepository,
        messageRepository,
        eventBus
      );
      await expect(managerNoRepo.queueMerge('task-id')).rejects.toThrow(
        'TaskRepository not configured'
      );
    });
    it('should throw when AgentRepository not configured', async () => {
      const managerNoAgentRepo = new DefaultCoordinationManager(
        worktreeManager,
        taskRepository,
        undefined, // No agent repo
        messageRepository,
        eventBus
      );
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Test',
        order: 1,
      });
      await expect(managerNoAgentRepo.queueMerge(task.id)).rejects.toThrow(
        'AgentRepository not configured'
      );
    });
    it('should throw when WorktreeManager not configured', async () => {
      const managerNoWorktree = new DefaultCoordinationManager(
        undefined, // No worktree manager
        taskRepository,
        agentRepository,
        messageRepository,
        eventBus
      );
      const task = await taskRepository.create({
        phaseId: testPhaseId,
        name: 'Test',
        order: 1,
      });
      await agentRepository.create({
        name: 'agent-test',
        taskId: task.id,
        worktreeId: 'wt-test',
      });
      // Queueing works (only repositories needed); processing requires git
      await managerNoWorktree.queueMerge(task.id);
      await expect(managerNoWorktree.processMerges('main')).rejects.toThrow(
        'WorktreeManager not configured'
      );
    });
  });
});

View File

@@ -0,0 +1,332 @@
/**
* Default Coordination Manager - Adapter Implementation
*
* Implements CoordinationManager interface with in-memory merge queue
* and dependency-ordered merging.
*
* This is the ADAPTER for the CoordinationManager PORT.
*/
import type {
EventBus,
MergeQueuedEvent,
MergeStartedEvent,
MergeCompletedEvent,
MergeConflictedEvent,
} from '../events/index.js';
import type { WorktreeManager } from '../git/types.js';
import type { TaskRepository } from '../db/repositories/task-repository.js';
import type { AgentRepository } from '../db/repositories/agent-repository.js';
import type { MessageRepository } from '../db/repositories/message-repository.js';
import type { CoordinationManager, MergeQueueItem, MergeResult } from './types.js';
import type { ConflictResolutionService } from './conflict-resolution-service.js';
import { DefaultConflictResolutionService } from './conflict-resolution-service.js';
// =============================================================================
// Internal Types
// =============================================================================
/**
 * Internal representation of a merge queue item with status.
 *
 * The `status` field is an implementation detail and is stripped before
 * items are returned through the public CoordinationManager interface.
 */
interface InternalMergeQueueItem extends MergeQueueItem {
  /** 'queued' = waiting for its turn; 'in_progress' = merge currently running */
  status: 'queued' | 'in_progress';
}
// =============================================================================
// DefaultCoordinationManager Implementation
// =============================================================================
/**
 * In-memory implementation of CoordinationManager.
 *
 * Maintains the merge queue in a Map and processes merges in dependency
 * order: an item is mergeable only once every task in its `dependsOn`
 * list has merged. Ready items are ordered by priority (high > medium >
 * low), then by queue time (oldest first). Conflicts are delegated to a
 * ConflictResolutionService, which creates resolution tasks.
 */
export class DefaultCoordinationManager implements CoordinationManager {
  /** Internal merge queue, keyed by task ID */
  private mergeQueue: Map<string, InternalMergeQueueItem> = new Map();
  /** Task IDs that have been successfully merged */
  private mergedTasks: Set<string> = new Set();
  /** Tasks with conflicts awaiting resolution (taskId -> conflicting files) */
  private conflictedTasks: Map<string, string[]> = new Map();
  /** Service for handling merge conflicts */
  private conflictResolutionService?: ConflictResolutionService;
  constructor(
    private worktreeManager?: WorktreeManager,
    private taskRepository?: TaskRepository,
    private agentRepository?: AgentRepository,
    private messageRepository?: MessageRepository,
    private eventBus?: EventBus,
    conflictResolutionService?: ConflictResolutionService
  ) {
    // Use the injected service; otherwise build the default one when the
    // repositories it requires are available.
    if (conflictResolutionService) {
      this.conflictResolutionService = conflictResolutionService;
    } else if (taskRepository && agentRepository) {
      this.conflictResolutionService = new DefaultConflictResolutionService(
        taskRepository,
        agentRepository,
        messageRepository,
        eventBus
      );
    }
  }
  /**
   * Project an internal queue item to the public MergeQueueItem shape,
   * dropping the internal `status` field. Centralizes the projection that
   * was previously duplicated in getNextMergeable() and getQueueState().
   */
  private toPublicItem(item: InternalMergeQueueItem): MergeQueueItem {
    return {
      taskId: item.taskId,
      agentId: item.agentId,
      worktreeId: item.worktreeId,
      priority: item.priority,
      queuedAt: item.queuedAt,
      dependsOn: item.dependsOn,
    };
  }
  /**
   * Queue a completed task for merge.
   * Extracts agent/worktree information from the task.
   *
   * @param taskId - ID of the completed task to queue
   * @throws Error when repositories are not configured, or the task or
   *   its assigned agent cannot be found
   */
  async queueMerge(taskId: string): Promise<void> {
    // Look up task to get dependencies
    if (!this.taskRepository) {
      throw new Error('TaskRepository not configured');
    }
    const task = await this.taskRepository.findById(taskId);
    if (!task) {
      throw new Error(`Task not found: ${taskId}`);
    }
    // Look up agent assigned to task to get worktreeId
    if (!this.agentRepository) {
      throw new Error('AgentRepository not configured');
    }
    const agent = await this.agentRepository.findByTaskId(taskId);
    if (!agent) {
      throw new Error(`No agent found for task: ${taskId}`);
    }
    // For now, dependsOn is empty - would need to query task_dependencies table
    // to get actual dependencies
    const dependsOn: string[] = [];
    const queueItem: InternalMergeQueueItem = {
      taskId,
      agentId: agent.id,
      worktreeId: agent.worktreeId,
      priority: task.priority,
      queuedAt: new Date(),
      dependsOn,
      status: 'queued',
    };
    // Re-queuing a previously conflicted task (e.g. after resolution) must
    // clear its stale conflict entry, otherwise getQueueState() would report
    // the task as both queued/merged AND conflicted.
    this.conflictedTasks.delete(taskId);
    this.mergeQueue.set(taskId, queueItem);
    // Emit MergeQueuedEvent
    const event: MergeQueuedEvent = {
      type: 'merge:queued',
      timestamp: new Date(),
      payload: {
        taskId,
        agentId: agent.id,
        worktreeId: agent.worktreeId,
        priority: task.priority,
      },
    };
    this.eventBus?.emit(event);
  }
  /**
   * Get next task ready to merge.
   * Returns the highest-priority, oldest-queued item whose dependency
   * tasks have all been merged, or null when nothing is ready.
   */
  async getNextMergeable(): Promise<MergeQueueItem | null> {
    const queuedItems = Array.from(this.mergeQueue.values()).filter(
      (item) => item.status === 'queued'
    );
    if (queuedItems.length === 0) {
      return null;
    }
    // Filter to only items where ALL dependsOn tasks are in mergedTasks
    const readyItems = queuedItems.filter((item) =>
      item.dependsOn.every((depTaskId) => this.mergedTasks.has(depTaskId))
    );
    if (readyItems.length === 0) {
      return null;
    }
    // Sort by priority (high > medium > low), then by queuedAt (oldest first)
    const priorityOrder: Record<string, number> = { high: 0, medium: 1, low: 2 };
    readyItems.sort((a, b) => {
      const priorityDiff = priorityOrder[a.priority] - priorityOrder[b.priority];
      if (priorityDiff !== 0) {
        return priorityDiff;
      }
      return a.queuedAt.getTime() - b.queuedAt.getTime();
    });
    // Return as MergeQueueItem (without internal status)
    return this.toPublicItem(readyItems[0]);
  }
  /**
   * Process all ready merges in dependency order.
   * Merges each ready task into the target branch, emitting
   * started/completed/conflicted events along the way. Conflicted tasks
   * are handed off to the ConflictResolutionService.
   *
   * @param targetBranch - Branch to merge into (e.g. 'main')
   * @returns One MergeResult per processed task, in merge order
   * @throws Error when WorktreeManager is not configured
   */
  async processMerges(targetBranch: string): Promise<MergeResult[]> {
    if (!this.worktreeManager) {
      throw new Error('WorktreeManager not configured');
    }
    const results: MergeResult[] = [];
    // Loop while there are mergeable items; each iteration removes the
    // processed item from the queue, so this terminates.
    let nextItem = await this.getNextMergeable();
    while (nextItem) {
      const queueItem = this.mergeQueue.get(nextItem.taskId)!;
      // Mark as in_progress
      queueItem.status = 'in_progress';
      // Emit MergeStartedEvent
      const startEvent: MergeStartedEvent = {
        type: 'merge:started',
        timestamp: new Date(),
        payload: {
          taskId: nextItem.taskId,
          agentId: nextItem.agentId,
          worktreeId: nextItem.worktreeId,
          targetBranch,
        },
      };
      this.eventBus?.emit(startEvent);
      // Attempt merge via worktreeManager
      const mergeResult = await this.worktreeManager.merge(nextItem.worktreeId, targetBranch);
      if (mergeResult.success) {
        // Success - add to mergedTasks and remove from queue
        this.mergedTasks.add(nextItem.taskId);
        this.mergeQueue.delete(nextItem.taskId);
        // Emit MergeCompletedEvent
        const completedEvent: MergeCompletedEvent = {
          type: 'merge:completed',
          timestamp: new Date(),
          payload: {
            taskId: nextItem.taskId,
            agentId: nextItem.agentId,
            worktreeId: nextItem.worktreeId,
            targetBranch,
          },
        };
        this.eventBus?.emit(completedEvent);
        results.push({
          taskId: nextItem.taskId,
          success: true,
          message: mergeResult.message,
        });
      } else {
        // Conflict - add to conflictedTasks and remove from queue
        const conflicts = mergeResult.conflicts || [];
        this.conflictedTasks.set(nextItem.taskId, conflicts);
        this.mergeQueue.delete(nextItem.taskId);
        // Emit MergeConflictedEvent
        const conflictEvent: MergeConflictedEvent = {
          type: 'merge:conflicted',
          timestamp: new Date(),
          payload: {
            taskId: nextItem.taskId,
            agentId: nextItem.agentId,
            worktreeId: nextItem.worktreeId,
            targetBranch,
            conflictingFiles: conflicts,
          },
        };
        this.eventBus?.emit(conflictEvent);
        // Handle conflict - create resolution task
        await this.handleConflict(nextItem.taskId, conflicts);
        results.push({
          taskId: nextItem.taskId,
          success: false,
          conflicts,
          message: mergeResult.message,
        });
      }
      // Get next item
      nextItem = await this.getNextMergeable();
    }
    return results;
  }
  /**
   * Handle a merge conflict.
   * Delegates to the ConflictResolutionService.
   *
   * @throws Error when no ConflictResolutionService is configured
   */
  async handleConflict(taskId: string, conflicts: string[]): Promise<void> {
    if (!this.conflictResolutionService) {
      throw new Error('ConflictResolutionService not configured');
    }
    await this.conflictResolutionService.handleConflict(taskId, conflicts);
  }
  /**
   * Get current state of the merge queue, grouped by merge status.
   */
  async getQueueState(): Promise<{
    queued: MergeQueueItem[];
    inProgress: MergeQueueItem[];
    merged: string[];
    conflicted: Array<{ taskId: string; conflicts: string[] }>;
  }> {
    const allItems = Array.from(this.mergeQueue.values());
    const queued = allItems
      .filter((item) => item.status === 'queued')
      .map((item) => this.toPublicItem(item));
    const inProgress = allItems
      .filter((item) => item.status === 'in_progress')
      .map((item) => this.toPublicItem(item));
    const merged = Array.from(this.mergedTasks);
    const conflicted = Array.from(this.conflictedTasks.entries()).map(([taskId, conflicts]) => ({
      taskId,
      conflicts,
    }));
    return { queued, inProgress, merged, conflicted };
  }
}

View File

@@ -0,0 +1,129 @@
/**
* Coordination Module Types
*
* Port interface for merge coordination management.
* CoordinationManager is the PORT. Implementations are ADAPTERS.
*
* This follows the same hexagonal architecture pattern as EventBus, AgentManager, and DispatchManager:
* - Interface defines the contract (port)
* - Implementations can be swapped without changing consumers
* - Enables testing with in-memory/mock implementations
*/
// =============================================================================
// Coordination Domain Types
// =============================================================================
/**
 * Represents a task queued for merge.
 * Tasks are queued after completion and merged in dependency order.
 */
export interface MergeQueueItem {
  /** ID of the task to merge */
  taskId: string;
  /** ID of the agent that completed the task */
  agentId: string;
  /** ID of the worktree containing the changes */
  worktreeId: string;
  /** Priority level for merge ordering (high merges before medium/low) */
  priority: 'low' | 'medium' | 'high';
  /** When the task was queued for merge (tie-breaker within a priority) */
  queuedAt: Date;
  /** Task IDs that must merge first (dependency ordering) */
  dependsOn: string[];
}
/**
 * Status of a merge operation as tracked by the queue.
 */
export type MergeStatus = 'queued' | 'in_progress' | 'merged' | 'conflict';
/**
 * Result of a merge operation with task context.
 * Mirrors git MergeResult but includes task-level information.
 */
export interface MergeResult {
  /** ID of the task that was merged */
  taskId: string;
  /** True if merge completed without conflicts */
  success: boolean;
  /** List of conflicting files (only present if success is false) */
  conflicts?: string[];
  /** Human-readable message describing the result */
  message: string;
}
// =============================================================================
// CoordinationManager Port Interface
// =============================================================================
/**
 * CoordinationManager Port Interface
 *
 * Manages merge coordination for completed tasks.
 * Ensures merges happen in dependency order and handles conflicts.
 *
 * This is the PORT - implementations (adapters) include:
 * - DefaultCoordinationManager: In-memory adapter (see manager.ts)
 * - (future) PersistentCoordinationManager: Database-backed
 *
 * Covers requirements:
 * - COORD-01: Queue completed tasks for merge
 * - COORD-02: Get next mergeable task (dependencies resolved)
 * - COORD-03: Process merges in dependency order
 * - COORD-04: Handle conflicts (bounce-back to agent)
 * - COORD-05: Track queue state
 */
export interface CoordinationManager {
  /**
   * Queue a completed task for merge.
   * Extracts agent/worktree information from the task.
   *
   * @param taskId - ID of the completed task to queue
   */
  queueMerge(taskId: string): Promise<void>;
  /**
   * Get next task ready to merge.
   * Returns task with all dependency tasks already merged.
   * Returns null if no tasks ready.
   *
   * @returns Next mergeable task or null
   */
  getNextMergeable(): Promise<MergeQueueItem | null>;
  /**
   * Process all ready merges in dependency order.
   * Merges each ready task into the target branch.
   *
   * @param targetBranch - Branch to merge into (e.g., 'main', 'integration')
   * @returns Results of all merge operations
   */
  processMerges(targetBranch: string): Promise<MergeResult[]>;
  /**
   * Handle a merge conflict.
   * Creates a conflict-resolution task and assigns back to the agent.
   *
   * @param taskId - ID of the task that conflicted
   * @param conflicts - List of conflicting file paths
   */
  handleConflict(taskId: string, conflicts: string[]): Promise<void>;
  /**
   * Get current state of the merge queue.
   * Shows all tasks by their merge status.
   *
   * @returns Queue state grouped by status
   */
  getQueueState(): Promise<{
    /** Tasks waiting to be merged */
    queued: MergeQueueItem[];
    /** Tasks currently being merged */
    inProgress: MergeQueueItem[];
    /** Task IDs that have been merged */
    merged: string[];
    /** Tasks with conflicts awaiting resolution */
    conflicted: Array<{ taskId: string; conflicts: string[] }>;
  }>;
}

40
apps/server/db/config.ts Normal file
View File

@@ -0,0 +1,40 @@
import { mkdirSync } from 'node:fs';
import { dirname, join } from 'node:path';
import { findWorkspaceRoot } from '../config/cwrc.js';
/**
* Get the database path.
*
* - Default: <workspace-root>/.cw/cw.db
* - Throws if no .cwrc workspace is found
* - Override via CW_DB_PATH environment variable
* - For testing, pass ':memory:' as CW_DB_PATH
*/
export function getDbPath(): string {
const envPath = process.env.CW_DB_PATH;
if (envPath) {
return envPath;
}
const root = findWorkspaceRoot();
if (!root) {
throw new Error(
'No .cwrc workspace found. Run `cw init` to initialize a workspace.',
);
}
return join(root, '.cw', 'cw.db');
}
/**
 * Ensure the parent directory for the database file exists.
 *
 * Creates the directory tree recursively when missing; already-existing
 * directories are fine. In-memory databases (':memory:') have no parent
 * directory, so they are skipped entirely.
 *
 * @param dbPath - Database file path, or ':memory:'
 */
export function ensureDbDirectory(dbPath: string): void {
  if (dbPath !== ':memory:') {
    mkdirSync(dirname(dbPath), { recursive: true });
  }
}

View File

@@ -0,0 +1,42 @@
/**
* Database Migration
*
* Runs drizzle-kit migrations from the drizzle/ directory.
* Safe to call on every startup - only applies pending migrations.
*/
import { migrate } from 'drizzle-orm/better-sqlite3/migrator';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import { existsSync } from 'node:fs';
import type { DrizzleDatabase } from './index.js';
import { createModuleLogger } from '../logger/index.js';
const log = createModuleLogger('db');
/**
 * Locate the drizzle/ migrations directory relative to this module.
 *
 * The module sits at different depths depending on how it is run:
 * - dev (tsx, apps/server/db/): three levels up to the workspace root
 * - built (tsc, dist/db/):      two levels up
 * The dev candidate is probed first; when it does not exist on disk we
 * fall back to the dist layout.
 */
function getMigrationsPath(): string {
  const here = dirname(fileURLToPath(import.meta.url));
  const devCandidate = join(here, '..', '..', '..', 'drizzle');
  const distCandidate = join(here, '..', '..', 'drizzle');
  return existsSync(devCandidate) ? devCandidate : distCandidate;
}
/**
 * Run all pending database migrations.
 *
 * Uses drizzle-kit's migration system, which tracks applied migrations
 * in a __drizzle_migrations table, so it is safe (and cheap) to call on
 * every startup — only pending migrations are applied.
 *
 * @param db - Drizzle database instance
 */
export function ensureSchema(db: DrizzleDatabase): void {
  const migrationsFolder = getMigrationsPath();
  log.info('applying database migrations');
  migrate(db, { migrationsFolder });
  log.info('database migrations complete');
}

52
apps/server/db/index.ts Normal file
View File

@@ -0,0 +1,52 @@
import Database from 'better-sqlite3';
import { drizzle } from 'drizzle-orm/better-sqlite3';
import type { BetterSQLite3Database } from 'drizzle-orm/better-sqlite3';
import { getDbPath, ensureDbDirectory } from './config.js';
import * as schema from './schema.js';
/** Drizzle database handle typed with the full application schema. */
export type DrizzleDatabase = BetterSQLite3Database<typeof schema>;
/**
 * Create a new database connection.
 *
 * This is a factory function (not a singleton) so multiple instances can
 * exist at once — e.g. isolated databases in tests.
 *
 * @param path - Optional path override. Defaults to getDbPath().
 *               Use ':memory:' for an in-memory testing database.
 * @returns Drizzle database instance bound to the application schema
 */
export function createDatabase(path?: string): DrizzleDatabase {
  const dbPath = path ?? getDbPath();

  // File-based databases need their parent directory to exist;
  // ensureDbDirectory is a no-op for ':memory:'.
  ensureDbDirectory(dbPath);

  const connection = new Database(dbPath);

  // WAL mode improves concurrent read performance; foreign_keys is off by
  // default in SQLite and must be enabled per-connection.
  connection.pragma('journal_mode = WAL');
  connection.pragma('foreign_keys = ON');

  return drizzle(connection, { schema });
}
// Re-export config utilities (path resolution, directory creation)
export { getDbPath, ensureDbDirectory } from './config.js';
// Re-export schema initialization (drizzle migrations runner)
export { ensureSchema } from './ensure-schema.js';
// Re-export schema tables and inferred row types
export * from './schema.js';
// Re-export repository interfaces (ports)
export * from './repositories/index.js';
// Re-export Drizzle adapters (port implementations)
export * from './repositories/drizzle/index.js';

View File

@@ -0,0 +1,61 @@
/**
* Account Repository Port Interface
*
* Port for Account aggregate operations.
* Accounts represent authenticated provider logins (e.g. Claude OAuth accounts)
* used for round-robin agent spawning and usage-limit failover.
*/
import type { Account } from '../schema.js';
/**
 * Data for creating a new account.
 * id and timestamps are generated by the repository.
 */
export interface CreateAccountData {
  email: string;
  provider?: string; // defaults to 'claude'
  configJson?: string; // serialized .claude.json content
  credentials?: string; // serialized .credentials.json content
}
/**
 * Account Repository Port
 *
 * CRUD and selection operations for provider accounts.
 * Supports round-robin agent spawning (findNextAvailable) and
 * usage-limit failover (markExhausted / clearExpiredExhaustion).
 */
export interface AccountRepository {
  /** Create a new account. Generates id and sets timestamps. */
  create(data: CreateAccountData): Promise<Account>;
  /** Find an account by its ID. */
  findById(id: string): Promise<Account | null>;
  /** Find an account by email. */
  findByEmail(email: string): Promise<Account | null>;
  /** Find all accounts for a given provider. */
  findByProvider(provider: string): Promise<Account[]>;
  /**
   * Find the next available (non-exhausted) account for a provider.
   * Uses round-robin via lastUsedAt ordering (least-recently-used first).
   * Automatically clears expired exhaustion before querying.
   */
  findNextAvailable(provider: string): Promise<Account | null>;
  /** Mark an account as exhausted until a given time. */
  markExhausted(id: string, until: Date): Promise<Account>;
  /** Mark an account as available (clear exhaustion). */
  markAvailable(id: string): Promise<Account>;
  /** Update the lastUsedAt timestamp for an account. */
  updateLastUsed(id: string): Promise<Account>;
  /** Clear exhaustion for all accounts whose exhaustedUntil has passed. Returns count cleared. */
  clearExpiredExhaustion(): Promise<number>;
  /** Find all accounts. */
  findAll(): Promise<Account[]>;
  /** Update stored credentials for an account. */
  updateCredentials(id: string, credentials: string): Promise<Account>;
  /** Update both configJson and credentials for an account (used by account add upsert). */
  updateAccountAuth(id: string, configJson: string, credentials: string): Promise<Account>;
  /** Delete an account. Throws if not found. */
  delete(id: string): Promise<void>;
}

View File

@@ -0,0 +1,119 @@
/**
* Agent Repository Port Interface
*
* Port for Agent aggregate operations.
* Implementations (Drizzle, etc.) are adapters.
*/
import type { Agent } from '../schema.js';
import type { AgentMode } from '../../agent/types.js';
/**
 * Agent status values.
 * - 'idle':              created, not currently doing work
 * - 'running':           actively executing
 * - 'waiting_for_input': blocked on user/agent input
 * - 'stopped':           exited normally
 * - 'crashed':           exited abnormally
 */
export type AgentStatus = 'idle' | 'running' | 'waiting_for_input' | 'stopped' | 'crashed';
/**
 * Data for creating a new agent.
 * Omits system-managed fields (id, timestamps) and makes optional fields
 * explicit. Defaults: status 'idle', mode 'execute', provider 'claude'.
 */
export interface CreateAgentData {
  name: string;
  worktreeId: string;
  taskId?: string | null;
  initiativeId?: string | null;
  sessionId?: string | null;
  status?: AgentStatus; // Defaults to 'idle' if not provided
  mode?: AgentMode; // Defaults to 'execute' if not provided
  provider?: string; // Defaults to 'claude' if not provided
  accountId?: string | null;
}
/**
 * Data for updating an existing agent.
 * All fields optional — only provided fields are written.
 * System-managed fields (id, createdAt) are excluded; updatedAt is
 * accepted but adapters overwrite it with the current time on update.
 */
export interface UpdateAgentData {
  name?: string;
  worktreeId?: string;
  taskId?: string | null;
  initiativeId?: string | null;
  sessionId?: string | null;
  status?: AgentStatus;
  mode?: AgentMode;
  provider?: string;
  accountId?: string | null;
  pid?: number | null;
  exitCode?: number | null;
  outputFilePath?: string | null;
  result?: string | null;
  pendingQuestions?: string | null;
  userDismissedAt?: Date | null;
  updatedAt?: Date;
}
/**
 * Agent Repository Port
 *
 * Defines operations for the Agent aggregate.
 * Enables agent state persistence for session resumption and listing.
 */
export interface AgentRepository {
  /**
   * Create a new agent.
   * Generates id and sets timestamps automatically.
   * Name must be unique (adapters reject duplicates).
   */
  create(agent: CreateAgentData): Promise<Agent>;
  /**
   * Find an agent by its ID.
   * Returns null if not found.
   */
  findById(id: string): Promise<Agent | null>;
  /**
   * Find an agent by its human-readable name.
   * Returns null if not found.
   */
  findByName(name: string): Promise<Agent | null>;
  /**
   * Find an agent by its associated task.
   * Returns null if no agent is assigned to that task.
   */
  findByTaskId(taskId: string): Promise<Agent | null>;
  /**
   * Find an agent by its Claude CLI session ID.
   * Used for session resumption.
   * Returns null if not found.
   */
  findBySessionId(sessionId: string): Promise<Agent | null>;
  /**
   * Find all agents.
   * Returns empty array if none exist.
   */
  findAll(): Promise<Agent[]>;
  /**
   * Find agents by status.
   * Returns empty array if no agents have that status.
   */
  findByStatus(status: AgentStatus): Promise<Agent[]>;
  /**
   * Update an agent with partial data.
   * Only provided fields are updated, others remain unchanged.
   * Throws if agent not found.
   * Updates updatedAt timestamp automatically.
   */
  update(id: string, data: UpdateAgentData): Promise<Agent>;
  /**
   * Delete an agent.
   * Throws if agent not found.
   */
  delete(id: string): Promise<void>;
}

View File

@@ -0,0 +1,36 @@
/**
* Change Set Repository Port Interface
*
* Port for ChangeSet aggregate operations.
* Implementations (Drizzle, etc.) are adapters.
*/
import type { ChangeSet, ChangeSetEntry } from '../schema.js';
/** Data for creating a change set header row. */
export type CreateChangeSetData = {
  agentId: string | null;
  agentName: string;
  initiativeId: string;
  mode: 'plan' | 'detail' | 'refine';
  summary?: string | null;
};
/** Data for a single entry (one entity mutation) within a change set. */
export type CreateChangeSetEntryData = {
  entityType: 'page' | 'phase' | 'task' | 'phase_dependency';
  entityId: string;
  action: 'create' | 'update' | 'delete';
  previousState?: string | null; // serialized entity state before the action — presumably JSON; confirm against adapter
  newState?: string | null; // serialized entity state after the action
  sortOrder?: number;
};
/** A change set together with all of its entries. */
export type ChangeSetWithEntries = ChangeSet & { entries: ChangeSetEntry[] };
export interface ChangeSetRepository {
  /** Create a change set and its entries together. */
  createWithEntries(data: CreateChangeSetData, entries: CreateChangeSetEntryData[]): Promise<ChangeSet>;
  /** Find a change set by ID (header only), or null if not found. */
  findById(id: string): Promise<ChangeSet | null>;
  /** Find a change set by ID including its entries, or null if not found. */
  findByIdWithEntries(id: string): Promise<ChangeSetWithEntries | null>;
  /** Find all change sets for an initiative. */
  findByInitiativeId(initiativeId: string): Promise<ChangeSet[]>;
  /** Find all change sets produced by an agent. */
  findByAgentId(agentId: string): Promise<ChangeSet[]>;
  /** Mark a change set as reverted. */
  markReverted(id: string): Promise<ChangeSet>;
}

View File

@@ -0,0 +1,23 @@
/**
* Conversation Repository Port Interface
*
* Port for inter-agent conversation persistence operations.
*/
import type { Conversation } from '../schema.js';
/** Data for starting a new inter-agent conversation (a question awaiting an answer). */
export interface CreateConversationData {
  fromAgentId: string;
  toAgentId: string;
  initiativeId?: string | null;
  phaseId?: string | null;
  taskId?: string | null;
  question: string;
}
export interface ConversationRepository {
  /** Create a new (unanswered) conversation. */
  create(data: CreateConversationData): Promise<Conversation>;
  /** Find a conversation by ID, or null if not found. */
  findById(id: string): Promise<Conversation | null>;
  /** Find all conversations addressed to an agent that are still awaiting an answer. */
  findPendingForAgent(toAgentId: string): Promise<Conversation[]>;
  /** Record the answer to a conversation. Returns null if the conversation does not exist. */
  answer(id: string, answer: string): Promise<Conversation | null>;
}

View File

@@ -0,0 +1,203 @@
/**
* Drizzle Account Repository Adapter
*
* Implements AccountRepository interface using Drizzle ORM.
* Handles round-robin selection via lastUsedAt ordering
* and automatic exhaustion expiry.
*/
import { eq, and, asc, lte } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { accounts, agents, type Account } from '../../schema.js';
import type { AccountRepository, CreateAccountData } from '../account-repository.js';
/**
 * Drizzle adapter for AccountRepository.
 *
 * The database handle is injected so tests can use isolated databases.
 * Round-robin selection is driven by lastUsedAt ordering; exhaustion is
 * cleared automatically once exhaustedUntil has passed.
 */
export class DrizzleAccountRepository implements AccountRepository {
  constructor(private db: DrizzleDatabase) {}

  /** Throw a uniform not-found error when an update/delete matched no row. */
  private requireRow(row: Account | undefined, id: string): Account {
    if (!row) {
      throw new Error(`Account not found: ${id}`);
    }
    return row;
  }

  async create(data: CreateAccountData): Promise<Account> {
    const now = new Date();
    const rows = await this.db
      .insert(accounts)
      .values({
        id: nanoid(),
        email: data.email,
        provider: data.provider ?? 'claude',
        configJson: data.configJson ?? null,
        credentials: data.credentials ?? null,
        isExhausted: false,
        exhaustedUntil: null,
        lastUsedAt: null,
        sortOrder: 0,
        createdAt: now,
        updatedAt: now,
      })
      .returning();
    return rows[0];
  }

  async findById(id: string): Promise<Account | null> {
    const rows = await this.db
      .select()
      .from(accounts)
      .where(eq(accounts.id, id))
      .limit(1);
    return rows[0] ?? null;
  }

  async findByEmail(email: string): Promise<Account | null> {
    const rows = await this.db
      .select()
      .from(accounts)
      .where(eq(accounts.email, email))
      .limit(1);
    return rows[0] ?? null;
  }

  async findByProvider(provider: string): Promise<Account[]> {
    return this.db.select().from(accounts).where(eq(accounts.provider, provider));
  }

  async findNextAvailable(provider: string): Promise<Account | null> {
    // Release any accounts whose exhaustion window has passed first, so
    // they are eligible again for this selection.
    await this.clearExpiredExhaustion();
    const rows = await this.db
      .select()
      .from(accounts)
      .where(and(eq(accounts.provider, provider), eq(accounts.isExhausted, false)))
      // Least-recently-used first (never-used accounts sort before used ones).
      .orderBy(asc(accounts.lastUsedAt))
      .limit(1);
    return rows[0] ?? null;
  }

  async markExhausted(id: string, until: Date): Promise<Account> {
    const [row] = await this.db
      .update(accounts)
      .set({ isExhausted: true, exhaustedUntil: until, updatedAt: new Date() })
      .where(eq(accounts.id, id))
      .returning();
    return this.requireRow(row, id);
  }

  async markAvailable(id: string): Promise<Account> {
    const [row] = await this.db
      .update(accounts)
      .set({ isExhausted: false, exhaustedUntil: null, updatedAt: new Date() })
      .where(eq(accounts.id, id))
      .returning();
    return this.requireRow(row, id);
  }

  async updateLastUsed(id: string): Promise<Account> {
    const now = new Date();
    const [row] = await this.db
      .update(accounts)
      .set({ lastUsedAt: now, updatedAt: now })
      .where(eq(accounts.id, id))
      .returning();
    return this.requireRow(row, id);
  }

  async clearExpiredExhaustion(): Promise<number> {
    const now = new Date();
    const clearedRows = await this.db
      .update(accounts)
      .set({ isExhausted: false, exhaustedUntil: null, updatedAt: now })
      .where(and(eq(accounts.isExhausted, true), lte(accounts.exhaustedUntil, now)))
      .returning({ id: accounts.id });
    return clearedRows.length;
  }

  async findAll(): Promise<Account[]> {
    return this.db.select().from(accounts);
  }

  async updateCredentials(id: string, credentials: string): Promise<Account> {
    const [row] = await this.db
      .update(accounts)
      .set({ credentials, updatedAt: new Date() })
      .where(eq(accounts.id, id))
      .returning();
    return this.requireRow(row, id);
  }

  async updateAccountAuth(id: string, configJson: string, credentials: string): Promise<Account> {
    const [row] = await this.db
      .update(accounts)
      .set({ configJson, credentials, updatedAt: new Date() })
      .where(eq(accounts.id, id))
      .returning();
    return this.requireRow(row, id);
  }

  async delete(id: string): Promise<void> {
    // The agents.accountId FK lacks ON DELETE SET NULL in the migration,
    // so detach referencing agents manually before deleting.
    await this.db
      .update(agents)
      .set({ accountId: null })
      .where(eq(agents.accountId, id));
    const [removed] = await this.db
      .delete(accounts)
      .where(eq(accounts.id, id))
      .returning();
    this.requireRow(removed, id);
  }
}

View File

@@ -0,0 +1,279 @@
/**
* DrizzleAgentRepository Tests
*
* Tests for the Agent repository adapter.
*/
import { describe, it, expect, beforeEach } from 'vitest';
import { DrizzleAgentRepository } from './agent.js';
import { DrizzleTaskRepository } from './task.js';
import { DrizzlePhaseRepository } from './phase.js';
import { DrizzleInitiativeRepository } from './initiative.js';
import { createTestDatabase } from './test-helpers.js';
import type { DrizzleDatabase } from '../../index.js';
// Suite covering the full AgentRepository contract against the Drizzle adapter.
describe('DrizzleAgentRepository', () => {
  let db: DrizzleDatabase;
  let agentRepo: DrizzleAgentRepository;
  let taskRepo: DrizzleTaskRepository;
  let phaseRepo: DrizzlePhaseRepository;
  let initiativeRepo: DrizzleInitiativeRepository;
  let testTaskId: string;
  // Fresh isolated database and initiative → phase → task hierarchy per test.
  beforeEach(async () => {
    db = createTestDatabase();
    agentRepo = new DrizzleAgentRepository(db);
    taskRepo = new DrizzleTaskRepository(db);
    phaseRepo = new DrizzlePhaseRepository(db);
    initiativeRepo = new DrizzleInitiativeRepository(db);
    // Create full hierarchy for FK constraint (agents.taskId references tasks)
    const initiative = await initiativeRepo.create({
      name: 'Test Initiative',
    });
    const phase = await phaseRepo.create({
      initiativeId: initiative.id,
      name: 'Test Phase',
    });
    const task = await taskRepo.create({
      phaseId: phase.id,
      name: 'Test Task',
      order: 1,
    });
    testTaskId = task.id;
  });
  describe('create', () => {
    it('should create an agent with generated id and timestamps', async () => {
      const agent = await agentRepo.create({
        name: 'gastown',
        worktreeId: 'worktree-123',
        taskId: testTaskId,
      });
      expect(agent.id).toBeDefined();
      expect(agent.id.length).toBeGreaterThan(0);
      expect(agent.name).toBe('gastown');
      expect(agent.worktreeId).toBe('worktree-123');
      expect(agent.taskId).toBe(testTaskId);
      expect(agent.sessionId).toBeNull();
      // Default status when none is provided
      expect(agent.status).toBe('idle');
      expect(agent.createdAt).toBeInstanceOf(Date);
      expect(agent.updatedAt).toBeInstanceOf(Date);
    });
    it('should create agent without taskId', async () => {
      const agent = await agentRepo.create({
        name: 'standalone',
        worktreeId: 'worktree-456',
      });
      expect(agent.taskId).toBeNull();
    });
    it('should reject duplicate names', async () => {
      await agentRepo.create({
        name: 'unique-name',
        worktreeId: 'worktree-1',
      });
      // Unique constraint on name — second insert must fail
      await expect(
        agentRepo.create({
          name: 'unique-name',
          worktreeId: 'worktree-2',
        })
      ).rejects.toThrow();
    });
  });
  describe('findById', () => {
    it('should return null for non-existent agent', async () => {
      const result = await agentRepo.findById('non-existent-id');
      expect(result).toBeNull();
    });
    it('should find an existing agent', async () => {
      const created = await agentRepo.create({
        name: 'findme',
        worktreeId: 'worktree-123',
      });
      const found = await agentRepo.findById(created.id);
      expect(found).not.toBeNull();
      expect(found!.id).toBe(created.id);
      expect(found!.name).toBe('findme');
    });
  });
  describe('findByName', () => {
    it('should return null for non-existent name', async () => {
      const result = await agentRepo.findByName('nonexistent');
      expect(result).toBeNull();
    });
    it('should find agent by human-readable name', async () => {
      await agentRepo.create({
        name: 'chinatown',
        worktreeId: 'worktree-123',
      });
      const found = await agentRepo.findByName('chinatown');
      expect(found).not.toBeNull();
      expect(found!.name).toBe('chinatown');
    });
  });
  describe('findByTaskId', () => {
    it('should return null when no agent assigned to task', async () => {
      const result = await agentRepo.findByTaskId(testTaskId);
      expect(result).toBeNull();
    });
    it('should find agent by task', async () => {
      await agentRepo.create({
        name: 'task-agent',
        worktreeId: 'worktree-123',
        taskId: testTaskId,
      });
      const found = await agentRepo.findByTaskId(testTaskId);
      expect(found).not.toBeNull();
      expect(found!.taskId).toBe(testTaskId);
    });
  });
  describe('findBySessionId', () => {
    it('should return null when no agent has session', async () => {
      const result = await agentRepo.findBySessionId('session-123');
      expect(result).toBeNull();
    });
    it('should find agent by session ID', async () => {
      const agent = await agentRepo.create({
        name: 'session-agent',
        worktreeId: 'worktree-123',
      });
      // sessionId is assigned post-creation, mirroring CLI session attach
      await agentRepo.update(agent.id, { sessionId: 'session-abc' });
      const found = await agentRepo.findBySessionId('session-abc');
      expect(found).not.toBeNull();
      expect(found!.sessionId).toBe('session-abc');
    });
  });
  describe('findAll', () => {
    it('should return empty array when no agents', async () => {
      const agents = await agentRepo.findAll();
      expect(agents).toEqual([]);
    });
    it('should return all agents', async () => {
      await agentRepo.create({ name: 'agent-1', worktreeId: 'wt-1' });
      await agentRepo.create({ name: 'agent-2', worktreeId: 'wt-2' });
      await agentRepo.create({ name: 'agent-3', worktreeId: 'wt-3' });
      const agents = await agentRepo.findAll();
      expect(agents.length).toBe(3);
    });
  });
  describe('findByStatus', () => {
    it('should return empty array when no agents have status', async () => {
      const agents = await agentRepo.findByStatus('running');
      expect(agents).toEqual([]);
    });
    it('should filter by status correctly', async () => {
      const agent1 = await agentRepo.create({
        name: 'idle-agent',
        worktreeId: 'wt-1',
      });
      const agent2 = await agentRepo.create({
        name: 'running-agent',
        worktreeId: 'wt-2',
      });
      await agentRepo.update(agent2.id, { status: 'running' });
      const idleAgents = await agentRepo.findByStatus('idle');
      const runningAgents = await agentRepo.findByStatus('running');
      expect(idleAgents.length).toBe(1);
      expect(idleAgents[0].name).toBe('idle-agent');
      expect(runningAgents.length).toBe(1);
      expect(runningAgents[0].name).toBe('running-agent');
    });
    it('should filter by waiting_for_input status', async () => {
      const agent = await agentRepo.create({
        name: 'waiting-agent',
        worktreeId: 'wt-1',
      });
      await agentRepo.update(agent.id, { status: 'waiting_for_input' });
      const waitingAgents = await agentRepo.findByStatus('waiting_for_input');
      expect(waitingAgents.length).toBe(1);
      expect(waitingAgents[0].status).toBe('waiting_for_input');
    });
  });
  describe('update', () => {
    it('should change status and updatedAt', async () => {
      const created = await agentRepo.create({
        name: 'status-test',
        worktreeId: 'wt-1',
      });
      // Small delay so the update's timestamp can differ from creation
      await new Promise((resolve) => setTimeout(resolve, 10));
      const updated = await agentRepo.update(created.id, { status: 'running' });
      expect(updated.status).toBe('running');
      expect(updated.updatedAt.getTime()).toBeGreaterThanOrEqual(
        created.updatedAt.getTime()
      );
    });
    it('should change sessionId and updatedAt', async () => {
      const created = await agentRepo.create({
        name: 'session-test',
        worktreeId: 'wt-1',
      });
      expect(created.sessionId).toBeNull();
      await new Promise((resolve) => setTimeout(resolve, 10));
      const updated = await agentRepo.update(created.id, { sessionId: 'new-session-id' });
      expect(updated.sessionId).toBe('new-session-id');
      expect(updated.updatedAt.getTime()).toBeGreaterThanOrEqual(
        created.updatedAt.getTime()
      );
    });
    it('should throw for non-existent agent', async () => {
      await expect(
        agentRepo.update('non-existent-id', { status: 'running' })
      ).rejects.toThrow('Agent not found');
    });
  });
  describe('delete', () => {
    it('should delete an existing agent', async () => {
      const created = await agentRepo.create({
        name: 'to-delete',
        worktreeId: 'wt-1',
      });
      await agentRepo.delete(created.id);
      const found = await agentRepo.findById(created.id);
      expect(found).toBeNull();
    });
    it('should throw for non-existent agent', async () => {
      await expect(agentRepo.delete('non-existent-id')).rejects.toThrow(
        'Agent not found'
      );
    });
  });
});

View File

@@ -0,0 +1,119 @@
/**
* Drizzle Agent Repository Adapter
*
* Implements AgentRepository interface using Drizzle ORM.
*/
import { eq } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { agents, type Agent } from '../../schema.js';
import type {
AgentRepository,
AgentStatus,
CreateAgentData,
UpdateAgentData,
} from '../agent-repository.js';
/**
 * Drizzle adapter for AgentRepository.
 *
 * The database handle is injected via the constructor, enabling
 * isolated per-test databases.
 */
export class DrizzleAgentRepository implements AgentRepository {
  constructor(private db: DrizzleDatabase) {}

  async create(data: CreateAgentData): Promise<Agent> {
    const now = new Date();
    const rows = await this.db
      .insert(agents)
      .values({
        id: nanoid(),
        name: data.name,
        taskId: data.taskId ?? null,
        initiativeId: data.initiativeId ?? null,
        sessionId: data.sessionId ?? null,
        worktreeId: data.worktreeId,
        provider: data.provider ?? 'claude',
        accountId: data.accountId ?? null,
        status: data.status ?? 'idle',
        mode: data.mode ?? 'execute',
        createdAt: now,
        updatedAt: now,
      })
      .returning();
    return rows[0];
  }

  async findById(id: string): Promise<Agent | null> {
    const rows = await this.db
      .select()
      .from(agents)
      .where(eq(agents.id, id))
      .limit(1);
    return rows[0] ?? null;
  }

  async findByName(name: string): Promise<Agent | null> {
    const rows = await this.db
      .select()
      .from(agents)
      .where(eq(agents.name, name))
      .limit(1);
    return rows[0] ?? null;
  }

  async findByTaskId(taskId: string): Promise<Agent | null> {
    const rows = await this.db
      .select()
      .from(agents)
      .where(eq(agents.taskId, taskId))
      .limit(1);
    return rows[0] ?? null;
  }

  async findBySessionId(sessionId: string): Promise<Agent | null> {
    const rows = await this.db
      .select()
      .from(agents)
      .where(eq(agents.sessionId, sessionId))
      .limit(1);
    return rows[0] ?? null;
  }

  async findAll(): Promise<Agent[]> {
    return this.db.select().from(agents);
  }

  async findByStatus(status: AgentStatus): Promise<Agent[]> {
    return this.db.select().from(agents).where(eq(agents.status, status));
  }

  async update(id: string, data: UpdateAgentData): Promise<Agent> {
    // Spread first so an explicit updatedAt in data is always superseded.
    const [row] = await this.db
      .update(agents)
      .set({ ...data, updatedAt: new Date() })
      .where(eq(agents.id, id))
      .returning();
    if (!row) {
      throw new Error(`Agent not found: ${id}`);
    }
    return row;
  }

  async delete(id: string): Promise<void> {
    const removed = await this.db
      .delete(agents)
      .where(eq(agents.id, id))
      .returning();
    if (removed.length === 0) {
      throw new Error(`Agent not found: ${id}`);
    }
  }
}

View File

@@ -0,0 +1,301 @@
/**
* Cascade Delete Tests
*
* Tests that cascade deletes work correctly through the repository layer.
* Verifies the SQLite foreign key cascade behavior configured in schema.ts.
*/
import { describe, it, expect, beforeEach } from 'vitest';
import { nanoid } from 'nanoid';
import { DrizzleInitiativeRepository } from './initiative.js';
import { DrizzlePhaseRepository } from './phase.js';
import { DrizzleTaskRepository } from './task.js';
import { DrizzlePageRepository } from './page.js';
import { DrizzleProjectRepository } from './project.js';
import { DrizzleChangeSetRepository } from './change-set.js';
import { DrizzleAgentRepository } from './agent.js';
import { DrizzleConversationRepository } from './conversation.js';
import { createTestDatabase } from './test-helpers.js';
import { changeSets } from '../../schema.js';
import type { DrizzleDatabase } from '../../index.js';
describe('Cascade Deletes', () => {
let db: DrizzleDatabase;
let initiativeRepo: DrizzleInitiativeRepository;
let phaseRepo: DrizzlePhaseRepository;
let taskRepo: DrizzleTaskRepository;
let pageRepo: DrizzlePageRepository;
let projectRepo: DrizzleProjectRepository;
let changeSetRepo: DrizzleChangeSetRepository;
let agentRepo: DrizzleAgentRepository;
let conversationRepo: DrizzleConversationRepository;
beforeEach(() => {
db = createTestDatabase();
initiativeRepo = new DrizzleInitiativeRepository(db);
phaseRepo = new DrizzlePhaseRepository(db);
taskRepo = new DrizzleTaskRepository(db);
pageRepo = new DrizzlePageRepository(db);
projectRepo = new DrizzleProjectRepository(db);
changeSetRepo = new DrizzleChangeSetRepository(db);
agentRepo = new DrizzleAgentRepository(db);
conversationRepo = new DrizzleConversationRepository(db);
});
/**
* Helper to create a full hierarchy for testing.
* Uses parent tasks (detail category) to group child tasks.
*/
async function createFullHierarchy() {
const initiative = await initiativeRepo.create({
name: 'Test Initiative',
});
const phase1 = await phaseRepo.create({
initiativeId: initiative.id,
name: 'Phase 1',
});
const phase2 = await phaseRepo.create({
initiativeId: initiative.id,
name: 'Phase 2',
});
// Create parent (detail) tasks that group child tasks
const parentTask1 = await taskRepo.create({
phaseId: phase1.id,
initiativeId: initiative.id,
name: 'Parent Task 1-1',
category: 'detail',
order: 1,
});
const parentTask2 = await taskRepo.create({
phaseId: phase1.id,
initiativeId: initiative.id,
name: 'Parent Task 1-2',
category: 'detail',
order: 2,
});
const parentTask3 = await taskRepo.create({
phaseId: phase2.id,
initiativeId: initiative.id,
name: 'Parent Task 2-1',
category: 'detail',
order: 1,
});
// Create child tasks under parent tasks
const task1 = await taskRepo.create({
parentTaskId: parentTask1.id,
phaseId: phase1.id,
initiativeId: initiative.id,
name: 'Task 1-1-1',
order: 1,
});
const task2 = await taskRepo.create({
parentTaskId: parentTask1.id,
phaseId: phase1.id,
initiativeId: initiative.id,
name: 'Task 1-1-2',
order: 2,
});
const task3 = await taskRepo.create({
parentTaskId: parentTask2.id,
phaseId: phase1.id,
initiativeId: initiative.id,
name: 'Task 1-2-1',
order: 1,
});
const task4 = await taskRepo.create({
parentTaskId: parentTask3.id,
phaseId: phase2.id,
initiativeId: initiative.id,
name: 'Task 2-1-1',
order: 1,
});
// Create a page for the initiative
const page = await pageRepo.create({
initiativeId: initiative.id,
parentPageId: null,
title: 'Root Page',
content: null,
sortOrder: 0,
});
// Create a project and link it via junction table
const project = await projectRepo.create({
name: 'test-project',
url: 'https://github.com/test/test-project.git',
});
await projectRepo.setInitiativeProjects(initiative.id, [project.id]);
// Create two agents (need two for conversations, and one for changeSet FK)
const agent1 = await agentRepo.create({
name: 'agent-1',
worktreeId: 'wt-1',
initiativeId: initiative.id,
});
const agent2 = await agentRepo.create({
name: 'agent-2',
worktreeId: 'wt-2',
initiativeId: initiative.id,
});
// Insert change set directly (createWithEntries uses async tx, incompatible with better-sqlite3 sync driver)
const changeSetId = nanoid();
await db.insert(changeSets).values({
id: changeSetId,
agentId: agent1.id,
agentName: agent1.name,
initiativeId: initiative.id,
mode: 'plan',
status: 'applied',
createdAt: new Date(),
});
const changeSet = (await changeSetRepo.findById(changeSetId))!;
// Create a conversation between agents with initiative context
const conversation = await conversationRepo.create({
fromAgentId: agent1.id,
toAgentId: agent2.id,
initiativeId: initiative.id,
question: 'Test question',
});
return {
initiative,
phases: { phase1, phase2 },
parentTasks: { parentTask1, parentTask2, parentTask3 },
tasks: { task1, task2, task3, task4 },
page,
project,
changeSet,
agents: { agent1, agent2 },
conversation,
};
}
describe('delete initiative', () => {
  it('should cascade delete all phases, tasks, pages, junction rows, and change sets', async () => {
    const { initiative, phases, parentTasks, tasks, page, project, changeSet } =
      await createFullHierarchy();

    // Every record in the hierarchy must exist before deletion.
    const phaseIds = [phases.phase1.id, phases.phase2.id];
    const taskIds = [
      parentTasks.parentTask1.id,
      parentTasks.parentTask2.id,
      parentTasks.parentTask3.id,
      tasks.task1.id,
      tasks.task2.id,
      tasks.task3.id,
      tasks.task4.id,
    ];
    expect(await initiativeRepo.findById(initiative.id)).not.toBeNull();
    for (const id of phaseIds) {
      expect(await phaseRepo.findById(id)).not.toBeNull();
    }
    for (const id of taskIds) {
      expect(await taskRepo.findById(id)).not.toBeNull();
    }
    expect(await pageRepo.findById(page.id)).not.toBeNull();
    expect(await changeSetRepo.findById(changeSet.id)).not.toBeNull();
    expect(await projectRepo.findProjectsByInitiativeId(initiative.id)).toHaveLength(1);

    // Delete the root of the hierarchy.
    await initiativeRepo.delete(initiative.id);

    // The cascade removed everything under the initiative.
    expect(await initiativeRepo.findById(initiative.id)).toBeNull();
    for (const id of phaseIds) {
      expect(await phaseRepo.findById(id)).toBeNull();
    }
    for (const id of taskIds) {
      expect(await taskRepo.findById(id)).toBeNull();
    }
    expect(await pageRepo.findById(page.id)).toBeNull();
    expect(await changeSetRepo.findById(changeSet.id)).toBeNull();

    // The junction row is removed, but the project record itself survives.
    expect(await projectRepo.findProjectsByInitiativeId(initiative.id)).toHaveLength(0);
    expect(await projectRepo.findById(project.id)).not.toBeNull();
  });

  it('should set null on agents and conversations (not cascade)', async () => {
    const { initiative, agents, conversation } = await createFullHierarchy();

    // Sanity check: agent 1 is linked to the initiative before deletion.
    const linkedAgent = await agentRepo.findById(agents.agent1.id);
    expect(linkedAgent!.initiativeId).toBe(initiative.id);

    await initiativeRepo.delete(initiative.id);

    // Agents are detached (initiativeId -> null), not deleted.
    for (const agentId of [agents.agent1.id, agents.agent2.id]) {
      const survivor = await agentRepo.findById(agentId);
      expect(survivor).not.toBeNull();
      expect(survivor!.initiativeId).toBeNull();
    }

    // The conversation likewise survives with its initiative reference cleared.
    const survivingConversation = await conversationRepo.findById(conversation.id);
    expect(survivingConversation).not.toBeNull();
    expect(survivingConversation!.initiativeId).toBeNull();
  });
});
describe('delete phase', () => {
  it('should cascade delete tasks under that phase only', async () => {
    const { initiative, phases, parentTasks, tasks } = await createFullHierarchy();

    // Remove phase 1.
    await phaseRepo.delete(phases.phase1.id);

    // The owning initiative is untouched.
    expect(await initiativeRepo.findById(initiative.id)).not.toBeNull();

    // Phase 1 and every task beneath it (parents and children) are gone.
    expect(await phaseRepo.findById(phases.phase1.id)).toBeNull();
    const removedTaskIds = [
      parentTasks.parentTask1.id,
      parentTasks.parentTask2.id,
      tasks.task1.id,
      tasks.task2.id,
      tasks.task3.id,
    ];
    for (const id of removedTaskIds) {
      expect(await taskRepo.findById(id)).toBeNull();
    }

    // Phase 2's subtree is untouched.
    expect(await phaseRepo.findById(phases.phase2.id)).not.toBeNull();
    expect(await taskRepo.findById(parentTasks.parentTask3.id)).not.toBeNull();
    expect(await taskRepo.findById(tasks.task4.id)).not.toBeNull();
  });
});
describe('delete parent task', () => {
  it('should cascade delete child tasks under that parent only', async () => {
    const { phases, parentTasks, tasks } = await createFullHierarchy();

    // Remove parent task 1.
    await taskRepo.delete(parentTasks.parentTask1.id);

    // The containing phase survives.
    expect(await phaseRepo.findById(phases.phase1.id)).not.toBeNull();

    // Parent task 1 and its direct children are removed.
    for (const id of [parentTasks.parentTask1.id, tasks.task1.id, tasks.task2.id]) {
      expect(await taskRepo.findById(id)).toBeNull();
    }

    // Sibling parents and their children are untouched.
    for (const id of [
      parentTasks.parentTask2.id,
      parentTasks.parentTask3.id,
      tasks.task3.id,
      tasks.task4.id,
    ]) {
      expect(await taskRepo.findById(id)).not.toBeNull();
    }
  });
});
});

View File

@@ -0,0 +1,110 @@
/**
* Drizzle Change Set Repository Adapter
*
* Implements ChangeSetRepository interface using Drizzle ORM.
*/
import { eq, desc, asc } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { changeSets, changeSetEntries, type ChangeSet } from '../../schema.js';
import type {
ChangeSetRepository,
CreateChangeSetData,
CreateChangeSetEntryData,
ChangeSetWithEntries,
} from '../change-set-repository.js';
/**
 * Drizzle adapter for ChangeSetRepository.
 *
 * The database handle is injected so tests can supply isolated databases.
 */
export class DrizzleChangeSetRepository implements ChangeSetRepository {
  constructor(private db: DrizzleDatabase) {}

  /**
   * Create a change set together with its entries in one transaction.
   *
   * All rows share a single creation timestamp; entries fall back to their
   * array position when no explicit sortOrder is provided.
   *
   * NOTE(review): the transaction callback is async; test code elsewhere in
   * this repo inserts change sets directly because an async callback is
   * incompatible with the synchronous better-sqlite3 driver — confirm the
   * driver used in production supports async transactions.
   */
  async createWithEntries(data: CreateChangeSetData, entries: CreateChangeSetEntryData[]): Promise<ChangeSet> {
    const changeSetId = nanoid();
    const timestamp = new Date();
    return this.db.transaction(async (tx) => {
      const inserted = await tx
        .insert(changeSets)
        .values({
          id: changeSetId,
          agentId: data.agentId,
          agentName: data.agentName,
          initiativeId: data.initiativeId,
          mode: data.mode,
          summary: data.summary ?? null,
          status: 'applied',
          createdAt: timestamp,
        })
        .returning();
      if (entries.length > 0) {
        await tx.insert(changeSetEntries).values(
          entries.map((entry, index) => ({
            id: nanoid(),
            changeSetId,
            entityType: entry.entityType,
            entityId: entry.entityId,
            action: entry.action,
            previousState: entry.previousState ?? null,
            newState: entry.newState ?? null,
            sortOrder: entry.sortOrder ?? index,
            createdAt: timestamp,
          })),
        );
      }
      return inserted[0];
    });
  }

  /** One change set by id, or null when it does not exist. */
  async findById(id: string): Promise<ChangeSet | null> {
    const rows = await this.db
      .select()
      .from(changeSets)
      .where(eq(changeSets.id, id))
      .limit(1);
    return rows[0] ?? null;
  }

  /** Like findById, but also loads the entries ordered by sortOrder. */
  async findByIdWithEntries(id: string): Promise<ChangeSetWithEntries | null> {
    const changeSet = await this.findById(id);
    if (changeSet === null) {
      return null;
    }
    const entries = await this.db
      .select()
      .from(changeSetEntries)
      .where(eq(changeSetEntries.changeSetId, id))
      .orderBy(asc(changeSetEntries.sortOrder));
    return { ...changeSet, entries };
  }

  /** All change sets for an initiative, newest first. */
  async findByInitiativeId(initiativeId: string): Promise<ChangeSet[]> {
    return this.db
      .select()
      .from(changeSets)
      .where(eq(changeSets.initiativeId, initiativeId))
      .orderBy(desc(changeSets.createdAt));
  }

  /** All change sets produced by an agent, newest first. */
  async findByAgentId(agentId: string): Promise<ChangeSet[]> {
    return this.db
      .select()
      .from(changeSets)
      .where(eq(changeSets.agentId, agentId))
      .orderBy(desc(changeSets.createdAt));
  }

  /** Flip a change set to 'reverted', stamping revertedAt. Throws when the id is unknown. */
  async markReverted(id: string): Promise<ChangeSet> {
    const rows = await this.db
      .update(changeSets)
      .set({ status: 'reverted', revertedAt: new Date() })
      .where(eq(changeSets.id, id))
      .returning();
    const updated = rows[0];
    if (!updated) {
      throw new Error(`ChangeSet not found: ${id}`);
    }
    return updated;
  }
}

View File

@@ -0,0 +1,67 @@
/**
* Drizzle Conversation Repository Adapter
*
* Implements ConversationRepository interface using Drizzle ORM.
*/
import { eq, and, asc } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { conversations, type Conversation } from '../../schema.js';
import type { ConversationRepository, CreateConversationData } from '../conversation-repository.js';
/**
 * Drizzle adapter for ConversationRepository.
 *
 * The database handle is injected so tests can supply isolated databases.
 */
export class DrizzleConversationRepository implements ConversationRepository {
  constructor(private db: DrizzleDatabase) {}

  /**
   * Create a new agent-to-agent conversation in 'pending' status.
   *
   * Optional context references (initiative/phase/task) are stored as null
   * when omitted. The row is re-read after insert so the caller receives
   * exactly what the database persisted.
   *
   * @throws Error if the row cannot be read back immediately after insert.
   */
  async create(data: CreateConversationData): Promise<Conversation> {
    const now = new Date();
    const id = nanoid();
    await this.db.insert(conversations).values({
      id,
      fromAgentId: data.fromAgentId,
      toAgentId: data.toAgentId,
      initiativeId: data.initiativeId ?? null,
      phaseId: data.phaseId ?? null,
      taskId: data.taskId ?? null,
      question: data.question,
      status: 'pending',
      createdAt: now,
      updatedAt: now,
    });
    // Was `this.findById(id) as Promise<Conversation>` — an unchecked type
    // assertion hiding a possible null. Make the invariant an explicit check.
    const created = await this.findById(id);
    if (!created) {
      throw new Error(`Conversation not found after insert: ${id}`);
    }
    return created;
  }

  /** One conversation by id, or null when it does not exist. */
  async findById(id: string): Promise<Conversation | null> {
    const rows = await this.db
      .select()
      .from(conversations)
      .where(eq(conversations.id, id))
      .limit(1);
    return rows[0] ?? null;
  }

  /** Unanswered conversations addressed to an agent, oldest first. */
  async findPendingForAgent(toAgentId: string): Promise<Conversation[]> {
    return this.db
      .select()
      .from(conversations)
      .where(
        and(
          eq(conversations.toAgentId, toAgentId),
          // No widening cast needed: 'pending' is assignable to the column type.
          eq(conversations.status, 'pending'),
        ),
      )
      .orderBy(asc(conversations.createdAt));
  }

  /**
   * Record an answer and mark the conversation 'answered'.
   * Returns the updated row, or null when the id is unknown.
   */
  async answer(id: string, answer: string): Promise<Conversation | null> {
    await this.db
      .update(conversations)
      .set({
        answer,
        status: 'answered',
        updatedAt: new Date(),
      })
      .where(eq(conversations.id, id));
    return this.findById(id);
  }
}

View File

@@ -0,0 +1,18 @@
/**
* Drizzle Repository Adapters
*
* Re-exports all Drizzle implementations of repository interfaces.
* These are the ADAPTERS for the repository PORTS.
*/
export { DrizzleInitiativeRepository } from './initiative.js';
export { DrizzlePhaseRepository } from './phase.js';
export { DrizzleTaskRepository } from './task.js';
export { DrizzleAgentRepository } from './agent.js';
export { DrizzleMessageRepository } from './message.js';
export { DrizzlePageRepository } from './page.js';
export { DrizzleProjectRepository } from './project.js';
export { DrizzleAccountRepository } from './account.js';
export { DrizzleChangeSetRepository } from './change-set.js';
export { DrizzleLogChunkRepository } from './log-chunk.js';
export { DrizzleConversationRepository } from './conversation.js';

View File

@@ -0,0 +1,150 @@
/**
* DrizzleInitiativeRepository Tests
*
* Tests for the Initiative repository adapter.
*/
import { describe, it, expect, beforeEach } from 'vitest';
import { DrizzleInitiativeRepository } from './initiative.js';
import { createTestDatabase } from './test-helpers.js';
import type { DrizzleDatabase } from '../../index.js';
// Test plan: exercise every CRUD method of the initiative adapter against a
// fresh database, covering happy paths and the not-found error paths.
describe('DrizzleInitiativeRepository', () => {
  let db: DrizzleDatabase;
  let repo: DrizzleInitiativeRepository;
  // A brand-new database per test keeps cases fully isolated.
  beforeEach(() => {
    db = createTestDatabase();
    repo = new DrizzleInitiativeRepository(db);
  });
  describe('create', () => {
    it('should create an initiative with generated id and timestamps', async () => {
      const initiative = await repo.create({
        name: 'Test Initiative',
      });
      expect(initiative.id).toBeDefined();
      expect(initiative.id.length).toBeGreaterThan(0);
      expect(initiative.name).toBe('Test Initiative');
      // Status defaults to 'active' when not supplied.
      expect(initiative.status).toBe('active');
      expect(initiative.createdAt).toBeInstanceOf(Date);
      expect(initiative.updatedAt).toBeInstanceOf(Date);
    });
    it('should use provided status', async () => {
      const initiative = await repo.create({
        name: 'Completed Initiative',
        status: 'completed',
      });
      expect(initiative.status).toBe('completed');
    });
  });
  describe('findById', () => {
    it('should return null for non-existent initiative', async () => {
      const result = await repo.findById('non-existent-id');
      expect(result).toBeNull();
    });
    it('should find an existing initiative', async () => {
      const created = await repo.create({
        name: 'Find Me',
      });
      const found = await repo.findById(created.id);
      expect(found).not.toBeNull();
      expect(found!.id).toBe(created.id);
      expect(found!.name).toBe('Find Me');
    });
  });
  describe('findAll', () => {
    it('should return empty array initially', async () => {
      const all = await repo.findAll();
      expect(all).toEqual([]);
    });
    it('should return all initiatives', async () => {
      await repo.create({ name: 'Initiative 1' });
      await repo.create({ name: 'Initiative 2' });
      await repo.create({ name: 'Initiative 3' });
      const all = await repo.findAll();
      expect(all.length).toBe(3);
    });
  });
  describe('update', () => {
    it('should update fields and updatedAt', async () => {
      const created = await repo.create({
        name: 'Original Name',
        status: 'active',
      });
      // Small delay to ensure updatedAt differs
      await new Promise((resolve) => setTimeout(resolve, 10));
      const updated = await repo.update(created.id, {
        name: 'Updated Name',
        status: 'completed',
      });
      expect(updated.name).toBe('Updated Name');
      expect(updated.status).toBe('completed');
      // >= (not >) tolerates coarse timestamp resolution.
      expect(updated.updatedAt.getTime()).toBeGreaterThanOrEqual(created.updatedAt.getTime());
    });
    it('should throw for non-existent initiative', async () => {
      await expect(
        repo.update('non-existent-id', { name: 'New Name' })
      ).rejects.toThrow('Initiative not found');
    });
  });
  describe('delete', () => {
    it('should delete an existing initiative', async () => {
      const created = await repo.create({ name: 'To Delete' });
      await repo.delete(created.id);
      const found = await repo.findById(created.id);
      expect(found).toBeNull();
    });
    it('should throw for non-existent initiative', async () => {
      await expect(repo.delete('non-existent-id')).rejects.toThrow(
        'Initiative not found'
      );
    });
  });
  describe('findByStatus', () => {
    it('should return empty array for no matches', async () => {
      await repo.create({ name: 'Active 1', status: 'active' });
      const completed = await repo.findByStatus('completed');
      expect(completed).toEqual([]);
    });
    it('should filter by status', async () => {
      // One of each status plus a duplicate 'active' to prove filtering.
      await repo.create({ name: 'Active 1', status: 'active' });
      await repo.create({ name: 'Active 2', status: 'active' });
      await repo.create({ name: 'Completed', status: 'completed' });
      await repo.create({ name: 'Archived', status: 'archived' });
      const active = await repo.findByStatus('active');
      expect(active).toHaveLength(2);
      expect(active.every((i) => i.status === 'active')).toBe(true);
      const completed = await repo.findByStatus('completed');
      expect(completed).toHaveLength(1);
      expect(completed[0].name).toBe('Completed');
      const archived = await repo.findByStatus('archived');
      expect(archived).toHaveLength(1);
      expect(archived[0].name).toBe('Archived');
    });
  });
});

View File

@@ -0,0 +1,87 @@
/**
* Drizzle Initiative Repository Adapter
*
* Implements InitiativeRepository interface using Drizzle ORM.
*/
import { eq } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { agents, initiatives, type Initiative } from '../../schema.js';
import type {
InitiativeRepository,
CreateInitiativeData,
UpdateInitiativeData,
} from '../initiative-repository.js';
/**
* Drizzle adapter for InitiativeRepository.
*
* Uses dependency injection for database instance,
* enabling isolated test databases.
*/
/**
 * Drizzle adapter for InitiativeRepository.
 *
 * The database handle is injected so tests can supply isolated databases.
 */
export class DrizzleInitiativeRepository implements InitiativeRepository {
  constructor(private db: DrizzleDatabase) {}

  /** Insert a new initiative; status defaults to 'active'. */
  async create(data: CreateInitiativeData): Promise<Initiative> {
    const now = new Date();
    const rows = await this.db
      .insert(initiatives)
      .values({
        id: nanoid(),
        ...data,
        status: data.status ?? 'active',
        createdAt: now,
        updatedAt: now,
      })
      .returning();
    return rows[0];
  }

  /** One initiative by id, or null when it does not exist. */
  async findById(id: string): Promise<Initiative | null> {
    const rows = await this.db
      .select()
      .from(initiatives)
      .where(eq(initiatives.id, id))
      .limit(1);
    return rows[0] ?? null;
  }

  /** Every initiative, in storage order. */
  async findAll(): Promise<Initiative[]> {
    return this.db.select().from(initiatives);
  }

  /** Initiatives filtered to a single lifecycle status. */
  async findByStatus(status: 'active' | 'completed' | 'archived'): Promise<Initiative[]> {
    return this.db.select().from(initiatives).where(eq(initiatives.status, status));
  }

  /** Patch fields and bump updatedAt. Throws when the id is unknown. */
  async update(id: string, data: UpdateInitiativeData): Promise<Initiative> {
    const rows = await this.db
      .update(initiatives)
      .set({ ...data, updatedAt: new Date() })
      .where(eq(initiatives.id, id))
      .returning();
    if (rows.length === 0) {
      throw new Error(`Initiative not found: ${id}`);
    }
    return rows[0];
  }

  /** Delete an initiative. Throws when the id is unknown. */
  async delete(id: string): Promise<void> {
    // Detach agents before deleting — agents.initiative_id FK may lack ON DELETE SET NULL
    // in databases that haven't applied migration 0025 yet.
    await this.db.update(agents).set({ initiativeId: null }).where(eq(agents.initiativeId, id));
    const removed = await this.db.delete(initiatives).where(eq(initiatives.id, id)).returning();
    if (removed.length === 0) {
      throw new Error(`Initiative not found: ${id}`);
    }
  }
}

View File

@@ -0,0 +1,58 @@
/**
* Drizzle Log Chunk Repository Adapter
*
* Implements LogChunkRepository interface using Drizzle ORM.
*/
import { eq, asc, max } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { agentLogChunks } from '../../schema.js';
import type { LogChunkRepository } from '../log-chunk-repository.js';
/**
 * Drizzle adapter for LogChunkRepository.
 *
 * The database handle is injected so tests can supply isolated databases.
 */
export class DrizzleLogChunkRepository implements LogChunkRepository {
  constructor(private db: DrizzleDatabase) {}

  /** Append one log chunk for an agent session, stamped with the current time. */
  async insertChunk(data: {
    agentId: string;
    agentName: string;
    sessionNumber: number;
    content: string;
  }): Promise<void> {
    const { agentId, agentName, sessionNumber, content } = data;
    await this.db.insert(agentLogChunks).values({
      id: nanoid(),
      agentId,
      agentName,
      sessionNumber,
      content,
      createdAt: new Date(),
    });
  }

  /** All chunks recorded for an agent, oldest first. */
  async findByAgentId(agentId: string): Promise<{ content: string; sessionNumber: number; createdAt: Date }[]> {
    return this.db
      .select({
        content: agentLogChunks.content,
        sessionNumber: agentLogChunks.sessionNumber,
        createdAt: agentLogChunks.createdAt,
      })
      .from(agentLogChunks)
      .where(eq(agentLogChunks.agentId, agentId))
      .orderBy(asc(agentLogChunks.createdAt));
  }

  /** Remove every chunk recorded for an agent. */
  async deleteByAgentId(agentId: string): Promise<void> {
    await this.db.delete(agentLogChunks).where(eq(agentLogChunks.agentId, agentId));
  }

  /** Highest session number seen for an agent; 0 when nothing is recorded. */
  async getSessionCount(agentId: string): Promise<number> {
    const rows = await this.db
      .select({ maxSession: max(agentLogChunks.sessionNumber) })
      .from(agentLogChunks)
      .where(eq(agentLogChunks.agentId, agentId));
    return rows[0]?.maxSession ?? 0;
  }
}

View File

@@ -0,0 +1,456 @@
/**
* DrizzleMessageRepository Tests
*
* Tests for the Message repository adapter.
*/
import { describe, it, expect, beforeEach } from 'vitest';
import { DrizzleMessageRepository } from './message.js';
import { DrizzleAgentRepository } from './agent.js';
import { DrizzleTaskRepository } from './task.js';
import { DrizzlePhaseRepository } from './phase.js';
import { DrizzleInitiativeRepository } from './initiative.js';
import { createTestDatabase } from './test-helpers.js';
import type { DrizzleDatabase } from '../../index.js';
// Test plan: exercise message CRUD, the sender/recipient queries, threading,
// and the pending → read → responded status flow. Each test runs against a
// fresh database seeded with the minimal initiative→phase→task→agent chain
// that the agent foreign key requires.
describe('DrizzleMessageRepository', () => {
  let db: DrizzleDatabase;
  let messageRepo: DrizzleMessageRepository;
  let agentRepo: DrizzleAgentRepository;
  let testAgentId: string;
  beforeEach(async () => {
    db = createTestDatabase();
    messageRepo = new DrizzleMessageRepository(db);
    agentRepo = new DrizzleAgentRepository(db);
    // Create required hierarchy for agent FK
    const taskRepo = new DrizzleTaskRepository(db);
    const phaseRepo = new DrizzlePhaseRepository(db);
    const initiativeRepo = new DrizzleInitiativeRepository(db);
    const initiative = await initiativeRepo.create({
      name: 'Test Initiative',
    });
    const phase = await phaseRepo.create({
      initiativeId: initiative.id,
      name: 'Test Phase',
    });
    const task = await taskRepo.create({
      phaseId: phase.id,
      name: 'Test Task',
      order: 1,
    });
    // Create a test agent
    const agent = await agentRepo.create({
      name: 'test-agent',
      worktreeId: 'worktree-123',
      taskId: task.id,
    });
    testAgentId = agent.id;
  });
  describe('create', () => {
    it('should create agent→user message (question)', async () => {
      const message = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        type: 'question',
        content: 'Do you want to proceed with deployment?',
        requiresResponse: true,
      });
      expect(message.id).toBeDefined();
      expect(message.id.length).toBeGreaterThan(0);
      expect(message.senderType).toBe('agent');
      expect(message.senderId).toBe(testAgentId);
      expect(message.recipientType).toBe('user');
      // User-targeted messages carry no recipient id.
      expect(message.recipientId).toBeNull();
      expect(message.type).toBe('question');
      expect(message.content).toBe('Do you want to proceed with deployment?');
      expect(message.requiresResponse).toBe(true);
      expect(message.status).toBe('pending');
      expect(message.parentMessageId).toBeNull();
      expect(message.createdAt).toBeInstanceOf(Date);
      expect(message.updatedAt).toBeInstanceOf(Date);
    });
    it('should create agent→user message (notification, requiresResponse=false)', async () => {
      const message = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        type: 'info',
        content: 'Build completed successfully.',
        requiresResponse: false,
      });
      expect(message.type).toBe('info');
      expect(message.requiresResponse).toBe(false);
      expect(message.status).toBe('pending');
    });
    it('should create user→agent response', async () => {
      // First create the question
      const question = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        type: 'question',
        content: 'Which database?',
        requiresResponse: true,
      });
      // Then create user response
      const response = await messageRepo.create({
        senderType: 'user',
        recipientType: 'agent',
        recipientId: testAgentId,
        type: 'response',
        content: 'Use PostgreSQL',
        parentMessageId: question.id,
      });
      expect(response.senderType).toBe('user');
      expect(response.senderId).toBeNull();
      expect(response.recipientType).toBe('agent');
      expect(response.recipientId).toBe(testAgentId);
      // parentMessageId threads the response to its question.
      expect(response.parentMessageId).toBe(question.id);
    });
    it('should default type to info', async () => {
      const message = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'Status update',
      });
      expect(message.type).toBe('info');
    });
  });
  describe('findById', () => {
    it('should return null for non-existent message', async () => {
      const result = await messageRepo.findById('non-existent-id');
      expect(result).toBeNull();
    });
    it('should find an existing message', async () => {
      const created = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'Test message',
      });
      const found = await messageRepo.findById(created.id);
      expect(found).not.toBeNull();
      expect(found!.id).toBe(created.id);
      expect(found!.content).toBe('Test message');
    });
  });
  describe('findBySender', () => {
    it('should find messages by agent sender', async () => {
      await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'Message 1',
      });
      await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'Message 2',
      });
      const messages = await messageRepo.findBySender('agent', testAgentId);
      expect(messages.length).toBe(2);
    });
    it('should find messages by user sender', async () => {
      await messageRepo.create({
        senderType: 'user',
        recipientType: 'agent',
        recipientId: testAgentId,
        content: 'User message 1',
      });
      await messageRepo.create({
        senderType: 'user',
        recipientType: 'agent',
        recipientId: testAgentId,
        content: 'User message 2',
      });
      // User senders have no id, so the query runs on sender type alone.
      const messages = await messageRepo.findBySender('user');
      expect(messages.length).toBe(2);
    });
    it('should return empty array when no messages from sender', async () => {
      const messages = await messageRepo.findBySender('agent', 'nonexistent-id');
      expect(messages).toEqual([]);
    });
  });
  describe('findByRecipient', () => {
    it('should find messages by user recipient', async () => {
      await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'For user 1',
      });
      await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'For user 2',
      });
      const messages = await messageRepo.findByRecipient('user');
      expect(messages.length).toBe(2);
    });
    it('should find messages by agent recipient', async () => {
      await messageRepo.create({
        senderType: 'user',
        recipientType: 'agent',
        recipientId: testAgentId,
        content: 'For agent',
      });
      const messages = await messageRepo.findByRecipient('agent', testAgentId);
      expect(messages.length).toBe(1);
    });
    it('should return empty array when no messages for recipient', async () => {
      const messages = await messageRepo.findByRecipient('agent', 'nonexistent-id');
      expect(messages).toEqual([]);
    });
  });
  describe('findPendingForUser', () => {
    it('should return only user-targeted pending messages', async () => {
      // Create pending message for user
      await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'Pending for user',
        status: 'pending',
      });
      // Create read message for user (should not be returned)
      const readMessage = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'Already read',
      });
      await messageRepo.update(readMessage.id, { status: 'read' });
      // Create pending message for agent (should not be returned)
      await messageRepo.create({
        senderType: 'user',
        recipientType: 'agent',
        recipientId: testAgentId,
        content: 'For agent not user',
      });
      const pending = await messageRepo.findPendingForUser();
      expect(pending.length).toBe(1);
      expect(pending[0].content).toBe('Pending for user');
    });
    it('should return empty array when no pending messages for user', async () => {
      const pending = await messageRepo.findPendingForUser();
      expect(pending).toEqual([]);
    });
  });
  describe('findRequiringResponse', () => {
    it('should return only messages needing response', async () => {
      // Create message requiring response
      await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        type: 'question',
        content: 'Requires answer',
        requiresResponse: true,
      });
      // Create message not requiring response
      await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        type: 'info',
        content: 'Just info',
        requiresResponse: false,
      });
      // Create message that required response but was responded
      const responded = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        type: 'question',
        content: 'Already answered',
        requiresResponse: true,
      });
      await messageRepo.update(responded.id, { status: 'responded' });
      const requiring = await messageRepo.findRequiringResponse();
      expect(requiring.length).toBe(1);
      expect(requiring[0].content).toBe('Requires answer');
    });
    it('should return empty array when no messages require response', async () => {
      const requiring = await messageRepo.findRequiringResponse();
      expect(requiring).toEqual([]);
    });
  });
  describe('findReplies (message threading)', () => {
    it('should find all replies to a parent message', async () => {
      // Create original question
      const question = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        type: 'question',
        content: 'Original question',
        requiresResponse: true,
      });
      // Create two replies
      await messageRepo.create({
        senderType: 'user',
        recipientType: 'agent',
        recipientId: testAgentId,
        type: 'response',
        content: 'First reply',
        parentMessageId: question.id,
      });
      await messageRepo.create({
        senderType: 'user',
        recipientType: 'agent',
        recipientId: testAgentId,
        type: 'response',
        content: 'Second reply',
        parentMessageId: question.id,
      });
      // Create unrelated message (should not be in replies)
      await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'Unrelated message',
      });
      const replies = await messageRepo.findReplies(question.id);
      expect(replies.length).toBe(2);
      expect(replies.every((r) => r.parentMessageId === question.id)).toBe(true);
    });
    it('should return empty array when message has no replies', async () => {
      const message = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'No replies',
      });
      const replies = await messageRepo.findReplies(message.id);
      expect(replies).toEqual([]);
    });
  });
  describe('update status flow', () => {
    it('should update status: pending → read → responded', async () => {
      const message = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        type: 'question',
        content: 'Status flow test',
        requiresResponse: true,
      });
      expect(message.status).toBe('pending');
      // Wait to ensure different timestamps
      await new Promise((resolve) => setTimeout(resolve, 10));
      // Update to read
      const readMessage = await messageRepo.update(message.id, { status: 'read' });
      expect(readMessage.status).toBe('read');
      // >= (not >) tolerates coarse timestamp resolution.
      expect(readMessage.updatedAt.getTime()).toBeGreaterThanOrEqual(message.updatedAt.getTime());
      // Wait again
      await new Promise((resolve) => setTimeout(resolve, 10));
      // Update to responded
      const respondedMessage = await messageRepo.update(readMessage.id, {
        status: 'responded',
      });
      expect(respondedMessage.status).toBe('responded');
      expect(respondedMessage.updatedAt.getTime()).toBeGreaterThanOrEqual(
        readMessage.updatedAt.getTime()
      );
    });
  });
  describe('update', () => {
    it('should throw for non-existent message', async () => {
      await expect(
        messageRepo.update('non-existent-id', { status: 'read' })
      ).rejects.toThrow('Message not found');
    });
    it('should update content and updatedAt', async () => {
      const created = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'Original content',
      });
      await new Promise((resolve) => setTimeout(resolve, 10));
      const updated = await messageRepo.update(created.id, {
        content: 'Updated content',
      });
      expect(updated.content).toBe('Updated content');
      expect(updated.updatedAt.getTime()).toBeGreaterThanOrEqual(created.updatedAt.getTime());
    });
  });
  describe('delete', () => {
    it('should delete an existing message', async () => {
      const created = await messageRepo.create({
        senderType: 'agent',
        senderId: testAgentId,
        recipientType: 'user',
        content: 'To delete',
      });
      await messageRepo.delete(created.id);
      const found = await messageRepo.findById(created.id);
      expect(found).toBeNull();
    });
    it('should throw for non-existent message', async () => {
      await expect(messageRepo.delete('non-existent-id')).rejects.toThrow(
        'Message not found'
      );
    });
  });
});

View File

@@ -0,0 +1,138 @@
/**
* Drizzle Message Repository Adapter
*
* Implements MessageRepository interface using Drizzle ORM.
*/
import { eq, and, desc } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { messages, type Message } from '../../schema.js';
import type {
MessageRepository,
CreateMessageData,
UpdateMessageData,
MessageParticipantType,
} from '../message-repository.js';
/**
* Drizzle adapter for MessageRepository.
*
* Uses dependency injection for database instance,
* enabling isolated test databases.
*/
/**
 * Drizzle adapter for MessageRepository.
 *
 * The database handle is injected so tests can supply isolated databases.
 */
export class DrizzleMessageRepository implements MessageRepository {
  constructor(private db: DrizzleDatabase) {}

  /** Insert a message; type defaults to 'info', status to 'pending'. */
  async create(data: CreateMessageData): Promise<Message> {
    const now = new Date();
    const rows = await this.db
      .insert(messages)
      .values({
        id: nanoid(),
        senderType: data.senderType,
        senderId: data.senderId ?? null,
        recipientType: data.recipientType,
        recipientId: data.recipientId ?? null,
        type: data.type ?? 'info',
        content: data.content,
        requiresResponse: data.requiresResponse ?? false,
        status: data.status ?? 'pending',
        parentMessageId: data.parentMessageId ?? null,
        createdAt: now,
        updatedAt: now,
      })
      .returning();
    return rows[0];
  }

  /** One message by id, or null when it does not exist. */
  async findById(id: string): Promise<Message | null> {
    const rows = await this.db
      .select()
      .from(messages)
      .where(eq(messages.id, id))
      .limit(1);
    return rows[0] ?? null;
  }

  /**
   * Messages sent by a participant, newest first.
   * With an id, sender type and id must both match; without one, every
   * message of that sender type is returned (users carry no sender id).
   */
  async findBySender(type: MessageParticipantType, id?: string): Promise<Message[]> {
    const condition = id
      ? and(eq(messages.senderType, type), eq(messages.senderId, id))
      : eq(messages.senderType, type);
    return this.db.select().from(messages).where(condition).orderBy(desc(messages.createdAt));
  }

  /**
   * Messages addressed to a participant, newest first.
   * Mirrors findBySender: an id narrows to one specific recipient.
   */
  async findByRecipient(type: MessageParticipantType, id?: string): Promise<Message[]> {
    const condition = id
      ? and(eq(messages.recipientType, type), eq(messages.recipientId, id))
      : eq(messages.recipientType, type);
    return this.db.select().from(messages).where(condition).orderBy(desc(messages.createdAt));
  }

  /** Pending messages addressed to the user, newest first. */
  async findPendingForUser(): Promise<Message[]> {
    return this.db
      .select()
      .from(messages)
      .where(and(eq(messages.recipientType, 'user'), eq(messages.status, 'pending')))
      .orderBy(desc(messages.createdAt));
  }

  /** Still-pending messages flagged as requiring a response, newest first. */
  async findRequiringResponse(): Promise<Message[]> {
    return this.db
      .select()
      .from(messages)
      .where(and(eq(messages.requiresResponse, true), eq(messages.status, 'pending')))
      .orderBy(desc(messages.createdAt));
  }

  /** Replies threaded under a parent message, newest first. */
  async findReplies(parentMessageId: string): Promise<Message[]> {
    return this.db
      .select()
      .from(messages)
      .where(eq(messages.parentMessageId, parentMessageId))
      .orderBy(desc(messages.createdAt));
  }

  /** Patch fields and bump updatedAt. Throws when the id is unknown. */
  async update(id: string, data: UpdateMessageData): Promise<Message> {
    const rows = await this.db
      .update(messages)
      .set({ ...data, updatedAt: new Date() })
      .where(eq(messages.id, id))
      .returning();
    if (rows.length === 0) {
      throw new Error(`Message not found: ${id}`);
    }
    return rows[0];
  }

  /** Delete a message. Throws when the id is unknown. */
  async delete(id: string): Promise<void> {
    const removed = await this.db.delete(messages).where(eq(messages.id, id)).returning();
    if (removed.length === 0) {
      throw new Error(`Message not found: ${id}`);
    }
  }
}

View File

@@ -0,0 +1,117 @@
/**
* Drizzle Page Repository Adapter
*
* Implements PageRepository interface using Drizzle ORM.
*/
import { eq, isNull, and, asc, inArray } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { pages, type Page } from '../../schema.js';
import type {
PageRepository,
CreatePageData,
UpdatePageData,
} from '../page-repository.js';
/**
 * Drizzle ORM implementation of the PageRepository port.
 * The database handle is constructor-injected so tests can run against
 * isolated in-memory databases.
 */
export class DrizzlePageRepository implements PageRepository {
  constructor(private db: DrizzleDatabase) {}

  /** Insert a new page with a generated id and current timestamps. */
  async create(data: CreatePageData): Promise<Page> {
    const timestamp = new Date();
    const [row] = await this.db
      .insert(pages)
      .values({ id: nanoid(), ...data, createdAt: timestamp, updatedAt: timestamp })
      .returning();
    return row;
  }

  /** Look up a page by primary key; null when absent. */
  async findById(id: string): Promise<Page | null> {
    const rows = await this.db.select().from(pages).where(eq(pages.id, id)).limit(1);
    return rows[0] ?? null;
  }

  /** Fetch all pages matching any of the given ids (unordered). */
  async findByIds(ids: string[]): Promise<Page[]> {
    if (ids.length === 0) {
      return [];
    }
    return this.db.select().from(pages).where(inArray(pages.id, ids));
  }

  /** All pages of an initiative, ordered by sortOrder ascending. */
  async findByInitiativeId(initiativeId: string): Promise<Page[]> {
    return this.db
      .select()
      .from(pages)
      .where(eq(pages.initiativeId, initiativeId))
      .orderBy(asc(pages.sortOrder));
  }

  /** Direct children of a page, ordered by sortOrder ascending. */
  async findByParentPageId(parentPageId: string): Promise<Page[]> {
    return this.db
      .select()
      .from(pages)
      .where(eq(pages.parentPageId, parentPageId))
      .orderBy(asc(pages.sortOrder));
  }

  /** The initiative's root page (parentPageId IS NULL); null when none exists. */
  async findRootPage(initiativeId: string): Promise<Page | null> {
    const rows = await this.db
      .select()
      .from(pages)
      .where(
        and(
          eq(pages.initiativeId, initiativeId),
          isNull(pages.parentPageId),
        ),
      )
      .limit(1);
    return rows[0] ?? null;
  }

  /**
   * Return the initiative's root page, creating a default one
   * (title 'Untitled', empty content, sortOrder 0) if none exists yet.
   */
  async getOrCreateRootPage(initiativeId: string): Promise<Page> {
    const root = await this.findRootPage(initiativeId);
    if (root) {
      return root;
    }
    return this.create({
      initiativeId,
      parentPageId: null,
      title: 'Untitled',
      content: null,
      sortOrder: 0,
    });
  }

  /**
   * Apply a partial update and refresh updatedAt.
   * @throws Error when no page has the given id.
   */
  async update(id: string, data: UpdatePageData): Promise<Page> {
    const [row] = await this.db
      .update(pages)
      .set({ ...data, updatedAt: new Date() })
      .where(eq(pages.id, id))
      .returning();
    if (!row) {
      throw new Error(`Page not found: ${id}`);
    }
    return row;
  }

  /**
   * Delete a page by id.
   * @throws Error when no page has the given id.
   */
  async delete(id: string): Promise<void> {
    const removed = await this.db.delete(pages).where(eq(pages.id, id)).returning();
    if (removed.length === 0) {
      throw new Error(`Page not found: ${id}`);
    }
  }
}

View File

@@ -0,0 +1,408 @@
/**
 * DrizzlePhaseRepository Tests
 *
 * Tests for the Phase repository adapter: CRUD operations plus the
 * phase-dependency edge table (createDependency / getDependencies /
 * getDependents / findDependenciesByInitiativeId).
 *
 * Each test runs against a fresh in-memory database built by
 * createTestDatabase(), so tests are fully isolated from one another.
 */
import { describe, it, expect, beforeEach } from 'vitest';
import { DrizzlePhaseRepository } from './phase.js';
import { DrizzleInitiativeRepository } from './initiative.js';
import { createTestDatabase } from './test-helpers.js';
import type { DrizzleDatabase } from '../../index.js';
describe('DrizzlePhaseRepository', () => {
  let db: DrizzleDatabase;
  let phaseRepo: DrizzlePhaseRepository;
  let initiativeRepo: DrizzleInitiativeRepository;
  let testInitiativeId: string;
  beforeEach(async () => {
    db = createTestDatabase();
    phaseRepo = new DrizzlePhaseRepository(db);
    initiativeRepo = new DrizzleInitiativeRepository(db);
    // Create a test initiative for FK constraint
    const initiative = await initiativeRepo.create({
      name: 'Test Initiative',
    });
    testInitiativeId = initiative.id;
  });
  describe('create', () => {
    it('should create a phase with generated id and timestamps', async () => {
      const phase = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Test Phase',
      });
      expect(phase.id).toBeDefined();
      expect(phase.id.length).toBeGreaterThan(0);
      expect(phase.initiativeId).toBe(testInitiativeId);
      expect(phase.name).toBe('Test Phase');
      // 'pending' is the adapter's default status when none is supplied
      expect(phase.status).toBe('pending');
      expect(phase.createdAt).toBeInstanceOf(Date);
      expect(phase.updatedAt).toBeInstanceOf(Date);
    });
    it('should throw for invalid initiativeId (FK constraint)', async () => {
      await expect(
        phaseRepo.create({
          initiativeId: 'invalid-initiative-id',
          name: 'Invalid Phase',
        })
      ).rejects.toThrow();
    });
  });
  describe('findById', () => {
    it('should return null for non-existent phase', async () => {
      const result = await phaseRepo.findById('non-existent-id');
      expect(result).toBeNull();
    });
    it('should find an existing phase', async () => {
      const created = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Find Me',
      });
      const found = await phaseRepo.findById(created.id);
      expect(found).not.toBeNull();
      expect(found!.id).toBe(created.id);
      expect(found!.name).toBe('Find Me');
    });
  });
  describe('findByInitiativeId', () => {
    it('should return empty array for initiative with no phases', async () => {
      const phases = await phaseRepo.findByInitiativeId(testInitiativeId);
      expect(phases).toEqual([]);
    });
    it('should return only matching phases ordered by createdAt', async () => {
      // NOTE(review): this ordering assertion relies on the three phases
      // getting distinct (or stably tie-broken) createdAt timestamps even
      // when created back-to-back in the same millisecond — TODO confirm
      // the timestamp resolution / tiebreak makes this deterministic.
      await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase A',
      });
      await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase B',
      });
      await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase C',
      });
      // Create another initiative with phases
      const otherInitiative = await initiativeRepo.create({
        name: 'Other Initiative',
      });
      await phaseRepo.create({
        initiativeId: otherInitiative.id,
        name: 'Other Phase',
      });
      const phases = await phaseRepo.findByInitiativeId(testInitiativeId);
      expect(phases.length).toBe(3);
      expect(phases[0].name).toBe('Phase A');
      expect(phases[1].name).toBe('Phase B');
      expect(phases[2].name).toBe('Phase C');
    });
  });
  describe('update', () => {
    it('should update fields and updatedAt', async () => {
      const created = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Original Name',
        status: 'pending',
      });
      // Small delay so the update's timestamp can differ from creation
      await new Promise((resolve) => setTimeout(resolve, 10));
      const updated = await phaseRepo.update(created.id, {
        name: 'Updated Name',
        status: 'in_progress',
      });
      expect(updated.name).toBe('Updated Name');
      expect(updated.status).toBe('in_progress');
      expect(updated.updatedAt.getTime()).toBeGreaterThanOrEqual(created.updatedAt.getTime());
    });
    it('should throw for non-existent phase', async () => {
      await expect(
        phaseRepo.update('non-existent-id', { name: 'New Name' })
      ).rejects.toThrow('Phase not found');
    });
  });
  describe('delete', () => {
    it('should delete an existing phase', async () => {
      const created = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'To Delete',
      });
      await phaseRepo.delete(created.id);
      const found = await phaseRepo.findById(created.id);
      expect(found).toBeNull();
    });
    it('should throw for non-existent phase', async () => {
      await expect(phaseRepo.delete('non-existent-id')).rejects.toThrow(
        'Phase not found'
      );
    });
  });
  // ===========================================================================
  // Phase Dependency Tests
  // ===========================================================================
  describe('createDependency', () => {
    it('should create dependency between two phases', async () => {
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const phase2 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 2',
      });
      await phaseRepo.createDependency(phase2.id, phase1.id);
      const deps = await phaseRepo.getDependencies(phase2.id);
      expect(deps).toContain(phase1.id);
    });
    it('should throw if phase does not exist', async () => {
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      // Creating dependency with non-existent phase should throw (FK constraint)
      await expect(
        phaseRepo.createDependency('non-existent-phase', phase1.id)
      ).rejects.toThrow();
    });
    it('should allow multiple dependencies for same phase', async () => {
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const phase2 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 2',
      });
      const phase3 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 3',
      });
      // Phase 3 depends on both Phase 1 and Phase 2
      await phaseRepo.createDependency(phase3.id, phase1.id);
      await phaseRepo.createDependency(phase3.id, phase2.id);
      const deps = await phaseRepo.getDependencies(phase3.id);
      expect(deps.length).toBe(2);
      expect(deps).toContain(phase1.id);
      expect(deps).toContain(phase2.id);
    });
  });
  describe('getDependencies', () => {
    it('should return empty array for phase with no dependencies', async () => {
      const phase = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const deps = await phaseRepo.getDependencies(phase.id);
      expect(deps).toEqual([]);
    });
    it('should return dependency IDs for phase with dependencies', async () => {
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const phase2 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 2',
      });
      const phase3 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 3',
      });
      // Phase 3 depends on Phase 1 and Phase 2
      await phaseRepo.createDependency(phase3.id, phase1.id);
      await phaseRepo.createDependency(phase3.id, phase2.id);
      const deps = await phaseRepo.getDependencies(phase3.id);
      expect(deps.length).toBe(2);
      expect(deps).toContain(phase1.id);
      expect(deps).toContain(phase2.id);
    });
    it('should return only direct dependencies (not transitive)', async () => {
      // Phase 1 -> Phase 2 -> Phase 3 (linear chain)
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const phase2 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 2',
      });
      const phase3 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 3',
      });
      // Phase 2 depends on Phase 1
      await phaseRepo.createDependency(phase2.id, phase1.id);
      // Phase 3 depends on Phase 2
      await phaseRepo.createDependency(phase3.id, phase2.id);
      // Phase 3's dependencies should only include Phase 2, not Phase 1
      const depsPhase3 = await phaseRepo.getDependencies(phase3.id);
      expect(depsPhase3.length).toBe(1);
      expect(depsPhase3).toContain(phase2.id);
      expect(depsPhase3).not.toContain(phase1.id);
    });
  });
  describe('getDependents', () => {
    it('should return empty array for phase with no dependents', async () => {
      const phase = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const dependents = await phaseRepo.getDependents(phase.id);
      expect(dependents).toEqual([]);
    });
    it('should return dependent phase IDs', async () => {
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const phase2 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 2',
      });
      const phase3 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 3',
      });
      // Phase 2 and Phase 3 both depend on Phase 1
      await phaseRepo.createDependency(phase2.id, phase1.id);
      await phaseRepo.createDependency(phase3.id, phase1.id);
      const dependents = await phaseRepo.getDependents(phase1.id);
      expect(dependents.length).toBe(2);
      expect(dependents).toContain(phase2.id);
      expect(dependents).toContain(phase3.id);
    });
    it('should return only direct dependents', async () => {
      // Phase 1 -> Phase 2 -> Phase 3 (linear chain)
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const phase2 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 2',
      });
      const phase3 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 3',
      });
      // Phase 2 depends on Phase 1
      await phaseRepo.createDependency(phase2.id, phase1.id);
      // Phase 3 depends on Phase 2
      await phaseRepo.createDependency(phase3.id, phase2.id);
      // Phase 1's dependents should only include Phase 2, not Phase 3
      const dependentsPhase1 = await phaseRepo.getDependents(phase1.id);
      expect(dependentsPhase1.length).toBe(1);
      expect(dependentsPhase1).toContain(phase2.id);
      expect(dependentsPhase1).not.toContain(phase3.id);
    });
  });
  describe('findDependenciesByInitiativeId', () => {
    it('should return empty array for initiative with no dependencies', async () => {
      await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const deps = await phaseRepo.findDependenciesByInitiativeId(testInitiativeId);
      expect(deps).toEqual([]);
    });
    it('should return all dependency edges for an initiative', async () => {
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const phase2 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 2',
      });
      const phase3 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 3',
      });
      await phaseRepo.createDependency(phase2.id, phase1.id);
      await phaseRepo.createDependency(phase3.id, phase1.id);
      await phaseRepo.createDependency(phase3.id, phase2.id);
      const deps = await phaseRepo.findDependenciesByInitiativeId(testInitiativeId);
      expect(deps.length).toBe(3);
      expect(deps).toContainEqual({ phaseId: phase2.id, dependsOnPhaseId: phase1.id });
      expect(deps).toContainEqual({ phaseId: phase3.id, dependsOnPhaseId: phase1.id });
      expect(deps).toContainEqual({ phaseId: phase3.id, dependsOnPhaseId: phase2.id });
    });
    it('should not return dependencies from other initiatives', async () => {
      const phase1 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 1',
      });
      const phase2 = await phaseRepo.create({
        initiativeId: testInitiativeId,
        name: 'Phase 2',
      });
      await phaseRepo.createDependency(phase2.id, phase1.id);
      const otherInitiative = await initiativeRepo.create({ name: 'Other' });
      const otherPhase1 = await phaseRepo.create({
        initiativeId: otherInitiative.id,
        name: 'Other Phase 1',
      });
      const otherPhase2 = await phaseRepo.create({
        initiativeId: otherInitiative.id,
        name: 'Other Phase 2',
      });
      await phaseRepo.createDependency(otherPhase2.id, otherPhase1.id);
      const deps = await phaseRepo.findDependenciesByInitiativeId(testInitiativeId);
      expect(deps.length).toBe(1);
      expect(deps[0].phaseId).toBe(phase2.id);
      expect(deps[0].dependsOnPhaseId).toBe(phase1.id);
    });
  });
});

View File

@@ -0,0 +1,130 @@
/**
* Drizzle Phase Repository Adapter
*
* Implements PhaseRepository interface using Drizzle ORM.
*/
import { eq, asc, and } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { phases, phaseDependencies, type Phase } from '../../schema.js';
import type {
PhaseRepository,
CreatePhaseData,
UpdatePhaseData,
} from '../phase-repository.js';
/**
 * Drizzle Phase Repository Adapter
 *
 * Drizzle ORM implementation of the PhaseRepository port, covering both
 * the phases table and the phase-dependency edge table.
 *
 * The database handle is injected via the constructor, enabling isolated
 * in-memory databases in tests.
 */
export class DrizzlePhaseRepository implements PhaseRepository {
  constructor(private db: DrizzleDatabase) {}

  /**
   * Insert a new phase. A caller-supplied id is honoured when present;
   * otherwise a nanoid is generated. Status defaults to 'pending'.
   */
  async create(data: CreatePhaseData): Promise<Phase> {
    const { id: explicitId, ...fields } = data;
    const timestamp = new Date();
    const [row] = await this.db
      .insert(phases)
      .values({
        id: explicitId ?? nanoid(),
        ...fields,
        status: data.status ?? 'pending',
        createdAt: timestamp,
        updatedAt: timestamp,
      })
      .returning();
    return row;
  }

  /** Look up a phase by primary key; null when absent. */
  async findById(id: string): Promise<Phase | null> {
    const rows = await this.db.select().from(phases).where(eq(phases.id, id)).limit(1);
    return rows[0] ?? null;
  }

  /** All phases of an initiative, oldest first (createdAt ascending). */
  async findByInitiativeId(initiativeId: string): Promise<Phase[]> {
    return this.db
      .select()
      .from(phases)
      .where(eq(phases.initiativeId, initiativeId))
      .orderBy(asc(phases.createdAt));
  }

  /**
   * Every dependency edge whose dependent phase belongs to the given
   * initiative, as (phaseId, dependsOnPhaseId) pairs.
   */
  async findDependenciesByInitiativeId(initiativeId: string): Promise<Array<{ phaseId: string; dependsOnPhaseId: string }>> {
    return this.db
      .select({
        phaseId: phaseDependencies.phaseId,
        dependsOnPhaseId: phaseDependencies.dependsOnPhaseId,
      })
      .from(phaseDependencies)
      .innerJoin(phases, eq(phaseDependencies.phaseId, phases.id))
      .where(eq(phases.initiativeId, initiativeId));
  }

  /**
   * Apply a partial update and refresh updatedAt.
   * @throws Error when no phase has the given id.
   */
  async update(id: string, data: UpdatePhaseData): Promise<Phase> {
    const [row] = await this.db
      .update(phases)
      .set({ ...data, updatedAt: new Date() })
      .where(eq(phases.id, id))
      .returning();
    if (!row) {
      throw new Error(`Phase not found: ${id}`);
    }
    return row;
  }

  /**
   * Delete a phase by id.
   * @throws Error when no phase has the given id.
   */
  async delete(id: string): Promise<void> {
    const removed = await this.db.delete(phases).where(eq(phases.id, id)).returning();
    if (removed.length === 0) {
      throw new Error(`Phase not found: ${id}`);
    }
  }

  /** Record a direct edge: `phaseId` depends on `dependsOnPhaseId`. */
  async createDependency(phaseId: string, dependsOnPhaseId: string): Promise<void> {
    await this.db.insert(phaseDependencies).values({
      id: nanoid(),
      phaseId,
      dependsOnPhaseId,
      createdAt: new Date(),
    });
  }

  /** Ids of the phases that `phaseId` directly depends on. */
  async getDependencies(phaseId: string): Promise<string[]> {
    const edges = await this.db
      .select({ dependsOnPhaseId: phaseDependencies.dependsOnPhaseId })
      .from(phaseDependencies)
      .where(eq(phaseDependencies.phaseId, phaseId));
    return edges.map((edge) => edge.dependsOnPhaseId);
  }

  /** Ids of the phases that directly depend on `phaseId`. */
  async getDependents(phaseId: string): Promise<string[]> {
    const edges = await this.db
      .select({ phaseId: phaseDependencies.phaseId })
      .from(phaseDependencies)
      .where(eq(phaseDependencies.dependsOnPhaseId, phaseId));
    return edges.map((edge) => edge.phaseId);
  }

  /** Remove a single dependency edge; a no-op when the edge does not exist. */
  async removeDependency(phaseId: string, dependsOnPhaseId: string): Promise<void> {
    await this.db
      .delete(phaseDependencies)
      .where(
        and(
          eq(phaseDependencies.phaseId, phaseId),
          eq(phaseDependencies.dependsOnPhaseId, dependsOnPhaseId),
        ),
      );
  }
}

View File

@@ -0,0 +1,154 @@
/**
* Drizzle Project Repository Adapter
*
* Implements ProjectRepository interface using Drizzle ORM.
*/
import { eq, and, inArray } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { projects, initiativeProjects, type Project } from '../../schema.js';
import type {
ProjectRepository,
CreateProjectData,
UpdateProjectData,
} from '../project-repository.js';
/**
 * Drizzle ORM implementation of the ProjectRepository port, including the
 * initiative/project junction-table operations. The database handle is
 * constructor-injected so tests can use isolated databases.
 */
export class DrizzleProjectRepository implements ProjectRepository {
  constructor(private db: DrizzleDatabase) {}

  /** Insert a new project with a generated id and current timestamps. */
  async create(data: CreateProjectData): Promise<Project> {
    const timestamp = new Date();
    const [row] = await this.db
      .insert(projects)
      .values({ id: nanoid(), ...data, createdAt: timestamp, updatedAt: timestamp })
      .returning();
    return row;
  }

  /** Look up a project by primary key; null when absent. */
  async findById(id: string): Promise<Project | null> {
    const rows = await this.db.select().from(projects).where(eq(projects.id, id)).limit(1);
    return rows[0] ?? null;
  }

  /** Look up a project by name (first match); null when absent. */
  async findByName(name: string): Promise<Project | null> {
    const rows = await this.db.select().from(projects).where(eq(projects.name, name)).limit(1);
    return rows[0] ?? null;
  }

  /** All projects, in database order. */
  async findAll(): Promise<Project[]> {
    return this.db.select().from(projects);
  }

  /**
   * Apply a partial update and refresh updatedAt.
   * @throws Error when no project has the given id.
   */
  async update(id: string, data: UpdateProjectData): Promise<Project> {
    const [row] = await this.db
      .update(projects)
      .set({ ...data, updatedAt: new Date() })
      .where(eq(projects.id, id))
      .returning();
    if (!row) {
      throw new Error(`Project not found: ${id}`);
    }
    return row;
  }

  /**
   * Delete a project by id.
   * @throws Error when no project has the given id.
   */
  async delete(id: string): Promise<void> {
    const removed = await this.db.delete(projects).where(eq(projects.id, id)).returning();
    if (removed.length === 0) {
      throw new Error(`Project not found: ${id}`);
    }
  }

  // --- Initiative <-> project junction operations ---

  /** Associate a project with an initiative (inserts one junction row). */
  async addProjectToInitiative(initiativeId: string, projectId: string): Promise<void> {
    await this.db.insert(initiativeProjects).values({
      id: nanoid(),
      initiativeId,
      projectId,
      createdAt: new Date(),
    });
  }

  /** Remove one initiative/project association; a no-op when absent. */
  async removeProjectFromInitiative(initiativeId: string, projectId: string): Promise<void> {
    await this.db
      .delete(initiativeProjects)
      .where(
        and(
          eq(initiativeProjects.initiativeId, initiativeId),
          eq(initiativeProjects.projectId, projectId),
        ),
      );
  }

  /** Projects linked to an initiative via the junction table. */
  async findProjectsByInitiativeId(initiativeId: string): Promise<Project[]> {
    const rows = await this.db
      .select({ project: projects })
      .from(initiativeProjects)
      .innerJoin(projects, eq(initiativeProjects.projectId, projects.id))
      .where(eq(initiativeProjects.initiativeId, initiativeId));
    return rows.map((row) => row.project);
  }

  /**
   * Reconcile the initiative's project associations with `projectIds`:
   * associations not in the list are removed, missing ones are added, and
   * rows that are already correct are left untouched. Duplicate ids in the
   * input are collapsed via a Set.
   */
  async setInitiativeProjects(initiativeId: string, projectIds: string[]): Promise<void> {
    // Snapshot the current associations
    const currentRows = await this.db
      .select({ projectId: initiativeProjects.projectId })
      .from(initiativeProjects)
      .where(eq(initiativeProjects.initiativeId, initiativeId));
    const existing = new Set(currentRows.map((row) => row.projectId));
    const wanted = new Set(projectIds);
    // Diff: what to drop, what to insert
    const removals = [...existing].filter((pid) => !wanted.has(pid));
    const additions = [...wanted].filter((pid) => !existing.has(pid));
    if (removals.length > 0) {
      await this.db
        .delete(initiativeProjects)
        .where(
          and(
            eq(initiativeProjects.initiativeId, initiativeId),
            inArray(initiativeProjects.projectId, removals),
          ),
        );
    }
    if (additions.length > 0) {
      const timestamp = new Date();
      await this.db.insert(initiativeProjects).values(
        additions.map((projectId) => ({
          id: nanoid(),
          initiativeId,
          projectId,
          createdAt: timestamp,
        })),
      );
    }
  }
}

View File

@@ -0,0 +1,199 @@
/**
 * DrizzleTaskRepository Tests
 *
 * Tests for the Task repository adapter (CRUD plus per-phase ordering).
 *
 * Each test runs against a fresh in-memory database built by
 * createTestDatabase(). Tasks require a full initiative -> phase hierarchy
 * for their foreign keys, so beforeEach creates one.
 */
import { describe, it, expect, beforeEach } from 'vitest';
import { DrizzleTaskRepository } from './task.js';
import { DrizzlePhaseRepository } from './phase.js';
import { DrizzleInitiativeRepository } from './initiative.js';
import { createTestDatabase } from './test-helpers.js';
import type { DrizzleDatabase } from '../../index.js';
describe('DrizzleTaskRepository', () => {
  let db: DrizzleDatabase;
  let taskRepo: DrizzleTaskRepository;
  let phaseRepo: DrizzlePhaseRepository;
  let initiativeRepo: DrizzleInitiativeRepository;
  let testPhaseId: string;
  beforeEach(async () => {
    db = createTestDatabase();
    taskRepo = new DrizzleTaskRepository(db);
    phaseRepo = new DrizzlePhaseRepository(db);
    initiativeRepo = new DrizzleInitiativeRepository(db);
    // Create full hierarchy for FK constraint
    const initiative = await initiativeRepo.create({
      name: 'Test Initiative',
    });
    const phase = await phaseRepo.create({
      initiativeId: initiative.id,
      name: 'Test Phase',
    });
    testPhaseId = phase.id;
  });
  describe('create', () => {
    it('should create a task with generated id and timestamps', async () => {
      const task = await taskRepo.create({
        phaseId: testPhaseId,
        name: 'Test Task',
        description: 'A test task',
        order: 1,
      });
      expect(task.id).toBeDefined();
      expect(task.id.length).toBeGreaterThan(0);
      expect(task.phaseId).toBe(testPhaseId);
      expect(task.name).toBe('Test Task');
      // These pin the adapter's defaults when the fields are omitted
      expect(task.type).toBe('auto');
      expect(task.priority).toBe('medium');
      expect(task.status).toBe('pending');
      expect(task.order).toBe(1);
      expect(task.createdAt).toBeInstanceOf(Date);
      expect(task.updatedAt).toBeInstanceOf(Date);
    });
    it('should throw for invalid phaseId (FK constraint)', async () => {
      await expect(
        taskRepo.create({
          phaseId: 'invalid-phase-id',
          name: 'Invalid Task',
          order: 1,
        })
      ).rejects.toThrow();
    });
    it('should accept custom type and priority', async () => {
      const task = await taskRepo.create({
        phaseId: testPhaseId,
        name: 'Checkpoint Task',
        type: 'checkpoint:human-verify',
        priority: 'high',
        order: 1,
      });
      expect(task.type).toBe('checkpoint:human-verify');
      expect(task.priority).toBe('high');
    });
  });
  describe('findById', () => {
    it('should return null for non-existent task', async () => {
      const result = await taskRepo.findById('non-existent-id');
      expect(result).toBeNull();
    });
    it('should find an existing task', async () => {
      const created = await taskRepo.create({
        phaseId: testPhaseId,
        name: 'Find Me',
        order: 1,
      });
      const found = await taskRepo.findById(created.id);
      expect(found).not.toBeNull();
      expect(found!.id).toBe(created.id);
      expect(found!.name).toBe('Find Me');
    });
  });
  describe('findByPhaseId', () => {
    it('should return empty array for phase with no tasks', async () => {
      const tasks = await taskRepo.findByPhaseId(testPhaseId);
      expect(tasks).toEqual([]);
    });
    it('should return only matching tasks ordered by order field', async () => {
      // Create tasks out of order: the explicit `order` column (not
      // insertion order) must determine the result ordering.
      await taskRepo.create({
        phaseId: testPhaseId,
        name: 'Task 3',
        order: 3,
      });
      await taskRepo.create({
        phaseId: testPhaseId,
        name: 'Task 1',
        order: 1,
      });
      await taskRepo.create({
        phaseId: testPhaseId,
        name: 'Task 2',
        order: 2,
      });
      const tasks = await taskRepo.findByPhaseId(testPhaseId);
      expect(tasks.length).toBe(3);
      expect(tasks[0].name).toBe('Task 1');
      expect(tasks[1].name).toBe('Task 2');
      expect(tasks[2].name).toBe('Task 3');
    });
  });
  describe('update', () => {
    it('should update status correctly', async () => {
      const created = await taskRepo.create({
        phaseId: testPhaseId,
        name: 'Status Test',
        status: 'pending',
        order: 1,
      });
      const updated = await taskRepo.update(created.id, {
        status: 'in_progress',
      });
      expect(updated.status).toBe('in_progress');
    });
    it('should update fields and updatedAt', async () => {
      const created = await taskRepo.create({
        phaseId: testPhaseId,
        name: 'Original Name',
        order: 1,
      });
      // Small delay so the update's timestamp can differ from creation
      await new Promise((resolve) => setTimeout(resolve, 10));
      const updated = await taskRepo.update(created.id, {
        name: 'Updated Name',
        priority: 'low',
      });
      expect(updated.name).toBe('Updated Name');
      expect(updated.priority).toBe('low');
      expect(updated.updatedAt.getTime()).toBeGreaterThanOrEqual(created.updatedAt.getTime());
    });
    it('should throw for non-existent task', async () => {
      await expect(
        taskRepo.update('non-existent-id', { name: 'New Name' })
      ).rejects.toThrow('Task not found');
    });
  });
  describe('delete', () => {
    it('should delete an existing task', async () => {
      const created = await taskRepo.create({
        phaseId: testPhaseId,
        name: 'To Delete',
        order: 1,
      });
      await taskRepo.delete(created.id);
      const found = await taskRepo.findById(created.id);
      expect(found).toBeNull();
    });
    it('should throw for non-existent task', async () => {
      await expect(taskRepo.delete('non-existent-id')).rejects.toThrow(
        'Task not found'
      );
    });
  });
});

View File

@@ -0,0 +1,142 @@
/**
* Drizzle Task Repository Adapter
*
* Implements TaskRepository interface using Drizzle ORM.
*/
import { eq, asc, and } from 'drizzle-orm';
import { nanoid } from 'nanoid';
import type { DrizzleDatabase } from '../../index.js';
import { tasks, taskDependencies, type Task } from '../../schema.js';
import type {
TaskRepository,
CreateTaskData,
UpdateTaskData,
PendingApprovalFilters,
} from '../task-repository.js';
/**
 * Drizzle Task Repository Adapter
 *
 * Drizzle ORM implementation of the TaskRepository port, covering the
 * tasks table and the task-dependency edge table. The database handle is
 * injected via the constructor so tests can use isolated databases.
 */
export class DrizzleTaskRepository implements TaskRepository {
  constructor(private db: DrizzleDatabase) {}

  /**
   * Insert a new task. Generates a nanoid id and applies defaults for
   * omitted fields: type 'auto', category 'execute', priority 'medium',
   * status 'pending', order 0.
   */
  async create(data: CreateTaskData): Promise<Task> {
    const timestamp = new Date();
    const [row] = await this.db
      .insert(tasks)
      .values({
        id: nanoid(),
        ...data,
        type: data.type ?? 'auto',
        category: data.category ?? 'execute',
        priority: data.priority ?? 'medium',
        status: data.status ?? 'pending',
        order: data.order ?? 0,
        createdAt: timestamp,
        updatedAt: timestamp,
      })
      .returning();
    return row;
  }

  /** Look up a task by primary key; null when absent. */
  async findById(id: string): Promise<Task | null> {
    const rows = await this.db.select().from(tasks).where(eq(tasks.id, id)).limit(1);
    return rows[0] ?? null;
  }

  /** Subtasks of a parent task, sorted by the explicit `order` column. */
  async findByParentTaskId(parentTaskId: string): Promise<Task[]> {
    return this.db
      .select()
      .from(tasks)
      .where(eq(tasks.parentTaskId, parentTaskId))
      .orderBy(asc(tasks.order));
  }

  /** Tasks attached directly to an initiative, sorted by `order`. */
  async findByInitiativeId(initiativeId: string): Promise<Task[]> {
    return this.db
      .select()
      .from(tasks)
      .where(eq(tasks.initiativeId, initiativeId))
      .orderBy(asc(tasks.order));
  }

  /** Tasks belonging to a phase, sorted by `order`. */
  async findByPhaseId(phaseId: string): Promise<Task[]> {
    return this.db
      .select()
      .from(tasks)
      .where(eq(tasks.phaseId, phaseId))
      .orderBy(asc(tasks.order));
  }

  /**
   * Tasks in status 'pending_approval', optionally narrowed by initiative,
   * phase, and/or category. Results are oldest first (createdAt ascending).
   */
  async findPendingApproval(filters?: PendingApprovalFilters): Promise<Task[]> {
    const { initiativeId, phaseId, category } = filters ?? {};
    const predicates = [eq(tasks.status, 'pending_approval')];
    if (initiativeId) {
      predicates.push(eq(tasks.initiativeId, initiativeId));
    }
    if (phaseId) {
      predicates.push(eq(tasks.phaseId, phaseId));
    }
    if (category) {
      predicates.push(eq(tasks.category, category));
    }
    return this.db
      .select()
      .from(tasks)
      .where(and(...predicates))
      .orderBy(asc(tasks.createdAt));
  }

  /**
   * Apply a partial update and refresh updatedAt.
   * @throws Error when no task has the given id.
   */
  async update(id: string, data: UpdateTaskData): Promise<Task> {
    const [row] = await this.db
      .update(tasks)
      .set({ ...data, updatedAt: new Date() })
      .where(eq(tasks.id, id))
      .returning();
    if (!row) {
      throw new Error(`Task not found: ${id}`);
    }
    return row;
  }

  /**
   * Delete a task by id.
   * @throws Error when no task has the given id.
   */
  async delete(id: string): Promise<void> {
    const removed = await this.db.delete(tasks).where(eq(tasks.id, id)).returning();
    if (removed.length === 0) {
      throw new Error(`Task not found: ${id}`);
    }
  }

  /** Record a direct edge: `taskId` depends on `dependsOnTaskId`. */
  async createDependency(taskId: string, dependsOnTaskId: string): Promise<void> {
    await this.db.insert(taskDependencies).values({
      id: nanoid(),
      taskId,
      dependsOnTaskId,
      createdAt: new Date(),
    });
  }

  /** Ids of the tasks that `taskId` directly depends on. */
  async getDependencies(taskId: string): Promise<string[]> {
    const edges = await this.db
      .select({ dependsOnTaskId: taskDependencies.dependsOnTaskId })
      .from(taskDependencies)
      .where(eq(taskDependencies.taskId, taskId));
    return edges.map((edge) => edge.dependsOnTaskId);
  }
}

View File

@@ -0,0 +1,30 @@
/**
* Test helpers for repository tests.
*
* Provides utilities for setting up in-memory test databases
* with schema applied.
*/
import Database from 'better-sqlite3';
import { drizzle } from 'drizzle-orm/better-sqlite3';
import type { DrizzleDatabase } from '../../index.js';
import { ensureSchema } from '../../ensure-schema.js';
import * as schema from '../../schema.js';
/**
* Create an in-memory test database with schema applied.
* Returns a fresh Drizzle instance for each call.
*/
export function createTestDatabase(): DrizzleDatabase {
const sqlite = new Database(':memory:');
// Enable foreign keys
sqlite.pragma('foreign_keys = ON');
const db = drizzle(sqlite, { schema });
// Create all tables
ensureSchema(db);
return db;
}

View File

@@ -0,0 +1,74 @@
/**
 * Repository Port Interfaces
 *
 * Re-exports all repository port interfaces.
 * These are the PORTS in hexagonal architecture.
 * Implementations in ./drizzle/ are ADAPTERS.
 *
 * All re-exports are type-only (`export type`), so this barrel is fully
 * erased at compile time and adds nothing to the runtime bundle.
 */
// Planning hierarchy: initiative -> phase -> task.
export type {
  InitiativeRepository,
  CreateInitiativeData,
  UpdateInitiativeData,
} from './initiative-repository.js';
export type {
  PhaseRepository,
  CreatePhaseData,
  UpdatePhaseData,
} from './phase-repository.js';
export type {
  TaskRepository,
  CreateTaskData,
  UpdateTaskData,
  PendingApprovalFilters,
} from './task-repository.js';
// Execution and communication.
export type {
  AgentRepository,
  AgentStatus,
  CreateAgentData,
} from './agent-repository.js';
export type {
  MessageRepository,
  MessageParticipantType,
  MessageType,
  MessageStatus,
  CreateMessageData,
  UpdateMessageData,
} from './message-repository.js';
// Content and workspace entities.
export type {
  PageRepository,
  CreatePageData,
  UpdatePageData,
} from './page-repository.js';
export type {
  ProjectRepository,
  CreateProjectData,
  UpdateProjectData,
} from './project-repository.js';
export type {
  AccountRepository,
  CreateAccountData,
} from './account-repository.js';
// Change tracking, logging, and conversations.
export type {
  ChangeSetRepository,
  CreateChangeSetData,
  CreateChangeSetEntryData,
  ChangeSetWithEntries,
} from './change-set-repository.js';
export type {
  LogChunkRepository,
} from './log-chunk-repository.js';
export type {
  ConversationRepository,
  CreateConversationData,
} from './conversation-repository.js';

View File

@@ -0,0 +1,66 @@
/**
 * Initiative Repository Port Interface
 *
 * Port for Initiative aggregate operations.
 * Implementations (Drizzle, etc.) are adapters.
 */
import type { Initiative, NewInitiative } from '../schema.js';

/**
 * Data for creating a new initiative.
 * Omits system-managed fields (id, createdAt, updatedAt).
 */
export type CreateInitiativeData = Omit<NewInitiative, 'id' | 'createdAt' | 'updatedAt'>;

/**
 * Data for updating an initiative.
 * Partial of creation data - all fields optional.
 */
export type UpdateInitiativeData = Partial<CreateInitiativeData>;

/**
 * Initiative Repository Port
 *
 * Defines operations for the Initiative aggregate.
 * Only knows about initiatives - no knowledge of child entities.
 */
export interface InitiativeRepository {
  /**
   * Create a new initiative.
   * Generates id and sets timestamps automatically.
   */
  create(data: CreateInitiativeData): Promise<Initiative>;

  /**
   * Find an initiative by its ID.
   * Returns null if not found.
   */
  findById(id: string): Promise<Initiative | null>;

  /**
   * Find all initiatives.
   * Returns empty array if none exist.
   */
  findAll(): Promise<Initiative[]>;

  /**
   * Find all initiatives with a specific status.
   * Returns empty array if none exist.
   *
   * NOTE(review): the status union is spelled out here rather than derived
   * from the schema (e.g. Initiative['status']) — confirm the two stay in sync.
   */
  findByStatus(status: 'active' | 'completed' | 'archived'): Promise<Initiative[]>;

  /**
   * Update an initiative.
   * Throws if initiative not found.
   * Updates updatedAt timestamp automatically.
   */
  update(id: string, data: UpdateInitiativeData): Promise<Initiative>;

  /**
   * Delete an initiative.
   * Throws if initiative not found.
   * Cascades to child phases, plans, tasks via FK constraints.
   */
  delete(id: string): Promise<void>;
}

View File

@@ -0,0 +1,23 @@
/**
 * Log Chunk Repository Port Interface
 *
 * Port for agent log chunk persistence operations.
 * No FK to agents — chunks survive agent deletion.
 */
import type { AgentLogChunk } from '../schema.js';

export interface LogChunkRepository {
  /**
   * Append one chunk of log output for an agent session.
   * agentName is stored alongside the ID so chunks remain identifiable
   * after the agent row is deleted (no FK — see header note).
   */
  insertChunk(data: {
    agentId: string;
    agentName: string;
    sessionNumber: number;
    content: string;
  }): Promise<void>;

  /**
   * Fetch the stored chunks for an agent, projected to the columns
   * needed for log reconstruction (content, sessionNumber, createdAt).
   *
   * NOTE(review): ordering is not specified by this port — presumably
   * createdAt/insertion order; confirm against the adapter.
   */
  findByAgentId(agentId: string): Promise<Pick<AgentLogChunk, 'content' | 'sessionNumber' | 'createdAt'>[]>;

  /** Remove all stored chunks for an agent. */
  deleteByAgentId(agentId: string): Promise<void>;

  /**
   * Session count for an agent.
   * NOTE(review): presumably the number of distinct sessionNumber values
   * recorded for this agent — confirm in the adapter implementation.
   */
  getSessionCount(agentId: string): Promise<number>;
}

View File

@@ -0,0 +1,118 @@
/**
 * Message Repository Port Interface
 *
 * Port for Message aggregate operations.
 * Implementations (Drizzle, etc.) are adapters.
 *
 * Messages persist agent questions for users to query and respond later.
 * Supports threading via parentMessageId for response linking.
 */
import type { Message } from '../schema.js';

/**
 * Message sender/recipient type.
 */
export type MessageParticipantType = 'agent' | 'user';

/**
 * Message type.
 */
export type MessageType = 'question' | 'info' | 'error' | 'response';

/**
 * Message status.
 */
export type MessageStatus = 'pending' | 'read' | 'responded';

/**
 * Data for creating a new message.
 * Omits system-managed fields (id, createdAt, updatedAt).
 */
export interface CreateMessageData {
  // Who sent the message.
  senderType: MessageParticipantType;
  // Agent ID when senderType='agent'; omitted/null for user senders.
  senderId?: string | null;
  // Who the message is addressed to.
  recipientType: MessageParticipantType;
  // Agent ID when recipientType='agent'; omitted/null for user recipients.
  recipientId?: string | null;
  // NOTE(review): default when omitted is adapter-defined — confirm.
  type?: MessageType;
  content: string;
  // Whether the sender expects a reply (see findRequiringResponse).
  requiresResponse?: boolean;
  // NOTE(review): presumably defaults to 'pending' — confirm in adapter.
  status?: MessageStatus;
  // Parent message for threaded replies (see findReplies).
  parentMessageId?: string | null;
}

/**
 * Data for updating a message.
 * Partial of create data - all fields optional.
 */
export type UpdateMessageData = Partial<CreateMessageData>;

/**
 * Message Repository Port
 *
 * Defines operations for the Message aggregate.
 * Enables message persistence for agent-user communication.
 */
export interface MessageRepository {
  /**
   * Create a new message.
   * Generates id and sets timestamps automatically.
   */
  create(data: CreateMessageData): Promise<Message>;

  /**
   * Find a message by its ID.
   * Returns null if not found.
   */
  findById(id: string): Promise<Message | null>;

  /**
   * Find messages by sender.
   * @param type - 'agent' or 'user'
   * @param id - Optional sender ID (agent ID if type='agent', omit for user)
   * Returns messages ordered by createdAt DESC.
   */
  findBySender(type: MessageParticipantType, id?: string): Promise<Message[]>;

  /**
   * Find messages by recipient.
   * @param type - 'agent' or 'user'
   * @param id - Optional recipient ID (agent ID if type='agent', omit for user)
   * Returns messages ordered by createdAt DESC.
   */
  findByRecipient(type: MessageParticipantType, id?: string): Promise<Message[]>;

  /**
   * Find all pending messages for user.
   * Returns messages where recipientType='user' and status='pending'.
   * Ordered by createdAt DESC.
   */
  findPendingForUser(): Promise<Message[]>;

  /**
   * Find all messages requiring a response.
   * Returns messages where requiresResponse=true and status='pending'.
   * Ordered by createdAt DESC.
   */
  findRequiringResponse(): Promise<Message[]>;

  /**
   * Find all replies to a message.
   * @param parentMessageId - The ID of the parent message
   * Returns messages ordered by createdAt DESC.
   */
  findReplies(parentMessageId: string): Promise<Message[]>;

  /**
   * Update a message.
   * Throws if message not found.
   * Updates updatedAt timestamp automatically.
   */
  update(id: string, data: UpdateMessageData): Promise<Message>;

  /**
   * Delete a message.
   * Throws if message not found.
   */
  delete(id: string): Promise<void>;
}

View File

@@ -0,0 +1,34 @@
/**
 * Page Repository Port Interface
 *
 * Port for Page aggregate operations.
 * Implementations (Drizzle, etc.) are adapters.
 */
import type { Page, NewPage } from '../schema.js';

/**
 * Data for creating a new page.
 * Omits system-managed fields (id, createdAt, updatedAt).
 */
export type CreatePageData = Omit<NewPage, 'id' | 'createdAt' | 'updatedAt'>;

/**
 * Data for updating a page.
 * Only title, content, and sortOrder are mutable through this port.
 */
export type UpdatePageData = Partial<Pick<NewPage, 'title' | 'content' | 'sortOrder'>>;

/**
 * Page Repository Port
 *
 * Defines operations for the Page aggregate, including the
 * per-initiative page tree (root page + children via parentPageId).
 */
export interface PageRepository {
  /** Create a new page. */
  create(data: CreatePageData): Promise<Page>;

  /** Find a page by its ID; null if not found. */
  findById(id: string): Promise<Page | null>;

  /**
   * Batch lookup by IDs.
   * NOTE(review): result ordering and handling of missing IDs are not
   * specified by this port — confirm against the adapter.
   */
  findByIds(ids: string[]): Promise<Page[]>;

  /** All pages belonging to an initiative. */
  findByInitiativeId(initiativeId: string): Promise<Page[]>;

  /** Direct children of the given page. */
  findByParentPageId(parentPageId: string): Promise<Page[]>;

  /** The initiative's root page, or null if it does not exist yet. */
  findRootPage(initiativeId: string): Promise<Page | null>;

  /** Return the initiative's root page, creating it if absent. */
  getOrCreateRootPage(initiativeId: string): Promise<Page>;

  /**
   * Update a page.
   * NOTE(review): presumably throws when the page is not found, matching
   * the other repository ports — confirm in the adapter.
   */
  update(id: string, data: UpdatePageData): Promise<Page>;

  /** Delete a page. */
  delete(id: string): Promise<void>;
}

Some files were not shown because too many files have changed in this diff Show More