Coder Agent - reza899/AutoSDLC GitHub Wiki
#AutoSDLC #Agent #Coder #Specification
The Coder Agent is the implementation specialist within the AutoSDLC system. It transforms technical specifications into working code, writes comprehensive tests, creates documentation, and ensures code quality through best practices and design patterns.
- Analyzes technical specifications from GitHub issues
- Generates production-ready code following best practices
- Implements features across multiple programming languages
- Applies appropriate design patterns and architectures
- CRITICAL: Always follows strict TDD workflow
- Receives test specifications or failing tests from PM Agent
- Verifies ALL tests are red before any implementation
- Verifies test coverage matches product specifications
- Writes minimal code to make tests pass (no extra features)
- Refactors only after tests are green
- Never uses mocks - works with real implementations only
- Updates Agent_Output.md with test status at each phase
- Generates inline code documentation
- Creates API documentation
- Updates README files
- Writes technical documentation for complex features
- Follows coding standards and conventions
- Implements error handling and logging
- Optimizes for performance and scalability
- Ensures security best practices
export class CoderAgent extends BaseAgent {
  private codeGenerator: CodeGenerator;
  private testRunner: TestRunner;
  private tddImplementer: TDDImplementer;
  private documentationEngine: DocumentationEngine;
  private codeAnalyzer: CodeAnalyzer;
  private workingDir: string = './agents/coder-agent';

  constructor(config: CoderAgentConfig) {
    super(config);
    this.type = AgentType.CODER;
    // Capabilities this agent advertises; mirrors config/agents/coder.yaml.
    this.capabilities = [
      'tdd_implementation',
      'code_generation',
      'test_verification',
      'documentation_writing',
      'refactoring',
      'bug_fixing',
      'api_development'
    ];
  }

  /**
   * Boots the agent. Order matters: base initialization first, then local
   * tooling, and finally the agent's own MCP endpoint so it only accepts
   * traffic once fully set up.
   */
  async initialize(): Promise<void> {
    await super.initialize();
    await this.loadLanguageModels();
    await this.initializeCodeTemplates();
    await this.setupDevelopmentEnvironment();
    await this.initializeClaudeCode();
    await this.startMCPServer(8083); // Agent as MCP server
  }

  /**
   * Dispatches a task to its handler, recording start and completion status
   * in Agent_Output.md around the actual work.
   *
   * @throws Error when task.type has no registered handler.
   */
  async processTask(task: Task): Promise<TaskResult> {
    // Mark the agent as busy before doing anything else.
    await this.updateAgentOutput({
      status: 'processing',
      currentTask: task.type,
      timestamp: new Date()
    });

    // Task-type -> handler table (replaces a switch; same routing).
    const handlers: Record<string, (t: Task) => Promise<TaskResult>> = {
      tdd_implementation: (t) => this.implementWithTDD(t),
      implement_feature: (t) => this.implementFeature(t),
      fix_bug: (t) => this.fixBug(t),
      verify_tests_red: (t) => this.verifyTestsRed(t),
      refactor_code: (t) => this.refactorCode(t),
      generate_documentation: (t) => this.generateDocumentation(t)
    };

    const handler = handlers[task.type];
    if (!handler) {
      throw new Error(`Unknown task type: ${task.type}`);
    }
    const result = await handler(task);

    // Record the outcome, including red/green test state, then go idle.
    await this.updateAgentOutput({
      status: 'idle',
      lastCompletedTask: task.type,
      lastResult: result.success ? 'success' : 'failed',
      testStatus: result.testsPassing ? 'green' : 'red',
      timestamp: new Date()
    });

    return result;
  }
}
/** Runtime state the Coder Agent tracks beyond the base AgentState. */
interface CoderAgentState extends AgentState {
currentImplementation: Implementation;
codebaseContext: CodebaseContext;
testCoverage: TestCoverage;
technicalDebt: TechnicalDebt[];
performanceMetrics: CodePerformanceMetrics;
}
/** One unit of implementation work, tied to a GitHub issue and branch. */
interface Implementation {
id: string;
// ID of the task that requested this implementation.
taskId: string;
// GitHub issue number the work originates from.
githubIssue: number;
// Feature branch the files are committed to.
branch: string;
files: ImplementedFile[];
tests: TestFile[];
status: ImplementationStatus;
// Set once a PR has been opened for this implementation.
pullRequestUrl?: string;
}
/** Snapshot of the surrounding codebase used to guide code generation. */
interface CodebaseContext {
projectStructure: ProjectStructure;
dependencies: Dependency[];
codingStandards: CodingStandard[];
architecturePatterns: Pattern[];
existingAPIs: API[];
}
class CodeGenerator {
  /**
   * Turns a technical specification into generated code.
   *
   * Pipeline: analyze spec -> pick approach -> scaffold structure ->
   * implement components (concurrently) -> integrate -> optimize.
   */
  async generateCode(
    specification: TechnicalSpecification,
    context: CodebaseContext
  ): Promise<GeneratedCode> {
    const analysis = await this.analyzeSpecification(specification);
    const approach = await this.determineApproach(analysis, context);
    const structure = await this.generateStructure(approach);

    // Components are implemented concurrently via Promise.all.
    const implementations = await Promise.all(
      structure.components.map((component) =>
        this.implementComponent(component, context)
      )
    );

    const integrated = await this.integrateComponents(implementations);
    return this.optimizeCode(integrated);
  }

  /**
   * Implements a single component from a type-specific template, then
   * validates it and resolves its dependencies and interfaces.
   */
  private async implementComponent(
    component: Component,
    context: CodebaseContext
  ): Promise<ComponentImplementation> {
    const template = await this.selectTemplate(component.type);
    const customized = await this.customizeTemplate(template, component);
    return {
      component,
      code: await this.validateImplementation(customized),
      dependencies: await this.resolveDependencies(component),
      interfaces: await this.defineInterfaces(component)
    };
  }
}
class TDDImplementer {
async implementWithTDD(
failingTests: TestSuite,
specification: TechnicalSpecification
): Promise<TDDImplementation> {
// Step 1: Verify all tests are red
const redVerification = await this.verifyTestsRed(failingTests);
if (!redVerification.allRed) {
throw new Error('Not all tests are failing. Cannot proceed with TDD.');
}
// Step 2: Analyze failing tests to understand requirements
const requirements = await this.analyzeTestRequirements(failingTests);
// Step 3: Implement minimal code to pass tests
let implementation = await this.implementMinimalCode(requirements);
// Step 4: Run tests iteratively until all pass
let testResults = await this.runTests(failingTests, implementation);
let iterations = 0;
while (!testResults.allPassing && iterations < 10) {
// Analyze failures and adjust implementation
const failures = this.analyzeFailures(testResults);
implementation = await this.adjustImplementation(implementation, failures);
// Re-run tests
testResults = await this.runTests(failingTests, implementation);
iterations++;
// Update progress in Agent_Output.md
await this.updateTDDProgress({
iteration: iterations,
passingTests: testResults.passing,
totalTests: testResults.total,
currentStatus: 'implementing'
});
}
// Step 5: Refactor while keeping tests green
const refactoredCode = await this.refactorCode(implementation, failingTests);
// Verify tests still pass after refactoring
const finalResults = await this.runTests(failingTests, refactoredCode);
if (!finalResults.allPassing) {
throw new Error('Refactoring broke tests!');
}
return {
implementation: refactoredCode,
testResults: finalResults,
iterations: iterations,
coverage: await this.calculateCoverage(refactoredCode)
};
}
private async verifyTestsRed(tests: TestSuite): Promise<RedVerification> {
// Run tests without any implementation
const results = await this.testRunner.run(tests, null);
return {
allRed: results.failing === results.total,
results: results,
missingTests: results.total === 0
};
}
private async implementMinimalCode(
requirements: TestRequirements
): Promise<Implementation> {
// Implement just enough to make tests pass
const minimalImpl = {
files: [],
dependencies: requirements.dependencies
};
for (const req of requirements.functionRequirements) {
const impl = await this.implementFunction(req, {
minimal: true,
noExtraFeatures: true,
justPassTests: true
});
minimalImpl.files.push(impl);
}
return minimalImpl;
}
}
// Test Runner without mocks
class TestRunner {
  /**
   * Executes a suite against a real (non-mocked) environment.
   *
   * @param tests - Suite to execute.
   * @param implementation - Implementation to deploy first, or null to run
   *   the suite "empty" (used for red-phase verification).
   * @returns Aggregate pass/fail counts plus per-test failure details.
   */
  async run(
    tests: TestSuite,
    implementation: Implementation | null
  ): Promise<TestResults> {
    // Real environment - the TDD policy forbids mocks.
    const testEnv = await this.setupRealEnvironment();

    // Fix: `failures: []` inferred never[] (rejecting push) and the catch
    // variable was used as if typed Error; both fail under strict mode.
    const results = {
      total: tests.tests.length,
      passing: 0,
      failing: 0,
      failures: [] as Array<{ test: string; error: string; stack?: string }>
    };

    try {
      if (implementation) {
        await this.deployImplementation(implementation, testEnv);
      }

      for (const test of tests.tests) {
        try {
          // A test passes iff executeTest resolves; any throw is a failure.
          await this.executeTest(test, testEnv);
          results.passing++;
        } catch (error) {
          results.failing++;
          // Narrow unknown -> Error before reading message/stack.
          const err = error instanceof Error ? error : new Error(String(error));
          results.failures.push({
            test: test.name,
            error: err.message,
            stack: err.stack
          });
        }
      }
    } finally {
      // Fix: teardown now runs even if deployment or iteration throws,
      // so the real environment is never leaked.
      await this.teardownEnvironment(testEnv);
    }

    return results;
  }
}
class BugFixer {
  /**
   * End-to-end bug resolution: reproduce, diagnose, patch, verify, and add
   * regression tests so the bug cannot silently return.
   */
  async fixBug(
    bugReport: BugReport,
    codebase: Codebase
  ): Promise<BugFix> {
    // A fix without a reproduction is guesswork - reproduce first.
    const reproduction = await this.reproduceBug(bugReport);
    const rootCause = await this.identifyRootCause(reproduction, codebase);

    // Several candidate strategies are generated; the first is applied
    // (presumably generateFixStrategies ranks them - TODO confirm).
    const [preferredStrategy] = await this.generateFixStrategies(rootCause);
    const fix = await this.implementFix(preferredStrategy, rootCause);

    const verification = await this.verifyFix(fix, bugReport);
    const regressionTests = await this.createRegressionTests(bugReport, fix);

    return {
      bug: bugReport,
      rootCause,
      fix,
      tests: regressionTests,
      verification
    };
  }
}
class CodeRefactorer {
  /**
   * Applies a goal-driven refactoring plan step by step, re-running the test
   * suite after each step so a regression is attributed to the exact step
   * that introduced it.
   *
   * @throws If any refactoring step causes the tests to fail.
   */
  async refactorCode(
    code: Code,
    goals: RefactoringGoal[]
  ): Promise<RefactoredCode> {
    const analysis = await this.analyzeCode(code);
    const opportunities = await this.identifyOpportunities(analysis, goals);
    const plan = await this.createRefactoringPlan(opportunities);

    // Apply the plan incrementally, verifying green after every step.
    let current = code;
    for (const step of plan.steps) {
      current = await this.executeRefactoringStep(step, current);
      const testsPass = await this.runTests(current);
      if (!testsPass) {
        throw new Error(`Refactoring broke tests at step: ${step.name}`);
      }
    }

    // Quantify what the refactoring actually bought us.
    const improvements = await this.measureImprovements(code, current);

    return {
      original: code,
      refactored: current,
      improvements,
      plan
    };
  }
}
// Task receipt acknowledgment
// Sent by the Coder Agent to the PM Agent right after accepting a task,
// including an estimate and any known risks/dependencies.
interface TaskAcknowledgment {
type: 'TASK_ACKNOWLEDGED';
from: 'coder-agent';
to: 'pm-agent';
payload: {
taskId: string;
estimatedCompletion: Date;
dependencies: string[];
risks: Risk[];
};
}
// Implementation complete notification
// Sent to the PM Agent when a task's pull request is ready.
interface ImplementationComplete {
type: 'IMPLEMENTATION_COMPLETE';
from: 'coder-agent';
to: 'pm-agent';
payload: {
taskId: string;
pullRequestUrl: string;
filesChanged: number;
// Coverage percentage for the implementation's tests.
testCoverage: number;
// Whether documentation was produced alongside the code.
documentation: boolean;
};
}
// Review request
// Asks the Reviewer Agent to review a PR, with urgency and focus hints.
interface ReviewRequest {
type: 'REVIEW_REQUEST';
from: 'coder-agent';
to: 'reviewer-agent';
payload: {
pullRequestId: string;
urgency: 'low' | 'medium' | 'high';
focusAreas: string[];
context: ReviewContext;
};
}
// Review feedback response
// Reports back to the Reviewer Agent how a piece of feedback was addressed.
interface ReviewFeedbackResponse {
type: 'FEEDBACK_ADDRESSED';
from: 'coder-agent';
to: 'reviewer-agent';
payload: {
pullRequestId: string;
feedbackId: string;
changes: Change[];
explanation: string;
};
}
# agents/coder-agent/CLAUDE.md
## Coder Agent Instructions
You are the Coder Agent in the AutoSDLC system. Your primary role is to implement code using Test-Driven Development (TDD) methodology.
### Working Directory
- **Write Access**: `./Agent_Output.md` and all code files in your directory
- **Read Access**:
- Your own directory (full access)
- `../shared/Agent_Status/` (other agents' status)
- Test specifications and requirements
### Key Responsibilities
1. Verify tests are red before implementation
2. Write minimal code to make tests pass
3. Refactor while keeping tests green
4. Never use mocks - work with real implementations
5. Document code and update Agent_Output.md
### TDD Workflow
1. **Receive failing tests** from PM Agent
2. **Verify all tests fail** (red phase)
3. **Implement minimal code** to pass tests (green phase)
4. **Refactor** for quality while keeping tests green
5. **Update status** in Agent_Output.md
### Communication Protocol
1. Update `Agent_Output.md` every 60 seconds with:
- Current implementation status
- Test results (passing/failing counts)
- Code coverage metrics
- Current file being worked on
2. Check PM Agent status for new tasks:
- `../shared/Agent_Status/pm_status.md`
3. Notify when implementation is complete
### Available Commands
Execute from `.claude/commands/`:
- `run-tests.sh [TEST_FILE]` - Run specific test file
- `verify-red.sh` - Verify all tests are failing
- `check-coverage.sh` - Check test coverage
- `commit-implementation.sh [MESSAGE]` - Commit code changes
### Guidelines
- Write clean, efficient, and well-documented code
- Use meaningful variable and function names
- Implement proper error handling
- Follow DRY principles
- Consider edge cases
- Document complex logic
# agents/coder-agent/.claude/commands/verify-red.sh
#!/bin/bash
# Verify all tests are failing (red) before implementation begins.
# Usage: verify-red.sh [TEST_DIR]
# NOTE(review): TEST_DIR was captured but never used in the original;
# kept (with a default) for interface compatibility - confirm intended use.
TEST_DIR=${1:-.}

echo "Verifying tests are red..."

# Run tests and capture machine-readable results.
npm test -- --no-coverage --json > test-results.json

# Fix: default to 0 when a field is missing/null so the numeric
# comparisons below cannot fail on an empty string.
FAILED=$(jq -r '.numFailedTests // 0' test-results.json)
TOTAL=$(jq -r '.numTotalTests // 0' test-results.json)

# Fix: an empty suite previously passed verification (0 failed == 0 total),
# which matches nothing the TDD workflow can implement against.
if [ "$TOTAL" -eq 0 ]; then
echo "✗ No tests found - cannot verify red phase"
exit 1
fi

if [ "$FAILED" -eq "$TOTAL" ]; then
echo "✓ All $TOTAL tests are failing (red)"
echo "Ready to start implementation"
else
echo "✗ Not all tests are failing!"
echo "Failed: $FAILED / Total: $TOTAL"
echo "Cannot proceed with TDD - some tests are already passing"
exit 1
fi

# Update Agent_Output.md
echo "Test verification completed at $(date)" >> ../Agent_Output.md
echo "Status: $FAILED/$TOTAL tests failing (red)" >> ../Agent_Output.md
You are the Coder Agent in the AutoSDLC system. Follow the instructions in CLAUDE.md for all operations.
Your primary focus is Test-Driven Development:
1. Always verify tests are red before implementing
2. Write minimal code to make tests pass
3. Never use mocks - work with real implementations
4. Refactor only after tests are green
5. Update Agent_Output.md regularly
Use commands in .claude/commands/ for test operations.
Task: Implement the following feature based on the technical specification
GitHub Issue: #{issue_number}
Title: {issue_title}
Technical Specification:
{technical_spec}
Acceptance Criteria:
{acceptance_criteria}
Existing Codebase Context:
- Project Structure: {project_structure}
- Language/Framework: {language_framework}
- Coding Standards: {coding_standards}
- Related Code: {related_code}
Please implement:
1. Core functionality meeting all requirements
2. Comprehensive unit tests (>80% coverage)
3. Integration tests for API endpoints
4. Error handling and validation
5. Logging for debugging
6. Documentation (inline comments and API docs)
7. Performance optimizations where applicable
Deliverables:
- Implementation files with clear structure
- Test files with comprehensive coverage
- Updated documentation
- Any necessary configuration changes
Task: Fix the following bug
Bug Report:
- Issue: {bug_description}
- Steps to Reproduce: {reproduction_steps}
- Expected Behavior: {expected}
- Actual Behavior: {actual}
- Error Messages: {errors}
Context:
- Affected Code: {affected_files}
- Recent Changes: {recent_commits}
- Related Issues: {related_issues}
Please:
1. Identify the root cause of the bug
2. Implement a fix that addresses the root cause
3. Add regression tests to prevent recurrence
4. Ensure no side effects or breaking changes
5. Document the fix and reasoning
Provide:
- Root cause analysis
- Fix implementation
- Regression tests
- Verification steps
# config/agents/coder.yaml
# NOTE(review): nesting indentation was lost when this page was extracted
# from the wiki; restore proper YAML indentation before using this file.
agent:
id: coder-001
type: coder
name: "Coder Agent"
workingDirectory: "./agents/coder-agent"
# Claude Code configuration
claudeCode:
mode: "headless"
profile: "coder-agent"
instructionsFile: "CLAUDE.md"
outputFile: "Agent_Output.md"
commandsDirectory: ".claude/commands"
# MCP configuration (agent as both client and server)
mcp:
client:
serverUrl: "http://localhost:8080"
server:
port: 8083
# Capabilities exposed on the agent's own MCP server.
capabilities:
- "implementation_status"
- "test_results"
- "code_metrics"
# Agent-level capabilities (distinct from the MCP-server list above);
# mirrors the list set in CoderAgent's constructor.
capabilities:
- tdd_implementation
- code_generation
- test_verification
- documentation_writing
- refactoring
- bug_fixing
- api_development
languages:
primary:
- typescript
- javascript
- python
- java
secondary:
- go
- rust
- c#
- ruby
frameworks:
frontend:
- react
- vue
- angular
backend:
- node.js
- django
- spring
- fastapi
testing:
- jest
- pytest
- junit
- mocha
code_quality:
linting:
enabled: true
rules: "eslint:recommended"
formatting:
enabled: true
style: "prettier"
complexity:
maxCyclomaticComplexity: 10
maxFileLength: 500
maxFunctionLength: 50
testing:
coverageThreshold: 80
testTypes:
- unit
- integration
- e2e
mockingEnabled: false # No mocks in TDD
tdd:
enabled: true
verifyRedBeforeImplementation: true
minimalImplementationFirst: true
performance:
optimizationLevel: "balanced"
targetResponseTime: 200 # ms
memoryLimit: "512MB"
prompts:
systemPrompt: "prompts/coder/system.txt"
implementationPrompt: "prompts/coder/implementation.txt"
testingPrompt: "prompts/coder/testing.txt"
bugFixPrompt: "prompts/coder/bugfix.txt"
# config/code-templates.yaml
# NOTE(review): nesting indentation was lost when this page was extracted
# from the wiki; restore proper YAML indentation before using this file.
templates:
api_endpoint:
express:
file: "templates/express-endpoint.ts"
variables:
- route
- method
- handler
- validation
- authentication
react_component:
functional:
file: "templates/react-functional.tsx"
variables:
- name
- props
- state
- hooks
test_suite:
jest:
file: "templates/jest-test-suite.ts"
variables:
- testTarget
- testCases
# NOTE(review): a "mocks" template variable conflicts with the agent's
# no-mocks TDD policy (mockingEnabled: false in coder.yaml) - confirm
# whether this variable should exist.
- mocks
patterns:
repository:
description: "Repository pattern for data access"
files:
- "templates/patterns/repository-interface.ts"
- "templates/patterns/repository-implementation.ts"
factory:
description: "Factory pattern for object creation"
files:
- "templates/patterns/factory.ts"
graph TB
A[Receive Failing Tests] --> B[Verify Tests Are Red]
B --> C{All Tests Failing?}
C -->|No| D[Report Error to PM]
C -->|Yes| E[Analyze Test Requirements]
E --> F[Implement Minimal Code]
F --> G[Run Tests]
G --> H{Tests Passing?}
H -->|No| I[Adjust Implementation]
I --> G
H -->|Yes| J[Refactor Code]
J --> K[Run Tests Again]
K --> L{Still Green?}
L -->|No| M[Fix Refactoring]
M --> K
L -->|Yes| N[Update Documentation]
N --> O[Create Pull Request]
O --> P[Update Agent_Output.md]
P --> Q[Notify PM Agent]
graph LR
A[Bug Report] --> B[Reproduce Bug]
B --> C[Debug & Analyze]
C --> D[Identify Root Cause]
D --> E[Develop Fix]
E --> F[Test Fix]
F --> G[Regression Tests]
G --> H[Update PR]
H --> I[Request Review]
/**
 * Performance metrics collected for the Coder Agent, grouped by concern.
 * Consumed by CoderPerformanceOptimizer to derive improvement actions.
 */
interface CoderAgentMetrics {
// Productivity metrics
linesOfCodePerHour: number;
featuresImplementedPerSprint: number;
averageImplementationTime: number; // minutes (compared against 240 = 4h)
// Quality metrics
defectDensity: number; // bugs per 1000 lines
testCoverage: number; // percentage
codeComplexity: number; // cyclomatic complexity
documentationCompleteness: number; // percentage
// Efficiency metrics
firstTimeSuccessRate: number; // % PRs approved without changes (0..1)
reworkRate: number; // % code requiring changes
buildSuccessRate: number; // % builds passing
// Collaboration metrics
reviewTurnaroundTime: number; // hours
communicationResponseTime: number; // minutes
}
class CoderPerformanceOptimizer {
  /**
   * Maps observed metrics to concrete improvement actions.
   *
   * Thresholds (from the checks below): defectDensity > 5 bugs/kloc,
   * averageImplementationTime > 240 minutes (4 hours),
   * firstTimeSuccessRate < 0.7.
   *
   * @param metrics - Snapshot of the coder agent's performance metrics.
   * @returns Zero or more suggested optimizations, one per triggered threshold.
   */
  async optimizePerformance(
    metrics: CoderAgentMetrics
  ): Promise<Optimization[]> {
    // Typed explicitly rather than relying on implicit any[] evolution,
    // so pushes are checked against the declared element type.
    const optimizations: Optimization[] = [];

    // Quality: high defect density -> invest in test coverage.
    if (metrics.defectDensity > 5) {
      optimizations.push({
        area: 'testing',
        action: 'increase_test_coverage',
        expected: '40% reduction in defects'
      });
    }

    // Productivity: implementations slower than 4 hours -> grow templates.
    if (metrics.averageImplementationTime > 240) { // 4 hours
      optimizations.push({
        area: 'templates',
        action: 'expand_code_templates',
        expected: '30% faster implementation'
      });
    }

    // Collaboration: too many PRs need rework -> tighten standards adherence.
    if (metrics.firstTimeSuccessRate < 0.7) {
      optimizations.push({
        area: 'standards',
        action: 'improve_coding_standards_adherence',
        expected: '25% improvement in first-time success'
      });
    }

    return optimizations;
  }
}
/**
 * Error categories the Coder Agent can raise; routed by
 * CoderAgentErrorHandler to a recovery strategy.
 */
enum CoderAgentError {
SPECIFICATION_UNCLEAR = 'SPECIFICATION_UNCLEAR',
DEPENDENCY_CONFLICT = 'DEPENDENCY_CONFLICT',
TEST_FAILURE = 'TEST_FAILURE',
BUILD_FAILURE = 'BUILD_FAILURE',
MERGE_CONFLICT = 'MERGE_CONFLICT',
PERFORMANCE_REGRESSION = 'PERFORMANCE_REGRESSION'
}
class CoderAgentErrorHandler {
  /**
   * Routes a coder-agent error to its recovery strategy. Errors with no
   * dedicated handler fall through to PM escalation.
   *
   * NOTE(review): BUILD_FAILURE has no dedicated branch and is therefore
   * escalated - confirm that routing is intentional.
   */
  async handle(
    error: CoderAgentError,
    context: ErrorContext
  ): Promise<Resolution> {
    if (error === CoderAgentError.SPECIFICATION_UNCLEAR) {
      return this.requestClarification(context);
    }
    if (error === CoderAgentError.DEPENDENCY_CONFLICT) {
      return this.resolveDependencyConflict(context);
    }
    if (error === CoderAgentError.TEST_FAILURE) {
      return this.debugAndFixTests(context);
    }
    if (error === CoderAgentError.MERGE_CONFLICT) {
      return this.resolveMergeConflict(context);
    }
    if (error === CoderAgentError.PERFORMANCE_REGRESSION) {
      return this.optimizePerformance(context);
    }
    // Anything unhandled is escalated to the PM agent.
    return this.escalateToPM(error, context);
  }
}
import { CoderAgent } from '@autosdlc/agents';

// Instantiate the agent with its language/framework profile.
const coderAgent = new CoderAgent({
  id: 'coder-001',
  mcpServerUrl: process.env.MCP_SERVER_URL,
  config: {
    languages: ['typescript', 'python'],
    frameworks: ['react', 'fastapi'],
    testingFramework: 'jest',
    codeQuality: {
      linting: true,
      formatting: true,
      coverageThreshold: 80
    }
  }
});

await coderAgent.initialize();
await coderAgent.start();
console.log('Coder Agent ready for implementation tasks');

// Handle incoming implementation tasks end-to-end:
// parse issue -> branch -> implement -> test -> open PR -> notify.
coderAgent.on('task:implement_feature', async (task) => {
  try {
    const issue = await coderAgent.parseGitHubIssue(task.payload.issueNumber);
    await coderAgent.createFeatureBranch(issue);

    const implementation = await coderAgent.implementFeature({
      specification: task.payload.specification,
      issue,
      context: await coderAgent.getCodebaseContext()
    });

    const tests = await coderAgent.writeTests(implementation);
    const pr = await coderAgent.createPullRequest({ implementation, tests, issue });

    await coderAgent.notifyCompletion(task.id, pr);
  } catch (error) {
    // Delegate failure handling (reporting, retries) to the agent itself.
    await coderAgent.handleError(error, task);
  }
});
- Follow established coding standards religiously
- Write self-documenting code with clear naming
- Keep functions small and focused (single responsibility)
- Implement comprehensive error handling
- Write tests before or alongside implementation (TDD)
- Cover edge cases and error scenarios
- Use meaningful test descriptions
- Maintain high coverage without sacrificing quality
- Document complex algorithms and business logic
- Keep API documentation up to date
- Use clear, concise comments
- Include examples in documentation
- Profile code for performance bottlenecks
- Optimize algorithms and data structures
- Consider caching strategies
- Monitor memory usage
- Validate all inputs
- Use parameterized queries
- Implement proper authentication/authorization
- Keep dependencies updated
- Agent Framework Overview
- Product Manager Agent
- Code Reviewer Agent
- Tester Agent
- GitHub Integration Guide
Tags: #AutoSDLC #Agent #Coder #Implementation #Specification · Last Updated: 2025-06-09 · Next: Code Reviewer Agent →