Security incidents can happen to any system. This guide will help you build a robust incident response infrastructure on Alpine Linux, covering everything from initial detection to post-incident analysis, ensuring you’re prepared to handle security threats effectively.
Table of Contents
- Prerequisites
- Understanding Incident Response
- Setting Up Detection Systems
- Log Management and Analysis
- Intrusion Detection Systems
- File Integrity Monitoring
- Network Monitoring
- Forensic Tools Installation
- Incident Response Procedures
- Automated Response Actions
- Evidence Collection
- Threat Intelligence Integration
- Recovery Procedures
- Reporting and Documentation
- Troubleshooting
- Best Practices
- Conclusion
Prerequisites
Before implementing incident response, ensure you have:
- Alpine Linux with root access
- At least 4GB RAM for analysis tools
- 100GB+ storage for logs and evidence
- Basic understanding of security concepts
- Network access for threat intelligence feeds
- Backup system configured
Understanding Incident Response
Incident Response Phases
# Check current security posture
ps aux | grep -E 'sshd|nginx|apache'  # are the expected daemons (and only those) running?
netstat -tulpn  # listening TCP/UDP sockets with owning PIDs
last -10  # ten most recent logins
Key phases:
- Preparation: Tools and procedures setup
- Detection: Identifying security events
- Analysis: Understanding the incident
- Containment: Limiting damage
- Eradication: Removing threats
- Recovery: Restoring normal operations
Setting Up Detection Systems
Step 1: Install Core Monitoring Tools
# Update package repository
apk update
# Install system monitoring tools
apk add htop iotop iftop nethogs
apk add sysstat procps lsof strace
# Install security tools
# NOTE(review): package names vary across Alpine releases; 'tripwire' and
# 'wireshark-cli' may be absent from the default repositories — verify with
# 'apk search' and enable the community repository if needed.
apk add nmap tcpdump wireshark-cli
apk add fail2ban aide tripwire
# Install log analysis tools
apk add logwatch goaccess
Step 2: Configure System Auditing
# Install audit daemon ('audit' is the Alpine package; audit-libs may be a
# separate split package depending on release)
apk add audit audit-libs
# Configure audit rules
cat > /etc/audit/audit.rules << 'EOF'
# Delete all existing rules
-D
# Buffer size
-b 8192
# Failure mode (1 = printk on failure)
-f 1
# Monitor authentication files for writes and attribute changes
-w /etc/passwd -p wa -k passwd_changes
-w /etc/shadow -p wa -k shadow_changes
-w /etc/group -p wa -k group_changes
-w /etc/sudoers -p wa -k sudoers_changes
# Monitor system calls
-a always,exit -F arch=b64 -S execve -k exec
-a always,exit -F arch=b64 -S connect -k network
-a always,exit -F arch=b64 -S accept -k network
-a always,exit -F arch=b64 -S open -F dir=/etc -F success=0 -k unauth_access
-a always,exit -F arch=b64 -S unlink -S rmdir -k deletion
# Monitor privilege escalation
-a always,exit -F arch=b64 -S setuid -S setgid -k priv_esc
# NOTE(review): the a1&0111 mask is intended to catch chmod calls that set
# any execute bit — confirm against auditctl(8) on the deployed version.
-a always,exit -F arch=b64 -S chmod -F a1&0111 -k exec_perm
# Monitor kernel module tooling
-w /sbin/insmod -p x -k modules
-w /sbin/rmmod -p x -k modules
-w /sbin/modprobe -p x -k modules
EOF
# Enable and start audit
rc-update add auditd default
rc-service auditd start
# Configure audit log rotation
cat > /etc/logrotate.d/audit << 'EOF'
/var/log/audit/audit.log {
    daily
    rotate 30
    compress
    delaycompress
    notifempty
    missingok
    postrotate
        # Alpine uses OpenRC; the Debian-style 'service' wrapper does not exist here
        rc-service auditd restart >/dev/null 2>&1 || true
    endscript
}
EOF
Step 3: Real-time Monitoring Setup
# Create monitoring dashboard script
cat > /usr/local/bin/security-monitor << 'EOF'
#!/bin/sh
# Real-time security monitoring dashboard.
# Refreshes every 5 seconds; run from an interactive terminal.

# ANSI colors. The variables hold literal backslash escapes, so they are
# emitted with printf '%b' — busybox 'echo' does not interpret escapes
# without -e and would print the raw \033 sequences.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

while true; do
    clear
    printf '=== Security Monitor - %s ===\n\n' "$(date)"

    # Established network connections (top 5)
    printf '%bActive Network Connections:%b\n' "$YELLOW" "$NC"
    netstat -tupn 2>/dev/null | grep ESTABLISHED | head -5
    echo

    # Recent authentication failures
    printf '%bRecent Failed Logins:%b\n' "$RED" "$NC"
    grep "Failed password" /var/log/auth.log 2>/dev/null | tail -5
    echo

    # Processes using more than 80% CPU or memory
    printf '%bSuspicious Processes:%b\n' "$GREEN" "$NC"
    ps aux | awk '$3 > 80.0 || $4 > 80.0' | grep -v "COMMAND"
    echo

    # Files under /etc modified within the last 5 minutes
    printf '%bRecent File Changes:%b\n' "$YELLOW" "$NC"
    find /etc -type f -mmin -5 2>/dev/null | head -5
    echo

    # Logged-in users
    printf '%bActive Users:%b\n' "$GREEN" "$NC"
    w -h
    sleep 5
done
EOF
chmod +x /usr/local/bin/security-monitor
Log Management and Analysis
Step 1: Centralized Logging Setup
# Install rsyslog
apk add rsyslog rsyslog-tls
# Configure centralized logging
cat > /etc/rsyslog.conf << 'EOF'
# Modules
module(load="imuxsock")
module(load="imklog")
module(load="immark")
module(load="imfile")
module(load="imtcp")
input(type="imtcp" port="514")
# Global directives
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
$FileOwner root
$FileGroup adm
$FileCreateMode 0640
$DirCreateMode 0755
$Umask 0022
$WorkDirectory /var/spool/rsyslog
# Log everything to central file for analysis
*.* /var/log/syslog
# Separate logs by facility
auth,authpriv.* /var/log/auth.log
*.*;auth,authpriv.none /var/log/messages
kern.* /var/log/kern.log
mail.* /var/log/mail.log
# Security-specific logging
$template SecurityTemplate,"%timegenerated% %hostname% %syslogtag% %msg%\n"
if $programname == 'sshd' then /var/log/security/sshd.log;SecurityTemplate
if $programname == 'sudo' then /var/log/security/sudo.log;SecurityTemplate
if $programname == 'auditd' then /var/log/security/audit.log;SecurityTemplate
# Remote logging (optional) — uncomment and set a REAL hostname first.
# Leaving the placeholder target enabled makes rsyslog retry and queue
# undeliverable messages forever.
#*.* @@remote-syslog-server:514
EOF
# Create log and spool directories before starting the daemon
mkdir -p /var/log/security /var/spool/rsyslog
chmod 750 /var/log/security
# Enable rsyslog
rc-update add rsyslog default
rc-service rsyslog start
Step 2: Log Analysis Tools
# Install the ELK stack for indexing and searching security logs.
# NOTE(review): elasticsearch/logstash/kibana are JVM services — not
# lightweight — and may be missing from Alpine's default repositories;
# verify availability with 'apk search' before relying on this step.
apk add elasticsearch logstash kibana
# Configure Logstash for security logs
cat > /etc/logstash/conf.d/security.conf << 'EOF'
# Tail the auth and audit logs from the start of each file
input {
file {
path => "/var/log/auth.log"
type => "auth"
start_position => "beginning"
}
file {
path => "/var/log/audit/audit.log"
type => "audit"
start_position => "beginning"
}
}
# Parse syslog-style auth lines and 'type=... msg=audit(...)' audit records
filter {
if [type] == "auth" {
grok {
match => { "message" => "%{SYSLOGTIMESTAMP:timestamp} %{HOSTNAME:hostname} %{PROG:program}: %{GREEDYDATA:message}" }
}
date {
match => [ "timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
}
if [type] == "audit" {
grok {
match => { "message" => "type=%{WORD:audit_type} msg=audit\(%{NUMBER:audit_time}:%{NUMBER:audit_id}\): %{GREEDYDATA:audit_message}" }
}
}
}
# Ship parsed events to the local Elasticsearch, one index per day
output {
elasticsearch {
hosts => ["localhost:9200"]
index => "security-%{+YYYY.MM.dd}"
}
}
EOF
Step 3: Custom Log Analysis Scripts
# Create security log analyzer
cat > /usr/local/bin/analyze-security-logs << 'EOF'
#!/usr/bin/env python3
"""Summarize failed logins, successful logins and sudo usage from an auth log.

Usage: analyze-security-logs [logfile]   (defaults to /var/log/auth.log)
"""
import re
import sys
from collections import defaultdict


def analyze_auth_log(logfile):
    """Analyze authentication logs for suspicious activity."""
    failed_attempts = defaultdict(int)
    successful_logins = defaultdict(list)
    sudo_commands = []

    with open(logfile, 'r') as f:
        for line in f:
            # Failed login attempts
            if 'Failed password' in line:
                match = re.search(r'Failed password for (\S+) from (\S+)', line)
                if match:
                    user, ip = match.groups()
                    failed_attempts[f"{user}@{ip}"] += 1
            # Successful logins
            elif 'Accepted password' in line or 'Accepted publickey' in line:
                match = re.search(r'Accepted \w+ for (\S+) from (\S+)', line)
                if match:
                    user, ip = match.groups()
                    # Syslog timestamp: first three whitespace-separated fields
                    timestamp = line.split()[0:3]
                    successful_logins[user].append((ip, ' '.join(timestamp)))
            # Sudo usage
            elif 'sudo:' in line and 'COMMAND=' in line:
                sudo_commands.append(line.strip())

    # Report findings
    print("=== Security Log Analysis Report ===\n")
    print("Failed Login Attempts:")
    for attempt, count in sorted(failed_attempts.items(), key=lambda x: x[1], reverse=True):
        if count > 3:
            print(f"  {attempt}: {count} attempts")

    print("\nSuccessful Logins:")
    for user, logins in successful_logins.items():
        print(f"  {user}:")
        for ip, time in logins[-5:]:  # Last 5 logins
            print(f"    - from {ip} at {time}")

    print("\nRecent Sudo Commands:")
    for cmd in sudo_commands[-10:]:  # Last 10 commands
        print(f"  {cmd}")


if __name__ == "__main__":
    logfile = sys.argv[1] if len(sys.argv) > 1 else "/var/log/auth.log"
    analyze_auth_log(logfile)
EOF
chmod +x /usr/local/bin/analyze-security-logs
Intrusion Detection Systems
Step 1: Install and Configure Snort
# Install Snort IDS
apk add snort
# Configure Snort
# NOTE(review): set HOME_NET to the actual local subnet before deploying,
# and make sure /var/log/snort exists — the output plugins below write there.
cat > /etc/snort/snort.conf << 'EOF'
# Network variables
var HOME_NET 192.168.1.0/24
var EXTERNAL_NET !$HOME_NET
var DNS_SERVERS $HOME_NET
var SMTP_SERVERS $HOME_NET
var HTTP_SERVERS $HOME_NET
var SQL_SERVERS $HOME_NET
var TELNET_SERVERS $HOME_NET
var SSH_SERVERS $HOME_NET
var FTP_SERVERS $HOME_NET
# Port variables
var HTTP_PORTS [80,8080,8000,8443]
var SHELLCODE_PORTS !80
var ORACLE_PORTS 1024:
var SSH_PORTS 22
var FTP_PORTS [21,2100,3535]
# Path variables
var RULE_PATH /etc/snort/rules
var SO_RULE_PATH /etc/snort/so_rules
var PREPROC_RULE_PATH /etc/snort/preproc_rules
# Decoder config: silence noisy decoder/option alerts
config disable_decode_alerts
config disable_tcpopt_experimental_alerts
config disable_tcpopt_obsolete_alerts
config disable_tcpopt_ttcp_alerts
config disable_tcpopt_alerts
config disable_ipopt_alerts
# Detection engine
config detection: search-method ac-full-q
# Output plugins
output alert_fast: /var/log/snort/alerts.log
output log_tcpdump: /var/log/snort/snort.pcap
# Include rules
# NOTE(review): the emerging-threats tarball extracts many per-category
# files under rules/ — confirm a single 'emerging-threats.rules' file
# actually exists after the extraction step below.
include $RULE_PATH/local.rules
include $RULE_PATH/emerging-threats.rules
EOF
# Download Emerging Threats rules (ruleset built for the Snort 2.9 engine)
wget https://rules.emergingthreats.net/open/snort-2.9.0/emerging.rules.tar.gz
tar -xzf emerging.rules.tar.gz -C /etc/snort/
Step 2: Configure OSSEC HIDS
# Build OSSEC from source (not packaged for Alpine)
cd /tmp
wget https://github.com/ossec/ossec-hids/archive/3.6.0.tar.gz
tar -xzf 3.6.0.tar.gz
cd ossec-hids-3.6.0
# Install the build toolchain plus library dependencies. Alpine ships no
# compiler by default, so build-base (gcc, make, musl-dev) and linux-headers
# are required for ./install.sh to compile the sources.
apk add build-base linux-headers libevent-dev pcre2-dev zlib-dev openssl-dev
# Run the interactive installer (choose "local" for a single-host setup)
./install.sh
# Configure OSSEC
cat > /var/ossec/etc/ossec.conf << 'EOF'
<ossec_config>
<global>
<email_notification>yes</email_notification>
<email_to>[email protected]</email_to>
<smtp_server>localhost</smtp_server>
<email_from>[email protected]</email_from>
</global>
<!-- File integrity scan of system directories every 2 hours -->
<syscheck>
<frequency>7200</frequency>
<directories check_all="yes">/etc,/usr/bin,/usr/sbin</directories>
<directories check_all="yes">/bin,/sbin</directories>
<ignore>/etc/mtab</ignore>
<ignore>/etc/hosts.deny</ignore>
<ignore>/etc/mail/statistics</ignore>
<ignore>/etc/random-seed</ignore>
<ignore>/etc/adjtime</ignore>
<ignore>/etc/httpd/logs</ignore>
</syscheck>
<rootcheck>
<rootkit_files>/var/ossec/etc/rootcheck/rootkit_files.txt</rootkit_files>
<rootkit_trojans>/var/ossec/etc/rootcheck/rootkit_trojans.txt</rootkit_trojans>
<system_audit>/var/ossec/etc/rootcheck/system_audit_rcl.txt</system_audit>
</rootcheck>
<active-response>
<disabled>no</disabled>
<ca_store>/var/ossec/etc/ca_store.conf</ca_store>
</active-response>
<!-- Log sources OSSEC should watch -->
<localfile>
<log_format>syslog</log_format>
<location>/var/log/messages</location>
</localfile>
<localfile>
<log_format>syslog</log_format>
<location>/var/log/auth.log</location>
</localfile>
<localfile>
<log_format>apache</log_format>
<location>/var/log/apache2/access.log</location>
</localfile>
<localfile>
<log_format>apache</log_format>
<location>/var/log/apache2/error.log</location>
</localfile>
</ossec_config>
EOF
File Integrity Monitoring
Step 1: Configure AIDE
# Install AIDE
apk add aide
# Configure AIDE
cat > /etc/aide.conf << 'EOF'
# AIDE configuration
# Database locations
database=file:/var/lib/aide/aide.db
database_out=file:/var/lib/aide/aide.db.new
# Log file
report_url=file:/var/log/aide/aide.log
report_url=stdout
# Rule definitions
# NOTE(review): acl/selinux attributes require AIDE to be built with those
# features — drop them from PERMS/DATAONLY if 'aide --init' complains.
NORMAL = p+i+n+u+g+s+m+c+md5+sha256
DIR = p+i+n+u+g
PERMS = p+i+u+g+acl+selinux
LOG = >
DATAONLY = p+n+u+g+s+acl+selinux+md5+sha256
# Directories to monitor
/boot NORMAL
/bin NORMAL
/sbin NORMAL
/lib NORMAL
/lib64 NORMAL
/usr NORMAL
/etc NORMAL
# Exclude patterns
!/proc
!/sys
!/dev
!/tmp
!/var/log
!/var/cache
!/var/tmp
EOF
# Initialize AIDE database and move it into place
aide --init
mv /var/lib/aide/aide.db.new /var/lib/aide/aide.db
# Create AIDE check script
cat > /usr/local/bin/aide-check << 'EOF'
#!/bin/sh
# AIDE integrity check; mails the full report when differences are found.
LOG_FILE="/var/log/aide/aide-check-$(date +%Y%m%d).log"
ALERT_EMAIL="[email protected]"
mkdir -p /var/log/aide

# Run AIDE check. AIDE exits non-zero when the filesystem differs from the
# database, so test the exit status instead of grepping for report wording
# (which varies between AIDE versions).
aide --check > "$LOG_FILE" 2>&1
rc=$?
if [ "$rc" -ne 0 ]; then
    # Send the full report as the mail body (a piped echo would be discarded
    # by the input redirection anyway)
    mail -s "AIDE Alert: File Changes Detected (exit $rc)" "$ALERT_EMAIL" < "$LOG_FILE"
fi

# Update database if requested
if [ "$1" = "--update" ]; then
    aide --update
    mv /var/lib/aide/aide.db.new /var/lib/aide/aide.db
fi
EOF
chmod +x /usr/local/bin/aide-check
# Schedule the nightly check while PRESERVING any existing crontab entries;
# a bare 'echo ... | crontab -' would replace the entire crontab.
( crontab -l 2>/dev/null; echo "0 3 * * * /usr/local/bin/aide-check" ) | crontab -
Step 2: Real-time File Monitoring
# Install inotify-tools
apk add inotify-tools
# Create real-time file monitor
cat > /usr/local/bin/file-monitor << 'EOF'
#!/bin/sh
# Real-time file system monitoring via inotify.
# Logs every change under WATCH_DIRS and raises alerts for risky events.
WATCH_DIRS="/etc /usr/bin /usr/sbin /bin /sbin"
LOG_FILE="/var/log/file-monitor.log"
ALERT_SCRIPT="/usr/local/bin/security-alert"

# Stream inotify events and react to them. In 'read file event time' the
# last variable receives the remainder of the line, so the space inside the
# timestamp format survives. Paths containing spaces will still mis-split —
# acceptable for the system directories watched here.
monitor_files() {
    # $WATCH_DIRS is deliberately unquoted: it must word-split into
    # separate directory arguments.
    inotifywait -mr \
        --exclude '(\.log|\.tmp|\.cache)' \
        --format '%w%f %e %T' \
        --timefmt '%Y-%m-%d %H:%M:%S' \
        -e modify,create,delete,move \
        $WATCH_DIRS | while read file event time; do
        echo "[$time] $event: $file" >> "$LOG_FILE"
        # Alert on suspicious events
        case "$event" in
            *CREATE*|*MODIFY*)
                # New/changed script or binary in a system directory
                if echo "$file" | grep -qE '\.(sh|py|pl|rb|exe|bin)$'; then
                    $ALERT_SCRIPT "Executable file modified: $file"
                fi
                ;;
            *DELETE*)
                if echo "$file" | grep -qE '^/etc/'; then
                    $ALERT_SCRIPT "System file deleted: $file"
                fi
                ;;
        esac
    done
}

# With -d, run in the background and record the PID; otherwise foreground.
if [ "$1" = "-d" ]; then
    monitor_files &
    echo $! > /var/run/file-monitor.pid
else
    monitor_files
fi
EOF
chmod +x /usr/local/bin/file-monitor
# Create alert script
cat > /usr/local/bin/security-alert << 'EOF'
#!/bin/sh
# Security alert notification: log file, email, syslog, and (critical) wall.
MESSAGE="$1"
PRIORITY="${2:-high}"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
# Log alert
echo "[$TIMESTAMP] [$PRIORITY] $MESSAGE" >> /var/log/security-alerts.log
# Send notification (configure as needed)
# Email
echo "$MESSAGE" | mail -s "Security Alert: $PRIORITY" [email protected]
# Syslog. 'security' is not a valid facility keyword for logger
# (busybox/util-linux reject it); auth is the standard facility here.
logger -t security-alert -p auth.err "$MESSAGE"
# Console notification
if [ "$PRIORITY" = "critical" ]; then
    wall "SECURITY ALERT: $MESSAGE"
fi
EOF
chmod +x /usr/local/bin/security-alert
Network Monitoring
Step 1: Network Traffic Analysis
# Install network monitoring tools
apk add tcpdump tshark ngrep iptraf-ng darkstat
# Create network monitor script
cat > /usr/local/bin/network-monitor << 'EOF'
#!/bin/sh
# Network traffic monitoring: capture suspicious flows, detect port scans,
# and log unusually large transfers.
INTERFACE="eth0"
CAPTURE_DIR="/var/log/network-captures"
SUSPICIOUS_LOG="/var/log/suspicious-network.log"
mkdir -p "$CAPTURE_DIR"

# Capture suspicious traffic into a 10-file ring of 100 MB pcaps:
# bare SYNs, SSH connection attempts, telnet/IoT ports, external DNS.
tcpdump -i "$INTERFACE" -n \
    '(tcp[13] & 2 != 0) or
     (dst port 22 and tcp[13] & 18 = 2) or
     (dst port 23 or dst port 2323 or dst port 5555) or
     (udp port 53 and not src net 192.168.0.0/16)' \
    -C 100 -W 10 \
    -w "$CAPTURE_DIR/suspicious-%Y%m%d-%H%M%S.pcap" &

# Monitor for port scans: count SYN-without-ACK packets per source address.
tcpdump -i "$INTERFACE" -n \
    'tcp[tcpflags] & (tcp-syn) != 0 and
     tcp[tcpflags] & (tcp-ack) = 0' | \
    awk '{print $3}' | cut -d. -f1-4 | \
    sort | uniq -c | sort -nr | \
    awk '$1 > 20 {print "Potential port scan from " $2 " (" $1 " SYN packets)"}' \
    >> "$SUSPICIOUS_LOG"

# Monitor for large transfers. tcpdump's default output ends lines with
# "length <n>", so the value is the last field and the keyword the one
# before it. (The previous '-q' version could never match: -q TCP lines
# carry no "length" token, and /length/ was tested against the numeric
# last field.)
watch_large_transfers() {
    tcpdump -i "$INTERFACE" -n -l | \
    awk '$(NF-1) == "length" && $NF + 0 > 10000 {
        print strftime("%Y-%m-%d %H:%M:%S"), $0
    }' >> "$CAPTURE_DIR/large-transfers.log"
}
watch_large_transfers &
EOF
chmod +x /usr/local/bin/network-monitor
Step 2: Connection Monitoring
# Create connection monitor
cat > /usr/local/bin/connection-monitor << 'EOF'
#!/usr/bin/env python3
"""Poll established TCP connections every 10s and alert on suspicious patterns."""
import subprocess
import time
from collections import defaultdict


def get_connections():
    """Return the current ESTABLISHED connections as a list of dicts."""
    cmd = "netstat -tupn 2>/dev/null | grep ESTABLISHED"
    try:
        output = subprocess.check_output(cmd, shell=True).decode()
    except subprocess.CalledProcessError:
        # grep exits non-zero when no connection is established
        return []
    connections = []
    for line in output.strip().split('\n'):
        if not line:
            continue
        parts = line.split()
        if len(parts) >= 7:
            connections.append({
                'proto': parts[0],
                'local': parts[3],
                'remote': parts[4],
                'state': parts[5],
                'process': parts[6] if len(parts) > 6 else 'unknown'
            })
    return connections


def analyze_connections(connections):
    """Analyze connections for suspicious activity; return a list of alert strings."""
    alerts = []
    # Count connections per remote IP
    remote_counts = defaultdict(int)
    for conn in connections:
        remote_ip = conn['remote'].split(':')[0]
        remote_counts[remote_ip] += 1
    # Alert on too many connections from a single IP
    for ip, count in remote_counts.items():
        if count > 10:
            alerts.append(f"High connection count from {ip}: {count} connections")
    # Check for suspicious remote ports (telnet, RPC/SMB, RDP, VNC)
    suspicious_ports = ['23', '135', '139', '445', '3389', '5900']
    for conn in connections:
        remote_port = conn['remote'].split(':')[-1]
        if remote_port in suspicious_ports:
            alerts.append(f"Connection to suspicious port {remote_port}: {conn['remote']}")
    return alerts


def main():
    """Main monitoring loop: print alerts and append them to the alert log."""
    print("Starting connection monitor...")
    while True:
        connections = get_connections()
        alerts = analyze_connections(connections)
        if alerts:
            print(f"\n=== Alerts at {time.strftime('%Y-%m-%d %H:%M:%S')} ===")
            for alert in alerts:
                print(f"[!] {alert}")
                # Log to file
                with open('/var/log/connection-alerts.log', 'a') as f:
                    f.write(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {alert}\n")
        time.sleep(10)


if __name__ == "__main__":
    main()
EOF
chmod +x /usr/local/bin/connection-monitor
Forensic Tools Installation
Step 1: Install Forensic Toolkit
# Install forensic analysis tools
apk add sleuthkit foremost ddrescue
# NOTE(review): the Python 2 'volatility' package and some tools below may
# be absent from current Alpine repositories — verify with 'apk search';
# prefer the volatility3 checkout installed further down.
apk add volatility yara john hashcat
apk add exiftool binwalk
# Install memory analysis tools
cd /opt
git clone https://github.com/volatilityfoundation/volatility3.git
cd volatility3
pip3 install -r requirements.txt
# Install additional forensic tools
# NOTE(review): rekall is unmaintained and frequently fails to install on
# modern Python — confirm before depending on it.
pip3 install rekall oletools
Step 2: Create Forensic Analysis Scripts
# Create forensic collection script
cat > /usr/local/bin/collect-forensics << 'EOF'
#!/bin/sh
# Forensic evidence collection for a named case.
# Usage: collect-forensics <case-id>
# NOTE(review): the timeline step uses GNU 'find -printf', which busybox
# find lacks — install findutils (and gdb for gcore) on Alpine first.
CASE_ID="$1"
OUTPUT_DIR="/forensics/$CASE_ID"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)

if [ -z "$CASE_ID" ]; then
    echo "Usage: $0 <case-id>"
    exit 1
fi

echo "Starting forensic collection for case: $CASE_ID"
# Brace expansion ({a,b,c}) is a bashism that busybox ash does not perform
# under #!/bin/sh, so create each evidence subdirectory explicitly.
for sub in memory disk network system; do
    mkdir -p "$OUTPUT_DIR/$sub"
done

# System information
echo "Collecting system information..."
uname -a > "$OUTPUT_DIR/system/uname.txt"
date > "$OUTPUT_DIR/system/date.txt"
uptime > "$OUTPUT_DIR/system/uptime.txt"
ps auxww > "$OUTPUT_DIR/system/processes.txt"
netstat -anp > "$OUTPUT_DIR/system/netstat.txt"
lsof -n > "$OUTPUT_DIR/system/lsof.txt"
w > "$OUTPUT_DIR/system/users.txt"
last -100 > "$OUTPUT_DIR/system/last.txt"

# Memory dump (if possible). /proc/kcore reports the full kernel address
# space (terabytes on 64-bit), so cap the copy instead of dumping until
# the evidence disk fills.
echo "Attempting memory dump..."
if [ -r /proc/kcore ]; then
    dd if=/proc/kcore of="$OUTPUT_DIR/memory/kcore.dump" bs=1M count=2048
fi

# Network connections
echo "Collecting network data..."
ss -anp > "$OUTPUT_DIR/network/ss.txt"
iptables -L -n -v > "$OUTPUT_DIR/network/iptables.txt"
ip addr show > "$OUTPUT_DIR/network/ip_addr.txt"
ip route show > "$OUTPUT_DIR/network/ip_route.txt"
arp -an > "$OUTPUT_DIR/network/arp.txt"

# File system timeline
echo "Creating filesystem timeline..."
find / -type f -printf "%T@ %Tc %p\n" 2>/dev/null | \
    sort -n > "$OUTPUT_DIR/disk/timeline.txt"

# Hash critical files
echo "Hashing critical files..."
find /bin /sbin /usr/bin /usr/sbin -type f -exec sha256sum {} \; \
    > "$OUTPUT_DIR/system/binary_hashes.txt"

# Capture running processes' memory (gcore ships with gdb; dumps are
# silently skipped if it is not installed)
for pid in $(ps -eo pid --no-headers); do
    if [ -r "/proc/$pid/maps" ] && [ -r "/proc/$pid/mem" ]; then
        gcore -o "$OUTPUT_DIR/memory/process_$pid" "$pid" 2>/dev/null
    fi
done

# Package archive
echo "Creating evidence archive..."
cd /forensics
tar -czf "case_${CASE_ID}_${TIMESTAMP}.tar.gz" "$CASE_ID/"
echo "Forensic collection complete: /forensics/case_${CASE_ID}_${TIMESTAMP}.tar.gz"
EOF
chmod +x /usr/local/bin/collect-forensics
Step 3: Memory Analysis Tools
# Create memory analysis script
cat > /usr/local/bin/analyze-memory << 'EOF'
#!/usr/bin/env python3
"""Run a standard set of Volatility plugins against a memory dump.

Uses the Volatility 2 profile/plugin interface; the 'volatility' binary
must be on PATH.
"""
import subprocess
import sys


def analyze_memory_dump(dump_file):
    """Print profile suggestions, then run common plugins interactively."""
    print(f"Analyzing memory dump: {dump_file}")
    # Suggest a profile for the image
    print("Determining memory profile...")
    subprocess.run(f"volatility -f {dump_file} imageinfo", shell=True)
    # Common analysis plugins: (label, plugin name)
    analyses = [
        ("Process List", "pslist"),
        ("Network Connections", "netscan"),
        ("Command History", "cmdscan"),
        ("Hidden Processes", "psscan"),
        ("Loaded Modules", "modules"),
        ("Registry Hives", "hivelist"),
    ]
    profile = input("Enter profile name: ")
    for name, plugin in analyses:
        print(f"\n=== {name} ===")
        cmd = f"volatility -f {dump_file} --profile={profile} {plugin}"
        subprocess.run(cmd, shell=True)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: analyze-memory <memory-dump>")
        sys.exit(1)
    analyze_memory_dump(sys.argv[1])
EOF
chmod +x /usr/local/bin/analyze-memory
Incident Response Procedures
Step 1: Create Incident Response Runbook
# Create IR runbook script
cat > /usr/local/bin/incident-response << 'EOF'
#!/bin/sh
# Incident Response Runbook
# Interactive checklist that walks an operator through the IR phases,
# logging each action and collecting evidence along the way.
INCIDENT_ID=$(date +%Y%m%d%H%M%S)
IR_DIR="/incident-response/$INCIDENT_ID"
LOG_FILE="$IR_DIR/incident.log"
mkdir -p "$IR_DIR"
# Append a timestamped entry to the incident log and echo it to the console.
log_action() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}
echo "=== Incident Response Initiated ==="
echo "Incident ID: $INCIDENT_ID"
echo
# Step 1: Initial Assessment
log_action "Step 1: Initial Assessment"
echo "1. What type of incident is suspected?"
echo " [ ] Malware infection"
echo " [ ] Unauthorized access"
echo " [ ] Data breach"
echo " [ ] DoS/DDoS attack"
echo " [ ] Insider threat"
# NOTE(review): 'read -p' works under busybox ash and bash but is not POSIX sh.
read -p "Enter incident type: " INCIDENT_TYPE
log_action "Incident type: $INCIDENT_TYPE"
# Step 2: Containment
log_action "Step 2: Containment Actions"
echo "2. Immediate containment actions:"
echo " [ ] Isolate affected systems"
echo " [ ] Disable compromised accounts"
echo " [ ] Block malicious IPs"
echo " [ ] Preserve evidence"
read -p "Isolate system from network? (y/n): " ISOLATE
if [ "$ISOLATE" = "y" ]; then
log_action "Isolating system from network"
# -I prepends, so the loopback ACCEPT rules inserted after the DROPs end
# up ABOVE them and are evaluated first. WARNING: this severs all other
# connectivity, including the SSH session running this script.
iptables -I INPUT -j DROP
iptables -I OUTPUT -j DROP
iptables -I INPUT -s 127.0.0.1 -j ACCEPT
iptables -I OUTPUT -d 127.0.0.1 -j ACCEPT
fi
# Step 3: Evidence Collection
log_action "Step 3: Evidence Collection"
/usr/local/bin/collect-forensics "$INCIDENT_ID"
# Step 4: Analysis
log_action "Step 4: Initial Analysis"
/usr/local/bin/analyze-security-logs > "$IR_DIR/log-analysis.txt"
netstat -anp > "$IR_DIR/connections.txt"
ps auxww > "$IR_DIR/processes.txt"
# Step 5: Eradication
log_action "Step 5: Eradication Planning"
echo "5. Eradication actions needed:"
echo " [ ] Remove malware"
echo " [ ] Patch vulnerabilities"
echo " [ ] Reset credentials"
echo " [ ] Update security controls"
# Step 6: Recovery
log_action "Step 6: Recovery Planning"
echo "6. Recovery steps:"
echo " [ ] Restore from clean backups"
echo " [ ] Rebuild affected systems"
echo " [ ] Verify system integrity"
echo " [ ] Monitor for reinfection"
# Step 7: Lessons Learned
log_action "Step 7: Document lessons learned"
# The EOL delimiter is unquoted on purpose: $INCIDENT_ID, $INCIDENT_TYPE and
# $(date) expand when this runbook runs, not when the file is written.
cat > "$IR_DIR/lessons-learned.md" << EOL
# Incident Response Report - $INCIDENT_ID
## Incident Type: $INCIDENT_TYPE
## Timeline
- Detection: $(date)
- Containment:
- Eradication:
- Recovery:
## Impact Assessment
- Systems affected:
- Data compromised:
- Service downtime:
## Root Cause
[To be determined]
## Remediation Actions
[List actions taken]
## Recommendations
[Future prevention measures]
EOL
echo
echo "Incident response checklist completed."
echo "Documentation saved to: $IR_DIR"
EOF
chmod +x /usr/local/bin/incident-response
Step 2: Automated Response Playbooks
# Create automated response for common incidents
cat > /usr/local/bin/auto-respond << 'EOF'
#!/usr/bin/env python3
"""Scripted responses for common incident types (brute force, malware,
data exfiltration). Must run as root."""
import subprocess
import sys
from datetime import datetime


class IncidentResponder:
    """Executes containment actions and logs every step."""

    def __init__(self):
        self.log_file = f"/var/log/auto-response-{datetime.now().strftime('%Y%m%d')}.log"

    def log(self, message):
        """Print and append a timestamped log entry."""
        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        log_entry = f"[{timestamp}] {message}"
        print(log_entry)
        with open(self.log_file, 'a') as f:
            f.write(log_entry + '\n')

    def block_ip(self, ip):
        """Block all traffic to and from a malicious IP address."""
        self.log(f"Blocking IP: {ip}")
        # argv lists instead of interpolated shell strings: the values come
        # from logs and command-line input, so shell interpolation would be
        # a command-injection vector.
        subprocess.run(["iptables", "-A", "INPUT", "-s", ip, "-j", "DROP"])
        subprocess.run(["iptables", "-A", "OUTPUT", "-d", ip, "-j", "DROP"])

    def disable_user(self, username):
        """Lock a compromised account and kill its sessions."""
        self.log(f"Disabling user: {username}")
        subprocess.run(["usermod", "-L", username])
        subprocess.run(["pkill", "-u", username])

    def isolate_process(self, pid):
        """Suspend a suspicious process and dump its memory for analysis."""
        self.log(f"Isolating process: {pid}")
        # Suspend first so the process cannot react while being dumped
        subprocess.run(["kill", "-STOP", str(pid)])
        subprocess.run(["gcore", "-o", f"/forensics/process_{pid}", str(pid)])

    def respond_to_brute_force(self, source_ip, username):
        """Respond to a brute-force attack: block the source, lock the target."""
        self.log(f"Responding to brute force from {source_ip} against {username}")
        self.block_ip(source_ip)
        if username != "root":
            self.disable_user(username)

    def respond_to_malware(self, file_path, process_id):
        """Quarantine a malicious file, kill its process, and scan the system."""
        self.log(f"Responding to malware: {file_path} (PID: {process_id})")
        # Quarantine file
        subprocess.run(["mv", file_path, "/quarantine/"])
        # Kill process
        subprocess.run(["kill", "-9", str(process_id)])
        # Full scan (requires clamav; --remove deletes infected files)
        subprocess.run(["clamscan", "-r", "/", "--infected", "--remove"])

    def respond_to_data_exfiltration(self, process_id, destination_ip):
        """Stop an exfiltration attempt and start capturing related traffic."""
        self.log(f"Responding to data exfiltration to {destination_ip}")
        self.block_ip(destination_ip)
        self.isolate_process(process_id)
        # Background capture of any remaining traffic to the destination
        subprocess.Popen([
            "tcpdump", "-i", "any",
            "-w", f"/forensics/exfil_{process_id}.pcap",
            "host", destination_ip,
        ])


def main():
    responder = IncidentResponder()
    if len(sys.argv) < 2:
        print("Usage: auto-respond <incident-type> [parameters]")
        print("Types: brute-force, malware, exfiltration")
        sys.exit(1)
    incident_type = sys.argv[1]
    if incident_type == "brute-force" and len(sys.argv) >= 4:
        responder.respond_to_brute_force(sys.argv[2], sys.argv[3])
    elif incident_type == "malware" and len(sys.argv) >= 4:
        responder.respond_to_malware(sys.argv[2], sys.argv[3])
    elif incident_type == "exfiltration" and len(sys.argv) >= 4:
        responder.respond_to_data_exfiltration(sys.argv[2], sys.argv[3])
    else:
        print("Invalid parameters")


if __name__ == "__main__":
    main()
EOF
chmod +x /usr/local/bin/auto-respond
Automated Response Actions
Step 1: Fail2ban Configuration
# Install and configure Fail2ban
apk add fail2ban
# Configure Fail2ban
# NOTE(review): the sshd-ddos filter was removed in Fail2ban >= 0.10 (its
# patterns moved into the sshd filter's aggressive mode), and action_mwl
# needs a working mail setup plus whois — verify against the installed
# Fail2ban version before enabling these jails.
cat > /etc/fail2ban/jail.local << 'EOF'
[DEFAULT]
# Ban for 1 hour after 3 failures within a 10-minute window
bantime = 3600
findtime = 600
maxretry = 3
destemail = [email protected]
sendername = Fail2ban
action = %(action_mwl)s
[sshd]
enabled = true
port = ssh
filter = sshd
logpath = /var/log/auth.log
maxretry = 3
bantime = 3600
[sshd-ddos]
enabled = true
port = ssh
filter = sshd-ddos
logpath = /var/log/auth.log
maxretry = 10
findtime = 60
bantime = 600
[apache-auth]
enabled = true
port = http,https
filter = apache-auth
logpath = /var/log/apache2/error.log
maxretry = 3
bantime = 3600
[apache-noscript]
enabled = true
port = http,https
filter = apache-noscript
logpath = /var/log/apache2/error.log
maxretry = 3
bantime = 3600
[port-scan]
enabled = true
filter = port-scan
action = iptables-allports[name=port-scan]
logpath = /var/log/messages
maxretry = 2
bantime = 7200
EOF
# Create custom port scan filter
# NOTE(review): this regex matches kernel firewall log lines, so it only
# fires if an iptables LOG rule writes SYN events to /var/log/messages.
cat > /etc/fail2ban/filter.d/port-scan.conf << 'EOF'
[Definition]
failregex = ^.* kernel: .* SRC=<HOST> .* SYN .*$
ignoreregex =
EOF
# Enable Fail2ban
rc-update add fail2ban default
rc-service fail2ban start
Step 2: Automated Threat Response
# Create threat response automation
cat > /usr/local/bin/threat-response-daemon << 'EOF'
#!/usr/bin/env python3
import time
import re
import subprocess
import threading
from collections import defaultdict
from datetime import datetime, timedelta
class ThreatResponseDaemon:
def __init__(self):
self.threat_scores = defaultdict(int)
self.blocked_ips = set()
self.incident_count = defaultdict(int)
def monitor_auth_log(self):
"""Monitor authentication logs for threats"""
cmd = "tail -F /var/log/auth.log"
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, b''):
line = line.decode('utf-8').strip()
# Failed login attempt
if 'Failed password' in line:
match = re.search(r'from (\d+\.\d+\.\d+\.\d+)', line)
if match:
ip = match.group(1)
self.threat_scores[ip] += 10
self.check_threshold(ip)
# Invalid user attempt
if 'Invalid user' in line:
match = re.search(r'from (\d+\.\d+\.\d+\.\d+)', line)
if match:
ip = match.group(1)
self.threat_scores[ip] += 15
self.check_threshold(ip)
def monitor_network_traffic(self):
"""Monitor network traffic for anomalies"""
while True:
# Check for port scans
cmd = "netstat -an | grep SYN_RECV | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -rn"
output = subprocess.check_output(cmd, shell=True).decode()
for line in output.strip().split('\n'):
if line:
parts = line.strip().split()
if len(parts) == 2:
count = int(parts[0])
ip = parts[1]
if count > 20: # More than 20 SYN_RECV from same IP
self.threat_scores[ip] += 30
self.check_threshold(ip)
time.sleep(10)
def check_threshold(self, ip):
"""Check if IP threat score exceeds threshold"""
if ip in self.blocked_ips:
return
score = self.threat_scores[ip]
if score >= 50: # High threat
self.block_ip(ip, "High threat score")
self.create_incident("high_threat", ip)
elif score >= 30: # Medium threat
self.rate_limit_ip(ip)
self.create_incident("medium_threat", ip)
def block_ip(self, ip, reason):
"""Block IP address"""
print(f"BLOCKING {ip}: {reason}")
subprocess.run(f"iptables -A INPUT -s {ip} -j DROP", shell=True)
self.blocked_ips.add(ip)
# Log action
with open('/var/log/threat-response.log', 'a') as f:
f.write(f"{datetime.now()} - Blocked {ip}: {reason}\n")
def rate_limit_ip(self, ip):
"""Apply rate limiting to IP"""
print(f"RATE LIMITING {ip}")
cmd = f"iptables -A INPUT -s {ip} -m limit --limit 10/min --limit-burst 20 -j ACCEPT"
subprocess.run(cmd, shell=True)
subprocess.run(f"iptables -A INPUT -s {ip} -j DROP", shell=True)
def create_incident(self, incident_type, ip):
"""Create incident record"""
incident_id = f"{datetime.now().strftime('%Y%m%d%H%M%S')}_{incident_type}_{ip.replace('.', '_')}"
# Collect evidence
subprocess.run(f"/usr/local/bin/collect-forensics {incident_id}", shell=True)
# Send alert
message = f"Security Incident: {incident_type} from {ip}"
subprocess.run(f'/usr/local/bin/security-alert "{message}" critical', shell=True)
def cleanup_old_blocks(self):
"""Remove old IP blocks after 24 hours"""
while True:
time.sleep(3600) # Check every hour
# Implementation depends on tracking block times
pass
def run(self):
    """Start every monitor on its own daemon thread, then idle until interrupted."""
    print("Threat Response Daemon started...")
    # Daemon threads let the process exit without joining the workers.
    monitors = (
        self.monitor_auth_log,
        self.monitor_network_traffic,
        self.cleanup_old_blocks,
    )
    for target in monitors:
        worker = threading.Thread(target=target)
        worker.daemon = True
        worker.start()
    # Park the main thread; Ctrl-C breaks out of the sleep loop.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("\nShutting down...")
if __name__ == "__main__":
    # Construct and run the daemon; blocks until interrupted.
    ThreatResponseDaemon().run()
EOF
chmod +x /usr/local/bin/threat-response-daemon
# Create an OpenRC service script (Alpine uses OpenRC, not systemd)
cat > /etc/init.d/threat-response << 'EOF'
#!/sbin/openrc-run
# OpenRC service definition for the automated threat-response daemon.
name="threat-response"
description="Automated Threat Response Daemon"
# The daemon runs in the foreground; OpenRC backgrounds it and tracks the PID.
command="/usr/local/bin/threat-response-daemon"
command_background=true
pidfile="/var/run/${name}.pid"
# stdout/stderr from the daemon are captured in these files.
output_log="/var/log/${name}.log"
error_log="/var/log/${name}.err"
depend() {
need net
after firewall
}
EOF
chmod +x /etc/init.d/threat-response
Evidence Collection
Step 1: Evidence Collection Framework
# Create comprehensive evidence collection
cat > /usr/local/bin/collect-evidence << 'EOF'
#!/bin/sh
# Comprehensive evidence collection
CASE_ID="$1"
# Refuse to run without a case id: an empty CASE_ID would scatter evidence
# directly under /evidence/ and later archive the wrong path.
if [ -z "$CASE_ID" ]; then
  echo "Usage: collect-evidence <case-id>" >&2
  exit 1
fi
EVIDENCE_DIR="/evidence/$CASE_ID"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
# Create evidence structure.  An explicit loop instead of {a,b,c} brace
# expansion, which is not POSIX sh and not guaranteed under #!/bin/sh.
for sub in live disk memory network logs malware; do
  mkdir -p "$EVIDENCE_DIR/$sub"
done
echo "Starting evidence collection for case: $CASE_ID"
# Live system data
collect_live_data() {
# Snapshot volatile state into $EVIDENCE_DIR/live.  Run before containment
# changes the system.  NOTE(review): ps -ejH, lastlog and lsof are not in
# busybox — confirm procps/util-linux/lsof packages are installed.
echo "Collecting live system data..."
# System info
date > "$EVIDENCE_DIR/live/collection_time.txt"
hostname > "$EVIDENCE_DIR/live/hostname.txt"
uname -a > "$EVIDENCE_DIR/live/uname.txt"
uptime > "$EVIDENCE_DIR/live/uptime.txt"
# Process information
ps auxww > "$EVIDENCE_DIR/live/ps_aux.txt"
ps -ejH > "$EVIDENCE_DIR/live/ps_tree.txt"
lsof -n > "$EVIDENCE_DIR/live/lsof.txt"
# Network state
netstat -anp > "$EVIDENCE_DIR/live/netstat.txt"
ss -anp > "$EVIDENCE_DIR/live/ss.txt"
iptables -L -n -v > "$EVIDENCE_DIR/live/iptables.txt"
# User activity
w > "$EVIDENCE_DIR/live/w.txt"
who -a > "$EVIDENCE_DIR/live/who.txt"
last -50 > "$EVIDENCE_DIR/live/last.txt"
lastlog > "$EVIDENCE_DIR/live/lastlog.txt"
# System resources
free -m > "$EVIDENCE_DIR/live/free.txt"
df -h > "$EVIDENCE_DIR/live/df.txt"
mount > "$EVIDENCE_DIR/live/mount.txt"
}
# Memory collection
collect_memory() {
# Capture RAM (best effort) and per-process memory metadata from /proc.
echo "Collecting memory artifacts..."
# Attempt memory dump
# NOTE(review): LiME is a kernel module, not an executable — 'command -v LiME'
# will normally fail, and the insmod path below is a placeholder to edit
# for the actual lime.ko location on this host.
if command -v LiME >/dev/null; then
insmod /path/to/lime.ko "path=$EVIDENCE_DIR/memory/memory.lime format=lime"
elif [ -r /proc/kcore ]; then
# NOTE(review): /proc/kcore can be enormous (spans the kernel address
# space) — confirm the evidence volume has room before dumping it.
dd if=/proc/kcore of="$EVIDENCE_DIR/memory/kcore.dump" bs=1M
fi
# Process memory
# Per-PID maps/environ/cmdline; failures (races, permissions) are ignored.
for pid in $(ps -eo pid --no-headers); do
if [ -d "/proc/$pid" ]; then
# Process maps
cat "/proc/$pid/maps" > "$EVIDENCE_DIR/memory/proc_${pid}_maps.txt" 2>/dev/null
# Environment
cat "/proc/$pid/environ" | tr '\0' '\n' > "$EVIDENCE_DIR/memory/proc_${pid}_environ.txt" 2>/dev/null
# Command line
cat "/proc/$pid/cmdline" | tr '\0' ' ' > "$EVIDENCE_DIR/memory/proc_${pid}_cmdline.txt" 2>/dev/null
fi
done
}
# Disk artifacts
collect_disk_artifacts() {
  echo "Collecting disk artifacts..."
  # Deleted-but-still-open files are a classic dropped-payload indicator.
  lsof -n | grep deleted > "$EVIDENCE_DIR/disk/deleted_inuse.txt"
  # setuid/setgid binaries and contents of world-writable temp areas.
  find / -type f \( -perm -4000 -o -perm -2000 \) -ls 2>/dev/null > "$EVIDENCE_DIR/disk/suid_files.txt"
  find /tmp /var/tmp /dev/shm -type f -ls 2>/dev/null > "$EVIDENCE_DIR/disk/temp_files.txt"
  # Newest 10k files give a quick modification timeline of the whole disk.
  find / -type f -printf "%T@ %Tc %p\n" 2>/dev/null | \
    sort -rn | head -10000 > "$EVIDENCE_DIR/disk/recent_files.txt"
  # Hash the system binaries for later comparison against a clean baseline.
  find /bin /sbin /usr/bin /usr/sbin -type f -exec sha256sum {} \; > "$EVIDENCE_DIR/disk/system_hashes.txt"
}
# Network evidence
collect_network_evidence() {
  echo "Collecting network evidence..."
  # Capture 60s of traffic in the background and remember the PID so we can
  # wait for the capture to finish.  Previously the PID was discarded, so
  # the evidence directory could be archived while tcpdump was still
  # writing, leaving a truncated or missing pcap.
  timeout 60 tcpdump -i any -s 0 -w "$EVIDENCE_DIR/network/capture_${TIMESTAMP}.pcap" &
  TCPDUMP_PID=$!
  # DNS cache (BIND only; silently skipped when rndc is absent)
  rndc dumpdb -cache 2>/dev/null
  cp /var/named/data/cache_dump.db "$EVIDENCE_DIR/network/" 2>/dev/null
  # ARP cache
  arp -an > "$EVIDENCE_DIR/network/arp.txt"
  # Routing table
  ip route show > "$EVIDENCE_DIR/network/routes.txt"
  # Connection tracking (requires conntrack-tools)
  conntrack -L > "$EVIDENCE_DIR/network/conntrack.txt" 2>/dev/null
  # Block until the packet capture completes so the pcap file is whole.
  wait "$TCPDUMP_PID" 2>/dev/null
}
# Log collection
collect_logs() {
  echo "Collecting log files..."
  # Everything under /var/log, then the audit trail on top of it.
  cp -r /var/log/* "$EVIDENCE_DIR/logs/" 2>/dev/null
  cp -r /var/log/audit/* "$EVIDENCE_DIR/logs/" 2>/dev/null
  # Journal export only exists on systemd hosts; harmless to attempt elsewhere.
  journalctl --since "7 days ago" > "$EVIDENCE_DIR/logs/journal.txt" 2>/dev/null
}
# Main collection
collect_live_data
collect_memory
collect_disk_artifacts
collect_network_evidence
collect_logs
# Hash every collected file BEFORE writing the manifest.  The old version ran
# the sha256 pass inside a $(...) whose output was redirected to a file, so
# the substitution expanded to nothing and the manifest's "Hash Verification"
# section was always empty.
find "$EVIDENCE_DIR" -type f -exec sha256sum {} \; > "$EVIDENCE_DIR/evidence_hashes.txt"
# Create evidence manifest
cat > "$EVIDENCE_DIR/manifest.txt" << EOL
Evidence Collection Manifest
Case ID: $CASE_ID
Collection Started: $TIMESTAMP
Collector: $(whoami)
System: $(hostname)
Files Collected:
$(find "$EVIDENCE_DIR" -type f | wc -l) files
$(du -sh "$EVIDENCE_DIR" | cut -f1) total size
Hash Verification:
sha256 digests stored in $EVIDENCE_DIR/evidence_hashes.txt
EOL
# Compress evidence; abort rather than tar the wrong directory if cd fails.
cd /evidence || exit 1
tar -czf "evidence_${CASE_ID}_${TIMESTAMP}.tar.gz" "$CASE_ID/"
echo "Evidence collection complete: /evidence/evidence_${CASE_ID}_${TIMESTAMP}.tar.gz"
EOF
chmod +x /usr/local/bin/collect-evidence
Step 2: Chain of Custody
# Create chain of custody tracking
cat > /usr/local/bin/chain-of-custody << 'EOF'
#!/usr/bin/env python3
import hashlib
import json
import datetime
import os
import sys
class ChainOfCustody:
    """Tamper-evident record of who handled which evidence files, and when."""

    def __init__(self, case_id):
        self.case_id = case_id
        # One JSON custody file per case, alongside the evidence itself.
        self.custody_file = f"/evidence/{case_id}/chain_of_custody.json"
        self.entries = []
        self.load()

    @staticmethod
    def _sha256(path):
        """Hash a file in 1 MiB chunks so large evidence files (disk/memory
        images) do not have to fit in RAM, as the previous f.read() did."""
        digest = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1024 * 1024), b''):
                digest.update(chunk)
        return digest.hexdigest()

    def load(self):
        """Load existing chain of custody entries, if any."""
        if os.path.exists(self.custody_file):
            with open(self.custody_file, 'r') as f:
                self.entries = json.load(f)

    def save(self):
        """Persist the chain of custody (creating the case directory if needed)."""
        os.makedirs(os.path.dirname(self.custody_file), exist_ok=True)
        with open(self.custody_file, 'w') as f:
            json.dump(self.entries, f, indent=2)

    def add_entry(self, action, custodian, description, evidence_files=None):
        """Append a custody entry, snapshotting the hash/size of each listed file.

        Args:
            action: Short label, e.g. "Evidence Created".
            custodian: Name of the person performing the action.
            description: Free-text description of what was done.
            evidence_files: Optional list of file paths to fingerprint.
        """
        entry = {
            'timestamp': datetime.datetime.now().isoformat(),
            'action': action,
            'custodian': custodian,
            'description': description,
            'evidence_files': evidence_files or [],
            'hashes': {}
        }
        # Missing files are skipped silently; verify_integrity reports gaps.
        for file_path in entry['evidence_files']:
            if os.path.exists(file_path):
                entry['hashes'][file_path] = {
                    'sha256': self._sha256(file_path),
                    'size': os.path.getsize(file_path)
                }
        self.entries.append(entry)
        self.save()
        print(f"Chain of custody updated: {action}")

    def verify_integrity(self):
        """Re-hash every recorded file and report matches, violations, and gaps."""
        print("Verifying evidence integrity...")
        for entry in self.entries:
            for file_path, file_info in entry['hashes'].items():
                if not os.path.exists(file_path):
                    print(f"? {file_path}: File missing")
                elif self._sha256(file_path) == file_info['sha256']:
                    print(f"✓ {file_path}: Integrity verified")
                else:
                    print(f"✗ {file_path}: INTEGRITY VIOLATION!")

    def generate_report(self):
        """Render a plain-text summary of all custody entries."""
        report = f"Chain of Custody Report\nCase ID: {self.case_id}\n"
        report += f"Generated: {datetime.datetime.now()}\n\n"
        for i, entry in enumerate(self.entries):
            report += f"Entry {i+1}:\n"
            report += f" Time: {entry['timestamp']}\n"
            report += f" Action: {entry['action']}\n"
            report += f" Custodian: {entry['custodian']}\n"
            report += f" Description: {entry['description']}\n"
            report += f" Files: {len(entry['evidence_files'])}\n\n"
        return report
def main():
    """CLI dispatcher: create/add/verify/report on a case's chain of custody."""
    if len(sys.argv) < 3:
        print("Usage: chain-of-custody <case-id> <action> [options]")
        print("Actions: create, add, verify, report")
        sys.exit(1)
    case_id = sys.argv[1]
    action = sys.argv[2]
    coc = ChainOfCustody(case_id)
    if action == "create":
        custodian = input("Custodian name: ")
        description = input("Initial evidence description: ")
        coc.add_entry("Evidence Created", custodian, description)
    elif action == "add":
        custodian = input("Custodian name: ")
        description = input("Action description: ")
        # Strip whitespace and drop empty items so "a.txt, b.txt" works;
        # previously " b.txt" failed the existence check and was silently
        # recorded without a hash.
        files = [p.strip() for p in
                 input("Evidence files (comma-separated): ").split(',')
                 if p.strip()]
        coc.add_entry("Evidence Updated", custodian, description, files)
    elif action == "verify":
        coc.verify_integrity()
    elif action == "report":
        print(coc.generate_report())
    else:
        # Previously unknown actions were silently ignored.
        print(f"Unknown action: {action}")
        sys.exit(2)


if __name__ == "__main__":
    main()
EOF
chmod +x /usr/local/bin/chain-of-custody
Threat Intelligence Integration
Step 1: Threat Intel Feeds
# Create threat intelligence collector
cat > /usr/local/bin/threat-intel-collector << 'EOF'
#!/usr/bin/env python3
import requests
import json
import sqlite3
import datetime
import ipaddress
class ThreatIntelCollector:
    """Pulls public threat feeds into a local SQLite DB and checks indicators against it."""

    def __init__(self):
        self.db_path = "/var/lib/threat-intel/threats.db"
        # sqlite3.connect cannot create parent directories; on a fresh host
        # the previous code failed with "unable to open database file".
        import os  # local import: the script header does not import os
        os.makedirs(os.path.dirname(self.db_path), exist_ok=True)
        self.init_db()
        # Threat intelligence feeds: name -> {url, indicator type}
        self.feeds = {
            'abuse.ch': {
                'url': 'https://sslbl.abuse.ch/blacklist/sslipblacklist.txt',
                'type': 'ip'
            },
            'emerging-threats': {
                'url': 'https://rules.emergingthreats.net/blockrules/compromised-ips.txt',
                'type': 'ip'
            },
            'malware-domains': {
                'url': 'http://mirror1.malwaredomains.com/files/justdomains',
                'type': 'domain'
            }
        }

    def init_db(self):
        """Create the threats and alerts tables if they do not exist yet."""
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS threats
                     (indicator TEXT PRIMARY KEY,
                      type TEXT,
                      source TEXT,
                      first_seen TEXT,
                      last_seen TEXT,
                      confidence INTEGER)''')
        c.execute('''CREATE TABLE IF NOT EXISTS alerts
                     (id INTEGER PRIMARY KEY AUTOINCREMENT,
                      timestamp TEXT,
                      indicator TEXT,
                      event_type TEXT,
                      details TEXT)''')
        conn.commit()
        conn.close()

    def update_feeds(self):
        """Download every configured feed and merge it into the database."""
        print("Updating threat intelligence feeds...")
        for source, feed in self.feeds.items():
            try:
                print(f"Fetching {source}...")
                response = requests.get(feed['url'], timeout=30)
                if response.status_code == 200:
                    self.process_feed(source, feed['type'], response.text)
            except Exception as e:
                # A dead feed must not abort the remaining ones.
                print(f"Error fetching {source}: {e}")

    def process_feed(self, source, threat_type, data):
        """Parse one feed body (one indicator per line, '#' comments) into the DB."""
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        count = 0
        now = datetime.datetime.now().isoformat()
        for line in data.strip().split('\n'):
            line = line.strip()
            if line and not line.startswith('#'):
                # First whitespace-separated token is the indicator.
                indicator = line.split()[0]
                # Drop malformed entries for IP feeds.
                if threat_type == 'ip':
                    try:
                        ipaddress.ip_address(indicator)
                    except ValueError:
                        continue
                # Upsert that refreshes last_seen but preserves first_seen.
                # The previous INSERT OR REPLACE reset first_seen on every
                # feed refresh, destroying the sighting history.
                # (ON CONFLICT upsert needs SQLite >= 3.24, 2018.)
                c.execute('''INSERT INTO threats
                             (indicator, type, source, first_seen, last_seen, confidence)
                             VALUES (?, ?, ?, ?, ?, ?)
                             ON CONFLICT(indicator) DO UPDATE SET
                               type=excluded.type,
                               source=excluded.source,
                               last_seen=excluded.last_seen,
                               confidence=excluded.confidence''',
                          (indicator, threat_type, source, now, now, 80))
                count += 1
        conn.commit()
        conn.close()
        print(f"Added {count} indicators from {source}")

    def check_indicator(self, indicator):
        """Return the threats row for `indicator`, or None if unknown."""
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        c.execute('SELECT * FROM threats WHERE indicator = ?', (indicator,))
        result = c.fetchone()
        conn.close()
        return result

    def log_alert(self, indicator, event_type, details):
        """Record a hit against the threat database in the alerts table."""
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        c.execute('''INSERT INTO alerts (timestamp, indicator, event_type, details)
                     VALUES (?, ?, ?, ?)''',
                  (datetime.datetime.now().isoformat(), indicator, event_type, details))
        conn.commit()
        conn.close()
# Integration script
def main():
    """Refresh feeds, then cross-check live outbound connections against the DB."""
    import subprocess
    collector = ThreatIntelCollector()
    # Update feeds (intended to run from cron every few hours).
    collector.update_feeds()
    # Remote addresses of currently-established TCP connections.
    cmd = "netstat -tn | grep ESTABLISHED | awk '{print $5}' | cut -d: -f1 | sort -u"
    ips = subprocess.check_output(cmd, shell=True).decode().strip().split('\n')
    for ip in ips:
        if not ip:
            continue
        threat = collector.check_indicator(ip)
        if threat:
            print(f"THREAT DETECTED: {ip} - {threat}")
            collector.log_alert(ip, 'connection', f'Active connection to known threat: {threat}')
            # Block with list-form argv so the address is never interpolated
            # into a shell command line (injection hardening).
            subprocess.run(["iptables", "-A", "INPUT", "-s", ip, "-j", "DROP"])


if __name__ == "__main__":
    main()
EOF
chmod +x /usr/local/bin/threat-intel-collector
# Add to cron — append to the existing crontab.  Piping a single line into
# `crontab -` on its own REPLACES the whole crontab, wiping other jobs.
(crontab -l 2>/dev/null; echo "0 */6 * * * /usr/local/bin/threat-intel-collector") | crontab -
Step 2: MISP Integration
# Create MISP integration script
cat > /usr/local/bin/misp-integration << 'EOF'
#!/usr/bin/env python3
from pymisp import PyMISP
import json
import sys
class MISPIntegration:
# Thin wrapper around PyMISP for searching and publishing indicators.
def __init__(self, url, api_key):
# Third positional arg False disables TLS certificate verification —
# NOTE(review): acceptable only for self-signed lab instances; confirm
# this is intended before pointing at a production MISP server.
self.misp = PyMISP(url, api_key, False)
def search_indicators(self, value):
"""Search for indicators in MISP"""
result = self.misp.search(value=value)
return result
def create_event(self, info, threat_level=2):
"""Create new MISP event"""
# NOTE(review): numeric codes (threat_level_id, analysis, distribution)
# follow MISP's enumerations — verify the intended meanings against the
# MISP documentation for the deployed version.
event = self.misp.new_event(
info=info,
threat_level_id=threat_level,
analysis=1,
distribution=0
)
return event
def add_indicator(self, event_id, indicator_type, value):
"""Add indicator to event"""
# to_ids=True marks the attribute as exportable to IDS rule feeds.
attribute = self.misp.add_attribute(
event_id,
{
'type': indicator_type,
'value': value,
'to_ids': True
}
)
return attribute
# Usage example
if __name__ == "__main__":
misp_url = "https://misp.example.com"
# Placeholder credentials — replace with a real MISP API key before use.
api_key = "your-api-key"
misp = MISPIntegration(misp_url, api_key)
# Search for indicator
results = misp.search_indicators("192.168.1.100")
print(json.dumps(results, indent=2))
EOF
chmod +x /usr/local/bin/misp-integration
Recovery Procedures
Step 1: System Recovery Playbook
# Create recovery playbook
cat > /usr/local/bin/system-recovery << 'EOF'
#!/bin/sh
# System Recovery Playbook
# Every run gets its own timestamped log file.
RECOVERY_LOG="/var/log/recovery-$(date +%Y%m%d_%H%M%S).log"
# Timestamped logger: echoes to the console and appends to the recovery log.
log() {
  printf '%s\n' "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$RECOVERY_LOG"
}
echo "=== System Recovery Procedure ==="
echo
# Step 1: Assess Damage
log "Step 1: Assessing system damage"
echo "Checking system integrity..."
# Check critical files
# Compare core binaries/auth files against known-good hashes captured at
# baseline time (expected under /backup/hashes/<basename>.sha256); files
# without a stored baseline are only checked for existence.
CRITICAL_FILES="/bin/bash /bin/sh /usr/bin/sudo /etc/passwd /etc/shadow"
for file in $CRITICAL_FILES; do
if [ ! -f "$file" ]; then
log "CRITICAL: Missing $file"
else
# Verify against known good hashes
if [ -f "/backup/hashes/$(basename $file).sha256" ]; then
current_hash=$(sha256sum "$file" | cut -d' ' -f1)
known_hash=$(cat "/backup/hashes/$(basename $file).sha256")
if [ "$current_hash" != "$known_hash" ]; then
log "WARNING: $file has been modified"
fi
fi
fi
done
# Step 2: Isolate Threats
log "Step 2: Isolating remaining threats"
# Kill suspicious processes.  Match the command name exactly: the previous
# `ps aux | grep -E '(nc|netcat|...)'` substring match also killed any process
# whose line merely contained "nc" (e.g. rsync, sync).  xargs -r avoids
# running `kill` with no arguments when nothing matches.
ps -eo pid,comm | awk '$2 == "nc" || $2 == "netcat" {print $1}' | xargs -r kill -9 2>/dev/null
# Kill anything executing straight out of world-writable temp locations.
ps -eo pid,args | awk '$2 ~ /^\/(tmp|var\/tmp|dev\/shm)\// {print $1}' | xargs -r kill -9 2>/dev/null
# Remove suspicious files
# NOTE(review): -executable is a GNU findutils extension — confirm the find
# shipped on the target Alpine image supports it (busybox find may not).
find /tmp /var/tmp /dev/shm -type f -executable -delete
# Step 3: Restore System Files
log "Step 3: Restoring system files"
# NOTE(review): restore_from_backup is defined but never invoked below —
# presumably intended to be called per-file after the Step 1 integrity
# check; confirm and wire it up, or document it as a manual helper.
# ('local' is not POSIX sh, but busybox ash on Alpine supports it.)
restore_from_backup() {
local file=$1
local backup="/backup/system/$file"
if [ -f "$backup" ]; then
cp "$backup" "$file"
log "Restored: $file"
else
log "ERROR: No backup found for $file"
fi
}
# Step 4: Reset Credentials
log "Step 4: Resetting all credentials"
# Force password changes
# Expire passwords for every regular (UID >= 1000) account.
for user in $(getent passwd | awk -F: '$3 >= 1000 {print $1}'); do
chage -d 0 "$user"
log "Password reset required for: $user"
done
# Regenerate SSH host keys
# Any key that may have been exfiltrated must be treated as burned.
rm -f /etc/ssh/ssh_host_*
ssh-keygen -A
log "SSH host keys regenerated"
# Step 5: Update Security Measures
log "Step 5: Updating security measures"
# Update system
apk update
apk upgrade
# Reinstall security tools
# NOTE(review): verify the installed apk-tools accepts --force-reinstall;
# some releases only support plain --force.
apk add --force-reinstall aide snort fail2ban
# Step 6: Verify Recovery
log "Step 6: Verifying system recovery"
# Run security audit
/usr/local/bin/security-audit > "$RECOVERY_LOG.audit"
# Check services
for service in sshd nginx postgresql; do
if rc-service "$service" status >/dev/null 2>&1; then
log "Service $service: Running"
else
log "Service $service: Not running"
fi
done
echo
echo "Recovery procedure completed. Review log: $RECOVERY_LOG"
EOF
chmod +x /usr/local/bin/system-recovery
Step 2: Data Recovery Tools
# Create data recovery script
cat > /usr/local/bin/data-recovery << 'EOF'
#!/bin/sh
# Data recovery procedures
RECOVERY_DIR="/recovery"
mkdir -p "$RECOVERY_DIR"
# Recover deleted files
# Tries filesystem-aware undelete first, then content carving as a fallback.
recover_deleted_files() {
echo "Attempting to recover deleted files..."
# Use extundelete for ext filesystems
if command -v extundelete >/dev/null; then
for partition in $(mount | grep ext | awk '{print $1}'); do
echo "Scanning $partition..."
extundelete "$partition" --restore-all -o "$RECOVERY_DIR"
done
fi
# Use photorec for general recovery
# NOTE(review): the device is hard-coded to /dev/sda — adjust for the
# actual disk, and confirm this /cmd option string against photorec docs.
if command -v photorec >/dev/null; then
photorec /d "$RECOVERY_DIR" /cmd /dev/sda fileopt,everything,enable
fi
}
# Recover from backups
# recover_from_backup <archive> <destination>
# Extracts a .tar.gz/.tgz, .tar.bz2 or .zip archive into the destination
# directory.  Returns non-zero if the archive is missing or unrecognised.
recover_from_backup() {
  BACKUP_SOURCE="$1"
  RESTORE_PATH="$2"
  echo "Restoring from backup: $BACKUP_SOURCE"
  # Guard clause instead of wrapping the whole body in if/else.
  if [ ! -f "$BACKUP_SOURCE" ]; then
    echo "Backup not found: $BACKUP_SOURCE"
    return 1
  fi
  # Make sure the destination exists before extracting into it; previously
  # tar/unzip failed outright when the restore directory was absent.
  mkdir -p "$RESTORE_PATH"
  case "$BACKUP_SOURCE" in
    *.tar.gz|*.tgz)
      tar -xzf "$BACKUP_SOURCE" -C "$RESTORE_PATH"
      ;;
    *.tar.bz2)
      tar -xjf "$BACKUP_SOURCE" -C "$RESTORE_PATH"
      ;;
    *.zip)
      unzip "$BACKUP_SOURCE" -d "$RESTORE_PATH"
      ;;
    *)
      echo "Unknown backup format"
      return 1
      ;;
  esac
}
# Database recovery
# recover_database <postgresql|mysql> <dump-file>
# Replays a SQL dump into the chosen database; unknown types return 1.
recover_database() {
  db_kind="$1"
  dump_file="$2"
  case "$db_kind" in
    postgresql)
      # Restores run as the postgres superuser.
      sudo -u postgres psql < "$dump_file"
      ;;
    mysql)
      mysql < "$dump_file"
      ;;
    *)
      echo "Unknown database type: $db_kind"
      return 1
      ;;
  esac
}
# Main menu
# Interactive dispatcher.  'read -p' is a bash/busybox-ash extension rather
# than strict POSIX sh — fine on Alpine, worth knowing when porting.
echo "Data Recovery Options:"
echo "1. Recover deleted files"
echo "2. Restore from backup"
echo "3. Database recovery"
echo "4. Full system restore"
read -p "Select option: " option
case $option in
1) recover_deleted_files ;;
2)
read -p "Backup file path: " backup
read -p "Restore to: " restore_path
recover_from_backup "$backup" "$restore_path"
;;
3)
read -p "Database type (postgresql/mysql): " db_type
read -p "Backup file: " backup_file
recover_database "$db_type" "$backup_file"
;;
4)
# Full restore delegates to the system-recovery playbook.
echo "Full system restore requires reboot"
read -p "Continue? (y/n): " confirm
if [ "$confirm" = "y" ]; then
/usr/local/bin/system-recovery
fi
;;
esac
EOF
chmod +x /usr/local/bin/data-recovery
Reporting and Documentation
Step 1: Incident Report Generator
# Create incident report generator
cat > /usr/local/bin/generate-incident-report << 'EOF'
#!/usr/bin/env python3
import os
import json
import datetime
from jinja2 import Template
class IncidentReportGenerator:
    """Builds HTML/PDF incident reports from the files in /incident-response/<id>/."""

    def __init__(self, incident_id):
        self.incident_id = incident_id
        self.incident_dir = f"/incident-response/{incident_id}"
        self.data = self.collect_incident_data()

    def collect_incident_data(self):
        """Gather timeline and analysis data for the incident.

        Reads <incident_dir>/incident.log (lines formatted "[<ts>] <msg>")
        and <incident_dir>/analysis.json; missing files simply leave the
        corresponding sections empty.
        """
        data = {
            'incident_id': self.incident_id,
            'report_date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'timeline': [],
            'indicators': [],
            'affected_systems': [],
            'actions_taken': [],
            'recommendations': []
        }
        # Load incident log
        log_file = f"{self.incident_dir}/incident.log"
        if os.path.exists(log_file):
            with open(log_file, 'r') as f:
                for line in f:
                    if not line.strip():
                        continue
                    # Skip malformed lines instead of crashing on the
                    # unpack (the old unconditional split raised ValueError
                    # on any line without "] ").
                    if '] ' not in line:
                        continue
                    timestamp, message = line.split('] ', 1)
                    data['timeline'].append({
                        'time': timestamp.strip('['),
                        'event': message.strip()
                    })
        # Load analysis results (indicators, affected systems, actions, ...)
        analysis_file = f"{self.incident_dir}/analysis.json"
        if os.path.exists(analysis_file):
            with open(analysis_file, 'r') as f:
                analysis = json.load(f)
                data.update(analysis)
        return data

    def generate_html_report(self):
        """Render the incident data through the embedded Jinja2 template.

        NOTE(review): 'executive_summary' is rendered but never populated by
        collect_incident_data — it must come from analysis.json; confirm.
        """
        template = Template('''
<!DOCTYPE html>
<html>
<head>
<title>Incident Report - {{ incident_id }}</title>
<style>
body { font-family: Arial, sans-serif; margin: 20px; }
h1, h2, h3 { color: #333; }
table { border-collapse: collapse; width: 100%; margin: 20px 0; }
th, td { border: 1px solid #ddd; padding: 8px; text-align: left; }
th { background-color: #f2f2f2; }
.critical { color: #d32f2f; }
.warning { color: #f57c00; }
.info { color: #0288d1; }
</style>
</head>
<body>
<h1>Security Incident Report</h1>
<p><strong>Incident ID:</strong> {{ incident_id }}</p>
<p><strong>Report Generated:</strong> {{ report_date }}</p>
<h2>Executive Summary</h2>
<p>{{ executive_summary }}</p>
<h2>Incident Timeline</h2>
<table>
<tr><th>Time</th><th>Event</th></tr>
{% for event in timeline %}
<tr><td>{{ event.time }}</td><td>{{ event.event }}</td></tr>
{% endfor %}
</table>
<h2>Indicators of Compromise</h2>
<ul>
{% for ioc in indicators %}
<li>{{ ioc.type }}: {{ ioc.value }}</li>
{% endfor %}
</ul>
<h2>Affected Systems</h2>
<ul>
{% for system in affected_systems %}
<li>{{ system }}</li>
{% endfor %}
</ul>
<h2>Actions Taken</h2>
<ol>
{% for action in actions_taken %}
<li>{{ action }}</li>
{% endfor %}
</ol>
<h2>Recommendations</h2>
<ol>
{% for rec in recommendations %}
<li>{{ rec }}</li>
{% endfor %}
</ol>
</body>
</html>
''')
        html = template.render(**self.data)
        report_file = f"{self.incident_dir}/incident_report.html"
        with open(report_file, 'w') as f:
            f.write(html)
        print(f"Report generated: {report_file}")
        return report_file

    def generate_pdf_report(self):
        """Generate a PDF from the HTML report via wkhtmltopdf."""
        import subprocess  # local import: module header does not import subprocess
        html_file = self.generate_html_report()
        pdf_file = html_file.replace('.html', '.pdf')
        # argv list instead of os.system(f"...") so paths containing spaces
        # or shell metacharacters cannot break or inject into the command.
        subprocess.run(["wkhtmltopdf", html_file, pdf_file])
        print(f"PDF report generated: {pdf_file}")
        return pdf_file
if __name__ == "__main__":
    import sys
    if len(sys.argv) != 2:
        print("Usage: generate-incident-report <incident-id>")
        sys.exit(1)
    # One generator instance; the PDF step re-renders the HTML internally.
    reporter = IncidentReportGenerator(sys.argv[1])
    reporter.generate_html_report()
    reporter.generate_pdf_report()
EOF
chmod +x /usr/local/bin/generate-incident-report
Step 2: Compliance Reporting
# Create compliance report generator
cat > /usr/local/bin/compliance-report << 'EOF'
#!/bin/sh
# Generate compliance reports
REPORT_DIR="/var/reports/compliance"
DATE=$(date +%Y%m%d)
REPORT_FILE="$REPORT_DIR/compliance_report_$DATE.txt"
mkdir -p "$REPORT_DIR"
echo "Security Compliance Report - $(date)" > "$REPORT_FILE"
echo "======================================" >> "$REPORT_FILE"
echo >> "$REPORT_FILE"
# Check security configurations
# check_compliance <name> <command> <expected-output>
# Runs the command, compares its stdout with the expected value, and appends
# a PASS/FAIL line to the report.  Returns 0 on PASS, 1 on FAIL.
check_compliance() {
  local check_name="$1"
  local command="$2"
  local expected="$3"
  # printf instead of 'echo -n': -n is not portable across /bin/sh variants
  # (POSIX echo treats it as a literal argument).
  printf 'Checking %s... ' "$check_name" >> "$REPORT_FILE"
  result=$(eval "$command" 2>/dev/null)
  if [ "$result" = "$expected" ]; then
    echo "PASS" >> "$REPORT_FILE"
    return 0
  else
    echo "FAIL (Expected: $expected, Got: $result)" >> "$REPORT_FILE"
    return 1
  fi
}
# Security checks
echo "Security Configuration Checks:" >> "$REPORT_FILE"
echo "------------------------------" >> "$REPORT_FILE"
check_compliance "SSH Root Login Disabled" \
"grep '^PermitRootLogin' /etc/ssh/sshd_config | awk '{print \$2}'" \
"no"
check_compliance "Password Authentication Disabled" \
"grep '^PasswordAuthentication' /etc/ssh/sshd_config | awk '{print \$2}'" \
"no"
# NOTE(review): 'iptables -L' prints a "Chain INPUT" header even when no
# rules are loaded, so this check passes on an unconfigured firewall.
check_compliance "Firewall Enabled" \
"iptables -L | grep -c 'Chain INPUT'" \
"1"
check_compliance "SELinux/AppArmor Enabled" \
"aa-status --enabled 2>/dev/null && echo enabled || echo disabled" \
"enabled"
# NOTE(review): APK::Periodic::* is apt-style configuration syntax; Alpine's
# apk has no such option in /etc/apk/apk.conf — confirm what this check is
# actually meant to assert (it will always FAIL as written).
check_compliance "Automatic Updates Enabled" \
"grep -c 'APK::Periodic::Update-Package-Lists \"1\"' /etc/apk/apk.conf" \
"1"
# Log monitoring
echo >> "$REPORT_FILE"
echo "Log Monitoring Status:" >> "$REPORT_FILE"
echo "---------------------" >> "$REPORT_FILE"
for log in auth.log syslog audit.log; do
if [ -f "/var/log/$log" ]; then
size=$(du -h "/var/log/$log" | cut -f1)
echo "$log: Present ($size)" >> "$REPORT_FILE"
else
echo "$log: Missing" >> "$REPORT_FILE"
fi
done
# User audit
echo >> "$REPORT_FILE"
echo "User Account Audit:" >> "$REPORT_FILE"
echo "------------------" >> "$REPORT_FILE"
# Check for users with UID 0
# Any account besides root here is a red flag.
echo "Users with UID 0:" >> "$REPORT_FILE"
awk -F: '$3==0 {print $1}' /etc/passwd >> "$REPORT_FILE"
# Check for users without passwords
# Reading /etc/shadow requires root; the report must run privileged.
echo >> "$REPORT_FILE"
echo "Users without passwords:" >> "$REPORT_FILE"
awk -F: '$2=="" {print $1}' /etc/shadow >> "$REPORT_FILE"
# Service audit
echo >> "$REPORT_FILE"
echo "Running Services:" >> "$REPORT_FILE"
echo "----------------" >> "$REPORT_FILE"
rc-status >> "$REPORT_FILE"
echo >> "$REPORT_FILE"
echo "Report generated: $REPORT_FILE"
# Send report
# NOTE(review): requires a configured MTA providing 'mail'; otherwise this
# final step fails silently when run from cron.
mail -s "Compliance Report - $DATE" [email protected] < "$REPORT_FILE"
EOF
chmod +x /usr/local/bin/compliance-report
# Add to cron — append to the existing crontab.  Piping a single line into
# `crontab -` on its own REPLACES the whole crontab, which would erase the
# threat-intel job installed earlier in this guide.
(crontab -l 2>/dev/null; echo "0 8 * * 1 /usr/local/bin/compliance-report") | crontab -
Troubleshooting
Common Issues
- High false positive rate:
# Tune detection rules
vi /etc/snort/snort.conf
# Adjust threshold settings
# Update fail2ban filters
vi /etc/fail2ban/filter.d/custom.conf
# Add ignoreregex patterns
- Performance impact:
# Check resource usage
htop
iotop
# Adjust monitoring frequency
# Reduce log verbosity
- Log storage issues:
# Implement log rotation
cat >> /etc/logrotate.d/security << 'EOF'
/var/log/security/*.log {
daily
rotate 30
compress
delaycompress
missingok
notifempty
}
EOF
# Archive old logs
find /var/log -name "*.log.gz" -mtime +90 -delete
Debug Mode
# Enable debug logging
cat >> /etc/rsyslog.conf << 'EOF'
*.debug /var/log/debug.log
EOF
# Monitor in real-time
tail -f /var/log/debug.log | grep -i security
Best Practices
Security Incident Response Checklist
# Create IR checklist validator
cat > /usr/local/bin/ir-checklist << 'EOF'
#!/bin/sh
# Incident Response Checklist Validator
# Verifies tools, services, log files, backups and documentation are present.
echo "=== Incident Response Readiness Check ==="
echo
# Check tools
# NOTE(review): daemons such as auditd live in sbin — ensure sbin dirs are in
# PATH or 'command -v' will report them missing.
echo "Required Tools:"
for tool in tcpdump aide snort fail2ban auditd; do
  printf "%-20s: " "$tool"
  if command -v "$tool" >/dev/null 2>&1; then
    echo "✓ Installed"
  else
    echo "✗ Missing"
  fi
done
echo
echo "Monitoring Status:"
# Check services
for service in auditd rsyslog fail2ban; do
  printf "%-20s: " "$service"
  if rc-service "$service" status >/dev/null 2>&1; then
    echo "✓ Running"
  else
    echo "✗ Not running"
  fi
done
echo
echo "Log Files:"
# Check logs
for log in /var/log/auth.log /var/log/audit/audit.log /var/log/security/alerts.log; do
  printf "%-40s: " "$log"
  if [ -f "$log" ]; then
    echo "✓ Present"
  else
    echo "✗ Missing"
  fi
done
echo
echo "Backup Status:"
# Check backups
if [ -d "/backup" ]; then
  # Find the newest archive with a glob walk instead of parsing `ls -t`
  # output (ls parsing breaks on unusual filenames).
  latest=""
  for f in /backup/*.tar.gz; do
    [ -e "$f" ] || continue
    if [ -z "$latest" ] || [ "$f" -nt "$latest" ]; then
      latest="$f"
    fi
  done
  if [ -n "$latest" ]; then
    age=$(( ($(date +%s) - $(stat -c %Y "$latest")) / 86400 ))
    echo "Latest backup: $age days old"
  else
    echo "No backups found"
  fi
fi
echo
echo "Documentation:"
# Check documentation
for doc in /incident-response/runbook.md /evidence/chain-of-custody-template.json; do
  printf "%-50s: " "$doc"
  if [ -f "$doc" ]; then
    echo "✓ Present"
  else
    echo "✗ Missing"
  fi
done
EOF
chmod +x /usr/local/bin/ir-checklist
Regular Testing
# Create incident response drill script
cat > /usr/local/bin/ir-drill << 'EOF'
#!/bin/sh
# Incident Response Drill
# Generates synthetic attack indicators, waits, then checks whether the
# detection stack (fail2ban, auditd, alert log) actually noticed them.
echo "=== Incident Response Drill ==="
echo "Simulating security incident..."
echo
# Create test indicators
TEST_IP="192.168.99.99"
TEST_FILE="/tmp/test-malware-$(date +%s)"
TEST_USER="testintruder"
# Simulate suspicious activity
echo "1. Simulating failed login attempts..."
# logger injects fake sshd lines into syslog for fail2ban to parse.
for i in $(seq 1 5); do
logger -t sshd "Failed password for $TEST_USER from $TEST_IP port 22 ssh2"
done
echo "2. Creating suspicious file..."
touch "$TEST_FILE"
chmod +x "$TEST_FILE"
echo "3. Simulating network scan..."
logger -t kernel "possible SYN flooding on port 80. Sending cookies."
echo
echo "Waiting for detection systems to respond..."
sleep 30
echo
echo "Checking detection results:"
echo "- Fail2ban: $(fail2ban-client status sshd | grep $TEST_IP)"
# NOTE(review): ausearch's -a flag expects an audit event ID, not an epoch
# timestamp — this lookup likely never matches; 'ausearch -ts' is probably
# what was intended.  Confirm against the audit docs.
echo "- Audit log: $(ausearch -i -a $(date +%s) 2>/dev/null | grep -c $TEST_FILE)"
echo "- Alert log: $(grep -c $TEST_IP /var/log/security/alerts.log 2>/dev/null)"
# Cleanup
rm -f "$TEST_FILE"
fail2ban-client unban $TEST_IP 2>/dev/null
echo
echo "Drill completed. Review detection and response effectiveness."
EOF
chmod +x /usr/local/bin/ir-drill
Conclusion
You’ve successfully built a comprehensive security incident response system on Alpine Linux. This infrastructure provides detection capabilities, automated response mechanisms, forensic tools, and detailed procedures for handling security incidents. Your system can now detect threats in real-time, collect evidence properly, and respond to incidents systematically.
Regular testing, updating threat intelligence, and refining response procedures will ensure your incident response capabilities remain effective. Remember that incident response is an ongoing process that requires continuous improvement based on lessons learned from each incident.