From 131e1f5a61599692e621ac883def2ef2b786a712 Mon Sep 17 00:00:00 2001 From: Phillip Tarrant Date: Tue, 18 Nov 2025 13:10:53 -0600 Subject: [PATCH] adding phase 5 init framework, added deployment ease scripts --- .env.example | 15 +- .gitignore | 5 + Dockerfile | 3 +- README.md | 34 +- app/docker-entrypoint.sh | 80 +++ app/init_db.py | 118 ++++- .../004_add_alert_rule_enhancements.py | 120 +++++ app/web/api/alerts.py | 396 ++++++++++++-- app/web/jobs/scan_job.py | 12 + app/web/models.py | 78 ++- app/web/routes/main.py | 102 ++++ app/web/services/alert_service.py | 490 ++++++++++++++++++ app/web/templates/alert_rules.html | 474 +++++++++++++++++ app/web/templates/alerts.html | 269 ++++++++++ app/web/templates/base.html | 10 + app/web/utils/pagination.py | 40 ++ docker-compose.yml | 9 +- docs/DEPLOYMENT.md | 135 ++++- setup.sh | 150 ++++++ 19 files changed, 2458 insertions(+), 82 deletions(-) create mode 100644 app/docker-entrypoint.sh create mode 100644 app/migrations/versions/004_add_alert_rule_enhancements.py create mode 100644 app/web/services/alert_service.py create mode 100644 app/web/templates/alert_rules.html create mode 100644 app/web/templates/alerts.html create mode 100755 setup.sh diff --git a/.env.example b/.env.example index fd7685b..e2fdf42 100644 --- a/.env.example +++ b/.env.example @@ -28,9 +28,10 @@ DATABASE_URL=sqlite:////app/data/sneakyscanner.db SECRET_KEY=your-secret-key-here-change-in-production # SNEAKYSCANNER_ENCRYPTION_KEY: Used for encrypting sensitive settings in database -# IMPORTANT: Change this to a random string in production! 
+# IMPORTANT: Must be a valid Fernet key (32 url-safe base64-encoded bytes) # Generate with: python3 -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())" -SNEAKYSCANNER_ENCRYPTION_KEY=your-encryption-key-here +# Example: N3RhbGx5VmFsaWRGZXJuZXRLZXlIZXJlMTIzNDU2Nzg5MA== +SNEAKYSCANNER_ENCRYPTION_KEY= # ================================ # CORS Configuration @@ -57,8 +58,10 @@ SCHEDULER_EXECUTORS=2 SCHEDULER_JOB_DEFAULTS_MAX_INSTANCES=3 # ================================ -# Optional: Application Password +# Initial Password (First Run) # ================================ -# If you want to set the application password via environment variable -# Otherwise, set it via init_db.py --password -# APP_PASSWORD=your-password-here +# Password used for database initialization on first run +# This will be set as the application login password +# Leave blank to auto-generate a random password (saved to ./logs/admin_password.txt) +# IMPORTANT: Change this after first login! +INITIAL_PASSWORD= diff --git a/.gitignore b/.gitignore index 0bf1e1e..f97d470 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,11 @@ output/ data/ logs/ +# Environment and secrets +.env +admin_password.txt +logs/admin_password.txt + # Python __pycache__/ *.py[cod] diff --git a/Dockerfile b/Dockerfile index 3ed299d..d2d22db 100644 --- a/Dockerfile +++ b/Dockerfile @@ -39,12 +39,13 @@ COPY app/web/ ./web/ COPY app/migrations/ ./migrations/ COPY app/alembic.ini . COPY app/init_db.py . 
+COPY app/docker-entrypoint.sh /docker-entrypoint.sh # Create required directories RUN mkdir -p /app/output /app/logs # Make scripts executable -RUN chmod +x /app/src/scanner.py /app/init_db.py +RUN chmod +x /app/src/scanner.py /app/init_db.py /docker-entrypoint.sh # Force Python unbuffered output ENV PYTHONUNBUFFERED=1 diff --git a/README.md b/README.md index f3116db..7b32ce7 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,28 @@ A comprehensive network scanning and infrastructure monitoring platform with web ### Web Application (Recommended) +**Easy Setup (One Command):** + +```bash +# 1. Clone repository +git clone +cd SneakyScan + +# 2. Run setup script +./setup.sh + +# 3. Access web interface at http://localhost:5000 +``` + +The setup script will: +- Generate secure keys automatically +- Create required directories +- Build and start the Docker containers +- Initialize the database on first run +- Display your login credentials + +**Manual Setup (Alternative):** + ```bash # 1. Clone repository git clone @@ -35,16 +57,12 @@ cd SneakyScan # 2. Configure environment cp .env.example .env -# Edit .env and set SECRET_KEY and SNEAKYSCANNER_ENCRYPTION_KEY +# Edit .env and set SECRET_KEY, SNEAKYSCANNER_ENCRYPTION_KEY, and INITIAL_PASSWORD -# 3. Build and start -docker compose build -docker compose up -d +# 3. Build and start (database auto-initializes on first run) +docker compose up --build -d -# 4. Initialize database -docker compose run --rm init-db --password "YourSecurePassword" - -# 5. Access web interface +# 4. 
Access web interface # Open http://localhost:5000 ``` diff --git a/app/docker-entrypoint.sh b/app/docker-entrypoint.sh new file mode 100644 index 0000000..3735333 --- /dev/null +++ b/app/docker-entrypoint.sh @@ -0,0 +1,80 @@ +#!/bin/bash +set -e + +# SneakyScanner Docker Entrypoint Script +# This script ensures the database is initialized before starting the Flask app + +DB_PATH="${DATABASE_URL#sqlite:///}" # Extract path from sqlite:////app/data/sneakyscanner.db +DB_DIR=$(dirname "$DB_PATH") +INIT_MARKER="$DB_DIR/.db_initialized" +PASSWORD_FILE="/app/logs/admin_password.txt" # Save to logs dir (mounted, no permission issues) + +echo "=== SneakyScanner Startup ===" +echo "Database path: $DB_PATH" +echo "Database directory: $DB_DIR" + +# Ensure database directory exists +mkdir -p "$DB_DIR" + +# Check if this is the first run (database doesn't exist or not initialized) +if [ ! -f "$DB_PATH" ] || [ ! -f "$INIT_MARKER" ]; then + echo "" + echo "=== First Run Detected ===" + echo "Initializing database..." + + # Set default password from environment or generate a random one + if [ -z "$INITIAL_PASSWORD" ]; then + echo "INITIAL_PASSWORD not set, generating random password..." + # Generate a 32-character alphanumeric password + INITIAL_PASSWORD=$(cat /dev/urandom | tr -dc 'A-Za-z0-9' | head -c 32) + # Ensure logs directory exists + mkdir -p /app/logs + echo "$INITIAL_PASSWORD" > "$PASSWORD_FILE" + echo "✓ Random password generated and saved to: ./logs/admin_password.txt" + SAVE_PASSWORD_MESSAGE=true + fi + + # Run database initialization + python3 /app/init_db.py \ + --db-url "$DATABASE_URL" \ + --password "$INITIAL_PASSWORD" \ + --no-migrations \ + --force + + # Create marker file to indicate successful initialization + if [ $? 
-eq 0 ]; then + touch "$INIT_MARKER" + echo "✓ Database initialized successfully" + echo "" + echo "=== IMPORTANT ===" + if [ "$SAVE_PASSWORD_MESSAGE" = "true" ]; then + echo "Login password saved to: ./logs/admin_password.txt" + echo "Password: $INITIAL_PASSWORD" + else + echo "Login password: $INITIAL_PASSWORD" + fi + echo "Please change this password after logging in!" + echo "==================" + echo "" + else + echo "✗ Database initialization failed!" + exit 1 + fi +else + echo "Database already initialized, skipping init..." +fi + +# Apply any pending migrations (if using migrations in future) +if [ -f "/app/alembic.ini" ]; then + echo "Checking for pending migrations..." + # Uncomment when ready to use migrations: + # alembic upgrade head +fi + +echo "" +echo "=== Starting Flask Application ===" +echo "Flask will be available at http://localhost:5000" +echo "" + +# Execute the main application +exec "$@" diff --git a/app/init_db.py b/app/init_db.py index 423695c..12650d2 100755 --- a/app/init_db.py +++ b/app/init_db.py @@ -23,11 +23,112 @@ from alembic import command from alembic.config import Config from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker +from datetime import datetime, timezone -from web.models import Base +from web.models import Base, AlertRule from web.utils.settings import PasswordManager, SettingsManager +def init_default_alert_rules(session): + """ + Create default alert rules for Phase 5. 
+ + Args: + session: Database session + """ + print("Initializing default alert rules...") + + # Check if alert rules already exist + existing_rules = session.query(AlertRule).count() + if existing_rules > 0: + print(f" Alert rules already exist ({existing_rules} rules), skipping...") + return + + default_rules = [ + { + 'name': 'Unexpected Port Detection', + 'rule_type': 'unexpected_port', + 'enabled': True, + 'threshold': None, + 'email_enabled': False, + 'webhook_enabled': False, + 'severity': 'warning', + 'filter_conditions': None, + 'config_file': None + }, + { + 'name': 'Drift Detection', + 'rule_type': 'drift_detection', + 'enabled': True, + 'threshold': None, # No threshold means alert on any drift + 'email_enabled': False, + 'webhook_enabled': False, + 'severity': 'info', + 'filter_conditions': None, + 'config_file': None + }, + { + 'name': 'Certificate Expiry Warning', + 'rule_type': 'cert_expiry', + 'enabled': True, + 'threshold': 30, # Alert when certs expire in 30 days + 'email_enabled': False, + 'webhook_enabled': False, + 'severity': 'warning', + 'filter_conditions': None, + 'config_file': None + }, + { + 'name': 'Weak TLS Detection', + 'rule_type': 'weak_tls', + 'enabled': True, + 'threshold': None, + 'email_enabled': False, + 'webhook_enabled': False, + 'severity': 'warning', + 'filter_conditions': None, + 'config_file': None + }, + { + 'name': 'Host Down Detection', + 'rule_type': 'ping_failed', + 'enabled': True, + 'threshold': None, + 'email_enabled': False, + 'webhook_enabled': False, + 'severity': 'critical', + 'filter_conditions': None, + 'config_file': None + } + ] + + try: + for rule_data in default_rules: + rule = AlertRule( + name=rule_data['name'], + rule_type=rule_data['rule_type'], + enabled=rule_data['enabled'], + threshold=rule_data['threshold'], + email_enabled=rule_data['email_enabled'], + webhook_enabled=rule_data['webhook_enabled'], + severity=rule_data['severity'], + filter_conditions=rule_data['filter_conditions'], + 
config_file=rule_data['config_file'], + created_at=datetime.now(timezone.utc), + updated_at=datetime.now(timezone.utc) + ) + session.add(rule) + print(f" ✓ Created rule: {rule.name}") + + session.commit() + print(f"✓ Created {len(default_rules)} default alert rules") + + except Exception as e: + print(f"✗ Failed to create default alert rules: {e}") + session.rollback() + raise + + def init_database(db_url: str = "sqlite:///./sneakyscanner.db", run_migrations: bool = True): """ Initialize the database schema and settings. @@ -78,6 +179,10 @@ def init_database(db_url: str = "sqlite:///./sneakyscanner.db", run_migrations: settings_manager = SettingsManager(session) settings_manager.init_defaults() print("✓ Default settings initialized") + + # Initialize default alert rules + init_default_alert_rules(session) + except Exception as e: print(f"✗ Failed to initialize settings: {e}") session.rollback() @@ -164,6 +269,9 @@ Examples: # Use custom database URL python3 init_db.py --db-url postgresql://user:pass@localhost/sneakyscanner + # Force initialization without prompting (for Docker/scripts) + python3 init_db.py --force --password mysecret + # Verify existing database python3 init_db.py --verify-only """ @@ -192,6 +300,12 @@ Examples: help='Create tables directly instead of using migrations' ) + parser.add_argument( + '--force', + action='store_true', + help='Force initialization without prompting (for non-interactive environments)' + ) + args = parser.parse_args() # Check if database already exists @@ -200,7 +314,7 @@ Examples: db_path = args.db_url.replace('sqlite:///', '') db_exists = Path(db_path).exists() - if db_exists and not args.verify_only: + if db_exists and not args.verify_only and not args.force: response = input(f"\nDatabase already exists at {db_path}. Reinitialize? 
(y/N): ") if response.lower() != 'y': print("Aborting.") diff --git a/app/migrations/versions/004_add_alert_rule_enhancements.py b/app/migrations/versions/004_add_alert_rule_enhancements.py new file mode 100644 index 0000000..52f31c6 --- /dev/null +++ b/app/migrations/versions/004_add_alert_rule_enhancements.py @@ -0,0 +1,120 @@ +"""Add enhanced alert features for Phase 5 + +Revision ID: 004 +Revises: 003 +Create Date: 2025-11-18 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic +revision = '004' +down_revision = '003' +branch_labels = None +depends_on = None + + +def upgrade(): + """ + Add enhancements for Phase 5 Alert Rule Engine: + - Enhanced alert_rules fields + - Enhanced alerts fields + - New webhooks table + - New webhook_delivery_log table + """ + + # Enhance alert_rules table + with op.batch_alter_table('alert_rules') as batch_op: + batch_op.add_column(sa.Column('name', sa.String(255), nullable=True, comment='User-friendly rule name')) + batch_op.add_column(sa.Column('webhook_enabled', sa.Boolean(), nullable=False, server_default='0', comment='Whether to send webhooks for this rule')) + batch_op.add_column(sa.Column('severity', sa.String(20), nullable=True, comment='Alert severity level (critical, warning, info)')) + batch_op.add_column(sa.Column('filter_conditions', sa.Text(), nullable=True, comment='JSON filter conditions for the rule')) + batch_op.add_column(sa.Column('config_file', sa.String(255), nullable=True, comment='Optional: specific config file this rule applies to')) + batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True, comment='Last update timestamp')) + + # Enhance alerts table + with op.batch_alter_table('alerts') as batch_op: + batch_op.add_column(sa.Column('rule_id', sa.Integer(), nullable=True, comment='Associated alert rule')) + batch_op.add_column(sa.Column('webhook_sent', sa.Boolean(), nullable=False, server_default='0', comment='Whether webhook was sent')) + 
batch_op.add_column(sa.Column('webhook_sent_at', sa.DateTime(), nullable=True, comment='When webhook was sent')) + batch_op.add_column(sa.Column('acknowledged', sa.Boolean(), nullable=False, server_default='0', comment='Whether alert was acknowledged')) + batch_op.add_column(sa.Column('acknowledged_at', sa.DateTime(), nullable=True, comment='When alert was acknowledged')) + batch_op.add_column(sa.Column('acknowledged_by', sa.String(255), nullable=True, comment='User who acknowledged the alert')) + batch_op.create_foreign_key('fk_alerts_rule_id', 'alert_rules', ['rule_id'], ['id']) + batch_op.create_index('idx_alerts_rule_id', ['rule_id']) + batch_op.create_index('idx_alerts_acknowledged', ['acknowledged']) + + # Create webhooks table + op.create_table('webhooks', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('name', sa.String(255), nullable=False, comment='Webhook name'), + sa.Column('url', sa.Text(), nullable=False, comment='Webhook URL'), + sa.Column('enabled', sa.Boolean(), nullable=False, server_default='1', comment='Whether webhook is enabled'), + sa.Column('auth_type', sa.String(20), nullable=True, comment='Authentication type: none, bearer, basic, custom'), + sa.Column('auth_token', sa.Text(), nullable=True, comment='Encrypted authentication token'), + sa.Column('custom_headers', sa.Text(), nullable=True, comment='JSON custom headers'), + sa.Column('alert_types', sa.Text(), nullable=True, comment='JSON array of alert types to trigger on'), + sa.Column('severity_filter', sa.Text(), nullable=True, comment='JSON array of severities to trigger on'), + sa.Column('timeout', sa.Integer(), nullable=True, server_default='10', comment='Request timeout in seconds'), + sa.Column('retry_count', sa.Integer(), nullable=True, server_default='3', comment='Number of retry attempts'), + sa.Column('created_at', sa.DateTime(), nullable=False), + sa.Column('updated_at', sa.DateTime(), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + + # Create 
webhook_delivery_log table + op.create_table('webhook_delivery_log', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('webhook_id', sa.Integer(), nullable=False, comment='Associated webhook'), + sa.Column('alert_id', sa.Integer(), nullable=False, comment='Associated alert'), + sa.Column('status', sa.String(20), nullable=True, comment='Delivery status: success, failed, retrying'), + sa.Column('response_code', sa.Integer(), nullable=True, comment='HTTP response code'), + sa.Column('response_body', sa.Text(), nullable=True, comment='Response body from webhook'), + sa.Column('error_message', sa.Text(), nullable=True, comment='Error message if failed'), + sa.Column('attempt_number', sa.Integer(), nullable=True, comment='Which attempt this was'), + sa.Column('delivered_at', sa.DateTime(), nullable=False, comment='Delivery timestamp'), + sa.ForeignKeyConstraint(['webhook_id'], ['webhooks.id'], ), + sa.ForeignKeyConstraint(['alert_id'], ['alerts.id'], ), + sa.PrimaryKeyConstraint('id') + ) + + # Create indexes for webhook_delivery_log + op.create_index('idx_webhook_delivery_alert_id', 'webhook_delivery_log', ['alert_id']) + op.create_index('idx_webhook_delivery_webhook_id', 'webhook_delivery_log', ['webhook_id']) + op.create_index('idx_webhook_delivery_status', 'webhook_delivery_log', ['status']) + + +def downgrade(): + """Remove Phase 5 alert enhancements.""" + + # Drop webhook_delivery_log table and its indexes + op.drop_index('idx_webhook_delivery_status', table_name='webhook_delivery_log') + op.drop_index('idx_webhook_delivery_webhook_id', table_name='webhook_delivery_log') + op.drop_index('idx_webhook_delivery_alert_id', table_name='webhook_delivery_log') + op.drop_table('webhook_delivery_log') + + # Drop webhooks table + op.drop_table('webhooks') + + # Remove enhancements from alerts table + with op.batch_alter_table('alerts') as batch_op: + batch_op.drop_index('idx_alerts_acknowledged') + batch_op.drop_index('idx_alerts_rule_id') + 
batch_op.drop_constraint('fk_alerts_rule_id', type_='foreignkey') + batch_op.drop_column('acknowledged_by') + batch_op.drop_column('acknowledged_at') + batch_op.drop_column('acknowledged') + batch_op.drop_column('webhook_sent_at') + batch_op.drop_column('webhook_sent') + batch_op.drop_column('rule_id') + + # Remove enhancements from alert_rules table + with op.batch_alter_table('alert_rules') as batch_op: + batch_op.drop_column('updated_at') + batch_op.drop_column('config_file') + batch_op.drop_column('filter_conditions') + batch_op.drop_column('severity') + batch_op.drop_column('webhook_enabled') + batch_op.drop_column('name') \ No newline at end of file diff --git a/app/web/api/alerts.py b/app/web/api/alerts.py index a53e23f..2ad23eb 100644 --- a/app/web/api/alerts.py +++ b/app/web/api/alerts.py @@ -4,9 +4,13 @@ Alerts API blueprint. Handles endpoints for viewing alert history and managing alert rules. """ -from flask import Blueprint, jsonify, request +import json +from datetime import datetime, timedelta, timezone +from flask import Blueprint, jsonify, request, current_app from web.auth.decorators import api_auth_required +from web.models import Alert, AlertRule, Scan +from web.services.alert_service import AlertService bp = Blueprint('alerts', __name__) @@ -22,22 +26,126 @@ def list_alerts(): per_page: Items per page (default: 20) alert_type: Filter by alert type severity: Filter by severity (info, warning, critical) - start_date: Filter alerts after this date - end_date: Filter alerts before this date + acknowledged: Filter by acknowledgment status (true/false) + scan_id: Filter by specific scan + start_date: Filter alerts after this date (ISO format) + end_date: Filter alerts before this date (ISO format) Returns: JSON response with alerts list """ - # TODO: Implement in Phase 4 + # Get query parameters + page = request.args.get('page', 1, type=int) + per_page = min(request.args.get('per_page', 20, type=int), 100) # Max 100 items + alert_type = 
request.args.get('alert_type') + severity = request.args.get('severity') + acknowledged = request.args.get('acknowledged') + scan_id = request.args.get('scan_id', type=int) + start_date = request.args.get('start_date') + end_date = request.args.get('end_date') + + # Build query + query = current_app.db_session.query(Alert) + + # Apply filters + if alert_type: + query = query.filter(Alert.alert_type == alert_type) + if severity: + query = query.filter(Alert.severity == severity) + if acknowledged is not None: + ack_bool = acknowledged.lower() == 'true' + query = query.filter(Alert.acknowledged == ack_bool) + if scan_id: + query = query.filter(Alert.scan_id == scan_id) + if start_date: + try: + start_dt = datetime.fromisoformat(start_date.replace('Z', '+00:00')) + query = query.filter(Alert.created_at >= start_dt) + except ValueError: + pass # Ignore invalid date format + if end_date: + try: + end_dt = datetime.fromisoformat(end_date.replace('Z', '+00:00')) + query = query.filter(Alert.created_at <= end_dt) + except ValueError: + pass # Ignore invalid date format + + # Order by severity and date + query = query.order_by( + Alert.severity.desc(), # Critical first, then warning, then info + Alert.created_at.desc() # Most recent first + ) + + # Paginate + total = query.count() + alerts = query.offset((page - 1) * per_page).limit(per_page).all() + + # Format response + alerts_data = [] + for alert in alerts: + # Get scan info + scan = current_app.db_session.query(Scan).filter(Scan.id == alert.scan_id).first() + + alerts_data.append({ + 'id': alert.id, + 'scan_id': alert.scan_id, + 'scan_title': scan.title if scan else None, + 'rule_id': alert.rule_id, + 'alert_type': alert.alert_type, + 'severity': alert.severity, + 'message': alert.message, + 'ip_address': alert.ip_address, + 'port': alert.port, + 'acknowledged': alert.acknowledged, + 'acknowledged_at': alert.acknowledged_at.isoformat() if alert.acknowledged_at else None, + 'acknowledged_by': alert.acknowledged_by, + 
'email_sent': alert.email_sent, + 'email_sent_at': alert.email_sent_at.isoformat() if alert.email_sent_at else None, + 'webhook_sent': alert.webhook_sent, + 'webhook_sent_at': alert.webhook_sent_at.isoformat() if alert.webhook_sent_at else None, + 'created_at': alert.created_at.isoformat() + }) + return jsonify({ - 'alerts': [], - 'total': 0, - 'page': 1, - 'per_page': 20, - 'message': 'Alerts list endpoint - to be implemented in Phase 4' + 'alerts': alerts_data, + 'total': total, + 'page': page, + 'per_page': per_page, + 'pages': (total + per_page - 1) // per_page # Ceiling division }) +@bp.route('//acknowledge', methods=['POST']) +@api_auth_required +def acknowledge_alert(alert_id): + """ + Acknowledge an alert. + + Args: + alert_id: Alert ID to acknowledge + + Returns: + JSON response with acknowledgment status + """ + # Get username from auth context or default to 'api' + acknowledged_by = request.json.get('acknowledged_by', 'api') if request.json else 'api' + + alert_service = AlertService(current_app.db_session) + success = alert_service.acknowledge_alert(alert_id, acknowledged_by) + + if success: + return jsonify({ + 'status': 'success', + 'message': f'Alert {alert_id} acknowledged', + 'acknowledged_by': acknowledged_by + }) + else: + return jsonify({ + 'status': 'error', + 'message': f'Failed to acknowledge alert {alert_id}' + }), 400 + + @bp.route('/rules', methods=['GET']) @api_auth_required def list_alert_rules(): @@ -47,10 +155,28 @@ def list_alert_rules(): Returns: JSON response with alert rules """ - # TODO: Implement in Phase 4 + rules = current_app.db_session.query(AlertRule).order_by(AlertRule.name, AlertRule.rule_type).all() + + rules_data = [] + for rule in rules: + rules_data.append({ + 'id': rule.id, + 'name': rule.name, + 'rule_type': rule.rule_type, + 'enabled': rule.enabled, + 'threshold': rule.threshold, + 'email_enabled': rule.email_enabled, + 'webhook_enabled': rule.webhook_enabled, + 'severity': rule.severity, + 'filter_conditions': 
json.loads(rule.filter_conditions) if rule.filter_conditions else None, + 'config_file': rule.config_file, + 'created_at': rule.created_at.isoformat(), + 'updated_at': rule.updated_at.isoformat() if rule.updated_at else None + }) + return jsonify({ - 'rules': [], - 'message': 'Alert rules list endpoint - to be implemented in Phase 4' + 'rules': rules_data, + 'total': len(rules_data) }) @@ -61,23 +187,88 @@ def create_alert_rule(): Create a new alert rule. Request body: - rule_type: Type of alert rule - threshold: Threshold value (e.g., days for cert expiry) + name: User-friendly rule name + rule_type: Type of alert rule (unexpected_port, drift_detection, cert_expiry, weak_tls, ping_failed) + threshold: Threshold value (e.g., days for cert expiry, percentage for drift) enabled: Whether rule is active (default: true) email_enabled: Send email for this rule (default: false) + webhook_enabled: Send webhook for this rule (default: false) + severity: Alert severity (critical, warning, info) + filter_conditions: JSON object with filter conditions + config_file: Optional config file to apply rule to Returns: - JSON response with created rule ID + JSON response with created rule """ - # TODO: Implement in Phase 4 data = request.get_json() or {} - return jsonify({ - 'rule_id': None, - 'status': 'not_implemented', - 'message': 'Alert rule creation endpoint - to be implemented in Phase 4', - 'data': data - }), 501 + # Validate required fields + if not data.get('rule_type'): + return jsonify({ + 'status': 'error', + 'message': 'rule_type is required' + }), 400 + + # Valid rule types + valid_rule_types = ['unexpected_port', 'drift_detection', 'cert_expiry', 'weak_tls', 'ping_failed'] + if data['rule_type'] not in valid_rule_types: + return jsonify({ + 'status': 'error', + 'message': f'Invalid rule_type. 
Must be one of: {", ".join(valid_rule_types)}' + }), 400 + + # Valid severities + valid_severities = ['critical', 'warning', 'info'] + if data.get('severity') and data['severity'] not in valid_severities: + return jsonify({ + 'status': 'error', + 'message': f'Invalid severity. Must be one of: {", ".join(valid_severities)}' + }), 400 + + try: + # Create new rule + rule = AlertRule( + name=data.get('name', f"{data['rule_type']} rule"), + rule_type=data['rule_type'], + enabled=data.get('enabled', True), + threshold=data.get('threshold'), + email_enabled=data.get('email_enabled', False), + webhook_enabled=data.get('webhook_enabled', False), + severity=data.get('severity', 'warning'), + filter_conditions=json.dumps(data['filter_conditions']) if data.get('filter_conditions') else None, + config_file=data.get('config_file'), + created_at=datetime.now(timezone.utc), + updated_at=datetime.now(timezone.utc) + ) + + current_app.db_session.add(rule) + current_app.db_session.commit() + + return jsonify({ + 'status': 'success', + 'message': 'Alert rule created successfully', + 'rule': { + 'id': rule.id, + 'name': rule.name, + 'rule_type': rule.rule_type, + 'enabled': rule.enabled, + 'threshold': rule.threshold, + 'email_enabled': rule.email_enabled, + 'webhook_enabled': rule.webhook_enabled, + 'severity': rule.severity, + 'filter_conditions': json.loads(rule.filter_conditions) if rule.filter_conditions else None, + 'config_file': rule.config_file, + 'created_at': rule.created_at.isoformat(), + 'updated_at': rule.updated_at.isoformat() + } + }), 201 + + except Exception as e: + current_app.db_session.rollback() + return jsonify({ + 'status': 'error', + 'message': f'Failed to create alert rule: {str(e)}' + }), 500 @bp.route('/rules/', methods=['PUT']) @@ -90,22 +281,84 @@ def update_alert_rule(rule_id): rule_id: Alert rule ID to update Request body: + name: User-friendly rule name (optional) threshold: Threshold value (optional) enabled: Whether rule is active (optional) 
email_enabled: Send email for this rule (optional) + webhook_enabled: Send webhook for this rule (optional) + severity: Alert severity (optional) + filter_conditions: JSON object with filter conditions (optional) + config_file: Config file to apply rule to (optional) Returns: JSON response with update status """ - # TODO: Implement in Phase 4 data = request.get_json() or {} - return jsonify({ - 'rule_id': rule_id, - 'status': 'not_implemented', - 'message': 'Alert rule update endpoint - to be implemented in Phase 4', - 'data': data - }), 501 + # Get existing rule + rule = current_app.db_session.query(AlertRule).filter(AlertRule.id == rule_id).first() + if not rule: + return jsonify({ + 'status': 'error', + 'message': f'Alert rule {rule_id} not found' + }), 404 + + # Valid severities + valid_severities = ['critical', 'warning', 'info'] + if data.get('severity') and data['severity'] not in valid_severities: + return jsonify({ + 'status': 'error', + 'message': f'Invalid severity. Must be one of: {", ".join(valid_severities)}' + }), 400 + + try: + # Update fields if provided + if 'name' in data: + rule.name = data['name'] + if 'threshold' in data: + rule.threshold = data['threshold'] + if 'enabled' in data: + rule.enabled = data['enabled'] + if 'email_enabled' in data: + rule.email_enabled = data['email_enabled'] + if 'webhook_enabled' in data: + rule.webhook_enabled = data['webhook_enabled'] + if 'severity' in data: + rule.severity = data['severity'] + if 'filter_conditions' in data: + rule.filter_conditions = json.dumps(data['filter_conditions']) if data['filter_conditions'] else None + if 'config_file' in data: + rule.config_file = data['config_file'] + + rule.updated_at = datetime.now(timezone.utc) + + current_app.db_session.commit() + + return jsonify({ + 'status': 'success', + 'message': 'Alert rule updated successfully', + 'rule': { + 'id': rule.id, + 'name': rule.name, + 'rule_type': rule.rule_type, + 'enabled': rule.enabled, + 'threshold': rule.threshold, + 
'email_enabled': rule.email_enabled, + 'webhook_enabled': rule.webhook_enabled, + 'severity': rule.severity, + 'filter_conditions': json.loads(rule.filter_conditions) if rule.filter_conditions else None, + 'config_file': rule.config_file, + 'created_at': rule.created_at.isoformat(), + 'updated_at': rule.updated_at.isoformat() + } + }) + + except Exception as e: + current_app.db_session.rollback() + return jsonify({ + 'status': 'error', + 'message': f'Failed to update alert rule: {str(e)}' + }), 500 @bp.route('/rules/', methods=['DELETE']) @@ -120,12 +373,83 @@ def delete_alert_rule(rule_id): Returns: JSON response with deletion status """ - # TODO: Implement in Phase 4 + # Get existing rule + rule = current_app.db_session.query(AlertRule).filter(AlertRule.id == rule_id).first() + if not rule: + return jsonify({ + 'status': 'error', + 'message': f'Alert rule {rule_id} not found' + }), 404 + + try: + # Delete the rule (cascade will delete related alerts) + current_app.db_session.delete(rule) + current_app.db_session.commit() + + return jsonify({ + 'status': 'success', + 'message': f'Alert rule {rule_id} deleted successfully' + }) + + except Exception as e: + current_app.db_session.rollback() + return jsonify({ + 'status': 'error', + 'message': f'Failed to delete alert rule: {str(e)}' + }), 500 + + +@bp.route('/stats', methods=['GET']) +@api_auth_required +def alert_stats(): + """ + Get alert statistics. 
+ + Query params: + days: Number of days to look back (default: 7) + + Returns: + JSON response with alert statistics + """ + days = request.args.get('days', 7, type=int) + cutoff_date = datetime.now(timezone.utc) - timedelta(days=days) + + # Get alerts in date range + alerts = current_app.db_session.query(Alert).filter(Alert.created_at >= cutoff_date).all() + + # Calculate statistics + total_alerts = len(alerts) + alerts_by_severity = {'critical': 0, 'warning': 0, 'info': 0} + alerts_by_type = {} + unacknowledged_count = 0 + + for alert in alerts: + # Count by severity + if alert.severity in alerts_by_severity: + alerts_by_severity[alert.severity] += 1 + + # Count by type + if alert.alert_type not in alerts_by_type: + alerts_by_type[alert.alert_type] = 0 + alerts_by_type[alert.alert_type] += 1 + + # Count unacknowledged + if not alert.acknowledged: + unacknowledged_count += 1 + return jsonify({ - 'rule_id': rule_id, - 'status': 'not_implemented', - 'message': 'Alert rule deletion endpoint - to be implemented in Phase 4' - }), 501 + 'stats': { + 'total_alerts': total_alerts, + 'unacknowledged_count': unacknowledged_count, + 'alerts_by_severity': alerts_by_severity, + 'alerts_by_type': alerts_by_type, + 'date_range': { + 'start': cutoff_date.isoformat(), + 'end': datetime.now(timezone.utc).isoformat(), + 'days': days + } + } + }) # Health check endpoint @@ -140,5 +464,5 @@ def health_check(): return jsonify({ 'status': 'healthy', 'api': 'alerts', - 'version': '1.0.0-phase1' - }) + 'version': '1.0.0-phase5' + }) \ No newline at end of file diff --git a/app/web/jobs/scan_job.py b/app/web/jobs/scan_job.py index a01fac9..1068531 100644 --- a/app/web/jobs/scan_job.py +++ b/app/web/jobs/scan_job.py @@ -16,6 +16,7 @@ from sqlalchemy.orm import sessionmaker from src.scanner import SneakyScanner from web.models import Scan from web.services.scan_service import ScanService +from web.services.alert_service import AlertService logger = logging.getLogger(__name__) @@ -89,6 
+90,17 @@ def execute_scan(scan_id: int, config_file: str, db_url: str): scan_service = ScanService(session) scan_service._save_scan_to_db(report, scan_id, status='completed') + # Evaluate alert rules + logger.info(f"Scan {scan_id}: Evaluating alert rules...") + try: + alert_service = AlertService(session) + alerts_triggered = alert_service.evaluate_alert_rules(scan_id) + logger.info(f"Scan {scan_id}: {len(alerts_triggered)} alerts triggered") + except Exception as e: + # Don't fail the scan if alert evaluation fails + logger.error(f"Scan {scan_id}: Alert evaluation failed: {str(e)}") + logger.debug(f"Alert evaluation error details: {traceback.format_exc()}") + logger.info(f"Scan {scan_id}: Completed successfully") except FileNotFoundError as e: diff --git a/app/web/models.py b/app/web/models.py index 66cd46d..ae4a90c 100644 --- a/app/web/models.py +++ b/app/web/models.py @@ -284,17 +284,24 @@ class Alert(Base): id = Column(Integer, primary_key=True, autoincrement=True) scan_id = Column(Integer, ForeignKey('scans.id'), nullable=False, index=True) - alert_type = Column(String(50), nullable=False, comment="new_port, cert_expiry, service_change, ping_failed") + rule_id = Column(Integer, ForeignKey('alert_rules.id'), nullable=True, index=True, comment="Associated alert rule") + alert_type = Column(String(50), nullable=False, comment="unexpected_port, drift_detection, cert_expiry, service_change, ping_failed") severity = Column(String(20), nullable=False, comment="info, warning, critical") message = Column(Text, nullable=False, comment="Human-readable alert message") ip_address = Column(String(45), nullable=True, comment="Related IP (optional)") port = Column(Integer, nullable=True, comment="Related port (optional)") email_sent = Column(Boolean, nullable=False, default=False, comment="Was email notification sent?") email_sent_at = Column(DateTime, nullable=True, comment="Email send timestamp") + webhook_sent = Column(Boolean, nullable=False, default=False, comment="Was 
webhook sent?") + webhook_sent_at = Column(DateTime, nullable=True, comment="Webhook send timestamp") + acknowledged = Column(Boolean, nullable=False, default=False, index=True, comment="Was alert acknowledged?") + acknowledged_at = Column(DateTime, nullable=True, comment="Acknowledgment timestamp") + acknowledged_by = Column(String(255), nullable=True, comment="User who acknowledged") created_at = Column(DateTime, nullable=False, default=datetime.utcnow, comment="Alert creation time") # Relationships scan = relationship('Scan', back_populates='alerts') + rule = relationship('AlertRule', back_populates='alerts') # Index for alert queries by type and severity __table_args__ = ( @@ -315,14 +322,79 @@ class AlertRule(Base): __tablename__ = 'alert_rules' id = Column(Integer, primary_key=True, autoincrement=True) - rule_type = Column(String(50), nullable=False, comment="unexpected_port, cert_expiry, service_down, etc.") + name = Column(String(255), nullable=True, comment="User-friendly rule name") + rule_type = Column(String(50), nullable=False, comment="unexpected_port, cert_expiry, service_down, drift_detection, etc.") enabled = Column(Boolean, nullable=False, default=True, comment="Is rule active?") threshold = Column(Integer, nullable=True, comment="Threshold value (e.g., days for cert expiry)") email_enabled = Column(Boolean, nullable=False, default=False, comment="Send email for this rule?") + webhook_enabled = Column(Boolean, nullable=False, default=False, comment="Send webhook for this rule?") + severity = Column(String(20), nullable=True, comment="Alert severity: critical, warning, info") + filter_conditions = Column(Text, nullable=True, comment="JSON filter conditions for the rule") + config_file = Column(String(255), nullable=True, comment="Optional: specific config file this rule applies to") created_at = Column(DateTime, nullable=False, default=datetime.utcnow, comment="Rule creation time") + updated_at = Column(DateTime, nullable=True, comment="Last update 
time") + + # Relationships + alerts = relationship("Alert", back_populates="rule", cascade="all, delete-orphan") def __repr__(self): - return f"" + return f"" + + +class Webhook(Base): + """ + Webhook configurations for alert notifications. + + Stores webhook endpoints and authentication details for sending alert + notifications to external systems. + """ + __tablename__ = 'webhooks' + + id = Column(Integer, primary_key=True, autoincrement=True) + name = Column(String(255), nullable=False, comment="Webhook name") + url = Column(Text, nullable=False, comment="Webhook URL") + enabled = Column(Boolean, nullable=False, default=True, comment="Is webhook enabled?") + auth_type = Column(String(20), nullable=True, comment="Authentication type: none, bearer, basic, custom") + auth_token = Column(Text, nullable=True, comment="Encrypted authentication token") + custom_headers = Column(Text, nullable=True, comment="JSON custom headers") + alert_types = Column(Text, nullable=True, comment="JSON array of alert types to trigger on") + severity_filter = Column(Text, nullable=True, comment="JSON array of severities to trigger on") + timeout = Column(Integer, nullable=True, default=10, comment="Request timeout in seconds") + retry_count = Column(Integer, nullable=True, default=3, comment="Number of retry attempts") + created_at = Column(DateTime, nullable=False, default=datetime.utcnow, comment="Creation time") + updated_at = Column(DateTime, nullable=False, default=datetime.utcnow, comment="Last update time") + + # Relationships + delivery_logs = relationship("WebhookDeliveryLog", back_populates="webhook", cascade="all, delete-orphan") + + def __repr__(self): + return f"" + + +class WebhookDeliveryLog(Base): + """ + Webhook delivery tracking. + + Logs all webhook delivery attempts for auditing and debugging purposes. 
+ """ + __tablename__ = 'webhook_delivery_log' + + id = Column(Integer, primary_key=True, autoincrement=True) + webhook_id = Column(Integer, ForeignKey('webhooks.id'), nullable=False, index=True, comment="Associated webhook") + alert_id = Column(Integer, ForeignKey('alerts.id'), nullable=False, index=True, comment="Associated alert") + status = Column(String(20), nullable=True, index=True, comment="Delivery status: success, failed, retrying") + response_code = Column(Integer, nullable=True, comment="HTTP response code") + response_body = Column(Text, nullable=True, comment="Response body from webhook") + error_message = Column(Text, nullable=True, comment="Error message if failed") + attempt_number = Column(Integer, nullable=True, comment="Which attempt this was") + delivered_at = Column(DateTime, nullable=False, default=datetime.utcnow, comment="Delivery timestamp") + + # Relationships + webhook = relationship("Webhook", back_populates="delivery_logs") + alert = relationship("Alert") + + def __repr__(self): + return f"" # ============================================================================ diff --git a/app/web/routes/main.py b/app/web/routes/main.py index 4f7458d..6420024 100644 --- a/app/web/routes/main.py +++ b/app/web/routes/main.py @@ -219,3 +219,105 @@ def edit_config(filename): logger.error(f"Error loading config for edit: {e}") flash(f"Error loading config: {str(e)}", 'error') return redirect(url_for('main.configs')) + + +@bp.route('/alerts') +@login_required +def alerts(): + """ + Alerts history page - shows all alerts. 
@bp.route('/alerts')
@login_required
def alerts():
    """
    Alerts history page - shows all alerts.

    Query params:
        page: 1-based page number (default 1, 20 alerts per page).
        severity: optional exact-match severity filter.
        alert_type: optional exact-match alert-type filter.
        acknowledged: 'true'/'false' to filter by acknowledgment state.

    Returns:
        Rendered alerts template with a paginated, filtered alert list.
    """
    from flask import request, current_app
    from sqlalchemy import case
    from web.models import Alert, Scan
    from web.utils.pagination import paginate

    # Get query parameters for filtering
    page = request.args.get('page', 1, type=int)
    per_page = 20
    severity = request.args.get('severity')
    alert_type = request.args.get('alert_type')
    acknowledged = request.args.get('acknowledged')

    # Outer join keeps alerts visible even if their scan row is missing.
    query = current_app.db_session.query(Alert).join(Scan, isouter=True)

    # Apply filters; request.args.get returns None when the param is absent,
    # so an omitted 'acknowledged' applies no filter at all.
    if severity:
        query = query.filter(Alert.severity == severity)
    if alert_type:
        query = query.filter(Alert.alert_type == alert_type)
    if acknowledged is not None:
        query = query.filter(Alert.acknowledged == (acknowledged == 'true'))

    # Rank severities explicitly: plain string DESC ordering would sort
    # 'warning' above 'critical'. Unknown severities sort last.
    severity_rank = case(
        (Alert.severity == 'critical', 0),
        (Alert.severity == 'warning', 1),
        (Alert.severity == 'info', 2),
        else_=3,
    )
    query = query.order_by(severity_rank, Alert.created_at.desc())

    # Paginate using utility function
    pagination = paginate(query, page=page, per_page=per_page)

    # Distinct alert types feed the filter dropdown; fall back to an empty
    # list rather than failing the whole page on a query error.
    try:
        rows = current_app.db_session.query(Alert.alert_type).distinct().all()
        alert_types = [row[0] for row in rows] if rows else []
    except Exception:
        alert_types = []

    return render_template(
        'alerts.html',
        alerts=pagination.items,
        pagination=pagination,
        current_severity=severity,
        current_alert_type=alert_type,
        current_acknowledged=acknowledged,
        alert_types=alert_types
    )
+ + Returns: + Rendered alert rules template + """ + import os + from flask import current_app + from web.models import AlertRule + + # Get all alert rules with error handling + try: + rules = current_app.db_session.query(AlertRule).order_by( + AlertRule.name.nullslast(), + AlertRule.rule_type + ).all() + except Exception as e: + logger.error(f"Error fetching alert rules: {e}") + rules = [] + + # Ensure rules is always a list + if rules is None: + rules = [] + + # Get list of available config files + configs_dir = '/app/configs' + config_files = [] + + try: + if os.path.exists(configs_dir): + config_files = [f for f in os.listdir(configs_dir) if f.endswith(('.yaml', '.yml'))] + config_files.sort() + except Exception as e: + logger.error(f"Error listing config files: {e}") + + return render_template( + 'alert_rules.html', + rules=rules, + config_files=config_files + ) diff --git a/app/web/services/alert_service.py b/app/web/services/alert_service.py new file mode 100644 index 0000000..982decc --- /dev/null +++ b/app/web/services/alert_service.py @@ -0,0 +1,490 @@ +""" +Alert Service Module + +Handles alert evaluation, rule processing, and notification triggering +for SneakyScan Phase 5. +""" +import logging +from datetime import datetime, timezone +from typing import List, Dict, Optional, Any +from sqlalchemy.orm import Session + +from ..models import ( + Alert, AlertRule, Scan, ScanPort, ScanIP, ScanService as ScanServiceModel, + ScanCertificate, ScanTLSVersion +) +from .scan_service import ScanService + +logger = logging.getLogger(__name__) + + +class AlertService: + """ + Service for evaluating alert rules and generating alerts based on scan results. + + Supports two main alert types: + 1. Unexpected Port Detection - Alerts when ports marked as unexpected are found open + 2. 
class AlertService:
    """
    Service for evaluating alert rules and generating alerts based on scan results.

    Supported rule types:
        unexpected_port  - open ports not in the config's expected_ports list
        drift_detection  - differences versus the previous scan of the same config
        cert_expiry      - certificates expiring within a threshold (days)
        weak_tls         - endpoints still offering TLS 1.0 / 1.1
        ping_failed      - hosts expected to answer ping that did not
    """

    def __init__(self, db_session: Session):
        """
        Args:
            db_session: Shared SQLAlchemy session; commits/rollbacks happen
                per alert in create_alert so one bad insert does not poison
                the whole batch.
        """
        self.db = db_session
        self.scan_service = ScanService(db_session)

    def evaluate_alert_rules(self, scan_id: int) -> List[Alert]:
        """
        Main entry point for alert evaluation after scan completion.

        Args:
            scan_id: ID of the completed scan to evaluate.

        Returns:
            List of Alert objects that were created.
        """
        logger.info(f"Starting alert evaluation for scan {scan_id}")

        scan = self.db.query(Scan).filter(Scan.id == scan_id).first()
        if not scan:
            logger.error(f"Scan {scan_id} not found")
            return []

        rules = self.db.query(AlertRule).filter(AlertRule.enabled == True).all()
        logger.info(f"Found {len(rules)} enabled alert rules to evaluate")

        # Dispatch table instead of an if/elif chain keeps rule types easy
        # to extend.
        checkers = {
            'unexpected_port': self.check_unexpected_ports,
            'drift_detection': self.check_drift_from_previous,
            'cert_expiry': self.check_certificate_expiry,
            'weak_tls': self.check_weak_tls,
            'ping_failed': self.check_ping_failures,
        }

        alerts_created: List[Alert] = []

        for rule in rules:
            try:
                # Rules may be scoped to a single config file.
                if rule.config_file and scan.config_file != rule.config_file:
                    logger.debug(f"Skipping rule {rule.id} - config mismatch")
                    continue

                checker = checkers.get(rule.rule_type)
                if checker is None:
                    logger.warning(f"Unknown rule type: {rule.rule_type}")
                    continue
                alert_data = checker(scan, rule)

                for alert_info in alert_data:
                    alert = self.create_alert(scan_id, rule, alert_info)
                    if alert:
                        alerts_created.append(alert)
                        # Only notify for alerts that actually persisted —
                        # guards against passing a None alert downstream.
                        if rule.email_enabled or rule.webhook_enabled:
                            self.trigger_notifications(alert, rule)

                logger.info(f"Rule {rule.name or rule.id} generated {len(alert_data)} alerts")

            except Exception as e:
                # One failing rule must not abort evaluation of the others.
                logger.error(f"Error evaluating rule {rule.id}: {str(e)}")
                continue

        logger.info(f"Alert evaluation complete. Created {len(alerts_created)} alerts")
        return alerts_created

    def check_unexpected_ports(self, scan: Scan, rule: AlertRule) -> List[Dict[str, Any]]:
        """
        Detect ports that are open but not in the expected_ports list.

        Args:
            scan: The scan to check.
            rule: The alert rule configuration.

        Returns:
            List of alert data dictionaries.
        """
        alerts_to_create: List[Dict[str, Any]] = []

        # Ports flagged expected=False (not in the config's expected_ports)
        # that were nevertheless observed open.
        unexpected_ports = (
            self.db.query(ScanPort, ScanIP)
            .join(ScanIP, ScanPort.ip_id == ScanIP.id)
            .filter(ScanPort.scan_id == scan.id)
            .filter(ScanPort.expected == False)
            .filter(ScanPort.state == 'open')
            .all()
        )

        # Well-known remote-access / database ports escalate to critical
        # when the rule itself does not pin a severity.
        high_risk_ports = {
            22,     # SSH
            23,     # Telnet
            135,    # Windows RPC
            139,    # NetBIOS
            445,    # SMB
            1433,   # SQL Server
            3306,   # MySQL
            3389,   # RDP
            5432,   # PostgreSQL
            5900,   # VNC
            6379,   # Redis
            9200,   # Elasticsearch
            27017,  # MongoDB
        }

        for port, ip in unexpected_ports:
            severity = rule.severity or ('critical' if port.port in high_risk_ports else 'warning')

            service = (
                self.db.query(ScanServiceModel)
                .filter(ScanServiceModel.port_id == port.id)
                .first()
            )

            service_info = ""
            if service:
                product = service.product or "Unknown"
                version = service.version or ""
                # Strip only the inner detail; the original stripped the whole
                # literal and lost the leading space before "(Service:".
                detail = f"{service.service_name}: {product} {version}".strip()
                service_info = f" (Service: {detail})"

            alerts_to_create.append({
                'alert_type': 'unexpected_port',
                'severity': severity,
                'message': f"Unexpected port open on {ip.ip_address}:{port.port}/{port.protocol}{service_info}",
                'ip_address': ip.ip_address,
                'port': port.port
            })

        return alerts_to_create

    def check_drift_from_previous(self, scan: Scan, rule: AlertRule) -> List[Dict[str, Any]]:
        """
        Compare current scan to the last completed scan with the same config.

        Args:
            scan: The current scan.
            rule: The alert rule configuration.

        Returns:
            List of alert data dictionaries.
        """
        alerts_to_create: List[Dict[str, Any]] = []

        # Most recent prior completed scan of the same config. Ordering by id
        # fixes the original's `Scan.started_at.desc() if Scan.started_at
        # else ...` — a Column object is always truthy, so that conditional's
        # else-arm was dead code; id ordering is also NULL-safe.
        previous_scan = (
            self.db.query(Scan)
            .filter(Scan.config_file == scan.config_file)
            .filter(Scan.id < scan.id)
            .filter(Scan.status == 'completed')
            .order_by(Scan.id.desc())
            .first()
        )

        if not previous_scan:
            logger.info(f"No previous scan found for config {scan.config_file}")
            return []

        try:
            # Reuse the comparison logic from scan_service.
            comparison = self.scan_service.compare_scans(previous_scan.id, scan.id)

            # New ports
            for port_data in comparison.get('ports', {}).get('added', []):
                alerts_to_create.append({
                    'alert_type': 'drift_new_port',
                    'severity': rule.severity or 'warning',
                    'message': f"New port detected: {port_data['ip']}:{port_data['port']}/{port_data['protocol']}",
                    'ip_address': port_data['ip'],
                    'port': port_data['port']
                })

            # Removed ports
            for port_data in comparison.get('ports', {}).get('removed', []):
                alerts_to_create.append({
                    'alert_type': 'drift_missing_port',
                    'severity': rule.severity or 'info',
                    'message': f"Port no longer open: {port_data['ip']}:{port_data['port']}/{port_data['protocol']}",
                    'ip_address': port_data['ip'],
                    'port': port_data['port']
                })

            # Service changes
            for svc_data in comparison.get('services', {}).get('changed', []):
                old_svc = svc_data.get('old', {})
                new_svc = svc_data.get('new', {})

                old_desc = f"{old_svc.get('product', 'Unknown')} {old_svc.get('version', '')}".strip()
                new_desc = f"{new_svc.get('product', 'Unknown')} {new_svc.get('version', '')}".strip()

                alerts_to_create.append({
                    'alert_type': 'drift_service_change',
                    'severity': rule.severity or 'info',
                    'message': f"Service changed on {svc_data['ip']}:{svc_data['port']}: {old_desc} → {new_desc}",
                    'ip_address': svc_data['ip'],
                    'port': svc_data['port']
                })

            # Certificate changes
            for cert_data in comparison.get('certificates', {}).get('changed', []):
                old_cert = cert_data.get('old', {})
                new_cert = cert_data.get('new', {})

                alerts_to_create.append({
                    'alert_type': 'drift_cert_change',
                    'severity': rule.severity or 'warning',
                    'message': f"Certificate changed on {cert_data['ip']}:{cert_data['port']} - "
                               f"Subject: {old_cert.get('subject', 'Unknown')} → {new_cert.get('subject', 'Unknown')}",
                    'ip_address': cert_data['ip'],
                    'port': cert_data['port']
                })

            # Drift score threshold (rule.threshold is a percentage).
            # Read the score once so the message cannot KeyError when the
            # comparison omits 'drift_score'.
            drift_score = comparison.get('drift_score', 0)
            if rule.threshold and drift_score * 100 >= rule.threshold:
                alerts_to_create.append({
                    'alert_type': 'drift_threshold_exceeded',
                    'severity': rule.severity or 'warning',
                    'message': f"Drift score {drift_score * 100:.1f}% exceeds threshold {rule.threshold}%",
                    'ip_address': None,
                    'port': None
                })

        except Exception as e:
            logger.error(f"Error comparing scans: {str(e)}")

        return alerts_to_create

    def check_certificate_expiry(self, scan: Scan, rule: AlertRule) -> List[Dict[str, Any]]:
        """
        Check for certificates expiring within the threshold days.

        Args:
            scan: The scan to check.
            rule: The alert rule configuration (threshold = days, default 30).

        Returns:
            List of alert data dictionaries.
        """
        alerts_to_create: List[Dict[str, Any]] = []
        threshold_days = rule.threshold or 30  # Default 30 days

        certificates = (
            self.db.query(ScanCertificate, ScanPort, ScanIP)
            .join(ScanPort, ScanCertificate.port_id == ScanPort.id)
            .join(ScanIP, ScanPort.ip_id == ScanIP.id)
            .filter(ScanPort.scan_id == scan.id)
            .all()
        )

        for cert, port, ip in certificates:
            days_left = cert.days_until_expiry
            if days_left is None or days_left > threshold_days:
                continue

            # Severity escalates as expiry approaches; <= 0 means expired.
            if days_left <= 0:
                severity = 'critical'
                message = f"Certificate EXPIRED on {ip.ip_address}:{port.port}"
            else:
                if days_left <= 7:
                    severity = 'critical'
                elif days_left <= 14:
                    severity = 'warning'
                else:
                    severity = 'info'
                message = f"Certificate expires in {days_left} days on {ip.ip_address}:{port.port}"

            alerts_to_create.append({
                'alert_type': 'cert_expiry',
                'severity': severity,
                'message': message,
                'ip_address': ip.ip_address,
                'port': port.port
            })

        return alerts_to_create

    def check_weak_tls(self, scan: Scan, rule: AlertRule) -> List[Dict[str, Any]]:
        """
        Check for weak TLS versions (1.0, 1.1).

        Args:
            scan: The scan to check.
            rule: The alert rule configuration.

        Returns:
            List of alert data dictionaries.
        """
        alerts_to_create: List[Dict[str, Any]] = []

        tls_versions = (
            self.db.query(ScanTLSVersion, ScanPort, ScanIP)
            .join(ScanPort, ScanTLSVersion.port_id == ScanPort.id)
            .join(ScanIP, ScanPort.ip_id == ScanIP.id)
            .filter(ScanPort.scan_id == scan.id)
            .all()
        )

        for tls, port, ip in tls_versions:
            weak_versions = []
            if tls.tls_1_0:
                weak_versions.append("TLS 1.0")
            if tls.tls_1_1:
                weak_versions.append("TLS 1.1")

            if weak_versions:
                alerts_to_create.append({
                    'alert_type': 'weak_tls',
                    'severity': rule.severity or 'warning',
                    'message': f"Weak TLS versions supported on {ip.ip_address}:{port.port}: {', '.join(weak_versions)}",
                    'ip_address': ip.ip_address,
                    'port': port.port
                })

        return alerts_to_create

    def check_ping_failures(self, scan: Scan, rule: AlertRule) -> List[Dict[str, Any]]:
        """
        Check for hosts that were expected to respond to ping but didn't.

        Args:
            scan: The scan to check.
            rule: The alert rule configuration.

        Returns:
            List of alert data dictionaries.
        """
        failed_pings = (
            self.db.query(ScanIP)
            .filter(ScanIP.scan_id == scan.id)
            .filter(ScanIP.ping_expected == True)
            .filter(ScanIP.ping_actual == False)
            .all()
        )

        return [
            {
                'alert_type': 'ping_failed',
                'severity': rule.severity or 'warning',
                'message': f"Host {ip.ip_address} did not respond to ping (expected to be up)",
                'ip_address': ip.ip_address,
                'port': None
            }
            for ip in failed_pings
        ]

    def create_alert(self, scan_id: int, rule: AlertRule, alert_data: Dict[str, Any]) -> Optional[Alert]:
        """
        Create an alert record in the database.

        Args:
            scan_id: ID of the scan that triggered the alert.
            rule: The alert rule that was triggered.
            alert_data: Dictionary with alert details.

        Returns:
            Created Alert object, or None if creation failed (the session
            is rolled back so subsequent inserts can proceed).
        """
        try:
            alert = Alert(
                scan_id=scan_id,
                rule_id=rule.id,
                alert_type=alert_data['alert_type'],
                severity=alert_data['severity'],
                message=alert_data['message'],
                ip_address=alert_data.get('ip_address'),
                port=alert_data.get('port'),
                created_at=datetime.now(timezone.utc)
            )

            self.db.add(alert)
            self.db.commit()

            logger.info(f"Created alert: {alert.message}")
            return alert

        except Exception as e:
            logger.error(f"Failed to create alert: {str(e)}")
            self.db.rollback()
            return None

    def trigger_notifications(self, alert: Alert, rule: AlertRule):
        """
        Send notifications for an alert based on rule configuration.

        Args:
            alert: The alert to send notifications for.
            rule: The rule that specifies notification settings.
        """
        # Email notification will be implemented in email_service.py
        if rule.email_enabled:
            logger.info(f"Email notification would be sent for alert {alert.id}")
            # TODO: Call email service

        # Webhook notification will be implemented in webhook_service.py
        if rule.webhook_enabled:
            logger.info(f"Webhook notification would be sent for alert {alert.id}")
            # TODO: Call webhook service

    def acknowledge_alert(self, alert_id: int, acknowledged_by: str = "system") -> bool:
        """
        Acknowledge an alert.

        Args:
            alert_id: ID of the alert to acknowledge.
            acknowledged_by: Username or system identifier.

        Returns:
            True if successful, False otherwise.
        """
        try:
            alert = self.db.query(Alert).filter(Alert.id == alert_id).first()
            if not alert:
                logger.error(f"Alert {alert_id} not found")
                return False

            alert.acknowledged = True
            alert.acknowledged_at = datetime.now(timezone.utc)
            alert.acknowledged_by = acknowledged_by

            self.db.commit()
            logger.info(f"Alert {alert_id} acknowledged by {acknowledged_by}")
            return True

        except Exception as e:
            logger.error(f"Failed to acknowledge alert {alert_id}: {str(e)}")
            self.db.rollback()
            return False

    def get_alerts_for_scan(self, scan_id: int) -> List[Alert]:
        """
        Get all alerts for a specific scan, most severe and newest first.

        Args:
            scan_id: ID of the scan.

        Returns:
            List of Alert objects.
        """
        from sqlalchemy import case  # local import keeps module deps unchanged

        # Explicit rank: string DESC ordering would place 'warning' above
        # 'critical'. Unknown severities sort last.
        severity_rank = case(
            (Alert.severity == 'critical', 0),
            (Alert.severity == 'warning', 1),
            (Alert.severity == 'info', 2),
            else_=3,
        )
        return (
            self.db.query(Alert)
            .filter(Alert.scan_id == scan_id)
            .order_by(severity_rank, Alert.created_at.desc())
            .all()
        )
+
+

Alert Rules

+
+ + View Alerts + + +
+
+
+ + +
+
+
+
+
Total Rules
+

{{ rules | length }}

+
+
+
+
+
+
+
Active Rules
+

{{ rules | selectattr('enabled') | list | length }}

+
+
+
+
+ + +
+
+
+
+
Alert Rules Configuration
+
+
+ {% if rules %} +
+ + + + + + + + + + + + + + + {% for rule in rules %} + + + + + + + + + + + {% endfor %} + +
NameTypeSeverityThresholdConfigNotificationsStatusActions
+ {{ rule.name or 'Unnamed Rule' }} +
+ ID: {{ rule.id }} +
+ + {{ rule.rule_type.replace('_', ' ').title() }} + + + {% if rule.severity == 'critical' %} + Critical + {% elif rule.severity == 'warning' %} + Warning + {% else %} + {{ rule.severity or 'Info' }} + {% endif %} + + {% if rule.threshold %} + {% if rule.rule_type == 'cert_expiry' %} + {{ rule.threshold }} days + {% elif rule.rule_type == 'drift_detection' %} + {{ rule.threshold }}% + {% else %} + {{ rule.threshold }} + {% endif %} + {% else %} + - + {% endif %} + + {% if rule.config_file %} + {{ rule.config_file }} + {% else %} + All Configs + {% endif %} + + {% if rule.email_enabled %} + + {% endif %} + {% if rule.webhook_enabled %} + + {% endif %} + {% if not rule.email_enabled and not rule.webhook_enabled %} + None + {% endif %} + +
+ + +
+
+ + +
+
+ {% else %} +
+ +
No alert rules configured
+

Create alert rules to be notified of important scan findings.

+ +
+ {% endif %} +
+
+
+
+ + + + + +{% endblock %} \ No newline at end of file diff --git a/app/web/templates/alerts.html b/app/web/templates/alerts.html new file mode 100644 index 0000000..6ed1ee3 --- /dev/null +++ b/app/web/templates/alerts.html @@ -0,0 +1,269 @@ +{% extends "base.html" %} + +{% block title %}Alerts - SneakyScanner{% endblock %} + +{% block content %} +
+
+

Alert History

+ + Manage Alert Rules + +
+
+ + +
+
+
+
+
Total Alerts
+

{{ pagination.total }}

+
+
+
+
+
+
+
Critical
+

+ {{ alerts | selectattr('severity', 'equalto', 'critical') | list | length }} +

+
+
+
+
+
+
+
Warnings
+

+ {{ alerts | selectattr('severity', 'equalto', 'warning') | list | length }} +

+
+
+
+
+
+
+
Unacknowledged
+

+ {{ alerts | rejectattr('acknowledged') | list | length }} +

+
+
+
+
+ + +
+
+
+
+
+
+ + +
+
+ + +
+
+ + +
+
+ +
+
+
+
+
+
+ + +
+
+
+
+
Alerts
+
+
+ {% if alerts %} +
+ + + + + + + + + + + + + + + {% for alert in alerts %} + + + + + + + + + + + {% endfor %} + +
SeverityTypeMessageTargetScanCreatedStatusActions
+ {% if alert.severity == 'critical' %} + Critical + {% elif alert.severity == 'warning' %} + Warning + {% else %} + Info + {% endif %} + + {{ alert.alert_type.replace('_', ' ').title() }} + + {{ alert.message }} + + {% if alert.ip_address %} + + {{ alert.ip_address }}{% if alert.port %}:{{ alert.port }}{% endif %} + + {% else %} + - + {% endif %} + + + Scan #{{ alert.scan_id }} + + + {{ alert.created_at.strftime('%Y-%m-%d %H:%M') }} + + {% if alert.acknowledged %} + + Ack'd + + {% else %} + New + {% endif %} + {% if alert.email_sent %} + + {% endif %} + {% if alert.webhook_sent %} + + {% endif %} + + {% if not alert.acknowledged %} + + {% else %} + + By: {{ alert.acknowledged_by }} + + {% endif %} +
+
+ + + {% if pagination.pages > 1 %} + + {% endif %} + {% else %} +
+ +
No alerts found
+

Alerts will appear here when scan results trigger alert rules.

+ + Configure Alert Rules + +
+ {% endif %} +
+
+
+
+ + +{% endblock %} \ No newline at end of file diff --git a/app/web/templates/base.html b/app/web/templates/base.html index 41be29c..e817d33 100644 --- a/app/web/templates/base.html +++ b/app/web/templates/base.html @@ -57,6 +57,16 @@ Configs +