diff --git a/.env.example b/.env.example
index e2fdf42..c057551 100644
--- a/.env.example
+++ b/.env.example
@@ -57,6 +57,17 @@ SCHEDULER_EXECUTORS=2
 # Recommended: 3 for typical usage
 SCHEDULER_JOB_DEFAULTS_MAX_INSTANCES=3
 
+# ================================
+# UDP Scanning Configuration
+# ================================
+# Enable UDP port scanning (disabled by default as it's slower)
+UDP_SCAN_ENABLED=false
+
+# UDP ports to scan when enabled
+# Supports ranges (e.g., 100-200), lists (e.g., 53,67,68), or mixed (e.g., 53,67-69,123)
+# Default: common UDP services
+UDP_PORTS=53,67,68,69,123,161,500,514,1900
+
 # ================================
 # Initial Password (First Run)
 # ================================
diff --git a/app/migrations/versions/012_add_scan_progress.py b/app/migrations/versions/012_add_scan_progress.py
new file mode 100644
index 0000000..2da3309
--- /dev/null
+++ b/app/migrations/versions/012_add_scan_progress.py
@@ -0,0 +1,58 @@
+"""Add scan progress tracking
+
+Revision ID: 012
+Revises: 011
+Create Date: 2024-01-01 00:00:00.000000
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '012'
+down_revision = '011'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # Add progress tracking columns to scans table
+    op.add_column('scans', sa.Column('current_phase', sa.String(50), nullable=True,
+                                     comment='Current scan phase: ping, tcp_scan, udp_scan, service_detection, http_analysis'))
+    op.add_column('scans', sa.Column('total_ips', sa.Integer(), nullable=True,
+                                     comment='Total number of IPs to scan'))
+    op.add_column('scans', sa.Column('completed_ips', sa.Integer(), nullable=True, default=0,
+                                     comment='Number of IPs completed in current phase'))
+
+    # Create scan_progress table for per-IP progress tracking
+    op.create_table(
+        'scan_progress',
+        sa.Column('id', sa.Integer(), primary_key=True, autoincrement=True),
+        sa.Column('scan_id', sa.Integer(), sa.ForeignKey('scans.id'), nullable=False, index=True),
+        sa.Column('ip_address', sa.String(45), nullable=False, comment='IP address being scanned'),
+        sa.Column('site_name', sa.String(255), nullable=True, comment='Site name this IP belongs to'),
+        sa.Column('phase', sa.String(50), nullable=False,
+                  comment='Phase: ping, tcp_scan, udp_scan, service_detection, http_analysis'),
+        sa.Column('status', sa.String(20), nullable=False, default='pending',
+                  comment='pending, in_progress, completed, failed'),
+        sa.Column('ping_result', sa.Boolean(), nullable=True, comment='Ping response result'),
+        sa.Column('tcp_ports', sa.Text(), nullable=True, comment='JSON array of discovered TCP ports'),
+        sa.Column('udp_ports', sa.Text(), nullable=True, comment='JSON array of discovered UDP ports'),
+        sa.Column('services', sa.Text(), nullable=True, comment='JSON array of detected services'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, server_default=sa.func.now(),
+                  comment='Entry creation time'),
+        sa.Column('updated_at', sa.DateTime(), nullable=False, server_default=sa.func.now(),
+                  onupdate=sa.func.now(), comment='Last update time'),
+        sa.UniqueConstraint('scan_id', 'ip_address', name='uix_scan_progress_ip')
+    )
+
+
+def downgrade():
+    # Drop scan_progress table
+    op.drop_table('scan_progress')
+
+    # Remove progress tracking columns from scans table
+    op.drop_column('scans', 'completed_ips')
+    op.drop_column('scans', 'total_ips')
+    op.drop_column('scans', 'current_phase')
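The UDP_PORTS value is handed to masscan unchanged, so it must follow masscan's comma/range syntax described in the .env comments above. A minimal sketch of how such a spec could be expanded for validation or display purposes (the `expand_port_spec` helper below is hypothetical, not part of this change):

```python
import os


def expand_port_spec(spec: str):
    """Expand a masscan-style port spec like '53,67-69,123' into a sorted list of ints."""
    ports = set()
    for part in spec.split(','):
        part = part.strip()
        if not part:
            continue
        if '-' in part:
            start, end = part.split('-', 1)
            ports.update(range(int(start), int(end) + 1))
        else:
            ports.add(int(part))
    return sorted(ports)


if __name__ == '__main__':
    udp_ports = os.environ.get('UDP_PORTS', '53,67,68,69,123,161,500,514,1900')
    print(expand_port_spec(udp_ports))  # [53, 67, 68, 69, 123, 161, 500, 514, 1900]
```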
diff --git a/app/src/scanner.py b/app/src/scanner.py
index 3964cb3..1cbffbc 100644
--- a/app/src/scanner.py
+++ b/app/src/scanner.py
@@ -6,14 +6,17 @@ SneakyScanner - Masscan-based network scanner with YAML configuration
 import argparse
 import json
 import logging
+import os
+import signal
 import subprocess
 import sys
 import tempfile
+import threading
 import time
 import zipfile
 from datetime import datetime
 from pathlib import Path
-from typing import Dict, List, Any
+from typing import Dict, List, Any, Callable, Optional
 import xml.etree.ElementTree as ET
 
 import yaml
@@ -22,12 +25,18 @@ from libnmap.parser import NmapParser
 
 from src.screenshot_capture import ScreenshotCapture
 from src.report_generator import HTMLReportGenerator
+from web.config import NMAP_HOST_TIMEOUT
 
 # Force unbuffered output for Docker
 sys.stdout.reconfigure(line_buffering=True)
 sys.stderr.reconfigure(line_buffering=True)
 
 
+class ScanCancelledError(Exception):
+    """Raised when a scan is cancelled by the user."""
+    pass
+
+
 class SneakyScanner:
     """Wrapper for masscan to perform network scans based on YAML config or database config"""
 
@@ -61,6 +70,34 @@ class SneakyScanner:
 
         self.screenshot_capture = None
 
+        # Cancellation support
+        self._cancelled = False
+        self._cancel_lock = threading.Lock()
+        self._active_process = None
+        self._process_lock = threading.Lock()
+
+    def cancel(self):
+        """
+        Cancel the running scan.
+
+        Terminates any active subprocess and sets cancellation flag.
+        """
+        with self._cancel_lock:
+            self._cancelled = True
+
+        with self._process_lock:
+            if self._active_process and self._active_process.poll() is None:
+                try:
+                    # Terminate the process group
+                    os.killpg(os.getpgid(self._active_process.pid), signal.SIGTERM)
+                except (ProcessLookupError, OSError):
+                    pass
+
+    def is_cancelled(self) -> bool:
+        """Check if scan has been cancelled."""
+        with self._cancel_lock:
+            return self._cancelled
+
     def _load_config(self) -> Dict[str, Any]:
         """
        Load and validate configuration from file or database.
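The cancellation design above has two halves: a thread-safe flag that the scan loop checks between phases, and a SIGTERM to the whole process group (masscan/nmap are launched with `start_new_session=True` later in this diff, so `os.killpg` reaches their children). A rough sketch of how a controller thread might drive it, outside the patch itself (the config ID and sleep are illustrative assumptions):

```python
import threading
import time

from src.scanner import SneakyScanner, ScanCancelledError

scanner = SneakyScanner(config_id=1)  # assumed config ID


def run():
    try:
        scanner.scan()
    except ScanCancelledError:
        print("scan stopped cleanly")


worker = threading.Thread(target=run, daemon=True)
worker.start()

time.sleep(30)        # ...later, e.g. when a user clicks "Stop"
scanner.cancel()      # sets the flag and SIGTERMs the active process group
worker.join(timeout=60)
```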
@@ -381,11 +418,31 @@ class SneakyScanner:
             raise ValueError(f"Invalid protocol: {protocol}")
 
         print(f"Running: {' '.join(cmd)}", flush=True)
-        result = subprocess.run(cmd, capture_output=True, text=True)
+
+        # Use Popen for cancellation support
+        with self._process_lock:
+            self._active_process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                start_new_session=True
+            )
+
+        stdout, stderr = self._active_process.communicate()
+        returncode = self._active_process.returncode
+
+        with self._process_lock:
+            self._active_process = None
+
+        # Check if cancelled
+        if self.is_cancelled():
+            return []
+
         print(f"Masscan {protocol.upper()} scan completed", flush=True)
 
-        if result.returncode != 0:
-            print(f"Masscan stderr: {result.stderr}", file=sys.stderr)
+        if returncode != 0:
+            print(f"Masscan stderr: {stderr}", file=sys.stderr)
 
         # Parse masscan JSON output
         results = []
@@ -433,11 +490,31 @@ class SneakyScanner:
         ]
 
         print(f"Running: {' '.join(cmd)}", flush=True)
-        result = subprocess.run(cmd, capture_output=True, text=True)
+
+        # Use Popen for cancellation support
+        with self._process_lock:
+            self._active_process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                start_new_session=True
+            )
+
+        stdout, stderr = self._active_process.communicate()
+        returncode = self._active_process.returncode
+
+        with self._process_lock:
+            self._active_process = None
+
+        # Check if cancelled
+        if self.is_cancelled():
+            return {}
+
         print(f"Masscan PING scan completed", flush=True)
 
-        if result.returncode != 0:
-            print(f"Masscan stderr: {result.stderr}", file=sys.stderr, flush=True)
+        if returncode != 0:
+            print(f"Masscan stderr: {stderr}", file=sys.stderr, flush=True)
 
         # Parse results
         responding_ips = set()
@@ -475,6 +552,10 @@ class SneakyScanner:
         all_services = {}
 
         for ip, ports in ip_ports.items():
+            # Check if cancelled before each host
+            if self.is_cancelled():
+                break
+
             if not ports:
                 all_services[ip] = []
                 continue
@@ -496,14 +577,33 @@ class SneakyScanner:
                 '--version-intensity', '5',  # Balanced speed/accuracy
                 '-p', port_list,
                 '-oX', xml_output,  # XML output
-                '--host-timeout', '5m',  # Timeout per host
+                '--host-timeout', NMAP_HOST_TIMEOUT,  # Timeout per host
                 ip
             ]
 
-            result = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
+            # Use Popen for cancellation support
+            with self._process_lock:
+                self._active_process = subprocess.Popen(
+                    cmd,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    text=True,
+                    start_new_session=True
+                )
 
-            if result.returncode != 0:
-                print(f"  Nmap warning for {ip}: {result.stderr}", file=sys.stderr, flush=True)
+            stdout, stderr = self._active_process.communicate(timeout=600)
+            returncode = self._active_process.returncode
+
+            with self._process_lock:
+                self._active_process = None
+
+            # Check if cancelled
+            if self.is_cancelled():
+                Path(xml_output).unlink(missing_ok=True)
+                break
+
+            if returncode != 0:
+                print(f"  Nmap warning for {ip}: {stderr}", file=sys.stderr, flush=True)
 
             # Parse XML output
             services = self._parse_nmap_xml(xml_output)
@@ -832,10 +932,17 @@ class SneakyScanner:
 
         return all_results
 
-    def scan(self) -> Dict[str, Any]:
+    def scan(self, progress_callback: Optional[Callable] = None) -> Dict[str, Any]:
         """
         Perform complete scan based on configuration
 
+        Args:
+            progress_callback: Optional callback function for progress updates.
+                Called with (phase, ip, data) where:
+                - phase: 'init', 'ping', 'tcp_scan', 'udp_scan', 'service_detection', 'http_analysis'
+                - ip: IP address being processed (or None for phase start)
+                - data: Dict with progress data (results, counts, etc.)
+
         Returns:
             Dictionary containing scan results
         """
@@ -872,17 +979,61 @@ class SneakyScanner:
         all_ips = sorted(list(all_ips))
         print(f"Total IPs to scan: {len(all_ips)}", flush=True)
 
+        # Report initialization with total IP count
+        if progress_callback:
+            progress_callback('init', None, {
+                'total_ips': len(all_ips),
+                'ip_to_site': ip_to_site
+            })
+
         # Perform ping scan
         print(f"\n[1/5] Performing ping scan on {len(all_ips)} IPs...", flush=True)
+        if progress_callback:
+            progress_callback('ping', None, {'status': 'starting'})
         ping_results = self._run_ping_scan(all_ips)
 
+        # Check for cancellation
+        if self.is_cancelled():
+            print("\nScan cancelled by user", flush=True)
+            raise ScanCancelledError("Scan cancelled by user")
+
+        # Report ping results
+        if progress_callback:
+            progress_callback('ping', None, {
+                'status': 'completed',
+                'results': ping_results
+            })
+
         # Perform TCP scan (all ports)
         print(f"\n[2/5] Performing TCP scan on {len(all_ips)} IPs (ports 0-65535)...", flush=True)
+        if progress_callback:
+            progress_callback('tcp_scan', None, {'status': 'starting'})
         tcp_results = self._run_masscan(all_ips, '0-65535', 'tcp')
 
-        # Perform UDP scan (all ports)
-        print(f"\n[3/5] Performing UDP scan on {len(all_ips)} IPs (ports 0-65535)...", flush=True)
-        udp_results = self._run_masscan(all_ips, '0-65535', 'udp')
+        # Check for cancellation
+        if self.is_cancelled():
+            print("\nScan cancelled by user", flush=True)
+            raise ScanCancelledError("Scan cancelled by user")
+
+        # Perform UDP scan (if enabled)
+        udp_enabled = os.environ.get('UDP_SCAN_ENABLED', 'false').lower() == 'true'
+        udp_ports = os.environ.get('UDP_PORTS', '53,67,68,69,123,161,500,514,1900')
+
+        if udp_enabled:
+            print(f"\n[3/5] Performing UDP scan on {len(all_ips)} IPs (ports {udp_ports})...", flush=True)
+            if progress_callback:
+                progress_callback('udp_scan', None, {'status': 'starting'})
+            udp_results = self._run_masscan(all_ips, udp_ports, 'udp')
+
+            # Check for cancellation
+            if self.is_cancelled():
+                print("\nScan cancelled by user", flush=True)
+                raise ScanCancelledError("Scan cancelled by user")
+        else:
+            print(f"\n[3/5] Skipping UDP scan (disabled)...", flush=True)
+            if progress_callback:
+                progress_callback('udp_scan', None, {'status': 'skipped'})
+            udp_results = []
 
         # Organize results by IP
         results_by_ip = {}
@@ -917,20 +1068,56 @@ class SneakyScanner:
             results_by_ip[ip]['actual']['tcp_ports'].sort()
             results_by_ip[ip]['actual']['udp_ports'].sort()
 
+        # Report TCP/UDP scan results with discovered ports per IP
+        if progress_callback:
+            tcp_udp_results = {}
+            for ip in all_ips:
+                tcp_udp_results[ip] = {
+                    'tcp_ports': results_by_ip[ip]['actual']['tcp_ports'],
+                    'udp_ports': results_by_ip[ip]['actual']['udp_ports']
+                }
+            progress_callback('tcp_scan', None, {
+                'status': 'completed',
+                'results': tcp_udp_results
+            })
+
         # Perform service detection on TCP ports
         print(f"\n[4/5] Performing service detection on discovered TCP ports...", flush=True)
+        if progress_callback:
+            progress_callback('service_detection', None, {'status': 'starting'})
         ip_ports = {ip: results_by_ip[ip]['actual']['tcp_ports'] for ip in all_ips}
         service_results = self._run_nmap_service_detection(ip_ports)
 
+        # Check for cancellation
+        if self.is_cancelled():
+            print("\nScan cancelled by user", flush=True)
+            raise ScanCancelledError("Scan cancelled by user")
+
         # Add service information to results
         for ip, services in service_results.items():
             if ip in results_by_ip:
                 results_by_ip[ip]['actual']['services'] = services
 
+        # Report service detection results
+        if progress_callback:
+            progress_callback('service_detection', None, {
+                'status': 'completed',
+                'results': service_results
+            })
+
         # Perform HTTP/HTTPS analysis on web services
         print(f"\n[5/5] Analyzing HTTP/HTTPS services and SSL/TLS configuration...", flush=True)
+        if progress_callback:
+            progress_callback('http_analysis', None, {'status': 'starting'})
         http_results = self._run_http_analysis(service_results)
 
+        # Report HTTP analysis completion
+        if progress_callback:
+            progress_callback('http_analysis', None, {
+                'status': 'completed',
+                'results': http_results
+            })
+
         # Merge HTTP analysis into service results
         for ip, port_results in http_results.items():
             if ip in results_by_ip:
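The `(phase, ip, data)` contract documented in `scan()` above is all a caller needs to implement. A minimal sketch of a callback that just logs phase transitions (illustrative only; the real callback that persists progress lives in `web/jobs/scan_job.py` below):

```python
from typing import Optional


def logging_progress_callback(phase: str, ip: Optional[str], data: dict) -> None:
    """Print phase transitions and per-phase result counts as the scan runs."""
    status = data.get('status')
    if phase == 'init':
        print(f"scan starting: {data.get('total_ips', 0)} IPs")
    elif status == 'starting':
        print(f"phase {phase} started")
    elif status == 'completed':
        print(f"phase {phase} finished ({len(data.get('results', {}))} entries)")
    elif status == 'skipped':
        print(f"phase {phase} skipped")


# e.g. scanner.scan(progress_callback=logging_progress_callback)
```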
diff --git a/app/web/api/scans.py b/app/web/api/scans.py
index f1d0edf..1f6a314 100644
--- a/app/web/api/scans.py
+++ b/app/web/api/scans.py
@@ -5,13 +5,16 @@ Handles endpoints for triggering scans, listing scan history,
 and retrieving scan results.
 """
 
+import json
 import logging
 
 from flask import Blueprint, current_app, jsonify, request
 from sqlalchemy.exc import SQLAlchemyError
 
 from web.auth.decorators import api_auth_required
+from web.models import Scan, ScanProgress
 from web.services.scan_service import ScanService
 from web.utils.pagination import validate_page_params
+from web.jobs.scan_job import stop_scan
 
 bp = Blueprint('scans', __name__)
 logger = logging.getLogger(__name__)
@@ -240,6 +243,71 @@ def delete_scan(scan_id):
         }), 500
 
 
+@bp.route('/<int:scan_id>/stop', methods=['POST'])
+@api_auth_required
+def stop_running_scan(scan_id):
+    """
+    Stop a running scan.
+
+    Args:
+        scan_id: Scan ID to stop
+
+    Returns:
+        JSON response with stop status
+    """
+    try:
+        session = current_app.db_session
+
+        # Check if scan exists and is running
+        scan = session.query(Scan).filter_by(id=scan_id).first()
+        if not scan:
+            logger.warning(f"Scan not found for stop request: {scan_id}")
+            return jsonify({
+                'error': 'Not found',
+                'message': f'Scan with ID {scan_id} not found'
+            }), 404
+
+        if scan.status != 'running':
+            logger.warning(f"Cannot stop scan {scan_id}: status is '{scan.status}'")
+            return jsonify({
+                'error': 'Invalid state',
+                'message': f"Cannot stop scan: status is '{scan.status}'"
+            }), 400
+
+        # Get database URL from app config
+        db_url = current_app.config['SQLALCHEMY_DATABASE_URI']
+
+        # Attempt to stop the scan
+        stopped = stop_scan(scan_id, db_url)
+
+        if stopped:
+            logger.info(f"Stop signal sent to scan {scan_id}")
+            return jsonify({
+                'scan_id': scan_id,
+                'message': 'Stop signal sent to scan',
+                'status': 'stopping'
+            }), 200
+        else:
+            logger.warning(f"Failed to stop scan {scan_id}: not found in running scanners")
+            return jsonify({
+                'error': 'Stop failed',
+                'message': 'Scan not found in running scanners registry'
+            }), 404
+
+    except SQLAlchemyError as e:
+        logger.error(f"Database error stopping scan {scan_id}: {str(e)}")
+        return jsonify({
+            'error': 'Database error',
+            'message': 'Failed to stop scan'
+        }), 500
+    except Exception as e:
+        logger.error(f"Unexpected error stopping scan {scan_id}: {str(e)}", exc_info=True)
+        return jsonify({
+            'error': 'Internal server error',
+            'message': 'An unexpected error occurred'
+        }), 500
+
+
 @bp.route('/<int:scan_id>/status', methods=['GET'])
 @api_auth_required
 def get_scan_status(scan_id):
@@ -281,6 +349,141 @@ def get_scan_status(scan_id):
         }), 500
 
 
+@bp.route('/<int:scan_id>/progress', methods=['GET'])
+@api_auth_required
+def get_scan_progress(scan_id):
+    """
+    Get detailed progress for a running scan including per-IP results.
+
+    Args:
+        scan_id: Scan ID
+
+    Returns:
+        JSON response with scan progress including:
+        - current_phase: Current scan phase
+        - total_ips: Total IPs being scanned
+        - completed_ips: Number of IPs completed in current phase
+        - progress_entries: List of per-IP progress with discovered results
+    """
+    try:
+        session = current_app.db_session
+
+        # Get scan record
+        scan = session.query(Scan).filter_by(id=scan_id).first()
+        if not scan:
+            logger.warning(f"Scan not found for progress check: {scan_id}")
+            return jsonify({
+                'error': 'Not found',
+                'message': f'Scan with ID {scan_id} not found'
+            }), 404
+
+        # Get progress entries
+        progress_entries = session.query(ScanProgress).filter_by(scan_id=scan_id).all()
+
+        # Build progress data
+        entries = []
+        for entry in progress_entries:
+            entry_data = {
+                'ip_address': entry.ip_address,
+                'site_name': entry.site_name,
+                'phase': entry.phase,
+                'status': entry.status,
+                'ping_result': entry.ping_result
+            }
+
+            # Parse JSON fields
+            if entry.tcp_ports:
+                entry_data['tcp_ports'] = json.loads(entry.tcp_ports)
+            else:
+                entry_data['tcp_ports'] = []
+
+            if entry.udp_ports:
+                entry_data['udp_ports'] = json.loads(entry.udp_ports)
+            else:
+                entry_data['udp_ports'] = []
+
+            if entry.services:
+                entry_data['services'] = json.loads(entry.services)
+            else:
+                entry_data['services'] = []
+
+            entries.append(entry_data)
+
+        # Sort entries by site name then IP (numerically)
+        def ip_sort_key(ip_str):
+            """Convert IP to tuple of integers for proper numeric sorting."""
+            try:
+                return tuple(int(octet) for octet in ip_str.split('.'))
+            except (ValueError, AttributeError):
+                return (0, 0, 0, 0)
+
+        entries.sort(key=lambda x: (x['site_name'] or '', ip_sort_key(x['ip_address'])))
+
+        response = {
+            'scan_id': scan_id,
+            'status': scan.status,
+            'current_phase': scan.current_phase or 'pending',
+            'total_ips': scan.total_ips or 0,
+            'completed_ips': scan.completed_ips or 0,
+            'progress_entries': entries
+        }
+
+        logger.debug(f"Retrieved progress for scan {scan_id}: phase={scan.current_phase}, {scan.completed_ips}/{scan.total_ips} IPs")
+        return jsonify(response)
+
+    except SQLAlchemyError as e:
+        logger.error(f"Database error retrieving scan progress {scan_id}: {str(e)}")
+        return jsonify({
+            'error': 'Database error',
+            'message': 'Failed to retrieve scan progress'
+        }), 500
+    except Exception as e:
+        logger.error(f"Unexpected error retrieving scan progress {scan_id}: {str(e)}", exc_info=True)
+        return jsonify({
+            'error': 'Internal server error',
+            'message': 'An unexpected error occurred'
+        }), 500
+
+
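For orientation, the payload returned by the progress endpoint has roughly the shape below. Values are illustrative; the field names follow the handler above, and the `/api/scans` URL prefix is an assumption about the blueprint registration, which this diff does not show.

```python
# Example (illustrative) response body for GET /api/scans/42/progress
progress_example = {
    'scan_id': 42,
    'status': 'running',
    'current_phase': 'service_detection',
    'total_ips': 12,
    'completed_ips': 7,
    'progress_entries': [
        {
            'ip_address': '10.0.0.5',
            'site_name': 'HQ',
            'phase': 'tcp_scan',
            'status': 'completed',
            'ping_result': True,
            'tcp_ports': [22, 443],
            'udp_ports': [],
            'services': []
        }
    ]
}
```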
+@bp.route('/by-ip/<ip_address>', methods=['GET'])
+@api_auth_required
+def get_scans_by_ip(ip_address):
+    """
+    Get last 10 scans containing a specific IP address.
+
+    Args:
+        ip_address: IP address to search for
+
+    Returns:
+        JSON response with list of scans containing the IP
+    """
+    try:
+        # Get scans from service
+        scan_service = ScanService(current_app.db_session)
+        scans = scan_service.get_scans_by_ip(ip_address)
+
+        logger.info(f"Retrieved {len(scans)} scans for IP: {ip_address}")
+
+        return jsonify({
+            'ip_address': ip_address,
+            'scans': scans,
+            'count': len(scans)
+        })
+
+    except SQLAlchemyError as e:
+        logger.error(f"Database error retrieving scans for IP {ip_address}: {str(e)}")
+        return jsonify({
+            'error': 'Database error',
+            'message': 'Failed to retrieve scans'
+        }), 500
+    except Exception as e:
+        logger.error(f"Unexpected error retrieving scans for IP {ip_address}: {str(e)}", exc_info=True)
+        return jsonify({
+            'error': 'Internal server error',
+            'message': 'An unexpected error occurred'
+        }), 500
+
+
 @bp.route('/<int:scan_id1>/compare/<int:scan_id2>', methods=['GET'])
 @api_auth_required
 def compare_scans(scan_id1, scan_id2):
diff --git a/app/web/api/sites.py b/app/web/api/sites.py
index 6440c3e..c1ccc5a 100644
--- a/app/web/api/sites.py
+++ b/app/web/api/sites.py
@@ -36,9 +36,15 @@ def list_sites():
     if request.args.get('all', '').lower() == 'true':
         site_service = SiteService(current_app.db_session)
         sites = site_service.list_all_sites()
+        ip_stats = site_service.get_global_ip_stats()
 
         logger.info(f"Listed all sites (count={len(sites)})")
-        return jsonify({'sites': sites})
+        return jsonify({
+            'sites': sites,
+            'total_ips': ip_stats['total_ips'],
+            'unique_ips': ip_stats['unique_ips'],
+            'duplicate_ips': ip_stats['duplicate_ips']
+        })
 
     # Get and validate query parameters
     page = request.args.get('page', 1, type=int)
diff --git a/app/web/config.py b/app/web/config.py
index 3d74f10..13d8785 100644
--- a/app/web/config.py
+++ b/app/web/config.py
@@ -11,3 +11,6 @@ APP_VERSION = '1.0.0-beta'
 
 # Repository URL
 REPO_URL = 'https://git.sneakygeek.net/sneakygeek/SneakyScan'
+
+# Scanner settings
+NMAP_HOST_TIMEOUT = '2m'  # Timeout per host for nmap service detection
diff --git a/app/web/jobs/scan_job.py b/app/web/jobs/scan_job.py
index d7fcb72..6b48c21 100644
--- a/app/web/jobs/scan_job.py
+++ b/app/web/jobs/scan_job.py
@@ -5,7 +5,9 @@ This module handles the execution of scans in background threads,
 updating database status and handling errors.
 """
 
+import json
 import logging
+import threading
 import traceback
 from datetime import datetime
 from pathlib import Path
@@ -13,13 +15,168 @@ from pathlib import Path
 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker
 
-from src.scanner import SneakyScanner
-from web.models import Scan
+from src.scanner import SneakyScanner, ScanCancelledError
+from web.models import Scan, ScanProgress
 from web.services.scan_service import ScanService
 from web.services.alert_service import AlertService
 
 logger = logging.getLogger(__name__)
 
+# Registry for tracking running scanners (scan_id -> SneakyScanner instance)
+_running_scanners = {}
+_running_scanners_lock = threading.Lock()
+
+
+def get_running_scanner(scan_id: int):
+    """Get a running scanner instance by scan ID."""
+    with _running_scanners_lock:
+        return _running_scanners.get(scan_id)
+
+
+def stop_scan(scan_id: int, db_url: str) -> bool:
+    """
+    Stop a running scan.
+
+    Args:
+        scan_id: ID of the scan to stop
+        db_url: Database connection URL
+
+    Returns:
+        True if scan was cancelled, False if not found or already stopped
+    """
+    logger.info(f"Attempting to stop scan {scan_id}")
+
+    # Get the scanner instance
+    scanner = get_running_scanner(scan_id)
+    if not scanner:
+        logger.warning(f"Scanner for scan {scan_id} not found in registry")
+        return False
+
+    # Cancel the scanner
+    scanner.cancel()
+    logger.info(f"Cancellation signal sent to scan {scan_id}")
+
+    return True
+
+
+def create_progress_callback(scan_id: int, session):
+    """
+    Create a progress callback function for updating scan progress in database.
+
+    Args:
+        scan_id: ID of the scan record
+        session: Database session
+
+    Returns:
+        Callback function that accepts (phase, ip, data)
+    """
+    ip_to_site = {}
+
+    def progress_callback(phase: str, ip: str, data: dict):
+        """Update scan progress in database."""
+        nonlocal ip_to_site
+
+        try:
+            # Get scan record
+            scan = session.query(Scan).filter_by(id=scan_id).first()
+            if not scan:
+                return
+
+            # Handle initialization phase
+            if phase == 'init':
+                scan.total_ips = data.get('total_ips', 0)
+                scan.completed_ips = 0
+                scan.current_phase = 'ping'
+                ip_to_site = data.get('ip_to_site', {})
+
+                # Create progress entries for all IPs
+                for ip_addr, site_name in ip_to_site.items():
+                    progress = ScanProgress(
+                        scan_id=scan_id,
+                        ip_address=ip_addr,
+                        site_name=site_name,
+                        phase='pending',
+                        status='pending'
+                    )
+                    session.add(progress)
+
+                session.commit()
+                return
+
+            # Update current phase
+            if data.get('status') == 'starting':
+                scan.current_phase = phase
+                scan.completed_ips = 0
+                session.commit()
+                return
+
+            # Handle phase completion with results
+            if data.get('status') == 'completed':
+                results = data.get('results', {})
+
+                if phase == 'ping':
+                    # Update progress entries with ping results
+                    for ip_addr, ping_result in results.items():
+                        progress = session.query(ScanProgress).filter_by(
+                            scan_id=scan_id, ip_address=ip_addr
+                        ).first()
+                        if progress:
+                            progress.ping_result = ping_result
+                            progress.phase = 'ping'
+                            progress.status = 'completed'
+
+                    scan.completed_ips = len(results)
+
+                elif phase == 'tcp_scan':
+                    # Update progress entries with TCP/UDP port results
+                    for ip_addr, port_data in results.items():
+                        progress = session.query(ScanProgress).filter_by(
+                            scan_id=scan_id, ip_address=ip_addr
+                        ).first()
+                        if progress:
+                            progress.tcp_ports = json.dumps(port_data.get('tcp_ports', []))
+                            progress.udp_ports = json.dumps(port_data.get('udp_ports', []))
+                            progress.phase = 'tcp_scan'
+                            progress.status = 'completed'
+
+                    scan.completed_ips = len(results)
+
+                elif phase == 'service_detection':
+                    # Update progress entries with service detection results
+                    for ip_addr, services in results.items():
+                        progress = session.query(ScanProgress).filter_by(
+                            scan_id=scan_id, ip_address=ip_addr
+                        ).first()
+                        if progress:
+                            # Simplify service data for storage
+                            service_list = []
+                            for svc in services:
+                                service_list.append({
+                                    'port': svc.get('port'),
+                                    'service': svc.get('service', 'unknown'),
+                                    'product': svc.get('product', ''),
+                                    'version': svc.get('version', '')
+                                })
+                            progress.services = json.dumps(service_list)
+                            progress.phase = 'service_detection'
+                            progress.status = 'completed'
+
+                    scan.completed_ips = len(results)
+
+                elif phase == 'http_analysis':
+                    # Mark HTTP analysis as complete
+                    scan.current_phase = 'completed'
+                    scan.completed_ips = scan.total_ips
+
+                session.commit()
+
+        except Exception as e:
+            logger.error(f"Progress callback error for scan {scan_id}: {str(e)}")
+            # Don't re-raise - we don't want to break the scan
+            session.rollback()
+
+    return progress_callback
+
 
 def execute_scan(scan_id: int, config_id: int, db_url: str = None):
     """
@@ -66,10 +223,18 @@
         # Initialize scanner with database config
         scanner = SneakyScanner(config_id=config_id)
 
-        # Execute scan
+        # Register scanner in the running registry
+        with _running_scanners_lock:
+            _running_scanners[scan_id] = scanner
+        logger.debug(f"Scan {scan_id}: Registered in running scanners registry")
+
+        # Create progress callback
+        progress_callback = create_progress_callback(scan_id, session)
+
+        # Execute scan with progress tracking
         logger.info(f"Scan {scan_id}: Running scanner...")
         start_time = datetime.utcnow()
-        report, timestamp = scanner.scan()
+        report, timestamp = scanner.scan(progress_callback=progress_callback)
         end_time = datetime.utcnow()
 
         scan_duration = (end_time - start_time).total_seconds()
@@ -97,6 +262,19 @@ def execute_scan(scan_id: int, config_id: int, db_url: str = None):
 
         logger.info(f"Scan {scan_id}: Completed successfully")
 
+    except ScanCancelledError:
+        # Scan was cancelled by user
+        logger.info(f"Scan {scan_id}: Cancelled by user")
+
+        scan = session.query(Scan).filter_by(id=scan_id).first()
+        if scan:
+            scan.status = 'cancelled'
+            scan.error_message = 'Scan cancelled by user'
+            scan.completed_at = datetime.utcnow()
+            if scan.started_at:
+                scan.duration = (datetime.utcnow() - scan.started_at).total_seconds()
+            session.commit()
+
     except FileNotFoundError as e:
         # Config file not found
         error_msg = f"Configuration file not found: {str(e)}"
@@ -126,6 +304,12 @@ def execute_scan(scan_id: int, config_id: int, db_url: str = None):
             logger.error(f"Scan {scan_id}: Failed to update error status in database: {str(db_error)}")
 
     finally:
+        # Unregister scanner from registry
+        with _running_scanners_lock:
+            if scan_id in _running_scanners:
+                del _running_scanners[scan_id]
+                logger.debug(f"Scan {scan_id}: Unregistered from running scanners registry")
+
         # Always close the session
         session.close()
         logger.info(f"Scan {scan_id}: Background job completed, session closed")
diff --git a/app/web/models.py b/app/web/models.py
index caa543a..feeb130 100644
--- a/app/web/models.py
+++ b/app/web/models.py
@@ -59,6 +59,11 @@ class Scan(Base):
     completed_at = Column(DateTime, nullable=True, comment="Scan execution completion time")
     error_message = Column(Text, nullable=True, comment="Error message if scan failed")
 
+    # Progress tracking fields
+    current_phase = Column(String(50), nullable=True, comment="Current scan phase: ping, tcp_scan, udp_scan, service_detection, http_analysis")
+    total_ips = Column(Integer, nullable=True, comment="Total number of IPs to scan")
+    completed_ips = Column(Integer, nullable=True, default=0, comment="Number of IPs completed in current phase")
+
     # Relationships
     sites = relationship('ScanSite', back_populates='scan', cascade='all, delete-orphan')
     ips = relationship('ScanIP', back_populates='scan', cascade='all, delete-orphan')
@@ -70,6 +75,7 @@
     schedule = relationship('Schedule', back_populates='scans')
     config = relationship('ScanConfig', back_populates='scans')
     site_associations = relationship('ScanSiteAssociation', back_populates='scan', cascade='all, delete-orphan')
+    progress_entries = relationship('ScanProgress', back_populates='scan', cascade='all, delete-orphan')
 
     def __repr__(self):
         return f""
@@ -244,6 +250,43 @@ class ScanTLSVersion(Base):
         return f""
 
 
+class ScanProgress(Base):
+    """
+    Real-time progress tracking for individual IPs during scan execution.
+
+    Stores intermediate results as they become available, allowing users to
+    see progress and results before the full scan completes.
+    """
+    __tablename__ = 'scan_progress'
+
+    id = Column(Integer, primary_key=True, autoincrement=True)
+    scan_id = Column(Integer, ForeignKey('scans.id'), nullable=False, index=True)
+    ip_address = Column(String(45), nullable=False, comment="IP address being scanned")
+    site_name = Column(String(255), nullable=True, comment="Site name this IP belongs to")
+    phase = Column(String(50), nullable=False, comment="Phase: ping, tcp_scan, udp_scan, service_detection, http_analysis")
+    status = Column(String(20), nullable=False, default='pending', comment="pending, in_progress, completed, failed")
+
+    # Results data (stored as JSON)
+    ping_result = Column(Boolean, nullable=True, comment="Ping response result")
+    tcp_ports = Column(Text, nullable=True, comment="JSON array of discovered TCP ports")
+    udp_ports = Column(Text, nullable=True, comment="JSON array of discovered UDP ports")
+    services = Column(Text, nullable=True, comment="JSON array of detected services")
+
+    created_at = Column(DateTime, nullable=False, default=datetime.utcnow, comment="Entry creation time")
+    updated_at = Column(DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow, comment="Last update time")
+
+    # Relationships
+    scan = relationship('Scan', back_populates='progress_entries')
+
+    # Index for efficient lookups
+    __table_args__ = (
+        UniqueConstraint('scan_id', 'ip_address', name='uix_scan_progress_ip'),
+    )
+
+    def __repr__(self):
+        return f""
+
+
 # ============================================================================
 # Reusable Site Definition Tables
 # ============================================================================
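As a quick illustration of reading the new model back, a sketch of a standalone query against it (the SQLite connection string and scan ID are assumptions; the running app builds its session from the db_url passed to execute_scan):

```python
import json

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from web.models import ScanProgress

# Assumed connection string for illustration only
engine = create_engine('sqlite:///sneakyscan.db')
Session = sessionmaker(bind=engine)
session = Session()

# List per-IP progress for scan 42, most useful while the scan is still running
for entry in session.query(ScanProgress).filter_by(scan_id=42).all():
    tcp = json.loads(entry.tcp_ports) if entry.tcp_ports else []
    print(f"{entry.ip_address:15} {entry.phase:18} {entry.status:11} tcp={tcp}")
```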
diff --git a/app/web/routes/main.py b/app/web/routes/main.py
index 8c5f366..ee09097 100644
--- a/app/web/routes/main.py
+++ b/app/web/routes/main.py
@@ -7,7 +7,7 @@ Provides dashboard and scan viewing pages.
 import logging
 import os
 
-from flask import Blueprint, current_app, redirect, render_template, send_from_directory, url_for
+from flask import Blueprint, current_app, redirect, render_template, request, send_from_directory, url_for
 
 from web.auth.decorators import login_required
 
@@ -83,6 +83,19 @@ def compare_scans(scan_id1, scan_id2):
     return render_template('scan_compare.html', scan_id1=scan_id1, scan_id2=scan_id2)
 
 
+@bp.route('/search/ip')
+@login_required
+def search_ip():
+    """
+    IP search results page - shows scans containing a specific IP address.
+
+    Returns:
+        Rendered search results template
+    """
+    ip_address = request.args.get('ip', '').strip()
+    return render_template('ip_search_results.html', ip_address=ip_address)
+
+
 @bp.route('/schedules')
 @login_required
 def schedules():
diff --git a/app/web/services/scan_service.py b/app/web/services/scan_service.py
index 9aea0b8..ba518c6 100644
--- a/app/web/services/scan_service.py
+++ b/app/web/services/scan_service.py
@@ -16,7 +16,7 @@ from sqlalchemy.orm import Session, joinedload
 
 from web.models import (
     Scan, ScanSite, ScanIP, ScanPort, ScanService as ScanServiceModel,
-    ScanCertificate, ScanTLSVersion, Site, ScanSiteAssociation
+    ScanCertificate, ScanTLSVersion, Site, ScanSiteAssociation, SiteIP
 )
 from web.utils.pagination import paginate, PaginatedResult
 from web.utils.validators import validate_scan_status
@@ -257,9 +257,35 @@ class ScanService:
         elif scan.status == 'failed':
             status_info['progress'] = 'Failed'
             status_info['error_message'] = scan.error_message
+        elif scan.status == 'cancelled':
+            status_info['progress'] = 'Cancelled'
+            status_info['error_message'] = scan.error_message
 
         return status_info
 
+    def get_scans_by_ip(self, ip_address: str, limit: int = 10) -> List[Dict[str, Any]]:
+        """
+        Get the last N scans containing a specific IP address.
+
+        Args:
+            ip_address: IP address to search for
+            limit: Maximum number of scans to return (default: 10)
+
+        Returns:
+            List of scan summary dictionaries, most recent first
+        """
+        scans = (
+            self.db.query(Scan)
+            .join(ScanIP, Scan.id == ScanIP.scan_id)
+            .filter(ScanIP.ip_address == ip_address)
+            .filter(Scan.status == 'completed')
+            .order_by(Scan.timestamp.desc())
+            .limit(limit)
+            .all()
+        )
+
+        return [self._scan_to_summary_dict(scan) for scan in scans]
+
     def cleanup_orphaned_scans(self) -> int:
         """
         Clean up orphaned scans that are stuck in 'running' status.
@@ -604,17 +630,47 @@ class ScanService:
 
     def _site_to_dict(self, site: ScanSite) -> Dict[str, Any]:
         """Convert ScanSite to dictionary."""
+        # Look up the master Site ID from ScanSiteAssociation
+        master_site_id = None
+        assoc = (
+            self.db.query(ScanSiteAssociation)
+            .filter(
+                ScanSiteAssociation.scan_id == site.scan_id,
+            )
+            .join(Site)
+            .filter(Site.name == site.site_name)
+            .first()
+        )
+        if assoc:
+            master_site_id = assoc.site_id
+
         return {
             'id': site.id,
             'name': site.site_name,
-            'ips': [self._ip_to_dict(ip) for ip in site.ips]
+            'site_id': master_site_id,  # The actual Site ID for config updates
+            'ips': [self._ip_to_dict(ip, master_site_id) for ip in site.ips]
         }
 
-    def _ip_to_dict(self, ip: ScanIP) -> Dict[str, Any]:
+    def _ip_to_dict(self, ip: ScanIP, site_id: Optional[int] = None) -> Dict[str, Any]:
         """Convert ScanIP to dictionary."""
+        # Look up the SiteIP ID for this IP address in the master Site
+        site_ip_id = None
+        if site_id:
+            site_ip = (
+                self.db.query(SiteIP)
+                .filter(
+                    SiteIP.site_id == site_id,
+                    SiteIP.ip_address == ip.ip_address
+                )
+                .first()
+            )
+            if site_ip:
+                site_ip_id = site_ip.id
+
         return {
             'id': ip.id,
             'address': ip.ip_address,
+            'site_ip_id': site_ip_id,  # The actual SiteIP ID for config updates
             'ping_expected': ip.ping_expected,
             'ping_actual': ip.ping_actual,
             'ports': [self._port_to_dict(port) for port in ip.ports]
diff --git a/app/web/services/site_service.py b/app/web/services/site_service.py
index f4a4fb7..739e60d 100644
--- a/app/web/services/site_service.py
+++ b/app/web/services/site_service.py
@@ -228,6 +228,34 @@ class SiteService:
 
         return [self._site_to_dict(site) for site in sites]
 
+    def get_global_ip_stats(self) -> Dict[str, int]:
+        """
+        Get global IP statistics across all sites.
+
+        Returns:
+            Dictionary with:
+            - total_ips: Total count of IP entries (including duplicates)
+            - unique_ips: Count of distinct IP addresses
+            - duplicate_ips: Number of duplicate entries (total - unique)
+        """
+        # Total IP entries
+        total_ips = (
+            self.db.query(func.count(SiteIP.id))
+            .scalar() or 0
+        )
+
+        # Unique IP addresses
+        unique_ips = (
+            self.db.query(func.count(func.distinct(SiteIP.ip_address)))
+            .scalar() or 0
+        )
+
+        return {
+            'total_ips': total_ips,
+            'unique_ips': unique_ips,
+            'duplicate_ips': total_ips - unique_ips
+        }
+
     def bulk_add_ips_from_cidr(self, site_id: int, cidr: str,
                                expected_ping: Optional[bool] = None,
                                expected_tcp_ports: Optional[List[int]] = None,
diff --git a/app/web/templates/base.html b/app/web/templates/base.html
index 8496d07..b5f1c9d 100644
--- a/app/web/templates/base.html
+++ b/app/web/templates/base.html
@@ -76,6 +76,13 @@
+
+
+
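(The base.html hunk above adds navigation markup for the new IP search page; the HTML itself did not survive extraction and is left blank rather than reconstructed.)

Putting the new endpoints together, a small client could poll progress and stop a scan as sketched below. This is illustrative only: the `/api/scans` mount point, host, and `X-API-Key` header are assumptions about how the blueprint and `api_auth_required` are wired up, which this diff does not show.

```python
import time

import requests

BASE = 'http://localhost:5000/api/scans'   # assumed mount point
HEADERS = {'X-API-Key': 'changeme'}        # assumed auth mechanism


def poll_progress(scan_id: int, interval: float = 5.0) -> None:
    """Print per-phase progress until the scan leaves the 'running' state."""
    while True:
        data = requests.get(f'{BASE}/{scan_id}/progress', headers=HEADERS, timeout=10).json()
        print(f"{data['status']}: {data['current_phase']} "
              f"({data['completed_ips']}/{data['total_ips']} IPs)")
        if data['status'] != 'running':
            break
        time.sleep(interval)


def stop(scan_id: int) -> None:
    """Ask the server to cancel a running scan."""
    resp = requests.post(f'{BASE}/{scan_id}/stop', headers=HEADERS, timeout=10)
    print(resp.status_code, resp.json().get('message'))
```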