Compare commits: 949bccf644...nightly (30 commits)
| SHA1 |
|---|
| 52378eaaf4 |
| 7667d80d2f |
| 9a0b7c7920 |
| d02a065bde |
| 4c22948ea2 |
| 51fa4caaf5 |
| 8c34f8b2eb |
| 136276497d |
| 6bc733fefd |
| 4b197e0b3d |
| 30f0987a99 |
| 9e2fc348b7 |
| 847e05abbe |
| 07c2bcfd11 |
| a560bae800 |
| 56828e4184 |
| 5e3a70f837 |
| 451c7e92ff |
| 8b89fd506d |
| f24bd11dfd |
| 9bd2f67150 |
| 3058c69c39 |
| 04dc238aea |
| c592000c96 |
| 4c6b4bf35d |
| 3adb51ece2 |
| c4cbbee280 |
| 889e1eaac3 |
| a682e5233c |
| 7a14f1602b |
.env.example (15 lines changed)
File diff suppressed because one or more lines are too long
@@ -39,13 +39,12 @@ COPY app/web/ ./web/
 COPY app/migrations/ ./migrations/
 COPY app/alembic.ini .
 COPY app/init_db.py .
-COPY app/docker-entrypoint.sh /docker-entrypoint.sh
 
 # Create required directories
 RUN mkdir -p /app/output /app/logs
 
 # Make scripts executable
-RUN chmod +x /app/src/scanner.py /app/init_db.py /docker-entrypoint.sh
+RUN chmod +x /app/src/scanner.py /app/init_db.py
 
 # Force Python unbuffered output
 ENV PYTHONUNBUFFERED=1
@@ -69,8 +69,12 @@ def run_migrations_online() -> None:
     )
 
     with connectable.connect() as connection:
+        # Enable batch mode for SQLite to support ALTER TABLE operations
+        # like DROP COLUMN which SQLite doesn't natively support
         context.configure(
-            connection=connection, target_metadata=target_metadata
+            connection=connection,
+            target_metadata=target_metadata,
+            render_as_batch=True
         )
 
         with context.begin_transaction():
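The `render_as_batch=True` flag above makes Alembic emit SQLite-safe DDL: instead of an in-place `ALTER TABLE ... DROP COLUMN`, which older SQLite versions cannot do, it rebuilds the table. A minimal sketch of the copy-and-rename pattern that batch mode automates (table and column names here are illustrative, not from this repo):

```python
# Illustrative only: what Alembic's batch mode does for SQLite under the hood.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE scans (id INTEGER PRIMARY KEY, status TEXT, legacy_flag INTEGER);
    INSERT INTO scans (status, legacy_flag) VALUES ('completed', 1);

    -- Rebuild without legacy_flag, then swap the new table in.
    CREATE TABLE scans_new (id INTEGER PRIMARY KEY, status TEXT);
    INSERT INTO scans_new (id, status) SELECT id, status FROM scans;
    DROP TABLE scans;
    ALTER TABLE scans_new RENAME TO scans;
""")
print(conn.execute("SELECT id, status FROM scans").fetchall())  # [(1, 'completed')]
```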
@@ -1,125 +1,214 @@
-"""Initial database schema for SneakyScanner
+"""Initial schema for SneakyScanner
 
 Revision ID: 001
-Revises:
-Create Date: 2025-11-13 18:00:00.000000
+Revises: None
+Create Date: 2025-12-24
+
+This is the complete initial schema for SneakyScanner. All tables are created
+in the correct order to satisfy foreign key constraints.
 """
 from alembic import op
 import sqlalchemy as sa
 
 
-# revision identifiers, used by Alembic.
+# revision identifiers, used by Alembic
 revision = '001'
 down_revision = None
 branch_labels = None
 depends_on = None
 
 
-def upgrade() -> None:
-    """Create all initial tables for SneakyScanner."""
+def upgrade():
+    """Create all tables for SneakyScanner."""
 
-    # Create schedules table first (referenced by scans)
-    op.create_table('schedules',
+    # =========================================================================
+    # Settings Table (no dependencies)
+    # =========================================================================
+    op.create_table(
+        'settings',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=False, comment='Schedule name (e.g., \'Daily prod scan\')'),
-        sa.Column('config_file', sa.Text(), nullable=False, comment='Path to YAML config'),
-        sa.Column('cron_expression', sa.String(length=100), nullable=False, comment='Cron-like schedule (e.g., \'0 2 * * *\')'),
-        sa.Column('enabled', sa.Boolean(), nullable=False, comment='Is schedule active?'),
-        sa.Column('last_run', sa.DateTime(), nullable=True, comment='Last execution time'),
-        sa.Column('next_run', sa.DateTime(), nullable=True, comment='Next scheduled execution'),
-        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Schedule creation time'),
+        sa.Column('key', sa.String(length=255), nullable=False, comment='Setting key'),
+        sa.Column('value', sa.Text(), nullable=True, comment='Setting value (JSON for complex values)'),
+        sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last modification time'),
         sa.PrimaryKeyConstraint('id'),
+        sa.UniqueConstraint('key')
     )
+    op.create_index('ix_settings_key', 'settings', ['key'], unique=True)
 
+    # =========================================================================
+    # Reusable Site Definition Tables
+    # =========================================================================
+    op.create_table(
+        'sites',
+        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
+        sa.Column('name', sa.String(length=255), nullable=False, comment='Unique site name'),
+        sa.Column('description', sa.Text(), nullable=True, comment='Site description'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Site creation time'),
+        sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last modification time'),
+        sa.PrimaryKeyConstraint('id'),
+        sa.UniqueConstraint('name')
+    )
+    op.create_index('ix_sites_name', 'sites', ['name'], unique=True)
+
+    op.create_table(
+        'site_ips',
+        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
+        sa.Column('site_id', sa.Integer(), nullable=False, comment='FK to sites'),
+        sa.Column('ip_address', sa.String(length=45), nullable=False, comment='IPv4 or IPv6 address'),
+        sa.Column('expected_ping', sa.Boolean(), nullable=True, comment='Expected ping response'),
+        sa.Column('expected_tcp_ports', sa.Text(), nullable=True, comment='JSON array of expected TCP ports'),
+        sa.Column('expected_udp_ports', sa.Text(), nullable=True, comment='JSON array of expected UDP ports'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='IP creation time'),
+        sa.ForeignKeyConstraint(['site_id'], ['sites.id']),
+        sa.PrimaryKeyConstraint('id'),
+        sa.UniqueConstraint('site_id', 'ip_address', name='uix_site_ip_address')
+    )
+    op.create_index('ix_site_ips_site_id', 'site_ips', ['site_id'])
+
+    # =========================================================================
+    # Scan Configuration Tables
+    # =========================================================================
+    op.create_table(
+        'scan_configs',
+        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
+        sa.Column('title', sa.String(length=255), nullable=False, comment='Configuration title'),
+        sa.Column('description', sa.Text(), nullable=True, comment='Configuration description'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Config creation time'),
+        sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last modification time'),
+        sa.PrimaryKeyConstraint('id')
+    )
 
-    # Create scans table
-    op.create_table('scans',
+    op.create_table(
+        'scan_config_sites',
+        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
+        sa.Column('config_id', sa.Integer(), nullable=False),
+        sa.Column('site_id', sa.Integer(), nullable=False),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Association creation time'),
+        sa.ForeignKeyConstraint(['config_id'], ['scan_configs.id']),
+        sa.ForeignKeyConstraint(['site_id'], ['sites.id']),
+        sa.PrimaryKeyConstraint('id'),
+        sa.UniqueConstraint('config_id', 'site_id', name='uix_config_site')
+    )
+    op.create_index('ix_scan_config_sites_config_id', 'scan_config_sites', ['config_id'])
+    op.create_index('ix_scan_config_sites_site_id', 'scan_config_sites', ['site_id'])
+
+    # =========================================================================
+    # Scheduling Tables
+    # =========================================================================
+    op.create_table(
+        'schedules',
+        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
+        sa.Column('name', sa.String(length=255), nullable=False, comment='Schedule name'),
+        sa.Column('config_id', sa.Integer(), nullable=True, comment='FK to scan_configs table'),
+        sa.Column('cron_expression', sa.String(length=100), nullable=False, comment='Cron-like schedule'),
+        sa.Column('enabled', sa.Boolean(), nullable=False, default=True, comment='Is schedule active?'),
+        sa.Column('last_run', sa.DateTime(), nullable=True, comment='Last execution time'),
+        sa.Column('next_run', sa.DateTime(), nullable=True, comment='Next scheduled execution'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Schedule creation time'),
+        sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last modification time'),
+        sa.ForeignKeyConstraint(['config_id'], ['scan_configs.id']),
+        sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index('ix_schedules_config_id', 'schedules', ['config_id'])
+
+    # =========================================================================
+    # Core Scan Tables
+    # =========================================================================
+    op.create_table(
+        'scans',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
         sa.Column('timestamp', sa.DateTime(), nullable=False, comment='Scan start time (UTC)'),
         sa.Column('duration', sa.Float(), nullable=True, comment='Total scan duration in seconds'),
-        sa.Column('status', sa.String(length=20), nullable=False, comment='running, completed, failed'),
         sa.Column('config_file', sa.Text(), nullable=True, comment='Path to YAML config used'),
+        sa.Column('status', sa.String(length=20), nullable=False, default='running', comment='running, finalizing, completed, failed, cancelled'),
+        sa.Column('config_id', sa.Integer(), nullable=True, comment='FK to scan_configs table'),
         sa.Column('title', sa.Text(), nullable=True, comment='Scan title from config'),
         sa.Column('json_path', sa.Text(), nullable=True, comment='Path to JSON report'),
         sa.Column('html_path', sa.Text(), nullable=True, comment='Path to HTML report'),
         sa.Column('zip_path', sa.Text(), nullable=True, comment='Path to ZIP archive'),
         sa.Column('screenshot_dir', sa.Text(), nullable=True, comment='Path to screenshot directory'),
         sa.Column('created_at', sa.DateTime(), nullable=False, comment='Record creation time'),
-        sa.Column('triggered_by', sa.String(length=50), nullable=False, comment='manual, scheduled, api'),
+        sa.Column('triggered_by', sa.String(length=50), nullable=False, default='manual', comment='manual, scheduled, api'),
         sa.Column('schedule_id', sa.Integer(), nullable=True, comment='FK to schedules if triggered by schedule'),
-        sa.ForeignKeyConstraint(['schedule_id'], ['schedules.id'], ),
+        sa.Column('started_at', sa.DateTime(), nullable=True, comment='Scan execution start time'),
+        sa.Column('completed_at', sa.DateTime(), nullable=True, comment='Scan execution completion time'),
+        sa.Column('error_message', sa.Text(), nullable=True, comment='Error message if scan failed'),
+        sa.Column('current_phase', sa.String(length=50), nullable=True, comment='Current scan phase'),
+        sa.Column('total_ips', sa.Integer(), nullable=True, comment='Total number of IPs to scan'),
+        sa.Column('completed_ips', sa.Integer(), nullable=True, default=0, comment='Number of IPs completed'),
+        sa.ForeignKeyConstraint(['config_id'], ['scan_configs.id']),
+        sa.ForeignKeyConstraint(['schedule_id'], ['schedules.id']),
         sa.PrimaryKeyConstraint('id')
     )
-    op.create_index(op.f('ix_scans_timestamp'), 'scans', ['timestamp'], unique=False)
+    op.create_index('ix_scans_timestamp', 'scans', ['timestamp'])
+    op.create_index('ix_scans_config_id', 'scans', ['config_id'])
 
-    # Create scan_sites table
-    op.create_table('scan_sites',
+    op.create_table(
+        'scan_sites',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('scan_id', sa.Integer(), nullable=False, comment='FK to scans'),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
         sa.Column('site_name', sa.String(length=255), nullable=False, comment='Site name from config'),
-        sa.ForeignKeyConstraint(['scan_id'], ['scans.id'], ),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
         sa.PrimaryKeyConstraint('id')
     )
-    op.create_index(op.f('ix_scan_sites_scan_id'), 'scan_sites', ['scan_id'], unique=False)
+    op.create_index('ix_scan_sites_scan_id', 'scan_sites', ['scan_id'])
 
-    # Create scan_ips table
-    op.create_table('scan_ips',
+    op.create_table(
+        'scan_ips',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('scan_id', sa.Integer(), nullable=False, comment='FK to scans'),
-        sa.Column('site_id', sa.Integer(), nullable=False, comment='FK to scan_sites'),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
+        sa.Column('site_id', sa.Integer(), nullable=False),
         sa.Column('ip_address', sa.String(length=45), nullable=False, comment='IPv4 or IPv6 address'),
         sa.Column('ping_expected', sa.Boolean(), nullable=True, comment='Expected ping response'),
         sa.Column('ping_actual', sa.Boolean(), nullable=True, comment='Actual ping response'),
-        sa.ForeignKeyConstraint(['scan_id'], ['scans.id'], ),
-        sa.ForeignKeyConstraint(['site_id'], ['scan_sites.id'], ),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
+        sa.ForeignKeyConstraint(['site_id'], ['scan_sites.id']),
         sa.PrimaryKeyConstraint('id'),
         sa.UniqueConstraint('scan_id', 'ip_address', name='uix_scan_ip')
     )
-    op.create_index(op.f('ix_scan_ips_scan_id'), 'scan_ips', ['scan_id'], unique=False)
-    op.create_index(op.f('ix_scan_ips_site_id'), 'scan_ips', ['site_id'], unique=False)
+    op.create_index('ix_scan_ips_scan_id', 'scan_ips', ['scan_id'])
+    op.create_index('ix_scan_ips_site_id', 'scan_ips', ['site_id'])
 
-    # Create scan_ports table
-    op.create_table('scan_ports',
+    op.create_table(
+        'scan_ports',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('scan_id', sa.Integer(), nullable=False, comment='FK to scans'),
-        sa.Column('ip_id', sa.Integer(), nullable=False, comment='FK to scan_ips'),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
+        sa.Column('ip_id', sa.Integer(), nullable=False),
         sa.Column('port', sa.Integer(), nullable=False, comment='Port number (1-65535)'),
         sa.Column('protocol', sa.String(length=10), nullable=False, comment='tcp or udp'),
         sa.Column('expected', sa.Boolean(), nullable=True, comment='Was this port expected?'),
-        sa.Column('state', sa.String(length=20), nullable=False, comment='open, closed, filtered'),
-        sa.ForeignKeyConstraint(['ip_id'], ['scan_ips.id'], ),
-        sa.ForeignKeyConstraint(['scan_id'], ['scans.id'], ),
+        sa.Column('state', sa.String(length=20), nullable=False, default='open', comment='open, closed, filtered'),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
+        sa.ForeignKeyConstraint(['ip_id'], ['scan_ips.id']),
         sa.PrimaryKeyConstraint('id'),
         sa.UniqueConstraint('scan_id', 'ip_id', 'port', 'protocol', name='uix_scan_ip_port')
     )
-    op.create_index(op.f('ix_scan_ports_ip_id'), 'scan_ports', ['ip_id'], unique=False)
-    op.create_index(op.f('ix_scan_ports_scan_id'), 'scan_ports', ['scan_id'], unique=False)
+    op.create_index('ix_scan_ports_scan_id', 'scan_ports', ['scan_id'])
+    op.create_index('ix_scan_ports_ip_id', 'scan_ports', ['ip_id'])
 
-    # Create scan_services table
-    op.create_table('scan_services',
+    op.create_table(
+        'scan_services',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('scan_id', sa.Integer(), nullable=False, comment='FK to scans'),
-        sa.Column('port_id', sa.Integer(), nullable=False, comment='FK to scan_ports'),
-        sa.Column('service_name', sa.String(length=100), nullable=True, comment='Service name (e.g., ssh, http)'),
-        sa.Column('product', sa.String(length=255), nullable=True, comment='Product name (e.g., OpenSSH)'),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
+        sa.Column('port_id', sa.Integer(), nullable=False),
+        sa.Column('service_name', sa.String(length=100), nullable=True, comment='Service name'),
+        sa.Column('product', sa.String(length=255), nullable=True, comment='Product name'),
         sa.Column('version', sa.String(length=100), nullable=True, comment='Version string'),
         sa.Column('extrainfo', sa.Text(), nullable=True, comment='Additional nmap info'),
         sa.Column('ostype', sa.String(length=100), nullable=True, comment='OS type if detected'),
-        sa.Column('http_protocol', sa.String(length=10), nullable=True, comment='http or https (if web service)'),
+        sa.Column('http_protocol', sa.String(length=10), nullable=True, comment='http or https'),
         sa.Column('screenshot_path', sa.Text(), nullable=True, comment='Relative path to screenshot'),
-        sa.ForeignKeyConstraint(['port_id'], ['scan_ports.id'], ),
-        sa.ForeignKeyConstraint(['scan_id'], ['scans.id'], ),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
+        sa.ForeignKeyConstraint(['port_id'], ['scan_ports.id']),
         sa.PrimaryKeyConstraint('id')
     )
-    op.create_index(op.f('ix_scan_services_port_id'), 'scan_services', ['port_id'], unique=False)
-    op.create_index(op.f('ix_scan_services_scan_id'), 'scan_services', ['scan_id'], unique=False)
+    op.create_index('ix_scan_services_scan_id', 'scan_services', ['scan_id'])
+    op.create_index('ix_scan_services_port_id', 'scan_services', ['port_id'])
 
-    # Create scan_certificates table
-    op.create_table('scan_certificates',
+    op.create_table(
+        'scan_certificates',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('scan_id', sa.Integer(), nullable=False, comment='FK to scans'),
-        sa.Column('service_id', sa.Integer(), nullable=False, comment='FK to scan_services'),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
+        sa.Column('service_id', sa.Integer(), nullable=False),
         sa.Column('subject', sa.Text(), nullable=True, comment='Certificate subject (CN)'),
         sa.Column('issuer', sa.Text(), nullable=True, comment='Certificate issuer'),
         sa.Column('serial_number', sa.Text(), nullable=True, comment='Serial number'),
@@ -127,95 +216,177 @@ def upgrade() -> None:
         sa.Column('not_valid_after', sa.DateTime(), nullable=True, comment='Validity end date'),
         sa.Column('days_until_expiry', sa.Integer(), nullable=True, comment='Days until expiration'),
         sa.Column('sans', sa.Text(), nullable=True, comment='JSON array of SANs'),
-        sa.Column('is_self_signed', sa.Boolean(), nullable=True, comment='Self-signed certificate flag'),
-        sa.ForeignKeyConstraint(['scan_id'], ['scans.id'], ),
-        sa.ForeignKeyConstraint(['service_id'], ['scan_services.id'], ),
-        sa.PrimaryKeyConstraint('id'),
-        comment='Index on expiration date for alert queries'
+        sa.Column('is_self_signed', sa.Boolean(), nullable=True, default=False, comment='Self-signed flag'),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
+        sa.ForeignKeyConstraint(['service_id'], ['scan_services.id']),
+        sa.PrimaryKeyConstraint('id')
     )
-    op.create_index(op.f('ix_scan_certificates_scan_id'), 'scan_certificates', ['scan_id'], unique=False)
-    op.create_index(op.f('ix_scan_certificates_service_id'), 'scan_certificates', ['service_id'], unique=False)
+    op.create_index('ix_scan_certificates_scan_id', 'scan_certificates', ['scan_id'])
+    op.create_index('ix_scan_certificates_service_id', 'scan_certificates', ['service_id'])
 
-    # Create scan_tls_versions table
-    op.create_table('scan_tls_versions',
+    op.create_table(
+        'scan_tls_versions',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('scan_id', sa.Integer(), nullable=False, comment='FK to scans'),
-        sa.Column('certificate_id', sa.Integer(), nullable=False, comment='FK to scan_certificates'),
-        sa.Column('tls_version', sa.String(length=20), nullable=False, comment='TLS 1.0, TLS 1.1, TLS 1.2, TLS 1.3'),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
+        sa.Column('certificate_id', sa.Integer(), nullable=False),
+        sa.Column('tls_version', sa.String(length=20), nullable=False, comment='TLS 1.0, 1.1, 1.2, 1.3'),
         sa.Column('supported', sa.Boolean(), nullable=False, comment='Is this version supported?'),
         sa.Column('cipher_suites', sa.Text(), nullable=True, comment='JSON array of cipher suites'),
-        sa.ForeignKeyConstraint(['certificate_id'], ['scan_certificates.id'], ),
-        sa.ForeignKeyConstraint(['scan_id'], ['scans.id'], ),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
+        sa.ForeignKeyConstraint(['certificate_id'], ['scan_certificates.id']),
         sa.PrimaryKeyConstraint('id')
     )
-    op.create_index(op.f('ix_scan_tls_versions_certificate_id'), 'scan_tls_versions', ['certificate_id'], unique=False)
-    op.create_index(op.f('ix_scan_tls_versions_scan_id'), 'scan_tls_versions', ['scan_id'], unique=False)
+    op.create_index('ix_scan_tls_versions_scan_id', 'scan_tls_versions', ['scan_id'])
+    op.create_index('ix_scan_tls_versions_certificate_id', 'scan_tls_versions', ['certificate_id'])
 
-    # Create alerts table
-    op.create_table('alerts',
+    op.create_table(
+        'scan_progress',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('scan_id', sa.Integer(), nullable=False, comment='FK to scans'),
-        sa.Column('alert_type', sa.String(length=50), nullable=False, comment='new_port, cert_expiry, service_change, ping_failed'),
-        sa.Column('severity', sa.String(length=20), nullable=False, comment='info, warning, critical'),
-        sa.Column('message', sa.Text(), nullable=False, comment='Human-readable alert message'),
-        sa.Column('ip_address', sa.String(length=45), nullable=True, comment='Related IP (optional)'),
-        sa.Column('port', sa.Integer(), nullable=True, comment='Related port (optional)'),
-        sa.Column('email_sent', sa.Boolean(), nullable=False, comment='Was email notification sent?'),
-        sa.Column('email_sent_at', sa.DateTime(), nullable=True, comment='Email send timestamp'),
-        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Alert creation time'),
-        sa.ForeignKeyConstraint(['scan_id'], ['scans.id'], ),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
+        sa.Column('ip_address', sa.String(length=45), nullable=False, comment='IP address being scanned'),
+        sa.Column('site_name', sa.String(length=255), nullable=True, comment='Site name'),
+        sa.Column('phase', sa.String(length=50), nullable=False, comment='Phase: ping, tcp_scan, etc.'),
+        sa.Column('status', sa.String(length=20), nullable=False, default='pending', comment='pending, in_progress, completed, failed'),
+        sa.Column('ping_result', sa.Boolean(), nullable=True, comment='Ping response result'),
+        sa.Column('tcp_ports', sa.Text(), nullable=True, comment='JSON array of TCP ports'),
+        sa.Column('udp_ports', sa.Text(), nullable=True, comment='JSON array of UDP ports'),
+        sa.Column('services', sa.Text(), nullable=True, comment='JSON array of services'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Entry creation time'),
+        sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last update time'),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
         sa.PrimaryKeyConstraint('id'),
-        comment='Indexes for alert filtering'
+        sa.UniqueConstraint('scan_id', 'ip_address', name='uix_scan_progress_ip')
     )
-    op.create_index(op.f('ix_alerts_scan_id'), 'alerts', ['scan_id'], unique=False)
+    op.create_index('ix_scan_progress_scan_id', 'scan_progress', ['scan_id'])
 
-    # Create alert_rules table
-    op.create_table('alert_rules',
+    op.create_table(
+        'scan_site_associations',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('rule_type', sa.String(length=50), nullable=False, comment='unexpected_port, cert_expiry, service_down, etc.'),
-        sa.Column('enabled', sa.Boolean(), nullable=False, comment='Is rule active?'),
-        sa.Column('threshold', sa.Integer(), nullable=True, comment='Threshold value (e.g., days for cert expiry)'),
-        sa.Column('email_enabled', sa.Boolean(), nullable=False, comment='Send email for this rule?'),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
+        sa.Column('site_id', sa.Integer(), nullable=False),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Association creation time'),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
+        sa.ForeignKeyConstraint(['site_id'], ['sites.id']),
         sa.PrimaryKeyConstraint('id'),
+        sa.UniqueConstraint('scan_id', 'site_id', name='uix_scan_site')
     )
+    op.create_index('ix_scan_site_associations_scan_id', 'scan_site_associations', ['scan_id'])
+    op.create_index('ix_scan_site_associations_site_id', 'scan_site_associations', ['site_id'])
+
+    # =========================================================================
+    # Alert Tables
+    # =========================================================================
+    op.create_table(
+        'alert_rules',
+        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
+        sa.Column('name', sa.String(length=255), nullable=True, comment='User-friendly rule name'),
+        sa.Column('rule_type', sa.String(length=50), nullable=False, comment='unexpected_port, cert_expiry, etc.'),
+        sa.Column('enabled', sa.Boolean(), nullable=False, default=True, comment='Is rule active?'),
+        sa.Column('threshold', sa.Integer(), nullable=True, comment='Threshold value'),
+        sa.Column('email_enabled', sa.Boolean(), nullable=False, default=False, comment='Send email?'),
+        sa.Column('webhook_enabled', sa.Boolean(), nullable=False, default=False, comment='Send webhook?'),
+        sa.Column('severity', sa.String(length=20), nullable=True, comment='critical, warning, info'),
+        sa.Column('filter_conditions', sa.Text(), nullable=True, comment='JSON filter conditions'),
+        sa.Column('config_id', sa.Integer(), nullable=True, comment='Optional: specific config this rule applies to'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Rule creation time'),
+        sa.Column('updated_at', sa.DateTime(), nullable=True, comment='Last update time'),
+        sa.ForeignKeyConstraint(['config_id'], ['scan_configs.id']),
+        sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index('ix_alert_rules_config_id', 'alert_rules', ['config_id'])
+
+    op.create_table(
+        'alerts',
+        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
+        sa.Column('scan_id', sa.Integer(), nullable=False),
+        sa.Column('rule_id', sa.Integer(), nullable=True, comment='Associated alert rule'),
+        sa.Column('alert_type', sa.String(length=50), nullable=False, comment='Alert type'),
+        sa.Column('severity', sa.String(length=20), nullable=False, comment='info, warning, critical'),
+        sa.Column('message', sa.Text(), nullable=False, comment='Human-readable message'),
+        sa.Column('ip_address', sa.String(length=45), nullable=True, comment='Related IP'),
+        sa.Column('port', sa.Integer(), nullable=True, comment='Related port'),
+        sa.Column('email_sent', sa.Boolean(), nullable=False, default=False, comment='Was email sent?'),
+        sa.Column('email_sent_at', sa.DateTime(), nullable=True, comment='Email send timestamp'),
+        sa.Column('webhook_sent', sa.Boolean(), nullable=False, default=False, comment='Was webhook sent?'),
+        sa.Column('webhook_sent_at', sa.DateTime(), nullable=True, comment='Webhook send timestamp'),
+        sa.Column('acknowledged', sa.Boolean(), nullable=False, default=False, comment='Was alert acknowledged?'),
+        sa.Column('acknowledged_at', sa.DateTime(), nullable=True, comment='Acknowledgment timestamp'),
+        sa.Column('acknowledged_by', sa.String(length=255), nullable=True, comment='User who acknowledged'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Alert creation time'),
+        sa.ForeignKeyConstraint(['scan_id'], ['scans.id']),
+        sa.ForeignKeyConstraint(['rule_id'], ['alert_rules.id']),
+        sa.PrimaryKeyConstraint('id')
+    )
+    op.create_index('ix_alerts_scan_id', 'alerts', ['scan_id'])
+    op.create_index('ix_alerts_rule_id', 'alerts', ['rule_id'])
+    op.create_index('ix_alerts_acknowledged', 'alerts', ['acknowledged'])
+
+    # =========================================================================
+    # Webhook Tables
+    # =========================================================================
+    op.create_table(
+        'webhooks',
+        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
+        sa.Column('name', sa.String(length=255), nullable=False, comment='Webhook name'),
+        sa.Column('url', sa.Text(), nullable=False, comment='Webhook URL'),
+        sa.Column('enabled', sa.Boolean(), nullable=False, default=True, comment='Is webhook enabled?'),
+        sa.Column('auth_type', sa.String(length=20), nullable=True, comment='none, bearer, basic, custom'),
+        sa.Column('auth_token', sa.Text(), nullable=True, comment='Encrypted auth token'),
+        sa.Column('custom_headers', sa.Text(), nullable=True, comment='JSON custom headers'),
+        sa.Column('alert_types', sa.Text(), nullable=True, comment='JSON array of alert types'),
+        sa.Column('severity_filter', sa.Text(), nullable=True, comment='JSON array of severities'),
+        sa.Column('timeout', sa.Integer(), nullable=True, default=10, comment='Request timeout'),
+        sa.Column('retry_count', sa.Integer(), nullable=True, default=3, comment='Retry attempts'),
+        sa.Column('template', sa.Text(), nullable=True, comment='Jinja2 template for payload'),
+        sa.Column('template_format', sa.String(length=20), nullable=True, default='json', comment='json, text'),
+        sa.Column('content_type_override', sa.String(length=100), nullable=True, comment='Custom Content-Type'),
+        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Creation time'),
+        sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last update time'),
+        sa.PrimaryKeyConstraint('id')
+    )
 
-    # Create settings table
-    op.create_table('settings',
+    op.create_table(
+        'webhook_delivery_log',
         sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('key', sa.String(length=255), nullable=False, comment='Setting key (e.g., smtp_server)'),
-        sa.Column('value', sa.Text(), nullable=True, comment='Setting value (JSON for complex values)'),
-        sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last modification time'),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint('key')
+        sa.Column('webhook_id', sa.Integer(), nullable=False, comment='Associated webhook'),
+        sa.Column('alert_id', sa.Integer(), nullable=False, comment='Associated alert'),
+        sa.Column('status', sa.String(length=20), nullable=True, comment='success, failed, retrying'),
+        sa.Column('response_code', sa.Integer(), nullable=True, comment='HTTP response code'),
+        sa.Column('response_body', sa.Text(), nullable=True, comment='Response body'),
+        sa.Column('error_message', sa.Text(), nullable=True, comment='Error message if failed'),
+        sa.Column('attempt_number', sa.Integer(), nullable=True, comment='Which attempt'),
+        sa.Column('delivered_at', sa.DateTime(), nullable=False, comment='Delivery timestamp'),
+        sa.ForeignKeyConstraint(['webhook_id'], ['webhooks.id']),
+        sa.ForeignKeyConstraint(['alert_id'], ['alerts.id']),
+        sa.PrimaryKeyConstraint('id')
     )
-    op.create_index(op.f('ix_settings_key'), 'settings', ['key'], unique=True)
+    op.create_index('ix_webhook_delivery_log_webhook_id', 'webhook_delivery_log', ['webhook_id'])
+    op.create_index('ix_webhook_delivery_log_alert_id', 'webhook_delivery_log', ['alert_id'])
+    op.create_index('ix_webhook_delivery_log_status', 'webhook_delivery_log', ['status'])
+
+    print("\n✓ Initial schema created successfully")
 
 
-def downgrade() -> None:
-    """Drop all tables."""
-    op.drop_index(op.f('ix_settings_key'), table_name='settings')
-    op.drop_table('settings')
-    op.drop_table('alert_rules')
-    op.drop_index(op.f('ix_alerts_scan_id'), table_name='alerts')
+def downgrade():
+    """Drop all tables in reverse order."""
+    op.drop_table('webhook_delivery_log')
+    op.drop_table('webhooks')
     op.drop_table('alerts')
-    op.drop_index(op.f('ix_scan_tls_versions_scan_id'), table_name='scan_tls_versions')
-    op.drop_index(op.f('ix_scan_tls_versions_certificate_id'), table_name='scan_tls_versions')
+    op.drop_table('alert_rules')
+    op.drop_table('scan_site_associations')
+    op.drop_table('scan_progress')
     op.drop_table('scan_tls_versions')
-    op.drop_index(op.f('ix_scan_certificates_service_id'), table_name='scan_certificates')
-    op.drop_index(op.f('ix_scan_certificates_scan_id'), table_name='scan_certificates')
     op.drop_table('scan_certificates')
-    op.drop_index(op.f('ix_scan_services_scan_id'), table_name='scan_services')
-    op.drop_index(op.f('ix_scan_services_port_id'), table_name='scan_services')
     op.drop_table('scan_services')
-    op.drop_index(op.f('ix_scan_ports_scan_id'), table_name='scan_ports')
-    op.drop_index(op.f('ix_scan_ports_ip_id'), table_name='scan_ports')
     op.drop_table('scan_ports')
-    op.drop_index(op.f('ix_scan_ips_site_id'), table_name='scan_ips')
-    op.drop_index(op.f('ix_scan_ips_scan_id'), table_name='scan_ips')
     op.drop_table('scan_ips')
-    op.drop_index(op.f('ix_scan_sites_scan_id'), table_name='scan_sites')
     op.drop_table('scan_sites')
-    op.drop_index(op.f('ix_scans_timestamp'), table_name='scans')
     op.drop_table('scans')
     op.drop_table('schedules')
+    op.drop_table('scan_config_sites')
+    op.drop_table('scan_configs')
+    op.drop_table('site_ips')
+    op.drop_table('sites')
+    op.drop_table('settings')
+
+    print("\n✓ All tables dropped")
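After applying the rewritten 001 (for example with `alembic upgrade head`), a quick sanity check is to compare the live table list against what the consolidated migration promises. A sketch; the database path is an assumption, not taken from this repo:

```python
from sqlalchemy import create_engine, inspect

engine = create_engine("sqlite:///sneakyscanner.db")  # hypothetical path
expected = {
    "settings", "sites", "site_ips", "scan_configs", "scan_config_sites",
    "schedules", "scans", "scan_sites", "scan_ips", "scan_ports",
    "scan_services", "scan_certificates", "scan_tls_versions",
    "scan_progress", "scan_site_associations", "alert_rules", "alerts",
    "webhooks", "webhook_delivery_log",
}
missing = expected - set(inspect(engine).get_table_names())
print("missing tables:", sorted(missing) or "none")
```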
@@ -1,28 +0,0 @@
-"""Add indexes for scan queries
-
-Revision ID: 002
-Revises: 001
-Create Date: 2025-11-14 00:30:00.000000
-
-"""
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision = '002'
-down_revision = '001'
-branch_labels = None
-depends_on = None
-
-
-def upgrade() -> None:
-    """Add database indexes for better query performance."""
-    # Add index on scans.status for filtering
-    # Note: index on scans.timestamp already exists from migration 001
-    op.create_index('ix_scans_status', 'scans', ['status'], unique=False)
-
-
-def downgrade() -> None:
-    """Remove indexes."""
-    op.drop_index('ix_scans_status', table_name='scans')
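Migration 002's only effect was `ix_scans_status`; the consolidated 001 above indexes `scans.timestamp` and `scans.config_id` but does not appear to recreate the status index. A standalone sketch (illustrative, not project code) for checking whether a status filter is actually served by an index:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE scans (id INTEGER PRIMARY KEY, status TEXT);
    CREATE INDEX ix_scans_status ON scans (status);
""")
for row in conn.execute(
    "EXPLAIN QUERY PLAN SELECT id FROM scans WHERE status = 'running'"
):
    print(row)  # expect a plan step mentioning ix_scans_status
```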
@@ -1,39 +0,0 @@
-"""Add timing and error fields to scans table
-
-Revision ID: 003
-Revises: 002
-Create Date: 2025-11-14
-
-"""
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic
-revision = '003'
-down_revision = '002'
-branch_labels = None
-depends_on = None
-
-
-def upgrade():
-    """
-    Add fields for tracking scan execution timing and errors.
-
-    New fields:
-    - started_at: When scan execution actually started
-    - completed_at: When scan execution finished (success or failure)
-    - error_message: Error message if scan failed
-    """
-    with op.batch_alter_table('scans') as batch_op:
-        batch_op.add_column(sa.Column('started_at', sa.DateTime(), nullable=True, comment='Scan execution start time'))
-        batch_op.add_column(sa.Column('completed_at', sa.DateTime(), nullable=True, comment='Scan execution completion time'))
-        batch_op.add_column(sa.Column('error_message', sa.Text(), nullable=True, comment='Error message if scan failed'))
-
-
-def downgrade():
-    """Remove the timing and error fields."""
-    with op.batch_alter_table('scans') as batch_op:
-        batch_op.drop_column('error_message')
-        batch_op.drop_column('completed_at')
-        batch_op.drop_column('started_at')
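The timing fields 003 introduced survive the consolidation: `started_at`, `completed_at`, and `error_message` all appear in the new `scans` table in 001. What they enable, in one line (values made up):

```python
from datetime import datetime

started_at = datetime(2025, 11, 14, 2, 0, 0)      # hypothetical values
completed_at = datetime(2025, 11, 14, 2, 7, 30)
print((completed_at - started_at).total_seconds())  # 450.0
```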
@@ -1,120 +0,0 @@
-"""Add enhanced alert features for Phase 5
-
-Revision ID: 004
-Revises: 003
-Create Date: 2025-11-18
-
-"""
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic
-revision = '004'
-down_revision = '003'
-branch_labels = None
-depends_on = None
-
-
-def upgrade():
-    """
-    Add enhancements for Phase 5 Alert Rule Engine:
-    - Enhanced alert_rules fields
-    - Enhanced alerts fields
-    - New webhooks table
-    - New webhook_delivery_log table
-    """
-
-    # Enhance alert_rules table
-    with op.batch_alter_table('alert_rules') as batch_op:
-        batch_op.add_column(sa.Column('name', sa.String(255), nullable=True, comment='User-friendly rule name'))
-        batch_op.add_column(sa.Column('webhook_enabled', sa.Boolean(), nullable=False, server_default='0', comment='Whether to send webhooks for this rule'))
-        batch_op.add_column(sa.Column('severity', sa.String(20), nullable=True, comment='Alert severity level (critical, warning, info)'))
-        batch_op.add_column(sa.Column('filter_conditions', sa.Text(), nullable=True, comment='JSON filter conditions for the rule'))
-        batch_op.add_column(sa.Column('config_file', sa.String(255), nullable=True, comment='Optional: specific config file this rule applies to'))
-        batch_op.add_column(sa.Column('updated_at', sa.DateTime(), nullable=True, comment='Last update timestamp'))
-
-    # Enhance alerts table
-    with op.batch_alter_table('alerts') as batch_op:
-        batch_op.add_column(sa.Column('rule_id', sa.Integer(), nullable=True, comment='Associated alert rule'))
-        batch_op.add_column(sa.Column('webhook_sent', sa.Boolean(), nullable=False, server_default='0', comment='Whether webhook was sent'))
-        batch_op.add_column(sa.Column('webhook_sent_at', sa.DateTime(), nullable=True, comment='When webhook was sent'))
-        batch_op.add_column(sa.Column('acknowledged', sa.Boolean(), nullable=False, server_default='0', comment='Whether alert was acknowledged'))
-        batch_op.add_column(sa.Column('acknowledged_at', sa.DateTime(), nullable=True, comment='When alert was acknowledged'))
-        batch_op.add_column(sa.Column('acknowledged_by', sa.String(255), nullable=True, comment='User who acknowledged the alert'))
-        batch_op.create_foreign_key('fk_alerts_rule_id', 'alert_rules', ['rule_id'], ['id'])
-        batch_op.create_index('idx_alerts_rule_id', ['rule_id'])
-        batch_op.create_index('idx_alerts_acknowledged', ['acknowledged'])
-
-    # Create webhooks table
-    op.create_table('webhooks',
-        sa.Column('id', sa.Integer(), nullable=False),
-        sa.Column('name', sa.String(255), nullable=False, comment='Webhook name'),
-        sa.Column('url', sa.Text(), nullable=False, comment='Webhook URL'),
-        sa.Column('enabled', sa.Boolean(), nullable=False, server_default='1', comment='Whether webhook is enabled'),
-        sa.Column('auth_type', sa.String(20), nullable=True, comment='Authentication type: none, bearer, basic, custom'),
-        sa.Column('auth_token', sa.Text(), nullable=True, comment='Encrypted authentication token'),
-        sa.Column('custom_headers', sa.Text(), nullable=True, comment='JSON custom headers'),
-        sa.Column('alert_types', sa.Text(), nullable=True, comment='JSON array of alert types to trigger on'),
-        sa.Column('severity_filter', sa.Text(), nullable=True, comment='JSON array of severities to trigger on'),
-        sa.Column('timeout', sa.Integer(), nullable=True, server_default='10', comment='Request timeout in seconds'),
-        sa.Column('retry_count', sa.Integer(), nullable=True, server_default='3', comment='Number of retry attempts'),
-        sa.Column('created_at', sa.DateTime(), nullable=False),
-        sa.Column('updated_at', sa.DateTime(), nullable=False),
-        sa.PrimaryKeyConstraint('id')
-    )
-
-    # Create webhook_delivery_log table
-    op.create_table('webhook_delivery_log',
-        sa.Column('id', sa.Integer(), nullable=False),
-        sa.Column('webhook_id', sa.Integer(), nullable=False, comment='Associated webhook'),
-        sa.Column('alert_id', sa.Integer(), nullable=False, comment='Associated alert'),
-        sa.Column('status', sa.String(20), nullable=True, comment='Delivery status: success, failed, retrying'),
-        sa.Column('response_code', sa.Integer(), nullable=True, comment='HTTP response code'),
-        sa.Column('response_body', sa.Text(), nullable=True, comment='Response body from webhook'),
-        sa.Column('error_message', sa.Text(), nullable=True, comment='Error message if failed'),
-        sa.Column('attempt_number', sa.Integer(), nullable=True, comment='Which attempt this was'),
-        sa.Column('delivered_at', sa.DateTime(), nullable=False, comment='Delivery timestamp'),
-        sa.ForeignKeyConstraint(['webhook_id'], ['webhooks.id'], ),
-        sa.ForeignKeyConstraint(['alert_id'], ['alerts.id'], ),
-        sa.PrimaryKeyConstraint('id')
-    )
-
-    # Create indexes for webhook_delivery_log
-    op.create_index('idx_webhook_delivery_alert_id', 'webhook_delivery_log', ['alert_id'])
-    op.create_index('idx_webhook_delivery_webhook_id', 'webhook_delivery_log', ['webhook_id'])
-    op.create_index('idx_webhook_delivery_status', 'webhook_delivery_log', ['status'])
-
-
-def downgrade():
-    """Remove Phase 5 alert enhancements."""
-
-    # Drop webhook_delivery_log table and its indexes
-    op.drop_index('idx_webhook_delivery_status', table_name='webhook_delivery_log')
-    op.drop_index('idx_webhook_delivery_webhook_id', table_name='webhook_delivery_log')
-    op.drop_index('idx_webhook_delivery_alert_id', table_name='webhook_delivery_log')
-    op.drop_table('webhook_delivery_log')
-
-    # Drop webhooks table
-    op.drop_table('webhooks')
-
-    # Remove enhancements from alerts table
-    with op.batch_alter_table('alerts') as batch_op:
-        batch_op.drop_index('idx_alerts_acknowledged')
-        batch_op.drop_index('idx_alerts_rule_id')
-        batch_op.drop_constraint('fk_alerts_rule_id', type_='foreignkey')
-        batch_op.drop_column('acknowledged_by')
-        batch_op.drop_column('acknowledged_at')
-        batch_op.drop_column('acknowledged')
-        batch_op.drop_column('webhook_sent_at')
-        batch_op.drop_column('webhook_sent')
-        batch_op.drop_column('rule_id')
-
-    # Remove enhancements from alert_rules table
-    with op.batch_alter_table('alert_rules') as batch_op:
-        batch_op.drop_column('updated_at')
-        batch_op.drop_column('config_file')
-        batch_op.drop_column('filter_conditions')
-        batch_op.drop_column('severity')
-        batch_op.drop_column('webhook_enabled')
-        batch_op.drop_column('name')
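The `timeout` and `retry_count` columns describe delivery behaviour rather than enforce it; whatever worker posts the webhooks has to honour them. A minimal delivery loop consistent with those fields and with the `webhook_delivery_log` statuses (a sketch, not the project's actual sender):

```python
import requests

def deliver(url: str, payload: dict, timeout: int = 10, retry_count: int = 3):
    """Post payload, retrying up to retry_count times; returns (attempt, code)."""
    for attempt in range(1, retry_count + 1):
        try:
            resp = requests.post(url, json=payload, timeout=timeout)
            # would log: status='success' if resp.ok else 'failed', plus response_code
            if resp.ok:
                return attempt, resp.status_code
        except requests.RequestException as exc:
            # would log: status='retrying', error_message=str(exc), attempt_number=attempt
            continue
    return retry_count, None  # exhausted: final row logged with status='failed'
```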
@@ -1,83 +0,0 @@
-"""Add webhook template support
-
-Revision ID: 005
-Revises: 004
-Create Date: 2025-11-18
-
-"""
-from alembic import op
-import sqlalchemy as sa
-import json
-
-
-# revision identifiers, used by Alembic
-revision = '005'
-down_revision = '004'
-branch_labels = None
-depends_on = None
-
-
-# Default template that matches the current JSON payload structure
-DEFAULT_TEMPLATE = """{
-  "event": "alert.created",
-  "alert": {
-    "id": {{ alert.id }},
-    "type": "{{ alert.type }}",
-    "severity": "{{ alert.severity }}",
-    "message": "{{ alert.message }}",
-    {% if alert.ip_address %}"ip_address": "{{ alert.ip_address }}",{% endif %}
-    {% if alert.port %}"port": {{ alert.port }},{% endif %}
-    "acknowledged": {{ alert.acknowledged|lower }},
-    "created_at": "{{ alert.created_at.isoformat() }}"
-  },
-  "scan": {
-    "id": {{ scan.id }},
-    "title": "{{ scan.title }}",
-    "timestamp": "{{ scan.timestamp.isoformat() }}",
-    "status": "{{ scan.status }}"
-  },
-  "rule": {
-    "id": {{ rule.id }},
-    "name": "{{ rule.name }}",
-    "type": "{{ rule.type }}",
-    "threshold": {{ rule.threshold if rule.threshold else 'null' }}
-  }
-}"""
-
-
-def upgrade():
-    """
-    Add webhook template fields:
-    - template: Jinja2 template for payload
-    - template_format: Output format (json, text)
-    - content_type_override: Optional custom Content-Type
-    """
-
-    # Add new columns to webhooks table
-    with op.batch_alter_table('webhooks') as batch_op:
-        batch_op.add_column(sa.Column('template', sa.Text(), nullable=True, comment='Jinja2 template for webhook payload'))
-        batch_op.add_column(sa.Column('template_format', sa.String(20), nullable=True, server_default='json', comment='Template output format: json, text'))
-        batch_op.add_column(sa.Column('content_type_override', sa.String(100), nullable=True, comment='Optional custom Content-Type header'))
-
-    # Populate existing webhooks with default template
-    # This ensures backward compatibility by converting existing webhooks to use the
-    # same JSON structure they're currently sending
-    connection = op.get_bind()
-    connection.execute(
-        sa.text("""
-            UPDATE webhooks
-            SET template = :template,
-                template_format = 'json'
-            WHERE template IS NULL
-        """),
-        {"template": DEFAULT_TEMPLATE}
-    )
-
-
-def downgrade():
-    """Remove webhook template fields."""
-
-    with op.batch_alter_table('webhooks') as batch_op:
-        batch_op.drop_column('content_type_override')
-        batch_op.drop_column('template_format')
-        batch_op.drop_column('template')
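For reference, this is roughly how `DEFAULT_TEMPLATE` above gets rendered at delivery time (the consolidated schema keeps the per-webhook `template` column). The stand-in objects here are illustrative, not the real ORM models:

```python
import json
from datetime import datetime
from types import SimpleNamespace
from jinja2 import Template

alert = SimpleNamespace(id=1, type="cert_expiry", severity="warning",
                        message="certificate expires in 12 days",
                        ip_address="10.0.0.5", port=443,
                        acknowledged=False, created_at=datetime.now())
scan = SimpleNamespace(id=7, title="Nightly", timestamp=datetime.now(), status="completed")
rule = SimpleNamespace(id=2, name="cert expiry", type="cert_expiry", threshold=30)

# DEFAULT_TEMPLATE is the Jinja2 string from migration 005 above.
payload = json.loads(Template(DEFAULT_TEMPLATE).render(alert=alert, scan=scan, rule=rule))
print(payload["alert"]["severity"])  # 'warning'
```

Note the template is built so its output stays valid JSON: the boolean goes through `|lower` and a missing `threshold` renders as the literal `null`.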
@@ -1,161 +0,0 @@
-"""Add reusable site definitions
-
-Revision ID: 006
-Revises: 005
-Create Date: 2025-11-19
-
-This migration introduces reusable site definitions that can be shared across
-multiple scans. Sites are defined once with CIDR ranges and can be referenced
-in multiple scan configurations.
-"""
-from alembic import op
-import sqlalchemy as sa
-from sqlalchemy import text
-
-
-# revision identifiers, used by Alembic
-revision = '006'
-down_revision = '005'
-branch_labels = None
-depends_on = None
-
-
-def upgrade():
-    """
-    Create new site tables and migrate existing scan_sites data to the new structure.
-    """
-
-    # Create sites table (master site definitions)
-    op.create_table('sites',
-        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('name', sa.String(length=255), nullable=False, comment='Unique site name'),
-        sa.Column('description', sa.Text(), nullable=True, comment='Site description'),
-        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Site creation time'),
-        sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last modification time'),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint('name', name='uix_site_name')
-    )
-    op.create_index(op.f('ix_sites_name'), 'sites', ['name'], unique=True)
-
-    # Create site_cidrs table (CIDR ranges for each site)
-    op.create_table('site_cidrs',
-        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('site_id', sa.Integer(), nullable=False, comment='FK to sites'),
-        sa.Column('cidr', sa.String(length=45), nullable=False, comment='CIDR notation (e.g., 10.0.0.0/24)'),
-        sa.Column('expected_ping', sa.Boolean(), nullable=True, comment='Expected ping response for this CIDR'),
-        sa.Column('expected_tcp_ports', sa.Text(), nullable=True, comment='JSON array of expected TCP ports'),
-        sa.Column('expected_udp_ports', sa.Text(), nullable=True, comment='JSON array of expected UDP ports'),
-        sa.Column('created_at', sa.DateTime(), nullable=False, comment='CIDR creation time'),
-        sa.ForeignKeyConstraint(['site_id'], ['sites.id'], ),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint('site_id', 'cidr', name='uix_site_cidr')
-    )
-    op.create_index(op.f('ix_site_cidrs_site_id'), 'site_cidrs', ['site_id'], unique=False)
-
-    # Create site_ips table (IP-level overrides within CIDRs)
-    op.create_table('site_ips',
-        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('site_cidr_id', sa.Integer(), nullable=False, comment='FK to site_cidrs'),
-        sa.Column('ip_address', sa.String(length=45), nullable=False, comment='IPv4 or IPv6 address'),
-        sa.Column('expected_ping', sa.Boolean(), nullable=True, comment='Override ping expectation for this IP'),
-        sa.Column('expected_tcp_ports', sa.Text(), nullable=True, comment='JSON array of expected TCP ports (overrides CIDR)'),
-        sa.Column('expected_udp_ports', sa.Text(), nullable=True, comment='JSON array of expected UDP ports (overrides CIDR)'),
-        sa.Column('created_at', sa.DateTime(), nullable=False, comment='IP override creation time'),
-        sa.ForeignKeyConstraint(['site_cidr_id'], ['site_cidrs.id'], ),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint('site_cidr_id', 'ip_address', name='uix_site_cidr_ip')
-    )
-    op.create_index(op.f('ix_site_ips_site_cidr_id'), 'site_ips', ['site_cidr_id'], unique=False)
-
-    # Create scan_site_associations table (many-to-many between scans and sites)
-    op.create_table('scan_site_associations',
-        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
-        sa.Column('scan_id', sa.Integer(), nullable=False, comment='FK to scans'),
-        sa.Column('site_id', sa.Integer(), nullable=False, comment='FK to sites'),
-        sa.Column('created_at', sa.DateTime(), nullable=False, comment='Association creation time'),
-        sa.ForeignKeyConstraint(['scan_id'], ['scans.id'], ),
-        sa.ForeignKeyConstraint(['site_id'], ['sites.id'], ),
-        sa.PrimaryKeyConstraint('id'),
-        sa.UniqueConstraint('scan_id', 'site_id', name='uix_scan_site')
-    )
-    op.create_index(op.f('ix_scan_site_associations_scan_id'), 'scan_site_associations', ['scan_id'], unique=False)
-    op.create_index(op.f('ix_scan_site_associations_site_id'), 'scan_site_associations', ['site_id'], unique=False)
-
-    # Migrate existing data
-    connection = op.get_bind()
-
-    # 1. Extract unique site names from existing scan_sites and create master Site records
-    # This groups all historical scan sites by name and creates one master site per unique name
-    connection.execute(text("""
-        INSERT INTO sites (name, description, created_at, updated_at)
-        SELECT DISTINCT
-            site_name,
-            'Migrated from scan_sites' as description,
-            datetime('now') as created_at,
-            datetime('now') as updated_at
-        FROM scan_sites
-        WHERE site_name NOT IN (SELECT name FROM sites)
-    """))
-
-    # 2. Create scan_site_associations linking scans to their sites
-    # This maintains the historical relationship between scans and the sites they used
-    connection.execute(text("""
-        INSERT INTO scan_site_associations (scan_id, site_id, created_at)
-        SELECT DISTINCT
-            ss.scan_id,
-            s.id as site_id,
-            datetime('now') as created_at
-        FROM scan_sites ss
-        INNER JOIN sites s ON s.name = ss.site_name
-        WHERE NOT EXISTS (
-            SELECT 1 FROM scan_site_associations ssa
-            WHERE ssa.scan_id = ss.scan_id AND ssa.site_id = s.id
-        )
-    """))
-
-    # 3. For each migrated site, create a CIDR entry from the IPs in scan_ips
-    # Since historical data has individual IPs, we'll create /32 CIDRs for each unique IP
-    # This preserves the exact IP addresses while fitting them into the new CIDR-based model
-    connection.execute(text("""
-        INSERT INTO site_cidrs (site_id, cidr, expected_ping, expected_tcp_ports, expected_udp_ports, created_at)
-        SELECT DISTINCT
-            s.id as site_id,
-            si.ip_address || '/32' as cidr,
-            si.ping_expected,
-            '[]' as expected_tcp_ports,
-            '[]' as expected_udp_ports,
-            datetime('now') as created_at
-        FROM scan_ips si
-        INNER JOIN scan_sites ss ON ss.id = si.site_id
-        INNER JOIN sites s ON s.name = ss.site_name
-        WHERE NOT EXISTS (
-            SELECT 1 FROM site_cidrs sc
-            WHERE sc.site_id = s.id AND sc.cidr = si.ip_address || '/32'
-        )
-        GROUP BY s.id, si.ip_address, si.ping_expected
-    """))
-
-    print("✓ Migration complete: Reusable sites created from historical scan data")
-    print(f" - Created {connection.execute(text('SELECT COUNT(*) FROM sites')).scalar()} master site(s)")
-    print(f" - Created {connection.execute(text('SELECT COUNT(*) FROM site_cidrs')).scalar()} CIDR range(s)")
-    print(f" - Created {connection.execute(text('SELECT COUNT(*) FROM scan_site_associations')).scalar()} scan-site association(s)")
-
-
-def downgrade():
-    """Remove reusable site tables."""
-
-    # Drop tables in reverse order of creation (respecting foreign keys)
-    op.drop_index(op.f('ix_scan_site_associations_site_id'), table_name='scan_site_associations')
-    op.drop_index(op.f('ix_scan_site_associations_scan_id'), table_name='scan_site_associations')
-    op.drop_table('scan_site_associations')
-
-    op.drop_index(op.f('ix_site_ips_site_cidr_id'), table_name='site_ips')
-    op.drop_table('site_ips')
-
-    op.drop_index(op.f('ix_site_cidrs_site_id'), table_name='site_cidrs')
-    op.drop_table('site_cidrs')
-
-    op.drop_index(op.f('ix_sites_name'), table_name='sites')
-    op.drop_table('sites')
-
-    print("✓ Downgrade complete: Reusable site tables removed")
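Step 3 of that data migration maps every historical IP to a one-address `/32` network by plain string concatenation. The same transformation in Python, which also shows the edge it glosses over (an IPv6 address would need `/128`):

```python
import ipaddress

for ip in ["10.0.0.5", "192.168.1.20"]:
    net = ipaddress.ip_network(ip + "/32")  # same string form the SQL builds
    print(net.with_prefixlen, "->", net.num_addresses, "address")

# ipaddress.ip_network("2001:db8::1/32") would raise ValueError (host bits set):
# an IPv6 host needs /128, which the unconditional || '/32' does not account for.
```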
@@ -1,102 +0,0 @@
|
||||
"""Add database-stored scan configurations
|
||||
|
||||
Revision ID: 007
|
||||
Revises: 006
|
||||
Create Date: 2025-11-19
|
||||
|
||||
This migration introduces database-stored scan configurations to replace YAML
|
||||
config files. Configs reference sites from the sites table, enabling visual
|
||||
config builder and better data management.
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import text
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic
|
||||
revision = '007'
|
||||
down_revision = '006'
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade():
|
||||
"""
|
||||
Create scan_configs and scan_config_sites tables.
|
||||
Add config_id foreign keys to scans and schedules tables.
|
||||
"""
|
||||
|
||||
# Create scan_configs table
|
||||
op.create_table('scan_configs',
|
||||
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
|
||||
sa.Column('title', sa.String(length=255), nullable=False, comment='Configuration title'),
|
||||
sa.Column('description', sa.Text(), nullable=True, comment='Configuration description'),
|
||||
sa.Column('created_at', sa.DateTime(), nullable=False, comment='Config creation time'),
|
||||
sa.Column('updated_at', sa.DateTime(), nullable=False, comment='Last modification time'),
|
||||
sa.PrimaryKeyConstraint('id')
|
||||
)
|
||||
|
||||
# Create scan_config_sites table (many-to-many between configs and sites)
|
||||
op.create_table('scan_config_sites',
|
||||
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
|
||||
sa.Column('config_id', sa.Integer(), nullable=False, comment='FK to scan_configs'),
|
||||
sa.Column('site_id', sa.Integer(), nullable=False, comment='FK to sites'),
|
||||
sa.Column('created_at', sa.DateTime(), nullable=False, comment='Association creation time'),
|
||||
sa.ForeignKeyConstraint(['config_id'], ['scan_configs.id'], ),
|
||||
sa.ForeignKeyConstraint(['site_id'], ['sites.id'], ),
|
||||
sa.PrimaryKeyConstraint('id'),
|
||||
sa.UniqueConstraint('config_id', 'site_id', name='uix_config_site')
|
||||
)
|
||||
op.create_index(op.f('ix_scan_config_sites_config_id'), 'scan_config_sites', ['config_id'], unique=False)
|
||||
op.create_index(op.f('ix_scan_config_sites_site_id'), 'scan_config_sites', ['site_id'], unique=False)
|
||||
|
||||
# Add config_id to scans table
|
||||
with op.batch_alter_table('scans', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('config_id', sa.Integer(), nullable=True, comment='FK to scan_configs table'))
|
||||
batch_op.create_index('ix_scans_config_id', ['config_id'], unique=False)
|
||||
batch_op.create_foreign_key('fk_scans_config_id', 'scan_configs', ['config_id'], ['id'])
|
||||
# Mark config_file as deprecated in comment (already has nullable=True)
|
||||
|
||||
# Add config_id to schedules table and make config_file nullable
|
||||
with op.batch_alter_table('schedules', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('config_id', sa.Integer(), nullable=True, comment='FK to scan_configs table'))
|
||||
batch_op.create_index('ix_schedules_config_id', ['config_id'], unique=False)
|
||||
batch_op.create_foreign_key('fk_schedules_config_id', 'scan_configs', ['config_id'], ['id'])
|
||||
# Make config_file nullable (it was required before)
|
||||
batch_op.alter_column('config_file', existing_type=sa.Text(), nullable=True)
|
||||
|
||||
connection = op.get_bind()
|
||||
|
||||
print("✓ Migration complete: Scan configs tables created")
|
||||
print(" - Created scan_configs table for database-stored configurations")
|
||||
print(" - Created scan_config_sites association table")
|
||||
print(" - Added config_id to scans table")
|
||||
print(" - Added config_id to schedules table")
|
||||
print(" - Existing YAML configs remain in config_file column for backward compatibility")
|
||||
|
||||
|
||||
def downgrade():
|
||||
"""Remove scan config tables and columns."""
|
||||
|
||||
# Remove foreign keys and columns from schedules
|
||||
with op.batch_alter_table('schedules', schema=None) as batch_op:
|
||||
batch_op.drop_constraint('fk_schedules_config_id', type_='foreignkey')
|
||||
batch_op.drop_index('ix_schedules_config_id')
|
||||
batch_op.drop_column('config_id')
|
||||
# Restore config_file as required
|
||||
batch_op.alter_column('config_file', existing_type=sa.Text(), nullable=False)
|
||||
|
||||
# Remove foreign keys and columns from scans
|
||||
with op.batch_alter_table('scans', schema=None) as batch_op:
|
||||
batch_op.drop_constraint('fk_scans_config_id', type_='foreignkey')
|
||||
batch_op.drop_index('ix_scans_config_id')
|
||||
batch_op.drop_column('config_id')
|
||||
|
||||
# Drop tables in reverse order
|
||||
op.drop_index(op.f('ix_scan_config_sites_site_id'), table_name='scan_config_sites')
|
||||
op.drop_index(op.f('ix_scan_config_sites_config_id'), table_name='scan_config_sites')
|
||||
op.drop_table('scan_config_sites')
|
||||
|
||||
op.drop_table('scan_configs')
|
||||
|
||||
print("✓ Downgrade complete: Scan config tables and columns removed")
@@ -1,270 +0,0 @@
"""Expand CIDRs to individual IPs with per-IP settings

Revision ID: 008
Revises: 007
Create Date: 2025-11-19

This migration changes the site architecture to automatically expand CIDRs into
individual IPs in the database. Each IP has its own port and ping settings.

Changes:
- Add site_id to site_ips (direct link to sites, support standalone IPs)
- Make site_cidr_id nullable (IPs can exist without a CIDR parent)
- Remove settings from site_cidrs (settings now only at IP level)
- Add unique constraint: no duplicate IPs within a site
- Expand existing CIDRs to individual IPs
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
import ipaddress


# revision identifiers, used by Alembic
revision = '008'
down_revision = '007'
branch_labels = None
depends_on = None


def upgrade():
    """
    Modify schema to support per-IP settings and auto-expand CIDRs.
    """

    connection = op.get_bind()

    # Check if site_id column already exists
    inspector = sa.inspect(connection)
    site_ips_columns = [col['name'] for col in inspector.get_columns('site_ips')]
    site_cidrs_columns = [col['name'] for col in inspector.get_columns('site_cidrs')]

    # Step 1: Add site_id column to site_ips (will be populated from site_cidr_id)
    if 'site_id' not in site_ips_columns:
        print("Adding site_id column to site_ips...")
        op.add_column('site_ips', sa.Column('site_id', sa.Integer(), nullable=True, comment='FK to sites (direct link)'))
    else:
        print("site_id column already exists in site_ips, skipping...")

    # Step 2: Populate site_id from site_cidr_id (before we make it nullable)
    print("Populating site_id from existing site_cidr relationships...")
    connection.execute(text("""
        UPDATE site_ips
        SET site_id = (
            SELECT site_id
            FROM site_cidrs
            WHERE site_cidrs.id = site_ips.site_cidr_id
        )
        WHERE site_cidr_id IS NOT NULL
    """))

    # Step 3: Make site_id NOT NULL and add foreign key
    try:
        op.alter_column('site_ips', 'site_id', nullable=False)
        print("Made site_id NOT NULL")
    except Exception as e:
        print(f"site_id already NOT NULL or error: {e}")

    # Check if foreign key exists
    try:
        op.create_foreign_key('fk_site_ips_site_id', 'site_ips', 'sites', ['site_id'], ['id'])
        print("Created foreign key fk_site_ips_site_id")
    except Exception as e:
        print(f"Foreign key already exists or error: {e}")

    # Check if index exists
    try:
        op.create_index(op.f('ix_site_ips_site_id'), 'site_ips', ['site_id'], unique=False)
        print("Created index ix_site_ips_site_id")
    except Exception as e:
        print(f"Index already exists or error: {e}")

    # Step 4: Make site_cidr_id nullable (for standalone IPs)
    try:
        op.alter_column('site_ips', 'site_cidr_id', nullable=True)
        print("Made site_cidr_id nullable")
    except Exception as e:
        print(f"site_cidr_id already nullable or error: {e}")

    # Step 5: Drop old unique constraint and create new one (site_id, ip_address)
    # This prevents duplicate IPs within a site (across all CIDRs and standalone)
    try:
        op.drop_constraint('uix_site_cidr_ip', 'site_ips', type_='unique')
        print("Dropped old constraint uix_site_cidr_ip")
    except Exception as e:
        print(f"Constraint already dropped or doesn't exist: {e}")

    try:
        op.create_unique_constraint('uix_site_ip_address', 'site_ips', ['site_id', 'ip_address'])
        print("Created new constraint uix_site_ip_address")
    except Exception as e:
        print(f"Constraint already exists or error: {e}")

    # Step 6: Expand existing CIDRs to individual IPs
    print("Expanding existing CIDRs to individual IPs...")

    # Get all existing CIDRs
    cidrs = connection.execute(text("""
        SELECT id, site_id, cidr, expected_ping, expected_tcp_ports, expected_udp_ports
        FROM site_cidrs
    """)).fetchall()

    expanded_count = 0
    skipped_count = 0

    for cidr_row in cidrs:
        cidr_id, site_id, cidr_str, expected_ping, expected_tcp_ports, expected_udp_ports = cidr_row

        try:
            # Parse CIDR
            network = ipaddress.ip_network(cidr_str, strict=False)

            # Check size - skip networks larger than a /24 (IPv4) or /64 (IPv6)
            if isinstance(network, ipaddress.IPv4Network) and network.prefixlen < 24:
                print(f"  ⚠ Skipping large CIDR {cidr_str} ({network.num_addresses} IPs)")
                skipped_count += 1
                continue
            elif isinstance(network, ipaddress.IPv6Network) and network.prefixlen < 64:
                print(f"  ⚠ Skipping large CIDR {cidr_str} ({network.num_addresses} IPs)")
                skipped_count += 1
                continue

            # Expand to individual IPs
            for ip in network.hosts() if network.num_addresses > 2 else [network.network_address]:
                ip_str = str(ip)

                # Check if this IP already exists (from previous IP overrides)
                existing = connection.execute(text("""
                    SELECT id FROM site_ips
                    WHERE site_cidr_id = :cidr_id AND ip_address = :ip_address
                """), {'cidr_id': cidr_id, 'ip_address': ip_str}).fetchone()

                if not existing:
                    # Insert new IP with settings from CIDR
                    connection.execute(text("""
                        INSERT INTO site_ips (
                            site_id, site_cidr_id, ip_address,
                            expected_ping, expected_tcp_ports, expected_udp_ports,
                            created_at
                        )
                        VALUES (
                            :site_id, :cidr_id, :ip_address,
                            :expected_ping, :expected_tcp_ports, :expected_udp_ports,
                            datetime('now')
                        )
                    """), {
                        'site_id': site_id,
                        'cidr_id': cidr_id,
                        'ip_address': ip_str,
                        'expected_ping': expected_ping,
                        'expected_tcp_ports': expected_tcp_ports,
                        'expected_udp_ports': expected_udp_ports
                    })
                    expanded_count += 1

        except Exception as e:
            print(f"  ✗ Error expanding CIDR {cidr_str}: {e}")
            skipped_count += 1
            continue

    print(f"  ✓ Expanded {expanded_count} IPs from CIDRs")
    if skipped_count > 0:
        print(f"  ⚠ Skipped {skipped_count} CIDRs (too large or errors)")

    # Step 7: Remove settings columns from site_cidrs (now only at IP level)
    print("Removing settings columns from site_cidrs...")
    # Re-inspect with a fresh inspector to avoid cached reflection data
    inspector = sa.inspect(connection)
    site_cidrs_columns = [col['name'] for col in inspector.get_columns('site_cidrs')]

    if 'expected_ping' in site_cidrs_columns:
        try:
            op.drop_column('site_cidrs', 'expected_ping')
            print("Dropped expected_ping from site_cidrs")
        except Exception as e:
            print(f"Error dropping expected_ping: {e}")
    else:
        print("expected_ping already dropped from site_cidrs")

    if 'expected_tcp_ports' in site_cidrs_columns:
        try:
            op.drop_column('site_cidrs', 'expected_tcp_ports')
            print("Dropped expected_tcp_ports from site_cidrs")
        except Exception as e:
            print(f"Error dropping expected_tcp_ports: {e}")
    else:
        print("expected_tcp_ports already dropped from site_cidrs")

    if 'expected_udp_ports' in site_cidrs_columns:
        try:
            op.drop_column('site_cidrs', 'expected_udp_ports')
            print("Dropped expected_udp_ports from site_cidrs")
        except Exception as e:
            print(f"Error dropping expected_udp_ports: {e}")
    else:
        print("expected_udp_ports already dropped from site_cidrs")

    # Print summary
    total_sites = connection.execute(text('SELECT COUNT(*) FROM sites')).scalar()
    total_cidrs = connection.execute(text('SELECT COUNT(*) FROM site_cidrs')).scalar()
    total_ips = connection.execute(text('SELECT COUNT(*) FROM site_ips')).scalar()

    print("\n✓ Migration 008 complete: CIDRs expanded to individual IPs")
    print(f"  - Total sites: {total_sites}")
    print(f"  - Total CIDRs: {total_cidrs}")
    print(f"  - Total IPs: {total_ips}")


def downgrade():
    """
    Revert schema changes (restore CIDR-level settings).
    Note: This will lose per-IP granularity!
    """

    connection = op.get_bind()

    print("Rolling back to CIDR-level settings...")

    # Step 1: Add settings columns back to site_cidrs
    op.add_column('site_cidrs', sa.Column('expected_ping', sa.Boolean(), nullable=True))
    op.add_column('site_cidrs', sa.Column('expected_tcp_ports', sa.Text(), nullable=True))
    op.add_column('site_cidrs', sa.Column('expected_udp_ports', sa.Text(), nullable=True))

    # Step 2: Populate CIDR settings from first IP in each CIDR (approximation)
    connection.execute(text("""
        UPDATE site_cidrs
        SET
            expected_ping = (
                SELECT expected_ping FROM site_ips
                WHERE site_ips.site_cidr_id = site_cidrs.id
                LIMIT 1
            ),
            expected_tcp_ports = (
                SELECT expected_tcp_ports FROM site_ips
                WHERE site_ips.site_cidr_id = site_cidrs.id
                LIMIT 1
            ),
            expected_udp_ports = (
                SELECT expected_udp_ports FROM site_ips
                WHERE site_ips.site_cidr_id = site_cidrs.id
                LIMIT 1
            )
    """))

    # Step 3: Delete auto-expanded IPs (keep only original overrides)
    # In practice, this is difficult to determine, so we keep all IPs
    # and just remove the schema changes

    # Step 4: Drop new unique constraint and restore old one
    op.drop_constraint('uix_site_ip_address', 'site_ips', type_='unique')
    op.create_unique_constraint('uix_site_cidr_ip', 'site_ips', ['site_cidr_id', 'ip_address'])

    # Step 5: Make site_cidr_id NOT NULL again
    op.alter_column('site_ips', 'site_cidr_id', nullable=False)

    # Step 6: Drop site_id column and related constraints
    op.drop_index(op.f('ix_site_ips_site_id'), table_name='site_ips')
    op.drop_constraint('fk_site_ips_site_id', 'site_ips', type_='foreignkey')
    op.drop_column('site_ips', 'site_id')

    print("✓ Downgrade complete: Reverted to CIDR-level settings")
@@ -1,210 +0,0 @@
"""Remove CIDR table - make sites IP-only

Revision ID: 009
Revises: 008
Create Date: 2025-11-19

This migration removes the SiteCIDR table entirely, making sites purely
IP-based. CIDRs are now only used as a convenience for bulk IP addition,
not stored as permanent entities.

Changes:
- Set all site_ips.site_cidr_id to NULL (preserve all IPs)
- Drop foreign key from site_ips to site_cidrs
- Drop site_cidrs table
- Remove site_cidr_id column from site_ips

All existing IPs are preserved. They become "standalone" IPs without
a CIDR parent.
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text


# revision identifiers, used by Alembic
revision = '009'
down_revision = '008'
branch_labels = None
depends_on = None


def upgrade():
    """
    Remove CIDR table and make all IPs standalone.
    """

    connection = op.get_bind()
    inspector = sa.inspect(connection)

    print("\n=== Migration 009: Remove CIDR Table ===\n")

    # Get counts before migration
    try:
        total_cidrs = connection.execute(text('SELECT COUNT(*) FROM site_cidrs')).scalar()
        total_ips = connection.execute(text('SELECT COUNT(*) FROM site_ips')).scalar()
        ips_with_cidr = connection.execute(text(
            'SELECT COUNT(*) FROM site_ips WHERE site_cidr_id IS NOT NULL'
        )).scalar()

        print("Before migration:")
        print(f"  - Total CIDRs: {total_cidrs}")
        print(f"  - Total IPs: {total_ips}")
        print(f"  - IPs linked to CIDRs: {ips_with_cidr}")
        print(f"  - Standalone IPs: {total_ips - ips_with_cidr}\n")
    except Exception as e:
        print(f"Could not get pre-migration stats: {e}\n")

    # Step 1: Set all site_cidr_id to NULL (preserve all IPs as standalone)
    print("Step 1: Converting all IPs to standalone (nulling CIDR associations)...")
    try:
        result = connection.execute(text("""
            UPDATE site_ips
            SET site_cidr_id = NULL
            WHERE site_cidr_id IS NOT NULL
        """))
        print(f"  ✓ Converted {result.rowcount} IPs to standalone\n")
    except Exception as e:
        print(f"  ⚠ Error or already done: {e}\n")

    # Step 2: Drop foreign key constraint from site_ips to site_cidrs
    print("Step 2: Dropping foreign key constraint from site_ips to site_cidrs...")
    foreign_keys = inspector.get_foreign_keys('site_ips')
    fk_to_drop = None

    for fk in foreign_keys:
        if fk['referred_table'] == 'site_cidrs':
            fk_to_drop = fk['name']
            break

    if fk_to_drop:
        try:
            op.drop_constraint(fk_to_drop, 'site_ips', type_='foreignkey')
            print(f"  ✓ Dropped foreign key constraint: {fk_to_drop}\n")
        except Exception as e:
            print(f"  ⚠ Could not drop foreign key: {e}\n")
    else:
        print("  ⚠ Foreign key constraint not found or already dropped\n")

    # Step 3: Drop index on site_cidr_id (if it exists)
    print("Step 3: Dropping index on site_cidr_id...")
    indexes = inspector.get_indexes('site_ips')
    index_to_drop = None

    for idx in indexes:
        if 'site_cidr_id' in idx['column_names']:
            index_to_drop = idx['name']
            break

    if index_to_drop:
        try:
            op.drop_index(index_to_drop, table_name='site_ips')
            print(f"  ✓ Dropped index: {index_to_drop}\n")
        except Exception as e:
            print(f"  ⚠ Could not drop index: {e}\n")
    else:
        print("  ⚠ Index not found or already dropped\n")

    # Step 4: Drop site_cidrs table
    print("Step 4: Dropping site_cidrs table...")
    tables = inspector.get_table_names()

    if 'site_cidrs' in tables:
        try:
            op.drop_table('site_cidrs')
            print("  ✓ Dropped site_cidrs table\n")
        except Exception as e:
            print(f"  ⚠ Could not drop table: {e}\n")
    else:
        print("  ⚠ Table site_cidrs not found or already dropped\n")

    # Step 5: Drop site_cidr_id column from site_ips
    print("Step 5: Dropping site_cidr_id column from site_ips...")
    site_ips_columns = [col['name'] for col in inspector.get_columns('site_ips')]

    if 'site_cidr_id' in site_ips_columns:
        try:
            op.drop_column('site_ips', 'site_cidr_id')
            print("  ✓ Dropped site_cidr_id column from site_ips\n")
        except Exception as e:
            print(f"  ⚠ Could not drop column: {e}\n")
    else:
        print("  ⚠ Column site_cidr_id not found or already dropped\n")

    # Get counts after migration
    try:
        final_ips = connection.execute(text('SELECT COUNT(*) FROM site_ips')).scalar()
        total_sites = connection.execute(text('SELECT COUNT(*) FROM sites')).scalar()

        print("After migration:")
        print(f"  - Total sites: {total_sites}")
        print(f"  - Total IPs (all standalone): {final_ips}")
        print("  - CIDRs: N/A (table removed)")
    except Exception as e:
        print(f"Could not get post-migration stats: {e}")

    print("\n✓ Migration 009 complete: Sites are now IP-only")
    print("  All IPs preserved as standalone. CIDRs can still be used")
    print("  via the API/UI for bulk IP creation, but are not stored.\n")


def downgrade():
    """
    Recreate site_cidrs table (CANNOT restore original CIDR associations).

    WARNING: This downgrade creates an empty site_cidrs table structure but
    cannot restore the original CIDR-to-IP associations since that data was
    deleted. All IPs will remain standalone.
    """

    connection = op.get_bind()

    print("\n=== Downgrade 009: Recreate CIDR Table Structure ===\n")
    print("⚠ WARNING: Cannot restore original CIDR associations!")
    print("  The site_cidrs table structure will be recreated but will be empty.")
    print("  All IPs will remain standalone. This is a PARTIAL downgrade.\n")

    # Step 1: Recreate site_cidrs table (empty)
    print("Step 1: Recreating site_cidrs table structure...")
    try:
        op.create_table(
            'site_cidrs',
            sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
            sa.Column('site_id', sa.Integer(), nullable=False),
            sa.Column('cidr', sa.String(length=45), nullable=False, comment='CIDR notation (e.g., 10.0.0.0/24)'),
            sa.Column('created_at', sa.DateTime(), nullable=False),
            sa.PrimaryKeyConstraint('id'),
            sa.ForeignKeyConstraint(['site_id'], ['sites.id']),
            sa.UniqueConstraint('site_id', 'cidr', name='uix_site_cidr')
        )
        print("  ✓ Recreated site_cidrs table (empty)\n")
    except Exception as e:
        print(f"  ⚠ Could not create table: {e}\n")

    # Step 2: Add site_cidr_id column back to site_ips (nullable)
    print("Step 2: Adding site_cidr_id column back to site_ips...")
    try:
        op.add_column('site_ips', sa.Column('site_cidr_id', sa.Integer(), nullable=True, comment='FK to site_cidrs (optional, for grouping)'))
        print("  ✓ Added site_cidr_id column (nullable)\n")
    except Exception as e:
        print(f"  ⚠ Could not add column: {e}\n")

    # Step 3: Add foreign key constraint
    print("Step 3: Adding foreign key constraint...")
    try:
        op.create_foreign_key('fk_site_ips_site_cidr_id', 'site_ips', 'site_cidrs', ['site_cidr_id'], ['id'])
        print("  ✓ Created foreign key constraint\n")
    except Exception as e:
        print(f"  ⚠ Could not create foreign key: {e}\n")

    # Step 4: Add index on site_cidr_id
    print("Step 4: Adding index on site_cidr_id...")
    try:
        op.create_index('ix_site_ips_site_cidr_id', 'site_ips', ['site_cidr_id'], unique=False)
        print("  ✓ Created index on site_cidr_id\n")
    except Exception as e:
        print(f"  ⚠ Could not create index: {e}\n")

    print("✓ Downgrade complete: CIDR table structure restored (but empty)")
    print("  All IPs remain standalone. You would need to manually recreate")
    print("  CIDR records and associate IPs with them.\n")
@@ -1,53 +0,0 @@
"""Add config_id to alert_rules table

Revision ID: 010
Revises: 009
Create Date: 2025-11-19

This migration adds a config_id foreign key to the alert_rules table to replace
the config_file column, completing the migration from file-based to
database-based configurations.
"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic
revision = '010'
down_revision = '009'
branch_labels = None
depends_on = None


def upgrade():
    """
    Add config_id to alert_rules table and remove config_file.
    """

    with op.batch_alter_table('alert_rules', schema=None) as batch_op:
        # Add config_id column with foreign key
        batch_op.add_column(sa.Column('config_id', sa.Integer(), nullable=True, comment='FK to scan_configs table'))
        batch_op.create_index('ix_alert_rules_config_id', ['config_id'], unique=False)
        batch_op.create_foreign_key('fk_alert_rules_config_id', 'scan_configs', ['config_id'], ['id'])

        # Remove the old config_file column
        batch_op.drop_column('config_file')

    print("✓ Migration complete: AlertRule now uses config_id")
    print("  - Added config_id foreign key to alert_rules table")
    print("  - Removed deprecated config_file column")


def downgrade():
    """Remove config_id and restore config_file on alert_rules."""

    with op.batch_alter_table('alert_rules', schema=None) as batch_op:
        # Remove foreign key and config_id column
        batch_op.drop_constraint('fk_alert_rules_config_id', type_='foreignkey')
        batch_op.drop_index('ix_alert_rules_config_id')
        batch_op.drop_column('config_id')

        # Restore config_file column
        batch_op.add_column(sa.Column('config_file', sa.String(255), nullable=True, comment='Optional: specific config file this rule applies to'))

    print("✓ Downgrade complete: AlertRule config_id removed, config_file restored")
@@ -1,86 +0,0 @@
"""Drop deprecated config_file columns

Revision ID: 011
Revises: 010
Create Date: 2025-11-19

This migration removes the deprecated config_file columns from the scans and
schedules tables. All functionality now uses config_id to reference
database-stored configs.
"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic
revision = '011'
down_revision = '010'
branch_labels = None
depends_on = None


def upgrade():
    """
    Drop config_file columns from scans and schedules tables.

    Prerequisites:
    - All scans must have config_id set
    - All schedules must have config_id set
    - Code must be updated to no longer reference config_file
    """

    connection = op.get_bind()

    # Check for any records missing config_id
    result = connection.execute(sa.text(
        "SELECT COUNT(*) FROM scans WHERE config_id IS NULL"
    ))
    scans_without_config = result.scalar()

    result = connection.execute(sa.text(
        "SELECT COUNT(*) FROM schedules WHERE config_id IS NULL"
    ))
    schedules_without_config = result.scalar()

    if scans_without_config > 0:
        print(f"WARNING: {scans_without_config} scans have NULL config_id")
        print("  These scans will lose their config reference after migration")

    if schedules_without_config > 0:
        raise Exception(
            f"Cannot proceed: {schedules_without_config} schedules have NULL config_id. "
            "Please set config_id for all schedules before running this migration."
        )

    # Drop config_file from scans table
    with op.batch_alter_table('scans', schema=None) as batch_op:
        batch_op.drop_column('config_file')

    # Drop config_file from schedules table
    with op.batch_alter_table('schedules', schema=None) as batch_op:
        batch_op.drop_column('config_file')

    print("✓ Migration complete: Dropped config_file columns")
    print("  - Removed config_file from scans table")
    print("  - Removed config_file from schedules table")
    print("  - All references should now use config_id")


def downgrade():
    """Re-add config_file columns (data will be lost)."""

    # Add config_file back to scans
    with op.batch_alter_table('scans', schema=None) as batch_op:
        batch_op.add_column(
            sa.Column('config_file', sa.Text(), nullable=True,
                      comment='Path to YAML config used (deprecated)')
        )

    # Add config_file back to schedules
    with op.batch_alter_table('schedules', schema=None) as batch_op:
        batch_op.add_column(
            sa.Column('config_file', sa.Text(), nullable=True,
                      comment='Path to YAML config (deprecated)')
        )

    print("✓ Downgrade complete: Re-added config_file columns")
    print("  WARNING: config_file values are lost and will be NULL")
@@ -6,14 +6,17 @@ SneakyScanner - Masscan-based network scanner with YAML configuration
import argparse
import json
import logging
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
import zipfile
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any
from typing import Dict, List, Any, Callable, Optional
import xml.etree.ElementTree as ET

import yaml
@@ -22,12 +25,18 @@ from libnmap.parser import NmapParser

from src.screenshot_capture import ScreenshotCapture
from src.report_generator import HTMLReportGenerator
from web.config import NMAP_HOST_TIMEOUT

# Force unbuffered output for Docker
sys.stdout.reconfigure(line_buffering=True)
sys.stderr.reconfigure(line_buffering=True)


class ScanCancelledError(Exception):
    """Raised when a scan is cancelled by the user."""
    pass


class SneakyScanner:
    """Wrapper for masscan to perform network scans based on YAML config or database config"""

@@ -61,6 +70,34 @@ class SneakyScanner:

        self.screenshot_capture = None

        # Cancellation support
        self._cancelled = False
        self._cancel_lock = threading.Lock()
        self._active_process = None
        self._process_lock = threading.Lock()

    def cancel(self):
        """
        Cancel the running scan.

        Terminates any active subprocess and sets the cancellation flag.
        """
        with self._cancel_lock:
            self._cancelled = True

        with self._process_lock:
            if self._active_process and self._active_process.poll() is None:
                try:
                    # Terminate the process group
                    os.killpg(os.getpgid(self._active_process.pid), signal.SIGTERM)
                except (ProcessLookupError, OSError):
                    pass

    def is_cancelled(self) -> bool:
        """Check if the scan has been cancelled."""
        with self._cancel_lock:
            return self._cancelled
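
    # Illustrative note (not part of the scanner): cancel() works because each
    # subprocess below is launched with start_new_session=True, which makes the
    # child a process-group leader; os.killpg() then signals the whole group,
    # reaching the tool and anything it forked. A minimal standalone sketch:
    #
    #     proc = subprocess.Popen(['sleep', '60'], start_new_session=True)
    #     os.killpg(os.getpgid(proc.pid), signal.SIGTERM)  # terminate the group
    #     proc.wait()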

    def _load_config(self) -> Dict[str, Any]:
        """
        Load and validate configuration from file or database.
@@ -381,11 +418,31 @@ class SneakyScanner:
            raise ValueError(f"Invalid protocol: {protocol}")

        print(f"Running: {' '.join(cmd)}", flush=True)
        result = subprocess.run(cmd, capture_output=True, text=True)

        # Use Popen for cancellation support
        with self._process_lock:
            self._active_process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                start_new_session=True
            )

        stdout, stderr = self._active_process.communicate()
        returncode = self._active_process.returncode

        with self._process_lock:
            self._active_process = None

        # Check if cancelled
        if self.is_cancelled():
            return []

        print(f"Masscan {protocol.upper()} scan completed", flush=True)

        if result.returncode != 0:
            print(f"Masscan stderr: {result.stderr}", file=sys.stderr)
        if returncode != 0:
            print(f"Masscan stderr: {stderr}", file=sys.stderr)

        # Parse masscan JSON output
        results = []
@@ -433,11 +490,31 @@ class SneakyScanner:
        ]

        print(f"Running: {' '.join(cmd)}", flush=True)
        result = subprocess.run(cmd, capture_output=True, text=True)

        # Use Popen for cancellation support
        with self._process_lock:
            self._active_process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                start_new_session=True
            )

        stdout, stderr = self._active_process.communicate()
        returncode = self._active_process.returncode

        with self._process_lock:
            self._active_process = None

        # Check if cancelled
        if self.is_cancelled():
            return {}

        print("Masscan PING scan completed", flush=True)

        if result.returncode != 0:
            print(f"Masscan stderr: {result.stderr}", file=sys.stderr, flush=True)
        if returncode != 0:
            print(f"Masscan stderr: {stderr}", file=sys.stderr, flush=True)

        # Parse results
        responding_ips = set()
@@ -475,6 +552,10 @@ class SneakyScanner:
        all_services = {}

        for ip, ports in ip_ports.items():
            # Check if cancelled before each host
            if self.is_cancelled():
                break

            if not ports:
                all_services[ip] = []
                continue
@@ -496,14 +577,33 @@ class SneakyScanner:
                '--version-intensity', '5',  # Balanced speed/accuracy
                '-p', port_list,
                '-oX', xml_output,  # XML output
                '--host-timeout', '5m',  # Timeout per host
                '--host-timeout', NMAP_HOST_TIMEOUT,  # Timeout per host
                ip
            ]

            result = subprocess.run(cmd, capture_output=True, text=True, timeout=600)
            # Use Popen for cancellation support
            with self._process_lock:
                self._active_process = subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                    start_new_session=True
                )

            if result.returncode != 0:
                print(f"  Nmap warning for {ip}: {result.stderr}", file=sys.stderr, flush=True)
            stdout, stderr = self._active_process.communicate(timeout=600)
            returncode = self._active_process.returncode

            with self._process_lock:
                self._active_process = None

            # Check if cancelled
            if self.is_cancelled():
                Path(xml_output).unlink(missing_ok=True)
                break

            if returncode != 0:
                print(f"  Nmap warning for {ip}: {stderr}", file=sys.stderr, flush=True)

            # Parse XML output
            services = self._parse_nmap_xml(xml_output)
@@ -576,29 +676,57 @@ class SneakyScanner:

        return services

    def _is_likely_web_service(self, service: Dict) -> bool:
    def _is_likely_web_service(self, service: Dict, ip: str = None) -> bool:
        """
        Check if a service is likely HTTP/HTTPS based on nmap detection or common web ports
        Check if a service is a web server by actually making an HTTP request

        Args:
            service: Service dictionary from nmap results
            ip: IP address to test (required for HTTP probe)

        Returns:
            True if service appears to be web-related
            True if service responds to HTTP/HTTPS requests
        """
        # Check service name
        import requests
        import urllib3
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

        # Quick check for known web service names first
        web_services = ['http', 'https', 'ssl', 'http-proxy', 'https-alt',
                        'http-alt', 'ssl/http', 'ssl/https']
        service_name = service.get('service', '').lower()

        # If no IP provided, we can't do the HTTP probe
        port = service.get('port')
        if not ip or not port:
            # Fall back to the service name alone - we shouldn't normally get here, but just in case...
            if service_name in web_services:
                return True
            return False

        # Check common non-standard web ports
        web_ports = [80, 443, 8000, 8006, 8008, 8080, 8081, 8443, 8888, 9443]
        port = service.get('port')
        # Actually try to connect - this is the definitive test
        # Try HTTPS first, then HTTP
        for protocol in ['https', 'http']:
            url = f"{protocol}://{ip}:{port}/"
            try:
                response = requests.get(
                    url,
                    timeout=3,
                    verify=False,
                    allow_redirects=False
                )
                # Any status code means it's a web server
                # (including 404, 500, etc. - still a web server)
                return True
            except requests.exceptions.SSLError:
                # SSL error on HTTPS, try HTTP next
                continue
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout,
                    requests.exceptions.RequestException):
                continue

        return port in web_ports
        return False
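
    # Illustrative note (not part of the scanner): the probe above treats any
    # HTTP response as "web" - even a 4xx/5xx proves a web server answered - and
    # only connection-level failures fall through. Since SSLError is itself a
    # requests.exceptions.RequestException, a condensed equivalent of the loop is:
    #
    #     for protocol in ('https', 'http'):
    #         try:
    #             requests.get(f"{protocol}://{ip}:{port}/", timeout=3,
    #                          verify=False, allow_redirects=False)
    #             return True
    #         except requests.exceptions.RequestException:
    #             continue
    #     return False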

    def _detect_http_https(self, ip: str, port: int, timeout: int = 5) -> str:
        """
@@ -786,7 +914,7 @@ class SneakyScanner:
        ip_results = {}

        for service in services:
            if not self._is_likely_web_service(service):
            if not self._is_likely_web_service(service, ip):
                continue

            port = service['port']
@@ -832,10 +960,17 @@ class SneakyScanner:

        return all_results

    def scan(self) -> Dict[str, Any]:
    def scan(self, progress_callback: Optional[Callable] = None) -> Dict[str, Any]:
        """
        Perform complete scan based on configuration

        Args:
            progress_callback: Optional callback function for progress updates.
                Called with (phase, ip, data) where:
                - phase: 'init', 'ping', 'tcp_scan', 'udp_scan', 'service_detection', 'http_analysis'
                - ip: IP address being processed (or None for phase start)
                - data: Dict with progress data (results, counts, etc.)

        Returns:
            Dictionary containing scan results
        """
@@ -872,17 +1007,61 @@ class SneakyScanner:
        all_ips = sorted(list(all_ips))
        print(f"Total IPs to scan: {len(all_ips)}", flush=True)

        # Report initialization with total IP count
        if progress_callback:
            progress_callback('init', None, {
                'total_ips': len(all_ips),
                'ip_to_site': ip_to_site
            })

        # Perform ping scan
        print(f"\n[1/5] Performing ping scan on {len(all_ips)} IPs...", flush=True)
        if progress_callback:
            progress_callback('ping', None, {'status': 'starting'})
        ping_results = self._run_ping_scan(all_ips)

        # Check for cancellation
        if self.is_cancelled():
            print("\nScan cancelled by user", flush=True)
            raise ScanCancelledError("Scan cancelled by user")

        # Report ping results
        if progress_callback:
            progress_callback('ping', None, {
                'status': 'completed',
                'results': ping_results
            })

        # Perform TCP scan (all ports)
        print(f"\n[2/5] Performing TCP scan on {len(all_ips)} IPs (ports 0-65535)...", flush=True)
        if progress_callback:
            progress_callback('tcp_scan', None, {'status': 'starting'})
        tcp_results = self._run_masscan(all_ips, '0-65535', 'tcp')

        # Perform UDP scan (all ports)
        print(f"\n[3/5] Performing UDP scan on {len(all_ips)} IPs (ports 0-65535)...", flush=True)
        udp_results = self._run_masscan(all_ips, '0-65535', 'udp')
        # Check for cancellation
        if self.is_cancelled():
            print("\nScan cancelled by user", flush=True)
            raise ScanCancelledError("Scan cancelled by user")

        # Perform UDP scan (if enabled)
        udp_enabled = os.environ.get('UDP_SCAN_ENABLED', 'false').lower() == 'true'
        udp_ports = os.environ.get('UDP_PORTS', '53,67,68,69,123,161,500,514,1900')

        if udp_enabled:
            print(f"\n[3/5] Performing UDP scan on {len(all_ips)} IPs (ports {udp_ports})...", flush=True)
            if progress_callback:
                progress_callback('udp_scan', None, {'status': 'starting'})
            udp_results = self._run_masscan(all_ips, udp_ports, 'udp')

            # Check for cancellation
            if self.is_cancelled():
                print("\nScan cancelled by user", flush=True)
                raise ScanCancelledError("Scan cancelled by user")
        else:
            print("\n[3/5] Skipping UDP scan (disabled)...", flush=True)
            if progress_callback:
                progress_callback('udp_scan', None, {'status': 'skipped'})
            udp_results = []
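
        # Illustrative environment settings for the optional UDP scan (variable
        # names are taken from the os.environ lookups above; values are examples):
        #
        #     UDP_SCAN_ENABLED=true
        #     UDP_PORTS=53,123,161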

        # Organize results by IP
        results_by_ip = {}
@@ -917,20 +1096,56 @@ class SneakyScanner:
            results_by_ip[ip]['actual']['tcp_ports'].sort()
            results_by_ip[ip]['actual']['udp_ports'].sort()

        # Report TCP/UDP scan results with discovered ports per IP
        if progress_callback:
            tcp_udp_results = {}
            for ip in all_ips:
                tcp_udp_results[ip] = {
                    'tcp_ports': results_by_ip[ip]['actual']['tcp_ports'],
                    'udp_ports': results_by_ip[ip]['actual']['udp_ports']
                }
            progress_callback('tcp_scan', None, {
                'status': 'completed',
                'results': tcp_udp_results
            })

        # Perform service detection on TCP ports
        print("\n[4/5] Performing service detection on discovered TCP ports...", flush=True)
        if progress_callback:
            progress_callback('service_detection', None, {'status': 'starting'})
        ip_ports = {ip: results_by_ip[ip]['actual']['tcp_ports'] for ip in all_ips}
        service_results = self._run_nmap_service_detection(ip_ports)

        # Check for cancellation
        if self.is_cancelled():
            print("\nScan cancelled by user", flush=True)
            raise ScanCancelledError("Scan cancelled by user")

        # Add service information to results
        for ip, services in service_results.items():
            if ip in results_by_ip:
                results_by_ip[ip]['actual']['services'] = services

        # Report service detection results
        if progress_callback:
            progress_callback('service_detection', None, {
                'status': 'completed',
                'results': service_results
            })

        # Perform HTTP/HTTPS analysis on web services
        print("\n[5/5] Analyzing HTTP/HTTPS services and SSL/TLS configuration...", flush=True)
        if progress_callback:
            progress_callback('http_analysis', None, {'status': 'starting'})
        http_results = self._run_http_analysis(service_results)

        # Report HTTP analysis completion
        if progress_callback:
            progress_callback('http_analysis', None, {
                'status': 'completed',
                'results': http_results
            })

        # Merge HTTP analysis into service results
        for ip, port_results in http_results.items():
            if ip in results_by_ip:
@@ -5,18 +5,107 @@ Handles endpoints for triggering scans, listing scan history, and retrieving
scan results.
"""

import json
import logging
from datetime import datetime
from pathlib import Path

from flask import Blueprint, current_app, jsonify, request
from sqlalchemy.exc import SQLAlchemyError

from web.auth.decorators import api_auth_required
from web.models import Scan, ScanProgress
from web.services.scan_service import ScanService
from web.utils.pagination import validate_page_params
from web.jobs.scan_job import stop_scan

bp = Blueprint('scans', __name__)
logger = logging.getLogger(__name__)


def _recover_orphaned_scan(scan: Scan, session) -> dict:
    """
    Recover an orphaned scan by checking for output files.

    If output files exist: mark as 'completed' (smart recovery)
    If no output files: mark as 'cancelled'

    Args:
        scan: The orphaned Scan object
        session: Database session

    Returns:
        Dictionary with recovery result for API response
    """
    # Check for existing output files
    output_exists = False
    output_files_found = []

    # Check paths stored in database
    if scan.json_path and Path(scan.json_path).exists():
        output_exists = True
        output_files_found.append('json')
    if scan.html_path and Path(scan.html_path).exists():
        output_files_found.append('html')
    if scan.zip_path and Path(scan.zip_path).exists():
        output_files_found.append('zip')

    # Also check by timestamp pattern if paths not stored yet
    if not output_exists and scan.started_at:
        output_dir = Path('/app/output')
        if output_dir.exists():
            timestamp_pattern = scan.started_at.strftime('%Y%m%d')
            for json_file in output_dir.glob(f'scan_report_{timestamp_pattern}*.json'):
                output_exists = True
                output_files_found.append('json')
                # Update scan record with found paths
                scan.json_path = str(json_file)
                html_file = json_file.with_suffix('.html')
                if html_file.exists():
                    scan.html_path = str(html_file)
                    output_files_found.append('html')
                zip_file = json_file.with_suffix('.zip')
                if zip_file.exists():
                    scan.zip_path = str(zip_file)
                    output_files_found.append('zip')
                break

    if output_exists:
        # Smart recovery: outputs exist, mark as completed
        scan.status = 'completed'
        scan.completed_at = datetime.utcnow()
        if scan.started_at:
            scan.duration = (datetime.utcnow() - scan.started_at).total_seconds()
        scan.error_message = None
        session.commit()

        logger.info(f"Scan {scan.id}: Recovered as completed (files: {output_files_found})")

        # Join outside the f-string so the quoting also works on Python < 3.12
        files_str = ", ".join(output_files_found)
        return {
            'scan_id': scan.id,
            'status': 'completed',
            'message': f'Scan recovered as completed (output files found: {files_str})',
            'recovery_type': 'smart_recovery'
        }
    else:
        # No outputs: mark as cancelled
        scan.status = 'cancelled'
        scan.completed_at = datetime.utcnow()
        if scan.started_at:
            scan.duration = (datetime.utcnow() - scan.started_at).total_seconds()
        scan.error_message = 'Scan process was interrupted before completion. No output files were generated.'
        session.commit()

        logger.info(f"Scan {scan.id}: Marked as cancelled (orphaned, no output files)")

        return {
            'scan_id': scan.id,
            'status': 'cancelled',
            'message': 'Orphaned scan cancelled (no output files found)',
            'recovery_type': 'orphan_cleanup'
        }


@bp.route('', methods=['GET'])
@api_auth_required
def list_scans():
@@ -240,6 +329,77 @@ def delete_scan(scan_id):
        }), 500


@bp.route('/<int:scan_id>/stop', methods=['POST'])
@api_auth_required
def stop_running_scan(scan_id):
    """
    Stop a running scan with smart recovery for orphaned scans.

    If the scan is actively running in the registry, sends a cancel signal.
    If the scan shows as running/finalizing but is not in the registry (orphaned),
    performs smart recovery: marks as 'completed' if output files exist,
    otherwise marks as 'cancelled'.

    Args:
        scan_id: Scan ID to stop

    Returns:
        JSON response with stop status or recovery result
    """
    try:
        session = current_app.db_session

        # Check if scan exists
        scan = session.query(Scan).filter_by(id=scan_id).first()
        if not scan:
            logger.warning(f"Scan not found for stop request: {scan_id}")
            return jsonify({
                'error': 'Not found',
                'message': f'Scan with ID {scan_id} not found'
            }), 404

        # Allow stopping scans with status 'running' or 'finalizing'
        if scan.status not in ('running', 'finalizing'):
            logger.warning(f"Cannot stop scan {scan_id}: status is '{scan.status}'")
            return jsonify({
                'error': 'Invalid state',
                'message': f"Cannot stop scan: status is '{scan.status}'"
            }), 400

        # Get database URL from app config
        db_url = current_app.config['SQLALCHEMY_DATABASE_URI']

        # Attempt to stop the scan
        stopped = stop_scan(scan_id, db_url)

        if stopped:
            logger.info(f"Stop signal sent to scan {scan_id}")
            return jsonify({
                'scan_id': scan_id,
                'message': 'Stop signal sent to scan',
                'status': 'stopping'
            }), 200
        else:
            # Scanner not in registry - this is an orphaned scan.
            # Attempt smart recovery.
            logger.warning(f"Scan {scan_id} not in registry, attempting smart recovery")
            recovery_result = _recover_orphaned_scan(scan, session)
            return jsonify(recovery_result), 200

    except SQLAlchemyError as e:
        logger.error(f"Database error stopping scan {scan_id}: {str(e)}")
        return jsonify({
            'error': 'Database error',
            'message': 'Failed to stop scan'
        }), 500
    except Exception as e:
        logger.error(f"Unexpected error stopping scan {scan_id}: {str(e)}", exc_info=True)
        return jsonify({
            'error': 'Internal server error',
            'message': 'An unexpected error occurred'
        }), 500
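

# Illustrative usage (not part of this module): invoking the stop endpoint from a
# client. The base URL, blueprint prefix, and auth header below are placeholders;
# the response is either 'stopping' or one of the smart-recovery results above.
#
#     import requests
#     resp = requests.post('http://localhost:5000/api/scans/42/stop',
#                          headers={'X-API-Key': '...'}, timeout=10)
#     print(resp.status_code, resp.json())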


@bp.route('/<int:scan_id>/status', methods=['GET'])
@api_auth_required
def get_scan_status(scan_id):
@@ -281,6 +441,141 @@ def get_scan_status(scan_id):
        }), 500


@bp.route('/<int:scan_id>/progress', methods=['GET'])
@api_auth_required
def get_scan_progress(scan_id):
    """
    Get detailed progress for a running scan including per-IP results.

    Args:
        scan_id: Scan ID

    Returns:
        JSON response with scan progress including:
        - current_phase: Current scan phase
        - total_ips: Total IPs being scanned
        - completed_ips: Number of IPs completed in current phase
        - progress_entries: List of per-IP progress with discovered results
    """
    try:
        session = current_app.db_session

        # Get scan record
        scan = session.query(Scan).filter_by(id=scan_id).first()
        if not scan:
            logger.warning(f"Scan not found for progress check: {scan_id}")
            return jsonify({
                'error': 'Not found',
                'message': f'Scan with ID {scan_id} not found'
            }), 404

        # Get progress entries
        progress_entries = session.query(ScanProgress).filter_by(scan_id=scan_id).all()

        # Build progress data
        entries = []
        for entry in progress_entries:
            entry_data = {
                'ip_address': entry.ip_address,
                'site_name': entry.site_name,
                'phase': entry.phase,
                'status': entry.status,
                'ping_result': entry.ping_result
            }

            # Parse JSON fields
            if entry.tcp_ports:
                entry_data['tcp_ports'] = json.loads(entry.tcp_ports)
            else:
                entry_data['tcp_ports'] = []

            if entry.udp_ports:
                entry_data['udp_ports'] = json.loads(entry.udp_ports)
            else:
                entry_data['udp_ports'] = []

            if entry.services:
                entry_data['services'] = json.loads(entry.services)
            else:
                entry_data['services'] = []

            entries.append(entry_data)

        # Sort entries by site name then IP (numerically)
        def ip_sort_key(ip_str):
            """Convert an IP to a tuple of integers for proper numeric sorting."""
            try:
                return tuple(int(octet) for octet in ip_str.split('.'))
            except (ValueError, AttributeError):
                return (0, 0, 0, 0)

        entries.sort(key=lambda x: (x['site_name'] or '', ip_sort_key(x['ip_address'])))

        response = {
            'scan_id': scan_id,
            'status': scan.status,
            'current_phase': scan.current_phase or 'pending',
            'total_ips': scan.total_ips or 0,
            'completed_ips': scan.completed_ips or 0,
            'progress_entries': entries
        }

        logger.debug(f"Retrieved progress for scan {scan_id}: phase={scan.current_phase}, {scan.completed_ips}/{scan.total_ips} IPs")
        return jsonify(response)

    except SQLAlchemyError as e:
        logger.error(f"Database error retrieving scan progress {scan_id}: {str(e)}")
        return jsonify({
            'error': 'Database error',
            'message': 'Failed to retrieve scan progress'
        }), 500
    except Exception as e:
        logger.error(f"Unexpected error retrieving scan progress {scan_id}: {str(e)}", exc_info=True)
        return jsonify({
            'error': 'Internal server error',
            'message': 'An unexpected error occurred'
        }), 500
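

# Illustrative response shape for the progress endpoint above (field names are
# taken from the handler; values are examples only):
#
#     {
#       "scan_id": 42,
#       "status": "running",
#       "current_phase": "tcp_scan",
#       "total_ips": 8,
#       "completed_ips": 3,
#       "progress_entries": [
#         {"ip_address": "10.0.0.5", "site_name": "lab", "phase": "tcp_scan",
#          "status": "completed", "ping_result": true,
#          "tcp_ports": [22, 443], "udp_ports": [], "services": []}
#       ]
#     }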


@bp.route('/by-ip/<ip_address>', methods=['GET'])
@api_auth_required
def get_scans_by_ip(ip_address):
    """
    Get last 10 scans containing a specific IP address.

    Args:
        ip_address: IP address to search for

    Returns:
        JSON response with list of scans containing the IP
    """
    try:
        # Get scans from service
        scan_service = ScanService(current_app.db_session)
        scans = scan_service.get_scans_by_ip(ip_address)

        logger.info(f"Retrieved {len(scans)} scans for IP: {ip_address}")

        return jsonify({
            'ip_address': ip_address,
            'scans': scans,
            'count': len(scans)
        })

    except SQLAlchemyError as e:
        logger.error(f"Database error retrieving scans for IP {ip_address}: {str(e)}")
        return jsonify({
            'error': 'Database error',
            'message': 'Failed to retrieve scans'
        }), 500
    except Exception as e:
        logger.error(f"Unexpected error retrieving scans for IP {ip_address}: {str(e)}", exc_info=True)
        return jsonify({
            'error': 'Internal server error',
            'message': 'An unexpected error occurred'
        }), 500


@bp.route('/<int:scan_id1>/compare/<int:scan_id2>', methods=['GET'])
@api_auth_required
def compare_scans(scan_id1, scan_id2):
@@ -36,9 +36,15 @@ def list_sites():
    if request.args.get('all', '').lower() == 'true':
        site_service = SiteService(current_app.db_session)
        sites = site_service.list_all_sites()
        ip_stats = site_service.get_global_ip_stats()

        logger.info(f"Listed all sites (count={len(sites)})")
        return jsonify({'sites': sites})
        return jsonify({
            'sites': sites,
            'total_ips': ip_stats['total_ips'],
            'unique_ips': ip_stats['unique_ips'],
            'duplicate_ips': ip_stats['duplicate_ips']
        })

    # Get and validate query parameters
    page = request.args.get('page', 1, type=int)
@@ -307,9 +307,12 @@ def init_scheduler(app: Flask) -> None:
    with app.app_context():
        # Clean up any orphaned scans from previous crashes/restarts
        scan_service = ScanService(app.db_session)
        orphaned_count = scan_service.cleanup_orphaned_scans()
        if orphaned_count > 0:
            app.logger.warning(f"Cleaned up {orphaned_count} orphaned scan(s) on startup")
        cleanup_result = scan_service.cleanup_orphaned_scans()
        if cleanup_result['total'] > 0:
            app.logger.warning(
                f"Cleaned up {cleanup_result['total']} orphaned scan(s) on startup: "
                f"{cleanup_result['recovered']} recovered, {cleanup_result['failed']} failed"
            )

        # Load all enabled schedules from database
        scheduler.load_schedules_on_startup()
@@ -7,7 +7,10 @@ that are managed by developers, not stored in the database.

# Application metadata
APP_NAME = 'SneakyScanner'
APP_VERSION = '1.0.0-alpha'
APP_VERSION = '1.0.0-beta'

# Repository URL
REPO_URL = 'https://git.sneakygeek.net/sneakygeek/SneakyScan'

# Scanner settings
NMAP_HOST_TIMEOUT = '2m'  # Timeout per host for nmap service detection
@@ -5,7 +5,9 @@ This module handles the execution of scans in background threads,
updating database status and handling errors.
"""

import json
import logging
import threading
import traceback
from datetime import datetime
from pathlib import Path
@@ -13,13 +15,168 @@ from pathlib import Path
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from src.scanner import SneakyScanner
from web.models import Scan
from src.scanner import SneakyScanner, ScanCancelledError
from web.models import Scan, ScanProgress
from web.services.scan_service import ScanService
from web.services.alert_service import AlertService

logger = logging.getLogger(__name__)

# Registry for tracking running scanners (scan_id -> SneakyScanner instance)
_running_scanners = {}
_running_scanners_lock = threading.Lock()


def get_running_scanner(scan_id: int):
    """Get a running scanner instance by scan ID."""
    with _running_scanners_lock:
        return _running_scanners.get(scan_id)


def stop_scan(scan_id: int, db_url: str) -> bool:
    """
    Stop a running scan.

    Args:
        scan_id: ID of the scan to stop
        db_url: Database connection URL

    Returns:
        True if scan was cancelled, False if not found or already stopped
    """
    logger.info(f"Attempting to stop scan {scan_id}")

    # Get the scanner instance
    scanner = get_running_scanner(scan_id)
    if not scanner:
        logger.warning(f"Scanner for scan {scan_id} not found in registry")
        return False

    # Cancel the scanner
    scanner.cancel()
    logger.info(f"Cancellation signal sent to scan {scan_id}")

    return True
|
||||
|
||||
|
||||
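Note that `stop_scan()` only signals the scanner; the scan thread is expected to notice the flag and raise `ScanCancelledError` itself. SneakyScanner's internals are not part of this diff, but a minimal sketch of the cooperative-cancellation pattern it implies (class and method names hypothetical) might look like:

```python
import threading

class ScanCancelledError(Exception):
    """Raised inside the scan thread when cancellation was requested."""

class CancellableScanner:
    def __init__(self, targets):
        self._targets = targets
        self._cancelled = threading.Event()  # set from another thread via cancel()

    def cancel(self):
        self._cancelled.set()

    def scan(self, progress_callback=None):
        for ip in self._targets:
            # Check the flag between units of work; raising here unwinds
            # into execute_scan()'s `except ScanCancelledError` handler.
            if self._cancelled.is_set():
                raise ScanCancelledError(f"Cancelled before scanning {ip}")
            self._scan_one(ip)
```
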
def create_progress_callback(scan_id: int, session):
    """
    Create a progress callback function for updating scan progress in database.

    Args:
        scan_id: ID of the scan record
        session: Database session

    Returns:
        Callback function that accepts (phase, ip, data)
    """
    ip_to_site = {}

    def progress_callback(phase: str, ip: str, data: dict):
        """Update scan progress in database."""
        nonlocal ip_to_site

        try:
            # Get scan record
            scan = session.query(Scan).filter_by(id=scan_id).first()
            if not scan:
                return

            # Handle initialization phase
            if phase == 'init':
                scan.total_ips = data.get('total_ips', 0)
                scan.completed_ips = 0
                scan.current_phase = 'ping'
                ip_to_site = data.get('ip_to_site', {})

                # Create progress entries for all IPs
                for ip_addr, site_name in ip_to_site.items():
                    progress = ScanProgress(
                        scan_id=scan_id,
                        ip_address=ip_addr,
                        site_name=site_name,
                        phase='pending',
                        status='pending'
                    )
                    session.add(progress)

                session.commit()
                return

            # Update current phase
            if data.get('status') == 'starting':
                scan.current_phase = phase
                scan.completed_ips = 0
                session.commit()
                return

            # Handle phase completion with results
            if data.get('status') == 'completed':
                results = data.get('results', {})

                if phase == 'ping':
                    # Update progress entries with ping results
                    for ip_addr, ping_result in results.items():
                        progress = session.query(ScanProgress).filter_by(
                            scan_id=scan_id, ip_address=ip_addr
                        ).first()
                        if progress:
                            progress.ping_result = ping_result
                            progress.phase = 'ping'
                            progress.status = 'completed'

                    scan.completed_ips = len(results)

                elif phase == 'tcp_scan':
                    # Update progress entries with TCP/UDP port results
                    for ip_addr, port_data in results.items():
                        progress = session.query(ScanProgress).filter_by(
                            scan_id=scan_id, ip_address=ip_addr
                        ).first()
                        if progress:
                            progress.tcp_ports = json.dumps(port_data.get('tcp_ports', []))
                            progress.udp_ports = json.dumps(port_data.get('udp_ports', []))
                            progress.phase = 'tcp_scan'
                            progress.status = 'completed'

                    scan.completed_ips = len(results)

                elif phase == 'service_detection':
                    # Update progress entries with service detection results
                    for ip_addr, services in results.items():
                        progress = session.query(ScanProgress).filter_by(
                            scan_id=scan_id, ip_address=ip_addr
                        ).first()
                        if progress:
                            # Simplify service data for storage
                            service_list = []
                            for svc in services:
                                service_list.append({
                                    'port': svc.get('port'),
                                    'service': svc.get('service', 'unknown'),
                                    'product': svc.get('product', ''),
                                    'version': svc.get('version', '')
                                })
                            progress.services = json.dumps(service_list)
                            progress.phase = 'service_detection'
                            progress.status = 'completed'

                    scan.completed_ips = len(results)

                elif phase == 'http_analysis':
                    # Mark HTTP analysis as complete
                    scan.current_phase = 'completed'
                    scan.completed_ips = scan.total_ips

                session.commit()

        except Exception as e:
            logger.error(f"Progress callback error for scan {scan_id}: {str(e)}")
            # Don't re-raise - we don't want to break the scan
            session.rollback()

    return progress_callback

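For reference, the contract the scanner is expected to honour when invoking this callback (values below are illustrative, assuming an open SQLAlchemy session and an existing Scan row with id 42):

```python
callback = create_progress_callback(scan_id=42, session=session)

# 'init' seeds total_ips and creates one ScanProgress row per target IP
callback('init', None, {
    'total_ips': 2,
    'ip_to_site': {'10.0.0.1': 'HQ', '10.0.0.2': 'HQ'},
})

# Each phase signals 'starting' first, then 'completed' with per-IP results
callback('ping', None, {'status': 'starting'})
callback('ping', None, {
    'status': 'completed',
    'results': {'10.0.0.1': True, '10.0.0.2': False},
})
```
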
def execute_scan(scan_id: int, config_id: int, db_url: str = None):
    """
@@ -66,20 +223,61 @@ def execute_scan(scan_id: int, config_id: int, db_url: str = None):
        # Initialize scanner with database config
        scanner = SneakyScanner(config_id=config_id)

        # Execute scan
        # Register scanner in the running registry
        with _running_scanners_lock:
            _running_scanners[scan_id] = scanner
        logger.debug(f"Scan {scan_id}: Registered in running scanners registry")

        # Create progress callback
        progress_callback = create_progress_callback(scan_id, session)

        # Execute scan with progress tracking
        logger.info(f"Scan {scan_id}: Running scanner...")
        start_time = datetime.utcnow()
        report, timestamp = scanner.scan()
        report, timestamp = scanner.scan(progress_callback=progress_callback)
        end_time = datetime.utcnow()

        scan_duration = (end_time - start_time).total_seconds()
        logger.info(f"Scan {scan_id}: Scanner completed in {scan_duration:.2f} seconds")

        # Generate output files (JSON, HTML, ZIP)
        # Transition to 'finalizing' status before output generation
        try:
            scan = session.query(Scan).filter_by(id=scan_id).first()
            if scan:
                scan.status = 'finalizing'
                scan.current_phase = 'generating_outputs'
                session.commit()
                logger.info(f"Scan {scan_id}: Status changed to 'finalizing'")
        except Exception as e:
            logger.error(f"Scan {scan_id}: Failed to update status to finalizing: {e}")
            session.rollback()

        # Generate output files (JSON, HTML, ZIP) with error handling
        output_paths = {}
        output_generation_failed = False
        try:
            logger.info(f"Scan {scan_id}: Generating output files...")
            output_paths = scanner.generate_outputs(report, timestamp)
        except Exception as e:
            output_generation_failed = True
            logger.error(f"Scan {scan_id}: Output generation failed: {str(e)}")
            logger.error(f"Scan {scan_id}: Traceback:\n{traceback.format_exc()}")
            # Still mark scan as completed with warning since scan data is valid
            try:
                scan = session.query(Scan).filter_by(id=scan_id).first()
                if scan:
                    scan.status = 'completed'
                    scan.error_message = f"Scan completed but output file generation failed: {str(e)}"
                    scan.completed_at = datetime.utcnow()
                    if scan.started_at:
                        scan.duration = (datetime.utcnow() - scan.started_at).total_seconds()
                    session.commit()
                    logger.info(f"Scan {scan_id}: Marked as completed with output generation warning")
            except Exception as db_error:
                logger.error(f"Scan {scan_id}: Failed to update status after output error: {db_error}")

        # Save results to database
        # Save results to database (only if output generation succeeded)
        if not output_generation_failed:
            logger.info(f"Scan {scan_id}: Saving results to database...")
            scan_service = ScanService(session)
            scan_service._save_scan_to_db(report, scan_id, status='completed', output_paths=output_paths)
@@ -97,6 +295,19 @@ def execute_scan(scan_id: int, config_id: int, db_url: str = None):

        logger.info(f"Scan {scan_id}: Completed successfully")

    except ScanCancelledError:
        # Scan was cancelled by user
        logger.info(f"Scan {scan_id}: Cancelled by user")

        scan = session.query(Scan).filter_by(id=scan_id).first()
        if scan:
            scan.status = 'cancelled'
            scan.error_message = 'Scan cancelled by user'
            scan.completed_at = datetime.utcnow()
            if scan.started_at:
                scan.duration = (datetime.utcnow() - scan.started_at).total_seconds()
            session.commit()

    except FileNotFoundError as e:
        # Config file not found
        error_msg = f"Configuration file not found: {str(e)}"
@@ -126,6 +337,12 @@ def execute_scan(scan_id: int, config_id: int, db_url: str = None):
            logger.error(f"Scan {scan_id}: Failed to update error status in database: {str(db_error)}")

    finally:
        # Unregister scanner from registry
        with _running_scanners_lock:
            if scan_id in _running_scanners:
                del _running_scanners[scan_id]
                logger.debug(f"Scan {scan_id}: Unregistered from running scanners registry")

        # Always close the session
        session.close()
        logger.info(f"Scan {scan_id}: Background job completed, session closed")

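In production the background launch goes through the scheduler (see `SchedulerService.queue_scan` further down), but a minimal stand-alone sketch of running `execute_scan` on a worker thread would be:

```python
import threading

def launch_scan(scan_id: int, config_id: int, db_url: str) -> threading.Thread:
    # Hypothetical helper; queue_scan() is the real entry point.
    t = threading.Thread(
        target=execute_scan,
        args=(scan_id, config_id, db_url),
        name=f"scan-{scan_id}",
        daemon=True,  # don't block interpreter shutdown
    )
    t.start()
    return t
```
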
@@ -45,7 +45,7 @@ class Scan(Base):
    id = Column(Integer, primary_key=True, autoincrement=True)
    timestamp = Column(DateTime, nullable=False, index=True, comment="Scan start time (UTC)")
    duration = Column(Float, nullable=True, comment="Total scan duration in seconds")
    status = Column(String(20), nullable=False, default='running', comment="running, completed, failed")
    status = Column(String(20), nullable=False, default='running', comment="running, finalizing, completed, failed, cancelled")
    config_id = Column(Integer, ForeignKey('scan_configs.id'), nullable=True, index=True, comment="FK to scan_configs table")
    title = Column(Text, nullable=True, comment="Scan title from config")
    json_path = Column(Text, nullable=True, comment="Path to JSON report")
@@ -59,6 +59,11 @@ class Scan(Base):
    completed_at = Column(DateTime, nullable=True, comment="Scan execution completion time")
    error_message = Column(Text, nullable=True, comment="Error message if scan failed")

    # Progress tracking fields
    current_phase = Column(String(50), nullable=True, comment="Current scan phase: ping, tcp_scan, udp_scan, service_detection, http_analysis")
    total_ips = Column(Integer, nullable=True, comment="Total number of IPs to scan")
    completed_ips = Column(Integer, nullable=True, default=0, comment="Number of IPs completed in current phase")

    # Relationships
    sites = relationship('ScanSite', back_populates='scan', cascade='all, delete-orphan')
    ips = relationship('ScanIP', back_populates='scan', cascade='all, delete-orphan')
@@ -70,6 +75,7 @@ class Scan(Base):
    schedule = relationship('Schedule', back_populates='scans')
    config = relationship('ScanConfig', back_populates='scans')
    site_associations = relationship('ScanSiteAssociation', back_populates='scan', cascade='all, delete-orphan')
    progress_entries = relationship('ScanProgress', back_populates='scan', cascade='all, delete-orphan')

    def __repr__(self):
        return f"<Scan(id={self.id}, title='{self.title}', status='{self.status}')>"
@@ -244,6 +250,43 @@ class ScanTLSVersion(Base):
        return f"<ScanTLSVersion(id={self.id}, tls_version='{self.tls_version}', supported={self.supported})>"


class ScanProgress(Base):
    """
    Real-time progress tracking for individual IPs during scan execution.

    Stores intermediate results as they become available, allowing users to
    see progress and results before the full scan completes.
    """
    __tablename__ = 'scan_progress'

    id = Column(Integer, primary_key=True, autoincrement=True)
    scan_id = Column(Integer, ForeignKey('scans.id'), nullable=False, index=True)
    ip_address = Column(String(45), nullable=False, comment="IP address being scanned")
    site_name = Column(String(255), nullable=True, comment="Site name this IP belongs to")
    phase = Column(String(50), nullable=False, comment="Phase: ping, tcp_scan, udp_scan, service_detection, http_analysis")
    status = Column(String(20), nullable=False, default='pending', comment="pending, in_progress, completed, failed")

    # Results data (stored as JSON)
    ping_result = Column(Boolean, nullable=True, comment="Ping response result")
    tcp_ports = Column(Text, nullable=True, comment="JSON array of discovered TCP ports")
    udp_ports = Column(Text, nullable=True, comment="JSON array of discovered UDP ports")
    services = Column(Text, nullable=True, comment="JSON array of detected services")

    created_at = Column(DateTime, nullable=False, default=datetime.utcnow, comment="Entry creation time")
    updated_at = Column(DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow, comment="Last update time")

    # Relationships
    scan = relationship('Scan', back_populates='progress_entries')

    # Index for efficient lookups
    __table_args__ = (
        UniqueConstraint('scan_id', 'ip_address', name='uix_scan_progress_ip'),
    )

    def __repr__(self):
        return f"<ScanProgress(id={self.id}, ip='{self.ip_address}', phase='{self.phase}', status='{self.status}')>"

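Reading a scan's progress back out is then a plain query plus `json.loads` on the Text columns; an illustrative snapshot helper (not part of this changeset):

```python
import json

def progress_snapshot(session, scan_id: int) -> list:
    """Illustrative read of scan_progress rows, decoding the JSON columns."""
    entries = (
        session.query(ScanProgress)
        .filter_by(scan_id=scan_id)
        .order_by(ScanProgress.ip_address)
        .all()
    )
    return [{
        'ip': e.ip_address,
        'phase': e.phase,
        'status': e.status,
        'tcp_ports': json.loads(e.tcp_ports) if e.tcp_ports else [],
        'udp_ports': json.loads(e.udp_ports) if e.udp_ports else [],
        'services': json.loads(e.services) if e.services else [],
    } for e in entries]
```
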
# ============================================================================
# Reusable Site Definition Tables
# ============================================================================

@@ -7,7 +7,7 @@ Provides dashboard and scan viewing pages.
import logging
import os

from flask import Blueprint, current_app, redirect, render_template, send_from_directory, url_for
from flask import Blueprint, current_app, redirect, render_template, request, send_from_directory, url_for

from web.auth.decorators import login_required

@@ -83,6 +83,19 @@ def compare_scans(scan_id1, scan_id2):
    return render_template('scan_compare.html', scan_id1=scan_id1, scan_id2=scan_id2)


@bp.route('/search/ip')
@login_required
def search_ip():
    """
    IP search results page - shows scans containing a specific IP address.

    Returns:
        Rendered search results template
    """
    ip_address = request.args.get('ip', '').strip()
    return render_template('ip_search_results.html', ip_address=ip_address)


@bp.route('/schedules')
@login_required
def schedules():

@@ -16,7 +16,7 @@ from sqlalchemy.orm import Session, joinedload

from web.models import (
    Scan, ScanSite, ScanIP, ScanPort, ScanService as ScanServiceModel,
    ScanCertificate, ScanTLSVersion, Site, ScanSiteAssociation
    ScanCertificate, ScanTLSVersion, Site, ScanSiteAssociation, SiteIP
)
from web.utils.pagination import paginate, PaginatedResult
from web.utils.validators import validate_scan_status
@@ -257,55 +257,125 @@ class ScanService:
        elif scan.status == 'failed':
            status_info['progress'] = 'Failed'
            status_info['error_message'] = scan.error_message
        elif scan.status == 'cancelled':
            status_info['progress'] = 'Cancelled'
            status_info['error_message'] = scan.error_message

        return status_info

    def cleanup_orphaned_scans(self) -> int:
    def get_scans_by_ip(self, ip_address: str, limit: int = 10) -> List[Dict[str, Any]]:
        """
        Clean up orphaned scans that are stuck in 'running' status.
        Get the last N scans containing a specific IP address.

        Args:
            ip_address: IP address to search for
            limit: Maximum number of scans to return (default: 10)

        Returns:
            List of scan summary dictionaries, most recent first
        """
        scans = (
            self.db.query(Scan)
            .join(ScanIP, Scan.id == ScanIP.scan_id)
            .filter(ScanIP.ip_address == ip_address)
            .filter(Scan.status == 'completed')
            .order_by(Scan.timestamp.desc())
            .limit(limit)
            .all()
        )

        return [self._scan_to_summary_dict(scan) for scan in scans]

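The `/api/scans/by-ip/<ip>` endpoint that `ip_search_results.html` (added later in this changeset) fetches is not itself shown in the diff; a minimal sketch consistent with how the template consumes it (`data.scans`, `data.count`), reusing the blueprint and decorator seen elsewhere in these hunks, could be:

```python
# Hypothetical route; the real endpoint may differ.
@bp.route('/by-ip/<ip_address>', methods=['GET'])
@api_auth_required
def scans_by_ip(ip_address):
    scan_service = ScanService(current_app.db_session)
    scans = scan_service.get_scans_by_ip(ip_address, limit=10)
    return jsonify({'scans': scans, 'count': len(scans)})
```
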
    def cleanup_orphaned_scans(self) -> dict:
        """
        Clean up orphaned scans with smart recovery.

        For scans stuck in 'running' or 'finalizing' status:
        - If output files exist: mark as 'completed' (smart recovery)
        - If no output files: mark as 'failed'

        This should be called on application startup to handle scans that
        were running when the system crashed or was restarted.

        Scans in 'running' status are marked as 'failed' with an appropriate
        error message indicating they were orphaned.

        Returns:
            Number of orphaned scans cleaned up
            Dictionary with cleanup results: {'recovered': N, 'failed': N, 'total': N}
        """
        # Find all scans with status='running'
        orphaned_scans = self.db.query(Scan).filter(Scan.status == 'running').all()
        # Find all scans with status='running' or 'finalizing'
        orphaned_scans = self.db.query(Scan).filter(
            Scan.status.in_(['running', 'finalizing'])
        ).all()

        if not orphaned_scans:
            logger.info("No orphaned scans found")
            return 0
            return {'recovered': 0, 'failed': 0, 'total': 0}

        count = len(orphaned_scans)
        logger.warning(f"Found {count} orphaned scan(s) in 'running' status, marking as failed")
        logger.warning(f"Found {count} orphaned scan(s), attempting smart recovery")

        recovered_count = 0
        failed_count = 0
        output_dir = Path('/app/output')

        # Mark each orphaned scan as failed
        for scan in orphaned_scans:
            # Check for existing output files
            output_exists = False
            output_files_found = []

            # Check paths stored in database
            if scan.json_path and Path(scan.json_path).exists():
                output_exists = True
                output_files_found.append('json')
            if scan.html_path and Path(scan.html_path).exists():
                output_files_found.append('html')
            if scan.zip_path and Path(scan.zip_path).exists():
                output_files_found.append('zip')

            # Also check by timestamp pattern if paths not stored yet
            if not output_exists and scan.started_at and output_dir.exists():
                timestamp_pattern = scan.started_at.strftime('%Y%m%d')
                for json_file in output_dir.glob(f'scan_report_{timestamp_pattern}*.json'):
                    output_exists = True
                    output_files_found.append('json')
                    # Update scan record with found paths
                    scan.json_path = str(json_file)
                    html_file = json_file.with_suffix('.html')
                    if html_file.exists():
                        scan.html_path = str(html_file)
                        output_files_found.append('html')
                    zip_file = json_file.with_suffix('.zip')
                    if zip_file.exists():
                        scan.zip_path = str(zip_file)
                        output_files_found.append('zip')
                    break

            if output_exists:
                # Smart recovery: outputs exist, mark as completed
                scan.status = 'completed'
                scan.error_message = f'Recovered from orphaned state (output files found: {", ".join(output_files_found)})'
                recovered_count += 1
                logger.info(f"Recovered orphaned scan {scan.id} as completed (files: {output_files_found})")
            else:
                # No outputs: mark as failed
                scan.status = 'failed'
                scan.completed_at = datetime.utcnow()
                scan.error_message = (
                    "Scan was interrupted by system shutdown or crash. "
                    "The scan was running but did not complete normally. "
                    "No output files were generated."
                )
                failed_count += 1
                logger.info(f"Marked orphaned scan {scan.id} as failed (no output files)")

            # Calculate duration if we have a started_at time
            scan.completed_at = datetime.utcnow()
            if scan.started_at:
                duration = (datetime.utcnow() - scan.started_at).total_seconds()
                scan.duration = duration

            logger.info(
                f"Marked orphaned scan {scan.id} as failed "
                f"(started: {scan.started_at.isoformat() if scan.started_at else 'unknown'})"
            )
                scan.duration = (datetime.utcnow() - scan.started_at).total_seconds()

        self.db.commit()
        logger.info(f"Cleaned up {count} orphaned scan(s)")
        logger.info(f"Cleaned up {count} orphaned scan(s): {recovered_count} recovered, {failed_count} failed")

        return count
        return {
            'recovered': recovered_count,
            'failed': failed_count,
            'total': count
        }

    def _save_scan_to_db(self, report: Dict[str, Any], scan_id: int,
                         status: str = 'completed', output_paths: Dict = None) -> None:
@@ -604,17 +674,47 @@ class ScanService:

    def _site_to_dict(self, site: ScanSite) -> Dict[str, Any]:
        """Convert ScanSite to dictionary."""
        # Look up the master Site ID from ScanSiteAssociation
        master_site_id = None
        assoc = (
            self.db.query(ScanSiteAssociation)
            .filter(
                ScanSiteAssociation.scan_id == site.scan_id,
            )
            .join(Site)
            .filter(Site.name == site.site_name)
            .first()
        )
        if assoc:
            master_site_id = assoc.site_id

        return {
            'id': site.id,
            'name': site.site_name,
            'ips': [self._ip_to_dict(ip) for ip in site.ips]
            'site_id': master_site_id,  # The actual Site ID for config updates
            'ips': [self._ip_to_dict(ip, master_site_id) for ip in site.ips]
        }

    def _ip_to_dict(self, ip: ScanIP) -> Dict[str, Any]:
    def _ip_to_dict(self, ip: ScanIP, site_id: Optional[int] = None) -> Dict[str, Any]:
        """Convert ScanIP to dictionary."""
        # Look up the SiteIP ID for this IP address in the master Site
        site_ip_id = None
        if site_id:
            site_ip = (
                self.db.query(SiteIP)
                .filter(
                    SiteIP.site_id == site_id,
                    SiteIP.ip_address == ip.ip_address
                )
                .first()
            )
            if site_ip:
                site_ip_id = site_ip.id

        return {
            'id': ip.id,
            'address': ip.ip_address,
            'site_ip_id': site_ip_id,  # The actual SiteIP ID for config updates
            'ping_expected': ip.ping_expected,
            'ping_actual': ip.ping_actual,
            'ports': [self._port_to_dict(port) for port in ip.ports]

@@ -6,7 +6,7 @@ scheduled scans with cron expressions.
"""

import logging
from datetime import datetime
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Tuple

from croniter import croniter
@@ -71,6 +71,7 @@ class ScheduleService:
        next_run = self.calculate_next_run(cron_expression) if enabled else None

        # Create schedule record
        now_utc = datetime.now(timezone.utc)
        schedule = Schedule(
            name=name,
            config_id=config_id,
@@ -78,8 +79,8 @@ class ScheduleService:
            enabled=enabled,
            last_run=None,
            next_run=next_run,
            created_at=datetime.utcnow(),
            updated_at=datetime.utcnow()
            created_at=now_utc,
            updated_at=now_utc
        )

        self.db.add(schedule)
@@ -103,7 +104,14 @@ class ScheduleService:
        Raises:
            ValueError: If schedule not found
        """
        schedule = self.db.query(Schedule).filter(Schedule.id == schedule_id).first()
        from sqlalchemy.orm import joinedload

        schedule = (
            self.db.query(Schedule)
            .options(joinedload(Schedule.config))
            .filter(Schedule.id == schedule_id)
            .first()
        )

        if not schedule:
            raise ValueError(f"Schedule {schedule_id} not found")
@@ -138,8 +146,10 @@ class ScheduleService:
            'pages': int
        }
        """
        # Build query
        query = self.db.query(Schedule)
        from sqlalchemy.orm import joinedload

        # Build query and eagerly load config relationship
        query = self.db.query(Schedule).options(joinedload(Schedule.config))

        # Apply filter
        if enabled_filter is not None:
@@ -215,7 +225,7 @@ class ScheduleService:
            if hasattr(schedule, key):
                setattr(schedule, key, value)

        schedule.updated_at = datetime.utcnow()
        schedule.updated_at = datetime.now(timezone.utc)

        self.db.commit()
        self.db.refresh(schedule)
@@ -298,7 +308,7 @@ class ScheduleService:

        schedule.last_run = last_run
        schedule.next_run = next_run
        schedule.updated_at = datetime.utcnow()
        schedule.updated_at = datetime.now(timezone.utc)

        self.db.commit()

@@ -311,23 +321,43 @@ class ScheduleService:
        Validate a cron expression.

        Args:
            cron_expr: Cron expression to validate
            cron_expr: Cron expression to validate in standard crontab format
                Format: minute hour day month day_of_week
                Day of week: 0=Sunday, 1=Monday, ..., 6=Saturday
                (APScheduler will convert this to its internal format automatically)

        Returns:
            Tuple of (is_valid, error_message)
            - (True, None) if valid
            - (False, error_message) if invalid

        Note:
            This validates using croniter which uses standard crontab format.
            APScheduler's from_crontab() will handle the conversion when the
            schedule is registered with the scheduler.
        """
        try:
            # Try to create a croniter instance
            base_time = datetime.utcnow()
            # croniter uses standard crontab format (Sunday=0)
            from datetime import timezone
            base_time = datetime.now(timezone.utc)
            cron = croniter(cron_expr, base_time)

            # Try to get the next run time (validates the expression)
            cron.get_next(datetime)

            # Validate basic format (5 fields)
            fields = cron_expr.split()
            if len(fields) != 5:
                return (False, f"Cron expression must have 5 fields (minute hour day month day_of_week), got {len(fields)}")

            return (True, None)
        except (ValueError, KeyError) as e:
            error_msg = str(e)
            # Add helpful hint for day_of_week errors
            if "day" in error_msg.lower() and len(cron_expr.split()) >= 5:
                hint = "\nNote: Use standard crontab format where 0=Sunday, 1=Monday, ..., 6=Saturday"
                return (False, f"{error_msg}{hint}")
            return (False, str(e))
        except Exception as e:
            return (False, f"Unexpected error: {str(e)}")
@@ -345,17 +375,24 @@ class ScheduleService:
            from_time: Base time (defaults to now UTC)

        Returns:
            Next run datetime (UTC)
            Next run datetime (UTC, timezone-aware)

        Raises:
            ValueError: If cron expression is invalid
        """
        if from_time is None:
            from_time = datetime.utcnow()
            from_time = datetime.now(timezone.utc)

        try:
            cron = croniter(cron_expr, from_time)
            return cron.get_next(datetime)
            next_run = cron.get_next(datetime)

            # croniter returns naive datetime, so we need to add timezone info
            # Since we're using UTC for all calculations, add UTC timezone
            if next_run.tzinfo is None:
                next_run = next_run.replace(tzinfo=timezone.utc)

            return next_run
        except Exception as e:
            raise ValueError(f"Invalid cron expression '{cron_expr}': {str(e)}")

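The tzinfo guard matters because, per the comment in `calculate_next_run()`, croniter's `get_next(datetime)` may hand back a naive datetime; normalizing keeps every stored timestamp timezone-aware UTC. A quick illustrative check:

```python
from datetime import datetime, timezone
from croniter import croniter

base = datetime.now(timezone.utc)
nxt = croniter('0 2 * * *', base).get_next(datetime)  # next 02:00 UTC

# Whether or not croniter preserved tzinfo, this guard makes the result aware:
if nxt.tzinfo is None:
    nxt = nxt.replace(tzinfo=timezone.utc)
assert nxt.tzinfo is not None
```
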
@@ -403,10 +440,16 @@ class ScheduleService:
        Returns:
            Dictionary representation
        """
        # Get config title if relationship is loaded
        config_name = None
        if schedule.config:
            config_name = schedule.config.title

        return {
            'id': schedule.id,
            'name': schedule.name,
            'config_id': schedule.config_id,
            'config_name': config_name,
            'cron_expression': schedule.cron_expression,
            'enabled': schedule.enabled,
            'last_run': schedule.last_run.isoformat() if schedule.last_run else None,
@@ -421,7 +464,7 @@ class ScheduleService:
        Format datetime as relative time.

        Args:
            dt: Datetime to format (UTC)
            dt: Datetime to format (UTC, can be naive or aware)

        Returns:
            Human-readable relative time (e.g., "in 2 hours", "yesterday")
@@ -429,7 +472,13 @@ class ScheduleService:
        if dt is None:
            return None

        now = datetime.utcnow()
        # Ensure both datetimes are timezone-aware for comparison
        now = datetime.now(timezone.utc)

        # If dt is naive, assume it's UTC and add timezone info
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=timezone.utc)

        diff = dt - now

        # Future times

@@ -149,6 +149,51 @@ class SchedulerService:
        except Exception as e:
            logger.error(f"Error loading schedules on startup: {str(e)}", exc_info=True)

    @staticmethod
    def validate_cron_expression(cron_expression: str) -> tuple[bool, str]:
        """
        Validate a cron expression and provide helpful feedback.

        Args:
            cron_expression: Cron expression to validate

        Returns:
            Tuple of (is_valid: bool, message: str)
            - If valid: (True, "Valid cron expression")
            - If invalid: (False, "Error message with details")

        Note:
            Standard crontab format: minute hour day month day_of_week
            Day of week: 0=Sunday, 1=Monday, ..., 6=Saturday (or 7=Sunday)
        """
        from apscheduler.triggers.cron import CronTrigger

        try:
            # Try to parse the expression
            trigger = CronTrigger.from_crontab(cron_expression)

            # Validate basic format (5 fields)
            fields = cron_expression.split()
            if len(fields) != 5:
                return False, f"Cron expression must have 5 fields (minute hour day month day_of_week), got {len(fields)}"

            return True, "Valid cron expression"

        except (ValueError, KeyError) as e:
            error_msg = str(e)

            # Provide helpful hints for common errors
            if "day_of_week" in error_msg.lower() or (len(cron_expression.split()) >= 5):
                # Check if day_of_week field might be using APScheduler format by mistake
                fields = cron_expression.split()
                if len(fields) == 5:
                    dow_field = fields[4]
                    if dow_field.isdigit() and int(dow_field) >= 0:
                        hint = "\nNote: Use standard crontab format where 0=Sunday, 1=Monday, ..., 6=Saturday"
                        return False, f"Invalid cron expression: {error_msg}{hint}"

            return False, f"Invalid cron expression: {error_msg}"

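`from_crontab()` is what reconciles the two day-of-week conventions: you pass standard crontab (0 = Sunday) and APScheduler converts internally. A quick sketch:

```python
from apscheduler.triggers.cron import CronTrigger

# Standard crontab day_of_week: 0 = Sunday
trigger = CronTrigger.from_crontab('0 2 * * 0')   # 02:00 every Sunday
print(trigger)  # repr shows APScheduler's converted day_of_week field

# Malformed expressions raise ValueError, which validate_cron_expression()
# turns into a (False, message) tuple:
try:
    CronTrigger.from_crontab('0 2 * *')           # only 4 fields
except ValueError as e:
    print(f"rejected: {e}")
```
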
    def queue_scan(self, scan_id: int, config_id: int) -> str:
        """
        Queue a scan for immediate background execution.
@@ -188,6 +233,10 @@ class SchedulerService:
            schedule_id: Database ID of the schedule
            config_id: Database config ID
            cron_expression: Cron expression (e.g., "0 2 * * *" for 2am daily)
                IMPORTANT: Use standard crontab format where:
                - Day of week: 0 = Sunday, 1 = Monday, ..., 6 = Saturday
                - APScheduler automatically converts to its internal format
                - from_crontab() handles the conversion properly

        Returns:
            Job ID from APScheduler
@@ -195,18 +244,29 @@ class SchedulerService:
        Raises:
            RuntimeError: If scheduler not initialized
            ValueError: If cron expression is invalid

        Note:
            APScheduler internally uses Monday=0, but from_crontab() accepts
            standard crontab format (Sunday=0) and converts it automatically.
        """
        if not self.scheduler:
            raise RuntimeError("Scheduler not initialized. Call init_scheduler() first.")

        from apscheduler.triggers.cron import CronTrigger

        # Validate cron expression first to provide helpful error messages
        is_valid, message = self.validate_cron_expression(cron_expression)
        if not is_valid:
            raise ValueError(message)

        # Create cron trigger from expression using local timezone
        # This allows users to specify times in their local timezone
        # from_crontab() parses standard crontab format (Sunday=0)
        # and converts to APScheduler's internal format (Monday=0) automatically
        try:
            trigger = CronTrigger.from_crontab(cron_expression)
            # timezone defaults to local system timezone
        except (ValueError, KeyError) as e:
            # This should not happen due to validation above, but catch anyway
            raise ValueError(f"Invalid cron expression '{cron_expression}': {str(e)}")

        # Add cron job
@@ -294,11 +354,16 @@ class SchedulerService:

        # Update schedule's last_run and next_run
        from croniter import croniter
        next_run = croniter(schedule['cron_expression'], datetime.utcnow()).get_next(datetime)
        now_utc = datetime.now(timezone.utc)
        next_run = croniter(schedule['cron_expression'], now_utc).get_next(datetime)

        # croniter returns naive datetime, add UTC timezone
        if next_run.tzinfo is None:
            next_run = next_run.replace(tzinfo=timezone.utc)

        schedule_service.update_run_times(
            schedule_id=schedule_id,
            last_run=datetime.utcnow(),
            last_run=now_utc,
            next_run=next_run
        )


@@ -228,6 +228,34 @@ class SiteService:

        return [self._site_to_dict(site) for site in sites]

    def get_global_ip_stats(self) -> Dict[str, int]:
        """
        Get global IP statistics across all sites.

        Returns:
            Dictionary with:
            - total_ips: Total count of IP entries (including duplicates)
            - unique_ips: Count of distinct IP addresses
            - duplicate_ips: Number of duplicate entries (total - unique)
        """
        # Total IP entries
        total_ips = (
            self.db.query(func.count(SiteIP.id))
            .scalar() or 0
        )

        # Unique IP addresses
        unique_ips = (
            self.db.query(func.count(func.distinct(SiteIP.ip_address)))
            .scalar() or 0
        )

        return {
            'total_ips': total_ips,
            'unique_ips': unique_ips,
            'duplicate_ips': total_ips - unique_ips
        }

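The two scalar queries could also be collapsed into a single round trip; an equivalent sketch (illustrative alternative only, not what the diff ships):

```python
from sqlalchemy import func

# One SELECT computing both aggregates.
total, unique = session.query(
    func.count(SiteIP.id),
    func.count(func.distinct(SiteIP.ip_address)),
).one()

stats = {
    'total_ips': total or 0,
    'unique_ips': unique or 0,
    'duplicate_ips': (total or 0) - (unique or 0),
}
```
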
    def bulk_add_ips_from_cidr(self, site_id: int, cidr: str,
                               expected_ping: Optional[bool] = None,
                               expected_tcp_ports: Optional[List[int]] = None,

@@ -76,6 +76,13 @@
            </ul>
        </li>
    </ul>
    <form class="d-flex me-3" action="{{ url_for('main.search_ip') }}" method="GET">
        <input class="form-control form-control-sm me-2" type="search" name="ip"
               placeholder="Search IP..." aria-label="Search IP" style="width: 150px;">
        <button class="btn btn-outline-primary btn-sm" type="submit">
            <i class="bi bi-search"></i>
        </button>
    </form>
    <ul class="navbar-nav">
        <li class="nav-item">
            <a class="nav-link {% if request.endpoint == 'main.help' %}active{% endif %}"

175
app/web/templates/ip_search_results.html
Normal file
@@ -0,0 +1,175 @@
{% extends "base.html" %}

{% block title %}Search Results for {{ ip_address }} - SneakyScanner{% endblock %}

{% block content %}
<div class="row mt-4">
    <div class="col-12 d-flex justify-content-between align-items-center mb-4">
        <h1>
            <i class="bi bi-search"></i>
            Search Results
            {% if ip_address %}
            <small class="text-muted">for {{ ip_address }}</small>
            {% endif %}
        </h1>
        <a href="{{ url_for('main.scans') }}" class="btn btn-secondary">
            <i class="bi bi-arrow-left"></i> Back to Scans
        </a>
    </div>
</div>

{% if not ip_address %}
<!-- No IP provided -->
<div class="row">
    <div class="col-12">
        <div class="card">
            <div class="card-body text-center py-5">
                <i class="bi bi-exclamation-circle text-warning" style="font-size: 3rem;"></i>
                <h4 class="mt-3">No IP Address Provided</h4>
                <p class="text-muted">Please enter an IP address in the search box to find related scans.</p>
            </div>
        </div>
    </div>
</div>
{% else %}
<!-- Results Table -->
<div class="row">
    <div class="col-12">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0">Last 10 Scans Containing {{ ip_address }}</h5>
            </div>
            <div class="card-body">
                <div id="results-loading" class="text-center py-5">
                    <div class="spinner-border" role="status">
                        <span class="visually-hidden">Loading...</span>
                    </div>
                    <p class="mt-3 text-muted">Searching for scans...</p>
                </div>
                <div id="results-error" class="alert alert-danger" style="display: none;"></div>
                <div id="results-empty" class="text-center py-5 text-muted" style="display: none;">
                    <i class="bi bi-search" style="font-size: 3rem;"></i>
                    <h5 class="mt-3">No Scans Found</h5>
                    <p>No completed scans contain the IP address <strong>{{ ip_address }}</strong>.</p>
                </div>
                <div id="results-table-container" style="display: none;">
                    <div class="table-responsive">
                        <table class="table table-hover">
                            <thead>
                                <tr>
                                    <th style="width: 80px;">ID</th>
                                    <th>Title</th>
                                    <th style="width: 200px;">Timestamp</th>
                                    <th style="width: 100px;">Duration</th>
                                    <th style="width: 120px;">Status</th>
                                    <th style="width: 100px;">Actions</th>
                                </tr>
                            </thead>
                            <tbody id="results-tbody">
                            </tbody>
                        </table>
                    </div>
                    <div class="text-muted mt-3">
                        Found <span id="result-count">0</span> scan(s) containing this IP address.
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>
{% endif %}
{% endblock %}

{% block scripts %}
<script>
const ipAddress = "{{ ip_address | e }}";

// Load results when page loads
document.addEventListener('DOMContentLoaded', function() {
    if (ipAddress) {
        loadResults();
    }
});

// Load search results from API
async function loadResults() {
    const loadingEl = document.getElementById('results-loading');
    const errorEl = document.getElementById('results-error');
    const emptyEl = document.getElementById('results-empty');
    const tableEl = document.getElementById('results-table-container');

    // Show loading state
    loadingEl.style.display = 'block';
    errorEl.style.display = 'none';
    emptyEl.style.display = 'none';
    tableEl.style.display = 'none';

    try {
        const response = await fetch(`/api/scans/by-ip/${encodeURIComponent(ipAddress)}`);
        if (!response.ok) {
            throw new Error('Failed to search for scans');
        }

        const data = await response.json();
        const scans = data.scans || [];

        loadingEl.style.display = 'none';

        if (scans.length === 0) {
            emptyEl.style.display = 'block';
        } else {
            tableEl.style.display = 'block';
            renderResultsTable(scans);
            document.getElementById('result-count').textContent = data.count;
        }
    } catch (error) {
        console.error('Error searching for scans:', error);
        loadingEl.style.display = 'none';
        errorEl.textContent = 'Failed to search for scans. Please try again.';
        errorEl.style.display = 'block';
    }
}

// Render results table
function renderResultsTable(scans) {
    const tbody = document.getElementById('results-tbody');
    tbody.innerHTML = '';

    scans.forEach(scan => {
        const row = document.createElement('tr');
        row.classList.add('scan-row');

        // Format timestamp
        const timestamp = new Date(scan.timestamp).toLocaleString();

        // Format duration
        const duration = scan.duration ? `${scan.duration.toFixed(1)}s` : '-';

        // Status badge
        let statusBadge = '';
        if (scan.status === 'completed') {
            statusBadge = '<span class="badge badge-success">Completed</span>';
        } else if (scan.status === 'running') {
            statusBadge = '<span class="badge badge-info">Running</span>';
        } else if (scan.status === 'failed') {
            statusBadge = '<span class="badge badge-danger">Failed</span>';
        } else {
            statusBadge = `<span class="badge badge-info">${scan.status}</span>`;
        }

        row.innerHTML = `
            <td class="mono">${scan.id}</td>
            <td>${scan.title || 'Untitled Scan'}</td>
            <td class="text-muted">${timestamp}</td>
            <td class="mono">${duration}</td>
            <td>${statusBadge}</td>
            <td>
                <a href="/scans/${scan.id}" class="btn btn-sm btn-secondary">View</a>
            </td>
        `;

        tbody.appendChild(row);
    });
}
</script>
{% endblock %}

@@ -20,6 +20,10 @@
    <span id="refresh-text">Refresh</span>
    <span id="refresh-spinner" class="spinner-border spinner-border-sm ms-1" style="display: none;"></span>
</button>
<button class="btn btn-warning ms-2" onclick="stopScan()" id="stop-btn" style="display: none;">
    <span id="stop-text">Stop Scan</span>
    <span id="stop-spinner" class="spinner-border spinner-border-sm ms-1" style="display: none;"></span>
</button>
<button class="btn btn-danger ms-2" onclick="deleteScan()" id="delete-btn">Delete Scan</button>
</div>
</div>
@@ -84,6 +88,50 @@
</div>
</div>

<!-- Progress Section (shown when scan is running) -->
<div class="row mb-4" id="progress-section" style="display: none;">
    <div class="col-12">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0" style="color: #60a5fa;">
                    <i class="bi bi-hourglass-split"></i> Scan Progress
                </h5>
            </div>
            <div class="card-body">
                <!-- Phase and Progress Bar -->
                <div class="mb-3">
                    <div class="d-flex justify-content-between align-items-center mb-2">
                        <span>Current Phase: <strong id="current-phase">Initializing...</strong></span>
                        <span id="progress-count">0 / 0 IPs</span>
                    </div>
                    <div class="progress" style="height: 20px; background-color: #334155;">
                        <div id="progress-bar" class="progress-bar bg-info" role="progressbar" style="width: 0%"></div>
                    </div>
                </div>

                <!-- Per-IP Results Table -->
                <div class="table-responsive" style="max-height: 400px; overflow-y: auto;">
                    <table class="table table-sm">
                        <thead style="position: sticky; top: 0; background-color: #1e293b;">
                            <tr>
                                <th>Site</th>
                                <th>IP Address</th>
                                <th>Ping</th>
                                <th>TCP Ports</th>
                                <th>UDP Ports</th>
                                <th>Services</th>
                            </tr>
                        </thead>
                        <tbody id="progress-table-body">
                            <tr><td colspan="6" class="text-center text-muted">Waiting for results...</td></tr>
                        </tbody>
                    </table>
                </div>
            </div>
        </div>
    </div>
</div>

<!-- Stats Row -->
<div class="row mb-4">
    <div class="col-md-3">
@@ -222,6 +270,7 @@
const scanId = {{ scan_id }};
let scanData = null;
let historyChart = null; // Store chart instance to prevent duplicates
let progressInterval = null; // Store progress polling interval

// Show alert notification
function showAlert(type, message) {
@@ -247,16 +296,136 @@
    loadScan().then(() => {
        findPreviousScan();
        loadHistoricalChart();

        // Start progress polling if scan is running
        if (scanData && scanData.status === 'running') {
            startProgressPolling();
        }
    });

    // Auto-refresh every 10 seconds if scan is running
    setInterval(function() {
        if (scanData && scanData.status === 'running') {
            loadScan();
        }
    }, 10000);
});

// Start polling for progress updates
function startProgressPolling() {
    // Show progress section
    document.getElementById('progress-section').style.display = 'block';

    // Initial load
    loadProgress();

    // Poll every 3 seconds
    progressInterval = setInterval(loadProgress, 3000);
}

// Stop polling for progress updates
function stopProgressPolling() {
    if (progressInterval) {
        clearInterval(progressInterval);
        progressInterval = null;
    }
    // Hide progress section when scan completes
    document.getElementById('progress-section').style.display = 'none';
}

// Load progress data
async function loadProgress() {
    try {
        const response = await fetch(`/api/scans/${scanId}/progress`);
        if (!response.ok) return;

        const progress = await response.json();

        // Check if scan is still running
        if (progress.status !== 'running') {
            stopProgressPolling();
            loadScan(); // Refresh full scan data
            return;
        }

        renderProgress(progress);
    } catch (error) {
        console.error('Error loading progress:', error);
    }
}

// Render progress data
function renderProgress(progress) {
    // Update phase display
    const phaseNames = {
        'pending': 'Initializing',
        'ping': 'Ping Scan',
        'tcp_scan': 'TCP Port Scan',
        'udp_scan': 'UDP Port Scan',
        'service_detection': 'Service Detection',
        'http_analysis': 'HTTP/HTTPS Analysis',
        'completed': 'Completing'
    };

    const phaseName = phaseNames[progress.current_phase] || progress.current_phase;
    document.getElementById('current-phase').textContent = phaseName;

    // Update progress count and bar
    const total = progress.total_ips || 0;
    const completed = progress.completed_ips || 0;
    const percent = total > 0 ? Math.round((completed / total) * 100) : 0;

    document.getElementById('progress-count').textContent = `${completed} / ${total} IPs`;
    document.getElementById('progress-bar').style.width = `${percent}%`;

    // Update progress table
    const tbody = document.getElementById('progress-table-body');
    const entries = progress.progress_entries || [];

    if (entries.length === 0) {
        tbody.innerHTML = '<tr><td colspan="6" class="text-center text-muted">Waiting for results...</td></tr>';
        return;
    }

    let html = '';
    entries.forEach(entry => {
        // Ping result
        let pingDisplay = '-';
        if (entry.ping_result !== null && entry.ping_result !== undefined) {
            pingDisplay = entry.ping_result
                ? '<span class="badge badge-success">Yes</span>'
                : '<span class="badge badge-danger">No</span>';
        }

        // TCP ports
        const tcpPorts = entry.tcp_ports || [];
        let tcpDisplay = tcpPorts.length > 0
            ? `<span class="badge bg-info">${tcpPorts.length}</span> <small class="text-muted">${tcpPorts.slice(0, 5).join(', ')}${tcpPorts.length > 5 ? '...' : ''}</small>`
            : '-';

        // UDP ports
        const udpPorts = entry.udp_ports || [];
        let udpDisplay = udpPorts.length > 0
            ? `<span class="badge bg-info">${udpPorts.length}</span>`
            : '-';

        // Services
        const services = entry.services || [];
        let svcDisplay = '-';
        if (services.length > 0) {
            const svcNames = services.map(s => s.service || 'unknown').slice(0, 3);
            svcDisplay = `<span class="badge bg-info">${services.length}</span> <small class="text-muted">${svcNames.join(', ')}${services.length > 3 ? '...' : ''}</small>`;
        }

        html += `
            <tr class="scan-row">
                <td>${entry.site_name || '-'}</td>
                <td class="mono">${entry.ip_address}</td>
                <td>${pingDisplay}</td>
                <td>${tcpDisplay}</td>
                <td>${udpDisplay}</td>
                <td>${svcDisplay}</td>
            </tr>
        `;
    });

    tbody.innerHTML = html;
}

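The `/api/scans/<id>/progress` endpoint this poller hits is not part of the diff; a sketch consistent with the fields read above (`status`, `current_phase`, `total_ips`, `completed_ips`, `progress_entries`), built on the `Scan`/`ScanProgress` models earlier in this changeset, might be:

```python
import json
from flask import jsonify

# Hypothetical route; shown only to document the expected JSON shape.
@bp.route('/<int:scan_id>/progress', methods=['GET'])
@api_auth_required
def scan_progress(scan_id):
    scan = current_app.db_session.query(Scan).filter_by(id=scan_id).first()
    if not scan:
        return jsonify({'error': 'Scan not found'}), 404

    entries = [{
        'ip_address': e.ip_address,
        'site_name': e.site_name,
        'ping_result': e.ping_result,
        'tcp_ports': json.loads(e.tcp_ports) if e.tcp_ports else [],
        'udp_ports': json.loads(e.udp_ports) if e.udp_ports else [],
        'services': json.loads(e.services) if e.services else [],
    } for e in scan.progress_entries]

    return jsonify({
        'status': scan.status,
        'current_phase': scan.current_phase,
        'total_ips': scan.total_ips,
        'completed_ips': scan.completed_ips,
        'progress_entries': entries,
    })
```
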
// Load scan details
async function loadScan() {
    const loadingEl = document.getElementById('scan-loading');
@@ -306,8 +475,11 @@
    } else if (scan.status === 'running') {
        statusBadge = '<span class="badge badge-info">Running</span>';
        document.getElementById('delete-btn').disabled = true;
        document.getElementById('stop-btn').style.display = 'inline-block';
    } else if (scan.status === 'failed') {
        statusBadge = '<span class="badge badge-danger">Failed</span>';
    } else if (scan.status === 'cancelled') {
        statusBadge = '<span class="badge badge-warning">Cancelled</span>';
    } else {
        statusBadge = `<span class="badge badge-info">${scan.status}</span>`;
    }
@@ -414,6 +586,19 @@
    const screenshotPath = service && service.screenshot_path ? service.screenshot_path : null;
    const certificate = service && service.certificates && service.certificates.length > 0 ? service.certificates[0] : null;

    // Build status cell with optional "Mark Expected" button
    let statusCell;
    if (port.expected) {
        statusCell = '<span class="badge badge-good">Expected</span>';
    } else {
        // Show "Unexpected" badge with "Mark Expected" button if site_id and site_ip_id are available
        const canMarkExpected = site.site_id && ip.site_ip_id;
        statusCell = `<span class="badge badge-warning">Unexpected</span>`;
        if (canMarkExpected) {
            statusCell += ` <button class="btn btn-sm btn-outline-success ms-1" onclick="markPortExpected(${site.site_id}, ${ip.site_ip_id}, ${port.port}, '${port.protocol}')" title="Add to expected ports"><i class="bi bi-plus-circle"></i></button>`;
        }
    }

    const row = document.createElement('tr');
    row.classList.add('scan-row'); // Fix white row bug
    row.innerHTML = `
@@ -423,7 +608,7 @@
        <td>${service ? service.service_name : '-'}</td>
        <td>${service ? service.product || '-' : '-'}</td>
        <td class="mono">${service ? service.version || '-' : '-'}</td>
        <td>${port.expected ? '<span class="badge badge-good">Expected</span>' : '<span class="badge badge-warning">Unexpected</span>'}</td>
        <td>${statusCell}</td>
        <td>${screenshotPath ? `<a href="/output/${screenshotPath.replace(/^\/?(?:app\/)?output\/?/, '')}" target="_blank" class="btn btn-sm btn-outline-primary" title="View Screenshot"><i class="bi bi-image"></i></a>` : '-'}</td>
        <td>${certificate ? `<button class="btn btn-sm btn-outline-info" onclick='showCertificateModal(${JSON.stringify(certificate).replace(/'/g, "&#39;")})' title="View Certificate"><i class="bi bi-shield-lock"></i></button>` : '-'}</td>
    `;
@@ -532,6 +717,127 @@
    }
}

// Stop scan
async function stopScan() {
    if (!confirm(`Are you sure you want to stop scan ${scanId}?`)) {
        return;
    }

    const stopBtn = document.getElementById('stop-btn');
    const stopText = document.getElementById('stop-text');
    const stopSpinner = document.getElementById('stop-spinner');

    // Show loading state
    stopBtn.disabled = true;
    stopText.style.display = 'none';
    stopSpinner.style.display = 'inline-block';

    try {
        const response = await fetch(`/api/scans/${scanId}/stop`, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            }
        });

        if (!response.ok) {
            let errorMessage = `HTTP ${response.status}: Failed to stop scan`;
            try {
                const data = await response.json();
                errorMessage = data.message || errorMessage;
            } catch (e) {
                // Ignore JSON parse errors
            }
            throw new Error(errorMessage);
        }

        // Show success message
        showAlert('success', `Stop signal sent to scan ${scanId}.`);

        // Refresh scan data after a short delay
        setTimeout(() => {
            loadScan();
        }, 1000);

    } catch (error) {
        console.error('Error stopping scan:', error);
        showAlert('danger', `Failed to stop scan: ${error.message}`);

        // Re-enable button on error
        stopBtn.disabled = false;
        stopText.style.display = 'inline';
        stopSpinner.style.display = 'none';
    }
}

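The matching `/api/scans/<id>/stop` endpoint is likewise outside this diff; a sketch wiring it to `stop_scan()` from the executor module (route shape assumed from the fetch above, module path hypothetical):

```python
# Hypothetical route; the real handler may also guard on scan status.
@bp.route('/<int:scan_id>/stop', methods=['POST'])
@api_auth_required
def stop_scan_endpoint(scan_id):
    from web.scan_executor import stop_scan  # module path assumed

    if stop_scan(scan_id, current_app.config.get('DATABASE_URL')):
        return jsonify({'message': f'Stop signal sent to scan {scan_id}'})
    return jsonify({'message': 'Scan not found or not running'}), 404
```
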
// Mark a port as expected in the site config
async function markPortExpected(siteId, ipId, portNumber, protocol) {
    try {
        // First, get the current IP settings - fetch all IPs with high per_page to find the one we need
        const getResponse = await fetch(`/api/sites/${siteId}/ips?per_page=200`);
        if (!getResponse.ok) {
            throw new Error('Failed to get site IPs');
        }
        const ipsData = await getResponse.json();

        // Find the IP in the site
        const ipData = ipsData.ips.find(ip => ip.id === ipId);
        if (!ipData) {
            throw new Error('IP not found in site');
        }

        // Get current expected ports
        let expectedTcpPorts = ipData.expected_tcp_ports || [];
        let expectedUdpPorts = ipData.expected_udp_ports || [];

        // Add the new port to the appropriate list
        if (protocol.toLowerCase() === 'tcp') {
            if (!expectedTcpPorts.includes(portNumber)) {
                expectedTcpPorts.push(portNumber);
                expectedTcpPorts.sort((a, b) => a - b);
            }
        } else if (protocol.toLowerCase() === 'udp') {
            if (!expectedUdpPorts.includes(portNumber)) {
                expectedUdpPorts.push(portNumber);
                expectedUdpPorts.sort((a, b) => a - b);
            }
        }

        // Update the IP settings
        const updateResponse = await fetch(`/api/sites/${siteId}/ips/${ipId}`, {
            method: 'PUT',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                expected_tcp_ports: expectedTcpPorts,
                expected_udp_ports: expectedUdpPorts
            })
        });

        if (!updateResponse.ok) {
            let errorMessage = 'Failed to update IP settings';
            try {
                const errorData = await updateResponse.json();
                errorMessage = errorData.message || errorMessage;
            } catch (e) {
                // Ignore JSON parse errors
            }
            throw new Error(errorMessage);
        }

        // Show success message
        showAlert('success', `Port ${portNumber}/${protocol.toUpperCase()} added to expected ports for this IP. Refresh the page to see updated status.`);

        // Optionally refresh the scan data to show the change
        // Note: The scan data itself won't change, but the user knows it's been updated

    } catch (error) {
        console.error('Error marking port as expected:', error);
        showAlert('danger', `Failed to mark port as expected: ${error.message}`);
    }
}

// Find previous scan and show compare button
let previousScanId = null;
let currentConfigId = null;

@@ -26,6 +26,7 @@
    <option value="running">Running</option>
    <option value="completed">Completed</option>
    <option value="failed">Failed</option>
    <option value="cancelled">Cancelled</option>
</select>
</div>
<div class="col-md-4">
@@ -248,20 +249,27 @@
    statusBadge = '<span class="badge badge-info">Running</span>';
} else if (scan.status === 'failed') {
    statusBadge = '<span class="badge badge-danger">Failed</span>';
} else if (scan.status === 'cancelled') {
    statusBadge = '<span class="badge badge-warning">Cancelled</span>';
} else {
    statusBadge = `<span class="badge badge-info">${scan.status}</span>`;
}

// Action buttons
let actionButtons = `<a href="/scans/${scan.id}" class="btn btn-sm btn-secondary">View</a>`;
if (scan.status === 'running') {
    actionButtons += `<button class="btn btn-sm btn-warning ms-1" onclick="stopScan(${scan.id})">Stop</button>`;
} else {
    actionButtons += `<button class="btn btn-sm btn-danger ms-1" onclick="deleteScan(${scan.id})">Delete</button>`;
}

row.innerHTML = `
    <td class="mono">${scan.id}</td>
    <td>${scan.title || 'Untitled Scan'}</td>
    <td class="text-muted">${timestamp}</td>
    <td class="mono">${duration}</td>
    <td>${statusBadge}</td>
    <td>
        <a href="/scans/${scan.id}" class="btn btn-sm btn-secondary">View</a>
        ${scan.status !== 'running' ? `<button class="btn btn-sm btn-danger ms-1" onclick="deleteScan(${scan.id})">Delete</button>` : ''}
    </td>
    <td>${actionButtons}</td>
`;

tbody.appendChild(row);
@@ -489,6 +497,33 @@
    }
}

// Stop scan
async function stopScan(scanId) {
    if (!confirm(`Are you sure you want to stop scan ${scanId}?`)) {
        return;
    }

    try {
        const response = await fetch(`/api/scans/${scanId}/stop`, {
            method: 'POST'
        });

        if (!response.ok) {
            const data = await response.json();
            throw new Error(data.message || 'Failed to stop scan');
        }

        // Show success message
        showAlert('success', `Stop signal sent to scan ${scanId}.`);

        // Refresh scans after a short delay
        setTimeout(() => loadScans(), 1000);
    } catch (error) {
        console.error('Error stopping scan:', error);
        showAlert('danger', `Failed to stop scan: ${error.message}`);
    }
}

// Delete scan
async function deleteScan(scanId) {
    if (!confirm(`Are you sure you want to delete scan ${scanId}?`)) {

@@ -298,7 +298,11 @@ async function loadSchedule() {
function populateForm(schedule) {
    document.getElementById('schedule-id').value = schedule.id;
    document.getElementById('schedule-name').value = schedule.name;
    document.getElementById('config-id').value = schedule.config_id;
    // Display config name and ID in the readonly config-file field
    const configDisplay = schedule.config_name
        ? `${schedule.config_name} (ID: ${schedule.config_id})`
        : `Config ID: ${schedule.config_id}`;
    document.getElementById('config-file').value = configDisplay;
    document.getElementById('cron-expression').value = schedule.cron_expression;
    document.getElementById('schedule-enabled').checked = schedule.enabled;

@@ -26,8 +26,11 @@
        </div>
        <div class="col-md-4">
            <div class="stat-card">
                <div class="stat-value" id="total-ips">-</div>
                <div class="stat-label">Total IPs</div>
                <div class="stat-value" id="unique-ips">-</div>
                <div class="stat-label">Unique IPs</div>
                <div class="stat-sublabel" id="duplicate-ips-label" style="display: none; font-size: 0.75rem; color: #fbbf24;">
                    (<span id="duplicate-ips">0</span> duplicates)
                </div>
            </div>
        </div>
        <div class="col-md-4">
@@ -499,7 +502,7 @@ async function loadSites() {
        const data = await response.json();
        sitesData = data.sites || [];

        updateStats();
        updateStats(data.unique_ips, data.duplicate_ips);
        renderSites(sitesData);

        document.getElementById('sites-loading').style.display = 'none';
@@ -514,12 +517,20 @@ async function loadSites() {
    }
}

// Update summary stats
function updateStats() {
function updateStats(uniqueIps, duplicateIps) {
    const totalSites = sitesData.length;
    const totalIps = sitesData.reduce((sum, site) => sum + (site.ip_count || 0), 0);

    document.getElementById('total-sites').textContent = totalSites;
    document.getElementById('total-ips').textContent = totalIps;
    document.getElementById('unique-ips').textContent = uniqueIps || 0;

    // Show duplicate count if there are any
    if (duplicateIps && duplicateIps > 0) {
        document.getElementById('duplicate-ips').textContent = duplicateIps;
        document.getElementById('duplicate-ips-label').style.display = 'block';
    } else {
        document.getElementById('duplicate-ips-label').style.display = 'none';
    }

    document.getElementById('sites-in-use').textContent = '-'; // Will be updated async

    // Count sites in use (async)
@@ -688,6 +699,18 @@ async function loadSiteIps(siteId) {
        const data = await response.json();
        const ips = data.ips || [];

        // Sort IPs by numeric octets
        ips.sort((a, b) => {
            const partsA = a.ip_address.split('.').map(Number);
            const partsB = b.ip_address.split('.').map(Number);
            for (let i = 0; i < 4; i++) {
                if (partsA[i] !== partsB[i]) {
                    return partsA[i] - partsB[i];
                }
            }
            return 0;
        });

        document.getElementById('ip-count').textContent = data.total || ips.length;

        // Render flat IP table

@@ -23,7 +23,7 @@ def validate_scan_status(status: str) -> tuple[bool, Optional[str]]:
        >>> validate_scan_status('invalid')
        (False, 'Invalid status: invalid. Must be one of: running, completed, failed')
    """
    valid_statuses = ['running', 'completed', 'failed']
    valid_statuses = ['running', 'finalizing', 'completed', 'failed', 'cancelled']

    if status not in valid_statuses:
        return False, f'Invalid status: {status}. Must be one of: {", ".join(valid_statuses)}'

@@ -2,12 +2,10 @@ version: '3.8'

services:
  web:
    build: .
    image: sneakyscanner:latest
    image: sneakyscan
    container_name: sneakyscanner-web
    # Use entrypoint script that auto-initializes database on first run
    entrypoint: ["/docker-entrypoint.sh"]
    command: ["python3", "-u", "-m", "web.app"]
    working_dir: /app
    entrypoint: ["python3", "-u", "-m", "web.app"]
    # Note: Using host network mode for scanner capabilities, so no port mapping needed
    # The Flask app will be accessible at http://localhost:5000
    volumes:
@@ -41,6 +39,9 @@ services:
      # Scheduler configuration (APScheduler)
      - SCHEDULER_EXECUTORS=${SCHEDULER_EXECUTORS:-2}
      - SCHEDULER_JOB_DEFAULTS_MAX_INSTANCES=${SCHEDULER_JOB_DEFAULTS_MAX_INSTANCES:-3}
      # UDP scanning configuration
      - UDP_SCAN_ENABLED=${UDP_SCAN_ENABLED:-false}
      - UDP_PORTS=${UDP_PORTS:-53,67,68,69,123,161,500,514,1900}
    # Scanner functionality requires privileged mode and host network for masscan/nmap
    privileged: true
    network_mode: host
@@ -56,8 +57,7 @@ services:
  # Optional: Initialize database on first run
  # Run with: docker-compose -f docker-compose-web.yml run --rm init-db
  init-db:
    build: .
    image: sneakyscanner:latest
    image: sneakyscan
    container_name: sneakyscanner-init-db
    entrypoint: ["python3"]
    command: ["init_db.py", "--db-url", "sqlite:////app/data/sneakyscanner.db"]
@@ -65,3 +65,4 @@ services:
      - ./data:/app/data
    profiles:
      - tools
networks: []

@@ -117,7 +117,7 @@ Retrieve a paginated list of all sites.
| `per_page` | integer | No | 20 | Items per page (1-100) |
| `all` | string | No | - | Set to "true" to return all sites without pagination |

**Success Response (200 OK):**
**Success Response (200 OK) - Paginated:**
```json
{
  "sites": [
@@ -139,13 +139,40 @@ Retrieve a paginated list of all sites.
}
```

**Success Response (200 OK) - All Sites (all=true):**
```json
{
  "sites": [
    {
      "id": 1,
      "name": "Production DC",
      "description": "Production datacenter servers",
      "ip_count": 25,
      "created_at": "2025-11-19T10:30:00Z",
      "updated_at": "2025-11-19T10:30:00Z"
    }
  ],
  "total_ips": 100,
  "unique_ips": 85,
  "duplicate_ips": 15
}
```

**Response Fields (all=true):**

| Field | Type | Description |
|-------|------|-------------|
| `total_ips` | integer | Total count of IP entries across all sites (including duplicates) |
| `unique_ips` | integer | Count of distinct IP addresses |
| `duplicate_ips` | integer | Number of duplicate IP entries (total_ips - unique_ips) |

**Usage Example:**
```bash
# List first page
curl -X GET http://localhost:5000/api/sites \
  -b cookies.txt

# Get all sites (for dropdowns)
# Get all sites with global IP stats
curl -X GET "http://localhost:5000/api/sites?all=true" \
  -b cookies.txt
```
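
If you only need the global IP statistics, they can be pulled straight out of the `all=true` response; a small sketch, assuming `jq` is available locally:

```bash
# Extract the global IP stats from the all=true response (requires jq)
curl -s "http://localhost:5000/api/sites?all=true" -b cookies.txt \
  | jq '{total_ips, unique_ips, duplicate_ips}'
```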
@@ -989,6 +1016,56 @@ curl -X DELETE http://localhost:5000/api/scans/42 \
  -b cookies.txt
```

### Get Scans by IP

Get the last 10 scans containing a specific IP address.

**Endpoint:** `GET /api/scans/by-ip/{ip_address}`

**Authentication:** Required

**Path Parameters:**

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `ip_address` | string | Yes | IP address to search for |

**Success Response (200 OK):**
```json
{
  "ip_address": "192.168.1.10",
  "scans": [
    {
      "id": 42,
      "timestamp": "2025-11-14T10:30:00Z",
      "duration": 125.5,
      "status": "completed",
      "title": "Production Network Scan",
      "config_id": 1,
      "triggered_by": "manual",
      "created_at": "2025-11-14T10:30:00Z"
    },
    {
      "id": 38,
      "timestamp": "2025-11-13T10:30:00Z",
      "duration": 98.2,
      "status": "completed",
      "title": "Production Network Scan",
      "config_id": 1,
      "triggered_by": "scheduled",
      "created_at": "2025-11-13T10:30:00Z"
    }
  ],
  "count": 2
}
```

**Usage Example:**
```bash
curl -X GET http://localhost:5000/api/scans/by-ip/192.168.1.10 \
  -b cookies.txt
```

### Compare Scans

Compare two scans to identify differences in ports, services, and certificates.
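
A usage sketch: the route shape follows the `GET /api/scans/{id1}/compare/{id2}` endpoint listed in the roadmap's API design, and the scan IDs are illustrative:

```bash
# Compare scan 42 against the earlier scan 38
curl -X GET http://localhost:5000/api/scans/42/compare/38 \
  -b cookies.txt
```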
@@ -24,10 +24,10 @@ SneakyScanner is deployed as a Docker container running a Flask web application

**Architecture:**
- **Web Application**: Flask app on port 5000 with modern web UI
- **Database**: SQLite (persisted to volume)
- **Database**: SQLite (persisted to volume) - stores all configurations, scan results, and settings
- **Background Jobs**: APScheduler for async scan execution
- **Scanner**: masscan, nmap, sslyze, Playwright
- **Config Creator**: Web-based CIDR-to-YAML configuration builder
- **Config Management**: Database-backed configuration system managed entirely via web UI
- **Scheduling**: Cron-based scheduled scans with dashboard management

---
@@ -143,6 +143,13 @@ docker compose -f docker-compose-standalone.yml up

SneakyScanner is configured via environment variables. The recommended approach is to use a `.env` file.

**UDP Port Scanning**

- UDP port scanning is disabled by default.
- You can turn it on via a `.env` variable (see the snippet below).
- By default, UDP port scanning covers only the top 20 ports; for convenience, I have also included the Nmap top 100 UDP port list.
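
A minimal `.env` sketch for enabling it; the variable names match the compose file's environment block, and the port list here is only an example:

```bash
# .env: UDP scanning toggle (variable names from docker-compose-web.yml)
UDP_SCAN_ENABLED=true
# Optional override; comma-separated port list (example values)
UDP_PORTS=53,123,161,500
```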
#### Creating Your .env File

```bash
@@ -160,6 +167,7 @@ python3 -c "from cryptography.fernet import Fernet; print('SNEAKYSCANNER_ENCRYPT
nano .env
```

#### Key Configuration Options

| Variable | Description | Default | Required |
@@ -190,54 +198,30 @@ The application needs these directories (created automatically by Docker):

```bash
# Verify directories exist
ls -la configs/ data/ output/ logs/
ls -la data/ output/ logs/

# If missing, create them
mkdir -p configs data output logs
mkdir -p data output logs
```

### Step 2: Configure Scan Targets

You can create scan configurations in two ways:
After starting the application, create scan configurations using the web UI:

**Option A: Using the Web UI (Recommended - Phase 4 Feature)**
**Creating Configurations via Web UI**

1. Navigate to **Configs** in the web interface
2. Click **"Create New Config"**
3. Use the CIDR-based config creator for quick setup:
3. Use the form-based config creator:
   - Enter site name
   - Enter CIDR range (e.g., `192.168.1.0/24`)
   - Select expected ports from dropdowns
   - Click **"Generate Config"**
4. Or use the **YAML Editor** for advanced configurations
5. Save and use immediately in scans or schedules
   - Select expected TCP/UDP ports from dropdowns
   - Optionally enable ping checks
4. Click **"Save Configuration"**
5. Configuration is saved to database and immediately available for scans and schedules

**Option B: Manual YAML Files**
**Note**: All configurations are stored in the database, not as files. This provides better reliability, easier backup, and seamless management through the web interface.

Create YAML configuration files manually in the `configs/` directory:

```bash
# Example configuration
cat > configs/my-network.yaml <<EOF
title: "My Network Infrastructure"
sites:
  - name: "Web Servers"
    cidr: "192.168.1.0/24"  # Scan entire subnet
    expected_ports:
      - port: 80
        protocol: tcp
        service: "http"
      - port: 443
        protocol: tcp
        service: "https"
      - port: 22
        protocol: tcp
        service: "ssh"
    ping_expected: true
EOF
```

**Note**: Phase 4 introduced a powerful config creator in the web UI that makes it easy to generate configs from CIDR ranges without manually editing YAML.

### Step 3: Build Docker Image

@@ -389,38 +373,37 @@ The dashboard provides a central view of your scanning activity:
- **Trend Charts**: Port count trends over time using Chart.js
- **Quick Actions**: Buttons to run scans, create configs, manage schedules

### Managing Scan Configurations (Phase 4)
### Managing Scan Configurations

All scan configurations are stored in the database and managed entirely through the web interface.

**Creating Configs:**
1. Navigate to **Configs** → **Create New Config**
2. **CIDR Creator Mode**:
2. Fill in the configuration form:
   - Enter site name (e.g., "Production Servers")
   - Enter CIDR range (e.g., `192.168.1.0/24`)
   - Select expected TCP/UDP ports from dropdowns
   - Click **"Generate Config"** to create YAML
3. **YAML Editor Mode**:
   - Switch to editor tab for advanced configurations
   - Syntax highlighting with line numbers
   - Validate YAML before saving
   - Enable/disable ping checks
3. Click **"Save Configuration"**
4. Configuration is immediately stored in database and available for use

**Editing Configs:**
1. Navigate to **Configs** → Select config
1. Navigate to **Configs** → Select config from list
2. Click **"Edit"** button
3. Make changes in YAML editor
4. Save changes (validates YAML automatically)
3. Modify any fields in the configuration form
4. Click **"Save Changes"** to update database

**Uploading Configs:**
1. Navigate to **Configs** → **Upload**
2. Select YAML file from your computer
3. File is validated and saved to `configs/` directory

**Downloading Configs:**
- Click **"Download"** button next to any config
- Saves YAML file to your local machine
**Viewing Configs:**
- Navigate to **Configs** page to see all saved configurations
- Each config shows site name, CIDR range, and expected ports
- Click on any config to view full details

**Deleting Configs:**
- Click **"Delete"** button
- Click **"Delete"** button next to any config
- **Warning**: Cannot delete configs used by active schedules
- Deletion removes the configuration from the database permanently

**Note**: All configurations are database-backed, providing automatic backups when you backup the database file.

### Running Scans

@@ -477,12 +460,11 @@ SneakyScanner uses several mounted volumes for data persistence:

| Volume | Container Path | Purpose | Important? |
|--------|----------------|---------|------------|
| `./configs` | `/app/configs` | Scan configuration files (managed via web UI) | Yes |
| `./data` | `/app/data` | SQLite database (contains all scan history) | **Critical** |
| `./data` | `/app/data` | SQLite database (contains configurations, scan history, settings) | **Critical** |
| `./output` | `/app/output` | Scan results (JSON, HTML, ZIP, screenshots) | Yes |
| `./logs` | `/app/logs` | Application logs (rotating file handler) | No |

**Note**: As of Phase 4, the `./configs` volume is read-write to support the web-based config creator and editor. The web UI can now create, edit, and delete configuration files directly.
**Note**: All scan configurations are stored in the SQLite database (`./data/sneakyscanner.db`). There is no separate configs directory or YAML files. Backing up the database file ensures all your configurations are preserved.
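
To confirm the configurations really live in the database, you can inspect the SQLite file directly; a quick sketch (the `scan_configs` table name comes from the schema notes in the roadmap, while the column layout is not spelled out there):

```bash
# Peek at stored scan configurations in the database (table name assumed from the docs)
sqlite3 data/sneakyscanner.db "SELECT * FROM scan_configs LIMIT 5;"
```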
### Backing Up Data

@@ -490,23 +472,22 @@ SneakyScanner uses several mounted volumes for data persistence:
# Create backup directory
mkdir -p backups/$(date +%Y%m%d)

# Backup database
# Backup database (includes all configurations)
cp data/sneakyscanner.db backups/$(date +%Y%m%d)/

# Backup scan outputs
tar -czf backups/$(date +%Y%m%d)/output.tar.gz output/

# Backup configurations
tar -czf backups/$(date +%Y%m%d)/configs.tar.gz configs/
```

**Important**: The database backup includes all scan configurations, settings, schedules, and scan history. No separate configuration file backup is needed.

### Restoring Data

```bash
# Stop application
docker compose -f docker-compose.yml down

# Restore database
# Restore database (includes all configurations)
cp backups/YYYYMMDD/sneakyscanner.db data/

# Restore outputs
@@ -516,6 +497,8 @@ tar -xzf backups/YYYYMMDD/output.tar.gz
docker compose -f docker-compose.yml up -d
```

**Note**: Restoring the database file restores all configurations, settings, schedules, and scan history.

### Cleaning Up Old Scan Results

**Option A: Using the Web UI (Recommended)**
@@ -564,50 +547,52 @@ curl -X POST http://localhost:5000/api/auth/logout \
  -b cookies.txt
```

### Config Management (Phase 4)
### Config Management

```bash
# List all configs
curl http://localhost:5000/api/configs \
  -b cookies.txt

# Get specific config
curl http://localhost:5000/api/configs/prod-network.yaml \
# Get specific config by ID
curl http://localhost:5000/api/configs/1 \
  -b cookies.txt

# Create new config
curl -X POST http://localhost:5000/api/configs \
  -H "Content-Type: application/json" \
  -d '{
    "filename": "test-network.yaml",
    "content": "title: Test Network\nsites:\n  - name: Test\n    cidr: 10.0.0.0/24"
    "name": "Test Network",
    "cidr": "10.0.0.0/24",
    "expected_ports": [
      {"port": 80, "protocol": "tcp", "service": "http"},
      {"port": 443, "protocol": "tcp", "service": "https"}
    ],
    "ping_expected": true
  }' \
  -b cookies.txt

# Update config
curl -X PUT http://localhost:5000/api/configs/test-network.yaml \
curl -X PUT http://localhost:5000/api/configs/1 \
  -H "Content-Type: application/json" \
  -d '{
    "content": "title: Updated Test Network\nsites:\n  - name: Test Site\n    cidr: 10.0.0.0/24"
    "name": "Updated Test Network",
    "cidr": "10.0.1.0/24"
  }' \
  -b cookies.txt

# Download config
curl http://localhost:5000/api/configs/test-network.yaml/download \
  -b cookies.txt -o test-network.yaml

# Delete config
curl -X DELETE http://localhost:5000/api/configs/test-network.yaml \
curl -X DELETE http://localhost:5000/api/configs/1 \
  -b cookies.txt
```

### Scan Management

```bash
# Trigger a scan
# Trigger a scan (using config ID from database)
curl -X POST http://localhost:5000/api/scans \
  -H "Content-Type: application/json" \
  -d '{"config_id": "/app/configs/prod-network.yaml"}' \
  -d '{"config_id": 1}' \
  -b cookies.txt

# List all scans
@@ -634,12 +619,12 @@ curl -X DELETE http://localhost:5000/api/scans/123 \
curl http://localhost:5000/api/schedules \
  -b cookies.txt

# Create schedule
# Create schedule (using config ID from database)
curl -X POST http://localhost:5000/api/schedules \
  -H "Content-Type: application/json" \
  -d '{
    "name": "Daily Production Scan",
    "config_id": "/app/configs/prod-network.yaml",
    "config_id": 1,
    "cron_expression": "0 2 * * *",
    "enabled": true
  }' \
@@ -875,24 +860,25 @@ docker compose -f docker-compose.yml logs web | grep -E "(ERROR|Exception|Traceb
docker compose -f docker-compose.yml exec web which masscan nmap
```

### Config Files Not Appearing in Web UI
### Configs Not Appearing in Web UI

**Problem**: Manually created configs don't show up in web interface
**Problem**: Created configs don't show up in web interface

```bash
# Check file permissions (must be readable by web container)
ls -la configs/
# Check database connectivity
docker compose -f docker-compose.yml logs web | grep -i "database"

# Fix permissions if needed
sudo chown -R 1000:1000 configs/
chmod 644 configs/*.yaml
# Verify database file exists and is readable
ls -lh data/sneakyscanner.db

# Verify YAML syntax is valid
docker compose -f docker-compose.yml exec web python3 -c \
  "import yaml; yaml.safe_load(open('/app/configs/your-config.yaml'))"

# Check web logs for parsing errors
# Check for errors when creating configs
docker compose -f docker-compose.yml logs web | grep -i "config"

# Try accessing configs via API
curl http://localhost:5000/api/configs -b cookies.txt

# If database is corrupted, check integrity
docker compose -f docker-compose.yml exec web sqlite3 /app/data/sneakyscanner.db "PRAGMA integrity_check;"
```

### Health Check Failing
@@ -979,11 +965,11 @@ server {
# Ensure proper ownership of data directories
sudo chown -R $USER:$USER data/ output/ logs/

# Restrict database file permissions
# Restrict database file permissions (contains configurations and sensitive data)
chmod 600 data/sneakyscanner.db

# Configs should be read-only
chmod 444 configs/*.yaml
# Ensure database directory is writable
chmod 700 data/
```

---
@@ -1051,19 +1037,17 @@ mkdir -p "$BACKUP_DIR"
# Stop application for consistent backup
docker compose -f docker-compose.yml stop web

# Backup database
# Backup database (includes all configurations)
cp data/sneakyscanner.db "$BACKUP_DIR/"

# Backup outputs (last 30 days only)
find output/ -type f -mtime -30 -exec cp --parents {} "$BACKUP_DIR/" \;

# Backup configs
cp -r configs/ "$BACKUP_DIR/"

# Restart application
docker compose -f docker-compose.yml start web

echo "Backup complete: $BACKUP_DIR"
echo "Database backup includes all configurations, settings, and scan history"
```

Make executable and schedule with cron:
@@ -1083,15 +1067,18 @@ crontab -e
# Stop application
docker compose -f docker-compose.yml down

# Restore files
# Restore database (includes all configurations)
cp backups/YYYYMMDD_HHMMSS/sneakyscanner.db data/
cp -r backups/YYYYMMDD_HHMMSS/configs/* configs/

# Restore output files
cp -r backups/YYYYMMDD_HHMMSS/output/* output/

# Start application
docker compose -f docker-compose.yml up -d
```

**Note**: Restoring the database file will restore all configurations, settings, schedules, and scan history from the backup.

---

## Support and Further Reading
@@ -1105,13 +1092,13 @@ docker compose -f docker-compose.yml up -d

## What's New

### Phase 4 (2025-11-17) - Config Creator ✅
- **CIDR-based Config Creator**: Web UI for generating scan configs from CIDR ranges
- **YAML Editor**: Built-in editor with syntax highlighting (CodeMirror)
- **Config Management UI**: List, view, edit, download, and delete configs via web interface
- **Config Upload**: Direct YAML file upload for advanced users
- **REST API**: 7 new config management endpoints
### Phase 4+ (2025-11-17) - Database-Backed Configuration System ✅
- **Database-Backed Configs**: All configurations stored in SQLite database (no YAML files)
- **Web-Based Config Creator**: Form-based UI for creating scan configs from CIDR ranges
- **Config Management UI**: List, view, edit, and delete configs via web interface
- **REST API**: Full config management via RESTful API with database storage
- **Schedule Protection**: Prevents deleting configs used by active schedules
- **Automatic Backups**: Configurations included in database backups

### Phase 3 (2025-11-14) - Dashboard & Scheduling ✅
- **Dashboard**: Summary stats, recent scans, trend charts
@@ -1133,5 +1120,5 @@ docker compose -f docker-compose.yml up -d

---

**Last Updated**: 2025-11-17
**Version**: Phase 4 - Config Creator Complete
**Last Updated**: 2025-11-24
**Version**: Phase 4+ - Database-Backed Configuration System

0
docs/KNOWN_ISSUES.md
Normal file
700
docs/ROADMAP.md
@@ -4,677 +4,115 @@

SneakyScanner is a comprehensive **Flask web application** for infrastructure monitoring and security auditing. The primary interface is the web GUI, with a CLI API client planned for scripting and automation needs.

**Current Phase:** Phase 5 Complete ✅ | Phase 6 Next Up
## Version 1.0.0 - Complete ✅

## Progress Overview

**Note:** For detailed architecture and technology stack information, see [README.md](../README.md)

- ✅ **Phase 1: Foundation** - Complete (2025-11-13)
  - Database schema, SQLAlchemy models, settings system, Flask app structure
- ✅ **Phase 2: Flask Web App Core** - Complete (2025-11-14)
  - REST API, background jobs, authentication, web UI, testing (100 tests)
- ✅ **Phase 3: Dashboard & Scheduling** - Complete (2025-11-14)
  - Dashboard, scan history, scheduled scans, trend charts
- ✅ **Phase 4: Config Creator** - Complete (2025-11-17)
  - CIDR-based config creation, YAML editor, config management UI
- ✅ **Phase 5: Webhooks & Alerting** - Complete (2025-11-19)
  - Webhook notifications, alert rules, notification templates
- 📋 **Phase 6: CLI as API Client** - Planned
  - CLI for scripting and automation via API
- 📋 **Phase 7: Advanced Features** - Future
  - Email notifications, scan comparison, CVE integration, timeline view, PDF export

## Target Users

- **Infrastructure teams** monitoring on-premises networks
- **Security professionals** performing periodic security audits
- **DevOps engineers** tracking infrastructure drift
- **Single users or small teams** (not enterprise multi-tenant)

## Database Schema Design

### Core Tables

#### `scans`
Stores metadata about each scan execution.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique scan ID |
| `timestamp` | DATETIME | Scan start time (UTC) |
| `duration` | FLOAT | Total scan duration (seconds) |
| `status` | VARCHAR(20) | `running`, `completed`, `failed` |
| `config_id` | INTEGER | FK to scan_configs table |
| `title` | TEXT | Scan title from config |
| `json_path` | TEXT | Path to JSON report |
| `html_path` | TEXT | Path to HTML report |
| `zip_path` | TEXT | Path to ZIP archive |
| `screenshot_dir` | TEXT | Path to screenshot directory |
| `created_at` | DATETIME | Record creation time |
| `triggered_by` | VARCHAR(50) | `manual`, `scheduled`, `api` |
| `schedule_id` | INTEGER | FK to schedules (if triggered by schedule) |
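
Because this table maps directly onto the SQLite file, ad-hoc queries against it are straightforward; a sketch against the default database path:

```bash
# Ten most recent scans with status and duration, straight from the scans table
sqlite3 data/sneakyscanner.db \
  "SELECT id, timestamp, status, duration FROM scans ORDER BY timestamp DESC LIMIT 10;"
```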
#### `scan_sites`
Logical grouping of IPs by site.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique site record ID |
| `scan_id` | INTEGER | FK to scans |
| `site_name` | VARCHAR(255) | Site name from config |

#### `scan_ips`
IP addresses scanned in each scan.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique IP record ID |
| `scan_id` | INTEGER | FK to scans |
| `site_id` | INTEGER | FK to scan_sites |
| `ip_address` | VARCHAR(45) | IPv4 or IPv6 address |
| `ping_expected` | BOOLEAN | Expected ping response |
| `ping_actual` | BOOLEAN | Actual ping response |

#### `scan_ports`
Discovered TCP/UDP ports.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique port record ID |
| `scan_id` | INTEGER | FK to scans |
| `ip_id` | INTEGER | FK to scan_ips |
| `port` | INTEGER | Port number (1-65535) |
| `protocol` | VARCHAR(10) | `tcp` or `udp` |
| `expected` | BOOLEAN | Was this port expected? |
| `state` | VARCHAR(20) | `open`, `closed`, `filtered` |

#### `scan_services`
Detected services on open ports.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique service record ID |
| `scan_id` | INTEGER | FK to scans |
| `port_id` | INTEGER | FK to scan_ports |
| `service_name` | VARCHAR(100) | Service name (e.g., `ssh`, `http`) |
| `product` | VARCHAR(255) | Product name (e.g., `OpenSSH`) |
| `version` | VARCHAR(100) | Version string |
| `extrainfo` | TEXT | Additional nmap info |
| `ostype` | VARCHAR(100) | OS type if detected |
| `http_protocol` | VARCHAR(10) | `http` or `https` (if web service) |
| `screenshot_path` | TEXT | Relative path to screenshot |

#### `scan_certificates`
SSL/TLS certificates discovered on HTTPS services.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique certificate record ID |
| `scan_id` | INTEGER | FK to scans |
| `service_id` | INTEGER | FK to scan_services |
| `subject` | TEXT | Certificate subject (CN) |
| `issuer` | TEXT | Certificate issuer |
| `serial_number` | TEXT | Serial number |
| `not_valid_before` | DATETIME | Validity start date |
| `not_valid_after` | DATETIME | Validity end date |
| `days_until_expiry` | INTEGER | Days until expiration |
| `sans` | TEXT | JSON array of SANs |
| `is_self_signed` | BOOLEAN | Self-signed certificate flag |

#### `scan_tls_versions`
TLS version support and cipher suites.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique TLS version record ID |
| `scan_id` | INTEGER | FK to scans |
| `certificate_id` | INTEGER | FK to scan_certificates |
| `tls_version` | VARCHAR(20) | `TLS 1.0`, `TLS 1.1`, `TLS 1.2`, `TLS 1.3` |
| `supported` | BOOLEAN | Is this version supported? |
| `cipher_suites` | TEXT | JSON array of cipher suites |

### Scheduling & Notifications Tables

#### `schedules`
Scheduled scan configurations.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique schedule ID |
| `name` | VARCHAR(255) | Schedule name (e.g., "Daily prod scan") |
| `config_id` | INTEGER | FK to scan_configs table |
| `cron_expression` | VARCHAR(100) | Cron-like schedule (e.g., `0 2 * * *`) |
| `enabled` | BOOLEAN | Is schedule active? |
| `last_run` | DATETIME | Last execution time |
| `next_run` | DATETIME | Next scheduled execution |
| `created_at` | DATETIME | Schedule creation time |
| `updated_at` | DATETIME | Last modification time |

#### `alerts`
Alert history and notifications sent.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique alert ID |
| `scan_id` | INTEGER | FK to scans |
| `alert_type` | VARCHAR(50) | `new_port`, `cert_expiry`, `service_change`, `ping_failed` |
| `severity` | VARCHAR(20) | `info`, `warning`, `critical` |
| `message` | TEXT | Human-readable alert message |
| `ip_address` | VARCHAR(45) | Related IP (optional) |
| `port` | INTEGER | Related port (optional) |
| `email_sent` | BOOLEAN | Was email notification sent? |
| `email_sent_at` | DATETIME | Email send timestamp |
| `created_at` | DATETIME | Alert creation time |

#### `alert_rules`
User-defined alert rules.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique rule ID |
| `rule_type` | VARCHAR(50) | `unexpected_port`, `cert_expiry`, `service_down`, etc. |
| `enabled` | BOOLEAN | Is rule active? |
| `threshold` | INTEGER | Threshold value (e.g., days for cert expiry) |
| `email_enabled` | BOOLEAN | Send email for this rule? |
| `created_at` | DATETIME | Rule creation time |

### Settings Table

#### `settings`
Application configuration key-value store.

| Column | Type | Description |
|--------|------|-------------|
| `id` | INTEGER PRIMARY KEY | Unique setting ID |
| `key` | VARCHAR(255) UNIQUE | Setting key (e.g., `smtp_server`) |
| `value` | TEXT | Setting value (JSON for complex values) |
| `updated_at` | DATETIME | Last modification time |

**Example Settings:**
- `smtp_server` - SMTP server hostname
- `smtp_port` - SMTP port (587, 465, 25)
- `smtp_username` - SMTP username
- `smtp_password` - SMTP password (encrypted)
- `smtp_from_email` - From email address
- `smtp_to_emails` - JSON array of recipient emails
- `app_password` - Single-user password hash (bcrypt)
- `retention_days` - How long to keep old scans (0 = forever)
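
Since settings are plain key-value rows (JSON-encoded where the value is complex), a single setting can be read back with a one-liner; a sketch:

```bash
# Read one setting from the key-value store
sqlite3 data/sneakyscanner.db "SELECT value FROM settings WHERE key = 'retention_days';"
```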
## API Design

### REST API Endpoints

All API endpoints return JSON and follow RESTful conventions.

#### Scans

| Method | Endpoint | Description | Request Body | Response |
|--------|----------|-------------|--------------|----------|
| `GET` | `/api/scans` | List all scans (paginated) | - | `{ "scans": [...], "total": N, "page": 1 }` |
| `GET` | `/api/scans/{id}` | Get scan details | - | `{ "scan": {...} }` |
| `POST` | `/api/scans` | Trigger new scan | `{ "config_id": "path" }` | `{ "scan_id": N, "status": "running" }` |
| `DELETE` | `/api/scans/{id}` | Delete scan and files | - | `{ "status": "deleted" }` |
| `GET` | `/api/scans/{id}/status` | Get scan status | - | `{ "status": "running", "progress": "45%" }` |
| `GET` | `/api/scans/{id1}/compare/{id2}` | Compare two scans | - | `{ "diff": {...} }` |

#### Schedules

| Method | Endpoint | Description | Request Body | Response |
|--------|----------|-------------|--------------|----------|
| `GET` | `/api/schedules` | List all schedules | - | `{ "schedules": [...] }` |
| `GET` | `/api/schedules/{id}` | Get schedule details | - | `{ "schedule": {...} }` |
| `POST` | `/api/schedules` | Create new schedule | `{ "name": "...", "config_id": "...", "cron_expression": "..." }` | `{ "schedule_id": N }` |
| `PUT` | `/api/schedules/{id}` | Update schedule | `{ "enabled": true, "cron_expression": "..." }` | `{ "status": "updated" }` |
| `DELETE` | `/api/schedules/{id}` | Delete schedule | - | `{ "status": "deleted" }` |
| `POST` | `/api/schedules/{id}/trigger` | Manually trigger scheduled scan | - | `{ "scan_id": N }` |

#### Alerts

| Method | Endpoint | Description | Request Body | Response |
|--------|----------|-------------|--------------|----------|
| `GET` | `/api/alerts` | List recent alerts | - | `{ "alerts": [...] }` |
| `GET` | `/api/alerts/rules` | List alert rules | - | `{ "rules": [...] }` |
| `POST` | `/api/alerts/rules` | Create alert rule | `{ "rule_type": "...", "threshold": N }` | `{ "rule_id": N }` |
| `PUT` | `/api/alerts/rules/{id}` | Update alert rule | `{ "enabled": false }` | `{ "status": "updated" }` |
| `DELETE` | `/api/alerts/rules/{id}` | Delete alert rule | - | `{ "status": "deleted" }` |
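
A request sketch for the rule endpoints above, using the documented body shape (the threshold value is illustrative):

```bash
# Create a cert-expiry alert rule with a 30-day threshold
curl -X POST http://localhost:5000/api/alerts/rules \
  -H "Content-Type: application/json" \
  -d '{"rule_type": "cert_expiry", "threshold": 30}' \
  -b cookies.txt
```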
#### Settings

| Method | Endpoint | Description | Request Body | Response |
|--------|----------|-------------|--------------|----------|
| `GET` | `/api/settings` | Get all settings (sanitized) | - | `{ "settings": {...} }` |
| `PUT` | `/api/settings` | Update settings | `{ "smtp_server": "...", ... }` | `{ "status": "updated" }` |
| `POST` | `/api/settings/test-email` | Test email configuration | - | `{ "status": "sent" }` |

#### Statistics & Trends

| Method | Endpoint | Description | Request Body | Response |
|--------|----------|-------------|--------------|----------|
| `GET` | `/api/stats/summary` | Dashboard summary stats | - | `{ "total_scans": N, "last_scan": "...", ... }` |
| `GET` | `/api/stats/trends` | Trend data for charts | `?days=30&metric=port_count` | `{ "data": [...] }` |
| `GET` | `/api/stats/certificates` | Certificate expiry overview | - | `{ "expiring_soon": [...], "expired": [...] }` |
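
The trends endpoint takes its parameters as a query string, as the table shows; for example:

```bash
# Port-count trend data for the last 30 days
curl -X GET "http://localhost:5000/api/stats/trends?days=30&metric=port_count" \
  -b cookies.txt
```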
### Authentication

**Phase 2-3:** Simple session-based authentication (single-user)
- Login endpoint: `POST /api/auth/login` (username/password)
- Logout endpoint: `POST /api/auth/logout`
- Session cookies with Flask-Login
- Password stored as bcrypt hash in settings table

**Phase 6:** API token authentication for CLI client
- Generate API token: `POST /api/auth/token`
- Revoke token: `DELETE /api/auth/token`
- CLI sends token in `Authorization: Bearer <token>` header
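
Once Phase 6 lands, a token-authenticated call would look something like this sketch (the environment variable name is a placeholder):

```bash
# Planned Phase 6 usage: bearer-token authentication (token variable is hypothetical)
curl -H "Authorization: Bearer $SNEAKYSCANNER_TOKEN" \
  http://localhost:5000/api/scans
```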
## Phased Roadmap

### Phase 1: Foundation ✅ COMPLETE
### Phase 1: Foundation ✅
**Completed:** 2025-11-13

**Deliverables:**
- SQLite database with 11 tables (scans, sites, IPs, ports, services, certificates, TLS versions, schedules, alerts, alert_rules, settings)
- SQLAlchemy ORM models with relationships
- Alembic migration system
- Settings system with encryption (bcrypt for passwords, Fernet for sensitive data)
- Database schema with 11 tables (SQLAlchemy ORM, Alembic migrations)
- Settings system with encryption (bcrypt, Fernet)
- Flask app structure with API blueprints
- Docker Compose deployment configuration
- Validation script for verification
- Docker Compose deployment

---

### Phase 2: Flask Web App Core ✅ COMPLETE
### Phase 2: Flask Web App Core ✅
**Completed:** 2025-11-14

**Deliverables:**
- REST API with 8 endpoints (scans: trigger, list, get, status, delete; settings: get, update, test-email)
- Background job queue using APScheduler (up to 3 concurrent scans)
- Session-based authentication with Flask-Login
- Database integration for scan results (full normalized schema population)
- Web UI templates (dashboard, scan list/detail, login, error pages)
- Error handling with content negotiation (JSON/HTML) and request IDs
- Logging system with rotating file handlers
- Production Docker Compose deployment
- Comprehensive test suite (100 tests, all passing)
- Documentation (API_REFERENCE.md, DEPLOYMENT.md)
- REST API (8 endpoints for scans, settings)
- Background job queue (APScheduler, 3 concurrent scans)
- Session-based authentication (Flask-Login)
- Web UI templates (dashboard, scan list/detail, login)
- Comprehensive test suite (100 tests)

---

### Phase 3: Dashboard & Scheduling ✅ COMPLETE
### Phase 3: Dashboard & Scheduling ✅
**Completed:** 2025-11-14

**Deliverables:**
- Dashboard with summary stats (total scans, IPs, ports, services)
- Recent scans table with clickable details
- Scan detail page with full results display
- Historical trend charts using Chart.js (port counts over time)
- Scheduled scan management UI (create, edit, delete, enable/disable)
- Schedule execution with APScheduler and cron expressions
- Manual scan trigger from web UI
- Navigation menu (Dashboard, Scans, Schedules, Configs, Settings)
- Download buttons for scan reports (JSON, HTML, ZIP)
- Dashboard with summary stats and trend charts (Chart.js)
- Scan detail pages with full results display
- Scheduled scan management (cron expressions)
- Download buttons for reports (JSON, HTML, ZIP)

---

### Phase 4: Config Creator ✅ COMPLETE
### Phase 4: Config Creator ✅
**Completed:** 2025-11-17

**Deliverables:**
- CIDR-based config creation UI (simplified workflow for quick config generation)
- YAML editor with CodeMirror (syntax highlighting, line numbers)
- Config management UI (list, view, edit, download, delete)
- Direct YAML upload for advanced users
- REST API for config operations (7 endpoints: list, get, create, update, delete, upload, download)
- Schedule dependency protection (prevents deleting configs used by schedules)
- Comprehensive testing (25+ unit and integration tests)
- CIDR-based config creation UI
- YAML editor with CodeMirror
- Config management (list, view, edit, download, delete)
- REST API for config operations (7 endpoints)

---

### Phase 5: Webhooks & Alerting ✅ COMPLETE
### Phase 5: Webhooks & Alerting ✅
**Completed:** 2025-11-19

**Goals:**
- ✅ Implement webhook notification system for real-time alerting
- ✅ Add alert rule configuration for unexpected exposure detection
- ✅ Create notification template system for flexible alerting

**Core Use Case:**
Monitor infrastructure for misconfigurations that expose unexpected ports/services to the world. When a scan detects an open port that wasn't defined in the YAML config's `expected_ports` list, trigger immediate notifications via webhooks.

**Implemented Features:**

#### 1. Alert Rule Engine ✅
**Purpose:** Automatically detect and classify infrastructure anomalies after each scan.

**Alert Types:**
- `unexpected_port` - Port open but not in config's `expected_ports` list
- `unexpected_service` - Service detected that doesn't match expected service name
- `cert_expiry` - SSL/TLS certificate expiring soon (configurable threshold)
- `ping_failed` - Expected host not responding to ping
- `service_down` - Previously detected service no longer responding
- `service_change` - Service version/product changed between scans
- `weak_tls` - TLS 1.0/1.1 detected or weak cipher suites
- `new_host` - New IP address responding in CIDR range
- `host_disappeared` - Previously seen IP no longer responding

**Alert Severity Levels:**
- `critical` - Unexpected internet-facing service (ports 80/443/22/3389/etc.)
- `warning` - Minor configuration drift or upcoming cert expiry
- `info` - Informational alerts (new host discovered, service version change)

**Alert Rule Configuration:**
```yaml
# Example alert rule configuration (stored in DB)
alert_rules:
  - id: 1
    rule_type: unexpected_port
    enabled: true
    severity: critical
    webhook_enabled: true
    filter_conditions:
      ports: [22, 80, 443, 3389, 3306, 5432, 27017]  # High-risk ports

  - id: 2
    rule_type: cert_expiry
    enabled: true
    severity: warning
    threshold: 30  # Days before expiry
    webhook_enabled: true
```

**Implementation:**
- ✅ Evaluate alert rules after each scan completes
- ✅ Compare current scan results to expected configuration
- ✅ Generate alerts and store in `alerts` table
- ✅ Trigger notifications based on rule configuration
- ✅ Alert deduplication (don't spam for same issue)

#### 2. Webhook Notifications ✅
**Purpose:** Real-time HTTP POST notifications for integration with external systems (Slack, PagerDuty, custom dashboards, SIEM tools).

**Webhook Configuration (via Settings API):**
```json
{
  "webhook_enabled": true,
  "webhook_urls": [
    {
      "id": 1,
      "name": "Slack Security Channel",
      "url": "https://hooks.slack.com/services/XXX/YYY/ZZZ",
      "enabled": true,
      "auth_type": "none",
      "custom_headers": {},
      "alert_types": ["unexpected_port", "unexpected_service", "weak_tls"],
      "severity_filter": ["critical", "warning"]
    },
    {
      "id": 2,
      "name": "PagerDuty",
      "url": "https://events.pagerduty.com/v2/enqueue",
      "enabled": true,
      "auth_type": "bearer",
      "auth_token": "encrypted_token",
      "custom_headers": {
        "Content-Type": "application/json"
      },
      "alert_types": ["unexpected_port"],
      "severity_filter": ["critical"]
    }
  ]
}
```

**Webhook Payload Format (JSON):**
```json
{
  "event_type": "scan_alert",
  "alert_id": 42,
  "alert_type": "unexpected_port",
  "severity": "critical",
  "timestamp": "2025-11-17T14:23:45Z",
  "scan": {
    "scan_id": 123,
    "title": "Production Network Scan",
    "timestamp": "2025-11-17T14:15:00Z",
    "config_id": "prod_config.yaml",
    "triggered_by": "scheduled"
  },
  "alert_details": {
    "message": "Unexpected port 3306 (MySQL) exposed on 192.168.1.100",
    "ip_address": "192.168.1.100",
    "port": 3306,
    "protocol": "tcp",
    "state": "open",
    "service": {
      "name": "mysql",
      "product": "MySQL",
      "version": "8.0.32"
    },
    "expected": false,
    "site_name": "Production Servers"
  },
  "recommended_actions": [
    "Verify if MySQL should be exposed externally",
    "Check firewall rules for 192.168.1.100",
    "Review MySQL bind-address configuration"
  ],
  "web_url": "https://sneakyscanner.local/scans/123"
}
```

**Webhook Features:**
- ✅ Multiple webhook URLs with independent configuration
- ✅ Per-webhook filtering by alert type and severity
- ✅ Custom headers support (for API keys, auth tokens)
- ✅ Authentication methods:
  - `none` - No authentication
  - `bearer` - Bearer token in Authorization header
  - `basic` - Basic authentication
  - `custom` - Custom header-based auth
- ✅ Retry logic with exponential backoff (3 attempts)
- ✅ Webhook delivery tracking (webhook_sent, webhook_sent_at, webhook_response_code)
- ✅ Test webhook functionality in Settings UI
- ✅ Timeout configuration (default 10 seconds)
- ✅ Webhook delivery history and logs

**Webhook API Endpoints:**
- ✅ `POST /api/webhooks` - Create webhook configuration
- ✅ `GET /api/webhooks` - List all webhooks
- ✅ `PUT /api/webhooks/{id}` - Update webhook configuration
- ✅ `DELETE /api/webhooks/{id}` - Delete webhook
- ✅ `POST /api/webhooks/{id}/test` - Send test webhook
- ✅ `GET /api/webhooks/{id}/history` - Get delivery history
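
A sketch of exercising these endpoints with curl (the webhook ID and URL are illustrative, and the request body uses the field names from the configuration example above):

```bash
# Create a webhook, then fire a test delivery at it
curl -X POST http://localhost:5000/api/webhooks \
  -H "Content-Type: application/json" \
  -d '{"name": "Slack Security Channel", "url": "https://hooks.slack.com/services/XXX/YYY/ZZZ", "enabled": true}' \
  -b cookies.txt

curl -X POST http://localhost:5000/api/webhooks/1/test \
  -b cookies.txt
```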
**Notification Templates:**
|
||||
Flexible template system supporting multiple platforms (Slack, Discord, PagerDuty, etc.):
|
||||
```json
|
||||
{
|
||||
"text": "SneakyScanner Alert: Unexpected Port Detected",
|
||||
"attachments": [
|
||||
{
|
||||
"color": "danger",
|
||||
"fields": [
|
||||
{"title": "IP Address", "value": "192.168.1.100", "short": true},
|
||||
{"title": "Port", "value": "3306/tcp", "short": true},
|
||||
{"title": "Service", "value": "MySQL 8.0.32", "short": true},
|
||||
{"title": "Severity", "value": "CRITICAL", "short": true}
|
||||
],
|
||||
"footer": "SneakyScanner",
|
||||
"ts": 1700234625
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
- Alert Rule Engine (9 alert types: unexpected_port, cert_expiry, ping_failed, etc.)
|
||||
- Webhook notifications with retry logic
|
||||
- Multiple webhook URLs with independent filtering
|
||||
- Notification templates (Slack, Discord, PagerDuty support)
|
||||
- Alert deduplication
|
||||
|
||||
---
|
||||
|
||||
**Deliverables:**
|
||||
- ✅ Alert Rule Engine with 9 alert types (unexpected_port, unexpected_service, cert_expiry, ping_failed, service_down, service_change, weak_tls, new_host, host_disappeared)
|
||||
- ✅ Alert severity classification (critical, warning, info)
|
||||
- ✅ Alert rule configuration API (CRUD operations)
|
||||
- ✅ Webhook notification system with retry logic
|
||||
- ✅ Multiple webhook URL support with independent configuration
|
||||
- ✅ Notification template system for flexible platform integration (Slack, Discord, PagerDuty, custom)
|
||||
- ✅ Webhook API endpoints (create, list, update, delete, test, history)
|
||||
- ✅ Custom headers and authentication support (none, bearer, basic, custom)
|
||||
- ✅ Webhook delivery tracking and logging
|
||||
- ✅ Alert deduplication to prevent notification spam
|
||||
- ✅ Integration with scan completion workflow
|
||||
## Planned Features
|
||||
|
||||
**Success Criteria Met:**
|
||||
- ✅ Alerts triggered within 30 seconds of scan completion
|
||||
- ✅ Webhook POST delivered with retry on failure
|
||||
- ✅ Zero false positives for expected ports/services
|
||||
- ✅ Alert deduplication prevents notification spam
|
||||
### Version 1.1.0 - Communication & Automation
|
||||
|
||||
#### CLI as API Client
|
||||
- CLI tool for scripting and automation via REST API
|
||||
- API token authentication (Bearer tokens)
|
||||
- Commands for scan management, schedules, alerts
|
||||
|
||||
#### Email Notifications
|
||||
- SMTP integration with Flask-Mail
|
||||
- Jinja2 email templates (HTML + plain text)
|
||||
- Configurable recipients and rate limiting
|
||||
|
||||
#### Site CSV Export/Import
|
||||
- Bulk site management via CSV files
|
||||
|
||||
---
|
||||
|
||||
### Phase 6: CLI as API Client
|
||||
**Status:** Planned
|
||||
**Priority:** MEDIUM
|
||||
### Version 1.2.0 - Reporting & Analysis
|
||||
|
||||
**Goals:**
|
||||
- Create CLI API client for scripting and automation
|
||||
- Maintain standalone mode for testing
|
||||
- API token authentication
|
||||
#### Scan Comparison
|
||||
- Compare two scans API endpoint
|
||||
- Side-by-side comparison view with color-coded differences
|
||||
- Export comparison report to PDF/HTML
|
||||
|
||||
**Planned Features:**
|
||||
1. **API Client Mode:**
|
||||
- `--api-mode` flag to enable API client mode
|
||||
- `--api-url` and `--api-token` arguments
|
||||
- Trigger scans via API, poll for status, download results
|
||||
- Scans stored centrally in database
|
||||
- Standalone mode still available for testing
|
||||
|
||||
2. **API Token System:**
|
||||
- Token generation UI in settings page
|
||||
- Secure token storage (hashed in database)
|
||||
- Token authentication middleware
|
||||
- Token expiration and revocation
|
||||
|
||||
3. **Benefits:**
|
||||
- Centralized scan history accessible via web dashboard
|
||||
- No need to mount volumes for output
|
||||
- Scheduled scans managed through web UI
|
||||
- Scriptable automation while leveraging web features
|
||||
#### Enhanced Reports
|
||||
- Sortable/filterable tables (DataTables.js)
|
||||
- PDF export (WeasyPrint)
|
||||
|
||||
---
|
||||
|
||||
### Phase 7: Advanced Features
|
||||
**Status:** Future/Deferred
|
||||
**Priority:** LOW
|
||||
### Version 1.3.0 - Visualization
|
||||
|
||||
**Planned Features:**
|
||||
#### Timeline View
|
||||
- Visual scan history timeline
|
||||
- Filter by site/IP
|
||||
- Event annotations
|
||||
|
||||
1. **Email Notifications:**
|
||||
- SMTP integration with Flask-Mail
|
||||
- Jinja2 email templates (HTML + plain text)
|
||||
- Settings API for email configuration
|
||||
- Test email functionality
|
||||
- Email delivery tracking
|
||||
- Rate limiting to prevent email flood
|
||||
- Configurable recipients (multiple emails)
|
||||
|
||||
2. **Scan Comparison:**
|
||||
- Compare two scans API endpoint
|
||||
- Side-by-side comparison view
|
||||
- Color-coded differences (green=new, red=removed, yellow=changed)
|
||||
- Filter by change type
|
||||
- Export comparison report to PDF/HTML
|
||||
- "Compare with previous scan" button on scan detail page
|
||||
|
||||
3. **Enhanced Reports:**
|
||||
- Sortable/filterable tables (DataTables.js)
|
||||
- Inline screenshot thumbnails with lightbox
|
||||
- PDF export (WeasyPrint)
|
||||
|
||||
4. **Vulnerability Detection:**
|
||||
- CVE database integration (NVD API)
|
||||
- Service version matching to known CVEs
|
||||
- CVSS severity scores
|
||||
- Alert rules for critical CVEs
|
||||
|
||||
5. **Timeline View:**
|
||||
- Visual scan history timeline
|
||||
- Filter by site/IP
|
||||
- Event annotations
|
||||
|
||||
6. **Advanced Charts:**
|
||||
- Port activity heatmap
|
||||
- Service version tracking
|
||||
- Certificate expiration forecast
|
||||
|
||||
7. **Additional Integrations:**
|
||||
- Prometheus metrics export
|
||||
- CSV export/import
|
||||
- Advanced reporting dashboards

### Version 2.0.0 - Security Intelligence

#### Vulnerability Detection

- CVE database integration (NVD API)
- Service version matching to known CVEs
- CVSS severity scores

---

## Development Workflow

### Iteration Cycle

1. **Plan** - Define the features for the phase
2. **Implement** - Code backend + frontend
3. **Test** - Unit tests + manual testing
4. **Deploy** - Update Docker Compose
5. **Document** - Update README.md and ROADMAP.md
6. **Review** - Get user feedback
7. **Iterate** - Adjust priorities based on feedback

### Git Workflow

- **main branch** - Stable releases
- **develop branch** - Active development
- **feature branches** - Individual features (`feature/dashboard`, `feature/scheduler`)
- **Pull requests** - Review before merge

### Testing Strategy

- **Unit tests** - pytest for models and API endpoints (a test sketch follows this list)
- **Integration tests** - Full scan → DB → API workflow
- **Manual testing** - UI/UX testing in the browser
- **Performance tests** - Large scans, database queries
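
A sketch of the unit-test shape, assuming an app factory; `create_app`, its `testing` flag, and the route are placeholders:

```python
"""Sketch only: endpoint test with Flask's test client."""
import pytest
from web.app import create_app  # hypothetical app factory

@pytest.fixture
def client():
    app = create_app(testing=True)
    with app.test_client() as c:
        yield c

def test_list_scans_requires_auth(client):
    # Unauthenticated requests should redirect to login or get 401
    assert client.get("/api/scans").status_code in (302, 401)
```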

### Documentation

- **README.md** - User-facing documentation (updated each phase)
- **ROADMAP.md** - This file (updated as priorities shift)
- **CLAUDE.md** - Developer documentation (architecture, code references)
- **API.md** - API documentation (OpenAPI/Swagger in Phase 4)

## Resources & References

### Documentation

- [Flask Documentation](https://flask.palletsprojects.com/)
- [SQLAlchemy ORM](https://docs.sqlalchemy.org/)
- [APScheduler](https://apscheduler.readthedocs.io/)
- [Chart.js](https://www.chartjs.org/docs/)
- [Bootstrap 5](https://getbootstrap.com/docs/5.3/)

### Tutorials

- [Flask Mega-Tutorial](https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world)
- [SQLAlchemy Tutorial](https://docs.sqlalchemy.org/en/20/tutorial/)
- [APScheduler with Flask](https://github.com/viniciuschiele/flask-apscheduler)

### Similar Projects (Inspiration)

- [OpenVAS](https://www.openvas.org/) - Vulnerability scanner with web UI
- [Nessus](https://www.tenable.com/products/nessus) - Commercial scanner (inspiration for UI/UX)
- [OWASP ZAP](https://www.zaproxy.org/) - Web app scanner (comparison reports, alerts)

---

## Changelog

| Date | Version | Changes |
|------|---------|---------|
| 2025-11-14 | 1.0 | Initial roadmap created based on user requirements |
| 2025-11-13 | 1.1 | **Phase 1 COMPLETE** - Database schema, SQLAlchemy models, Flask app structure, settings system with encryption, Alembic migrations, API blueprints, Docker support, validation script |
| 2025-11-14 | 1.2 | **Phase 2 COMPLETE** - REST API (5 scan endpoints, 3 settings endpoints), background jobs (APScheduler), authentication (Flask-Login), web UI (dashboard, scans, login, errors), error handling (content negotiation, request IDs, logging), 100 tests passing, comprehensive documentation (API_REFERENCE.md, DEPLOYMENT.md, PHASE2_COMPLETE.md) |
| 2025-11-17 | 1.3 | **Bug Fix** - Fixed Chart.js infinite canvas growth on the scan detail page (duplicate initialization, missing chart.destroy(), missing fixed-height container) |
| 2025-11-17 | 1.4 | **Phase 4 COMPLETE** - Config Creator with CIDR-based creation, YAML editor (CodeMirror), config management UI (list/edit/delete), REST API (7 endpoints), Docker volume permissions fix, comprehensive testing and documentation |
| 2025-11-17 | 1.5 | **Roadmap Compression** - Condensed completed phases (1-4) into concise summaries, updated project scope to emphasize the web GUI frontend with the CLI as an API client coming in Phase 6, reorganized phases for clarity |
| 2025-11-19 | 1.6 | **Phase 5 Progress** - Completed webhooks, notification templates, and alerting rules. Alert Rule Engine and Webhook System implemented. |
| 2025-11-19 | 1.7 | **Phase 5 COMPLETE** - Webhooks & Alerting phase completed. Moved Email Notifications and Scan Comparison to Phase 7. Alert rules, webhook notifications, and notification templates fully implemented and tested. |
| 2025-11-13 | 1.0.0-alpha | Phase 1 complete - Foundation |
| 2025-11-14 | 1.0.0-beta | Phases 2-3 complete - Web App Core, Dashboard & Scheduling |
| 2025-11-17 | 1.0.0-rc1 | Phase 4 complete - Config Creator |
| 2025-11-19 | 1.0.0 | Phase 5 complete - Webhooks & Alerting |
---

**Last Updated:** 2025-11-20

**Next Review:** Before Phase 6 kickoff (CLI as API Client)

BIN docs/alerts.png
Binary file not shown. (Before: 103 KiB)
Binary file not shown. (Before: 60 KiB)
BIN docs/configs.png
Binary file not shown. (Before: 56 KiB)
BIN docs/scans.png
Binary file not shown. (Before: 61 KiB)
@@ -6,8 +6,8 @@
 set -e
 
 CONFIG_FILE="app/web/config.py"
-DEVELOP_BRANCH="develop"
-STAGING_BRANCH="staging"
+DEVELOP_BRANCH="nightly"
+STAGING_BRANCH="beta"
 MAIN_BRANCH="master"
 
 # Colors for output

43 setup.sh
@@ -91,27 +91,40 @@ echo "Creating required directories..."
 mkdir -p data logs output configs
 echo "✓ Directories created"
 
-# Check if Docker is running
+# Check if Podman is running
 echo ""
-echo "Checking Docker..."
-if ! docker info > /dev/null 2>&1; then
-    echo "✗ Docker is not running or not installed"
-    echo "Please install Docker and start the Docker daemon"
+echo "Checking Podman..."
+if ! podman info > /dev/null 2>&1; then
+    echo "✗ Podman is not running or not installed"
+    echo "Please install Podman"
     exit 1
 fi
-echo "✓ Docker is running"
+echo "✓ Podman is available"
 
 # Build and start
 echo ""
-echo "Building and starting SneakyScanner..."
+echo "Starting SneakyScanner..."
 echo "This may take a few minutes on first run..."
 echo ""
 
-docker compose build
+podman build --network=host -t sneakyscan .
 
+# Initialize database if it doesn't exist or is empty
 echo ""
-echo "Starting SneakyScanner..."
-docker compose up -d
+echo "Initializing database..."
+
+# Build init command with optional password
+INIT_CMD="init_db.py --db-url sqlite:////app/data/sneakyscanner.db --force"
+if [ -n "$INITIAL_PASSWORD" ]; then
+    INIT_CMD="$INIT_CMD --password $INITIAL_PASSWORD"
+fi
+
+podman run --rm --entrypoint python3 -w /app \
+    -v "$(pwd)/data:/app/data" \
+    sneakyscan $INIT_CMD
+echo "✓ Database initialized"
+
+podman-compose up -d
 
 # Wait for service to be healthy
 echo ""
@@ -119,7 +132,7 @@ echo "Waiting for application to start..."
 sleep 5
 
 # Check if container is running
-if docker ps | grep -q sneakyscanner-web; then
+if podman ps | grep -q sneakyscanner-web; then
     echo ""
     echo "================================================"
     echo " ✓ SneakyScanner is Running!"
@@ -140,15 +153,15 @@ if docker ps | grep -q sneakyscanner-web; then
     fi
     echo ""
     echo "Useful commands:"
-    echo "  docker compose logs -f      # View logs"
-    echo "  docker compose stop         # Stop the service"
-    echo "  docker compose restart      # Restart the service"
+    echo "  podman-compose logs -f      # View logs"
+    echo "  podman-compose stop         # Stop the service"
+    echo "  podman-compose restart      # Restart the service"
     echo ""
     echo "⚠ IMPORTANT: Change your password after first login!"
     echo "================================================"
 else
     echo ""
     echo "✗ Container failed to start. Check logs with:"
-    echo "  docker compose logs"
+    echo "  podman-compose logs"
     exit 1
 fi