diff --git a/analyzer/windows/modules/auxiliary/sslkeylogfile.py b/analyzer/windows/modules/auxiliary/sslkeylogfile.py index 7565d053c64..2d1dc48ee51 100644 --- a/analyzer/windows/modules/auxiliary/sslkeylogfile.py +++ b/analyzer/windows/modules/auxiliary/sslkeylogfile.py @@ -11,7 +11,14 @@ class SslKeyLogFile(Auxiliary): - """Collect SSLKEYLOGFILE logs from guests.""" + """Collect SSLKEYLOGFILE logs from guests. + + For Schannel (Windows native TLS) key capture, the registry key + HKLM\\SYSTEM\\CurrentControlSet\\Control\\SecurityProviders\\SCHANNEL\\KeyLogging + must have Enable=1 (REG_DWORD). This requires a reboot to take effect, so + it should be baked into the VM snapshot — not set at runtime. + This module handles setting the SSLKEYLOGFILE path at analysis start. + """ def __init__(self, options, config): Auxiliary.__init__(self, options, config) diff --git a/analyzer/windows/modules/packages/jar.py b/analyzer/windows/modules/packages/jar.py index d2d5fef6d4e..dd2c0bc6f18 100644 --- a/analyzer/windows/modules/packages/jar.py +++ b/analyzer/windows/modules/packages/jar.py @@ -2,14 +2,28 @@ # This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org # See the file 'docs/LICENSE' for copying permission. 
+import logging +import os + from lib.common.abstracts import Package from lib.common.constants import OPT_CLASS +log = logging.getLogger(__name__) + class Jar(Package): """Java analysis package.""" PATHS = [ + # javaw.exe preferred (no console window) + ("ProgramFiles", "Java", "jre*", "bin", "javaw.exe"), + ("ProgramFiles", "Java", "jdk*", "bin", "javaw.exe"), + ("ProgramFiles", "Java", "jdk-*", "bin", "javaw.exe"), + ("ProgramFiles", "Microsoft", "jdk-*", "bin", "javaw.exe"), + ("ProgramFiles", "Eclipse Adoptium", "jdk-*", "bin", "javaw.exe"), + ("ProgramFiles", "Eclipse Adoptium", "jre-*", "bin", "javaw.exe"), + ("ProgramFiles", "OpenJDK", "jdk-*", "bin", "javaw.exe"), + # java.exe fallback ("ProgramFiles", "Java", "jre*", "bin", "java.exe"), ("ProgramFiles", "Java", "jdk*", "bin", "java.exe"), ("ProgramFiles", "Java", "jdk-*", "bin", "java.exe"), @@ -18,15 +32,31 @@ class Jar(Package): ("ProgramFiles", "Eclipse Adoptium", "jre-*", "bin", "java.exe"), ("ProgramFiles", "OpenJDK", "jdk-*", "bin", "java.exe"), ] - summary = "Executes a java class using java.exe." - description = f"""Uses 'java.exe -jar [path]' to run the given sample. - However, if the '{OPT_CLASS}' option is specified, use - 'java.exe -cp [path] [class]' to run the named java class.""" + summary = "Executes a .jar file using javaw.exe (or java.exe)." + description = f"""Uses 'javaw.exe -jar [path]' to run the given sample. + Falls back to java.exe if javaw.exe is not available. + If the '{OPT_CLASS}' option is specified, uses '-cp [path] [class]' + to run the named java class instead.""" option_names = (OPT_CLASS,) def start(self, path): java = self.get_path_glob("Java") class_path = self.options.get("class") + java_opts = [] + # When SSLproxy MITM is active, tell Java to use the Windows + # certificate store so it trusts the MITM CA without needing + # to import it into Java's cacerts keystore. 
+ if self.options.get("sslproxy"): + java_opts.extend([ + "-Djavax.net.ssl.trustStoreType=Windows-ROOT", + "-Djavax.net.ssl.trustStore=NUL", + ]) + + if java_opts: + os.environ["JAVA_TOOL_OPTIONS"] = " ".join(java_opts) + log.info("Set JAVA_TOOL_OPTIONS=%s", os.environ["JAVA_TOOL_OPTIONS"]) + args = f'-cp "{path}" {class_path}' if class_path else f'-jar "{path}"' + log.info("Executing: %s %s", java, args) return self.execute(java, args, path) diff --git a/analyzer/windows/tests/test_analysis_packages.py b/analyzer/windows/tests/test_analysis_packages.py index d1bd1202198..f1b1ed0b6cd 100644 --- a/analyzer/windows/tests/test_analysis_packages.py +++ b/analyzer/windows/tests/test_analysis_packages.py @@ -149,7 +149,7 @@ def test_inp(self): def test_jar(self): pkg_class = self.class_from_analysis_package("modules.packages.jar") - expected_summary = "Executes a java class using java.exe." + expected_summary = "Executes a .jar file using javaw.exe (or java.exe)." obj = pkg_class() self.assertEqual(expected_summary, obj.summary) self.assertEqual("class", obj.option_names[0]) diff --git a/conf/default/processing.conf.default b/conf/default/processing.conf.default index c176543a58b..8fe384c0eaa 100644 --- a/conf/default/processing.conf.default +++ b/conf/default/processing.conf.default @@ -122,6 +122,14 @@ country_lookup = no # For ipinfo use: Free IP to Country + IP to ASN maxmind_database = data/GeoLite2-Country.mmdb +[decryptpcap] +enabled = yes +# Path to GoGoRoboCap binary (relative to CUCKOO_ROOT or absolute) +gogorobocap = data/gogorobocap/gogorobocap-linux-amd64 +# Decryption source: auto (default), pcap_with_keylog, or sslproxy_synth_pcap +# auto: uses sslproxy synthetic pcap when available, falls back to keylog decryption +pcapsrc = auto + [pcapng] enabled = no diff --git a/conf/default/sslproxy.conf.default b/conf/default/sslproxy.conf.default new file mode 100644 index 00000000000..a79e78dad69 --- /dev/null +++ b/conf/default/sslproxy.conf.default @@ -0,0 +1,20 
@@ +[cfg] +# Path to sslproxy binary +bin = /usr/local/bin/sslproxy + +# CA certificate and key for MITM signing. +# The CA must be trusted by guest VMs (imported into the Windows certificate store). +# Generate with: +# openssl req -new -x509 -days 3650 -keyout ca.key -out ca.crt -nodes \ +# -subj "/CN=My MITM CA" -addext "basicConstraints=critical,CA:TRUE" +ca_cert = data/sslproxy/ca.crt +ca_key = data/sslproxy/ca.key + +# Interface where VMs connect (must match your hypervisor network interface) +interface = virbr0 + +# Firewall mark base for routing SSLproxy upstream connections through VPN. +# Each analysis gets fwmark_base + (task_id % fwmark_range) to avoid conflicts +# between concurrent analyses using different routes. +fwmark_base = 100 +fwmark_range = 900 diff --git a/data/gogorobocap/gogorobocap-linux-amd64 b/data/gogorobocap/gogorobocap-linux-amd64 new file mode 100755 index 00000000000..20e45af835c Binary files /dev/null and b/data/gogorobocap/gogorobocap-linux-amd64 differ diff --git a/lib/cuckoo/core/startup.py b/lib/cuckoo/core/startup.py index 4c4342f2dbf..6bbe35eed20 100644 --- a/lib/cuckoo/core/startup.py +++ b/lib/cuckoo/core/startup.py @@ -495,6 +495,7 @@ def init_rooter(): log.debug("An unexpected error occurred while checking UFW status: %s", e) + def init_routing(): """Initialize and check whether the routing information is correct.""" diff --git a/modules/auxiliary/SSLProxy.py b/modules/auxiliary/SSLProxy.py new file mode 100644 index 00000000000..cb8f1adb419 --- /dev/null +++ b/modules/auxiliary/SSLProxy.py @@ -0,0 +1,215 @@ +import logging +import os +import shlex +import signal +import socket +import subprocess + +from contextlib import closing +from threading import Thread + +from lib.cuckoo.common.abstracts import Auxiliary +from lib.cuckoo.common.config import Config +from lib.cuckoo.common.constants import CUCKOO_ROOT +from lib.cuckoo.core.rooter import rooter + +log = logging.getLogger(__name__) + +sslproxy_cfg = Config("sslproxy") 
+ + +class SSLProxy(Auxiliary): + """Per-analysis SSLproxy TLS interception with STARTTLS support. + + Uses a single SSLproxy autossl listener per analysis. All VM TCP traffic is + NAT REDIRECT'd to it. SSLproxy detects TLS ClientHello on any port and + STARTTLS upgrades, intercepts them with MITM, and passes non-TLS through. + + Enabled per-task via the ``sslproxy=1`` task option. + """ + + def __init__(self): + Auxiliary.__init__(self) + self.sslproxy_thread = None + + def start(self): + self.sslproxy_thread = SSLProxyThread(self.task, self.machine) + self.sslproxy_thread.start() + return True + + def stop(self): + if self.sslproxy_thread: + self.sslproxy_thread.stop() + + +class SSLProxyThread(Thread): + """Thread controlling per-analysis SSLproxy instance.""" + + # Fwmark range for per-analysis upstream VPN routing. + # Each analysis gets fwmark_base + (task_id % fwmark_range). + FWMARK_BASE = 100 + FWMARK_RANGE = 900 + + def __init__(self, task, machine): + Thread.__init__(self) + self.task = task + self.machine = machine + self.storage_dir = os.path.join(CUCKOO_ROOT, "storage", "analyses", + str(self.task.id), "sslproxy") + self.proc = None + self.log_file = None + self.do_run = True + self._rooter_enabled = False + + # Config + self.sslproxy_bin = sslproxy_cfg.cfg.get("bin") + self.ca_cert = sslproxy_cfg.cfg.get("ca_cert") + self.ca_key = sslproxy_cfg.cfg.get("ca_key") + self.interface = sslproxy_cfg.cfg.get("interface") + + # Per-analysis fwmark for upstream VPN routing + fwmark_base = int(sslproxy_cfg.cfg.get("fwmark_base", self.FWMARK_BASE)) + fwmark_range = int(sslproxy_cfg.cfg.get("fwmark_range", self.FWMARK_RANGE)) + self.fwmark = str(fwmark_base + (self.task.id % fwmark_range)) + + # Single autossl port handles everything + self.proxy_port = self._get_unused_port() + self.resultserver_port = str(getattr(self.machine, 'resultserver_port', 2042)) + + # Determine routing table for upstream VPN routing + routing_conf = Config("routing") + self.route = 
self.task.route or routing_conf.routing.route + self.rt_table = "" + if self.route and self.route not in ("none", "None", "drop", "false", "inetsim", "tor"): + if hasattr(routing_conf, self.route): + entry = routing_conf.get(self.route) + self.rt_table = str(getattr(entry, 'rt_table', '')) + elif self.route.startswith("tun"): + self.rt_table = self.route + elif self.route == "internet": + self.rt_table = str(routing_conf.routing.rt_table) if routing_conf.routing.rt_table else "" + + def _get_unused_port(self): + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: + s.bind(("", 0)) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return str(s.getsockname()[1]) + + def _is_sslproxy_requested(self): + """Check if sslproxy=1 is set in task options.""" + for opt in (self.task.options or "").split(","): + opt = opt.strip() + if "=" in opt: + key, val = opt.split("=", 1) + if key.strip() == "sslproxy": + return val.strip() not in ("0", "no", "false", "") + return False + + def run(self): + log.info("SSLProxy thread running for task %s", self.task.id) + if not self._is_sslproxy_requested(): + log.info("SSLProxy not requested for task %s, skipping", self.task.id) + return + + if not self.do_run: + return + + if not self.proxy_port: + log.error("SSLProxy failed to allocate port") + return + + # Set up NAT REDIRECT + per-analysis upstream VPN routing + try: + rooter("sslproxy_enable", self.interface, self.machine.ip, + self.proxy_port, self.resultserver_port, self.rt_table, + self.fwmark) + self._rooter_enabled = True + except Exception as e: + log.exception("Failed to enable SSLproxy iptables rules: %s", e) + return + + try: + self._start_sslproxy() + except Exception as e: + log.error("Failed to start SSLproxy for task %s: %s", self.task.id, e) + self._disable_rooter() + + def _start_sslproxy(self): + """Build command and launch SSLproxy process.""" + os.makedirs(self.storage_dir, exist_ok=True) + + conn_log = os.path.join(self.storage_dir, 
"connections.log") + master_keys = os.path.join(self.storage_dir, "master_keys.log") + pcap_file = os.path.join(self.storage_dir, "sslproxy.pcap") + + # Build command as a list to avoid shell injection + sslproxy_cmd = [ + self.sslproxy_bin, "-D", + "-k", self.ca_key, "-c", self.ca_cert, + "-l", conn_log, "-X", pcap_file, "-M", master_keys, + "-u", "root", "-o", "VerifyPeer=no", "-P", + "autossl", "0.0.0.0", self.proxy_port, "up:80", + ] + + # Launch in per-VM cgroup so iptables cgroup match can route upstream through VPN. + cgroup_procs = f"/sys/fs/cgroup/sslproxy/{self.machine.ip}/cgroup.procs" + shell_cmd = "echo $$ > {} 2>/dev/null; exec {}".format( + shlex.quote(cgroup_procs), + " ".join(shlex.quote(arg) for arg in sslproxy_cmd), + ) + popen_args = ["sudo", "bash", "-c", shell_cmd] + + self.log_file = open(os.path.join(self.storage_dir, "sslproxy.log"), "w") + self.log_file.write(" ".join(sslproxy_cmd) + "\n") + self.log_file.flush() + + try: + self.proc = subprocess.Popen(popen_args, stdout=self.log_file, + stderr=self.log_file, shell=False, + start_new_session=True) + except (OSError, subprocess.SubprocessError): + self.log_file.close() + self.log_file = None + raise + + log.info("Started SSLproxy PID %d for task %s (autossl port=%s, VM=%s, fwmark=%s)", + self.proc.pid, self.task.id, self.proxy_port, self.machine.ip, self.fwmark) + + def _disable_rooter(self): + """Remove per-VM iptables rules.""" + if not self._rooter_enabled: + return + try: + rooter("sslproxy_disable", self.interface, self.machine.ip, + self.proxy_port, self.resultserver_port, self.rt_table, + self.fwmark) + self._rooter_enabled = False + except Exception as e: + log.error("Failed to disable SSLproxy iptables rules: %s", e) + + def stop(self): + self.do_run = False + + try: + if self.proc and self.proc.poll() is None: + log.info("Stopping SSLproxy for task %s", self.task.id) + try: + os.killpg(os.getpgid(self.proc.pid), signal.SIGTERM) + self.proc.wait(timeout=10) + except 
subprocess.TimeoutExpired: + log.warning("SSLproxy did not exit gracefully, killing") + try: + os.killpg(os.getpgid(self.proc.pid), signal.SIGKILL) + self.proc.wait(timeout=5) + except OSError: + pass + except OSError: + pass # Process already exited + except Exception as e: + log.error("Failed to stop SSLproxy: %s", e) + finally: + self.proc = None + if self.log_file: + self.log_file.close() + self.log_file = None + self._disable_rooter() diff --git a/modules/processing/decryptpcap.py b/modules/processing/decryptpcap.py new file mode 100644 index 00000000000..ae1adf71cd1 --- /dev/null +++ b/modules/processing/decryptpcap.py @@ -0,0 +1,221 @@ +import logging +import os +import shutil +import subprocess +import tempfile +from pathlib import Path + +from lib.cuckoo.common.abstracts import Processing +from lib.cuckoo.common.constants import CUCKOO_ROOT +from lib.cuckoo.common.objects import File + +log = logging.getLogger(__name__) + +PCAP_HEADER_SIZE = 24 + + +class DecryptPcap(Processing): + """Generate decrypted pcaps from TLS traffic using GoGoRoboCap. + + Auto-detects the best decryption method: + + 1. If SSLproxy's synthetic PCAP exists (sslproxy/sslproxy.pcap), uses + --sslproxy-clean to strip the prepended TLS ClientHello so Suricata + can do proper protocol identification, then merges with the original + network PCAP for a combined encrypted + decrypted view. + + 2. Otherwise, collects TLS master keys from tlsdump, sslkeylogfile, and + sslproxy master_keys.log, then decrypts dump.pcap via GoGoRoboCap. 
+ + The ``pcapsrc`` config option can override auto-detection: + - ``auto`` (default): try sslproxy synth first, fall back to keylog + - ``pcap_with_keylog``: always use keylog decryption + - ``sslproxy_synth_pcap``: always use sslproxy synthetic PCAP + """ + + key = "decryptpcap" + order = 0 # Run before network (order=1) and suricata (order=1) + + def run(self): + self.key = "decryptpcap" + + pcap_path = Path(self.pcap_path) + analysis_path = Path(self.analysis_path) + decrypted_path = analysis_path / "dump_decrypted.pcap" + mixed_path = analysis_path / "dump_mixed.pcap" + + for p in (decrypted_path, mixed_path): + if p.exists(): + p.unlink() + + if not pcap_path.exists() or pcap_path.stat().st_size == 0: + return {} + + gogorobocap_bin = self.options.get("gogorobocap", "data/gogorobocap/gogorobocap-linux-amd64") + if not os.path.isabs(gogorobocap_bin): + gogorobocap_bin = os.path.join(CUCKOO_ROOT, gogorobocap_bin) + + if not os.path.isfile(gogorobocap_bin) or not os.access(gogorobocap_bin, os.X_OK): + log.error("GoGoRoboCap binary not found or not executable at %s", gogorobocap_bin) + return {} + + pcapsrc = self.options.get("pcapsrc", "auto") + sslproxy_pcap = analysis_path / "sslproxy" / "sslproxy.pcap" + has_synth = sslproxy_pcap.exists() and sslproxy_pcap.stat().st_size > PCAP_HEADER_SIZE + + if pcapsrc == "sslproxy_synth_pcap" or (pcapsrc == "auto" and has_synth): + result = self._process_sslproxy_synth(gogorobocap_bin, pcap_path, analysis_path, decrypted_path, mixed_path) + if result: + return result + if pcapsrc == "sslproxy_synth_pcap": + return {} + # auto mode: synth failed, fall through to keylog + + return self._process_keylog(gogorobocap_bin, pcap_path, analysis_path, decrypted_path, mixed_path) + + def _process_sslproxy_synth(self, gogorobocap_bin, pcap_path, analysis_path, decrypted_path, mixed_path): + """Strip TLS ClientHello from SSLproxy synthetic PCAP, merge with original.""" + sslproxy_pcap = analysis_path / "sslproxy" / "sslproxy.pcap" + if 
not sslproxy_pcap.exists() or sslproxy_pcap.stat().st_size <= PCAP_HEADER_SIZE: + log.debug("No sslproxy.pcap found, nothing to process") + return {} + + sslproxy_clean = analysis_path / "sslproxy" / "sslproxy_clean.pcap" + if not self._run_sslproxy_clean(gogorobocap_bin, sslproxy_pcap, sslproxy_clean): + return {} + + if not sslproxy_clean.exists() or sslproxy_clean.stat().st_size <= PCAP_HEADER_SIZE: + log.debug("sslproxy-clean produced no output") + return {} + + result = {} + + # The cleaned PCAP is the decrypted output + try: + os.link(str(sslproxy_clean), str(decrypted_path)) + except OSError: + shutil.copy2(str(sslproxy_clean), str(decrypted_path)) + result["decrypted_pcap_sha256"] = File(str(decrypted_path)).get_sha256() + + # Merge original (encrypted) + cleaned (decrypted) into mixed + if self._mergecap([pcap_path, sslproxy_clean], mixed_path): + result["mixed_pcap_sha256"] = File(str(mixed_path)).get_sha256() + + return result + + def _process_keylog(self, gogorobocap_bin, pcap_path, analysis_path, decrypted_path, mixed_path): + """Decrypt dump.pcap using collected TLS master keys.""" + key_sources = [ + analysis_path / "tlsdump" / "tlsdump.log", + analysis_path / "aux" / "sslkeylogfile" / "sslkeys.log", + analysis_path / "sslproxy" / "master_keys.log", + ] + available_keys = [k for k in key_sources if k.exists() and k.stat().st_size > 0] + if not available_keys: + return {} + + tmp_keylog_path = None + try: + with tempfile.NamedTemporaryFile( + mode="w", dir=str(analysis_path), suffix=".keylog", delete=False + ) as tmp_keylog: + tmp_keylog_path = tmp_keylog.name + for key_file in available_keys: + for line in key_file.read_text().splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + # Skip SSLproxy placeholder entries (all-zero secrets) + if line.endswith("0" * 96): + continue + tmp_keylog.write(line + "\n") + + if not self._run_gogorobocap(gogorobocap_bin, pcap_path, tmp_keylog_path, "decrypted", decrypted_path): + 
return {} + + if not self._run_gogorobocap(gogorobocap_bin, pcap_path, tmp_keylog_path, "mixed", mixed_path): + if decrypted_path.exists(): + decrypted_path.unlink() + return {} + + finally: + if tmp_keylog_path and os.path.exists(tmp_keylog_path): + os.unlink(tmp_keylog_path) + + result = {} + if decrypted_path.exists() and decrypted_path.stat().st_size > PCAP_HEADER_SIZE: + result["decrypted_pcap_sha256"] = File(str(decrypted_path)).get_sha256() + if mixed_path.exists() and mixed_path.stat().st_size > PCAP_HEADER_SIZE: + result["mixed_pcap_sha256"] = File(str(mixed_path)).get_sha256() + + return result + + def _run_sslproxy_clean(self, binary, input_pcap, output_pcap): + """Run GoGoRoboCap --sslproxy-clean to strip TLS ClientHello from synthetic PCAPs.""" + cmd = [ + str(binary), + "-sslproxy-clean", + "-i", str(input_pcap), + "-o", str(output_pcap), + ] + log.debug("Running GoGoRoboCap sslproxy-clean: %s", " ".join(cmd)) + try: + result = subprocess.run(cmd, capture_output=True, timeout=300) + if result.returncode != 0: + log.error( + "GoGoRoboCap (sslproxy-clean) failed with code %d: %s", + result.returncode, result.stderr.decode(errors="replace") + ) + return False + log.info("GoGoRoboCap sslproxy-clean: %s", result.stdout.decode(errors="replace").strip()) + return True + except subprocess.TimeoutExpired: + log.error("GoGoRoboCap (sslproxy-clean) timed out after 300s") + return False + except OSError as e: + log.error("Failed to execute GoGoRoboCap: %s", e) + return False + + def _mergecap(self, input_pcaps, output_path): + """Merge multiple PCAPs into one using mergecap.""" + cmd = ["mergecap", "-w", str(output_path)] + [str(p) for p in input_pcaps] + log.debug("Running mergecap: %s", " ".join(cmd)) + try: + result = subprocess.run(cmd, capture_output=True, timeout=120) + if result.returncode != 0: + log.error("mergecap failed with code %d: %s", + result.returncode, result.stderr.decode(errors="replace")) + return False + return True + except 
subprocess.TimeoutExpired: + log.error("mergecap timed out after 120s") + return False + except OSError as e: + log.error("Failed to execute mergecap: %s", e) + return False + + def _run_gogorobocap(self, binary, pcap_path, keylog_path, mode, output_path): + """Run GoGoRoboCap with the given mode. Returns True on success.""" + cmd = [ + str(binary), + "-i", str(pcap_path), + "-keylog", str(keylog_path), + "-tlsmode", mode, + "-o", str(output_path), + ] + log.debug("Running GoGoRoboCap: %s", " ".join(cmd)) + try: + result = subprocess.run(cmd, capture_output=True, timeout=300) + if result.returncode != 0: + log.error( + "GoGoRoboCap (%s mode) failed with code %d: %s", + mode, result.returncode, result.stderr.decode(errors="replace") + ) + return False + return True + except subprocess.TimeoutExpired: + log.error("GoGoRoboCap (%s mode) timed out after 300s", mode) + return False + except OSError as e: + log.error("Failed to execute GoGoRoboCap: %s", e) + return False diff --git a/modules/processing/network.py b/modules/processing/network.py index dbe1afc3dc1..891b98749ed 100644 --- a/modules/processing/network.py +++ b/modules/processing/network.py @@ -1535,7 +1535,6 @@ def run(self): return {} global PCAP_TYPE - PCAP_TYPE = check_pcap_file_type(self.pcap_path) self.key = "network" self.ja3_file = self.options.get("ja3_file", os.path.join(CUCKOO_ROOT, "data", "ja3", "ja3fingerprint.json")) if not IS_DPKT: @@ -1546,21 +1545,34 @@ def run(self): log.error('The PCAP file at path "%s" is empty', self.pcap_path) return {} + # Prefer the mixed (original + decrypted TLS) pcap if available + original_pcap_path = self.pcap_path + using_mixed_pcap = False + mixed_pcap_path = os.path.join(self.analysis_path, "dump_mixed.pcap") + if path_exists(mixed_pcap_path) and os.path.getsize(mixed_pcap_path) > 24: + log.info("Using mixed pcap with decrypted TLS traffic: %s", mixed_pcap_path) + self.pcap_path = mixed_pcap_path + using_mixed_pcap = True + + PCAP_TYPE = 
check_pcap_file_type(self.pcap_path) ja3_fprints = self._import_ja3_fprints() - results = {"pcap_sha256": File(self.pcap_path).get_sha256()} + results = {"pcap_sha256": File(original_pcap_path).get_sha256()} self.options["sorted"] = False results.update(Pcap(self.pcap_path, ja3_fprints, self.options).run()) if proc_cfg.network.sort_pcap: - sorted_path = self.pcap_path.replace("dump.", "dump_sorted.") + if using_mixed_pcap: + sorted_path = os.path.join(self.analysis_path, "dump_mixed_sorted.pcap") + else: + sorted_path = self.pcap_path.replace("dump.", "dump_sorted.") sort_pcap(self.pcap_path, sorted_path) if path_exists(sorted_path): results["sorted_pcap_sha256"] = File(sorted_path).get_sha256() self.options["sorted"] = True results.update(Pcap(sorted_path, ja3_fprints, self.options).run()) - if HAVE_HTTPREPLAY: + if HAVE_HTTPREPLAY and not using_mixed_pcap: try: tls_master = self.get_tlsmaster() p2 = Pcap2(self.pcap_path, tls_master, self.network_path).run() diff --git a/modules/processing/suricata.py b/modules/processing/suricata.py index bb00e371152..de2358eac5b 100644 --- a/modules/processing/suricata.py +++ b/modules/processing/suricata.py @@ -127,6 +127,12 @@ def run(self): with suppress(Exception): shutil.rmtree(SURICATA_FILES_DIR_FULL_PATH, ignore_errors=True) + # Prefer the mixed (original + decrypted TLS) pcap if available + mixed_pcap_path = os.path.join(self.analysis_path, "dump_mixed.pcap") + if path_exists(mixed_pcap_path) and os.path.getsize(mixed_pcap_path) > 24: + log.info("Using mixed pcap with decrypted TLS traffic for Suricata: %s", mixed_pcap_path) + self.pcap_path = mixed_pcap_path + if not path_exists(SURICATA_CONF): log.warning("Unable to Run Suricata: Conf File %s does not exist", SURICATA_CONF) return suricata @@ -253,7 +259,13 @@ def run(self): if enabled_passlist and event_key in filter_event_types: if event_key in ("alert", "fileinfo"): filter_key = "http" - search_value = parsed[event_key].get(filter_event_types[filter_key], "") + 
search_value = parsed.get(event_key, {}).get(filter_event_types[filter_key], "") + # HTTP/2: hostname is in request_headers as :authority + if not search_value and filter_key == "http" and parsed.get("http", {}).get("version") == "2": + for h in parsed.get("http", {}).get("request_headers", []): + if h.get("name") == ":authority": + search_value = h.get("value", "") + break for reject in domain_passlist_re: if re.search(reject, search_value): @@ -297,22 +309,50 @@ def run(self): "dstip": parsed["dest_ip"], "timestamp": parsed["timestamp"].replace("T", " "), } - keyword = ("uri", "length", "hostname", "status", "http_method", "contenttype", "ua", "referrer") - keyword_suri = ( - "url", - "length", - "hostname", - "status", - "http_method", - "http_content_type", - "http_user_agent", - "http_refer", - ) - for key, key_s in zip(keyword, keyword_suri): + http_data = parsed.get("http", {}) + if http_data.get("version") == "2" and "request_headers" not in http_data: + # HTTP/2 control frame (SETTINGS, WINDOW_UPDATE, etc.) — skip + continue + if http_data.get("version") == "2" and "request_headers" in http_data: + # HTTP/2: extract fields from pseudo-headers and header arrays + req_headers = {h["name"]: h["value"] for h in http_data.get("request_headers", []) if "name" in h and "value" in h} + # Skip HTTP/2 control frames (SETTINGS, WINDOW_UPDATE, etc.) 
that have no :method + if ":method" not in req_headers: + continue + resp_headers = {h["name"]: h["value"] for h in http_data.get("response_headers", []) if "name" in h and "value" in h} + hlog["hostname"] = req_headers.get(":authority", None) + hlog["uri"] = req_headers.get(":path", None) + hlog["http_method"] = req_headers.get(":method", None) + hlog["ua"] = req_headers.get("user-agent", None) + hlog["referrer"] = req_headers.get("referer", None) + hlog["contenttype"] = resp_headers.get("content-type", None) + try: + hlog["status"] = int(resp_headers.get(":status", 0)) or None + except (ValueError, TypeError): + hlog["status"] = None try: - hlog[key] = parsed["http"].get(key_s, None) - except Exception: - hlog[key] = None + hlog["length"] = int(resp_headers.get("content-length", 0)) or http_data.get("length", None) + except (ValueError, TypeError): + hlog["length"] = http_data.get("length", None) + hlog["protocol"] = "HTTP/2" + else: + # HTTP/1.x: extract fields from top-level keys + keyword = ("uri", "length", "hostname", "status", "http_method", "contenttype", "ua", "referrer") + keyword_suri = ( + "url", + "length", + "hostname", + "status", + "http_method", + "http_content_type", + "http_user_agent", + "http_refer", + ) + for key, key_s in zip(keyword, keyword_suri): + try: + hlog[key] = http_data.get(key_s, None) + except Exception: + hlog[key] = None suricata["http"].append(hlog) elif parsed["event_type"] == "tls": diff --git a/utils/rooter.py b/utils/rooter.py index a87c7115a09..45f1cd018e8 100644 --- a/utils/rooter.py +++ b/utils/rooter.py @@ -124,6 +124,19 @@ def run_iptables(*args, **kwargs): return run(*iptables_args) +# SSLproxy TPROXY/NFQUEUE rules must use iptables-legacy because the nftables +# compat layer does not correctly propagate NFQUEUE verdict marks within the +# same mangle chain traversal. The legacy xtables backend handles this correctly. 
+IPTABLES_LEGACY = "/usr/sbin/iptables-legacy" + + +def run_iptables_legacy(*args): + iptables_args = [IPTABLES_LEGACY] + iptables_args.extend(list(args)) + iptables_args.extend(["-m", "comment", "--comment", "CAPE-rooter"]) + return run(*iptables_args) + + def cleanup_rooter(): """Filter out all CAPE rooter entries from iptables-save and restore the resulting ruleset.""" @@ -149,6 +162,23 @@ def cleanup_rooter(): run_iptables("-I", "FORWARD", "-j", "CAPE_REJECTED_SEGMENTS") run_iptables("-I", "FORWARD", "-j", "CAPE_ACCEPTED_SEGMENTS") + # Clean up any leftover SSLproxy iptables-legacy mangle rules tagged with CAPE-rooter. + # Only remove CAPE rules, not unrelated host firewall rules. + if os.path.isfile(IPTABLES_LEGACY): + for chain in ("FORWARD", "PREROUTING", "POSTROUTING"): + try: + output = subprocess.check_output( + [IPTABLES_LEGACY, "-t", "mangle", "-S", chain], + stderr=subprocess.DEVNULL, + universal_newlines=True, + ) + for line in reversed(output.strip().splitlines()): + if "CAPE-rooter" in line and line.startswith("-A "): + delete_args = line.replace("-A ", "-D ", 1).split() + run(IPTABLES_LEGACY, "-t", "mangle", *delete_args) + except (subprocess.CalledProcessError, OSError): + pass + def nic_available(interface): """Check if specified network interface is available.""" @@ -977,6 +1007,72 @@ def drop_disable(ipaddr, resultserver_port): run_iptables("-D", "OUTPUT", "--destination", ipaddr, "-j", "DROP") +def sslproxy_enable(interface, client, proxy_port, resultserver_port, rt_table="", fwmark=""): + """Enable SSLproxy interception for a specific VM. + + NAT REDIRECT sends all VM TCP (except ResultServer) to SSLproxy. + Per-analysis fwmark + ip rule routes SSLproxy upstream through the correct VPN. 
+ """ + log.info("Enabling SSLproxy for client %s (port=%s, fwmark=%s)", client, proxy_port, fwmark) + + # Exclude ResultServer traffic + run_iptables("-t", "nat", "-I", "PREROUTING", "1", + "-i", interface, "--source", client, "-p", "tcp", + "--dport", resultserver_port, "-j", "ACCEPT") + + # Redirect all other TCP from this VM to SSLproxy autossl listener + run_iptables("-t", "nat", "-I", "PREROUTING", "2", + "-i", interface, "--source", client, "-p", "tcp", + "-j", "REDIRECT", "--to", proxy_port) + + # Accept connections to the SSLproxy listener port + run_iptables("-A", "INPUT", "-i", interface, "-p", "tcp", + "--dport", proxy_port, "-m", "state", "--state", "NEW", "-j", "ACCEPT") + + # Per-analysis cgroup + fwmark for upstream VPN routing + cgroup_path = f"sslproxy/{client}" + cgroup_dir = f"/sys/fs/cgroup/{cgroup_path}" + run("mkdir", "-p", cgroup_dir) + if rt_table and fwmark: + mark_hex = f"0x{int(fwmark):x}" + run_iptables("-t", "mangle", "-A", "OUTPUT", + "-m", "cgroup", "--path", cgroup_path, + "-p", "tcp", "-j", "MARK", "--set-mark", mark_hex) + run(ServicePaths.ip, "rule", "add", "fwmark", fwmark, "lookup", rt_table, "priority", "32750") + log.info("SSLproxy upstream routing: cgroup %s → fwmark %s → table %s", cgroup_path, fwmark, rt_table) + + +def sslproxy_disable(interface, client, proxy_port, resultserver_port, rt_table="", fwmark=""): + """Disable SSLproxy interception for a specific VM.""" + log.info("Disabling SSLproxy for client %s (fwmark=%s)", client, fwmark) + + # Remove ResultServer exclusion + run_iptables("-t", "nat", "-D", "PREROUTING", + "-i", interface, "--source", client, "-p", "tcp", + "--dport", resultserver_port, "-j", "ACCEPT") + + # Remove NAT REDIRECT + run_iptables("-t", "nat", "-D", "PREROUTING", + "-i", interface, "--source", client, "-p", "tcp", + "-j", "REDIRECT", "--to", proxy_port) + + # Remove INPUT accept + run_iptables("-D", "INPUT", "-i", interface, "-p", "tcp", + "--dport", proxy_port, "-m", "state", "--state", 
"NEW", "-j", "ACCEPT") + + # Remove cgroup and routing rules + cgroup_path = f"sslproxy/{client}" + cgroup_dir = f"/sys/fs/cgroup/{cgroup_path}" + if rt_table and fwmark: + mark_hex = f"0x{int(fwmark):x}" + run_iptables("-t", "mangle", "-D", "OUTPUT", + "-m", "cgroup", "--path", cgroup_path, + "-p", "tcp", "-j", "MARK", "--set-mark", mark_hex) + run(ServicePaths.ip, "rule", "del", "fwmark", fwmark, "lookup", rt_table, "priority", "32750") + run("rmdir", cgroup_dir) + + + handlers = { "nic_available": nic_available, "rt_available": rt_available, @@ -1015,6 +1111,8 @@ def drop_disable(ipaddr, resultserver_port): "polarproxy_disable": polarproxy_disable, "libvirt_fwo_enable": libvirt_fwo_enable, "libvirt_fwo_disable": libvirt_fwo_disable, + "sslproxy_enable": sslproxy_enable, + "sslproxy_disable": sslproxy_disable, } if __name__ == "__main__": diff --git a/web/analysis/views.py b/web/analysis/views.py index 7fcd99cde34..29bc58f2b98 100644 --- a/web/analysis/views.py +++ b/web/analysis/views.py @@ -980,6 +980,9 @@ def load_files(request, task_id, category): decrypted_pcap_path = os.path.join(ANALYSIS_BASE_PATH, "analyses", str(task_id), "dump_decrypted.pcap") if _path_safe(decrypted_pcap_path): ajax_response["decrypted_pcap_exists"] = True + mixed_pcap_path = os.path.join(ANALYSIS_BASE_PATH, "analyses", str(task_id), "dump_mixed.pcap") + if _path_safe(mixed_pcap_path): + ajax_response["mixed_pcap_exists"] = True elif category == "behavior": ajax_response["detections2pid"] = data.get("detections2pid", {}) return render(request, page, ajax_response) @@ -1263,9 +1266,10 @@ def gen_moloch_from_suri_http(suricata): + "?date=-1&expression=http.user-agent" + quote("\x3d\x3d\x22%s\x22" % (e["ua"].encode()), safe="") ) - if e.get("method"): + http_method = e.get("http_method") or e.get("method") + if http_method: e["moloch_http_method_url"] = ( - settings.MOLOCH_BASE + "?date=-1&expression=http.method" + quote("\x3d\x3d\x22%s\x22" % (e["method"]), safe="") + 
settings.MOLOCH_BASE + "?date=-1&expression=http.method" + quote("\x3d\x3d\x22%s\x22" % (http_method), safe="") ) return suricata @@ -2136,10 +2140,23 @@ def file(request, category, task_id, dlfile): elif category.startswith("memdumpzip"): path = os.path.join(CUCKOO_ROOT, "storage", "analyses", task_id, "memory", file_name + ".dmp") file_name += ".dmp" - elif category in ("pcap", "pcapzip"): + elif category == "pcap": file_name += ".pcap" path = os.path.join(CUCKOO_ROOT, "storage", "analyses", task_id, "dump.pcap") cd = "application/vnd.tcpdump.pcap" + elif category == "pcapzip": + analysis_dir = os.path.join(CUCKOO_ROOT, "storage", "analyses", task_id) + pcap_files = [ + ("dump.pcap", os.path.join(analysis_dir, "dump.pcap")), + ("dump_decrypted.pcap", os.path.join(analysis_dir, "dump_decrypted.pcap")), + ("dump_mixed.pcap", os.path.join(analysis_dir, "dump_mixed.pcap")), + ("sslproxy.pcap", os.path.join(analysis_dir, "sslproxy", "sslproxy.pcap")), + ("sslproxy_clean.pcap", os.path.join(analysis_dir, "sslproxy", "sslproxy_clean.pcap")), + ] + path = [p for _, p in pcap_files if path_exists(p) and os.path.getsize(p) > 0] + if not path: + path = os.path.join(analysis_dir, "dump.pcap") + cd = "application/zip" elif category == "pcapng": analysis_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", task_id) pcap_path = os.path.join(analysis_path, "dump.pcap") @@ -2154,6 +2171,10 @@ def file(request, category, task_id, dlfile): path = os.path.join(CUCKOO_ROOT, "storage", "analyses", task_id, "dump_decrypted.pcap") file_name += ".pcap" cd = "application/vnd.tcpdump.pcap" + elif category == "mixed_pcap": + path = os.path.join(CUCKOO_ROOT, "storage", "analyses", task_id, "dump_mixed.pcap") + file_name += ".pcap" + cd = "application/vnd.tcpdump.pcap" elif category == "debugger_log": path = os.path.join(CUCKOO_ROOT, "storage", "analyses", task_id, "debugger", str(dlfile) + ".log") elif category == "rtf": diff --git a/web/templates/analysis/network/_suricata_http.html 
b/web/templates/analysis/network/_suricata_http.html index 2f7d5f3cdde..3244be161d7 100644 --- a/web/templates/analysis/network/_suricata_http.html +++ b/web/templates/analysis/network/_suricata_http.html @@ -58,7 +58,7 @@
Suricata [MLCH] {% endif %} - {{http.method}} + {{ http.http_method|default:http.method }} {% if http.moloch_http_method_url %} [MLCH] {% endif %} diff --git a/web/templates/analysis/network/index.html b/web/templates/analysis/network/index.html index 8d2e64c6a63..c8dd65abad5 100644 --- a/web/templates/analysis/network/index.html +++ b/web/templates/analysis/network/index.html @@ -12,6 +12,12 @@ {% if mitmdump_exists %} Mitmdump {% endif %} + {% if decrypted_pcap_exists %} + Decrypted PCAP + {% endif %} + {% if mixed_pcap_exists %} + Mixed PCAP + {% endif %} {% endif %}