diff --git a/analyzer/windows/analyzer.py b/analyzer/windows/analyzer.py
index 6d8ce888b8a..057f1c801a1 100644
--- a/analyzer/windows/analyzer.py
+++ b/analyzer/windows/analyzer.py
@@ -527,8 +527,19 @@ def configure_aux_from_data(instance):
# Walk through the available auxiliary modules.
aux_modules = []
- for module in sorted(Auxiliary.__subclasses__(), key=lambda x: x.start_priority, reverse=True):
+ def get_all_subclasses(cls):
+ all_subclasses = []
+ for subclass in cls.__subclasses__():
+ all_subclasses.append(subclass)
+ all_subclasses.extend(get_all_subclasses(subclass))
+ return all_subclasses
+
+ for module in sorted(get_all_subclasses(Auxiliary), key=lambda x: x.start_priority, reverse=True):
try:
+ # this is not a real module, ignore it
+ if module.__name__ == "ETWAuxiliaryWrapper":
+ continue
+
aux = module(self.options, self.config)
log.debug('Initialized auxiliary module "%s"', module.__name__)
aux_modules.append(aux)
diff --git a/analyzer/windows/lib/common/etw_utils.py b/analyzer/windows/lib/common/etw_utils.py
new file mode 100644
index 00000000000..484f001880b
--- /dev/null
+++ b/analyzer/windows/lib/common/etw_utils.py
@@ -0,0 +1,139 @@
+import json
+import logging
+import pprint
+from collections.abc import Iterable, Mapping
+
+from lib.common.abstracts import Auxiliary
+from lib.core.config import Config
+
+log = logging.getLogger(__name__)
+
# Optional dependency guard for pywintrace, which provides the ETW capture
# machinery. ETW starts as False so the ETWProviderWrapper class header
# ("ETW if HAVE_ETW else object") is always resolvable.
ETW = False
HAVE_ETW = False
try:
    from etw import ETW, ProviderInfo  # noqa: F401
    from etw import evntrace as et  # noqa: F401
    from etw.GUID import GUID  # noqa: F401

    HAVE_ETW = True
except ImportError as e:
    # Keep the reason around so ETWAuxiliaryWrapper can log it.
    ETW_IMPORT_ERROR = str(e)
    # Define placeholders for the re-exported names so that
    # "from lib.common.etw_utils import ProviderInfo, GUID, et" in the
    # auxiliary modules does not itself raise ImportError when pywintrace
    # is absent; callers must check HAVE_ETW before using them.
    ProviderInfo = GUID = et = None
else:
    ETW_IMPORT_ERROR = None
+
+
def encode(data, encoding="utf-8"):
    """Recursively encode every string inside *data* to bytes.

    Strings become bytes (undecodable characters are dropped via "ignore"),
    mappings and other iterables are rebuilt with their contents encoded,
    and all other values pass through unchanged.
    """
    if isinstance(data, str):
        return data.encode(encoding, "ignore")
    if isinstance(data, Mapping):
        # Encode keys and values alike, mirroring encode() on each item pair.
        return {encode(k, encoding=encoding): encode(v, encoding=encoding) for k, v in data.items()}
    if isinstance(data, Iterable):
        # Rebuild the same container type (list, tuple, set, ...) around
        # the encoded elements.
        return type(data)(encode(item, encoding=encoding) for item in data)
    return data
+
+
class ETWProviderWrapper(ETW if HAVE_ETW else object):
    """Common base for ETW capture sessions.

    Wraps pywintrace's ``ETW`` class with shared defaults, an event handler
    that filters/logs events, and setup/teardown hooks. When pywintrace is
    not installed (``HAVE_ETW`` is False) the class falls back to ``object``
    and every method degrades to a no-op.
    """

    def __init__(
        self,
        session_name,
        providers,
        event_id_filters=None,
        ring_buf_size=1024,
        max_str_len=1024,
        min_buffers=0,
        max_buffers=0,
        filters=None,
        event_callback=None,
        logfile=None,
        no_conout=False,
    ):
        """Configure the ETW session.

        :param session_name: name of the ETW trace session.
        :param providers: list of pywintrace ``ProviderInfo`` objects to subscribe to.
        :param event_id_filters: event ids to keep; empty/None accepts all events.
        :param ring_buf_size: size of the capture ring buffer (pywintrace default-style).
        :param max_str_len: maximum captured string length.
        :param min_buffers: minimum number of trace buffers (0 = let ETW decide).
        :param max_buffers: maximum number of trace buffers (0 = let ETW decide).
        :param filters: task-name filters passed through to pywintrace.
        :param event_callback: optional override for the per-event callback;
            defaults to :meth:`on_event`.
        :param logfile: open file handle to receive one JSON line per event, or None.
        :param no_conout: when True, suppress per-event console/log output.
        """
        # Without pywintrace there is no ETW base class to initialize.
        if not HAVE_ETW:
            return

        self.logfile = logfile
        self.no_conout = no_conout
        # Default to this class's own handler unless the caller supplies one.
        self.event_callback = event_callback or self.on_event
        self.event_id_filters = event_id_filters or []

        super().__init__(
            session_name=session_name,
            ring_buf_size=ring_buf_size,
            max_str_len=max_str_len,
            min_buffers=min_buffers,
            max_buffers=max_buffers,
            event_callback=self.event_callback,
            task_name_filters=filters,
            providers=providers,
            event_id_filters=self.event_id_filters,
        )

    def on_event(self, event_tufo):
        """Handle one captured event.

        :param event_tufo: ``(event_id, event_dict)`` tuple supplied by pywintrace.
        """
        event_id, event = event_tufo

        # Drop events outside the requested id set (an empty set accepts all).
        if self.event_id_filters and event_id not in self.event_id_filters:
            return

        if not self.no_conout:
            log.info("%d (%s)\n%s\n", event_id, event.get("Task Name", ""), pprint.pformat(encode(event)))

        if self.logfile:
            self.write_to_log(self.logfile, event_id, event)

    def write_to_log(self, file_handle, event_id, event):
        """Append the event to *file_handle* as a single JSON line (jsonl)."""
        json.dump({"event_id": event_id, "event": event}, file_handle)
        file_handle.write("\n")

    def start(self):
        """Run the subclass setup hook, then start the trace (no-op without pywintrace)."""
        if HAVE_ETW:
            self.do_capture_setup()
            super().start()

    def stop(self):
        """Stop the trace, then run the subclass teardown hook (no-op without pywintrace)."""
        if HAVE_ETW:
            super().stop()
            self.do_capture_teardown()

    def do_capture_setup(self):
        # Hook for subclasses: work to do before the capture starts.
        pass

    def do_capture_teardown(self):
        # Hook for subclasses: work to do after the capture stops.
        pass
+
+
class ETWAuxiliaryWrapper(Auxiliary):
    """Common base for auxiliary modules backed by an ETW capture.

    Subclasses assign an :class:`ETWProviderWrapper` instance to
    ``self.capture`` and override :meth:`upload_results` to ship the
    collected artifacts to the host when the capture stops.
    """

    def __init__(self, options, config, enabled_attr):
        """Initialize the auxiliary module.

        :param options: task options dict passed to all auxiliary modules.
        :param config: analyzer configuration object.
        :param enabled_attr: name of the analysis.conf attribute (e.g.
            ``"amsi_etw"``) that enables this module.
        """
        Auxiliary.__init__(self, options, config)
        self.config = Config(cfg="analysis.conf")
        # Missing attribute means the module was not requested for this task.
        self.enabled = getattr(self.config, enabled_attr, False)
        self.do_run = self.enabled
        # The subclass is responsible for assigning a real capture object.
        self.capture = None

        if not HAVE_ETW:
            log.debug(
                "Could not load auxiliary module %s due to '%s'\n"
                "In order to use ETW functionality, it is required to have pywintrace setup in python",
                self.__class__.__name__,
                ETW_IMPORT_ERROR,
            )

    def start(self):
        """Start the capture.

        :return: False when the module is disabled or ETW is unavailable,
            True otherwise (even if starting the capture raised — the error
            is logged and analysis continues).
        """
        if not self.enabled or not HAVE_ETW:
            return False
        try:
            log.debug("Starting %s", self.__class__.__name__)
            if self.capture:
                self.capture.start()
        except Exception as e:
            log.exception("Error starting %s: %s", self.__class__.__name__, e)
        return True

    def stop(self):
        """Stop the capture and upload collected results (no-op without a capture)."""
        if not HAVE_ETW or not self.capture:
            return
        log.debug("Stopping %s...", self.__class__.__name__)
        self.capture.stop()
        self.upload_results()

    def upload_results(self):
        # Hook for subclasses: upload collected artifacts to the host.
        pass
diff --git a/analyzer/windows/modules/auxiliary/amsi_etw.py b/analyzer/windows/modules/auxiliary/amsi_etw.py
index 684e2cbcca7..2a3dd87ecfd 100644
--- a/analyzer/windows/modules/auxiliary/amsi_etw.py
+++ b/analyzer/windows/modules/auxiliary/amsi_etw.py
@@ -1,73 +1,36 @@
-"""
-This module captures AMSI events via ETW, uploading script contents (powershell, WMI, macros, etc)
-to aux/amsi_etw and saving trace details to be reported by the amsi_etw processing module.
-
-It is a reimplementation of the SecureWorks amsi_collector and amsi modules, adapted to
-use the CCCS event tracing module format.
-
-Installation of the pywintrace python library on the guest is mandatory.
-Setting the option 'amsi_etw_assemblies=1' during tasking will cause full CLR assemblies
-to be collected as well.
-"""
+import binascii
import json
import logging
import os
import tempfile
-import binascii
-from lib.common.abstracts import Auxiliary
from lib.common.results import upload_buffer_to_host, upload_to_host
-from lib.core.config import Config
+from lib.common.etw_utils import (
+ ETWAuxiliaryWrapper,
+ ETWProviderWrapper,
+ HAVE_ETW,
+ ProviderInfo,
+ GUID,
+)
log = logging.getLogger(__name__)
-ETW = False
-HAVE_ETW = False
-try:
- from etw import ETW, ProviderInfo
- from etw.GUID import GUID
-
- HAVE_ETW = True
-except ImportError as e:
- log.debug(
- "Could not load auxiliary module AMSI_ETW due to '%s'\nIn order to use AMSI_ETW functionality, it "
- "is required to have pywintrace setup in python", str(e)
- )
if HAVE_ETW:
- class ETW_provider(ETW):
+ class AMSIETWProvider(ETWProviderWrapper):
def __init__(
self,
+ logfile=None,
+ upload_prefix="aux/amsi_etw",
+ upload_assemblies=False,
ring_buf_size=1024,
max_str_len=1024,
min_buffers=0,
max_buffers=0,
filters=None,
- event_callback=None,
- logfile=None,
- upload_prefix="aux/amsi_etw",
- upload_assemblies=False
):
- """
- Initializes an instance of AMSI_ETW. The default parameters represent a very typical use case and should not be
- overridden unless the user knows what they are doing.
-
- :param ring_buf_size: The size of the ring buffer used for capturing events.
- :param max_str_len: The maximum length of the strings the proceed the structure.
- Unless you know what you are doing, do not modify this value.
- :param min_buffers: The minimum number of buffers for an event tracing session.
- Unless you know what you are doing, do not modify this value.
- :param max_buffers: The maximum number of buffers for an event tracing session.
- Unless you know what you are doing, do not modify this value.
- :param filters: List of filters to apply to capture.
- :param logfile: Path to logfile.
- :param upload_prefix: Path to upload results to. Must be approved in resultserver.py.
- :param upload_assemblies: Whether to also upload the content of dotnet assemblies.
- """
self.upload_prefix = upload_prefix
- self.log_file = logfile
- self.event_callback = self.on_event
self.upload_assemblies = upload_assemblies
providers = [
@@ -79,119 +42,91 @@ def __init__(
all_keywords=None,
)
]
- self.event_id_filters = [1101]
+
super().__init__(
session_name="ETW_AMSI",
+ providers=providers,
+ event_id_filters=[1101],
ring_buf_size=ring_buf_size,
max_str_len=max_str_len,
min_buffers=min_buffers,
max_buffers=max_buffers,
- event_callback=self.event_callback,
- task_name_filters=filters,
- providers=providers,
- event_id_filters=self.event_id_filters,
+ filters=filters,
+ logfile=logfile,
)
def on_event(self, event_tufo):
- """
- Starts the capture using ETW.
- :param event_tufo: tufo containing event information
- :param logfile: Path to logfile.
- :return: Does not return anything.
- """
event_id, event = event_tufo
content = event.pop("content", None)
if content:
dump_path = f"{self.upload_prefix}/{event['hash'][2:].lower()}"
- decoded_content = binascii.unhexlify(content[2:])
- if event.get("appname", "") in ("DotNet", "coreclr"):
- # The content is the full in-memory .NET assembly PE.
- if self.upload_assemblies:
- event['dump_path'] = dump_path+".bin"
- upload_buffer_to_host(decoded_content, event['dump_path'])
+ try:
+ decoded_content = binascii.unhexlify(content[2:])
+ if event.get("appname", "") in ("DotNet", "coreclr"):
+ # The content is the full in-memory .NET assembly PE.
+ if self.upload_assemblies:
+ event["dump_path"] = dump_path + ".bin"
+ upload_buffer_to_host(decoded_content, event["dump_path"])
+ else:
+ log.debug(
+ "Skipping upload of %d byte CLR assembly - amsi_etw_assemblies option was not set",
+ len(decoded_content),
+ )
else:
- log.debug("Skipping upload of %d byte CLR assembly - amsi_etw_assemblies option was not set", len(decoded_content))
- else:
- # The content is UTF-16 encoded text. We'll store it as utf-8, just like all other text files.
- decoded_content = decoded_content.decode("utf-16", errors="replace").encode("utf-8")
- event['dump_path'] = dump_path+".txt"
- upload_buffer_to_host(decoded_content, event['dump_path'])
-
- if self.log_file:
+ # The content is UTF-16 encoded text. We'll store it as utf-8, just like all other text files.
+ decoded_content = decoded_content.decode(
+ "utf-16", errors="replace"
+ ).encode("utf-8")
+ event["dump_path"] = dump_path + ".txt"
+ upload_buffer_to_host(decoded_content, event["dump_path"])
+ except Exception as e:
+ log.error("Error processing AMSI event content: %s", e)
+
+ if self.logfile:
# Write the event metadata as a line in the jsonl log file.
- json.dump(event, self.log_file)
- self.log_file.write("\n")
-
- def start(self):
- # do pre-capture setup
- self.do_capture_setup()
- super().start()
-
- def stop(self):
- super().stop()
- # do post-capture teardown
- self.do_capture_teardown()
-
- def do_capture_setup(self):
- # do whatever setup for capture here
- pass
-
- def do_capture_teardown(self):
- # do whatever for capture teardown here
- pass
-
- class AMSI_ETW(Auxiliary):
- """ETW logging"""
-
- def __init__(self, options, config):
- Auxiliary.__init__(self, options, config)
-
- self.config = Config(cfg="analysis.conf")
- self.enabled = self.config.amsi_etw
- self.do_run = self.enabled
- self.upload_prefix = "aux/amsi_etw"
- self.upload_assemblies = options.get("amsi_etw_assemblies", False)
- if self.upload_assemblies:
- log.debug("Will upload Dotnet assembly content")
+ json.dump(event, self.logfile)
+ self.logfile.write("\n")
+
+
+class AMSI_ETW(ETWAuxiliaryWrapper):
+ """ETW logging"""
+
+ def __init__(self, options, config):
+ super().__init__(options, config, "amsi_etw")
+
+ self.upload_prefix = "aux/amsi_etw"
+ self.upload_assemblies = options.get("amsi_etw_assemblies", False)
+ if self.upload_assemblies:
+ log.debug("Will upload Dotnet assembly content")
+ else:
+ log.debug("Will discard Dotnet assembly content")
+
+ if HAVE_ETW and self.enabled:
+ self.log_file = tempfile.NamedTemporaryFile(
+ "w", encoding="utf-8", delete=False
+ )
+ self.capture = AMSIETWProvider(
+ logfile=self.log_file,
+ upload_prefix=self.upload_prefix,
+ upload_assemblies=self.upload_assemblies,
+ )
+
+ def upload_results(self):
+ """Upload the file that contains the metadata for all of the events."""
+ if not self.log_file or not os.path.exists(self.log_file.name):
+ log.debug("No logfile to upload")
+ return
+ self.log_file.close()
+
+ try:
+ if os.stat(self.log_file.name).st_size > 0:
+ upload_to_host(self.log_file.name, f"{self.upload_prefix}/amsi.jsonl")
else:
- log.debug("Will discard Dotnet assembly content")
-
- if HAVE_ETW:
- self.log_file = tempfile.NamedTemporaryFile("w", encoding="utf-8", delete=False)
- self.capture = ETW_provider(logfile=self.log_file, upload_prefix=self.upload_prefix,
- upload_assemblies=self.upload_assemblies)
-
- def start(self):
- if not self.enabled or not HAVE_ETW:
- return False
- try:
- log.debug("Starting AMSI ETW")
- # Start AMSI_ETW_provider in the background
- self.capture.start()
- except Exception as e:
- log.exception("An error occurred while starting AMSI ETW: %s", e)
- return True
-
- def stop(self):
- if not HAVE_ETW:
- return
- log.debug("Stopping AMSI_ETW...")
- self.capture.stop()
-
- """Upload the file that contains the metadata for all of the events."""
- if not self.log_file or not os.path.exists(self.log_file.name):
- log.debug("No logfile to upload")
- return
- self.log_file.close()
-
- try:
- if os.stat(self.log_file.name).st_size > 0:
- upload_to_host(self.log_file.name, f"{self.upload_prefix}/amsi.jsonl")
- else:
- log.debug("No AMSI events were collected.")
- except Exception:
- log.exception("Exception was raised while uploading amsi.jsonl")
- raise
- finally:
+ log.debug("No AMSI events were collected.")
+ except Exception:
+ log.exception("Exception was raised while uploading amsi.jsonl")
+ raise
+ finally:
+ if self.log_file and os.path.exists(self.log_file.name):
os.unlink(self.log_file.name)
- self.log_file = None
+ self.log_file = None
diff --git a/analyzer/windows/modules/auxiliary/dns_etw.py b/analyzer/windows/modules/auxiliary/dns_etw.py
index e86a79a4e84..713fb51f61b 100644
--- a/analyzer/windows/modules/auxiliary/dns_etw.py
+++ b/analyzer/windows/modules/auxiliary/dns_etw.py
@@ -2,88 +2,37 @@
import logging
import os
import pprint
-from collections.abc import Iterable, Mapping
-from lib.common.abstracts import Auxiliary
from lib.common.results import upload_to_host
-from lib.core.config import Config
+from lib.common.rand import random_string
+from lib.common.etw_utils import (
+ ETWAuxiliaryWrapper,
+ ETWProviderWrapper,
+ HAVE_ETW,
+ ProviderInfo,
+ GUID,
+ et,
+ encode,
+)
log = logging.getLogger(__name__)
SAFELIST = []
-ETW = False
-HAVE_ETW = False
-try:
- from etw import ETW, ProviderInfo
- from etw import evntrace as et
- from etw.GUID import GUID
-
- HAVE_ETW = True
-except ImportError as e:
- log.debug(
- "Could not load auxiliary module DNS_ETW due to '%s'\nIn order to use DNS_ETW functionality, it "
- "is required to have pywintrace setup in python", str(e)
- )
-
__author__ = "[Canadian Centre for Cyber Security] @CybercentreCanada"
-def encode(data, encoding="utf-8"):
- if isinstance(data, str):
- return data.encode(encoding, "ignore")
- elif isinstance(data, Mapping):
- return dict(map(encode, data.items()))
- elif isinstance(data, Iterable):
- return type(data)(map(encode, data))
- else:
- return data
-
-
if HAVE_ETW:
- class ETW_provider(ETW):
+ class DNSETWProvider(ETWProviderWrapper):
def __init__(
self,
- ring_buf_size=1024,
- max_str_len=1024,
- min_buffers=0,
- max_buffers=0,
level=et.TRACE_LEVEL_INFORMATION,
- any_keywords=None,
- all_keywords=None,
- filters=None,
- event_callback=None,
logfile=None,
no_conout=False,
+ any_keywords=None,
+ all_keywords=None,
):
- """
- Initializes an instance of DNS_ETW. The default parameters represent a very typical use case and should not be
- overridden unless the user knows what they are doing.
-
- :param ring_buf_size: The size of the ring buffer used for capturing events.
- :param max_str_len: The maximum length of the strings the proceed the structure.
- Unless you know what you are doing, do not modify this value.
- :param min_buffers: The minimum number of buffers for an event tracing session.
- Unless you know what you are doing, do not modify this value.
- :param max_buffers: The maximum number of buffers for an event tracing session.
- Unless you know what you are doing, do not modify this value.
- :param level: Logging level
- :param any_keywords: List of keywords to match
- :param all_keywords: List of keywords that all must match
- :param filters: List of filters to apply to capture.
- :param event_callback: Callback for processing events
- :param logfile: Path to logfile.
- :param no_conout: If true does not output live capture to console.
- """
-
- self.logfile = logfile
- self.no_conout = no_conout
- if event_callback:
- self.event_callback = event_callback
- else:
- self.event_callback = self.on_event
-
providers = [
ProviderInfo(
"Microsoft-Windows-DNS-Client",
@@ -93,127 +42,94 @@ def __init__(
all_keywords,
)
]
- self.event_id_filters = [3010, 3020, 60101]
super().__init__(
session_name="ETW_DNS",
- ring_buf_size=ring_buf_size,
- max_str_len=max_str_len,
- min_buffers=min_buffers,
- max_buffers=max_buffers,
- event_callback=self.event_callback,
- task_name_filters=filters,
providers=providers,
- event_id_filters=self.event_id_filters,
+ event_id_filters=[3010, 3020, 60101],
+ logfile=logfile,
+ no_conout=no_conout,
)
def on_event(self, event_tufo):
- """
- Starts the capture using ETW.
- :param event_tufo: tufo containing event information
- :param logfile: Path to logfile.
- :param no_conout: If true does not output live capture to console.
- :return: Does not return anything.
- """
+ # We override on_event because of the specific filtering and SAFELIST check
event_id, event = event_tufo
- # We can filter events based on whatever criteria here in event_tufo/event/event_id
+
if event_id not in self.event_id_filters:
return
- if self.no_conout is False:
- log.info("%d (%s)\n%s\n", event_id, event["Task Name"], pprint.pformat(encode(event)))
- if event["QueryName"] in SAFELIST:
+
+ if not self.no_conout:
+ log.info(
+ "%d (%s)\n%s\n",
+ event_id,
+ event.get("Task Name", ""),
+ pprint.pformat(encode(event)),
+ )
+
+ if event.get("QueryName") in SAFELIST:
return
- # Event 3010 query
- # Pid --> event["EventHeader"]["ProcessId"]
- # threadid --> event["EventHeader"]["ThreadId"]
- # queryname --> event["QueryName"]
- # dnsserveraddress --> event["DnsServerIpAddress"]
- # Event 3020 response
- # Pid --> event["EventHeader"]["ProcessId"]
- # threadid --> event["EventHeader"]["ThreadId"]
- # queryname --> event["QueryName"]
- if self.logfile is not None:
- with open(self.logfile, "a") as file:
- if event_id == 3010:
- printed_events = {
- "QueryType": "Query",
- "ProcessId": event["EventHeader"]["ProcessId"],
- "ThreadId": event["EventHeader"]["ThreadId"],
- "QueryName": event["QueryName"],
- "DNS Server": event["DnsServerIpAddress"],
- }
- json.dump(printed_events, file)
- file.write("\n")
- elif event_id == 3020:
- printed_events = {
- "QueryType": "Response",
- "ProcessId": event["EventHeader"]["ProcessId"],
- "ThreadId": event["EventHeader"]["ThreadId"],
- "QueryName": event["QueryName"],
- }
- json.dump(printed_events, file)
- file.write("\n")
- else:
- json.dump(event, file)
- file.write("\n")
-
- def start(self):
- # do pre-capture setup
- self.do_capture_setup()
- super().start()
-
- def stop(self):
- super().stop()
- # do post-capture teardown
- self.do_capture_teardown()
-
- def do_capture_setup(self):
- # do whatever setup for capture here
- pass
- def do_capture_teardown(self):
- # do whatever for capture teardown here
- pass
+ if self.logfile:
+ self.write_to_log(self.logfile, event_id, event)
+
+ def write_to_log(self, file_handle, event_id, event):
+ if event_id == 3010:
+ printed_events = {
+ "QueryType": "Query",
+ "ProcessId": event["EventHeader"]["ProcessId"],
+ "ThreadId": event["EventHeader"]["ThreadId"],
+ "QueryName": event["QueryName"],
+ "DNS Server": event["DnsServerIpAddress"],
+ }
+ json.dump(printed_events, file_handle)
+ file_handle.write("\n")
+ elif event_id == 3020:
+ printed_events = {
+ "QueryType": "Response",
+ "ProcessId": event["EventHeader"]["ProcessId"],
+ "ThreadId": event["EventHeader"]["ThreadId"],
+ "QueryName": event["QueryName"],
+ }
+ json.dump(printed_events, file_handle)
+ file_handle.write("\n")
+ else:
+ json.dump(event, file_handle)
+ file_handle.write("\n")
- class DNS_ETW(Auxiliary):
- """ETW logging"""
- def __init__(self, options, config):
- Auxiliary.__init__(self, options, config)
- self.config = Config(cfg="analysis.conf")
- self.enabled = self.config.dns_etw
- self.do_run = self.enabled
+class DNS_ETW(ETWAuxiliaryWrapper):
+ """ETW logging"""
- self.output_dir = "C:\\etw_dns\\"
- try:
- os.mkdir(self.output_dir)
- except FileExistsError:
- pass
+ def __init__(self, options, config):
+ super().__init__(options, config, "dns_etw")
+
+ self.output_dir = os.path.join("C:\\", random_string(5, 10))
+ try:
+ os.mkdir(self.output_dir)
+ except FileExistsError:
+ pass
- self.log_file = os.path.join(self.output_dir, "dns_provider.log")
- if HAVE_ETW:
- self.capture = ETW_provider(logfile=self.log_file, level=255, no_conout=True)
+ log_file_path = os.path.join(self.output_dir, f"{random_string(5, 10)}.log")
+ self.log_file = None
- def start(self):
- if not self.enabled or not HAVE_ETW:
- return False
+ if HAVE_ETW and self.enabled:
try:
- log.debug("Starting DNS ETW")
- # Start DNS_ETW_provider in the background
- self.capture.start()
+ self.log_file = open(log_file_path, "w", encoding="utf-8")
+ self.capture = DNSETWProvider(
+ logfile=self.log_file, level=255, no_conout=True
+ )
except Exception as e:
- print(e)
- import traceback
-
- log.exception(traceback.format_exc())
- return True
+ log.error("Failed to open DNS ETW log file: %s", e)
- def stop(self):
- if not HAVE_ETW:
- return
- log.debug("Stopping DNS_ETW...")
- self.capture.stop()
- files_to_upload = set()
+ def upload_results(self):
+ if self.log_file:
+ try:
+ self.log_file.close()
+ except Exception as e:
+ log.error("Failed to close DNS ETW log file: %s", e)
+ self.log_file = None
+ files_to_upload = set()
+ if os.path.exists(self.output_dir):
for d in os.listdir(self.output_dir):
path = os.path.join(self.output_dir, d)
if os.path.isfile(path):
@@ -222,15 +138,9 @@ def stop(self):
for f in os.listdir(path):
file_path = os.path.join(path, f)
files_to_upload.add(file_path)
- continue
-
- # Upload the ETW log files to the host.
- log.debug(files_to_upload)
- for f in files_to_upload:
- # Prepend file name with etw to indicate DNS_ETW
- # file_path_list = f.split("\\")
- # file_name = file_path_list[-1]
- # process = file_path_list[-2]
- dumppath = os.path.join("DNS_ETW", "etw_dns.json")
- log.debug("DNS_ETW Aux Module is uploading %s", f)
- upload_to_host(f, dumppath)
+
+ log.debug(files_to_upload)
+ for f in files_to_upload:
+ dumppath = os.path.join("aux", "dns_etw.json")
+ log.debug("DNS_ETW Aux Module is uploading %s", f)
+ upload_to_host(f, dumppath)
diff --git a/analyzer/windows/modules/auxiliary/wmi_etw.py b/analyzer/windows/modules/auxiliary/wmi_etw.py
index cbd2f5e379e..c96ce32db1f 100644
--- a/analyzer/windows/modules/auxiliary/wmi_etw.py
+++ b/analyzer/windows/modules/auxiliary/wmi_etw.py
@@ -1,179 +1,82 @@
-import json
import logging
import os
-import pprint
-from collections.abc import Iterable, Mapping
-from lib.common.abstracts import Auxiliary
from lib.common.results import upload_to_host
-from lib.core.config import Config
+from lib.common.rand import random_string
+from lib.common.etw_utils import (
+ ETWAuxiliaryWrapper,
+ ETWProviderWrapper,
+ HAVE_ETW,
+ ProviderInfo,
+ GUID,
+ et,
+)
log = logging.getLogger(__name__)
-SAFELIST = []
-
-ETW = False
-HAVE_ETW = False
-try:
- from etw import ETW, ProviderInfo
- from etw import evntrace as et
- from etw.GUID import GUID
-
- HAVE_ETW = True
-except ImportError as e:
- log.debug(
- "Could not load auxiliary module WMI_ETW due to '%s'\nIn order to use WMI_ETW functionality, it "
- "is required to have pywintrace setup in python", str(e)
- )
-
__author__ = "[Andrea Oliveri starting from code of Canadian Centre for Cyber Security]"
-def encode(data, encoding="utf-8"):
- if isinstance(data, str):
- return data.encode(encoding, "ignore")
- elif isinstance(data, Mapping):
- return dict(map(encode, data.items()))
- elif isinstance(data, Iterable):
- return type(data)(map(encode, data))
- else:
- return data
-
-
if HAVE_ETW:
- class ETW_provider(ETW):
-
+ class WMIETWProvider(ETWProviderWrapper):
def __init__(
self,
- ring_buf_size=4096,
- max_str_len=4096,
- min_buffers=0,
- max_buffers=0,
- level=et.TRACE_LEVEL_INFORMATION, # If >= 5 print more useless (?) stuff
- any_keywords=None,
- all_keywords=None,
- filters=None,
- event_callback=None,
+ level=et.TRACE_LEVEL_INFORMATION,
logfile=None,
no_conout=False,
):
- """
- Initializes an instance of WMI_ETW. The default parameters represent a very typical use case and should not be
- overridden unless the user knows what they are doing.
-
- :param ring_buf_size: The size of the ring buffer used for capturing events.
- :param max_str_len: The maximum length of the strings the proceed the structure.
- Unless you know what you are doing, do not modify this value.
- :param min_buffers: The minimum number of buffers for an event tracing session.
- Unless you know what you are doing, do not modify this value.
- :param max_buffers: The maximum number of buffers for an event tracing session.
- Unless you know what you are doing, do not modify this value.
- :param level: Logging level
- :param any_keywords: List of keywords to match
- :param all_keywords: List of keywords that all must match
- :param filters: List of filters to apply to capture.
- :param event_callback: Callback for processing events
- :param logfile: Path to logfile.
- :param no_conout: If true does not output live capture to console.
- """
-
- self.logfile = logfile
- self.no_conout = no_conout
- if event_callback:
- self.event_callback = event_callback
- else:
- self.event_callback = self.on_event
-
providers = [
ProviderInfo(
"Microsoft-Windows-WMI-Activity",
GUID("{1418EF04-B0B4-4623-BF7E-D74AB47BBDAA}"),
level,
- any_keywords,
- all_keywords,
)
]
- self.event_id_filters = []
super().__init__(
session_name="WMI_ETW",
- ring_buf_size=ring_buf_size,
- max_str_len=max_str_len,
- min_buffers=min_buffers,
- max_buffers=max_buffers,
- event_callback=self.event_callback,
- task_name_filters=filters,
providers=providers,
- event_id_filters=self.event_id_filters,
+ ring_buf_size=4096,
+ max_str_len=4096,
+ logfile=logfile,
+ no_conout=no_conout,
)
- def on_event(self, event_tufo):
- """
- Starts the capture using ETW.
- :param event_tufo: tufo containing event information
- :param logfile: Path to logfile.
- :param no_conout: If true does not output live capture to console.
- :return: Does not return anything.
- """
- event_id, event = event_tufo
-
- if self.no_conout is False:
- log.info("%d (%s)\n%s\n", event_id, event["Task Name"], pprint.pformat(encode(event)))
- if self.logfile is not None:
- with open(self.logfile, "a") as file:
- json.dump({"event_id": event_id, "event": event}, file)
- file.write("\n")
+class WMI_ETW(ETWAuxiliaryWrapper):
+ """ETW logging"""
- def start(self):
- super().start()
+ def __init__(self, options, config):
+ super().__init__(options, config, "wmi_etw")
- def stop(self):
- super().stop()
+ self.output_dir = os.path.join("C:\\", random_string(5, 10))
+ try:
+ os.mkdir(self.output_dir)
+ except FileExistsError:
+ pass
- class WMI_ETW(Auxiliary):
- """ETW logging"""
+ log_file_path = os.path.join(self.output_dir, f"{random_string(5, 10)}.log")
+ self.log_file = None
- def __init__(self, options, config):
- Auxiliary.__init__(self, options, config)
- self.config = Config(cfg="analysis.conf")
- self.enabled = self.config.wmi_etw
- self.do_run = self.enabled
-
- self.output_dir = "C:\\wmi\\"
+ if HAVE_ETW and self.enabled:
try:
- os.mkdir(self.output_dir)
+ self.log_file = open(log_file_path, "w", encoding="utf-8")
+ self.capture = WMIETWProvider(
+ logfile=self.log_file, level=255, no_conout=True
+ )
except Exception as e:
- print(e)
- import traceback
+ log.error("Failed to open WMI ETW log file: %s", e)
- log.exception(traceback.format_exc())
-
- self.log_file = os.path.join(self.output_dir, "wmi_provider.log")
- if HAVE_ETW:
- self.capture = ETW_provider(logfile=self.log_file, level=255, no_conout=True)
-
- def start(self):
- if not self.enabled or not HAVE_ETW:
- return False
+ def upload_results(self):
+ if self.log_file:
try:
- log.debug("Starting WMI ETW")
- # Start WMI_ETW_provider in the background
- self.capture.start()
+ self.log_file.close()
except Exception as e:
- print(e)
- import traceback
-
- log.exception(traceback.format_exc())
- return True
-
- def stop(self):
- if not HAVE_ETW:
- return
- log.debug("Stopping WMI_ETW...")
- self.capture.stop()
- files_to_upload = set()
+ log.error("Failed to close WMI ETW log file: %s", e)
+ self.log_file = None
+ files_to_upload = set()
+ if os.path.exists(self.output_dir):
for d in os.listdir(self.output_dir):
path = os.path.join(self.output_dir, d)
if os.path.isfile(path):
@@ -182,11 +85,9 @@ def stop(self):
for f in os.listdir(path):
file_path = os.path.join(path, f)
files_to_upload.add(file_path)
- continue
- # Upload the ETW log files to the host.
- log.debug(files_to_upload)
- for f in files_to_upload:
- dumppath = os.path.join("aux", "wmi_etw.json")
- log.debug("WMI_ETW Aux Module is uploading %s", f)
- upload_to_host(f, dumppath)
+ log.debug(files_to_upload)
+ for f in files_to_upload:
+ dumppath = os.path.join("aux", "wmi_etw.json")
+ log.debug("WMI_ETW Aux Module is uploading %s", f)
+ upload_to_host(f, dumppath)
diff --git a/conf/default/processing.conf.default b/conf/default/processing.conf.default
index 332f8d51e80..d6fbb887f08 100644
--- a/conf/default/processing.conf.default
+++ b/conf/default/processing.conf.default
@@ -323,3 +323,7 @@ enabled = no
 # Enable when using the PolarProxy option during analysis. This will merge the tls.pcap containing
 # plain-text TLS streams into the task PCAP.
 enabled = no
+
+# Correlate captured network activity with the guest processes that generated it.
+[network_proc_map]
+enabled = no
diff --git a/modules/processing/network_process_map.py b/modules/processing/network_process_map.py
new file mode 100644
index 00000000000..4acdfc95e80
--- /dev/null
+++ b/modules/processing/network_process_map.py
@@ -0,0 +1,581 @@
+# Copyright (C) 2010-2015 Cuckoo Foundation.
+# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
+# See the file 'docs/LICENSE' for copying permission.
+
+import logging
+from collections import defaultdict
+from contextlib import suppress
+from datetime import datetime
+from urllib.parse import urlparse
+
+from lib.cuckoo.common.abstracts import Processing
+
+log = logging.getLogger(__name__)
+
+
# Lower-cased API names whose behavior-log calls indicate a DNS resolution
# attempt (used to build domain -> process "intents").
DNS_APIS = {
    "getaddrinfo",
    "getaddrinfow",
    "getaddrinfoex",
    "getaddrinfoexw",
    "gethostbyname",
    "gethostbynamew",
    "dnsquery_a",
    "dnsquery_w",
    "dnsqueryex",
    "dnsquery",
}


# Lower-cased WinINet/WinHTTP API names that hint at HTTP activity; their
# URL/host arguments and buffers are mined for host -> process mappings.
HTTP_HINT_APIS = {
    "internetcrackurla",
    "internetcrackurlw",
    "httpsendrequesta",
    "httpsendrequestw",
    "internetsendrequesta",
    "internetsendrequestw",
    "internetconnecta",
    "internetconnectw",
    "winhttpopenrequest",
    "winhttpsendrequest",
    "winhttpconnect",
    "winhttpopen",
}


# Lower-cased SChannel/SSPI API names that hint at TLS traffic; their
# arguments are probed for a server name (SNI-like) value.
TLS_HINT_APIS = {
    "sslencryptpacket",
    "ssldecryptpacket",
    "initializesecuritycontexta",
    "initializesecuritycontextw",
    "initializesecuritycontextexa",
    "initializesecuritycontextexw",
    "acceptsecuritycontext",
}
+
+
+def _norm_domain(d):
+ if not d or not isinstance(d, str):
+ return None
+ d = d.strip().strip(".").lower()
+ return d or None
+
+
+def _parse_behavior_ts(ts_str):
+ """
+ Parse behavior timestamp like: '2026-01-22 23:46:58,199' -> epoch float
+ Returns None if parsing fails.
+ """
+ if not ts_str or not isinstance(ts_str, str):
+ return None
+ with suppress(Exception):
+ dt = datetime.strptime(ts_str, "%Y-%m-%d %H:%M:%S,%f")
+ return dt.timestamp()
+ return None
+
+
def _extract_domain_from_call(call):
    """
    Pull a queried domain name out of a DNS-related behavior call.

    Checks a list of well-known argument names first, then falls back to the
    first string argument that looks vaguely like a hostname (dotted, no
    spaces, at most 10 dots).
    """
    known_names = (
        "hostname", "host", "node", "nodename", "name", "domain",
        "szName", "pszName", "lpName", "query", "queryname", "dns_name",
        "QueryName", "lpstrName", "pName",
    )
    for arg_name in known_names:
        value = _get_arg(call, arg_name)
        if isinstance(value, str) and value.strip():
            return value

    # Heuristic fallback over all string-valued arguments.
    for arg in call.get("arguments") or []:
        value = arg.get("value")
        if not isinstance(value, str):
            continue
        candidate = value.strip()
        if "." in candidate and " " not in candidate and candidate.count(".") <= 10:
            return candidate

    return None
+
+
+def _get_arg(call, name):
+ for a in call.get("arguments", []) or []:
+ if a.get("name") == name:
+ return a.get("value")
+ return None
+
+
def _get_arg_any(call, *names):
    """Return the first matching (non-None) argument value for any of *names*."""
    found = (_get_arg(call, candidate) for candidate in names)
    return next((value for value in found if value is not None), None)
+
+
+def _norm_ip(ip):
+ if ip is None:
+ return None
+ if not isinstance(ip, str):
+ ip = str(ip)
+ ip = ip.strip()
+ return ip or None
+
+
+def _looks_like_http(buf):
+ if not buf or not isinstance(buf, str):
+ return False
+
+ first = buf.splitlines()[0].strip() if buf else ""
+ if not first:
+ return False
+
+ u = first.upper()
+ if u.startswith("HTTP/1.") or u.startswith("HTTP/2"):
+ return True
+
+ methods = (
+ "GET ", "POST ", "HEAD ", "PUT ", "DELETE ", "OPTIONS ", "PATCH ", "TRACE "
+ )
+ if any(u.startswith(m) for m in methods) and " HTTP/1." in u:
+ return True
+
+ if u.startswith("CONNECT ") and " HTTP/1." in u:
+ return True
+
+ return False
+
+
def _http_host_from_buf(buf):
    """Best-effort extraction of the target host from a raw HTTP buffer."""
    if not isinstance(buf, str) or not buf:
        return None

    lines = buf.splitlines()
    if not lines:
        return None

    # Prefer an explicit Host: header (scan at most the first ~50 lines).
    for header in lines[1:50]:
        if header.lower().startswith("host:"):
            return header.split(":", 1)[1].strip()

    # Absolute-form request line, e.g. "GET http://example.com/ HTTP/1.1".
    with suppress(Exception):
        tokens = lines[0].strip().split()
        if len(tokens) >= 2:
            found = _extract_first_url(tokens[1].strip())
            if found:
                name = _host_from_url(found)
                if name:
                    return name

    # CONNECT tunnel request, e.g. "CONNECT host:443 HTTP/1.1".
    with suppress(Exception):
        tokens = lines[0].strip().split()
        if len(tokens) >= 2 and tokens[0].upper() == "CONNECT":
            return tokens[1].strip()

    return None
+
+
+def _safe_int(x):
+ with suppress(Exception):
+ return int(x)
+ return None
+
+
+def _host_from_url(url):
+ if not url or not isinstance(url, str):
+ return None
+
+ with suppress(Exception):
+ u = urlparse(url)
+ return u.hostname
+
+ return None
+
+
+def _extract_first_url(text):
+ if not text or not isinstance(text, str):
+ return None
+ s = text.strip()
+ for scheme in ("http://", "https://"):
+ idx = s.lower().find(scheme)
+ if idx != -1:
+ return s[idx:].split()[0].strip('"\',')
+ return None
+
+
+def _norm_hostkey(host):
+ if not host or not isinstance(host, str):
+ return None
+ h = host.strip().strip(".").lower()
+ return h or None
+
+
def _add_http_host(http_host_map, host, pinfo, sock=None):
    """
    Record a process observation under a normalized host key.

    Registers the normalized host and, when the host carries a parseable
    ":port" suffix, also the bare host -- so lookups succeed for either form.
    """
    key = _norm_hostkey(host)
    if not key:
        return

    record = dict(pinfo)
    if sock is not None:
        record["socket"] = sock

    http_host_map[key].append(record)

    if ":" not in key:
        return
    bare, maybe_port = key.rsplit(":", 1)
    if bare and _safe_int(maybe_port) is not None:
        http_host_map[bare].append(record)
+
+
def _extract_tls_server_name(call):
    """
    Best-effort server-name extraction for TLS/SChannel/SSPI calls.

    Hook argument names vary, so a list of likely names is checked first;
    then the first plausible hostname-like string argument is used.
    """
    likely_names = (
        "sni", "SNI",
        "ServerName", "servername", "server_name",
        "TargetName", "targetname",
        "Host", "host", "hostname",
        "Url", "URL", "url",
    )
    for arg_name in likely_names:
        value = _get_arg(call, arg_name)
        if not (isinstance(value, str) and value.strip()):
            continue
        candidate = value.strip()
        embedded = _extract_first_url(candidate)
        if embedded:
            return _host_from_url(embedded) or candidate
        if "." in candidate and " " not in candidate and len(candidate) < 260:
            return candidate

    # Fallback: first dotted, space-free, reasonably short string argument.
    for arg in call.get("arguments") or []:
        value = arg.get("value")
        if not isinstance(value, str):
            continue
        candidate = value.strip()
        if "." in candidate and " " not in candidate and len(candidate) < 260:
            embedded = _extract_first_url(candidate)
            if embedded:
                return _host_from_url(embedded) or candidate
            return candidate

    return None
+
+
class NetworkProcessMap(Processing):
    """
    Augment existing results["network"] entries with process attribution.

    For TCP/UDP flows, DNS queries and HTTP(S) requests already parsed from
    the PCAP, add (when a matching behavior-log call can be found):

      - process_id
      - process_name

    Both fields are always set (None when unmatched) so report templates
    stay stable.  No separate network_process_map output is produced.
    """

    order = 5

    def _load_behavior(self):
        """Return results["behavior"] if present and non-empty, else None."""
        with suppress(Exception):
            behavior = self.results.get("behavior")
            if behavior:
                return behavior
        return None

    def _load_network(self):
        """Return results["network"] as a dict (empty dict on any failure)."""
        with suppress(Exception):
            return self.results.get("network") or {}
        return {}

    def _build_endpoint_to_process_map(self, behavior):
        """
        Build two lookup tables from behavior-log network calls:

          - endpoint_map[(ip, port)] -> [{process_id, process_name, socket?}, ...]
          - http_host_map[host]      -> [{process_id, process_name, socket?}, ...]
        """
        endpoint_map = defaultdict(list)
        http_host_map = defaultdict(list)

        if not behavior:
            return endpoint_map, http_host_map

        for p in behavior.get("processes") or []:
            pid = p.get("process_id")
            if pid is None:
                continue

            pinfo = {
                "process_id": pid,
                "process_name": p.get("process_name", ""),
            }

            for c in p.get("calls", []):
                if c.get("category") != "network":
                    continue

                api = (c.get("api") or "").lower()
                sock = _get_arg_any(c, "socket", "sock", "fd", "handle")
                ip = _norm_ip(_get_arg_any(c, "ip", "dst", "dstip", "ip_address", "address", "remote_ip", "server"))
                port = _get_arg_any(c, "port", "dport", "dstport", "remote_port", "server_port")
                buf = _get_arg_any(c, "Buffer", "buffer", "buf", "data")

                # Socket-level APIs carrying an explicit remote endpoint.
                if api in ("connect", "wsaconnect", "connectex", "sendto", "wsasendto", "recvfrom", "wsarecvfrom"):
                    p_int = _safe_int(port)
                    if ip and p_int is not None:
                        entry = dict(pinfo)
                        if sock is not None:
                            entry["socket"] = sock

                        endpoint_map[(ip, p_int)].append(entry)

                    # Pure connect calls carry no payload worth inspecting.
                    if api in ("connect", "wsaconnect", "connectex"):
                        continue

                # Plain-text HTTP inside a send buffer.
                if api in ("send", "wsasend", "sendto", "wsasendto") and _looks_like_http(buf):
                    host = _http_host_from_buf(buf)
                    if host:
                        _add_http_host(http_host_map, host, pinfo, sock=sock)

                # WinINet/WinHTTP calls that name a URL or host directly.
                if api in HTTP_HINT_APIS:
                    url = _get_arg_any(c, "url", "lpszUrl", "lpUrl", "uri", "pszUrl", "pUrl")
                    if isinstance(url, str) and url.strip():
                        u = _extract_first_url(url) or url.strip()
                        host = _host_from_url(u)
                        if host:
                            _add_http_host(http_host_map, host, pinfo, sock=sock)

                    if isinstance(buf, str):
                        u2 = _extract_first_url(buf)
                        if u2:
                            host2 = _host_from_url(u2)
                            if host2:
                                _add_http_host(http_host_map, host2, pinfo, sock=sock)

                # TLS/SChannel calls: try the SNI, then a plain-text fallback.
                if api in TLS_HINT_APIS:
                    sni = _extract_tls_server_name(c)
                    if sni:
                        _add_http_host(http_host_map, sni, pinfo, sock=sock)

                    if isinstance(buf, str) and _looks_like_http(buf):
                        host3 = _http_host_from_buf(buf)
                        if host3:
                            _add_http_host(http_host_map, host3, pinfo, sock=sock)

        return endpoint_map, http_host_map

    def _pick_best(self, candidates):
        """Pick the most useful candidate: the first with a name, else the first."""
        if not candidates:
            return None

        for c in candidates:
            if c.get("process_name"):
                return c

        return candidates[0]

    def _build_dns_intents(self, behavior):
        """
        Build: domain -> time-ordered list of
        {"process": {...}, "ts_epoch": float|None, "api": str}
        derived from behavior-log DNS resolution calls.
        """
        intents = defaultdict(list)
        if not behavior:
            return intents

        for p in behavior.get("processes") or []:
            pid = p.get("process_id")
            if pid is None:
                continue

            pinfo = {
                "process_id": pid,
                "process_name": p.get("process_name", ""),
            }

            for c in p.get("calls", []):
                if c.get("category") != "network":
                    continue

                api = (c.get("api") or "").lower()
                if api not in DNS_APIS:
                    continue

                domain = _norm_domain(_extract_domain_from_call(c))
                if not domain:
                    continue

                intents[domain].append(
                    {
                        "process": dict(pinfo),
                        "ts_epoch": _parse_behavior_ts(c.get("timestamp")),
                        "api": api,
                    }
                )

        # Timestamped intents first (ascending); un-timestamped ones last.
        for d in list(intents.keys()):
            intents[d].sort(key=lambda x: (x["ts_epoch"] is None, x["ts_epoch"] or 0.0))

        return intents

    def _match_dns_process(self, dns_entry, dns_intents, max_skew_seconds=10.0):
        """
        Match a network.dns entry to the closest behavior DNS intent by:
          - same (normalized) domain
          - closest timestamp, when both sides carry one and the skew is
            within max_skew_seconds

        Falls back to the earliest intent for the domain.
        Returns a process dict or None.
        """
        req = _norm_domain(dns_entry.get("request"))
        if not req:
            return None

        candidates = dns_intents.get(req) or []
        if not candidates:
            return None

        net_ts = dns_entry.get("first_seen")
        if not isinstance(net_ts, (int, float)):
            return candidates[0].get("process")

        best = None
        best_delta = None
        for c in candidates:
            bts = c.get("ts_epoch")
            if not isinstance(bts, (int, float)):
                continue

            delta = abs(net_ts - bts)
            if best is None or delta < best_delta:
                best = c
                best_delta = delta

        if best is not None and best_delta is not None and best_delta <= max_skew_seconds:
            return best.get("process")

        return candidates[0].get("process")

    def _pcap_first_epoch(self, network):
        """Earliest 'first_seen' epoch across dns/http entries, or None."""
        ts = [
            float(e["first_seen"])
            for k in ("dns", "http")
            for e in (network.get(k) or [])
            if isinstance(e.get("first_seen"), (int, float))
        ]
        return min(ts) if ts else None

    def _build_dns_events_rel(self, network, dns_intents, max_skew_seconds=10.0):
        """
        Return DNS events with PCAP-relative times, sorted ascending:
        [{"t_rel": float, "process": {...}|None, "request": str}, ...]
        """
        out = []
        first_epoch = self._pcap_first_epoch(network)
        if first_epoch is None:
            return out

        for d in network.get("dns") or []:
            first_seen = d.get("first_seen")
            if not isinstance(first_seen, (int, float)):
                continue
            out.append(
                {
                    "t_rel": float(first_seen) - float(first_epoch),
                    "process": self._match_dns_process(d, dns_intents, max_skew_seconds=max_skew_seconds),
                    "request": d.get("request"),
                }
            )

        out.sort(key=lambda x: x["t_rel"])
        return out

    def _nearest_dns_process_by_rel_time(self, dns_events_rel, t_rel, max_skew=5.0):
        """Process of the DNS event closest to t_rel (within max_skew), else None."""
        if not dns_events_rel or not isinstance(t_rel, (int, float)):
            return None

        best = None
        best_delta = None
        for e in dns_events_rel:
            delta = abs(e["t_rel"] - float(t_rel))
            if best is None or delta < best_delta:
                best = e
                best_delta = delta

        if best is not None and best_delta is not None and best_delta <= max_skew:
            return best.get("process")
        return None

    def _set_proc_fields(self, obj, proc):
        """
        Set process_id/process_name on an existing network entry.
        Always sets both keys (None when unmatched) so templates stay stable.
        """
        obj["process_id"] = proc.get("process_id") if proc else None
        obj["process_name"] = proc.get("process_name") if proc else None

    def run(self):
        """Entry point: annotate network entries in-place and return {}."""
        behavior = self._load_behavior()
        network = self._load_network()

        endpoint_map, http_host_map = self._build_endpoint_to_process_map(behavior)

        # TCP flows: direct (dst, dport) endpoint match.
        for flow in network.get("tcp") or []:
            proc = None
            # _safe_int: malformed dport values must not abort processing.
            dport = _safe_int(flow.get("dport"))
            if flow.get("dst") and dport is not None:
                proc = self._pick_best(endpoint_map.get((flow["dst"], dport), []))

            self._set_proc_fields(flow, proc)

        # DNS entries: match against behavior DNS intents by domain/time.
        dns_intents = self._build_dns_intents(behavior)
        dns_events_rel = self._build_dns_events_rel(network, dns_intents, max_skew_seconds=10.0)
        for d in network.get("dns") or []:
            proc = self._match_dns_process(d, dns_intents, max_skew_seconds=10.0)
            self._set_proc_fields(d, proc)

        # UDP flows: endpoint match, then nearest-in-time DNS fallback for port 53.
        for flow in network.get("udp") or []:
            proc = None
            dst = flow.get("dst")
            dport = _safe_int(flow.get("dport"))
            sport = _safe_int(flow.get("sport"))

            if dst and dport is not None:
                proc = self._pick_best(endpoint_map.get((dst, dport), []))

            if not proc and (dport == 53 or sport == 53):
                proc = self._nearest_dns_process_by_rel_time(dns_events_rel, flow.get("time"), max_skew=5.0)

            self._set_proc_fields(flow, proc)

        # HTTP(S) entries: host match (with and without :port), then endpoint.
        for key in ("http", "http_ex", "https_ex"):
            for h in network.get(key) or []:
                proc = None

                host = h.get("host")
                # host may be absent/None; all host handling stays inside this
                # guard (previously '":" in host' could raise TypeError).
                if isinstance(host, str) and host:
                    proc = self._pick_best(http_host_map.get(host, []))

                    if not proc and ":" in host:
                        raw = host.rsplit(":", 1)[0].strip()
                        if raw:
                            proc = self._pick_best(http_host_map.get(raw, []))

                if not proc:
                    dst = h.get("dst")
                    dport = _safe_int(h.get("dport"))
                    if dst and dport is not None:
                        proc = self._pick_best(endpoint_map.get((dst, dport), []))

                self._set_proc_fields(h, proc)

        self.results["network"] = network

        return {}
diff --git a/web/templates/analysis/network/_dns.html b/web/templates/analysis/network/_dns.html
index 73689e690c8..0c914942d53 100644
--- a/web/templates/analysis/network/_dns.html
+++ b/web/templates/analysis/network/_dns.html
@@ -10,6 +10,7 @@
DNS Reque
| Name |
Response |
Post-Analysis Lookup |
+ {% if settings.NETWORK_PROC_MAP %} Process Name (PID) | {% endif %}
{% for p in network.dns %}
@@ -53,6 +54,15 @@ DNS Reque
{% endif %}
{% endif %}
+ {% if settings.NETWORK_PROC_MAP %}
+ |
+ {% if p.process_name %}
+ {{ p.process_name }}{% if p.process_id %} ({{ p.process_id }}){% endif %}
+ {% else %}
+ -
+ {% endif %}
+ |
+ {% endif %}
{% endfor %}
diff --git a/web/templates/analysis/network/_http.html b/web/templates/analysis/network/_http.html
index 1f9bfa82cd5..922bfe1297c 100644
--- a/web/templates/analysis/network/_http.html
+++ b/web/templates/analysis/network/_http.html
@@ -8,6 +8,7 @@ HTTP Re
| URI |
Details |
+ {% if settings.NETWORK_PROC_MAP %} Process Name (PID) | {% endif %}
{% for http in network.http_ex|add:network.https_ex %}
@@ -48,6 +49,15 @@ HTTP Re
+ {% if settings.NETWORK_PROC_MAP %}
+
+ {% if http.process_name %}
+ {{ http.process_name }}{% if http.process_id %} ({{ http.process_id }}){% endif %}
+ {% else %}
+ -
+ {% endif %}
+ |
+ {% endif %}
{% endfor %}
@@ -58,12 +68,22 @@ HTTP Re
| URI |
Data |
+ {% if settings.NETWORK_PROC_MAP %}Process Name (PID) | {% endif %}
{% for request in network.http %}
-
- | {{request.uri}} |
- {{request.data}} |
-
+
+ | {{request.uri}} |
+ {{request.data}} |
+ {% if settings.NETWORK_PROC_MAP %}
+
+ {% if request.process_name %}
+ {{ request.process_name }}{% if request.process_id %} ({{ request.process_id }}){% endif %}
+ {% else %}
+ -
+ {% endif %}
+ |
+ {% endif %}
+
{% endfor %}
diff --git a/web/templates/analysis/network/_tcp.html b/web/templates/analysis/network/_tcp.html
index c4ce645e17e..ed256c02757 100644
--- a/web/templates/analysis/network/_tcp.html
+++ b/web/templates/analysis/network/_tcp.html
@@ -14,6 +14,7 @@ T
| Source Port |
Destination |
Destination Port |
+ {% if settings.NETWORK_PROC_MAP %} Process Name (PID) | {% endif %}
{% for p in network.tcp %}
@@ -25,6 +26,15 @@ T
{% endif %}
| {{p.dport}} |
+ {% if settings.NETWORK_PROC_MAP %}
+
+ {% if p.process_name %}
+ {{ p.process_name }}{% if p.process_id %} ({{ p.process_id }}){% endif %}
+ {% else %}
+ -
+ {% endif %}
+ |
+ {% endif %}
{% endfor %}
diff --git a/web/templates/analysis/network/_udp.html b/web/templates/analysis/network/_udp.html
index beef865bce6..d8a55c606fd 100644
--- a/web/templates/analysis/network/_udp.html
+++ b/web/templates/analysis/network/_udp.html
@@ -14,6 +14,7 @@ U
| Source Port |
Destination |
Destination Port |
+ {% if settings.NETWORK_PROC_MAP %} Process Name (PID) | {% endif %}
{% for p in network.udp %}
@@ -25,6 +26,15 @@ U
{% endif %}
| {{p.dport}} |
+ {% if settings.NETWORK_PROC_MAP %}
+
+ {% if p.process_name %}
+ {{ p.process_name }}{% if p.process_id %} ({{ p.process_id }}){% endif %}
+ {% else %}
+ -
+ {% endif %}
+ |
+ {% endif %}
{% endfor %}
diff --git a/web/templates/submission/index.html b/web/templates/submission/index.html
index b61cd0ee832..e8499ec20f1 100644
--- a/web/templates/submission/index.html
+++ b/web/templates/submission/index.html
@@ -270,218 +270,653 @@ Advance
{% if config.linux_on_gui %}
+
-
+
+
+
+
+
+
+
+
+ Syntax: option1=val1,option2=val2
+
+
+
+ -
+
+
+ -
+
+
+ -
+
+
+ -
+
+
+ -
+
+
+
+
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ filename |
+ Rename the sample file |
+
+
+ file |
+ When using the archive package, set the name of the file to execute |
+
+
+ password |
+ When using the archive package, set the password to use for extraction/decryption. Also used when analyzing password-protected Office documents. |
+
+
+ arguments |
+ Arguments to be passed to the sample (or opened file, if applicable) |
+
+
+ timeout |
+ Analysis timeout (in seconds) |
+
+
+ enforce_timeout |
+ Enforce the timeout even if the process finishes early |
+
+
+ clock |
+ Set the VM clock (YYYYMMDDhhmm or seconds since epoch) |
+
+
+ package |
+ Analysis package to use |
+
+
+
+
+
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ free |
+ Run analysis without the monitor (no behavioral analysis) |
+
+
+ procmemdump |
+ Dump process memory |
+
+
+ simul_human |
+ Simulate human interaction (mouse movements, clicks) |
+
+
+ human_click_interval |
+ Interval between simulated clicks |
+
+
+ human_mouse_speed |
+ Speed of simulated mouse movements |
+
+
+
+
+
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ enable_syslog |
+ Enable syslog capturing |
+
+
+ syslog_port |
+ Port for syslog capturing (default: 514) |
+
+
+
+
+
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ memory |
+ Dump full VM memory |
+
+
+
+
+
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ debug |
+ Enable debugging features |
+
+
+
+
+
+
+
+
{% endif %}
-
-
-
-
-
-
-
-
-
-
-
-
- Syntax: option1=val1,option2=val2
-
-
- -
-
-
- -
-
-
- -
-
-
- -
-
-
- -
-
-
-
-
-
-
-
-
- | Option | Description |
-
- filename | Rename the sample file |
- name | Force family extractor to run (e.g., name=trickbot) |
- curdir | Execution directory (default %TEMP%) |
- executiondir | Directory to launch file from (default %TEMP%) |
- arguments | Arguments for the executable or exported function |
- appdata | Run executable from AppData instead of Temp |
- pwsh | Prefer PowerShell Core (pwsh.exe) |
- free | Run without monitoring (disables many capabilities) |
- ignore_size_check | Allow ignore file size (must be enabled in conf) |
- check_shellcode | Disable shellcode check during package ID (check_shellcode=0) |
- function | Exported function/ordinal to execute (DLL) |
- dllloader | Process loading the DLL (default rundll32.exe) |
- file | Name of file to execute (Zip/Rar) |
- password | Password for extraction/Office |
- startbrowser | Launch browser 30s into analysis |
- browserdelay | Seconds to wait before starting browser |
- url | URL for started browser |
- servicedesc | Service description (Service package) |
- pre_script_args | Args for pre_script |
- during_script_args | Args for during_script |
- lang | Override system language (LCID) |
- standalone | Run in standalone mode (no pipe) |
- monitor | Inject monitor into PID/Explorer |
- shutdown-mutex | Mutex name for shutdown signal |
- terminate-event | Event name for termination signal |
- terminate-processes | Terminate processes on event |
- first-process | (Internal) First process in tree |
- startup-time | MS since system startup |
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+ Syntax: option1=val1,option2=val2
-
-
-
-
- | Option | Description |
-
- no-stealth | Disable anti-anti-VM/sandbox tricks |
- force-sleepskip | 1 = Skip all sleeps, 0 = Disable sleep skipping |
- serial | Spoof the system volume serial number |
- single-process | Limit monitoring to initial process only |
- interactive | Enable interactive desktop mode |
- referrer | Fake referrer for URL analysis |
- norefer | Disable fake referrer |
- file-of-interest | Specific file or URL being analyzed |
- pdf | Adobe Reader specific hooks/behavior |
- sysvol_ctimelow/high | Spoof creation time of system volume |
- fake-rdtsc | Enable fake RDTSC results |
- ntdll-protect | Enable write protection on ntdll.dll code |
- ntdll-unhook | Enable protection against ntdll unhooking |
- protected-pids | Enable protection for critical PIDs |
-
-
+
+ -
+
+
+ -
+
+
+ -
+
+
+ -
+
+
+ -
+
+
+
+
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ filename |
+ Rename the sample file |
+
+
+ name |
+ Force family extractor to run (e.g., name=trickbot) |
+
+
+ curdir |
+ Execution directory (default %TEMP%) |
+
+
+ executiondir |
+ Directory to launch file from (default %TEMP%) |
+
+
+ arguments |
+ Arguments for the executable or exported function |
+
+
+ appdata |
+ Run executable from AppData instead of Temp |
+
+
+ pwsh |
+ Prefer PowerShell Core (pwsh.exe) |
+
+
+ free |
+ Run without monitoring (disables many capabilities) |
+
+
+ ignore_size_check |
+ Allow ignore file size (must be enabled in conf) |
+
+
+ check_shellcode |
+ Disable shellcode check during package ID (check_shellcode=0) |
+
+
+ function |
+ Exported function/ordinal to execute (DLL) |
+
+
+ dllloader |
+ Process loading the DLL (default rundll32.exe) |
+
+
+ file |
+ Name of file to execute (Zip/Rar) |
+
+
+ password |
+ Password for extraction/Office |
+
+
+ startbrowser |
+ Launch browser 30s into analysis |
+
+
+ browserdelay |
+ Seconds to wait before starting browser |
+
+
+ url |
+ URL for started browser |
+
+
+ servicedesc |
+ Service description (Service package) |
+
+
+ pre_script_args |
+ Args for pre_script |
+
+
+ during_script_args |
+ Args for during_script |
+
+
+ lang |
+ Override system language (LCID) |
+
+
+ standalone |
+ Run in standalone mode (no pipe) |
+
+
+ monitor |
+ Inject monitor into PID/Explorer |
+
+
+ shutdown-mutex |
+ Mutex name for shutdown signal |
+
+
+ terminate-event |
+ Event name for termination signal |
+
+
+ terminate-processes |
+ Terminate processes on event |
+
+
+ first-process |
+ (Internal) First process in tree |
+
+
+ startup-time |
+ MS since system startup |
+
+
+
+
-
-
-
-
-
- | Option | Description |
-
- full-logs | Disable log suppression |
- force-flush | 1 = Flush after non-duplicate API, 2 = Force flush every log |
- buffer-max | Max size for log buffer |
- large-buffer-max | Max size for large log buffers |
- api-rate-cap | Limit rate of API logging |
- api-cap | Limit total number of API logs |
- hook-type | Hook type: direct, indirect, or safe (32-bit only) |
- syscall | Enable syscall hooks (Win10+) |
- disable-hook-content | 1 = Remove payload of non-critical hooks, 2 = All hooks |
- exclude-apis | Colon-separated list of APIs to exclude from hooking |
- exclude-dlls | Colon-separated list of DLLs to exclude from hooking |
- unhook-apis | Dynamically unhook functions (colon-separated) |
- coverage-modules | Colon-separated list of DLLs to include in monitoring (exclude from 'dll range' filtering) |
- zerohook | Disable all hooks except essential |
- hook-protect | Enable write protection on hook pages |
- log-exceptions | Enable logging of exceptions |
-
-
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ no-stealth |
+ Disable anti-anti-VM/sandbox tricks |
+
+
+ force-sleepskip |
+ 1 = Skip all sleeps, 0 = Disable sleep skipping |
+
+
+ serial |
+ Spoof the system volume serial number |
+
+
+ single-process |
+ Limit monitoring to initial process only |
+
+
+ interactive |
+ Enable interactive desktop mode |
+
+
+ referrer |
+ Fake referrer for URL analysis |
+
+
+ norefer |
+ Disable fake referrer |
+
+
+ file-of-interest |
+ Specific file or URL being analyzed |
+
+
+ pdf |
+ Adobe Reader specific hooks/behavior |
+
+
+ sysvol_ctimelow/high |
+ Spoof creation time of system volume |
+
+
+ fake-rdtsc |
+ Enable fake RDTSC results |
+
+
+ ntdll-protect |
+ Enable write protection on ntdll.dll code |
+
+
+ ntdll-unhook |
+ Enable protection against ntdll unhooking |
+
+
+ protected-pids |
+ Enable protection for critical PIDs |
+
+
+
+
-
-
-
-
-
- | Option | Description |
-
- procdump | Enable process memory dumping on exit/timeout |
- procmemdump | Enable full process memory dumping |
- dump-on-api | Dump calling module when specific APIs are called (colon-separated) |
- dump-config-region | Dump memory regions suspected to contain C2 config |
- dump-crypto | Dump buffers from Crypto APIs |
- dump-keys | Dump keys from CryptImportKey |
- amsidump | Enable AMSI buffer dumping (Win10+) |
- tlsdump | Enable dumping of TLS secrets |
- dropped-limit | Override default dropped file limit (100) |
- compression | Enable CAPE's extraction of compressed payloads |
- extraction | Enable CAPE's extraction of payloads from within process |
- injection | Enable CAPE's capture of injected payloads |
- combo | Combine compression, injection, and extraction |
- unpacker | 1 = Passive unpacking, 2 = Active unpacking |
- import-reconstruction | Attempt import reconstruction on dumps |
- store_memdump | Force STORE memdump (submit to analyzer directly) |
-
-
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ full-logs |
+ Disable log suppression |
+
+
+ force-flush |
+ 1 = Flush after non-duplicate API, 2 = Force flush every log |
+
+
+ buffer-max |
+ Max size for log buffer |
+
+
+ large-buffer-max |
+ Max size for large log buffers |
+
+
+ api-rate-cap |
+ Limit rate of API logging |
+
+
+ api-cap |
+ Limit total number of API logs |
+
+
+ hook-type |
+ Hook type: direct, indirect, or safe (32-bit only) |
+
+
+ syscall |
+ Enable syscall hooks (Win10+) |
+
+
+ disable-hook-content |
+ 1 = Remove payload of non-critical hooks, 2 = All hooks |
+
+
+ exclude-apis |
+ Colon-separated list of APIs to exclude from hooking |
+
+
+ exclude-dlls |
+ Colon-separated list of DLLs to exclude from hooking |
+
+
+ unhook-apis |
+ Dynamically unhook functions (colon-separated) |
+
+
+ coverage-modules |
+ Colon-separated list of DLLs to include in monitoring (exclude from 'dll range' filtering) |
+
+
+ zerohook |
+ Disable all hooks except essential |
+
+
+ hook-protect |
+ Enable write protection on hook pages |
+
+
+ log-exceptions |
+ Enable logging of exceptions |
+
+
+
+
-
-
-
-
-
- | Option | Description |
-
- debugger | Enable internal debugger engine |
- debug | 1 = Report critical exceptions, 2 = All exceptions |
- bp0...bp3 | Hardware breakpoints (Address or Module:Export) |
- bp | Software breakpoints (colon-separated addresses) |
- break-on-return | Break on return from specific APIs |
- base-on-api | Set base address for breakpoints based on API |
- file-offsets | Interpret breakpoints as file offsets |
- trace-all | Enable full execution tracing |
- depth | Trace depth limit (default 0) |
- count | Trace instruction count limit (default 128) |
- loop_detection | Enable loop detection (compress call logs) |
- ttd | Time Travel Debugging (ttd=1) |
- polarproxy | Run PolarProxy (TLS PCAP) |
- mitmdump | Run mitmdump (TLS HAR) |
-
-
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ procdump |
+ Enable process memory dumping on exit/timeout |
+
+
+ procmemdump |
+ Enable full process memory dumping |
+
+
+ dump-on-api |
+ Dump calling module when specific APIs are called (colon-separated) |
+
+
+ dump-config-region |
+ Dump memory regions suspected to contain C2 config |
+
+
+ dump-crypto |
+ Dump buffers from Crypto APIs |
+
+
+ dump-keys |
+ Dump keys from CryptImportKey |
+
+
+ amsidump |
+ Enable AMSI buffer dumping (Win10+) |
+
+
+ tlsdump |
+ Enable dumping of TLS secrets |
+
+
+ dropped-limit |
+ Override default dropped file limit (100) |
+
+
+ compression |
+ Enable CAPE's extraction of compressed payloads |
+
+
+ extraction |
+ Enable CAPE's extraction of payloads from within process |
+
+
+ injection |
+ Enable CAPE's capture of injected payloads |
+
+
+ combo |
+ Combine compression, injection, and extraction |
+
+
+ unpacker |
+ 1 = Passive unpacking, 2 = Active unpacking |
+
+
+ import-reconstruction |
+ Attempt import reconstruction on dumps |
+
+
+ store_memdump |
+ Force STORE memdump (submit to analyzer directly) |
+
+
+
+
+
+
+
+
+
+
+
+ | Option |
+ Description |
+
+
+
+
+ debugger |
+ Enable internal debugger engine |
+
+
+ debug |
+ 1 = Report critical exceptions, 2 = All exceptions |
+
+
+ bp0...bp3 |
+ Hardware breakpoints (Address or Module:Export) |
+
+
+ bp |
+ Software breakpoints (colon-separated addresses) |
+
+
+ break-on-return |
+ Break on return from specific APIs |
+
+
+ base-on-api |
+ Set base address for breakpoints based on API |
+
+
+ file-offsets |
+ Interpret breakpoints as file offsets |
+
+
+ trace-all |
+ Enable full execution tracing |
+
+
+ depth |
+ Trace depth limit (default 0) |
+
+
+ count |
+ Trace instruction count limit (default 128) |
+
+
+ loop_detection |
+ Enable loop detection (compress call logs) |
+
+
+ ttd |
+ Time Travel Debugging (ttd=1) |
+
+
+ polarproxy |
+ Run PolarProxy (TLS PCAP) |
+
+
+ mitmdump |
+ Run mitmdump (TLS HAR) |
+
+
+
+
-
-
@@ -507,165 +942,164 @@
Advance