
Commit 9c2bdff

Merge branch 'main' into enable_acs_deploy

2 parents 098905a + cfda377

File tree: 18 files changed, +381 −221 lines

contentctl/actions/detection_testing/GitService.py

Lines changed: 10 additions & 10 deletions
@@ -67,9 +67,9 @@ def getChanges(self, target_branch:str)->List[Detection]:
 
         #Make a filename to content map
         filepath_to_content_map = { obj.file_path:obj for (_,obj) in self.director.name_to_content_map.items()}
-        updated_detections:List[Detection] = []
-        updated_macros:List[Macro] = []
-        updated_lookups:List[Lookup] =[]
+        updated_detections:set[Detection] = set()
+        updated_macros:set[Macro] = set()
+        updated_lookups:set[Lookup] = set()
 
         for diff in all_diffs:
             if type(diff) == pygit2.Patch:
@@ -80,14 +80,14 @@ def getChanges(self, target_branch:str)->List[Detection]:
                 if decoded_path.is_relative_to(self.config.path/"detections") and decoded_path.suffix == ".yml":
                     detectionObject = filepath_to_content_map.get(decoded_path, None)
                     if isinstance(detectionObject, Detection):
-                        updated_detections.append(detectionObject)
+                        updated_detections.add(detectionObject)
                     else:
                         raise Exception(f"Error getting detection object for file {str(decoded_path)}")
 
                 elif decoded_path.is_relative_to(self.config.path/"macros") and decoded_path.suffix == ".yml":
                     macroObject = filepath_to_content_map.get(decoded_path, None)
                     if isinstance(macroObject, Macro):
-                        updated_macros.append(macroObject)
+                        updated_macros.add(macroObject)
                     else:
                         raise Exception(f"Error getting macro object for file {str(decoded_path)}")
@@ -98,7 +98,7 @@ def getChanges(self, target_branch:str)->List[Detection]:
                     updatedLookup = filepath_to_content_map.get(decoded_path, None)
                     if not isinstance(updatedLookup,Lookup):
                         raise Exception(f"Expected {decoded_path} to be type {type(Lookup)}, but instead if was {(type(updatedLookup))}")
-                    updated_lookups.append(updatedLookup)
+                    updated_lookups.add(updatedLookup)
 
                 elif decoded_path.suffix == ".csv":
                     # If the CSV was updated, we want to make sure that we
@@ -125,7 +125,7 @@ def getChanges(self, target_branch:str)->List[Detection]:
                     if updatedLookup is not None and updatedLookup not in updated_lookups:
                         # It is possible that both the CSV and YML have been modified for the same lookup,
                         # and we do not want to add it twice.
-                        updated_lookups.append(updatedLookup)
+                        updated_lookups.add(updatedLookup)
 
                 else:
                     pass
@@ -136,7 +136,7 @@ def getChanges(self, target_branch:str)->List[Detection]:
 
         # If a detection has at least one dependency on changed content,
         # then we must test it again
-        changed_macros_and_lookups = updated_macros + updated_lookups
+        changed_macros_and_lookups:set[SecurityContentObject] = updated_macros.union(updated_lookups)
 
         for detection in self.director.detections:
             if detection in updated_detections:
@@ -146,14 +146,14 @@ def getChanges(self, target_branch:str)->List[Detection]:
 
             for obj in changed_macros_and_lookups:
                 if obj in detection.get_content_dependencies():
-                    updated_detections.append(detection)
+                    updated_detections.add(detection)
                     break
 
         #Print out the names of all modified/new content
         modifiedAndNewContentString = "\n - ".join(sorted([d.name for d in updated_detections]))
 
         print(f"[{len(updated_detections)}] Pieces of modifed and new content (this may include experimental/deprecated/manual_test content):\n - {modifiedAndNewContentString}")
-        return updated_detections
+        return sorted(list(updated_detections))
 
     def getSelected(self, detectionFilenames: List[FilePath]) -> List[Detection]:
         filepath_to_content_map: dict[FilePath, SecurityContentObject] = {
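The change above replaces the list-based collections with sets, so content that is reached both directly (its YAML changed) and indirectly (a macro or lookup it depends on changed) is only queued for testing once, and manual "not in" checks are no longer required. A minimal standalone sketch of the pattern, not the contentctl code itself; ChangedItem is a hypothetical stand-in for Detection, which must likewise be hashable and orderable for set() and sorted() to work:

from dataclasses import dataclass

@dataclass(frozen=True, order=True)
class ChangedItem:
    # frozen=True makes instances hashable so they can live in a set;
    # order=True lets sorted() return them in a stable order.
    name: str

def collect_changes(direct_changes: list[ChangedItem], dependency_hits: list[ChangedItem]) -> list[ChangedItem]:
    updated: set[ChangedItem] = set()
    for item in direct_changes:
        updated.add(item)       # set.add() silently ignores duplicates
    for item in dependency_hits:
        updated.add(item)       # no "if item not in updated" guard needed
    return sorted(updated)      # deterministic, de-duplicated result

# The same item arriving via both paths is only tested once.
print(collect_changes([ChangedItem("a"), ChangedItem("b")], [ChangedItem("a")]))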

contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py

Lines changed: 59 additions & 37 deletions
@@ -13,7 +13,7 @@
 from shutil import copyfile
 from typing import Union, Optional
 
-from pydantic import BaseModel, PrivateAttr, Field, dataclasses
+from pydantic import ConfigDict, BaseModel, PrivateAttr, Field, dataclasses
 import requests  # type: ignore
 import splunklib.client as client  # type: ignore
 from splunklib.binding import HTTPError  # type: ignore
@@ -48,9 +48,9 @@ class SetupTestGroupResults(BaseModel):
     success: bool = True
     duration: float = 0
     start_time: float
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )
 
 
 class CleanupTestGroupResults(BaseModel):
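The second hunk above is part of a broader move in this commit from Pydantic v1's nested class Config to the v2 model_config = ConfigDict(...) style (the same change appears further down in this file and in DetectionTestingViewWeb). A minimal sketch of the pattern, assuming Pydantic v2 is installed; the Widget model and LegacyHandle type are illustrative only:

from pydantic import BaseModel, ConfigDict

class LegacyHandle:
    # An arbitrary non-pydantic type, similar in spirit to splunklib's client.Service.
    pass

class Widget(BaseModel):
    # Pydantic v2: replaces the v1 "class Config: arbitrary_types_allowed = True" block.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    handle: LegacyHandle
    name: str = "example"

widget = Widget(handle=LegacyHandle())
print(widget.name)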
@@ -68,14 +68,23 @@ class CannotRunBaselineException(Exception):
     # exception
     pass
 
+class ReplayIndexDoesNotExistOnServer(Exception):
+    '''
+    In order to replay data files into the Splunk Server
+    for testing, they must be replayed into an index that
+    exists. If that index does not exist, this error will
+    be generated and raised before we try to do anything else
+    with that Data File.
+    '''
+    pass
 
 @dataclasses.dataclass(frozen=False)
 class DetectionTestingManagerOutputDto():
     inputQueue: list[Detection] = Field(default_factory=list)
     outputQueue: list[Detection] = Field(default_factory=list)
     currentTestingQueue: dict[str, Union[Detection, None]] = Field(default_factory=dict)
     start_time: Union[datetime.datetime, None] = None
-    replay_index: str = "CONTENTCTL_TESTING_INDEX"
+    replay_index: str = "contentctl_testing_index"
     replay_host: str = "CONTENTCTL_HOST"
     timeout_seconds: int = 60
     terminate: bool = False
@@ -88,12 +97,13 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     sync_obj: DetectionTestingManagerOutputDto
     hec_token: str = ""
     hec_channel: str = ""
+    all_indexes_on_server: list[str] = []
     _conn: client.Service = PrivateAttr()
     pbar: tqdm.tqdm = None
     start_time: Optional[float] = None
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )
 
     def __init__(self, **data):
         super().__init__(**data)
@@ -131,6 +141,7 @@ def setup(self):
                 (self.get_conn, "Waiting for App Installation"),
                 (self.configure_conf_file_datamodels, "Configuring Datamodels"),
                 (self.create_replay_index, f"Create index '{self.sync_obj.replay_index}'"),
+                (self.get_all_indexes, "Getting all indexes from server"),
                 (self.configure_imported_roles, "Configuring Roles"),
                 (self.configure_delete_indexes, "Configuring Indexes"),
                 (self.configure_hec, "Configuring HEC"),
@@ -169,12 +180,11 @@ def configure_hec(self):
             pass
 
         try:
-
            res = self.get_conn().inputs.create(
                 name="DETECTION_TESTING_HEC",
                 kind="http",
                 index=self.sync_obj.replay_index,
-                indexes=f"{self.sync_obj.replay_index},_internal,_audit",
+                indexes=",".join(self.all_indexes_on_server),  # This allows the HEC to write to all indexes
                 useACK=True,
             )
             self.hec_token = str(res.token)
@@ -183,6 +193,23 @@
         except Exception as e:
             raise (Exception(f"Failure creating HEC Endpoint: {str(e)}"))
 
+    def get_all_indexes(self) -> None:
+        """
+        Retrieve a list of all indexes in the Splunk instance
+        """
+        try:
+            # We do not include the replay index because by
+            # the time we get to this function, it has already
+            # been created on the server.
+            indexes = []
+            res = self.get_conn().indexes
+            for index in res.list():
+                indexes.append(index.name)
+            # Retrieve all available indexes on the splunk instance
+            self.all_indexes_on_server = indexes
+        except Exception as e:
+            raise (Exception(f"Failure getting indexes: {str(e)}"))
+
     def get_conn(self) -> client.Service:
         try:
             if not self._conn:
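The new get_all_indexes step queries the server for its real index list once during setup, and later hunks reuse self.all_indexes_on_server instead of hard-coded values such as ["_*", "*"]. A rough standalone sketch of the same enumeration using splunklib directly; the connection parameters below are placeholders, not values taken from contentctl:

import splunklib.client as client  # type: ignore

# Placeholder connection details for illustration only.
service = client.connect(
    host="127.0.0.1",
    port=8089,
    username="admin",
    password="changeme",
)

# Enumerate every index the server knows about, mirroring get_all_indexes().
all_indexes_on_server = [index.name for index in service.indexes.list()]
print(all_indexes_on_server)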
@@ -265,11 +292,7 @@ def configure_imported_roles(
         self,
         imported_roles: list[str] = ["user", "power", "can_delete"],
         enterprise_security_roles: list[str] = ["ess_admin", "ess_analyst", "ess_user"],
-        indexes: list[str] = ["_*", "*"],
-    ):
-        indexes.append(self.sync_obj.replay_index)
-        indexes_encoded = ";".join(indexes)
-
+    ):
         try:
             # Set which roles should be configured. For Enterprise Security/Integration Testing,
             # we must add some extra foles.
@@ -281,7 +304,7 @@ def configure_imported_roles(
             self.get_conn().roles.post(
                 self.infrastructure.splunk_app_username,
                 imported_roles=roles,
-                srchIndexesAllowed=indexes_encoded,
+                srchIndexesAllowed=";".join(self.all_indexes_on_server),
                 srchIndexesDefault=self.sync_obj.replay_index,
             )
             return
@@ -293,19 +316,17 @@
         self.get_conn().roles.post(
             self.infrastructure.splunk_app_username,
             imported_roles=imported_roles,
-            srchIndexesAllowed=indexes_encoded,
+            srchIndexesAllowed=";".join(self.all_indexes_on_server),
             srchIndexesDefault=self.sync_obj.replay_index,
         )
 
-    def configure_delete_indexes(self, indexes: list[str] = ["_*", "*"]):
-        indexes.append(self.sync_obj.replay_index)
+    def configure_delete_indexes(self):
         endpoint = "/services/properties/authorize/default/deleteIndexesAllowed"
-        indexes_encoded = ";".join(indexes)
         try:
-            self.get_conn().post(endpoint, value=indexes_encoded)
+            self.get_conn().post(endpoint, value=";".join(self.all_indexes_on_server))
         except Exception as e:
             self.pbar.write(
-                f"Error configuring deleteIndexesAllowed with '{indexes_encoded}': [{str(e)}]"
+                f"Error configuring deleteIndexesAllowed with '{self.all_indexes_on_server}': [{str(e)}]"
             )
 
     def wait_for_conf_file(self, app_name: str, conf_file_name: str):
@@ -654,8 +675,6 @@ def execute_unit_test(
         # Set the mode and timeframe, if required
         kwargs = {"exec_mode": "blocking"}
 
-
-
         # Set earliest_time and latest_time appropriately if FORCE_ALL_TIME is False
         if not FORCE_ALL_TIME:
             if test.earliest_time is not None:
@@ -1035,8 +1054,8 @@ def retry_search_until_timeout(
         # Get the start time and compute the timeout
         search_start_time = time.time()
         search_stop_time = time.time() + self.sync_obj.timeout_seconds
-
-        # Make a copy of the search string since we may
+
+        # Make a copy of the search string since we may
         # need to make some small changes to it below
         search = detection.search
 
@@ -1088,8 +1107,6 @@
         # Initialize the collection of fields that are empty that shouldn't be
         present_threat_objects: set[str] = set()
         empty_fields: set[str] = set()
-
-
 
         # Filter out any messages in the results
         for result in results:
@@ -1119,7 +1136,7 @@
                 # not populated and we should throw an error. This can happen if there is a typo
                 # on a field. In this case, the field will appear but will not contain any values
                 current_empty_fields: set[str] = set()
-
+
                 for field in observable_fields_set:
                     if result.get(field, 'null') == 'null':
                         if field in risk_object_fields_set:
@@ -1139,9 +1156,7 @@
                     if field in threat_object_fields_set:
                         present_threat_objects.add(field)
                         continue
-
 
-
                 # If everything succeeded up until now, and no empty fields are found in the
                 # current result, then the search was a success
                 if len(current_empty_fields) == 0:
@@ -1155,8 +1170,7 @@
 
                 else:
                     empty_fields = empty_fields.union(current_empty_fields)
-
-
+
             missing_threat_objects = threat_object_fields_set - present_threat_objects
             # Report a failure if there were empty fields in a threat object in all results
             if len(missing_threat_objects) > 0:
@@ -1172,7 +1186,6 @@
                     duration=time.time() - search_start_time,
                 )
                 return
-
 
             test.result.set_job_content(
                 job.content,
@@ -1233,9 +1246,19 @@ def replay_attack_data_file(
         test_group: TestGroup,
         test_group_start_time: float,
     ):
-        tempfile = mktemp(dir=tmp_dir)
-
+        # Before attempting to replay the file, ensure that the index we want
+        # to replay into actuall exists. If not, we should throw a detailed
+        # exception that can easily be interpreted by the user.
+        if attack_data_file.custom_index is not None and \
+            attack_data_file.custom_index not in self.all_indexes_on_server:
+            raise ReplayIndexDoesNotExistOnServer(
+                f"Unable to replay data file {attack_data_file.data} "
+                f"into index '{attack_data_file.custom_index}'. "
+                "The index does not exist on the Splunk Server. "
+                f"The only valid indexes on the server are {self.all_indexes_on_server}"
+            )
 
+        tempfile = mktemp(dir=tmp_dir)
         if not (str(attack_data_file.data).startswith("http://") or
                 str(attack_data_file.data).startswith("https://")) :
             if pathlib.Path(str(attack_data_file.data)).is_file():
@@ -1280,7 +1303,6 @@ def replay_attack_data_file(
                 )
             )
 
-
         # Upload the data
         self.format_pbar_string(
             TestReportingType.GROUP,
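Taken together, the changes in this file make the server's actual index list the single source of truth: the HEC input, role search indexes, and deleteIndexesAllowed settings are all derived from all_indexes_on_server, and replay_attack_data_file now fails fast when a data file targets an index that does not exist. A simplified sketch of that fail-fast guard; the helper function is hypothetical and only mirrors the shape of the check in the diff:

class ReplayIndexDoesNotExistOnServer(Exception):
    """Raised before replay when a data file targets an index missing from the server."""

def ensure_replay_index_exists(custom_index: str | None, all_indexes_on_server: list[str]) -> None:
    # None means "use the default replay index", which setup() already created,
    # so only an explicitly requested custom index needs validating here.
    if custom_index is not None and custom_index not in all_indexes_on_server:
        raise ReplayIndexDoesNotExistOnServer(
            f"Unable to replay into index '{custom_index}'. "
            f"The only valid indexes on the server are {all_indexes_on_server}"
        )

# A typo in the index name is caught before any data is uploaded.
ensure_replay_index_exists("contentctl_testing_index", ["main", "contentctl_testing_index"])
try:
    ensure_replay_index_exists("contentct_testing_index", ["main", "contentctl_testing_index"])
except ReplayIndexDoesNotExistOnServer as error:
    print(error)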

contentctl/actions/detection_testing/views/DetectionTestingViewWeb.py

Lines changed: 10 additions & 8 deletions
@@ -1,12 +1,14 @@
-from bottle import template, Bottle, ServerAdapter
-from contentctl.actions.detection_testing.views.DetectionTestingView import (
-    DetectionTestingView,
-)
+from threading import Thread
 
+from bottle import template, Bottle, ServerAdapter
 from wsgiref.simple_server import make_server, WSGIRequestHandler
 import jinja2
 import webbrowser
-from threading import Thread
+from pydantic import ConfigDict
+
+from contentctl.actions.detection_testing.views.DetectionTestingView import (
+    DetectionTestingView,
+)
 
 DEFAULT_WEB_UI_PORT = 7999
 
@@ -100,9 +102,9 @@ def log_exception(*args, **kwargs):
 class DetectionTestingViewWeb(DetectionTestingView):
     bottleApp: Bottle = Bottle()
     server: SimpleWebServer = SimpleWebServer(host="0.0.0.0", port=DEFAULT_WEB_UI_PORT)
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )
 
     def setup(self):
         self.bottleApp.route("/", callback=self.showStatus)
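Aside from regrouping the imports (standard library, then third party, then project code) and migrating to ConfigDict, the view still serves a status page on DEFAULT_WEB_UI_PORT through a small WSGI server. A stripped-down sketch of that serving pattern using the same libraries; the route and response body are illustrative, not the real view:

from threading import Thread
from wsgiref.simple_server import make_server

from bottle import Bottle

DEFAULT_WEB_UI_PORT = 7999

app = Bottle()

@app.route("/")
def show_status():
    # The real view renders a template with the current testing progress.
    return "detection testing in progress"

# Bottle apps are WSGI callables, so wsgiref can serve them; run it off the main
# thread so testing continues while the status page stays reachable.
server = make_server("0.0.0.0", DEFAULT_WEB_UI_PORT, app)
Thread(target=server.serve_forever, daemon=True).start()
print(f"Status page listening on port {DEFAULT_WEB_UI_PORT}")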

contentctl/actions/inspect.py

Lines changed: 6 additions & 4 deletions
@@ -297,9 +297,11 @@ def check_detection_metadata(self, config: inspect) -> None:
                 validation_errors[rule_name] = []
                 # No detections should be removed from build to build
                 if rule_name not in current_build_conf.detection_stanzas:
-                    validation_errors[rule_name].append(DetectionMissingError(rule_name=rule_name))
+                    if config.suppress_missing_content_exceptions:
+                        print(f"[SUPPRESSED] {DetectionMissingError(rule_name=rule_name).long_message}")
+                    else:
+                        validation_errors[rule_name].append(DetectionMissingError(rule_name=rule_name))
                     continue
-
                 # Pull out the individual stanza for readability
                 previous_stanza = previous_build_conf.detection_stanzas[rule_name]
                 current_stanza = current_build_conf.detection_stanzas[rule_name]
@@ -335,7 +337,7 @@ def check_detection_metadata(self, config: inspect) -> None:
             )
 
         # Convert our dict mapping to a flat list of errors for use in reporting
-        validation_error_list = [x for inner_list in validation_errors.values() for x in inner_list]
+        validation_error_list = [x for inner_list in validation_errors.values() for x in inner_list]
 
         # Report failure/success
         print("\nDetection Metadata Validation:")
@@ -355,4 +357,4 @@ def check_detection_metadata(self, config: inspect) -> None:
             raise ExceptionGroup(
                 "Validation errors when comparing detection stanzas in current and previous build:",
                 validation_error_list
-            )
+            )
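This change gives inspect a way to downgrade missing-detection findings: with config.suppress_missing_content_exceptions set, the error is printed with a [SUPPRESSED] prefix instead of being collected, while everything else still ends up in the ExceptionGroup raised at the end. A small sketch of that collect-or-suppress flow, assuming Python 3.11+ for ExceptionGroup; the DetectionMissingError class and function here are illustrative stand-ins:

class DetectionMissingError(Exception):
    """Illustrative stand-in for contentctl's validation error type."""

def check_detections(missing_rules: list[str], suppress_missing_content_exceptions: bool) -> None:
    validation_errors: list[Exception] = []
    for rule_name in missing_rules:
        error = DetectionMissingError(f"Detection '{rule_name}' is missing from the current build")
        if suppress_missing_content_exceptions:
            # Report the problem but do not fail the inspection.
            print(f"[SUPPRESSED] {error}")
        else:
            validation_errors.append(error)
    if validation_errors:
        raise ExceptionGroup(
            "Validation errors when comparing detection stanzas in current and previous build:",
            validation_errors,
        )

check_detections(["Old Detection"], suppress_missing_content_exceptions=True)   # prints, does not raise
try:
    check_detections(["Old Detection"], suppress_missing_content_exceptions=False)
except* DetectionMissingError as eg:
    print(f"Inspection failed with {len(eg.exceptions)} validation error(s)")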
