Skip to content

Commit cfda377

Browse files
authored
Merge pull request #309 from splunk/all_more_custom_indexes
Add more custom indexes. ESCU smoketests are failing because the Drilldowns PR has not been merged yet. This is expected.
2 parents f7a939b + adf0f90 commit cfda377

File tree

1 file changed

+52
-30
lines changed

1 file changed

+52
-30
lines changed

contentctl/actions/detection_testing/infrastructures/DetectionTestingInfrastructure.py

Lines changed: 52 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -68,14 +68,23 @@ class CannotRunBaselineException(Exception):
6868
# exception
6969
pass
7070

71+
class ReplayIndexDoesNotExistOnServer(Exception):
    """
    Raised when an attack data file targets an index that is not
    present on the Splunk server.

    Replay requires the destination index to already exist, so this
    error is raised up front — before any work is done with the data
    file — to give the user a clear, early failure.
    """
    pass
7180

7281
@dataclasses.dataclass(frozen=False)
7382
class DetectionTestingManagerOutputDto():
7483
inputQueue: list[Detection] = Field(default_factory=list)
7584
outputQueue: list[Detection] = Field(default_factory=list)
7685
currentTestingQueue: dict[str, Union[Detection, None]] = Field(default_factory=dict)
7786
start_time: Union[datetime.datetime, None] = None
78-
replay_index: str = "CONTENTCTL_TESTING_INDEX"
87+
replay_index: str = "contentctl_testing_index"
7988
replay_host: str = "CONTENTCTL_HOST"
8089
timeout_seconds: int = 60
8190
terminate: bool = False
@@ -88,6 +97,7 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
8897
sync_obj: DetectionTestingManagerOutputDto
8998
hec_token: str = ""
9099
hec_channel: str = ""
100+
all_indexes_on_server: list[str] = []
91101
_conn: client.Service = PrivateAttr()
92102
pbar: tqdm.tqdm = None
93103
start_time: Optional[float] = None
@@ -131,6 +141,7 @@ def setup(self):
131141
(self.get_conn, "Waiting for App Installation"),
132142
(self.configure_conf_file_datamodels, "Configuring Datamodels"),
133143
(self.create_replay_index, f"Create index '{self.sync_obj.replay_index}'"),
144+
(self.get_all_indexes, "Getting all indexes from server"),
134145
(self.configure_imported_roles, "Configuring Roles"),
135146
(self.configure_delete_indexes, "Configuring Indexes"),
136147
(self.configure_hec, "Configuring HEC"),
@@ -169,12 +180,11 @@ def configure_hec(self):
169180
pass
170181

171182
try:
172-
173183
res = self.get_conn().inputs.create(
174184
name="DETECTION_TESTING_HEC",
175185
kind="http",
176186
index=self.sync_obj.replay_index,
177-
indexes=f"{self.sync_obj.replay_index},_internal,_audit",
187+
indexes=",".join(self.all_indexes_on_server), # This allows the HEC to write to all indexes
178188
useACK=True,
179189
)
180190
self.hec_token = str(res.token)
@@ -183,6 +193,23 @@ def configure_hec(self):
183193
except Exception as e:
184194
raise (Exception(f"Failure creating HEC Endpoint: {str(e)}"))
185195

196+
def get_all_indexes(self) -> None:
    """
    Retrieve the names of all indexes on the Splunk instance and
    cache them in self.all_indexes_on_server.

    Note: the replay index is created before this step runs, so it is
    already present on the server and is included in the listing like
    any other index.

    Raises:
        Exception: if the index listing cannot be retrieved; the
            underlying error is chained as the cause.
    """
    try:
        # Each entry returned by list() is an index entity; only the
        # names are needed downstream (HEC config, role config, etc.).
        self.all_indexes_on_server = [
            index.name for index in self.get_conn().indexes.list()
        ]
    except Exception as e:
        # Chain the original exception so the root cause is preserved.
        raise Exception(f"Failure getting indexes: {str(e)}") from e
186213
def get_conn(self) -> client.Service:
187214
try:
188215
if not self._conn:
@@ -265,11 +292,7 @@ def configure_imported_roles(
265292
self,
266293
imported_roles: list[str] = ["user", "power", "can_delete"],
267294
enterprise_security_roles: list[str] = ["ess_admin", "ess_analyst", "ess_user"],
268-
indexes: list[str] = ["_*", "*"],
269-
):
270-
indexes.append(self.sync_obj.replay_index)
271-
indexes_encoded = ";".join(indexes)
272-
295+
):
273296
try:
274297
# Set which roles should be configured. For Enterprise Security/Integration Testing,
275298
# we must add some extra roles.
@@ -281,7 +304,7 @@ def configure_imported_roles(
281304
self.get_conn().roles.post(
282305
self.infrastructure.splunk_app_username,
283306
imported_roles=roles,
284-
srchIndexesAllowed=indexes_encoded,
307+
srchIndexesAllowed=";".join(self.all_indexes_on_server),
285308
srchIndexesDefault=self.sync_obj.replay_index,
286309
)
287310
return
@@ -293,19 +316,17 @@ def configure_imported_roles(
293316
self.get_conn().roles.post(
294317
self.infrastructure.splunk_app_username,
295318
imported_roles=imported_roles,
296-
srchIndexesAllowed=indexes_encoded,
319+
srchIndexesAllowed=";".join(self.all_indexes_on_server),
297320
srchIndexesDefault=self.sync_obj.replay_index,
298321
)
299322

300-
def configure_delete_indexes(self):
    """
    Allow deletion across every index known on the server by setting
    the deleteIndexesAllowed property. A failure here is reported on
    the progress bar rather than raised, so setup can continue.
    """
    endpoint = "/services/properties/authorize/default/deleteIndexesAllowed"
    allowed_indexes = ";".join(self.all_indexes_on_server)
    try:
        self.get_conn().post(endpoint, value=allowed_indexes)
    except Exception as e:
        self.pbar.write(
            f"Error configuring deleteIndexesAllowed with '{self.all_indexes_on_server}': [{str(e)}]"
        )
310331

311332
def wait_for_conf_file(self, app_name: str, conf_file_name: str):
@@ -654,8 +675,6 @@ def execute_unit_test(
654675
# Set the mode and timeframe, if required
655676
kwargs = {"exec_mode": "blocking"}
656677

657-
658-
659678
# Set earliest_time and latest_time appropriately if FORCE_ALL_TIME is False
660679
if not FORCE_ALL_TIME:
661680
if test.earliest_time is not None:
@@ -1035,8 +1054,8 @@ def retry_search_until_timeout(
10351054
# Get the start time and compute the timeout
10361055
search_start_time = time.time()
10371056
search_stop_time = time.time() + self.sync_obj.timeout_seconds
1038-
1039-
# Make a copy of the search string since we may
1057+
1058+
# Make a copy of the search string since we may
10401059
# need to make some small changes to it below
10411060
search = detection.search
10421061

@@ -1088,8 +1107,6 @@ def retry_search_until_timeout(
10881107
# Initialize the collection of fields that are empty that shouldn't be
10891108
present_threat_objects: set[str] = set()
10901109
empty_fields: set[str] = set()
1091-
1092-
10931110

10941111
# Filter out any messages in the results
10951112
for result in results:
@@ -1119,7 +1136,7 @@ def retry_search_until_timeout(
11191136
# not populated and we should throw an error. This can happen if there is a typo
11201137
# on a field. In this case, the field will appear but will not contain any values
11211138
current_empty_fields: set[str] = set()
1122-
1139+
11231140
for field in observable_fields_set:
11241141
if result.get(field, 'null') == 'null':
11251142
if field in risk_object_fields_set:
@@ -1139,9 +1156,7 @@ def retry_search_until_timeout(
11391156
if field in threat_object_fields_set:
11401157
present_threat_objects.add(field)
11411158
continue
1142-
11431159

1144-
11451160
# If everything succeeded up until now, and no empty fields are found in the
11461161
# current result, then the search was a success
11471162
if len(current_empty_fields) == 0:
@@ -1155,8 +1170,7 @@ def retry_search_until_timeout(
11551170

11561171
else:
11571172
empty_fields = empty_fields.union(current_empty_fields)
1158-
1159-
1173+
11601174
missing_threat_objects = threat_object_fields_set - present_threat_objects
11611175
# Report a failure if there were empty fields in a threat object in all results
11621176
if len(missing_threat_objects) > 0:
@@ -1172,7 +1186,6 @@ def retry_search_until_timeout(
11721186
duration=time.time() - search_start_time,
11731187
)
11741188
return
1175-
11761189

11771190
test.result.set_job_content(
11781191
job.content,
@@ -1233,9 +1246,19 @@ def replay_attack_data_file(
12331246
test_group: TestGroup,
12341247
test_group_start_time: float,
12351248
):
1236-
tempfile = mktemp(dir=tmp_dir)
1237-
1249+
# Before attempting to replay the file, ensure that the index we want
1250+
# to replay into actually exists. If not, we should throw a detailed
1251+
# exception that can easily be interpreted by the user.
1252+
if attack_data_file.custom_index is not None and \
1253+
attack_data_file.custom_index not in self.all_indexes_on_server:
1254+
raise ReplayIndexDoesNotExistOnServer(
1255+
f"Unable to replay data file {attack_data_file.data} "
1256+
f"into index '{attack_data_file.custom_index}'. "
1257+
"The index does not exist on the Splunk Server. "
1258+
f"The only valid indexes on the server are {self.all_indexes_on_server}"
1259+
)
12381260

1261+
tempfile = mktemp(dir=tmp_dir)
12391262
if not (str(attack_data_file.data).startswith("http://") or
12401263
str(attack_data_file.data).startswith("https://")) :
12411264
if pathlib.Path(str(attack_data_file.data)).is_file():
@@ -1280,7 +1303,6 @@ def replay_attack_data_file(
12801303
)
12811304
)
12821305

1283-
12841306
# Upload the data
12851307
self.format_pbar_string(
12861308
TestReportingType.GROUP,

0 commit comments

Comments
 (0)