@@ -13,7 +13,7 @@
 from shutil import copyfile
 from typing import Union, Optional
 
-from pydantic import BaseModel, PrivateAttr, Field, dataclasses
+from pydantic import ConfigDict, BaseModel, PrivateAttr, Field, dataclasses
 import requests  # type: ignore
 import splunklib.client as client  # type: ignore
 from splunklib.binding import HTTPError  # type: ignore
@@ -48,9 +48,9 @@ class SetupTestGroupResults(BaseModel):
     success: bool = True
     duration: float = 0
     start_time: float
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )
 
 
 class CleanupTestGroupResults(BaseModel):
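
This hunk swaps the Pydantic v1-style inner `class Config` for the v2 `model_config = ConfigDict(...)` idiom. A minimal sketch of the v2 pattern, assuming Pydantic v2 is installed; `LegacyHandle` here is an illustrative stand-in for an arbitrary type such as `client.Service`:

```python
from typing import Optional

from pydantic import BaseModel, ConfigDict


class LegacyHandle:
    """A plain class standing in for an arbitrary type such as client.Service."""


class Holder(BaseModel):
    # Pydantic v2 replaces the v1 inner `class Config` with a model_config
    # attribute; arbitrary_types_allowed lets fields use non-Pydantic types.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    handle: Optional[LegacyHandle] = None


print(Holder(handle=LegacyHandle()))  # validates: arbitrary types are allowed
```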
@@ -68,14 +68,23 @@ class CannotRunBaselineException(Exception):
     # exception
     pass
 
+class ReplayIndexDoesNotExistOnServer(Exception):
+    '''
+    In order to replay data files into the Splunk Server
+    for testing, they must be replayed into an index that
+    exists. If that index does not exist, this error will
+    be generated and raised before we try to do anything else
+    with that Data File.
+    '''
+    pass
 
 @dataclasses.dataclass(frozen=False)
 class DetectionTestingManagerOutputDto():
     inputQueue: list[Detection] = Field(default_factory=list)
     outputQueue: list[Detection] = Field(default_factory=list)
     currentTestingQueue: dict[str, Union[Detection, None]] = Field(default_factory=dict)
     start_time: Union[datetime.datetime, None] = None
-    replay_index: str = "CONTENTCTL_TESTING_INDEX"
+    replay_index: str = "contentctl_testing_index"
     replay_host: str = "CONTENTCTL_HOST"
     timeout_seconds: int = 60
     terminate: bool = False
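
The default replay index is also renamed from `CONTENTCTL_TESTING_INDEX` to `contentctl_testing_index`, consistent with Splunk's requirement that index names use only lowercase letters, digits, underscores, and hyphens. A small sketch of that naming rule as a validation check (the regex is an assumption based on Splunk's documented constraints, not code from this PR):

```python
import re

# Assumed from Splunk's documented index naming rules: lowercase letters,
# digits, underscores, and hyphens, starting with an alphanumeric character.
VALID_INDEX_NAME = re.compile(r"^[a-z0-9][a-z0-9_-]*$")

for name in ("CONTENTCTL_TESTING_INDEX", "contentctl_testing_index"):
    print(f"{name}: {'valid' if VALID_INDEX_NAME.match(name) else 'invalid'}")
```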
@@ -88,12 +97,13 @@ class DetectionTestingInfrastructure(BaseModel, abc.ABC):
     sync_obj: DetectionTestingManagerOutputDto
     hec_token: str = ""
     hec_channel: str = ""
+    all_indexes_on_server: list[str] = []
     _conn: client.Service = PrivateAttr()
     pbar: tqdm.tqdm = None
     start_time: Optional[float] = None
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True
+    )
 
     def __init__(self, **data):
         super().__init__(**data)
@@ -131,6 +141,7 @@ def setup(self):
             (self.get_conn, "Waiting for App Installation"),
             (self.configure_conf_file_datamodels, "Configuring Datamodels"),
             (self.create_replay_index, f"Create index '{self.sync_obj.replay_index}'"),
+            (self.get_all_indexes, "Getting all indexes from server"),
             (self.configure_imported_roles, "Configuring Roles"),
             (self.configure_delete_indexes, "Configuring Indexes"),
             (self.configure_hec, "Configuring HEC"),
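
Ordering matters here: `get_all_indexes` runs right after `create_replay_index`, so the freshly created replay index is already present in the list that the role, index, and HEC configuration steps consume. A sketch of the (callable, description) step pattern this setup loop appears to use (the runner and step bodies below are illustrative, not taken from the PR):

```python
from typing import Callable


def run_setup_steps(steps: list[tuple[Callable[[], None], str]]) -> None:
    # Each step pairs a zero-argument callable with a progress-bar label.
    for func, description in steps:
        print(f"-> {description}")
        func()


run_setup_steps([
    (lambda: print("   created index"), "Create index 'contentctl_testing_index'"),
    (lambda: print("   listed indexes"), "Getting all indexes from server"),
])
```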
@@ -169,12 +180,11 @@ def configure_hec(self):
             pass
 
         try:
-
             res = self.get_conn().inputs.create(
                 name="DETECTION_TESTING_HEC",
                 kind="http",
                 index=self.sync_obj.replay_index,
-                indexes=f"{self.sync_obj.replay_index},_internal,_audit",
+                indexes=",".join(self.all_indexes_on_server),  # This allows the HEC to write to all indexes
                 useACK=True,
             )
             self.hec_token = str(res.token)
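
Previously the HEC input could only write to the replay index plus `_internal` and `_audit`; deriving `indexes` from the server's full index list lets a data file target any existing index. For reference, a sketch of sending one event through such an input with `requests` (host, port, token, and channel values are placeholders; the `/services/collector/event` endpoint and `Authorization: Splunk <token>` header are standard Splunk HEC):

```python
import requests

HEC_URL = "https://localhost:8088/services/collector/event"  # placeholder host/port
HEC_TOKEN = "00000000-0000-0000-0000-000000000000"  # placeholder token

response = requests.post(
    HEC_URL,
    headers={
        "Authorization": f"Splunk {HEC_TOKEN}",
        # Required when the input is created with useACK=True.
        "X-Splunk-Request-Channel": "11111111-1111-1111-1111-111111111111",
    },
    json={"event": "hello world", "index": "contentctl_testing_index"},
    verify=False,  # test servers commonly use self-signed certificates
)
print(response.status_code, response.text)
```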
@@ -183,6 +193,23 @@ def configure_hec(self):
         except Exception as e:
             raise (Exception(f"Failure creating HEC Endpoint: {str(e)}"))
 
+    def get_all_indexes(self) -> None:
+        """
+        Retrieve a list of all indexes in the Splunk instance
+        """
+        try:
+            # We do not include the replay index because by
+            # the time we get to this function, it has already
+            # been created on the server.
+            indexes = []
+            res = self.get_conn().indexes
+            for index in res.list():
+                indexes.append(index.name)
+            # Retrieve all available indexes on the splunk instance
+            self.all_indexes_on_server = indexes
+        except Exception as e:
+            raise (Exception(f"Failure getting indexes: {str(e)}"))
+
     def get_conn(self) -> client.Service:
         try:
             if not self._conn:
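
A standalone sketch of the same enumeration that `get_all_indexes` performs, using `splunklib` directly (connection details are placeholders):

```python
import splunklib.client as client  # type: ignore

service = client.connect(
    host="localhost",
    port=8089,  # placeholder management port
    username="admin",
    password="changeme",  # placeholder credentials
)

# Mirrors get_all_indexes: collect the name of every index on the server.
all_indexes_on_server = [index.name for index in service.indexes.list()]
print(all_indexes_on_server)
```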
@@ -265,11 +292,7 @@ def configure_imported_roles(
         self,
         imported_roles: list[str] = ["user", "power", "can_delete"],
         enterprise_security_roles: list[str] = ["ess_admin", "ess_analyst", "ess_user"],
-        indexes: list[str] = ["_*", "*"],
-    ):
-        indexes.append(self.sync_obj.replay_index)
-        indexes_encoded = ";".join(indexes)
-
+    ):
         try:
             # Set which roles should be configured. For Enterprise Security/Integration Testing,
             # we must add some extra roles.
@@ -281,7 +304,7 @@ def configure_imported_roles(
             self.get_conn().roles.post(
                 self.infrastructure.splunk_app_username,
                 imported_roles=roles,
-                srchIndexesAllowed=indexes_encoded,
+                srchIndexesAllowed=";".join(self.all_indexes_on_server),
                 srchIndexesDefault=self.sync_obj.replay_index,
             )
             return
@@ -293,19 +316,17 @@ def configure_imported_roles(
         self.get_conn().roles.post(
             self.infrastructure.splunk_app_username,
             imported_roles=imported_roles,
-            srchIndexesAllowed=indexes_encoded,
+            srchIndexesAllowed=";".join(self.all_indexes_on_server),
             srchIndexesDefault=self.sync_obj.replay_index,
         )
 
-    def configure_delete_indexes(self, indexes: list[str] = ["_*", "*"]):
-        indexes.append(self.sync_obj.replay_index)
+    def configure_delete_indexes(self):
         endpoint = "/services/properties/authorize/default/deleteIndexesAllowed"
-        indexes_encoded = ";".join(indexes)
         try:
-            self.get_conn().post(endpoint, value=indexes_encoded)
+            self.get_conn().post(endpoint, value=";".join(self.all_indexes_on_server))
         except Exception as e:
             self.pbar.write(
-                f"Error configuring deleteIndexesAllowed with '{indexes_encoded}': [{str(e)}]"
+                f"Error configuring deleteIndexesAllowed with '{self.all_indexes_on_server}': [{str(e)}]"
             )
 
     def wait_for_conf_file(self, app_name: str, conf_file_name: str):
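
Both `srchIndexesAllowed` and the `deleteIndexesAllowed` property take semicolon-delimited index lists, so the server-derived list can feed both settings from a single source of truth instead of the old hard-coded `["_*", "*"]` wildcards. For example:

```python
all_indexes_on_server = ["main", "_internal", "_audit", "contentctl_testing_index"]

# One encoding serves both role search access and delete permission.
encoded = ";".join(all_indexes_on_server)
print(encoded)  # main;_internal;_audit;contentctl_testing_index
```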
@@ -654,8 +675,6 @@ def execute_unit_test(
         # Set the mode and timeframe, if required
         kwargs = {"exec_mode": "blocking"}
 
-
-
         # Set earliest_time and latest_time appropriately if FORCE_ALL_TIME is False
         if not FORCE_ALL_TIME:
             if test.earliest_time is not None:
@@ -1035,8 +1054,8 @@ def retry_search_until_timeout(
         # Get the start time and compute the timeout
         search_start_time = time.time()
         search_stop_time = time.time() + self.sync_obj.timeout_seconds
-
-        # Make a copy of the search string since we may 
+
+        # Make a copy of the search string since we may
         # need to make some small changes to it below
         search = detection.search
@@ -1088,8 +1107,6 @@ def retry_search_until_timeout(
         # Initialize the collection of fields that are empty that shouldn't be
         present_threat_objects: set[str] = set()
         empty_fields: set[str] = set()
-
-
 
         # Filter out any messages in the results
         for result in results:
@@ -1119,7 +1136,7 @@ def retry_search_until_timeout(
             # not populated and we should throw an error. This can happen if there is a typo
             # on a field. In this case, the field will appear but will not contain any values
             current_empty_fields: set[str] = set()
-
+
             for field in observable_fields_set:
                 if result.get(field, 'null') == 'null':
                     if field in risk_object_fields_set:
@@ -1139,9 +1156,7 @@ def retry_search_until_timeout(
                 if field in threat_object_fields_set:
                     present_threat_objects.add(field)
                     continue
-
 
-
             # If everything succeeded up until now, and no empty fields are found in the
             # current result, then the search was a success
             if len(current_empty_fields) == 0:
@@ -1155,8 +1170,7 @@ def retry_search_until_timeout(
 
             else:
                 empty_fields = empty_fields.union(current_empty_fields)
-
-
+
         missing_threat_objects = threat_object_fields_set - present_threat_objects
         # Report a failure if there were empty fields in a threat object in all results
         if len(missing_threat_objects) > 0:
@@ -1172,7 +1186,6 @@ def retry_search_until_timeout(
                 duration=time.time() - search_start_time,
             )
             return
-
 
         test.result.set_job_content(
             job.content,
@@ -1233,9 +1246,19 @@ def replay_attack_data_file(
         test_group: TestGroup,
         test_group_start_time: float,
     ):
-        tempfile = mktemp(dir=tmp_dir)
-
+        # Before attempting to replay the file, ensure that the index we want
+        # to replay into actually exists. If not, we should throw a detailed
+        # exception that can easily be interpreted by the user.
+        if attack_data_file.custom_index is not None and \
+           attack_data_file.custom_index not in self.all_indexes_on_server:
+            raise ReplayIndexDoesNotExistOnServer(
+                f"Unable to replay data file {attack_data_file.data} "
+                f"into index '{attack_data_file.custom_index}'. "
+                "The index does not exist on the Splunk Server. "
+                f"The only valid indexes on the server are {self.all_indexes_on_server}"
+            )
 
+        tempfile = mktemp(dir=tmp_dir)
         if not (str(attack_data_file.data).startswith("http://") or
                 str(attack_data_file.data).startswith("https://")):
             if pathlib.Path(str(attack_data_file.data)).is_file():
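
The new guard fails fast, before any download or upload work, when a test's `custom_index` does not exist on the server. A self-contained sketch of the same check and how a caller might handle it (the helper below is hypothetical; only the exception name comes from this PR):

```python
class ReplayIndexDoesNotExistOnServer(Exception):
    """Same exception name as the one added in this PR."""


def check_replay_index(custom_index: str, all_indexes_on_server: list[str]) -> None:
    # Hypothetical helper mirroring the guard in replay_attack_data_file.
    if custom_index not in all_indexes_on_server:
        raise ReplayIndexDoesNotExistOnServer(
            f"index '{custom_index}' is not one of {all_indexes_on_server}"
        )


try:
    check_replay_index("missing_index", ["main", "contentctl_testing_index"])
except ReplayIndexDoesNotExistOnServer as e:
    # A bad custom_index is a content error; report it without failing setup.
    print(f"Skipping data file: {e}")
```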
@@ -1280,7 +1303,6 @@ def replay_attack_data_file(
                 )
             )
 
-
         # Upload the data
         self.format_pbar_string(
             TestReportingType.GROUP,