Skip to content

Commit d11009d

Browse files
committed
Merge branch 'contentctl_5' into remove_use_enum_values
2 parents 6f394cc + 7646c24 commit d11009d

33 files changed

+171
-152
lines changed

contentctl/actions/new_content.py

Lines changed: 98 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
2-
31
from dataclasses import dataclass
42
import questionary
53
from typing import Any
@@ -10,67 +8,108 @@
108
import pathlib
119
from contentctl.objects.abstract_security_content_objects.security_content_object_abstract import SecurityContentObject_Abstract
1210
from contentctl.output.yml_writer import YmlWriter
13-
11+
from contentctl.objects.enums import AssetType
12+
from contentctl.objects.constants import SES_OBSERVABLE_TYPE_MAPPING, SES_OBSERVABLE_ROLE_MAPPING
1413
class NewContent:
14+
UPDATE_PREFIX = "__UPDATE__"
15+
16+
DEFAULT_DRILLDOWN_DEF = [
17+
{
18+
"name": f'View the detection results for - "${UPDATE_PREFIX}FIRST_RISK_OBJECT$" and "${UPDATE_PREFIX}SECOND_RISK_OBJECT$"',
19+
"search": f'%original_detection_search% | search "${UPDATE_PREFIX}FIRST_RISK_OBJECT = "${UPDATE_PREFIX}FIRST_RISK_OBJECT$" second_observable_type_here = "${UPDATE_PREFIX}SECOND_RISK_OBJECT$"',
20+
"earliest_offset": '$info_min_time$',
21+
"latest_offset": '$info_max_time$'
22+
},
23+
{
24+
"name": f'View risk events for the last 7 days for - "${UPDATE_PREFIX}FIRST_RISK_OBJECT$" and "${UPDATE_PREFIX}SECOND_RISK_OBJECT$"',
25+
"search": f'| from datamodel Risk.All_Risk | search normalized_risk_object IN ("${UPDATE_PREFIX}FIRST_RISK_OBJECT$", "${UPDATE_PREFIX}SECOND_RISK_OBJECT$") starthoursago=168 | stats count min(_time) as firstTime max(_time) as lastTime values(search_name) as "Search Name" values(risk_message) as "Risk Message" values(analyticstories) as "Analytic Stories" values(annotations._all) as "Annotations" values(annotations.mitre_attack.mitre_tactic) as "ATT&CK Tactics" by normalized_risk_object | `security_content_ctime(firstTime)` | `security_content_ctime(lastTime)`',
26+
"earliest_offset": '$info_min_time$',
27+
"latest_offset": '$info_max_time$'
28+
}
29+
]
30+
1531

16-
def buildDetection(self)->dict[str,Any]:
32+
def buildDetection(self) -> tuple[dict[str, Any], str]:
1733
questions = NewContentQuestions.get_questions_detection()
18-
answers: dict[str,str] = questionary.prompt(
19-
questions,
20-
kbi_msg="User did not answer all of the prompt questions. Exiting...")
34+
answers: dict[str, str] = questionary.prompt(
35+
questions,
36+
kbi_msg="User did not answer all of the prompt questions. Exiting...",
37+
)
2138
if not answers:
2239
raise ValueError("User didn't answer one or more questions!")
23-
answers.update(answers)
24-
answers['name'] = answers['detection_name']
25-
del answers['detection_name']
26-
answers['id'] = str(uuid.uuid4())
27-
answers['version'] = 1
28-
answers['date'] = datetime.today().strftime('%Y-%m-%d')
29-
answers['author'] = answers['detection_author']
30-
del answers['detection_author']
31-
answers['data_source'] = answers['data_source']
32-
answers['type'] = answers['detection_type']
33-
del answers['detection_type']
34-
answers['status'] = "production" #start everything as production since that's what we INTEND the content to become
35-
answers['description'] = 'UPDATE_DESCRIPTION'
36-
file_name = answers['name'].replace(' ', '_').replace('-','_').replace('.','_').replace('/','_').lower()
37-
answers['search'] = answers['detection_search'] + ' | `' + file_name + '_filter`'
38-
del answers['detection_search']
39-
answers['how_to_implement'] = 'UPDATE_HOW_TO_IMPLEMENT'
40-
answers['known_false_positives'] = 'UPDATE_KNOWN_FALSE_POSITIVES'
41-
answers['references'] = ['REFERENCE']
42-
answers['tags'] = dict()
43-
answers['tags']['analytic_story'] = ['UPDATE_STORY_NAME']
44-
answers['tags']['asset_type'] = 'UPDATE asset_type'
45-
answers['tags']['confidence'] = 'UPDATE value between 1-100'
46-
answers['tags']['impact'] = 'UPDATE value between 1-100'
47-
answers['tags']['message'] = 'UPDATE message'
48-
answers['tags']['mitre_attack_id'] = [x.strip() for x in answers['mitre_attack_ids'].split(',')]
49-
answers['tags']['observable'] = [{'name': 'UPDATE', 'type': 'UPDATE', 'role': ['UPDATE']}]
50-
answers['tags']['product'] = ['Splunk Enterprise','Splunk Enterprise Security','Splunk Cloud']
51-
answers['tags']['required_fields'] = ['UPDATE']
52-
answers['tags']['risk_score'] = 'UPDATE (impact * confidence)/100'
53-
answers['tags']['security_domain'] = answers['security_domain']
54-
del answers["security_domain"]
55-
answers['tags']['cve'] = ['UPDATE WITH CVE(S) IF APPLICABLE']
56-
57-
#generate the tests section
58-
answers['tests'] = [
59-
{
60-
'name': "True Positive Test",
61-
'attack_data': [
62-
{
63-
'data': "https://github.com/splunk/contentctl/wiki",
64-
"sourcetype": "UPDATE SOURCETYPE",
65-
"source": "UPDATE SOURCE"
66-
}
67-
]
68-
}
69-
]
70-
del answers["mitre_attack_ids"]
71-
return answers
7240

73-
def buildStory(self)->dict[str,Any]:
41+
data_source_field = (
42+
answers["data_source"] if len(answers["data_source"]) > 0 else [f"{NewContent.UPDATE_PREFIX} zero or more data_sources"]
43+
)
44+
file_name = (
45+
answers["detection_name"]
46+
.replace(" ", "_")
47+
.replace("-", "_")
48+
.replace(".", "_")
49+
.replace("/", "_")
50+
.lower()
51+
)
52+
53+
#Minimum length for a MITRE ATT&CK ID is 5 characters: T1000
54+
if len(answers["mitre_attack_ids"]) >= 5:
55+
mitre_attack_ids = [x.strip() for x in answers["mitre_attack_ids"].split(",")]
56+
else:
57+
#String was too short to be a valid ATT&CK ID, so use a placeholder instead
58+
mitre_attack_ids = [f"{NewContent.UPDATE_PREFIX} zero or more mitre_attack_ids"]
59+
60+
output_file_answers: dict[str, Any] = {
61+
"name": answers["detection_name"],
62+
"id": str(uuid.uuid4()),
63+
"version": 1,
64+
"date": datetime.today().strftime("%Y-%m-%d"),
65+
"author": answers["detection_author"],
66+
"status": "production", # start everything as production since that's what we INTEND the content to become
67+
"type": answers["detection_type"],
68+
"description": f"{NewContent.UPDATE_PREFIX} by providing a description of your search",
69+
"data_source": data_source_field,
70+
"search": f"{answers['detection_search']} | `{file_name}_filter`",
71+
"how_to_implement": f"{NewContent.UPDATE_PREFIX} how to implement your search",
72+
"known_false_positives": f"{NewContent.UPDATE_PREFIX} known false positives for your search",
73+
"references": [f"{NewContent.UPDATE_PREFIX} zero or more http references to provide more information about your search"],
74+
"drilldown_searches": NewContent.DEFAULT_DRILLDOWN_DEF,
75+
"tags": {
76+
"analytic_story": [f"{NewContent.UPDATE_PREFIX} by providing zero or more analytic stories"],
77+
"asset_type": f"{NewContent.UPDATE_PREFIX} by providing and asset type from {list(AssetType._value2member_map_)}",
78+
"confidence": f"{NewContent.UPDATE_PREFIX} by providing a value between 1-100",
79+
"impact": f"{NewContent.UPDATE_PREFIX} by providing a value between 1-100",
80+
"message": f"{NewContent.UPDATE_PREFIX} by providing a risk message. Fields in your search results can be referenced using $fieldName$",
81+
"mitre_attack_id": mitre_attack_ids,
82+
"observable": [
83+
{"name": f"{NewContent.UPDATE_PREFIX} the field name of the observable. This is a field that exists in your search results.", "type": f"{NewContent.UPDATE_PREFIX} the type of your observable from the list {list(SES_OBSERVABLE_TYPE_MAPPING.keys())}.", "role": [f"{NewContent.UPDATE_PREFIX} the role from the list {list(SES_OBSERVABLE_ROLE_MAPPING.keys())}"]}
84+
],
85+
"product": [
86+
"Splunk Enterprise",
87+
"Splunk Enterprise Security",
88+
"Splunk Cloud",
89+
],
90+
"security_domain": answers["security_domain"],
91+
"cve": [f"{NewContent.UPDATE_PREFIX} with CVE(s) if applicable"],
92+
},
93+
"tests": [
94+
{
95+
"name": "True Positive Test",
96+
"attack_data": [
97+
{
98+
"data": f"{NewContent.UPDATE_PREFIX} the data file to replay. Go to https://github.com/splunk/contentctl/wiki for information about the format of this field",
99+
"sourcetype": f"{NewContent.UPDATE_PREFIX} the sourcetype of your data file.",
100+
"source": f"{NewContent.UPDATE_PREFIX} the source of your datafile",
101+
}
102+
],
103+
}
104+
],
105+
}
106+
107+
if answers["detection_type"] not in ["TTP", "Anomaly", "Correlation"]:
108+
del output_file_answers["drilldown_searches"]
109+
110+
return output_file_answers, answers['detection_kind']
111+
112+
def buildStory(self) -> dict[str, Any]:
74113
questions = NewContentQuestions.get_questions_story()
75114
answers = questionary.prompt(
76115
questions,
@@ -95,12 +134,11 @@ def buildStory(self)->dict[str,Any]:
95134
del answers['usecase']
96135
answers['tags']['cve'] = ['UPDATE WITH CVE(S) IF APPLICABLE']
97136
return answers
98-
99137

100138
def execute(self, input_dto: new) -> None:
101139
if input_dto.type == NewContentType.detection:
102-
content_dict = self.buildDetection()
103-
subdirectory = pathlib.Path('detections') / content_dict.pop('detection_kind')
140+
content_dict, detection_kind = self.buildDetection()
141+
subdirectory = pathlib.Path('detections') / detection_kind
104142
elif input_dto.type == NewContentType.story:
105143
content_dict = self.buildStory()
106144
subdirectory = pathlib.Path('stories')

contentctl/contentctl.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ def main():
154154

155155
else:
156156
#The file exists, so load it up!
157-
config_obj = YmlReader().load_file(configFile)
157+
config_obj = YmlReader().load_file(configFile,add_fields=False)
158158
t = test.model_validate(config_obj)
159159
except Exception as e:
160160
print(f"Error validating 'contentctl.yml':\n{str(e)}")

contentctl/helper/utils.py

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -247,20 +247,6 @@ def validate_git_pull_request(repo_path: str, pr_number: int) -> str:
247247

248248
return hash
249249

250-
# @staticmethod
251-
# def check_required_fields(
252-
# thisField: str, definedFields: dict, requiredFields: list[str]
253-
# ):
254-
# missing_fields = [
255-
# field for field in requiredFields if field not in definedFields
256-
# ]
257-
# if len(missing_fields) > 0:
258-
# raise (
259-
# ValueError(
260-
# f"Could not validate - please resolve other errors resulting in missing fields {missing_fields}"
261-
# )
262-
# )
263-
264250
@staticmethod
265251
def verify_file_exists(
266252
file_path: str, verbose_print=False, timeout_seconds: int = 10

contentctl/input/new_content_questions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ def get_questions_detection(cls) -> list[dict[str,Any]]:
5757
"type": "text",
5858
"message": "enter search (spl)",
5959
"name": "detection_search",
60-
"default": "| UPDATE_SPL",
60+
"default": "| __UPDATE__ SPL",
6161
},
6262
{
6363
"type": "text",

contentctl/input/yml_reader.py

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,12 @@
11
from typing import Dict, Any
2-
32
import yaml
4-
5-
63
import sys
74
import pathlib
85

96
class YmlReader():
107

118
@staticmethod
12-
def load_file(file_path: pathlib.Path, add_fields=True, STRICT_YML_CHECKING=False) -> Dict[str,Any]:
9+
def load_file(file_path: pathlib.Path, add_fields:bool=True, STRICT_YML_CHECKING:bool=False) -> Dict[str,Any]:
1310
try:
1411
file_handler = open(file_path, 'r', encoding="utf-8")
1512

@@ -27,8 +24,16 @@ def load_file(file_path: pathlib.Path, add_fields=True, STRICT_YML_CHECKING=Fals
2724
print(f"Error loading YML file {file_path}: {str(e)}")
2825
sys.exit(1)
2926
try:
30-
#yml_obj = list(yaml.safe_load_all(file_handler))[0]
31-
yml_obj = yaml.load(file_handler, Loader=yaml.CSafeLoader)
27+
#Ideally we should use
28+
# from contentctl.actions.new_content import NewContent
29+
# and use NewContent.UPDATE_PREFIX,
30+
# but there is a circular dependency right now which makes that difficult.
31+
# We have instead hardcoded UPDATE_PREFIX
32+
UPDATE_PREFIX = "__UPDATE__"
33+
data = file_handler.read()
34+
if UPDATE_PREFIX in data:
35+
raise Exception(f"The file {file_path} contains the value '{UPDATE_PREFIX}'. Please fill out any unpopulated fields as required.")
36+
yml_obj = yaml.load(data, Loader=yaml.CSafeLoader)
3237
except yaml.YAMLError as exc:
3338
print(exc)
3439
sys.exit(1)

contentctl/objects/abstract_security_content_objects/security_content_object_abstract.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,7 @@
3232

3333

3434
class SecurityContentObject_Abstract(BaseModel, abc.ABC):
35-
model_config = ConfigDict(validate_default=True)
36-
35+
model_config = ConfigDict(validate_default=True,extra="forbid")
3736
name: str = Field(...,max_length=99)
3837
author: str = Field(...,max_length=255)
3938
date: datetime.date = Field(...)

contentctl/objects/alert_action.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
from __future__ import annotations
2-
from pydantic import BaseModel, model_serializer
2+
from pydantic import BaseModel, model_serializer, ConfigDict
33
from typing import Optional
44

55
from contentctl.objects.deployment_email import DeploymentEmail
@@ -9,6 +9,7 @@
99
from contentctl.objects.deployment_phantom import DeploymentPhantom
1010

1111
class AlertAction(BaseModel):
12+
model_config = ConfigDict(extra="forbid")
1213
email: Optional[DeploymentEmail] = None
1314
notable: Optional[DeploymentNotable] = None
1415
rba: Optional[DeploymentRBA] = DeploymentRBA()

contentctl/objects/atomic.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ class InputArgumentType(StrEnum):
4141
Url = "Url"
4242

4343
class AtomicExecutor(BaseModel):
44+
model_config = ConfigDict(extra="forbid")
4445
name: str
4546
elevation_required: Optional[bool] = False #Appears to be optional
4647
command: Optional[str] = None

contentctl/objects/base_test.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
from typing import Union
33
from abc import ABC, abstractmethod
44

5-
from pydantic import BaseModel
5+
from pydantic import BaseModel,ConfigDict
66

77
from contentctl.objects.base_test_result import BaseTestResult
88

@@ -21,6 +21,7 @@ def __str__(self) -> str:
2121

2222
# TODO (#224): enforce distinct test names w/in detections
2323
class BaseTest(BaseModel, ABC):
24+
model_config = ConfigDict(extra="forbid")
2425
"""
2526
A test case for a detection
2627
"""

contentctl/objects/baseline.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11

22
from __future__ import annotations
3-
from typing import Annotated, Optional, List,Any
4-
from pydantic import field_validator, ValidationInfo, Field, model_serializer
3+
from typing import Annotated, List,Any
4+
from pydantic import field_validator, ValidationInfo, Field, model_serializer, computed_field
55
from contentctl.objects.deployment import Deployment
66
from contentctl.objects.security_content_object import SecurityContentObject
77
from contentctl.objects.enums import DataModel
@@ -15,7 +15,6 @@
1515
class Baseline(SecurityContentObject):
1616
name:str = Field(...,max_length=CONTENTCTL_MAX_SEARCH_NAME_LENGTH)
1717
type: Annotated[str,Field(pattern="^Baseline$")] = Field(...)
18-
datamodel: Optional[List[DataModel]] = None
1918
search: str = Field(..., min_length=4)
2019
how_to_implement: str = Field(..., min_length=4)
2120
known_false_positives: str = Field(..., min_length=4)
@@ -34,6 +33,10 @@ def get_conf_stanza_name(self, app:CustomApp)->str:
3433
def getDeployment(cls, v:Any, info:ValidationInfo)->Deployment:
3534
return Deployment.getDeployment(v,info)
3635

36+
@computed_field
37+
@property
38+
def datamodel(self) -> List[DataModel]:
39+
return [dm for dm in DataModel if dm.value in self.search]
3740

3841
@model_serializer
3942
def serialize_model(self):

0 commit comments

Comments
 (0)