Skip to content

Commit a6f8d0d

Browse files
Merge branch 'main' into add_historical_rules
2 parents 9318316 + 801efb3 commit a6f8d0d

5 files changed

+375
-15
lines changed
Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
[metadata]
2+
creation_date = "2024/11/25"
3+
maturity = "production"
4+
updated_date = "2024/11/25"
5+
min_stack_comments = "ES|QL rule type is still in technical preview as of 8.13, however this rule was tested successfully; integration in tech preview"
6+
min_stack_version = "8.13.0"
7+
8+
[rule]
9+
author = ["Elastic"]
10+
description = """
11+
Identifies multiple AWS Bedrock executions in a one minute time window without guardrails by the same user in the same account over a session. Multiple
12+
consecutive executions imply that a user may be intentionally attempting to bypass security controls by not routing the requests with the desired guardrail configuration
13+
in order to access sensitive information, or possibly exploit a vulnerability in the system.
14+
"""
15+
false_positives = ["Users testing new model deployments or updated compliance policies without Amazon Bedrock guardrails."]
16+
from = "now-60m"
17+
interval = "10m"
18+
language = "esql"
19+
license = "Elastic License v2"
20+
name = "AWS Bedrock Invocations without Guardrails Detected by a Single User Over a Session"
21+
references = [
22+
"https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-components.html",
23+
"https://atlas.mitre.org/techniques/AML.T0051",
24+
"https://atlas.mitre.org/techniques/AML.T0054",
25+
"https://www.elastic.co/security-labs/elastic-advances-llm-security"
26+
]
27+
risk_score = 47
28+
rule_id = "f2c653b7-7daf-4774-86f2-34cdbd1fc528"
29+
note = """## Triage and analysis
30+
31+
### Investigating Amazon Bedrock Invocations without Guardrails Detected by a Single User Over a Session.
32+
33+
Using Amazon Bedrock Guardrails during model invocation is critical for ensuring the safe, reliable, and ethical use of AI models.
34+
Guardrails help manage risks associated with AI usage and ensure the output aligns with desired policies and standards.
35+
36+
#### Possible investigation steps
37+
38+
- Identify the user account that caused multiple model violations over a session without desired guardrail configuration and whether it should perform this kind of action.
39+
- Investigate the user activity that might indicate a potential brute force attack.
40+
- Investigate other alerts associated with the user account during the past 48 hours.
41+
- Consider the time of day. If the user is a human (not a program or script), did the activity take place during a normal time of day?
42+
- Examine the account's prompts and responses in the last 24 hours.
43+
- If you suspect the account has been compromised, scope potentially compromised assets by tracking Amazon Bedrock model access, prompts generated, and responses to the prompts by the account in the last 24 hours.
44+
45+
### False positive analysis
46+
47+
- Verify that the user account which caused multiple policy violations over a session is not testing any new model deployments or updated compliance policies in Amazon Bedrock guardrails.
48+
49+
### Response and remediation
50+
51+
- Initiate the incident response process based on the outcome of the triage.
52+
- Disable or limit the account during the investigation and response.
53+
- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:
54+
- Identify the account role in the cloud environment.
55+
- Identify if the attacker is moving laterally and compromising other Amazon Bedrock Services.
56+
- Identify any regulatory or legal ramifications related to this activity.
57+
- Review the permissions assigned to the implicated user group or role behind these requests to ensure they are authorized and expected to access bedrock and ensure that the least privilege principle is being followed.
58+
- Determine the initial vector abused by the attacker and take action to prevent reinfection via the same vector.
59+
- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).
60+
"""
61+
setup = """## Setup
62+
63+
This rule requires that guardrails are configured in AWS Bedrock. For more information, see the AWS Bedrock documentation:
64+
65+
https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-create.html
66+
"""
67+
severity = "medium"
68+
tags = [
69+
"Domain: LLM",
70+
"Data Source: AWS Bedrock",
71+
"Data Source: AWS S3",
72+
"Resources: Investigation Guide",
73+
"Use Case: Policy Violation",
74+
"Mitre Atlas: T0051",
75+
"Mitre Atlas: T0054",
76+
]
77+
timestamp_override = "event.ingested"
78+
type = "esql"
79+
80+
query = '''
81+
from logs-aws_bedrock.invocation-*
82+
// create time window buckets of 1 minute
83+
| eval time_window = date_trunc(1 minute, @timestamp)
84+
| where gen_ai.guardrail_id is NULL
85+
| keep @timestamp, time_window, gen_ai.guardrail_id, user.id
86+
| stats model_invocation_without_guardrails = count() by time_window, user.id
87+
| where model_invocation_without_guardrails > 5
88+
| sort model_invocation_without_guardrails desc
89+
'''

rules/integrations/aws_bedrock/aws_bedrock_high_confidence_misconduct_blocks_detected.toml

Lines changed: 19 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,44 +1,44 @@
11
[metadata]
22
creation_date = "2024/05/05"
33
maturity = "production"
4-
updated_date = "2024/11/05"
4+
updated_date = "2024/11/21"
55
min_stack_comments = "ES|QL rule type is still in technical preview as of 8.13, however this rule was tested successfully; integration in tech preview"
66
min_stack_version = "8.13.0"
77

88
[rule]
99
author = ["Elastic"]
1010
description = """
11-
Detects repeated high-confidence 'BLOCKED' actions coupled with specific violation codes such as 'MISCONDUCT',
12-
indicating persistent misuse or attempts to probe the model's ethical boundaries.
11+
Detects repeated high-confidence 'BLOCKED' actions coupled with specific 'Content Filter' policy violation having codes such as 'MISCONDUCT',
12+
'HATE', 'SEXUAL', 'INSULTS', 'PROMPT_ATTACK', 'VIOLENCE', indicating persistent misuse or attempts to probe the model's ethical boundaries.
1313
"""
1414
false_positives = ["New model deployments.", "Testing updates to compliance policies."]
1515
from = "now-60m"
1616
interval = "10m"
1717
language = "esql"
1818
license = "Elastic License v2"
19-
name = "Unusual High Confidence Misconduct Blocks Detected"
19+
name = "Unusual High Confidence Content Filter Blocks Detected"
2020
references = [
2121
"https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-components.html",
2222
"https://atlas.mitre.org/techniques/AML.T0051",
2323
"https://atlas.mitre.org/techniques/AML.T0054",
2424
"https://www.elastic.co/security-labs/elastic-advances-llm-security"
2525
]
26-
risk_score = 73
26+
risk_score = 47
2727
rule_id = "4f855297-c8e0-4097-9d97-d653f7e471c4"
2828
note = """## Triage and analysis
2929
30-
### Investigating Amazon Bedrock Guardrail High Confidence Misconduct Blocks.
30+
### Investigating Amazon Bedrock Guardrail High Confidence Content Filter Blocks.
3131
3232
Amazon Bedrock Guardrail is a set of features within Amazon Bedrock designed to help businesses apply robust safety and privacy controls to their generative AI applications.
3333
3434
It enables users to set guidelines and filters that manage content quality, relevancy, and adherence to responsible AI practices.
3535
36-
Through Guardrail, organizations can define "denied topics" to prevent the model from generating content on specific, undesired subjects,
37-
and they can establish thresholds for harmful content categories, including hate speech, violence, or offensive language.
36+
Through Guardrail, organizations can enable content filters for Hate, Insults, Sexual, Violence, and Misconduct, along with Prompt Attack filters,
37+
to prevent the model from generating content on specific, undesired subjects, and they can establish thresholds for harmful content categories.
3838
3939
#### Possible investigation steps
4040
41-
- Identify the user account that queried denied topics and whether it should perform this kind of action.
41+
- Identify the user account whose prompts caused high confidence content filter blocks and whether it should perform this kind of action.
4242
- Investigate other alerts associated with the user account during the past 48 hours.
4343
- Consider the time of day. If the user is a human (not a program or script), did the activity take place during a normal time of day?
4444
- Examine the account's prompts and responses in the last 24 hours.
@@ -66,7 +66,7 @@ This rule requires that guardrails are configured in AWS Bedrock. For more infor
6666
6767
https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-create.html
6868
"""
69-
severity = "high"
69+
severity = "medium"
7070
tags = [
7171
"Domain: LLM",
7272
"Data Source: AWS Bedrock",
@@ -82,9 +82,13 @@ query = '''
8282
from logs-aws_bedrock.invocation-*
8383
| MV_EXPAND gen_ai.compliance.violation_code
8484
| MV_EXPAND gen_ai.policy.confidence
85-
| where gen_ai.policy.action == "BLOCKED" and gen_ai.policy.confidence LIKE "HIGH" and gen_ai.compliance.violation_code LIKE "MISCONDUCT"
86-
| keep user.id
87-
| stats high_confidence_blocks = count() by user.id
88-
| where high_confidence_blocks > 5
89-
| sort high_confidence_blocks desc
85+
| MV_EXPAND gen_ai.policy.name
86+
| where gen_ai.policy.action == "BLOCKED" and gen_ai.policy.name == "content_policy" and gen_ai.policy.confidence LIKE "HIGH" and gen_ai.compliance.violation_code IN ("HATE", "MISCONDUCT", "SEXUAL", "INSULTS", "PROMPT_ATTACK", "VIOLENCE")
87+
| keep user.id, gen_ai.compliance.violation_code
88+
| stats block_count_per_violation = count() by user.id, gen_ai.compliance.violation_code
89+
| SORT block_count_per_violation DESC
90+
| keep user.id, gen_ai.compliance.violation_code, block_count_per_violation
91+
| STATS violation_count = SUM(block_count_per_violation) by user.id
92+
| WHERE violation_count > 5
93+
| SORT violation_count DESC
9094
'''
Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
[metadata]
2+
creation_date = "2024/11/20"
3+
maturity = "production"
4+
updated_date = "2024/11/20"
5+
min_stack_comments = "ES|QL rule type is still in technical preview as of 8.13, however this rule was tested successfully; integration in tech preview"
6+
min_stack_version = "8.13.0"
7+
8+
[rule]
9+
author = ["Elastic"]
10+
description = """
11+
Detects repeated compliance violation 'BLOCKED' actions coupled with a specific policy name such as 'sensitive_information_policy',
12+
indicating persistent misuse or attempts to probe the model's denied topics.
13+
"""
14+
false_positives = ["New model deployments.", "Testing updates to compliance policies."]
15+
from = "now-60m"
16+
interval = "10m"
17+
language = "esql"
18+
license = "Elastic License v2"
19+
name = "Unusual High Denied Sensitive Information Policy Blocks Detected"
20+
references = [
21+
"https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-components.html",
22+
"https://atlas.mitre.org/techniques/AML.T0051",
23+
"https://atlas.mitre.org/techniques/AML.T0054",
24+
"https://www.elastic.co/security-labs/elastic-advances-llm-security"
25+
]
26+
risk_score = 47
27+
rule_id = "0e1af929-42ed-4262-a846-55a7c54e7c84"
28+
note = """## Triage and analysis
29+
30+
### Investigating Amazon Bedrock Guardrail High Sensitive Information Policy Blocks.
31+
32+
Amazon Bedrock Guardrail is a set of features within Amazon Bedrock designed to help businesses apply robust safety and privacy controls to their generative AI applications.
33+
34+
It enables users to set guidelines and filters that manage content quality, relevancy, and adherence to responsible AI practices.
35+
36+
Through Guardrail, organizations can define "sensitive information filters" to prevent the model from generating content on specific, undesired subjects,
37+
and they can establish thresholds for harmful content categories.
38+
39+
#### Possible investigation steps
40+
41+
- Identify the user account that queried sensitive information and whether it should perform this kind of action.
42+
- Investigate other alerts associated with the user account during the past 48 hours.
43+
- Consider the time of day. If the user is a human (not a program or script), did the activity take place during a normal time of day?
44+
- Examine the account's prompts and responses in the last 24 hours.
45+
- If you suspect the account has been compromised, scope potentially compromised assets by tracking Amazon Bedrock model access, prompts generated, and responses to the prompts by the account in the last 24 hours.
46+
47+
### False positive analysis
48+
49+
- Verify that the user account which queried sensitive information is not testing any new model deployments or updated compliance policies in Amazon Bedrock guardrails.
50+
51+
### Response and remediation
52+
53+
- Initiate the incident response process based on the outcome of the triage.
54+
- Disable or limit the account during the investigation and response.
55+
- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:
56+
- Identify the account role in the cloud environment.
57+
- Identify if the attacker is moving laterally and compromising other Amazon Bedrock Services.
58+
- Identify any regulatory or legal ramifications related to this activity.
59+
- Review the permissions assigned to the implicated user group or role behind these requests to ensure they are authorized and expected to access bedrock and ensure that the least privilege principle is being followed.
60+
- Determine the initial vector abused by the attacker and take action to prevent reinfection via the same vector.
61+
- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).
62+
"""
63+
setup = """## Setup
64+
65+
This rule requires that guardrails are configured in AWS Bedrock. For more information, see the AWS Bedrock documentation:
66+
67+
https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-create.html
68+
"""
69+
severity = "medium"
70+
tags = [
71+
"Domain: LLM",
72+
"Data Source: AWS Bedrock",
73+
"Data Source: AWS S3",
74+
"Use Case: Policy Violation",
75+
"Mitre Atlas: T0051",
76+
"Mitre Atlas: T0054",
77+
]
78+
timestamp_override = "event.ingested"
79+
type = "esql"
80+
81+
query = '''
82+
from logs-aws_bedrock.invocation-*
83+
| MV_EXPAND gen_ai.policy.name
84+
| where gen_ai.policy.action == "BLOCKED" and gen_ai.compliance.violation_detected == "true" and gen_ai.policy.name == "sensitive_information_policy"
85+
| keep user.id
86+
| stats sensitive_information_block = count() by user.id
87+
| where sensitive_information_block > 5
88+
| sort sensitive_information_block desc
89+
'''
Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
[metadata]
2+
creation_date = "2024/11/20"
3+
maturity = "production"
4+
updated_date = "2024/11/20"
5+
min_stack_comments = "ES|QL rule type is still in technical preview as of 8.13, however this rule was tested successfully; integration in tech preview"
6+
min_stack_version = "8.13.0"
7+
8+
[rule]
9+
author = ["Elastic"]
10+
description = """
11+
Detects repeated compliance violation 'BLOCKED' actions coupled with a specific policy name such as 'topic_policy',
12+
indicating persistent misuse or attempts to probe the model's denied topics.
13+
"""
14+
false_positives = ["New model deployments.", "Testing updates to compliance policies."]
15+
from = "now-60m"
16+
interval = "10m"
17+
language = "esql"
18+
license = "Elastic License v2"
19+
name = "Unusual High Denied Topic Blocks Detected"
20+
references = [
21+
"https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-components.html",
22+
"https://atlas.mitre.org/techniques/AML.T0051",
23+
"https://atlas.mitre.org/techniques/AML.T0054",
24+
"https://www.elastic.co/security-labs/elastic-advances-llm-security"
25+
]
26+
risk_score = 47
27+
rule_id = "266bbea8-fcf9-4b0e-ba7b-fc00f6b1dc73"
28+
note = """## Triage and analysis
29+
30+
### Investigating Amazon Bedrock Guardrail High Denied Topic Blocks.
31+
32+
Amazon Bedrock Guardrail is a set of features within Amazon Bedrock designed to help businesses apply robust safety and privacy controls to their generative AI applications.
33+
34+
It enables users to set guidelines and filters that manage content quality, relevancy, and adherence to responsible AI practices.
35+
36+
Through Guardrail, organizations can define "denied topics" to prevent the model from generating content on specific, undesired subjects,
37+
and they can establish thresholds for harmful content categories, including hate speech, violence, or offensive language.
38+
39+
#### Possible investigation steps
40+
41+
- Identify the user account that queried denied topics and whether it should perform this kind of action.
42+
- Investigate other alerts associated with the user account during the past 48 hours.
43+
- Consider the time of day. If the user is a human (not a program or script), did the activity take place during a normal time of day?
44+
- Examine the account's prompts and responses in the last 24 hours.
45+
- If you suspect the account has been compromised, scope potentially compromised assets by tracking Amazon Bedrock model access, prompts generated, and responses to the prompts by the account in the last 24 hours.
46+
47+
### False positive analysis
48+
49+
- Verify that the user account which queried denied topics is not testing any new model deployments or updated compliance policies in Amazon Bedrock guardrails.
50+
51+
### Response and remediation
52+
53+
- Initiate the incident response process based on the outcome of the triage.
54+
- Disable or limit the account during the investigation and response.
55+
- Identify the possible impact of the incident and prioritize accordingly; the following actions can help you gain context:
56+
- Identify the account role in the cloud environment.
57+
- Identify if the attacker is moving laterally and compromising other Amazon Bedrock Services.
58+
- Identify any regulatory or legal ramifications related to this activity.
59+
- Review the permissions assigned to the implicated user group or role behind these requests to ensure they are authorized and expected to access bedrock and ensure that the least privilege principle is being followed.
60+
- Determine the initial vector abused by the attacker and take action to prevent reinfection via the same vector.
61+
- Using the incident response data, update logging and audit policies to improve the mean time to detect (MTTD) and the mean time to respond (MTTR).
62+
"""
63+
setup = """## Setup
64+
65+
This rule requires that guardrails are configured in AWS Bedrock. For more information, see the AWS Bedrock documentation:
66+
67+
https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails-create.html
68+
"""
69+
severity = "medium"
70+
tags = [
71+
"Domain: LLM",
72+
"Data Source: AWS Bedrock",
73+
"Data Source: AWS S3",
74+
"Use Case: Policy Violation",
75+
"Mitre Atlas: T0051",
76+
"Mitre Atlas: T0054",
77+
]
78+
timestamp_override = "event.ingested"
79+
type = "esql"
80+
81+
query = '''
82+
from logs-aws_bedrock.invocation-*
83+
| MV_EXPAND gen_ai.policy.name
84+
| where gen_ai.policy.action == "BLOCKED" and gen_ai.compliance.violation_detected == "true" and gen_ai.policy.name == "topic_policy"
85+
| keep user.id
86+
| stats denied_topics = count() by user.id
87+
| where denied_topics > 5
88+
| sort denied_topics desc
89+
'''

0 commit comments

Comments
 (0)