Skip to content

Commit 8e7a418

Browse files
AI Application Security (LLM) (#389)
* llm model added * llm model added * llm model added * changelog.md and metadata release updated --------- Co-authored-by: Deepak Kumar Jha <[email protected]>
1 parent d88c735 commit 8e7a418

File tree

5 files changed

+133
-1
lines changed

5 files changed

+133
-1
lines changed

CHANGELOG.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,14 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/) and this p
1010

1111
### Changed
1212

13+
## [v1.12](https://github.com/bugcrowd/vulnerability-rating-taxonomy/compare/v1.11...v1.12) - 2023-12-18
14+
### Added
15+
- Application Level DoS - Excessive Resource Consumption - Injection (Prompt) - VARIES
16+
- AI Application Security - Large Language Model (LLM) Security - Prompt Injection - P1
17+
- AI Application Security - Large Language Model (LLM) Security - LLM Output Handling - P1
18+
- AI Application Security - Large Language Model (LLM) Security - Training Data Poisoning - P1
19+
- AI Application Security - Large Language Model (LLM) Security - Excessive Agency/Permission Manipulation - P2
20+
1321
## [v1.11](https://github.com/bugcrowd/vulnerability-rating-taxonomy/compare/v1.10...v1.11) - 2023-11-20
1422
### Added
1523
- Sensitive Data Exposure - Disclosure of Secrets - PII Leakage/Exposure: VARIES

mappings/cvss_v3/cvss_v3.json

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -709,6 +709,10 @@
709709
{
710710
"id": "app_crash",
711711
"cvss_v3": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:N"
712+
},
713+
{
714+
"id": "excessive_resource_consumption",
715+
"cvss_v3": "AV:N/AC:L/PR:N/UI:R/S:U/C:L/I:H/A:H"
712716
}
713717
]
714718
},
@@ -1245,6 +1249,32 @@
12451249
{
12461250
"id": "indicators_of_compromise",
12471251
"cvss_v3": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:N"
1252+
},
1253+
{
1254+
"id": "ai_application_security",
1255+
"children": [
1256+
{
1257+
"id": "llm_security",
1258+
"children": [
1259+
{
1260+
"id": "prompt_injection",
1261+
"cvss_v3": "AV:N/AC:L/PR:N/UI:R/S:C/C:H/I:L/A:L"
1262+
},
1263+
{
1264+
"id": "llm_output_handling",
1265+
"cvss_v3": "AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:H/A:L"
1266+
},
1267+
{
1268+
"id": "training_data_poisoning",
1269+
"cvss_v3": "AV:N/AC:H/PR:L/UI:N/S:C/C:H/I:H/A:H"
1270+
},
1271+
{
1272+
"id": "excessive_agency_permission_manipulation",
1273+
"cvss_v3": "AV:N/AC:L/PR:L/UI:R/S:C/C:H/I:H/A:H"
1274+
}
1275+
]
1276+
}
1277+
]
12481278
}
12491279
]
12501280
}

mappings/cwe/cwe.json

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -388,6 +388,10 @@
388388
}
389389
]
390390
},
391+
{
392+
"id": "ai_application_security",
393+
"cwe": null
394+
},
391395
{
392396
"id": "lack_of_binary_hardening",
393397
"cwe": ["CWE-693"]

mappings/remediation_advice/remediation_advice.json

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1803,6 +1803,45 @@
18031803
}
18041804
]
18051805
},
1806+
{
1807+
"id": "ai_application_security",
1808+
"children": [
1809+
{
1810+
"id": "llm_security",
1811+
"children": [
1812+
{
1813+
"id": "prompt_injection",
1814+
"remediation_advice": "Implement robust input sanitization to prevent malicious or unintended prompt execution. Establish strict access controls and usage monitoring to detect and prevent unauthorized or anomalous interactions with the LLM. Regularly review and update the model's training data and algorithms to reduce vulnerabilities. Educate users and developers on safe interaction practices with AI systems.",
1815+
"references": [
1816+
"https://developer.nvidia.com/blog/securing-llm-systems-against-prompt-injection"
1817+
]
1818+
},
1819+
{
1820+
"id": "llm_output_handling",
1821+
"remediation_advice": "Implement output filtering and validation to ensure the LLM's responses are appropriate and secure. Use context-aware controls to manage how the LLM processes and responds to various inputs. Regularly audit and update the LLM to handle new types of outputs and emerging security threats. Train users on the potential risks associated with LLM outputs, particularly in sensitive applications.",
1822+
"references": [
1823+
"https://whylabs.ai/blog/posts/safeguard-monitor-large-language-model-llm-applications"
1824+
]
1825+
},
1826+
{
1827+
"id": "training_data_poisoning",
1828+
"remediation_advice": "Implement robust anomaly detection systems to identify and address poisoned data in real-time. Regularly retrain the LLM with clean, diverse, and representative datasets to correct any potential biases or vulnerabilities. Engage in continuous monitoring and auditing of the training process and data sources.",
1829+
"references": [
1830+
"https://owasp.org/www-project-top-10-for-large-language-model-applications/#:~:text=,security%2C%20accuracy%2C%20or%20ethical%20behavior",
1831+
"https://owasp.org/www-project-top-10-for-large-language-model-applications/Archive/0_1_vulns/Training_Data_Poisoning.html"
1832+
]
1833+
},
1834+
{
1835+
"id": "excessive_agency_permission_manipulation",
1836+
"remediation_advice": "Implement stringent access controls and define clear user permissions for interacting with the LLM. Employ regular audits and monitoring to detect and prevent unauthorized or excessive permission changes. Use role-based access control systems to manage user permissions effectively. Educate users and administrators about the risks of permission manipulation and establish protocols for safely managing access rights.",
1837+
"references": [
1838+
"https://owasp.org/www-project-ai-security-and-privacy-guide/#:~:text=,auditability%2C%20bias%20countermeasures%20and%20oversight"
1839+
]
1840+
}
1841+
]
1842+
}
1843+
]
1844+
},
18061845
{
18071846
"id": "indicators_of_compromise",
18081847
"remediation_advice": ""

vulnerability-rating-taxonomy.json

Lines changed: 52 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"metadata": {
3-
"release_date": "2023-11-20T00:00:00+00:00"
3+
"release_date": "2023-12-18T00:00:00+00:00"
44
},
55
"content": [
66
{
@@ -1352,6 +1352,19 @@
13521352
"name": "Application-Level Denial-of-Service (DoS)",
13531353
"type": "category",
13541354
"children": [
1355+
{
1356+
"id": "excessive_resource_consumption",
1357+
"name": "Excessive Resource Consumption",
1358+
"type": "subcategory",
1359+
"children": [
1360+
{
1361+
"id": "injection_prompt",
1362+
"name": "Injection (Prompt)",
1363+
"type": "variant",
1364+
"priority": null
1365+
}
1366+
]
1367+
},
13551368
{
13561369
"id": "critical_impact_and_or_easy_difficulty",
13571370
"name": "Critical Impact and/or Easy Difficulty",
@@ -2432,6 +2445,44 @@
24322445
}
24332446
]
24342447
},
2448+
{
2449+
"id": "ai_application_security",
2450+
"name": "AI Application Security",
2451+
"type": "category",
2452+
"children": [
2453+
{
2454+
"id": "llm_security",
2455+
"name": "Large Language Model (LLM) Security",
2456+
"type": "subcategory",
2457+
          "children": [
2458+
{
2459+
"id": "prompt_injection",
2460+
"name": "Prompt Injection",
2461+
"type": "variant",
2462+
"priority": 1
2463+
},
2464+
{
2465+
"id": "llm_output_handling",
2466+
"name": "LLM Output Handling",
2467+
"type": "variant",
2468+
"priority": 1
2469+
},
2470+
{
2471+
"id": "training_data_poisoning",
2472+
"name": "Training Data Poisoning",
2473+
"type": "variant",
2474+
"priority": 1
2475+
},
2476+
{
2477+
"id": "excessive_agency_permission_manipulation",
2478+
"name": "Excessive Agency/Permission Manipulation",
2479+
"type": "variant",
2480+
"priority": 2
2481+
}
2482+
]
2483+
}
2484+
]
2485+
},
24352486
{
24362487
"id": "indicators_of_compromise",
24372488
"name": "Indicators of Compromise",

0 commit comments

Comments (0)