AI Application Security (LLM)
Application Level DoS - Excessive Resource Consumption - Injection (Prompt): Varies
AI Application Security - Large Language Model (LLM) Security - Prompt Injection: P1
AI Application Security - Large Language Model (LLM) Security - LLM Output Handling: P1
AI Application Security - Large Language Model (LLM) Security - Training Data Poisoning: P1
AI Application Security - Large Language Model (LLM) Security - Excessive Agency/Permission Manipulation: P2
TimmyBugcrowd committed Nov 14, 2023
1 parent 7aa1bec commit 254dcb9
Showing 4 changed files with 127 additions and 0 deletions.
35 changes: 35 additions & 0 deletions mappings/cvss_v3/cvss_v3.json
@@ -709,6 +709,15 @@
{
"id": "app_crash",
"cvss_v3": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:N"
},
{
"id": "excessive_resource_consumption",
"children": [
{
"id": "",
"cvss_v3": "AV:N/AC:L/PR:N/UI:R/S:U/C:L/I:H/A:H"
}
]
}
]
},
@@ -1242,6 +1251,32 @@
}
]
},
{
"id": "ai_application_security",
"children": [
{
"id": "llm_security",
"children": [
{
"id": "prompt_injection",
"cvss_v3": "AV:N/AC:L/PR:N/UI:R/S:C/C:H/I:L/A:L"
},
{
"id": "llm_output_handling",
"cvss_v3": "AV:N/AC:L/PR:N/UI:R/S:C/C:L/I:H/A:L"
},
{
"id": "training_data_poisoning",
"cvss_v3": "AV:N/AC:H/PR:L/UI:N/S:C/C:H/I:H/A:H"
},
{
"id": "excessive_agency_permission_manipulation",
"cvss_v3": "AV:N/AC:L/PR:L/UI:R/S:C/C:H/I:H/A:H"
}
]
}
]
},
{
"id": "indicators_of_compromise",
"cvss_v3": "AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:N"
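For reference, the CVSS v3 vectors added above translate into numeric base scores via the published CVSS v3.1 formula. Below is a minimal Python sketch of that calculation; it is not part of this repository, and the weights and rounding rule follow the public CVSS v3.1 specification. Run against the prompt_injection vector it should print roughly 8.8 (High), consistent with the P1 rating elsewhere in this commit.

import math

# CVSS v3.1 metric weights, taken from the official specification
AV = {"N": 0.85, "A": 0.62, "L": 0.55, "P": 0.2}
AC = {"L": 0.77, "H": 0.44}
PR_UNCHANGED = {"N": 0.85, "L": 0.62, "H": 0.27}
PR_CHANGED = {"N": 0.85, "L": 0.68, "H": 0.5}
UI = {"N": 0.85, "R": 0.62}
CIA = {"H": 0.56, "L": 0.22, "N": 0.0}

def roundup(value):
    # Rounding helper defined in CVSS v3.1 Appendix A (round up to one decimal)
    int_input = round(value * 100000)
    if int_input % 10000 == 0:
        return int_input / 100000.0
    return (math.floor(int_input / 10000) + 1) / 10.0

def base_score(vector):
    # Parse "AV:N/AC:L/..." into a metric -> value mapping
    m = dict(part.split(":") for part in vector.split("/"))
    changed = m["S"] == "C"
    pr = (PR_CHANGED if changed else PR_UNCHANGED)[m["PR"]]
    exploitability = 8.22 * AV[m["AV"]] * AC[m["AC"]] * pr * UI[m["UI"]]
    iss = 1 - (1 - CIA[m["C"]]) * (1 - CIA[m["I"]]) * (1 - CIA[m["A"]])
    impact = 7.52 * (iss - 0.029) - 3.25 * (iss - 0.02) ** 15 if changed else 6.42 * iss
    if impact <= 0:
        return 0.0
    total = impact + exploitability
    return roundup(min(1.08 * total, 10)) if changed else roundup(min(total, 10))

# Vector added for prompt_injection in this commit
print(base_score("AV:N/AC:L/PR:N/UI:R/S:C/C:H/I:L/A:L"))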
4 changes: 4 additions & 0 deletions mappings/cwe/cwe.json
@@ -388,6 +388,10 @@
}
]
},
{
"id": "ai_application_security",
"cwe": null
},
{
"id": "lack_of_binary_hardening",
"cwe": ["CWE-693"]
39 changes: 39 additions & 0 deletions mappings/remediation_advice/remediation_advice.json
@@ -1755,6 +1755,45 @@
}
]
},
{
"id": "ai_application_security",
"children": [
{
"id": "llm_security",
"children": [
{
"id": "prompt_injection",
"remediation_advice": "Implement robust input sanitization to prevent malicious or unintended prompt execution. Establish strict access controls and usage monitoring to detect and prevent unauthorized or anomalous interactions with the LLM. Regularly review and update the model's training data and algorithms to reduce vulnerabilities. Educate users and developers on safe interaction practices with AI systems.",
"references": [
"https://developer.nvidia.com/blog/securing-llm-systems-against-prompt-injection"
]
},
{
"id": "llm_output_handling",
"remediation_advice": "Implement output filtering and validation to ensure the LLM's responses are appropriate and secure. Use context-aware controls to manage how the LLM processes and responds to various inputs. Regularly audit and update the LLM to handle new types of outputs and emerging security threats. Train users on the potential risks associated with LLM outputs, particularly in sensitive applications.",
"references": [
"https://whylabs.ai/blog/posts/safeguard-monitor-large-language-model-llm-applications"
]
},
{
"id": "training_data_poisoning",
"remediation_advice": "Implement robust anomaly detection systems to identify and address poisoned data in real-time. Regularly retrain the LLM with clean, diverse, and representative datasets to correct any potential biases or vulnerabilities. Engage in continuous monitoring and auditing of the training process and data sources.",
"references": [
"https://owasp.org/www-project-top-10-for-large-language-model-applications/#:~:text=,security%2C%20accuracy%2C%20or%20ethical%20behavior",
"https://owasp.org/www-project-top-10-for-large-language-model-applications/Archive/0_1_vulns/Training_Data_Poisoning.html"
]
},
{
"id": "excessive_agency_permission_manipulation",
"remediation_advice": "Implement stringent access controls and define clear user permissions for interacting with the LLM. Employ regular audits and monitoring to detect and prevent unauthorized or excessive permission changes. Use role-based access control systems to manage user permissions effectively. Educate users and administrators about the risks of permission manipulation and establish protocols for safely managing access rights.",
"references": [
"https://owasp.org/www-project-ai-security-and-privacy-guide/#:~:text=,auditability%2C%20bias%20countermeasures%20and%20oversight"
]
}
]
}
]
},
{
"id": "gnss_gps",
"children": [
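The prompt_injection and llm_output_handling entries above recommend input sanitization and output filtering/validation. Purely as an illustration of those two recommendations (the pattern list, length cap, and the llm_client name are invented for this sketch and are not defined by the VRT), a thin wrapper around an LLM call might look like:

import html
import re

# Patterns that commonly appear in prompt-injection attempts; an illustrative,
# deliberately incomplete denylist. Real deployments need layered defenses.
SUSPICIOUS_PATTERNS = [
    r"ignore (all|any|previous) instructions",
    r"system prompt",
    r"you are now",
]

def sanitize_prompt(user_input: str, max_length: int = 4000) -> str:
    # A length cap also limits excessive resource consumption from oversized prompts
    text = user_input[:max_length]
    for pattern in SUSPICIOUS_PATTERNS:
        if re.search(pattern, text, re.IGNORECASE):
            raise ValueError("Potential prompt injection detected")
    return text

def handle_output(model_response: str) -> str:
    # Treat model output as untrusted: reject script-like content and escape
    # the rest before it reaches an HTML context.
    if re.search(r"<script\b", model_response, re.IGNORECASE):
        raise ValueError("Disallowed content in model output")
    return html.escape(model_response)

# Hypothetical usage around an arbitrary LLM client:
# reply = llm_client.complete(sanitize_prompt(request_text))
# safe_reply = handle_output(reply)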
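The excessive_agency_permission_manipulation advice likewise recommends role-based access control over what an LLM-driven agent may do on a user's behalf. A minimal deny-by-default sketch, with roles and tool names made up solely for illustration:

# Map roles to the tools an LLM agent may invoke (hypothetical names)
ROLE_PERMISSIONS = {
    "viewer": {"search_docs"},
    "editor": {"search_docs", "update_record"},
    "admin": {"search_docs", "update_record", "delete_record"},
}

def authorize_tool_call(role: str, tool_name: str) -> None:
    allowed = ROLE_PERMISSIONS.get(role, set())
    if tool_name not in allowed:
        # Deny by default; the raised error can also feed audit logging
        raise PermissionError(f"Role '{role}' may not call '{tool_name}'")

# The agent checks permissions before executing any model-suggested action:
authorize_tool_call("editor", "update_record")    # allowed
# authorize_tool_call("viewer", "delete_record")  # raises PermissionError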
49 changes: 49 additions & 0 deletions vulnerability-rating-taxonomy.json
@@ -1380,6 +1380,19 @@
"name": "Application-Level Denial-of-Service (DoS)",
"type": "category",
"children": [
{
"id": "excessive_resource_consumption",
"name": "Excessive Resource Consumption",
"type": "subcategory",
"children": [
{
"id": "injection_prompt",
"name": "Injection (Prompt)",
"type": "variant",
"priority": "varies"
}
]
},
{
"id": "critical_impact_and_or_easy_difficulty",
"name": "Critical Impact and/or Easy Difficulty",
@@ -2460,6 +2473,42 @@
}
]
},
{
"id": "ai_application_security",
"name": "AI Application Security",
"type": "category",
"children": [
{
"id": "llm_security",
"name": "Large Language Model (LLM) Security",
"type": "subcategory",
"children":[
{
"id": "prompt_injection",
"name": "Prompt Injection",
"type": "variant",
"priority": 1
},
{
"id": "llm_output_handling",
"name": "LLM Output Handling",
"type": "variant",
"priority": 1
},
{
"id": "training_data_poisoning",
"name": "Training Data Poisoning",
"type": "variant",
"priority": 1
},
{
"id": "excessive_agency_permission_manipulation",
"name": "Excessive Agency/Permission Manipulation",
"type": "variant",
"priority": 2
}
]
}
]
},
{
"id": "indicators_of_compromise",
"name": "Indicators of Compromise",
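The entries above nest category → subcategory → variant, with the priority carried on the variant. A consumer of vulnerability-rating-taxonomy.json could resolve a priority by walking the nested children lists; the sketch below assumes the file keeps its entries under a top-level "content" key (falling back to a bare list) and is illustrative only.

import json

def find_priority(nodes, path):
    # Walk nested "children" lists, matching ids along the given path
    node_id, *rest = path
    for node in nodes:
        if node.get("id") == node_id:
            if not rest:
                return node.get("priority")
            return find_priority(node.get("children", []), rest)
    return None

with open("vulnerability-rating-taxonomy.json") as fh:
    data = json.load(fh)

entries = data.get("content", data) if isinstance(data, dict) else data

print(find_priority(entries, ["ai_application_security", "llm_security", "prompt_injection"]))
# Expected output for the entries added in this commit: 1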
