Create security.py
security.py +39 -0
security.py
ADDED
@@ -0,0 +1,39 @@
import re
from typing import List

class SecurityValidator:
    @staticmethod
    def validate_input(text: str, max_length: int = 1000) -> bool:
        """Validate input text for security"""
        if len(text) > max_length:
            raise ValueError(f"Input too long. Max {max_length} characters.")

        # Check for potential prompt injection
        injection_patterns = [
            r"ignore previous",
            r"system prompt",
            r"forget your instructions"
        ]

        for pattern in injection_patterns:
            if re.search(pattern, text, re.IGNORECASE):
                raise SecurityError("Potential prompt injection detected")

        return True

    @staticmethod
    def sanitize_output(text: str) -> str:
        """Sanitize model output"""
        # Remove any potentially harmful content
        harmful_patterns = [
            r"<script.*?>.*?</script>",
            r"<iframe.*?>.*?</iframe>"
        ]

        for pattern in harmful_patterns:
            text = re.sub(pattern, "", text, flags=re.IGNORECASE | re.DOTALL)

        return text

class SecurityError(Exception):
    pass
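
A minimal usage sketch (not part of the commit), assuming security.py is importable from the caller's path; the input and output strings here are purely illustrative:

    from security import SecurityValidator, SecurityError

    user_text = "Please summarize this article for me."

    try:
        # Raises ValueError if too long, SecurityError if an injection pattern matches.
        SecurityValidator.validate_input(user_text, max_length=500)
    except SecurityError:
        print("Rejected: possible prompt injection")
    except ValueError as exc:
        print(f"Rejected: {exc}")
    else:
        # Hypothetical model output containing a script tag to be stripped.
        model_output = "<script>alert('x')</script>Here is the summary."
        print(SecurityValidator.sanitize_output(model_output))
        # -> "Here is the summary."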