File size: 1,455 Bytes
3cec7b2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
from llm_guard.input_scanners import PromptInjection
from llm_guard.input_scanners.prompt_injection import MatchType
def is_subsequence(main_string, sub_string):
"""
Checks if sub_string is a subsequence of main_string.
A subsequence allows arbitrary characters in between the characters of sub_string in main_string.
Parameters:
main_string (str): The string in which to search.
sub_string (str): The string to search for.
Returns:
bool: True if sub_string is a subsequence of main_string, False otherwise.
"""
main_string = main_string.lower()
sub_string = sub_string.lower()
main_len = len(main_string)
sub_len = len(sub_string)
if sub_len == 0:
return True
if main_len == 0:
return False
main_index = 0
sub_index = 0
while main_index < main_len and sub_index < sub_len:
if main_string[main_index] == sub_string[sub_index]:
sub_index += 1
main_index += 1
return sub_index == sub_len
def is_malicious(user_input: str):
"""Function to check if the user input is malicious. Uses the LLM Guard prompt injection scanner.
Uses a special model `ProtectAI/deberta-v3-base-prompt-injection-v2` to scan the user input for malicious content.
"""
scanner = PromptInjection(threshold=0.5, match_type=MatchType.FULL)
sanitized_prompt, is_valid, risk_score = scanner.scan(user_input)
return is_valid, risk_score
|