John6666 committed on
Commit 5373262 • 1 Parent(s): a108184

Upload llmdolphin.py

Files changed (1)
1. llmdolphin.py +9 -4
llmdolphin.py CHANGED
@@ -8,6 +8,7 @@ from llama_cpp_agent.chat_history.messages import Roles
 from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
 import wrapt_timeout_decorator
 from pathlib import Path
+from llama_cpp_agent.messages_formatter import MessagesFormatter
 from formatter import mistral_v1_formatter, mistral_v2_formatter, mistral_v3_tekken_formatter
 
 llm_models_dir = "./llm_models"
@@ -819,8 +820,9 @@ llm_formats = {
     "PHI 3": MessagesFormatterType.PHI_3,
     "Autocoder": MessagesFormatterType.AUTOCODER,
     "DeepSeek Coder v2": MessagesFormatterType.DEEP_SEEK_CODER_2,
-    "Gemma 2": MessagesFormatterType.ALPACA,
+    "Gemma 2": MessagesFormatterType.GEMMA_2,
     "Qwen2": MessagesFormatterType.OPEN_CHAT,
+    "Open Interpreter": MessagesFormatterType.OPEN_INTERPRETER,
     "Mistral Tokenizer V1": mistral_v1_formatter,
     "Mistral Tokenizer V2": mistral_v2_formatter,
     "Mistral Tokenizer V3 - Tekken": mistral_v3_tekken_formatter,
@@ -1243,7 +1245,8 @@ def dolphin_respond(
     agent = LlamaCppAgent(
         provider,
         system_prompt=f"{system_message}",
-        predefined_messages_formatter_type=chat_template,
+        predefined_messages_formatter_type=chat_template if not isinstance(chat_template, MessagesFormatter) else None,
+        custom_messages_formatter=chat_template if isinstance(chat_template, MessagesFormatter) else None,
         debug_output=False
     )
 
@@ -1337,7 +1340,8 @@ def dolphin_respond_auto(
     agent = LlamaCppAgent(
         provider,
         system_prompt=f"{system_message}",
-        predefined_messages_formatter_type=chat_template,
+        predefined_messages_formatter_type=chat_template if not isinstance(chat_template, MessagesFormatter) else None,
+        custom_messages_formatter=chat_template if isinstance(chat_template, MessagesFormatter) else None,
         debug_output=False
     )
 
@@ -1432,7 +1436,8 @@ def respond_playground(
     agent = LlamaCppAgent(
         provider,
         system_prompt=f"{system_message}",
-        predefined_messages_formatter_type=chat_template,
+        predefined_messages_formatter_type=chat_template if not isinstance(chat_template, MessagesFormatter) else None,
+        custom_messages_formatter=chat_template if isinstance(chat_template, MessagesFormatter) else None,
         debug_output=False
     )
 
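
The constructor change above dispatches on the type of chat_template: names in llm_formats that map to MessagesFormatterType enum members (including the corrected GEMMA_2 entry and the new OPEN_INTERPRETER one) still go through predefined_messages_formatter_type, while the Mistral tokenizer formatters from formatter.py (presumably MessagesFormatter instances) are passed via custom_messages_formatter. Below is a minimal sketch of that dispatch, assuming llama-cpp-agent and llama-cpp-python are installed; build_agent, the model path, and the provider wiring are illustrative stand-ins for the app's own state handling, while the keyword arguments mirror the commit.

# Minimal sketch of the formatter dispatch added in this commit; helper and
# model names are hypothetical, the keyword arguments mirror llmdolphin.py.
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.messages_formatter import MessagesFormatter, MessagesFormatterType

def build_agent(provider, system_message: str, chat_template):
    # chat_template is either a MessagesFormatterType enum member (predefined
    # template) or a MessagesFormatter instance (custom template, e.g. the
    # Mistral tokenizer formatters); exactly one of the two kwargs is set.
    return LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template if not isinstance(chat_template, MessagesFormatter) else None,
        custom_messages_formatter=chat_template if isinstance(chat_template, MessagesFormatter) else None,
        debug_output=False,
    )

# Hypothetical usage with a predefined template:
# llm = Llama(model_path="./llm_models/your-model.gguf", n_ctx=4096)
# provider = LlamaCppPythonProvider(llm)
# agent = build_agent(provider, "You are a helpful assistant.", MessagesFormatterType.GEMMA_2)

Passing None for whichever keyword is unused keeps the call working both for selections that remain plain MessagesFormatterType members and for the custom formatter objects.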