John6666 committed on
Commit
4ec1e55
1 Parent(s): 025c21d

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +25 -16
  2. dc.py +15 -10
  3. llmdolphin.py +153 -0
app.py CHANGED
@@ -87,45 +87,54 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
87
  vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
88
 
89
  with gr.Accordion("LoRA", open=True, visible=True):
 
 
 
 
 
 
 
 
 
90
  with gr.Row():
91
  with gr.Column():
92
  with gr.Row():
93
- lora1 = gr.Dropdown(label="LoRA 1", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
94
- lora1_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 1: weight")
95
  with gr.Row():
96
- lora1_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
97
  lora1_copy = gr.Button(value="Copy example to prompt", visible=False)
98
  lora1_md = gr.Markdown(value="", visible=False)
99
  with gr.Column():
100
  with gr.Row():
101
- lora2 = gr.Dropdown(label="LoRA 2", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
102
- lora2_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 2: weight")
103
  with gr.Row():
104
- lora2_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
105
  lora2_copy = gr.Button(value="Copy example to prompt", visible=False)
106
  lora2_md = gr.Markdown(value="", visible=False)
107
  with gr.Column():
108
  with gr.Row():
109
- lora3 = gr.Dropdown(label="LoRA 3", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
110
- lora3_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 3: weight")
111
  with gr.Row():
112
- lora3_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
113
  lora3_copy = gr.Button(value="Copy example to prompt", visible=False)
114
  lora3_md = gr.Markdown(value="", visible=False)
115
  with gr.Column():
116
  with gr.Row():
117
- lora4 = gr.Dropdown(label="LoRA 4", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
118
- lora4_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 4: weight")
119
  with gr.Row():
120
- lora4_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
121
  lora4_copy = gr.Button(value="Copy example to prompt", visible=False)
122
  lora4_md = gr.Markdown(value="", visible=False)
123
  with gr.Column():
124
  with gr.Row():
125
- lora5 = gr.Dropdown(label="LoRA 5", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
126
- lora5_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 5: weight")
127
  with gr.Row():
128
- lora5_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
129
  lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
130
  lora5_md = gr.Markdown(value="", visible=False)
131
  with gr.Accordion("From URL", open=True, visible=True):
@@ -137,7 +146,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
137
  lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
138
  lora_search_civitai_json = gr.JSON(value={}, visible=False)
139
  lora_search_civitai_desc = gr.Markdown(value="", visible=False)
140
- lora_download_url = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
141
  lora_download = gr.Button("Get and set LoRA and apply to prompt")
142
 
143
  with gr.Row():
 
87
  vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
88
 
89
  with gr.Accordion("LoRA", open=True, visible=True):
90
+ def lora_dropdown(label):
91
+ return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
92
+
93
+ def lora_scale_slider(label):
94
+ return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label=label)
95
+
96
+ def lora_textbox():
97
+ return gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
98
+
99
  with gr.Row():
100
  with gr.Column():
101
  with gr.Row():
102
+ lora1 = lora_dropdown("LoRA 1")
103
+ lora1_wt = lora_scale_slider("LoRA 1: weight")
104
  with gr.Row():
105
+ lora1_info = lora_textbox()
106
  lora1_copy = gr.Button(value="Copy example to prompt", visible=False)
107
  lora1_md = gr.Markdown(value="", visible=False)
108
  with gr.Column():
109
  with gr.Row():
110
+ lora2 = lora_dropdown("LoRA 2")
111
+ lora2_wt = lora_scale_slider("LoRA 2: weight")
112
  with gr.Row():
113
+ lora2_info = lora_textbox()
114
  lora2_copy = gr.Button(value="Copy example to prompt", visible=False)
115
  lora2_md = gr.Markdown(value="", visible=False)
116
  with gr.Column():
117
  with gr.Row():
118
+ lora3 = lora_dropdown("LoRA 3")
119
+ lora3_wt = lora_scale_slider("LoRA 3: weight")
120
  with gr.Row():
121
+ lora3_info = lora_textbox()
122
  lora3_copy = gr.Button(value="Copy example to prompt", visible=False)
123
  lora3_md = gr.Markdown(value="", visible=False)
124
  with gr.Column():
125
  with gr.Row():
126
+ lora4 = lora_dropdown("LoRA 4")
127
+ lora4_wt = lora_scale_slider("LoRA 4: weight")
128
  with gr.Row():
129
+ lora4_info = lora_textbox()
130
  lora4_copy = gr.Button(value="Copy example to prompt", visible=False)
131
  lora4_md = gr.Markdown(value="", visible=False)
132
  with gr.Column():
133
  with gr.Row():
134
+ lora5 = lora_dropdown("LoRA 5")
135
+ lora5_wt = lora_scale_slider("LoRA 5: weight")
136
  with gr.Row():
137
+ lora5_info = lora_textbox()
138
  lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
139
  lora5_md = gr.Markdown(value="", visible=False)
140
  with gr.Accordion("From URL", open=True, visible=True):
 
146
  lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
147
  lora_search_civitai_json = gr.JSON(value={}, visible=False)
148
  lora_search_civitai_desc = gr.Markdown(value="", visible=False)
149
+ lora_download_url = gr.Textbox(label="LoRA URL", placeholder="https://civitai.com/api/download/models/28907", lines=1)
150
  lora_download = gr.Button("Get and set LoRA and apply to prompt")
151
 
152
  with gr.Row():
dc.py CHANGED
@@ -86,6 +86,13 @@ preprocessor_controlnet = {
86
  "None",
87
  "None (anime)",
88
  ],
 
 
 
 
 
 
 
89
  "shuffle": [
90
  "ContentShuffle",
91
  "None",
@@ -253,20 +260,20 @@ upscaler_dict_gui = {
253
  'Latent (bicubic antialiased)': 'Latent (bicubic antialiased)',
254
  'Latent (nearest)': 'Latent (nearest)',
255
  'Latent (nearest-exact)': 'Latent (nearest-exact)',
256
- "RealESRGAN_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
257
- "RealESRNet_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
258
  "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
259
  "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
260
  "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
261
  "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
262
- "realesr-general-wdn-x4v3" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
263
  "4x-UltraSharp" : "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
264
- "4x_foolhardy_Remacri" : "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
265
- "Remacri4xExtraSmoother" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
266
- "AnimeSharp4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
267
  "lollypop" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
268
- "RealisticRescaler4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
269
- "NickelbackFS4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
270
  }
271
 
272
  upscaler_keys = list(upscaler_dict_gui.keys())
@@ -536,8 +543,6 @@ class GuiSD:
536
  self.model.device = torch.device("cuda:0")
537
  model_precision = torch.float16
538
  if not self.model:
539
- from stablepy import Model_Diffusers
540
-
541
  print("Loading model...")
542
  self.model = Model_Diffusers(
543
  base_model_id=model_name,
 
86
  "None",
87
  "None (anime)",
88
  ],
89
+ "lineart_anime": [
90
+ "Lineart",
91
+ "Lineart coarse",
92
+ "Lineart (anime)",
93
+ "None",
94
+ "None (anime)",
95
+ ],
96
  "shuffle": [
97
  "ContentShuffle",
98
  "None",
 
260
  'Latent (bicubic antialiased)': 'Latent (bicubic antialiased)',
261
  'Latent (nearest)': 'Latent (nearest)',
262
  'Latent (nearest-exact)': 'Latent (nearest-exact)',
263
+ "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
264
+ "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
265
  "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
266
  "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
267
  "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
268
  "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
269
+ "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
270
  "4x-UltraSharp" : "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
271
+ "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
272
+ "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
273
+ "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
274
  "lollypop" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
275
+ "RealisticRescaler4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
276
+ "NickelbackFS4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
277
  }
278
 
279
  upscaler_keys = list(upscaler_dict_gui.keys())
 
543
  self.model.device = torch.device("cuda:0")
544
  model_precision = torch.float16
545
  if not self.model:
 
 
546
  print("Loading model...")
547
  self.model = Model_Diffusers(
548
  base_model_id=model_name,
llmdolphin.py CHANGED
@@ -7,6 +7,7 @@ from llama_cpp_agent.chat_history import BasicChatHistory
7
  from llama_cpp_agent.chat_history.messages import Roles
8
  from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
9
  import wrapt_timeout_decorator
 
10
 
11
 
12
  llm_models_dir = "./llm_models"
@@ -19,6 +20,8 @@ llm_models = {
19
  #"": ["", MessagesFormatterType.PHI_3],
20
  "mn-12b-lyra-v2a1-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v2a1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
21
  "L3-8B-Tamamo-v1.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Tamamo-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
 
 
22
  "Instant-RP-Noodles-12B-v1.3.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-v1.3-GGUF", MessagesFormatterType.MISTRAL],
23
  "MN-12B-Lyra-v4-Q4_K_M.gguf": ["bartowski/MN-12B-Lyra-v4-GGUF", MessagesFormatterType.CHATML],
24
  "Lyra4-Gutenberg-12B.Q4_K_M.gguf": ["mradermacher/Lyra4-Gutenberg-12B-GGUF", MessagesFormatterType.CHATML],
@@ -50,9 +53,86 @@ llm_models = {
50
  "StarDust-12b-v2.i1-Q5_K_M.gguf": ["mradermacher/StarDust-12b-v2-i1-GGUF", MessagesFormatterType.CHATML],
51
  "Rocinante-12B-v2c-Q4_K_M.gguf": ["TheDrummer/UnslopNemo-v1-GGUF", MessagesFormatterType.MISTRAL],
52
  "mn-maghin-12b-q6_k.gguf": ["rityak/MN-Maghin-12B-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
 
53
  "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
54
  "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
55
  "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  "L3.1-Vulca-Epith-Bluegrade-v0.2-8B.q8_0.gguf": ["kromquant/L3.1-Vulca-Epith-Bluegrade-v0.2-8B-GGUFs", MessagesFormatterType.LLAMA_3],
57
  "llama-3.1-8b-omnimatrix-iq4_nl-imat.gguf": ["bunnycore/Llama-3.1-8B-OmniMatrix-IQ4_NL-GGUF", MessagesFormatterType.LLAMA_3],
58
  "L3.1-Artemis-d-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Artemis-d-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
@@ -481,6 +561,7 @@ llm_models = {
481
  "Meta-Llama-3.1-8B-Claude-iMat-Q5_K_M.gguf": ["InferenceIllusionist/Meta-Llama-3.1-8B-Claude-iMat-GGUF", MessagesFormatterType.LLAMA_3],
482
  "Phi-3.1-mini-128k-instruct-Q6_K_L.gguf": ["bartowski/Phi-3.1-mini-128k-instruct-GGUF", MessagesFormatterType.PHI_3],
483
  "tifa-7b-qwen2-v0.1.q4_k_m.gguf": ["Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
 
484
  "Oumuamua-7b-RP_Q5_K_M.gguf": ["Aratako/Oumuamua-7b-RP-GGUF", MessagesFormatterType.MISTRAL],
485
  "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
486
  "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
@@ -1087,3 +1168,75 @@ def dolphin_parse_simple(
1087
  else:
1088
  prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit", "rating_explicit"])
1089
  return ", ".join(prompts)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  from llama_cpp_agent.chat_history.messages import Roles
8
  from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
9
  import wrapt_timeout_decorator
10
+ from pathlib import Path
11
 
12
 
13
  llm_models_dir = "./llm_models"
 
20
  #"": ["", MessagesFormatterType.PHI_3],
21
  "mn-12b-lyra-v2a1-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v2a1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
22
  "L3-8B-Tamamo-v1.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Tamamo-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
23
+ "MN-12B-Mag-Mell-Q4_K_M.gguf": ["inflatebot/MN-12B-Mag-Mell-R1-GGUF", MessagesFormatterType.MISTRAL],
24
+ "Qwen2.5-14B-Instruct-Q4_K_M.gguf": ["bartowski/Qwen2.5-14B-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
25
  "Instant-RP-Noodles-12B-v1.3.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-v1.3-GGUF", MessagesFormatterType.MISTRAL],
26
  "MN-12B-Lyra-v4-Q4_K_M.gguf": ["bartowski/MN-12B-Lyra-v4-GGUF", MessagesFormatterType.CHATML],
27
  "Lyra4-Gutenberg-12B.Q4_K_M.gguf": ["mradermacher/Lyra4-Gutenberg-12B-GGUF", MessagesFormatterType.CHATML],
 
53
  "StarDust-12b-v2.i1-Q5_K_M.gguf": ["mradermacher/StarDust-12b-v2-i1-GGUF", MessagesFormatterType.CHATML],
54
  "Rocinante-12B-v2c-Q4_K_M.gguf": ["TheDrummer/UnslopNemo-v1-GGUF", MessagesFormatterType.MISTRAL],
55
  "mn-maghin-12b-q6_k.gguf": ["rityak/MN-Maghin-12B-Q6_K-GGUF", MessagesFormatterType.MISTRAL],
56
+ "qwen2.5-lumen-14b-q4_k_m.gguf": ["Lambent/Qwen2.5-Lumen-14B-Q4_K_M-GGUF", MessagesFormatterType.OPEN_CHAT],
57
  "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
58
  "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
59
  "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
60
+ "Athena-gemma-2-9b-it-Philos-KTO.i1-Q4_K_M.gguf": ["mradermacher/Athena-gemma-2-9b-it-Philos-KTO-i1-GGUF", MessagesFormatterType.ALPACA],
61
+ "Qwen2.5-14B-Gutenberg-Instruct-Slerpeno.Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Gutenberg-Instruct-Slerpeno-GGUF", MessagesFormatterType.OPEN_CHAT],
62
+ "typressai-9b-q4_k_m.gguf": ["ClaudioItaly/TypressAI-9B-Q4_K_M-GGUF", MessagesFormatterType.ALPACA],
63
+ "josiefied-qwen2.5-7b-instruct-abliterated-v2.Q5_K_M.gguf": ["Isaak-Carter/Josiefied-Qwen2.5-7B-Instruct-abliterated-v2-gguf", MessagesFormatterType.OPEN_CHAT],
64
+ "josiefied-qwen2.5-7b-instruct-abliterated.Q5_K_M.gguf": ["Isaak-Carter/Josiefied-Qwen2.5-7B-Instruct-abliterated-gguf", MessagesFormatterType.OPEN_CHAT],
65
+ "Fireball-Llama-3.1-8B-Philos-Reflection-v0.2.i1-Q5_K_M.gguf": ["mradermacher/Fireball-Llama-3.1-8B-Philos-Reflection-v0.2-i1-GGUF", MessagesFormatterType.MISTRAL],
66
+ "ArliAI-RPMax-Formax-v1.Q5_K_M.gguf": ["mradermacher/ArliAI-RPMax-Formax-v1-GGUF", MessagesFormatterType.LLAMA_3],
67
+ "Agente-Llama-3.1-Asistant-16bit-v2.Q5_K_M.gguf": ["mradermacher/Agente-Llama-3.1-Asistant-16bit-v2-GGUF", MessagesFormatterType.LLAMA_3],
68
+ "Magnum-Blackout-Ataraxy-4-9b.Q4_K_M.gguf": ["mradermacher/Magnum-Blackout-Ataraxy-4-9b-GGUF", MessagesFormatterType.ALPACA],
69
+ "Magnum-Blackout-Ataraxy-5-9b.Q4_K_M.gguf": ["mradermacher/Magnum-Blackout-Ataraxy-5-9b-GGUF", MessagesFormatterType.ALPACA],
70
+ "Vapor_7B.Q5_K_M.gguf": ["mradermacher/Vapor_7B-GGUF", MessagesFormatterType.OPEN_CHAT],
71
+ "fusion-guide-12b-0.1.Q4_K_M.gguf": ["mradermacher/fusion-guide-12b-0.1-GGUF", MessagesFormatterType.MISTRAL],
72
+ "Meta-Llama-3.1-8B-Instruct-HalfAbliterated-TIES.i1-Q4_K_M.gguf": ["mradermacher/Meta-Llama-3.1-8B-Instruct-HalfAbliterated-TIES-i1-GGUF", MessagesFormatterType.LLAMA_3],
73
+ "Qwen2.5-14B_Uncencored-Q4_K_M.gguf": ["bartowski/Qwen2.5-14B_Uncencored-GGUF", MessagesFormatterType.OPEN_CHAT],
74
+ "L3.1-Niitorm-8B-DPO-t0.0001.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Niitorm-8B-DPO-t0.0001-i1-GGUF", MessagesFormatterType.LLAMA_3],
75
+ "Hermes-ClimateStorm-Sauerkraut-abliterated.i1-Q5_K_M.gguf": ["mradermacher/Hermes-ClimateStorm-Sauerkraut-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
76
+ "qwen2.5-boosted-q5_k_m.gguf": ["ClaudioItaly/Qwen2.5-Boosted-Q5_K_M-GGUF", MessagesFormatterType.OPEN_CHAT],
77
+ "qwen2.5-14b-q4_k_m.gguf": ["Triangle104/Qwen2.5-14B-Q4_K_M-GGUF", MessagesFormatterType.OPEN_CHAT],
78
+ "Qwen2.5-7B-Instruct-Q4_K_M.gguf": ["bartowski/Qwen2.5-7B-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
79
+ "Hermes-Storm-lorablated.i1-Q5_K_M.gguf": ["mradermacher/Hermes-Storm-lorablated-i1-GGUF", MessagesFormatterType.LLAMA_3],
80
+ "Gemma-Ataraxy-Dare-NoBase-9b.i1-Q4_K_M.gguf": ["mradermacher/Gemma-Ataraxy-Dare-NoBase-9b-i1-GGUF", MessagesFormatterType.ALPACA],
81
+ "Axolotl-Llama-3.1-8B-instruct-finetuned-V3-merged.Q4_K_M.gguf": ["mradermacher/Axolotl-Llama-3.1-8B-instruct-finetuned-V3-merged-GGUF", MessagesFormatterType.LLAMA_3],
82
+ "Llama-3.1-8B-TitanFusion-v2.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-TitanFusion-v2-i1-GGUF", MessagesFormatterType.LLAMA_3],
83
+ "Magnum-Blackout-Ataraxy-2-9b.Q4_K_M.gguf": ["mradermacher/Magnum-Blackout-Ataraxy-2-9b-GGUF", MessagesFormatterType.ALPACA],
84
+ "Aspire-8B-model_stock.i1-Q5_K_M.gguf": ["mradermacher/Aspire-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
85
+ "L3.1-Celestial-Stone-2x8B-DPO.Q4_K_M.gguf": ["mradermacher/L3.1-Celestial-Stone-2x8B-DPO-GGUF", MessagesFormatterType.LLAMA_3],
86
+ "L3-Dark-Planet-8B-V2-Eight-Orbs-Of-Power.i1-Q4_K_M.gguf": ["mradermacher/L3-Dark-Planet-8B-V2-Eight-Orbs-Of-Power-i1-GGUF", MessagesFormatterType.LLAMA_3],
87
+ "l3-luna-8b-q5_k_m-imat.gguf": ["Casual-Autopsy/L3-Luna-8B-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
88
+ "Insanity.i1-Q4_K_M.gguf": ["mradermacher/Insanity-i1-GGUF", MessagesFormatterType.MISTRAL],
89
+ "Rocinante-12B-v2d-Q4_K_M.gguf": ["TheDrummer/UnslopNemo-v2-GGUF", MessagesFormatterType.MISTRAL],
90
+ "Fatgirl_8B.Q4_K_M.gguf": ["QuantFactory/Fatgirl_8B-GGUF", MessagesFormatterType.MISTRAL],
91
+ "stormclimate-q4_k_m.gguf": ["MotherEarth/stormclimate-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
92
+ "InsanityB.Q4_K_M.gguf": ["mradermacher/InsanityB-GGUF", MessagesFormatterType.MISTRAL],
93
+ "mistral-nemo-gutades-12B.Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutades-12B-GGUF", MessagesFormatterType.MISTRAL],
94
+ "FourFictionGemma-9.Q5_K_M.gguf": ["mradermacher/FourFictionGemma-9-GGUF", MessagesFormatterType.ALPACA],
95
+ "blackmagnumataraxy-9b-q6_k.gguf": ["BarBarickoza/BlackMagnumAtaraxy-9B-Q6_K-GGUF", MessagesFormatterType.ALPACA],
96
+ "fourfastgemma-9-q4_k_m-imat.gguf": ["ClaudioItaly/FourFastGemma-9-Q4_K_M-GGUF", MessagesFormatterType.ALPACA],
97
+ "ThinkingMistral-gen.i1-Q4_K_M.gguf": ["mradermacher/ThinkingMistral-gen-i1-GGUF", MessagesFormatterType.MISTRAL],
98
+ "L3.1-Dark-Planet-10.7B-ExxxxxxxxTended-D_AU-Q4_k_m.gguf": ["DavidAU/L3.1-Dark-Planet-10.7B-ExxxxxxxxTended-GGUF", MessagesFormatterType.LLAMA_3],
99
+ "Gemma-The-Writer-9B-D_AU-Q4_k_m.gguf": ["DavidAU/Gemma-The-Writer-9B-GGUF", MessagesFormatterType.ALPACA],
100
+ "CleverBoi-Nemo-12B-v2.i1-Q4_K_M.gguf": ["mradermacher/CleverBoi-Nemo-12B-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
101
+ "ORPO-EdgeRunner-Tactical-7B-GSM8K.Q5_K_M.gguf": ["mradermacher/ORPO-EdgeRunner-Tactical-7B-GSM8K-GGUF", MessagesFormatterType.OPEN_CHAT],
102
+ "wip-test_pending_c-q4_k_m.gguf": ["DreadPoor/WIP-TEST_PENDING_C-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
103
+ "L3.1-8B-komorebi-q8_0.gguf": ["crestf411/L3.1-8B-komorebi-gguf", MessagesFormatterType.LLAMA_3],
104
+ "Canada_Reimbursement_Model.Q5_K_M.gguf": ["mradermacher/Canada_Reimbursement_Model-GGUF", MessagesFormatterType.LLAMA_3],
105
+ "BuddyGlassNeverSleeps.Q5_K_M.gguf": ["mradermacher/BuddyGlassNeverSleeps-GGUF", MessagesFormatterType.LLAMA_3],
106
+ "gemma-writer-stock-no-ifable-9b-q6_k.gguf": ["BarBarickoza/Gemma-writer-stock-no-Ifable-9b-Q6_K-GGUF", MessagesFormatterType.ALPACA],
107
+ "Gemma-Ataraxy-Dare-9b.i1-Q4_K_M.gguf": ["mradermacher/Gemma-Ataraxy-Dare-9b-i1-GGUF", MessagesFormatterType.ALPACA],
108
+ "MotherEarth-Hermes-8B.i1-Q5_K_M.gguf": ["mradermacher/MotherEarth-Hermes-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
109
+ "albacus-q5_k_m-imat.gguf": ["ClaudioItaly/Albacus-V2-Imatrix", MessagesFormatterType.MISTRAL],
110
+ "mn-12b-siskin-test2-q8_0.gguf": ["Nohobby/MN-12B-Siskin-TEST2-Q8_0-GGUF", MessagesFormatterType.MISTRAL],
111
+ "mn-12b-siskin-test3-q8_0.gguf": ["Nohobby/MN-12B-Siskin-TEST3-Q8_0-GGUF", MessagesFormatterType.MISTRAL],
112
+ "Violet_Twilight-v0.2.Q4_K_M.gguf": ["Epiculous/Violet_Twilight-v0.2-GGUF", MessagesFormatterType.CHATML],
113
+ "albacus-q4_k_m-imat.gguf": ["ClaudioItaly/Albacus-Imatrix-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
114
+ "mn-chinofun-q4_k_m.gguf": ["djuna/MN-Chinofun-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
115
+ "NarraThinker12B.i1-Q4_K_M.gguf": ["mradermacher/NarraThinker12B-i1-GGUF", MessagesFormatterType.MISTRAL],
116
+ "IceDrinkNameNotFoundRP-7b-Model_Stock.Q4_K_S.gguf": ["mradermacher/IceDrinkNameNotFoundRP-7b-Model_Stock-GGUF", MessagesFormatterType.MISTRAL],
117
+ "Hatheno_Max_1.1-ALT-8B-model_stock.i1-Q5_K_M.gguf": ["mradermacher/Hatheno_Max_1.1-ALT-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
118
+ "Gluon-8B.i1-Q5_K_M.gguf": ["mradermacher/Gluon-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
119
+ "mergekit-model_stock-yhrnwcb-q4_k_m.gguf": ["DreadPoor/WIP-TEST_PENDING_A-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
120
+ "wip-test_pending_b-q4_k_m.gguf": ["DreadPoor/WIP-TEST_PENDING_B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
121
+ "Hatheno_Max_1.1-8B-model_stock.i1-Q5_K_M.gguf": ["mradermacher/Hatheno_Max_1.1-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
122
+ "MN-12B-Siskin-v0.2.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Siskin-v0.2-i1-GGUF", MessagesFormatterType.MISTRAL],
123
+ "Hatheno_Max-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Hatheno_Max-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
124
+ "L3.1-SuperNovabliterated-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/L3.1-SuperNovabliterated-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
125
+ "Hatheno_Max-ALT-8B-model_stock.i1-Q5_K_M.gguf": ["mradermacher/Hatheno_Max-ALT-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
126
+ "experiment_x-wip-q4_k_m.gguf": ["DreadPoor/EXPERIMENT_X-WIP-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
127
+ "narrathinker12b-q4_k_m.gguf": ["ClaudioItaly/NarraThinker12B-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
128
+ "llama-3.1-8b-matrix-q5_k_m.gguf": ["bunnycore/LLama-3.1-8B-Matrix-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
129
+ "Barcenas-8b-Cartas.Q5_K_M.gguf": ["mradermacher/Barcenas-8b-Cartas-GGUF", MessagesFormatterType.LLAMA_3],
130
+ "HannaOpenHermes-2.5-Mistral-7B.Q5_K_M.gguf": ["mradermacher/HannaOpenHermes-2.5-Mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
131
+ "IceDrinkNameGoesHereRP-7b-Model_Stock.i1-Q4_K_M.gguf": ["mradermacher/IceDrinkNameGoesHereRP-7b-Model_Stock-i1-GGUF", MessagesFormatterType.ALPACA],
132
+ "Llama-3.1-Literotica-8B.Q4_K_S.gguf": ["mradermacher/Llama-3.1-Literotica-8B-GGUF", MessagesFormatterType.LLAMA_3],
133
+ "project-12-q4_k_m.gguf": ["ClaudioItaly/Project-12-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
134
+ "L3.1-Celestial-Stone-2x8B.i1-Q4_K_M.gguf": ["mradermacher/L3.1-Celestial-Stone-2x8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
135
+ "experiment_y-wip-q4_k_m.gguf": ["DreadPoor/EXPERIMENT_Y-WIP-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
136
  "L3.1-Vulca-Epith-Bluegrade-v0.2-8B.q8_0.gguf": ["kromquant/L3.1-Vulca-Epith-Bluegrade-v0.2-8B-GGUFs", MessagesFormatterType.LLAMA_3],
137
  "llama-3.1-8b-omnimatrix-iq4_nl-imat.gguf": ["bunnycore/Llama-3.1-8B-OmniMatrix-IQ4_NL-GGUF", MessagesFormatterType.LLAMA_3],
138
  "L3.1-Artemis-d-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Artemis-d-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
 
561
  "Meta-Llama-3.1-8B-Claude-iMat-Q5_K_M.gguf": ["InferenceIllusionist/Meta-Llama-3.1-8B-Claude-iMat-GGUF", MessagesFormatterType.LLAMA_3],
562
  "Phi-3.1-mini-128k-instruct-Q6_K_L.gguf": ["bartowski/Phi-3.1-mini-128k-instruct-GGUF", MessagesFormatterType.PHI_3],
563
  "tifa-7b-qwen2-v0.1.q4_k_m.gguf": ["Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
564
+ "Holland-Magnum-Merge-R2.i1-Q5_K_M.gguf": ["mradermacher/Holland-Magnum-Merge-R2-i1-GGUF", MessagesFormatterType.LLAMA_3],
565
  "Oumuamua-7b-RP_Q5_K_M.gguf": ["Aratako/Oumuamua-7b-RP-GGUF", MessagesFormatterType.MISTRAL],
566
  "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
567
  "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
 
1168
  else:
1169
  prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit", "rating_explicit"])
1170
  return ", ".join(prompts)
1171
+
1172
+
1173
+ # https://huggingface.co/spaces/CaioXapelaum/GGUF-Playground
1174
+ import cv2
1175
+ cv2.setNumThreads(1)
1176
+
1177
+ @spaces.GPU()
1178
+ def respond_playground(
1179
+ message,
1180
+ history: list[tuple[str, str]],
1181
+ model,
1182
+ system_message,
1183
+ max_tokens,
1184
+ temperature,
1185
+ top_p,
1186
+ top_k,
1187
+ repeat_penalty,
1188
+ ):
1189
+ if override_llm_format:
1190
+ chat_template = override_llm_format
1191
+ else:
1192
+ chat_template = llm_models[model][1]
1193
+
1194
+ llm = Llama(
1195
+ model_path=str(Path(f"{llm_models_dir}/{model}")),
1196
+ flash_attn=True,
1197
+ n_gpu_layers=81, # 81
1198
+ n_batch=1024,
1199
+ n_ctx=8192, #8192
1200
+ )
1201
+ provider = LlamaCppPythonProvider(llm)
1202
+
1203
+ agent = LlamaCppAgent(
1204
+ provider,
1205
+ system_prompt=f"{system_message}",
1206
+ predefined_messages_formatter_type=chat_template,
1207
+ debug_output=False
1208
+ )
1209
+
1210
+ settings = provider.get_provider_default_settings()
1211
+ settings.temperature = temperature
1212
+ settings.top_k = top_k
1213
+ settings.top_p = top_p
1214
+ settings.max_tokens = max_tokens
1215
+ settings.repeat_penalty = repeat_penalty
1216
+ settings.stream = True
1217
+
1218
+ messages = BasicChatHistory()
1219
+
1220
+ # Add user and assistant messages to the history
1221
+ for msn in history:
1222
+ user = {'role': Roles.user, 'content': msn[0]}
1223
+ assistant = {'role': Roles.assistant, 'content': msn[1]}
1224
+ messages.add_message(user)
1225
+ messages.add_message(assistant)
1226
+
1227
+ # Stream the response
1228
+ try:
1229
+ stream = agent.get_chat_response(
1230
+ message,
1231
+ llm_sampling_settings=settings,
1232
+ chat_history=messages,
1233
+ returns_streaming_generator=True,
1234
+ print_output=False
1235
+ )
1236
+
1237
+ outputs = ""
1238
+ for output in stream:
1239
+ outputs += output
1240
+ yield outputs
1241
+ except Exception as e:
1242
+ yield f"Error during response generation: {str(e)}"