Update app.py
Browse files
app.py
CHANGED
@@ -5,19 +5,17 @@ import subprocess
|
|
5 |
import asyncio
|
6 |
import os
|
7 |
import stat
|
8 |
-
title = "Apollo-
|
9 |
|
10 |
description = """
|
11 |
-
🚀 [Apollo-
|
12 |
|
13 |
🚨 Running on CPU-Basic free hardware. Suggest duplicating this space to run without a queue.
|
14 |
|
15 |
-
Mistral does not support system prompt symbol (such as ```<<SYS>>```) now, input your system prompt in the first message if you need. Learn more: [Guardrailing Mistral 7B](https://docs.mistral.ai/usage/guardrailing).
|
16 |
"""
|
17 |
|
18 |
"""
|
19 |
-
[Model From
|
20 |
-
[Mistral-instruct-v0.1 System prompt](https://docs.mistral.ai/usage/guardrailing)
|
21 |
"""
|
22 |
|
23 |
model_path = "models"
|
|
|
5 |
import asyncio
|
6 |
import os
|
7 |
import stat
|
8 |
+
title = "Apollo-6B-GGUF Run On CPU"
|
9 |
|
10 |
description = """
|
11 |
+
🚀 [Apollo-6B](https://huggingface.co/FreedomIntelligence/Apollo-6B) [GGUF format model](https://huggingface.co/FreedomIntelligence/Apollo-6B-GGUF), 8-bit quantization balanced quality gguf version, running on CPU. Using [GitHub - llama.cpp](https://github.com/ggerganov/llama.cpp) and [GitHub - gpt4all](https://github.com/nomic-ai/gpt4all).
|
12 |
|
13 |
🚨 Running on CPU-Basic free hardware. Suggest duplicating this space to run without a queue.
|
14 |
|
|
|
15 |
"""
|
16 |
|
17 |
"""
|
18 |
+
[Model From FreedomIntelligence/Apollo-6B-GGUF](https://huggingface.co/FreedomIntelligence/Apollo-6B-GGUF)
|
|
|
19 |
"""
|
20 |
|
21 |
model_path = "models"
|