nchen909 committed on
Commit
e0372cc
β€’
1 Parent(s): d4fe6ec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -5,19 +5,17 @@ import subprocess
5
  import asyncio
6
  import os
7
  import stat
8
- title = "Apollo-7B-GGUF Run On CPU"
9
 
10
  description = """
11
- πŸ”Ž [Apollo-7B](https://huggingface.co/FreedomIntelligence/Apollo-7B) [GGUF format model](https://huggingface.co/FreedomIntelligence/Apollo-7B-GGUF) , 8-bit quantization balanced quality gguf version, running on CPU. Using [GitHub - llama.cpp](https://github.com/ggerganov/llama.cpp) [GitHub - gpt4all](https://github.com/nomic-ai/gpt4all).
12
 
13
  πŸ”¨ Running on CPU-Basic free hardware. Suggest duplicating this space to run without a queue.
14
 
15
- Mistral does not support system prompt symbol (such as ```<<SYS>>```) now, input your system prompt in the first message if you need. Learn more: [Guardrailing Mistral 7B](https://docs.mistral.ai/usage/guardrailing).
16
  """
17
 
18
  """
19
- [Model From TheBloke/Mistral-6B-Instruct-v0.1-GGUF](https://huggingface.co/FreedomIntelligence/Apollo-6B-GGUF)
20
- [Mistral-instruct-v0.1 System prompt](https://docs.mistral.ai/usage/guardrailing)
21
  """
22
 
23
  model_path = "models"
 
5
  import asyncio
6
  import os
7
  import stat
8
+ title = "Apollo-6B-GGUF Run On CPU"
9
 
10
  description = """
11
+ πŸ”Ž [Apollo-6B](https://huggingface.co/FreedomIntelligence/Apollo-6B) [GGUF format model](https://huggingface.co/FreedomIntelligence/Apollo-6B-GGUF) , 8-bit quantization balanced quality gguf version, running on CPU. Using [GitHub - llama.cpp](https://github.com/ggerganov/llama.cpp) [GitHub - gpt4all](https://github.com/nomic-ai/gpt4all).
12
 
13
  πŸ”¨ Running on CPU-Basic free hardware. Suggest duplicating this space to run without a queue.
14
 
 
15
  """
16
 
17
  """
18
+ [Model From FreedomIntelligence/Apollo-6B-GGUF](https://huggingface.co/FreedomIntelligence/Apollo-6B-GGUF)
 
19
  """
20
 
21
  model_path = "models"