Spaces:
Sleeping
Sleeping
ehristoforu
committed on
Commit
•
709ff7d
1
Parent(s):
87ff3aa
Update app (13).py
Browse files- app (13).py +5 -5
app (13).py
CHANGED
@@ -7,19 +7,19 @@ from model import run
|
|
7 |
|
8 |
HF_PUBLIC = os.environ.get("HF_PUBLIC", False)
|
9 |
|
10 |
-
DEFAULT_SYSTEM_PROMPT = "You are
|
11 |
MAX_MAX_NEW_TOKENS = 4096
|
12 |
DEFAULT_MAX_NEW_TOKENS = 1024
|
13 |
MAX_INPUT_TOKEN_LENGTH = 4000
|
14 |
|
15 |
DESCRIPTION = """
|
16 |
-
#
|
17 |
|
18 |
-
💻 This Space demonstrates model [
|
19 |
|
20 |
-
🔎 For more details about the
|
21 |
|
22 |
-
🏃🏻 Check out our [Playground](https://huggingface.co/spaces/
|
23 |
|
24 |
"""
|
25 |
|
|
|
7 |
|
8 |
HF_PUBLIC = os.environ.get("HF_PUBLIC", False)
|
9 |
|
10 |
+
DEFAULT_SYSTEM_PROMPT = "You are Mistral. You are AI-assistant, you are polite, give only truthful information and are based on the Mistral-7B model from Mistral AI. You can communicate in different languages equally well."
|
11 |
MAX_MAX_NEW_TOKENS = 4096
|
12 |
DEFAULT_MAX_NEW_TOKENS = 1024
|
13 |
MAX_INPUT_TOKEN_LENGTH = 4000
|
14 |
|
15 |
DESCRIPTION = """
|
16 |
+
# Mistral-7B Chat
|
17 |
|
18 |
+
💻 This Space demonstrates model [Mistral-7b-Instruct](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) by Mistral AI, a Mistral-chat model with 7B parameters fine-tuned for chat instructions and specialized on many tasks. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
|
19 |
|
20 |
+
🔎 For more details about the Mistral family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/mistral).
|
21 |
|
22 |
+
🏃🏻 Check out our [Playground](https://huggingface.co/spaces/osanseviero/mistral-super-fast) for a super-fast code completion demo that leverages a streaming [inference endpoint](https://huggingface.co/inference-endpoints).
|
23 |
|
24 |
"""
|
25 |
|