update UI text
app.py CHANGED
@@ -13,7 +13,7 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
 DESCRIPTION = '''
 <div>
 <h1 style="text-align: center;">Loki ποΈ</h1>
-<p>This uses
+<p>This uses an open source Large Language Model called <a href="https://huggingface.co/meta-llama/Meta-Llama-3-8B"><b>Llama3-8b</b></a></p>
 </div>
 '''
 
@@ -27,19 +27,13 @@ terminators = [
 
 @spaces.GPU(duration=120)
 def chat_llama3_8b(message: str,
-              history: list,
-              temperature: float,
-              max_new_tokens: int
-             ) -> str:
+              history: list,
+              temperature: float,
+              max_new_tokens: int
+              ) -> str:
     """
-    Generate a streaming response using the llama3-8b model.
-    Args:
-        message (str): The input message.
-        history (list): The conversation history used by ChatInterface.
-        temperature (float): The temperature for generating the response.
-        max_new_tokens (int): The maximum number of new tokens to generate.
-    Returns:
-        str: The generated response.
+    Passes the input, converts it into tokens, generates with the ids,
+    and outputs the text.
     """
     conversation = []
     for user, assistant in history:
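
The second hunk only touches the signature formatting and the docstring of chat_llama3_8b; the body that does the work sits outside the diff. For orientation, below is a minimal sketch of how a function of this shape is usually completed in a ZeroGPU Space: rebuild the conversation from history, tokenize it with the chat template, and stream the generated text back. The checkpoint name, the terminators definition, the greedy-decoding fallback, and the streaming details are assumptions filled in for illustration, not part of this commit.

import os
from threading import Thread

import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Assumed checkpoint; the Space description only says it uses Llama3-8b.
MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"
HF_TOKEN = os.environ.get("HF_TOKEN", None)

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto", token=HF_TOKEN
)

# Stop generation at either the regular EOS token or Llama-3's end-of-turn token.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

@spaces.GPU(duration=120)
def chat_llama3_8b(message: str,
                   history: list,
                   temperature: float,
                   max_new_tokens: int
                   ) -> str:
    """
    Passes the input, converts it into tokens, generates with the ids,
    and outputs the text.
    """
    # Rebuild the conversation in the role/content format the chat template expects.
    conversation = []
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user},
                             {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    # Convert the conversation into input ids on the model's device.
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # Generate in a background thread and stream tokens back to the UI.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )
    # Fall back to greedy decoding when the temperature slider is at 0.
    if temperature == 0:
        generate_kwargs["do_sample"] = False
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)

In the full app, DESCRIPTION and this generator are typically wired into gr.ChatInterface, which supplies the history list automatically and passes the temperature and max_new_tokens sliders as additional inputs.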