Spaces:
Runtime error
Runtime error
rogerkoranteng
committed on
Commit
•
0150ac0
1
Parent(s):
a5774b4
Upload folder using huggingface_hub
Browse files
.ipynb_checkpoints/README-checkpoint.md
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
---
title: chatbot
app_file: main.py
sdk: gradio
sdk_version: 4.42.0
---
.ipynb_checkpoints/main-checkpoint.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

# KERAS_BACKEND must be set BEFORE keras / keras_nlp are imported; setting it
# after `import keras` (as the original code did) has no effect and keras
# silently falls back to its default backend.
os.environ["KERAS_BACKEND"] = "jax"
# Avoid memory fragmentation on the JAX backend.
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "1.00"

import gradio as gr
from dotenv import load_dotenv
import keras
import keras_nlp

# SECURITY NOTE(review): a Kaggle API key was hard-coded here and is part of the
# repository history — treat it as leaked and rotate it. `setdefault` keeps the
# previous behaviour as a fallback while letting properly configured environment
# variables (e.g. Space secrets) take precedence instead of being clobbered.
os.environ.setdefault("KAGGLE_USERNAME", "rogerkorantenng")
os.environ.setdefault("KAGGLE_KEY", "9a33b6e88bcb6058b1281d777fa6808d")

# Load any additional configuration from a local .env file.
load_dotenv()

# Download / load the Gemma 2B causal LM from its preset. This runs at import
# time and can take a long while on first start (model weights are fetched).
gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset("gemma_2b_en")
|
24 |
+
|
25 |
+
def generate_response(message, history):
|
26 |
+
# Format the conversation history for the local model
|
27 |
+
formatted_history = []
|
28 |
+
for user, assistant in history:
|
29 |
+
formatted_history.append(f"Instruction:\n{user}\n\nResponse:\n{assistant}")
|
30 |
+
|
31 |
+
# Add the latest user message to the history
|
32 |
+
formatted_history.append(f"Instruction:\n{message}\n\nResponse:\n")
|
33 |
+
|
34 |
+
# Join formatted history into a single string for input
|
35 |
+
input_text = "\n".join(formatted_history)
|
36 |
+
|
37 |
+
# Generate response from the local model
|
38 |
+
# Make sure to adjust this part according to your model's API
|
39 |
+
response = gemma_lm.generate(input_text, max_length=256)
|
40 |
+
|
41 |
+
# Extract the response text
|
42 |
+
# Adjust the response extraction based on the actual structure of your model's output
|
43 |
+
return response[0] # Change this line if necessary
|
44 |
+
|
45 |
+
# Create the Gradio interface
|
46 |
+
gr.ChatInterface(
|
47 |
+
generate_response,
|
48 |
+
chatbot=gr.Chatbot(height=300),
|
49 |
+
textbox=gr.Textbox(placeholder="You can ask me anything", container=False, scale=7),
|
50 |
+
title="Local Model Chat Bot",
|
51 |
+
retry_btn=None,
|
52 |
+
undo_btn="Delete Previous",
|
53 |
+
clear_btn="Clear"
|
54 |
+
).launch(share=True)
|
.ipynb_checkpoints/requirements-checkpoint.txt
ADDED
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
jupyterlab>3.0
|
2 |
+
keras>=3
|
3 |
+
keras-nlp
|
4 |
+
kaggle
|
5 |
+
jax[cuda12]
|
6 |
+
gradio
|
7 |
+
tensorflow
|
8 |
+
silence-tensorflow
|
9 |
+
absl-py==2.1.0
|
10 |
+
aiofiles==23.2.1
|
11 |
+
annotated-types==0.7.0
|
12 |
+
anyio==4.4.0
|
13 |
+
astunparse==1.6.3
|
14 |
+
certifi==2024.8.30
|
15 |
+
charset-normalizer==3.3.2
|
16 |
+
click==8.1.7
|
17 |
+
contourpy==1.3.0
|
18 |
+
cycler==0.12.1
|
19 |
+
exceptiongroup==1.2.2
|
20 |
+
fastapi==0.112.2
|
21 |
+
ffmpy==0.4.0
|
22 |
+
filelock==3.15.4
|
23 |
+
flatbuffers==24.3.25
|
24 |
+
fonttools==4.53.1
|
25 |
+
fsspec==2024.6.1
|
26 |
+
gast==0.6.0
|
27 |
+
google-pasta==0.2.0
|
28 |
+
gradio==4.42.0
|
29 |
+
gradio_client==1.3.0
|
30 |
+
grpcio==1.66.1
|
31 |
+
h11==0.14.0
|
32 |
+
h5py==3.11.0
|
33 |
+
httpcore==1.0.5
|
34 |
+
httpx==0.27.2
|
35 |
+
huggingface-hub==0.24.6
|
36 |
+
idna==3.8
|
37 |
+
importlib_resources==6.4.4
|
38 |
+
Jinja2==3.1.4
|
39 |
+
kagglehub==0.2.9
|
40 |
+
keras==3.5.0
|
41 |
+
keras-nlp==0.14.4
|
42 |
+
kiwisolver==1.4.7
|
43 |
+
libclang==18.1.1
|
44 |
+
Markdown==3.7
|
45 |
+
markdown-it-py==3.0.0
|
46 |
+
MarkupSafe==2.1.5
|
47 |
+
matplotlib==3.9.2
|
48 |
+
mdurl==0.1.2
|
49 |
+
ml-dtypes==0.4.0
|
50 |
+
namex==0.0.8
|
51 |
+
numpy==1.26.4
|
52 |
+
opt-einsum==3.3.0
|
53 |
+
optree==0.12.1
|
54 |
+
orjson==3.10.7
|
55 |
+
packaging==24.1
|
56 |
+
pandas==2.2.2
|
57 |
+
pillow==10.4.0
|
58 |
+
protobuf==4.25.4
|
59 |
+
pydantic==2.8.2
|
60 |
+
pydantic_core==2.20.1
|
61 |
+
pydub==0.25.1
|
62 |
+
Pygments==2.18.0
|
63 |
+
pyparsing==3.1.4
|
64 |
+
python-dateutil==2.9.0.post0
|
65 |
+
python-multipart==0.0.9
|
66 |
+
pytz==2024.1
|
67 |
+
PyYAML==6.0.2
|
68 |
+
regex==2024.7.24
|
69 |
+
requests==2.32.3
|
70 |
+
rich==13.8.0
|
71 |
+
ruff==0.6.3
|
72 |
+
safetensors==0.4.4
|
73 |
+
semantic-version==2.10.0
|
74 |
+
shellingham==1.5.4
|
75 |
+
six==1.16.0
|
76 |
+
sniffio==1.3.1
|
77 |
+
starlette==0.38.4
|
78 |
+
tensorboard==2.17.1
|
79 |
+
tensorboard-data-server==0.7.2
|
80 |
+
tensorflow==2.17.0
|
81 |
+
tensorflow-io-gcs-filesystem==0.37.1
|
82 |
+
tensorflow-text==2.17.0
|
83 |
+
termcolor==2.4.0
|
84 |
+
tokenizers==0.19.1
|
85 |
+
tomlkit==0.12.0
|
86 |
+
tqdm==4.66.5
|
87 |
+
transformers==4.44.2
|
88 |
+
typer==0.12.5
|
89 |
+
typing_extensions==4.12.2
|
90 |
+
tzdata==2024.1
|
91 |
+
urllib3==2.2.2
|
92 |
+
uvicorn==0.30.6
|
93 |
+
websockets==12.0
|
94 |
+
Werkzeug==3.0.4
|
95 |
+
wrapt==1.16.0
|
flagged/log.csv
CHANGED
@@ -1,4 +1,2 @@
|
|
1 |
-
|
2 |
-
"
|
3 |
-
",,,,,,2024-09-04 18:19:14.583976
|
4 |
-
,,"I'm sorry to you and your family. I'm sure this is very upsetting for you. I'm not sure I can really offer much help. I can only imagine how you feel. I wish I could offer you a hug. I'm glad you're considering counseling. That's a good sign. It's a good sign because you're here, reading this. It's a good sign you're here asking this question. It's a good sign you're looking for answers. It's a good thing you're here on this site. You're doing something good for yourself. You're taking care of you. You're looking for answers. You're asking questions. You're looking for help. You're looking for support. You're looking for friends. You're looking for someone to talk to. You're looking for someone to talk to. You're looking for someone to talk with. You're looking for someone to talk about it all. You're looking for someone to listen. You're looking for someone to talk to. You're looking for someone to talk with. You'",,,,2024-09-04 18:25:01.121662
|
|
|
1 |
+
message,output,flag,username,timestamp
|
2 |
+
Am very sad today?,"Sadness is a normal emotion, but if you are experiencing a level of sadness that is weighing you down or getting in the way of enjoying your life then you may be experiencing depression. The best thing you can do is talk to someone about it, whether it be a friend, a friend of a friend, or a mental health professional. The great news about depression is that it is treatable. The overwhelming majority of people who experience sadness are also able to overcome it on their own, but the most important thing is that you know you can and that you deserve to be happy.",,,2024-09-06 11:20:19.122122
|
|
|
|