rogerkoranteng committed
Commit 355a772
1 Parent(s): 9d58403

Upload folder using huggingface_hub

Files changed (4)
  1. .gitattributes +1 -0
  2. app.py +57 -60
  3. frpc_linux_amd64 +3 -0
  4. model.lora.h5 +3 -0
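
The commit message above is the default message `huggingface_hub` writes when a whole folder is pushed in a single commit. A minimal sketch of that kind of upload is shown below; the repository id and authentication details are placeholders, not taken from this commit.

# Hypothetical sketch: pushing a local folder (app.py, model.lora.h5, frpc_linux_amd64)
# to a Space in one commit. repo_id is a placeholder, not the actual Space name.
from huggingface_hub import HfApi

api = HfApi()  # assumes you are already authenticated (HF_TOKEN or `huggingface-cli login`)
api.upload_folder(
    folder_path=".",                     # local folder holding the files in this commit
    repo_id="your-username/your-space",  # placeholder
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)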
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+frpc_linux_amd64 filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -1,63 +1,60 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+import os
+import keras_nlp
+
+
+# Set Kaggle API credentials
+os.environ["KAGGLE_USERNAME"] = "rogerkorantenng"
+os.environ["KAGGLE_KEY"] = "9a33b6e88bcb6058b1281d777fa6808d"
+
+
+# Set the Keras backend and configure memory
+os.environ["KERAS_BACKEND"] = "tensorflow"
+os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "1.00"
+
+# Load the GemmaCausalLM model
+gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset("gemma_2b_en")
+
+# Load LoRA weights if you have them
+LoRA_weights_path = "model.lora.h5"
+gemma_lm.backbone.enable_lora(rank=4)  # Enable LoRA with rank 4
+gemma_lm.preprocessor.sequence_length = 512  # Limit sequence length
+gemma_lm.backbone.load_lora_weights(LoRA_weights_path)  # Load LoRA weights
+
+
+# Define the response generation function
+def generate_response(message, history):
+    # Create a prompt template
+    template = "Instruction:\n{instruction}\n\nResponse:\n{response}"
+
+    # Format the history and the current message into the prompt
+    formatted_history = ""
+    for user_msg, bot_msg in history:
+        formatted_history += template.format(instruction=user_msg, response=bot_msg)
+
+    # Add the latest message from the user
+    prompt = template.format(instruction=message, response="")
+
+    # Combine history with the latest prompt
+    final_prompt = formatted_history + prompt
+
+    # Generate a response from the model
+    response = gemma_lm.generate(final_prompt, max_length=256)
+
+    # generate() returns the prompt followed by the completion; return only the new text
+    return response[len(final_prompt):]
+
+
+# Create the Gradio chat interface
+interface = gr.ChatInterface(
+    fn=generate_response,  # Function that generates responses
+    chatbot=gr.Chatbot(height=300),  # Chatbot UI component
+    textbox=gr.Textbox(placeholder="You can ask me anything", container=False, scale=7),
+    title="Local Model Chat Bot",
+    retry_btn=None,  # Disable retry button
+    undo_btn="Delete Previous",  # Enable undo button
+    clear_btn="Clear",  # Enable clear button
+)
+
+# Launch the Gradio app
+interface.launch(share=True)
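
The model.lora.h5 file that the new app.py loads is added later in this same commit. A hedged sketch of how such a LoRA weights file is typically produced with KerasNLP is shown below; the training data, optimizer settings, and epoch count are placeholders, not taken from this repository.

# Hypothetical sketch: fine-tune gemma_2b_en with LoRA rank 4 and save only the adapter
# weights as model.lora.h5 (the counterpart of load_lora_weights() used in app.py).
import os
os.environ["KERAS_BACKEND"] = "tensorflow"  # same backend the app configures

import keras
import keras_nlp

gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset("gemma_2b_en")  # needs Kaggle credentials
gemma_lm.backbone.enable_lora(rank=4)        # same rank the app enables before loading
gemma_lm.preprocessor.sequence_length = 512  # same sequence length the app uses

# Placeholder training data in the app's Instruction/Response prompt format.
train_data = [
    "Instruction:\nSay hello.\n\nResponse:\nHello! How can I help you today?",
]

gemma_lm.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=keras.optimizers.Adam(learning_rate=5e-5),
    weighted_metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
gemma_lm.fit(train_data, epochs=1, batch_size=1)

# Save only the LoRA deltas; app.py restores them with backbone.load_lora_weights().
gemma_lm.backbone.save_lora_weights("model.lora.h5")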
frpc_linux_amd64 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb74b665633589410540c49dfcef5b6f0fd4a9bd7c9558bcdee2f0e43da0774d
+size 11374592
model.lora.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f50855153040325fe2a203e7ee03c3aa8c98f3f3db7cccc435fa759527bd7b5
+size 5560280
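
Both binary files in this commit are stored as Git LFS pointers like the ones above, so the repository itself only records the oid and size; the actual bytes live in LFS storage. A hedged sketch of fetching the real file through the Hub client (for example, to run app.py locally) is below; the repository id is a placeholder.

# Hypothetical sketch: resolve the LFS pointer and download the real weights file.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="your-username/your-space",  # placeholder for the Space this commit belongs to
    repo_type="space",
    filename="model.lora.h5",
)
print(local_path)  # path of the cached download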