rogerkoranteng committed on
Commit
46d4ec4
1 Parent(s): 0150ac0

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. main.py +7 -6
main.py CHANGED
@@ -3,9 +3,10 @@ import os
3
  import keras_nlp
4
  from transformers import AutoModelForCausalLM
5
 
6
- # Set Kaggle API credentials
7
- os.environ["KAGGLE_USERNAME"] = "rogerkorantenng"
8
- os.environ["KAGGLE_KEY"] = "9a33b6e88bcb6058b1281d777fa6808d"
 
9
 
10
  # Load LoRA weights if you have them
11
  LoRA_weights_path = "fined-tuned-model.lora.h5"
@@ -38,10 +39,10 @@ interface = gr.Interface(
38
  fn=generate_response, # Function that generates responses
39
  inputs=gr.Textbox(placeholder="Hello, I am Sage, your mental health advisor", lines=2, scale=7),
40
  outputs=gr.Textbox(),
41
- title="Welcome to Sage, your dedicated mental health advisor.",
42
  # description="Chat with Sage, your mental health advisor.",
43
  # live=True
44
  )
45
-
46
  # Launch the Gradio app
47
- interface.launch(share=True, share_server_address="hopegivers.tech:7000")
 
3
  import keras_nlp
4
  from transformers import AutoModelForCausalLM
5
 
6
+ # Set Kaggle API credentials using values from environment variables
7
+ os.environ["KAGGLE_USERNAME"] = os.environ.get("KAGGLE_USERNAME")
8
+ os.environ["KAGGLE_KEY"] = os.environ.get("KAGGLE_KEY")
9
+
10
 
11
  # Load LoRA weights if you have them
12
  LoRA_weights_path = "fined-tuned-model.lora.h5"
 
39
  fn=generate_response, # Function that generates responses
40
  inputs=gr.Textbox(placeholder="Hello, I am Sage, your mental health advisor", lines=2, scale=7),
41
  outputs=gr.Textbox(),
42
+ title="Sage, your Mental Health Advisor",
43
  # description="Chat with Sage, your mental health advisor.",
44
  # live=True
45
  )
46
+ proxy_prefix = os.environ.get("PROXY_PREFIX")
47
  # Launch the Gradio app
48
+ interface.launch(server_name="0.0.0.0", server_port=8080, root_path=proxy_prefix, share=True)