Bey007 committed
Commit f3e8b12
1 Parent(s): 071cf5b

Update app.py

Files changed (1): app.py +18 -14
app.py CHANGED
@@ -36,31 +36,35 @@ def generate_story(theme):
     return story
 
 def generate_response(user_input):
-    response_prompt = (
-        f"A user is feeling very sad and overwhelmed: '{user_input}'. "
-        "You are a compassionate support bot. Respond with empathy, encouragement, and reassurance."
-    )
-    input_ids = gpt2_tokenizer.encode(response_prompt, return_tensors='pt')
+    # Limit user input length to prevent overflow issues
+    truncated_input = user_input[:200]
+
+    # Construct a simpler prompt for generating empathetic responses
+    prompt = f"The user is feeling: '{truncated_input}'. Respond with empathy, compassion, and encouragement."
+
+    # Encode the prompt
+    input_ids = gpt2_tokenizer.encode(prompt, return_tensors='pt')
+
+    # Generate the response
     response_ids = gpt2_model.generate(
         input_ids,
-        max_length=150,
-        temperature=0.8,
-        top_k=40,
-        repetition_penalty=1.1,
+        max_length=100,
+        temperature=0.7,
+        top_p=0.9,
+        top_k=50,
+        repetition_penalty=1.2,
         num_return_sequences=1
-
     )
 
-    # Decode the response and clean it up by removing the prompt
+    # Decode and clean up the generated response
     response = gpt2_tokenizer.decode(response_ids[0], skip_special_tokens=True)
-
-    # Strip out the prompt portion to get a clean, empathetic message
-    cleaned_response = response.replace(f"A user is feeling very sad and overwhelmed:{user_input}.You are a compassionate support bot. Respond with empathy, encouragement, and reassurance", "").strip()
+    cleaned_response = response.replace(prompt, "").strip()
 
     return cleaned_response
 
 
 
+
 # Analyze user input for emotional tone
 def get_emotion(user_input):
     emotions = emotion_classifier(user_input)
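
For context, a minimal usage sketch of the updated generate_response(), as it might be exercised from app.py after the definitions above. The loading code is an assumption for illustration: the commit only shows that gpt2_tokenizer, gpt2_model, and emotion_classifier exist, not which checkpoints or pipeline the app actually uses.

from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline

# Assumed setup (not part of this diff): these names are referenced by the
# functions in app.py, but the exact checkpoints and pipeline task are guesses.
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")
emotion_classifier = pipeline("sentiment-analysis")  # hypothetical classifier used by get_emotion()

user_input = "I feel completely overwhelmed by everything lately."
reply = generate_response(user_input)  # truncates input, prompts GPT-2, strips the prompt
print(reply)

Because GPT-2 echoes the prompt in its output, the rewritten function strips the exact prompt variable with response.replace(prompt, "") before returning; the string hard-coded into the old replace() call no longer matched the prompt, so the prompt text leaked into the reply. Note also that in the Hugging Face transformers generate() API, temperature, top_p, and top_k only take effect when sampling is enabled with do_sample=True.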