Sg-at-srijan-us-kg committed on
Commit
78c0656
1 Parent(s): a347d13

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -14
app.py CHANGED
@@ -3,8 +3,22 @@ from huggingface_hub import InferenceClient
3
 
4
  client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")
5
 
6
- # A global variable to store file content
7
  uploaded_file_content = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
  def handle_file_upload(file_obj):
10
  global uploaded_file_content
@@ -18,9 +32,20 @@ def handle_file_upload(file_obj):
18
  except UnicodeDecodeError:
19
  file_content = content.decode('latin-1')
20
 
 
21
  uploaded_file_content = file_content
22
- preview = file_content[:200] + "..." if len(file_content) > 200 else file_content
23
- return f"File uploaded successfully! Preview:\n\n{preview}"
 
 
 
 
 
 
 
 
 
 
24
  except Exception as e:
25
  return f"Error uploading file: {str(e)}"
26
 
@@ -34,12 +59,15 @@ def respond(
34
  ):
35
  global uploaded_file_content
36
 
37
- # Format the current message to include file content
 
 
 
38
  current_message = message
39
- if uploaded_file_content:
40
- current_message = f"""Here is the content of the uploaded text file:
41
 
42
- {uploaded_file_content}
43
 
44
  User's question about the above content: {message}"""
45
 
@@ -48,10 +76,10 @@ User's question about the above content: {message}"""
48
  {"role": "system", "content": system_message}
49
  ]
50
 
51
- # Add conversation history
52
- for user_msg, assistant_msg in history:
53
  messages.append({"role": "user", "content": user_msg})
54
- if assistant_msg: # Add assistant's message if it exists
55
  messages.append({"role": "assistant", "content": assistant_msg})
56
 
57
  # Add the current message
@@ -72,13 +100,18 @@ User's question about the above content: {message}"""
72
  response += token
73
  yield response
74
  except Exception as e:
75
- yield f"Error generating response: {str(e)}"
76
 
77
  # Create the Gradio interface
78
  demo = gr.Blocks()
79
 
80
  with demo:
81
- gr.Markdown("## Chat Interface with File Upload")
 
 
 
 
 
82
 
83
  with gr.Row():
84
  with gr.Column():
@@ -91,7 +124,7 @@ with demo:
91
  upload_output = gr.Textbox(
92
  label="Upload Status",
93
  interactive=False,
94
- lines=4
95
  )
96
 
97
  # Connect file upload handling
@@ -105,7 +138,7 @@ with demo:
105
  respond,
106
  additional_inputs=[
107
  gr.Textbox(
108
- value="You are a helpful AI assistant. When analyzing text files, provide detailed and insightful analysis of their content.",
109
  label="System message"
110
  ),
111
  gr.Slider(
 
3
 
4
  client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")
5
 
6
# Global variables
uploaded_file_content = ""  # full text of the most recently uploaded file
MAX_CHARS = 4000  # Maximum characters to send to API

def truncate_text(text, max_length=MAX_CHARS):
    """Truncate *text* to at most *max_length* characters.

    Prefers to cut at the last sentence boundary (period) found before
    the limit so the truncated text ends cleanly; otherwise it
    hard-truncates and appends an ellipsis.

    Args:
        text: The string to truncate.
        max_length: Maximum number of characters in the returned string.

    Returns:
        A string of at most ``max_length`` characters.  (The original
        fallback returned ``text[:max_length] + "..."``, which exceeded
        the limit by three characters; the ellipsis now fits within it.)
    """
    if len(text) <= max_length:
        return text

    # Prefer ending on a complete sentence.
    last_period = text[:max_length].rfind('.')
    if last_period != -1:
        return text[:last_period + 1]

    # No period found: hard-truncate, reserving room for the ellipsis
    # so the result still respects max_length.
    return text[:max_length - 3] + "..."
22
 
23
  def handle_file_upload(file_obj):
24
  global uploaded_file_content
 
32
  except UnicodeDecodeError:
33
  file_content = content.decode('latin-1')
34
 
35
+ # Store full content but truncate for preview
36
  uploaded_file_content = file_content
37
+ truncated = truncate_text(file_content, 4000) # Store full but preview truncated
38
+ preview = truncated[:200] + "..."
39
+
40
+ total_chars = len(file_content)
41
+ usable_chars = len(truncated)
42
+
43
+ return f"""File uploaded successfully!
44
+ Total length: {total_chars} characters
45
+ Usable length for AI: {usable_chars} characters (due to API limits)
46
+ Preview of beginning:
47
+ {preview}"""
48
+
49
  except Exception as e:
50
  return f"Error uploading file: {str(e)}"
51
 
 
59
  ):
60
  global uploaded_file_content
61
 
62
+ # Truncate file content if needed
63
+ truncated_content = truncate_text(uploaded_file_content) if uploaded_file_content else ""
64
+
65
+ # Format the current message to include truncated file content
66
  current_message = message
67
+ if truncated_content:
68
+ current_message = f"""Here is the content of the uploaded text file (truncated to fit within limits):
69
 
70
+ {truncated_content}
71
 
72
  User's question about the above content: {message}"""
73
 
 
76
  {"role": "system", "content": system_message}
77
  ]
78
 
79
+ # Add conversation history (limited)
80
+ for user_msg, assistant_msg in history[-3:]: # Only keep last 3 exchanges
81
  messages.append({"role": "user", "content": user_msg})
82
+ if assistant_msg:
83
  messages.append({"role": "assistant", "content": assistant_msg})
84
 
85
  # Add the current message
 
100
  response += token
101
  yield response
102
  except Exception as e:
103
+ yield f"Error generating response: {str(e)}\nTry asking about a specific part of the text instead of the whole document."
104
 
105
  # Create the Gradio interface
106
  demo = gr.Blocks()
107
 
108
  with demo:
109
+ gr.Markdown("""## Chat Interface with File Upload
110
+ Note: Due to API limitations, very large texts will be truncated. For best results with long texts:
111
+ - Ask about specific sections or chapters
112
+ - Break up your questions into smaller parts
113
+ - Be specific about what you want to know
114
+ """)
115
 
116
  with gr.Row():
117
  with gr.Column():
 
124
  upload_output = gr.Textbox(
125
  label="Upload Status",
126
  interactive=False,
127
+ lines=6
128
  )
129
 
130
  # Connect file upload handling
 
138
  respond,
139
  additional_inputs=[
140
  gr.Textbox(
141
+ value="You are a helpful AI assistant. When analyzing text files, provide detailed and insightful analysis of their content. If the text is truncated, acknowledge this in your response.",
142
  label="System message"
143
  ),
144
  gr.Slider(