SauravMaheshkar committed on
Commit 4b724c7 • 1 Parent(s): bc1817b

chore: use local as app_file

Files changed (2)
  1. README.md +1 -1
  2. app.py +0 -207
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: blue
 colorTo: blue
 sdk: gradio
 sdk_version: 5.7.1
-app_file: app.py
+app_file: local.py
 pinned: false
 short_description: Reason about papers using LLMs
 license: agpl-3.0
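
Since app.py is deleted in the same commit, the Space now boots from local.py, which this diff does not show. The `app_file` key in the README front matter names the script a Gradio Space executes on startup, so whatever lives at local.py must build and launch the app itself. A rough sketch of the shape such an entry point takes (contents hypothetical; only the `app_file` pointer is confirmed by the diff):

```python
# local.py -- hypothetical sketch; the actual file is not part of this commit.
# A Space runs this script directly, so it must construct and launch the app.
import gradio as gr


def create_interface() -> gr.Blocks:
    with gr.Blocks() as demo:
        gr.Markdown("# papers.ai")
        gr.Chatbot(type="messages", show_label=False)
    return demo


if __name__ == "__main__":
    create_interface().launch()
```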
app.py DELETED
@@ -1,207 +0,0 @@
-import os
-import time
-from typing import Dict, List, Optional, TypeAlias
-
-import gradio as gr
-import torch
-import weave
-from papersai.utils import load_paper_as_context
-from transformers import pipeline
-
-os.environ["TOKENIZERS_PARALLELISM"] = "false"
-
-HistoryType: TypeAlias = List[Dict[str, str]]
-
-# Initialize the LLM and Weave client
-client = weave.init("papersai")
-checkpoint: str = "HuggingFaceTB/SmolLM2-135M-Instruct"
-pipe = pipeline(
-    model=checkpoint,
-    torch_dtype=torch.bfloat16,
-    device_map="auto",
-)
-
-
-class ChatState:
-    """Utility class to store context and last response"""
-
-    def __init__(self):
-        self.context = None
-        self.last_response = None
-
-
-def record_feedback(x: gr.LikeData) -> None:
-    """
-    Logs user feedback on the assistant's response in the form of a
-    like/dislike reaction.
-
-    Reference:
-    * https://weave-docs.wandb.ai/guides/tracking/feedback
-
-    Args:
-        x (gr.LikeData): User feedback data
-
-    Returns:
-        None
-    """
-    call = state.last_response
-
-    # Remove any existing feedback before adding new feedback
-    for existing_feedback in list(call.feedback):
-        call.feedback.purge(existing_feedback.id)
-
-    if x.liked:
-        call.feedback.add_reaction("👍")
-    else:
-        call.feedback.add_reaction("👎")
-
-
-@weave.op()
-def invoke(history: HistoryType):
-    """
-    Simple wrapper around llm inference wrapped in a weave op
-
-    Args:
-        history (HistoryType): Chat history
-
-    Returns:
-        BaseMessage: Response from the model
-    """
-    input_text = pipe.tokenizer.apply_chat_template(
-        history,
-        tokenize=False,
-    )
-    response = pipe(input_text, do_sample=True, top_p=0.95, max_new_tokens=100)[0][
-        "generated_text"
-    ]
-    response = response.split("\nassistant\n")[-1]
-    return response
-
-
-def update_state(history: HistoryType, message: Optional[Dict[str, str]]):
-    """
-    Update history and app state with the latest user input.
-
-    Args:
-        history (HistoryType): Chat history
-        message (Optional[Dict[str, str]]): User input message
-
-    Returns:
-        Tuple[HistoryType, gr.MultimodalTextbox]: Updated history and chat input
-    """
-    if message is None:
-        return history, gr.MultimodalTextbox(value=None, interactive=True)
-
-    # Initialize history if None
-    if history is None:
-        history = []
-
-    # Handle file uploads without adding to visible history
-    if isinstance(message, dict) and "files" in message:
-        for file_path in message["files"]:
-            try:
-                text = load_paper_as_context(file_path=file_path)
-                doc_context = [x.get_content() for x in text]
-                state.context = " ".join(doc_context)[
-                    : pipe.model.config.max_position_embeddings
-                ]
-                history.append(
-                    {"role": "system", "content": f"Context: {state.context}\n"}
-                )
-            except Exception as e:
-                history.append(
-                    {"role": "assistant", "content": f"Error loading file: {str(e)}"}
-                )
-
-    # Handle text input
-    if isinstance(message, dict) and message.get("text"):
-        history.append({"role": "user", "content": message["text"]})
-
-    return history, gr.MultimodalTextbox(value=None, interactive=True)
-
-
-def bot(history: HistoryType):
-    """
-    Generate response from the LLM and stream it back to the user.
-
-    Args:
-        history (HistoryType): Chat history
-
-    Yields:
-        response from the LLM
-    """
-    if not history:
-        return history
-
-    try:
-        # Get response from LLM
-        response, call = invoke.call(history)
-        state.last_response = call
-
-        # Add empty assistant message
-        history.append({"role": "assistant", "content": ""})
-
-        # Stream the response
-        for character in response:
-            history[-1]["content"] += character
-            time.sleep(0.02)
-            yield history
-
-    except Exception as e:
-        history.append({"role": "assistant", "content": f"Error: {str(e)}"})
-        yield history
-
-
-def create_interface():
-    with gr.Blocks() as demo:
-        global state
-        state = ChatState()
-        gr.Markdown(
-            """
-            <a href="https://github.com/SauravMaheshkar/papersai">
-                <div align="center"><h1>papers.ai</h1></div>
-            </a>
-            """,
-        )
-        chatbot = gr.Chatbot(
-            show_label=False,
-            height=600,
-            type="messages",
-            show_copy_all_button=True,
-            placeholder="Upload a research paper and ask questions!!",
-        )
-
-        chat_input = gr.MultimodalTextbox(
-            interactive=True,
-            file_count="single",
-            placeholder="Upload a document or type your message...",
-            show_label=False,
-        )
-
-        chat_msg = chat_input.submit(
-            fn=update_state,
-            inputs=[chatbot, chat_input],
-            outputs=[chatbot, chat_input],
-        )
-
-        bot_msg = chat_msg.then(  # noqa: F841
-            fn=bot, inputs=[chatbot], outputs=chatbot, api_name="bot_response"
-        )
-
-        chatbot.like(
-            fn=record_feedback,
-            inputs=None,
-            outputs=None,
-            like_user_message=True,
-        )
-
-    return demo
-
-
-def main():
-    demo = create_interface()
-    demo.launch(share=False)
-
-
-if __name__ == "__main__":
-    main()
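
The deleted app's feedback loop leans on one Weave idiom worth noting: invoking an op via `.call()` instead of a plain call returns the tracked `Call` object alongside the result, and a reaction can later be attached to that call, which is exactly what `bot()` and `record_feedback()` do together. A minimal, self-contained sketch of the same pattern, with a toy op standing in for the real pipeline inference:

```python
# Minimal sketch of the weave .call() + feedback pattern used above.
import weave

weave.init("papersai")  # same project name the deleted app.py used


@weave.op()
def invoke(prompt: str) -> str:
    # Stand-in for the real transformers pipeline inference.
    return prompt.upper()


# .call() returns the op's result plus the Call record that tracks it.
response, call = invoke.call("hello")

# record_feedback() attaches a reaction like this when a message is liked.
call.feedback.add_reaction("👍")
```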