Shailesh Zope committed
Commit 80d0154
Parent: 425ee8e

Create app.py

Files changed (1)
app.py (+123, −0)
app.py ADDED
@@ -0,0 +1,123 @@
import os
from threading import Thread
from typing import Iterator

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

DESCRIPTION = """\
# BioMistral-7B Chat
This Space demonstrates [BioMistral-7B](https://huggingface.co/BioMistral/BioMistral-7B) by BioMistral, a Mistral model with 7B parameters fine-tuned for chat instructions.
Advisory notice: although BioMistral is intended to encapsulate medical knowledge sourced from high-quality evidence, it has not been tailored to convey this knowledge effectively, safely, or suitably in professional settings. We advise against using BioMistral in medical contexts unless it is thoroughly aligned to a specific use case and further tested, notably in randomized controlled trials in real-world medical environments. BioMistral-7B may carry inherent risks and biases that have not yet been thoroughly assessed, and its performance has not been evaluated in real-world clinical settings. We therefore recommend using BioMistral-7B strictly as a research tool and advise against deploying it in production for natural language generation or any professional health or medical purpose.
"""


model_id = "BioMistral/BioMistral-7B"
# Load in float16 to halve memory use; device_map="auto" places the weights on the available device.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.use_default_system_prompt = False


@spaces.GPU  # on ZeroGPU Spaces this requests a GPU for each call; it is a no-op elsewhere
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    # Rebuild the full conversation in the chat-template message format.
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    # Keep only the most recent tokens when the conversation exceeds the context budget.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    # Run generation on a background thread and stream tokens back as they are produced.
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        repetition_penalty=repetition_penalty,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    # Yield the accumulated text so the UI shows a growing partial response.
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)


chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["Hello there! How are you doing?"],
    ],
)

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    chat_interface.render()

if __name__ == "__main__":
    demo.queue(max_size=20).launch(show_api=False)
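
For a quick check outside the Gradio UI, the streaming generator can be exercised directly. The snippet below is a hedged sketch, not part of the commit: it assumes the file above is importable as app, that the BioMistral-7B weights can be downloaded (device_map="auto" additionally requires the accelerate package), and both prompts are illustrative.

# Hypothetical smoke test; `app` refers to the app.py added in this commit.
from app import generate

reply = ""
for reply in generate(
    message="Hello there! How are you doing?",
    chat_history=[],
    system_prompt="You are a cautious biomedical research assistant.",  # illustrative
    max_new_tokens=64,
):
    pass  # each iteration yields the response accumulated so far
print(reply)

Because generate yields cumulative text rather than per-token deltas, the final yielded value is the complete reply.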