sandz7 commited on
Commit
daa8caf
β€’
1 Parent(s): cb6f09d

placed sliders for UI

Browse files
Files changed (1) hide show
  1. app.py +50 -6
app.py CHANGED
@@ -26,7 +26,11 @@ model.generation_config.eos_token_id = 128009
26
 
27
  @spaces.GPU(duration=120)
28
  def krypton(input,
29
- history):
 
 
 
 
30
  """
31
  Receives inputs (prompts with images if they were added),
32
  the image is formatted for PIL and the prompt is formatted for the model,
@@ -55,12 +59,31 @@ def krypton(input,
55
  image = Image.open(image)
56
  # image = Image.open(requests.get(url, stream=True).raw)
57
  prompt = ("<|start_header_id|>user<|end_header_id|>\n\n<image>\n{input['text']}<|eot_id|>"
58
- "<|start_header_id|>assistant<|end_header_id|>\n\n")
59
  inputs = processor(prompt, image, return_tensors='pt').to('cuda', torch.float16)
60
- outputs = model.generate(**inputs, max_new_tokens=200, do_sample=False)
61
- output_text = processor.decode(outputs[0], skip_special_tokens=True)
62
- print(output_text)
63
- return output_text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
  chatbot=gr.Chatbot(height=600, label="Krypt AI")
66
  chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter your question or upload an image.", show_label=False)
@@ -70,6 +93,27 @@ with gr.Blocks(fill_height=True) as demo:
70
  fn=krypton,
71
  chatbot=chatbot,
72
  fill_height=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
  multimodal=True,
74
  textbox=chat_input,
75
  )
 
26
 
27
  @spaces.GPU(duration=120)
28
  def krypton(input,
29
+ history,
30
+ max_new_tokens,
31
+ temperature,
32
+ num_beams,
33
+ do_sample: bool=True):
34
  """
35
  Receives inputs (prompts with images if they were added),
36
  the image is formatted for PIL and the prompt is formatted for the model,
 
59
  image = Image.open(image)
60
  # image = Image.open(requests.get(url, stream=True).raw)
61
  prompt = ("<|start_header_id|>user<|end_header_id|>\n\n<image>\n{input['text']}<|eot_id|>"
62
+ "<|start_header_id|>assistant<|end_header_id|>\n\n")
63
  inputs = processor(prompt, image, return_tensors='pt').to('cuda', torch.float16)
64
+
65
+ # Streamer
66
+ streamer = TextIteratorStreamer(processor, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
67
+
68
+ if temperature == 0.0:
69
+ do_sample = False
70
+
71
+ # Generation kwargs
72
+ generation_kwargs = dict(
73
+ inputs=inputs,
74
+ max_new_tokens=max_new_tokens,
75
+ temperature=temperature,
76
+ num_beams=num_beams,
77
+ do_sample=do_sample
78
+ )
79
+
80
+ thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
81
+ thread.start()
82
+
83
+ outputs = []
84
+ for text in streamer:
85
+ outputs.append(text)
86
+ yield "".join(outputs)
87
 
88
  chatbot=gr.Chatbot(height=600, label="Krypt AI")
89
  chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter your question or upload an image.", show_label=False)
 
93
  fn=krypton,
94
  chatbot=chatbot,
95
  fill_height=True,
96
+ additional_inputs_accordion=gr.Accordion(label="βš™οΈ Parameters", open=False, render=False),
97
+ additional_inputs=[
98
+ gr.Slider(minimum=20,
99
+ maximum=80,
100
+ step=1,
101
+ value=50,
102
+ label="Max New Tokens",
103
+ render=False),
104
+ gr.Slider(minimum=0.0,
105
+ maximum=1.0,
106
+ step=0.1,
107
+ value=0.7,
108
+ label="Temperature",
109
+ render=False),
110
+ gr.Slider(minimum=1,
111
+ maximum=12,
112
+ step=1,
113
+ value=5,
114
+ label="Number of Beams",
115
+ render=False),
116
+ ],
117
  multimodal=True,
118
  textbox=chat_input,
119
  )