demo buildout 13
app.py CHANGED

@@ -50,13 +50,12 @@ def infer(
     max_new_tokens: int
 ) -> str:
     inputs = processor(text=text, images=resize_and_pad(image, 448), return_tensors="pt", padding="longest", do_convert_rgb=True).to(device).to(dtype=model.dtype)
-
-
-
-
-
-    result
-    return result[0][len(text):].lstrip("\n")
+    generated_ids = model.generate(
+        **inputs,
+        max_length=2048
+    )
+    result = processor.decode(generated_ids[0], skip_special_tokens=True)
+    return result

 ######## Demo

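After this hunk, `infer` hands the processor output straight to `model.generate` and decodes the first returned sequence with `processor.decode`. Below is a minimal sketch of how the updated function reads, assuming the `processor`, `model`, `device`, and `resize_and_pad` defined earlier in `app.py`; the `image` and `text` parameters are not visible in the diff and are inferred from the processor call, so treat them as assumptions.

```python
# Sketch of infer() as it reads after this hunk (not the verbatim file).
# `processor`, `model`, `device`, and `resize_and_pad` are defined elsewhere
# in app.py; the image/text parameters are assumed from the processor call.
def infer(
    image,                # PIL image from the Gradio image widget (assumed)
    text: str,            # prompt text (assumed)
    max_new_tokens: int,  # still in the signature, but no longer used below
) -> str:
    inputs = processor(
        text=text,
        images=resize_and_pad(image, 448),
        return_tensors="pt",
        padding="longest",
        do_convert_rgb=True,
    ).to(device).to(dtype=model.dtype)
    # Generation is now bounded by total sequence length instead of new tokens.
    generated_ids = model.generate(
        **inputs,
        max_length=2048,
    )
    # Decode the first (and only) sequence, dropping special tokens.
    result = processor.decode(generated_ids[0], skip_special_tokens=True)
    return result
```

Note that the old prompt-stripping return (`result[0][len(text):].lstrip("\n")`) is gone, so the decoded string is returned as-is and may still begin with the prompt text.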
@@ -81,14 +80,6 @@ with gr.Blocks(css="style.css") as demo:

     text_output = gr.Text(label="Text Output")
     chat_btn = gr.Button()
-    tokens = gr.Slider(
-        label="Max New Tokens",
-        info="Set to larger for longer generation.",
-        minimum=10,
-        maximum=100,
-        value=20,
-        step=10,
-    )

     chat_inputs = [
         image,
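This hunk drops the "Max New Tokens" slider from the `gr.Blocks` layout, so the generation budget is no longer user-tunable (and, per the first hunk, `infer` now caps output with `max_length=2048` rather than `max_new_tokens`). For reference, here is the removed control reproduced in a minimal standalone Blocks app; everything around the slider, including the click wiring, is an assumption, since the diff does not show it.

```python
# Minimal standalone reproduction of the slider this commit removes.
# Everything except the gr.Slider(...) arguments is illustrative: the real
# app.py widgets and event wiring are not fully shown in the diff.
import gradio as gr

def fake_infer(image, text, max_new_tokens):
    # Stand-in for app.py's infer(); just echoes the chosen token budget.
    return f"(would generate up to {max_new_tokens} new tokens for: {text!r})"

with gr.Blocks() as demo:
    image = gr.Image(type="pil")
    text_input = gr.Text(label="Prompt")
    text_output = gr.Text(label="Text Output")
    chat_btn = gr.Button("Chat")
    tokens = gr.Slider(  # the component deleted by this commit
        label="Max New Tokens",
        info="Set to larger for longer generation.",
        minimum=10,
        maximum=100,
        value=20,
        step=10,
    )
    chat_btn.click(fake_infer, inputs=[image, text_input, tokens], outputs=text_output)

if __name__ == "__main__":
    demo.launch()
```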