MaziyarPanahi committed on
Commit
6ad6f9b
1 Parent(s): 91c00b7
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -62,10 +62,10 @@ def bot_streaming(message, history):
62
  prompt = f"{message['text']}<|image_1|>\nCan you convert the table to markdown format?{prompt_suffix}{assistant_prompt}"
63
  # print(f"prompt: {prompt}")
64
  image = Image.open(image)
65
- inputs = processor(prompt, [image], return_tensors='pt').to(0, torch.float16)
66
 
67
  streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": False, "skip_prompt": True})
68
- generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=False)
69
 
70
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
71
  thread.start()
 
62
  prompt = f"{message['text']}<|image_1|>\nCan you convert the table to markdown format?{prompt_suffix}{assistant_prompt}"
63
  # print(f"prompt: {prompt}")
64
  image = Image.open(image)
65
+ inputs = processor(prompt, image, return_tensors='pt').to("cuda:0")
66
 
67
  streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": False, "skip_prompt": True})
68
+ generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=False, eos_token_id=processor.tokenizer.eos_token_id)
69
 
70
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
71
  thread.start()