merve HF staff committed on
Commit
8e9be15
1 Parent(s): d4fa011

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -38,7 +38,6 @@ def model_inference(
38
  text = [text]
39
 
40
  inputs = processor(text=text, images=images, padding=True, return_tensors="pt").to("cuda")
41
- print("inputs",inputs)
42
 
43
  assert decoding_strategy in [
44
  "Greedy",
@@ -54,8 +53,8 @@ def model_inference(
54
  generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=max_new_tokens,
55
  temperature=temperature, do_sample=do_sample, repetition_penalty=repetition_penalty,
56
  top_p=top_p),
57
- generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
58
- #generated_texts = processor.batch_decode(generated_ids[:, generation_args["input_ids"].size(1):], skip_special_tokens=True)
59
  print("INPUT:", text, "|OUTPUT:", generated_texts)
60
  return generated_texts[0]
61
 
 
38
  text = [text]
39
 
40
  inputs = processor(text=text, images=images, padding=True, return_tensors="pt").to("cuda")
 
41
 
42
  assert decoding_strategy in [
43
  "Greedy",
 
53
  generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=max_new_tokens,
54
  temperature=temperature, do_sample=do_sample, repetition_penalty=repetition_penalty,
55
  top_p=top_p),
56
+ #generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
57
+ generated_texts = processor.batch_decode(generated_ids[:, inputs["input_ids"].size(1):], skip_special_tokens=True)
58
  print("INPUT:", text, "|OUTPUT:", generated_texts)
59
  return generated_texts[0]
60