from PIL import Image
from flask import Flask, jsonify, request
from transformers import AutoModelForVision2Seq, AutoProcessor

app = Flask(__name__)

# Load the Kosmos-2 grounding checkpoint once at startup. `trust_remote_code`
# is required because this checkpoint ships custom modeling/processing code.
MODEL_NAME = "ydshieh/kosmos-2-patch14-224"
model = AutoModelForVision2Seq.from_pretrained(MODEL_NAME, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)


@app.route('/process_grounding_prompt', methods=['POST'])
def process_prompt():
    """Run Kosmos-2 grounded generation on an uploaded image.

    Expects a multipart POST with:
      - file field ``image``: the image to run grounding on
      - form field ``prompt``: the text prompt for the model

    Returns JSON ``{"message": <cleaned generated text>, "entities": [...]}``
    on success; on failure, ``{"error": <message>}`` with HTTP 400 (bad
    request) or 500 (inference error).
    """
    # Validate the request up front so the client gets a 400, not a masked 500.
    # (The original did ``'' + request.form.get('prompt')``, which raises
    # TypeError when the field is missing.)
    if 'image' not in request.files:
        return jsonify({"error": "missing 'image' file field"}), 400
    prompt = request.form.get('prompt')
    if not prompt:
        return jsonify({"error": "missing 'prompt' form field"}), 400

    try:
        image = Image.open(request.files['image'].stream)
        inputs = processor(text=prompt, images=image, return_tensors="pt")

        # Per the Kosmos-2 generation recipe, the trailing token of the text
        # inputs is dropped and the model re-embeds the image itself
        # (``img_features=None``).
        generated_ids = model.generate(
            pixel_values=inputs["pixel_values"],
            input_ids=inputs["input_ids"][:, :-1],
            attention_mask=inputs["attention_mask"][:, :-1],
            img_features=None,
            img_attn_mask=inputs["img_attn_mask"][:, :-1],
            use_cache=True,
            max_new_tokens=64,
        )
        generated_text = processor.batch_decode(
            generated_ids, skip_special_tokens=True
        )[0]

        # Clean up the raw generation and extract the grounded entities
        # (text spans plus bounding boxes).
        processed_text, entities = processor.post_process_generation(generated_text)
        return jsonify({"message": processed_text, "entities": entities})
    except Exception as e:
        # Inference/decoding failures are server-side problems: report 500.
        return jsonify({"error": str(e)}), 500


if __name__ == '__main__':
    app.run(host='localhost', port=8005)