from typing import Any, Dict, List

import torch
from peft import PeftModel
from transformers import (
    LlavaNextVideoForConditionalGeneration,
    LlavaNextVideoProcessor,
)

class EndpointHandler:
    def __init__(self, path: str = ""):
        # `path` is part of the Inference Endpoints handler contract; it is
        # unused here because the model and adapter are pinned to fixed Hub
        # repositories.
        self.base_model_name = "llava-hf/LLaVA-NeXT-Video-7B-hf"
        self.adapter_model_name = "EnariGmbH/surftown-1.0"

        # Load the base model in half precision, sharded across the
        # available devices.
        self.model = LlavaNextVideoForConditionalGeneration.from_pretrained(
            self.base_model_name,
            torch_dtype=torch.float16,
            device_map="auto",
        )

        # Attach the fine-tuned LoRA adapter to the base model.
        self.model = PeftModel.from_pretrained(self.model, self.adapter_model_name)

        # Merge the adapter weights into the base model and drop the PEFT
        # wrapper, leaving a plain transformers model for inference.
        self.model = self.model.merge_and_unload()

        # Load the processor from the adapter repo so that any
        # fine-tuning-specific preprocessing settings are picked up.
        self.processor = LlavaNextVideoProcessor.from_pretrained(self.adapter_model_name)

        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data (Dict): Contains the input data, including "clip" (the video
                frames) and "prompt" (the text prompt).

        Returns:
            List[Dict[str, Any]]: The generated text from the model.
        """
        clip = data.get("clip")
        prompt = data.get("prompt")

        if clip is None or prompt is None:
            return [{"error": "Missing 'clip' or 'prompt' in input data"}]

        # Preprocess the prompt and video frames, and move the tensors to the
        # model's device.
        inputs_video = self.processor(
            text=prompt, videos=clip, padding=True, return_tensors="pt"
        ).to(self.model.device)

        # Generate up to 512 new tokens with nucleus sampling.
        generate_kwargs = {"max_new_tokens": 512, "do_sample": True, "top_p": 0.9}
        output = self.model.generate(**inputs_video, **generate_kwargs)
        generated_text = self.processor.batch_decode(output, skip_special_tokens=True)

        # The decoded output contains the full conversation; keep only the text
        # after the "ASSISTANT:" marker. str.find returns -1 when the marker is
        # absent, so fall back to the full output in that case rather than
        # returning a mis-sliced fragment.
        marker = "ASSISTANT:"
        marker_start = generated_text[0].find(marker)
        if marker_start == -1:
            assistant_answer = generated_text[0].strip()
        else:
            assistant_answer = generated_text[0][marker_start + len(marker):].strip()

        return [{"generated_text": assistant_answer}]
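

# Minimal local smoke test: a hedged sketch, not part of the original handler.
# Assumptions: the processor accepts a (frames, height, width, channels) uint8
# array for `videos`, and the prompt follows the "USER: <video>\n... ASSISTANT:"
# conversation format that the marker-based extraction above expects. The frame
# count and resolution below are illustrative only.
if __name__ == "__main__":
    import numpy as np

    handler = EndpointHandler()
    dummy_clip = np.random.randint(0, 255, size=(8, 336, 336, 3), dtype=np.uint8)
    prompt = "USER: <video>\nDescribe what happens in this clip. ASSISTANT:"
    print(handler({"clip": dummy_clip, "prompt": prompt}))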