atwine committed on
Commit 40fe8c0
Parent: 5709f6a

Create handler.py

Files changed (1):
  handler.py +52 -0
handler.py ADDED
@@ -0,0 +1,52 @@
+from typing import Any, Dict, List
+
+import torch
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+
+class EndpointHandler:
+    def __init__(self, path: str = ""):
+        # Load the model and tokenizer from the repository path.
+        self.model = AutoModelForSeq2SeqLM.from_pretrained(path)
+        self.tokenizer = AutoTokenizer.from_pretrained(path)
+        # Run on GPU when one is available.
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.model.to(self.device)
+        self.model.eval()
+
+    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
+        """
+        Args:
+            data (:obj:`dict`):
+                Includes the input text under the "inputs" key and optional
+                generation settings under the "parameters" key.
+        """
+        # Extract the input text and any client-supplied generation parameters.
+        inputs = data.pop("inputs", data)
+        parameters = data.pop("parameters", None)
+        if parameters is None:
+            parameters = {}
+
+        # Tokenize the input text.
+        input_ids = self.tokenizer(inputs, return_tensors="pt").input_ids.to(self.device)
+
+        # Fixed generation settings; these override any client-supplied values.
+        parameters["max_length"] = 512
+        parameters["min_length"] = 100
+        parameters["length_penalty"] = 2.0
+        parameters["num_beams"] = 10
+        parameters["early_stopping"] = True
+        # Note: temperature, top_k, and top_p only take effect when
+        # do_sample=True; plain beam search ignores them.
+        parameters["temperature"] = 0.0
+        parameters["top_k"] = 15
+        parameters["top_p"] = 0.8
+
+        # Generate the output sequence.
+        with torch.inference_mode():
+            outputs = self.model.generate(input_ids, **parameters)
+
+        # Decode the prediction back to text.
+        prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        return [{"generated_text": prediction}]
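For reference, a minimal local smoke test of the handler is sketched below. It mirrors the payload shape that Hugging Face Inference Endpoints sends to a custom handler ("inputs" plus optional "parameters"). The checkpoint path google/flan-t5-base is only a placeholder assumption, not part of this commit; any checkpoint loadable with AutoModelForSeq2SeqLM would do.

# Hypothetical smoke test; not part of this commit.
from handler import EndpointHandler

# Placeholder checkpoint path (assumption): substitute the actual
# seq2seq model repository this endpoint serves.
handler = EndpointHandler(path="google/flan-t5-base")

payload = {
    "inputs": "Summarize: The committee met on Tuesday to review the annual budget ...",
    "parameters": {"num_beams": 4},  # overridden by the handler's fixed settings
}

print(handler(payload))  # -> [{"generated_text": "..."}]

Because __call__ overwrites the generation settings after reading "parameters", the num_beams value in the payload above has no effect; every request runs with the handler's fixed beam-search configuration.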