Pierce Maloney committed on
Commit 31cf0d3
1 Parent(s): eebf1ef

adding logging + some bad words

Files changed (1)
  1. handler.py +93 -20

handler.py CHANGED
@@ -1,9 +1,13 @@
+import logging
 from typing import Dict, List, Any
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, StoppingCriteria, StoppingCriteriaList
 
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 class EndpointHandler():
     def __init__(self, path=""):
+        logging.info("Initializing EndpointHandler with model path: %s", path)
         tokenizer = AutoTokenizer.from_pretrained(path)
         tokenizer.pad_token = tokenizer.eos_token
         self.model = AutoModelForCausalLM.from_pretrained(path)
@@ -11,55 +15,124 @@ class EndpointHandler():
         self.stopping_criteria = StoppingCriteriaList([StopAtPeriodCriteria(tokenizer)])
 
     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
-        """
-        data args:
-            inputs (:obj: `str`)
-            kwargs
-        Return:
-            A :obj:`list` | `dict`: will be serialized and returned
-        """
+        logging.info("Starting inference")
         inputs = data.pop("inputs", data)
         additional_bad_words_ids = data.pop("additional_bad_words_ids", [])
+
+        # Log the input size
+        logging.info("Encoding inputs")
+        input_ids = self.tokenizer.encode(inputs, return_tensors="pt")
+        logging.info("Input IDs shape: %s", input_ids.shape)
 
+        max_generation_length = 75 # Desired number of tokens to generate
+        max_input_length = 4092 - max_generation_length # Maximum input length to allow space for generation
 
-        # 3070, 10456, [313, 334] corresponds to "(*", and we do not want to output a comment
+        # 3070, 10456, [313, 334], [29898, 1068] corresponds to "(*", and we do not want to output a comment
         # 13 is a newline character
         # [1976, 441, 29889], [4920, 441, 29889] is "Abort." [4920, 18054, 29889] is "Aborted."
         # [2087, 29885, 4430, 29889] is "Admitted."
-        bad_words_ids = [[3070], [313, 334], [10456], [13], [1976, 441, 29889], [2087, 29885, 4430, 29889], [4920, 441], [4920, 441, 29889], [4920, 18054, 29889]]
+        bad_words_ids = [[3070], [313, 334], [10456], [13], [1976, 441, 29889], [2087, 29885, 4430, 29889], [4920, 441], [4920, 441, 29889], [4920, 18054, 29889], [29898, 1068]]
         bad_words_ids.extend(additional_bad_words_ids)
 
-        input_ids = self.tokenizer.encode(inputs, return_tensors="pt")
-        max_generation_length = 75 # Desired number of tokens to generate
-        max_input_length = 4092 - max_generation_length # Maximum input length to allow space for generation
-
-        # # Truncate input_ids to the most recent tokens that fit within the max_input_length
+        # Truncation and generation logging
         if input_ids.shape[1] > max_input_length:
+            logging.info("Truncating input IDs to fit within max input length")
             input_ids = input_ids[:, -max_input_length:]
 
         max_length = input_ids.shape[1] + max_generation_length
 
+        logging.info("Generating output")
         generated_ids = self.model.generate(
             input_ids,
-            max_length=max_length, # 50 new tokens
+            max_length=max_length,
             bad_words_ids=bad_words_ids,
-            temperature=0.7,
+            temperature=0.5,
             top_k=40,
             do_sample=True,
             stopping_criteria=self.stopping_criteria,
         )
+        logging.info("Finished generating output")
 
         generated_text = self.tokenizer.decode(generated_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
         prediction = [{"generated_text": generated_text, "generated_ids": generated_ids[0][input_ids.shape[1]:].tolist()}]
+        logging.info("Inference complete")
         return prediction
 
-
 class StopAtPeriodCriteria(StoppingCriteria):
     def __init__(self, tokenizer):
         self.tokenizer = tokenizer
 
     def __call__(self, input_ids, scores, **kwargs):
-        # Decode the last generated token to text
         last_token_text = self.tokenizer.decode(input_ids[:, -1], skip_special_tokens=True)
-        # Check if the decoded text ends with a period
-        return '.' in last_token_text
+        logging.info("StopAtPeriodCriteria called. Last token text: '%s'", last_token_text)
+        return '.' in last_token_text
+
+
+
+
+# from typing import Dict, List, Any
+# from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, StoppingCriteria, StoppingCriteriaList
+
+
+# class EndpointHandler():
+#     def __init__(self, path=""):
+#         tokenizer = AutoTokenizer.from_pretrained(path)
+#         tokenizer.pad_token = tokenizer.eos_token
+#         self.model = AutoModelForCausalLM.from_pretrained(path)
+#         self.tokenizer = tokenizer
+#         self.stopping_criteria = StoppingCriteriaList([StopAtPeriodCriteria(tokenizer)])
+
+#     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+#         """
+#         data args:
+#             inputs (:obj: `str`)
+#             kwargs
+#         Return:
+#             A :obj:`list` | `dict`: will be serialized and returned
+#         """
+#         inputs = data.pop("inputs", data)
+#         additional_bad_words_ids = data.pop("additional_bad_words_ids", [])
+
+
+#         # 3070, 10456, [313, 334], [29898, 1068] corresponds to "(*", and we do not want to output a comment
+#         # 13 is a newline character
+#         # [1976, 441, 29889], [4920, 441, 29889] is "Abort." [4920, 18054, 29889] is "Aborted."
+#         # [2087, 29885, 4430, 29889] is "Admitted."
+#         bad_words_ids = [[3070], [313, 334], [10456], [13], [1976, 441, 29889], [2087, 29885, 4430, 29889], [4920, 441], [4920, 441, 29889], [4920, 18054, 29889], [29898, 1068]]
+#         bad_words_ids.extend(additional_bad_words_ids)
+
+#         input_ids = self.tokenizer.encode(inputs, return_tensors="pt")
+#         max_generation_length = 75 # Desired number of tokens to generate
+#         max_input_length = 4092 - max_generation_length # Maximum input length to allow space for generation
+
+#         # # Truncate input_ids to the most recent tokens that fit within the max_input_length
+#         if input_ids.shape[1] > max_input_length:
+#             input_ids = input_ids[:, -max_input_length:]
+
+#         max_length = input_ids.shape[1] + max_generation_length
+
+#         generated_ids = self.model.generate(
+#             input_ids,
+#             max_length=max_length, # 50 new tokens
+#             bad_words_ids=bad_words_ids,
+#             temperature=0.5,
+#             top_k=40,
+#             do_sample=True,
+#             stopping_criteria=self.stopping_criteria,
+#         )
+
+#         generated_text = self.tokenizer.decode(generated_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
+#         prediction = [{"generated_text": generated_text, "generated_ids": generated_ids[0][input_ids.shape[1]:].tolist()}]
+#         return prediction
+
+
+# class StopAtPeriodCriteria(StoppingCriteria):
+#     def __init__(self, tokenizer):
+#         self.tokenizer = tokenizer
+
+#     def __call__(self, input_ids, scores, **kwargs):
+#         # Decode the last generated token to text
+#         last_token_text = self.tokenizer.decode(input_ids[:, -1], skip_special_tokens=True)
+#         logging.info("StopAtPeriodCriteria called. Last token text: '%s'", last_token_text)
+#         # Check if the decoded text ends with a period
+#         return '.' in last_token_text
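For reference, a custom handler like this is exercised by constructing EndpointHandler with a model path and calling it with a payload dict. The snippet below is a minimal local smoke test, not part of the commit: the ./model checkpoint path and the prompt string are placeholder assumptions, and the extra banned sequence [[13]] simply reuses the newline token id documented in the diff's comments.

# Minimal local smoke test for the handler above (illustrative sketch only).
# Assumptions: this script sits next to handler.py, and "./model" points at
# a causal-LM checkpoint that AutoModelForCausalLM can load.
from handler import EndpointHandler

handler = EndpointHandler(path="./model")

payload = {
    "inputs": "Proof.",  # placeholder prompt; any string works here
    # Extra sequences to suppress, in the nested-list format that
    # generate()'s bad_words_ids expects; [[13]] is the newline token
    # already noted in the handler's comments.
    "additional_bad_words_ids": [[13]],
}

prediction = handler(payload)
print(prediction[0]["generated_text"])
print(prediction[0]["generated_ids"])

Because the commit configures logging.basicConfig at import time, running this also surfaces the new INFO lines (input shape, truncation, and the per-step StopAtPeriodCriteria messages) on stderr.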