Spaces:
Sleeping
Sleeping
speed up inference
Browse files

Files changed: ShaderEval.py (+2 -2)
@@ -106,14 +106,14 @@ class ReturnGenerationEvaluator(evaluate.TextGenerationEvaluator):
 106
 107     def _estimate_stopping(self, labels, **kwargs):
 108         """ estimates max_new_tokens for the pipeline call
-109             by counting the characters in the longest string of the references
+109             by counting the characters in the longest string of the references adding 5 (for good measure but probably not needed)
 110         Args:
 111             labels: A list of dicts by knowing the labels
 112         Returns:
 113             `int`: the estimated max_new_tokens, should be smaller than context_lenght in all cases
 114         """
 115         context_lenght = self._resolve_context_lenght(**kwargs)
-116         estimate = min(max([len(ref) for ref in labels])
+116         estimate = min(max([len(ref) for ref in labels]) + 5, context_lenght)
 117         return estimate
 118
 119         # this one needs to be adjusted