Vipitis committed
Commit
9d3e407
1 Parent(s): 10f00e5

initial commit

Files changed (2)
  1. ShaderEval.py +206 -0
  2. app.py +7 -0
ShaderEval.py ADDED
@@ -0,0 +1,206 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # TODO: license: MIT pending (the evaluation suite itself can be completely open, nothing copyleft from the dataset reaches us here)
+ """TODO: Add a description here."""
+
+ # TODO: Add BibTeX citation
+ _CITATION = """\
+ @InProceedings{huggingface:module,
+ title = {A great new module},
+ authors={huggingface, Inc.},
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the module here
+ _DESCRIPTION = """\
+ This EvaluationSuite currently solves 1 task to test the code intelligence of generative language models for "creative programming" (fragment shaders).
+ """
+
+
+
+
+ # via https://huggingface.co/docs/evaluate/evaluation_suite
+ import evaluate
+ from evaluate import evaluator  # used by Suite.run()
+ from evaluate.evaluator.utils import DatasetColumn  # used in .prepare_data()
+ from evaluate.evaluation_suite import SubTask
+ from datasets import Dataset
+ from typing import Any, Callable, Dict, List, Optional, Union  # used in .prepare_pipeline()
+ import transformers
+ from transformers import Pipeline, pipeline
+ from datasets import load_dataset  # used by Suite.run()
+
+ # write a custom evaluator, inheriting from: https://github.com/huggingface/evaluate/blob/v0.4.0/src/evaluate/evaluator/text_generation.py#L31
+ class ReturnGenerationEvaluator(evaluate.TextGenerationEvaluator):
+     def __init__(self, task="text-generation", default_metric_name="exact_match", predictions_prefix: str = "generated"):
+         super().__init__(task=task, default_metric_name=default_metric_name)
+         self.predictions_prefix = predictions_prefix
+     PIPELINE_KWARGS = {"return_full_text": False, "do_sample": False}  # these kwargs are for the pipeline call, not the pipeline init.
+
+     # for the pipeline init we need to copy the whole function and add two lines. this still prints errors due to the pad_token_id = eos_token_id change.
+     # from: https://github.com/huggingface/evaluate/blob/v0.4.0/src/evaluate/evaluator/base.py#L375
+     def prepare_pipeline(
+         self,
+         model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"],  # noqa: F821
+         tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None,  # noqa: F821
+         feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None,  # noqa: F821
+         device: int = None,
+     ):
+         """
+         Prepare pipeline.
+         Args:
+             model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`,
+             defaults to `None`):
+                 If the argument is not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or
+                 is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
+                 argument specifies a pre-initialized pipeline.
+             preprocessor (`PreTrainedTokenizerBase` or `FeatureExtractionMixin`, *optional*, defaults to `None`):
+                 Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for
+                 which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
+                 this argument.
+         Returns:
+             The initialized pipeline, with modifications for the specific task of generating text, even with long inputs.
+         """
+
+         if device is None:
+             device = self._infer_device()
+
+         if (
+             isinstance(model_or_pipeline, str)
+             or isinstance(model_or_pipeline, transformers.PreTrainedModel)
+             or isinstance(model_or_pipeline, transformers.TFPreTrainedModel)
+         ):
+             pipe = pipeline(
+                 self.task,
+                 model=model_or_pipeline,
+                 tokenizer=tokenizer,
+                 feature_extractor=feature_extractor,
+                 device=device,
+                 # my additions here:
+                 handle_long_generation="hole",  # our solution? relevant: https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227
+                 # pad_token_id=tokenizer.eos_token_id,  # to avoid the warning, however there might be issues as tokenizers will call this differently.
+                 do_sample=False,  # important to get reproducible results, but we need to make sure the generator is deterministic
+
+             )
+         else:
+             if model_or_pipeline is None:
+                 pipe = pipeline(self.task, device=device)
+             else:
+                 pipe = model_or_pipeline
+             # if tokenizer is not None and feature_extractor is not None:
+             #     logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).")  # excluded warning because I didn't import logger
+         if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")):
+             raise ValueError(
+                 f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task."
+             )
+         return pipe
+
+     def _resolve_context_length(self, model_or_pipeline=None):  # TODO: should really copy the typing hints here.
+         # the tokenizer needs to know the context length for our pipe strategy, but it has to be passed to the tokenizer, not the model.
+         # the tokenizer should read from the model config, but that can be wrong, or it has a task overwrite (for "text-generation" for example you get 50)
+         # model_or_pipeline only exists via the .compute call, so we have to take it in
+         # model_or_pipeline.tokenizer.config.max_new_tokens = 1024  # we shouldn't return it, but overwrite the tokenizer config, which the pipeline relies on.
+
+         return 1024  # we shouldn't return it, but overwrite the tokenizer config, which the pipeline relies on.
+
+     def _estimate_stopping(self, labels, **kwargs):
+         """ estimates max_new_tokens for the pipeline call
+         by counting the characters in the longest reference string and multiplying by 2 (for good measure, but probably not needed)
+         Args:
+             labels: a list of reference strings
+         Returns:
+             `int`: the estimated max_new_tokens, should be smaller than context_length in all cases
+         """
+         context_length = self._resolve_context_length(**kwargs)
+         estimate = min(max([len(ref) for ref in labels]) * 2, context_length)
+         return estimate
+
+     # this one needs to be adjusted
+     def predictions_processor(self, predictions, *args, **kwargs):
+         """
+         processes the output of the pipeline to be compatible with the metric.
+         generated texts are cut off at the first semicolon and whitespace is stripped (using python str builtins)
+         Args:
+             predictions: A list of lists of dicts
+         Returns:
+             `dict`: All the processed texts are flattened and stored under the "predictions" key.
+         """
+         return {"predictions": [pred[f"{self.predictions_prefix}_text"].split(";")[0].strip() for pred_list in predictions for pred in pred_list]}
+
+     # mostly a straight copy of the base evaluator's prepare_data
+     def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs):
+         """
+         Prepare data.
+         Args:
+             data (`Dataset`): Specifies the dataset we will run evaluation on.
+             input_column (`str`, defaults to `"text"`):
+                 the name of the column containing the text feature in the dataset specified by `data`.
+             label_column (`str`, defaults to `"label"`):
+                 the name of the column containing the labels in the dataset specified by `data`.
+         Returns:
+             `dict`: metric inputs. everything after the first semicolon is cut off and whitespace is stripped (using python str builtins, just like the prediction processing)
+             `list`: pipeline inputs.
+         """
+
+         self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})  # this will throw an exception with useful error messages
+
+         # don't put everything in the return statement, so you have the control...
+         references = [ref.split(";")[0].strip() for ref in data[label_column]]
+         self.PIPELINE_KWARGS.update({"max_new_tokens": self._estimate_stopping(references)})  # this is a hack, does it work though?
+
+         return {"references": references}, data[input_column]  # DatasetColumn(data, input_column) doesn't seem to work. data[input_column] does, but ignores any of the features of the helper class.
+
+
+
+ # via: https://huggingface.co/docs/evaluate/evaluation_suite
+ # relevant source: https://github.com/huggingface/evaluate/blob/v0.4.0/src/evaluate/evaluation_suite/__init__.py
+ class Suite(evaluate.EvaluationSuite):
+
+
+     def __init__(self, name):
+         super().__init__(name)
+         self.preprocessor = lambda x: {"return_statement": x["return_statement"].split(";")[0]}  # like this? refactored to ReturnGenerationEvaluator
+         self.suite = [
+             # more subtasks are only possible once we can pass custom evaluators. -> https://github.com/huggingface/evaluate/pull/367
+             SubTask(  # this one is adjusted already
+                 task_type="text-generation",  # this calls an evaluator, but can you specify your own custom evaluator instead?
+                 data="Vipitis/Shadertoys-fine",
+                 subset="return_completion",
+                 split="test[5:10]",  # [5:10] is for testing to make it quick, and these are some easy examples, unlike the first 5.
+                 args_for_task={
+                     # "metric": "exact_match",
+                     "input_column": "body",
+                     "label_column": "return_statement",
+                 }
+             )
+         ]
+
+     # from: https://github.com/huggingface/evaluate/blob/v0.4.0/src/evaluate/evaluation_suite/__init__.py#LL103C5-L129C27
+     def run(
+         self, model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"] = "Vipitis/CodeGPT-small-java-adaptedGPT2-transfer-shadertoys"  # noqa: F821 not so useful default model?
+     ) -> List[Dict[str, Any]]:
+
+         self.assert_suite_nonempty()
+
+         results_all = []
+         for task in self.suite:
+
+             task_name = task.data
+
+             if task.data_preprocessor:  # if the task requires extra preprocessing; otherwise everything is done inside the Evaluator
+                 ds = load_dataset(task.data, name=task.subset, split=task.split)
+                 task.data = ds.map(task.data_preprocessor)
+
+             task_evaluator = ReturnGenerationEvaluator()  # this is the change we make: specify our custom evaluator from above.
+             args_for_task = task.args_for_task
+             args_for_task["model_or_pipeline"] = model_or_pipeline
+             args_for_task["data"] = task.data
+             args_for_task["subset"] = task.subset
+             args_for_task["split"] = task.split
+             results = task_evaluator.compute(**args_for_task)
+
+             results["task_name"] = task_name + "/" + task.subset if task.subset else task_name
+             results["data_preprocessor"] = str(task.data_preprocessor) if task.data_preprocessor is not None else None
+             results_all.append(results)
+         return results_all
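
For context, a minimal usage sketch of the suite above, assuming this script is published on the Hub so that it can be loaded by name as "Vipitis/ShaderEval" through evaluate's documented EvaluationSuite.load; the shader strings below are made-up examples illustrating the semicolon truncation done in predictions_processor and prepare_data:

# usage sketch, assuming evaluate>=0.4.0 and a Hub Space named "Vipitis/ShaderEval"
from evaluate import EvaluationSuite

suite = EvaluationSuite.load("Vipitis/ShaderEval")  # downloads and imports ShaderEval.py (assumed Space name)
results = suite.run("gpt2")                         # any text-generation checkpoint; returns one result dict per subtask
print(results)                                      # includes the "exact_match" score plus "task_name" and timing info

# the comparison is purely string-level: both the generated text and the reference
# are cut at the first semicolon and stripped before exact_match is computed
generated = " return vec4(col, 1.0);\n}"            # hypothetical model output
reference = "return vec4(col, 1.0);"                # hypothetical return_statement label
assert generated.split(";")[0].strip() == reference.split(";")[0].strip()
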
app.py ADDED
@@ -0,0 +1,7 @@
+ import gradio as gr
+
+ def greet(name):
+     return "Hello " + name + "!!\n This space hosts the ShaderEval Suite. More to follow soon."
+
+ iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+ iface.launch()
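
The app is a placeholder for now ("more to follow soon"). A minimal sketch of how the Space could later expose the suite through Gradio, assuming ShaderEval.py sits next to app.py in the Space root; the function and component choices below are illustrative, not part of this commit:

# sketch only: hypothetical wiring of the Suite into the Space UI
import gradio as gr
from ShaderEval import Suite  # assumes ShaderEval.py is importable from the Space root

suite = Suite("ShaderEval")

def evaluate_model(model_id):
    # runs the single return-completion subtask for the given checkpoint and returns the raw result dicts
    return suite.run(model_id)

iface = gr.Interface(fn=evaluate_model, inputs="text", outputs="json")
iface.launch()
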