Inherit from GenerationMixin #22
by fcakyon - opened

modeling_florence2.py +2 -1

modeling_florence2.py CHANGED
@@ -29,6 +29,7 @@ from einops import rearrange
 from timm.models.layers import DropPath, trunc_normal_
 
 from transformers.modeling_utils import PreTrainedModel
+from transformers.generation.utils import GenerationMixin
 from transformers.utils import (
     ModelOutput,
     add_start_docstrings,
@@ -2059,7 +2060,7 @@ class Florence2LanguageModel(Florence2LanguagePreTrainedModel):
         )
 
 
-class Florence2LanguageForConditionalGeneration(Florence2LanguagePreTrainedModel):
+class Florence2LanguageForConditionalGeneration(Florence2LanguagePreTrainedModel, GenerationMixin):
     base_model_prefix = "model"
     _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
     _keys_to_ignore_on_load_missing = ["final_logits_bias"]