Upload modeling_mplug_owl2.py with huggingface_hub
modeling_mplug_owl2.py CHANGED (+4 -1)
@@ -250,9 +250,12 @@ class MPLUGOwl2LlamaForCausalLM(LlamaForCausalLM, MPLUGOwl2MetaForCausalLM):
     def __init__(self, config):
         super(LlamaForCausalLM, self).__init__(config)
         self.model = MPLUGOwl2LlamaModel(config)
+
+        self.tokenizer = AutoTokenizer.from_pretrained("q-future/one-align")
+        self.image_processor = CLIPImageProcessor.from_pretrained("q-future/one-align")
 
         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-        self.preferential_ids_ = [id_[1] for id_ in tokenizer(["excellent","good","fair","poor","bad"])["input_ids"]]
+        self.preferential_ids_ = [id_[1] for id_ in self.tokenizer(["excellent","good","fair","poor","bad"])["input_ids"]]
         self.weight_tensor = torch.Tensor([5.,4.,3.,2.,1.]).half().to(model.device)
 
         # Initialize weights and apply final processing
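For context, the commit loads the tokenizer and image processor inside __init__ so the model object is self-contained, and preferential_ids_ is now built from that in-instance tokenizer rather than an external one. Below is a minimal sketch of how these fields are typically combined at inference time: the logits at the answer position are restricted to the token ids of "excellent"/"good"/"fair"/"poor"/"bad" and averaged with the [5., 4., 3., 2., 1.] weights to give a scalar quality score. The score_from_logits helper and the output_logits argument are illustrative names, not part of the commit.

import torch

def score_from_logits(model, output_logits):
    # output_logits: (batch, vocab_size) logits at the position where the model
    # predicts the rating word ("excellent" ... "bad").
    # Restrict to the five rating-word token ids collected in preferential_ids_
    # and normalize them against each other.
    probs = torch.softmax(output_logits[:, model.preferential_ids_], dim=-1)
    # Weighted average with weight_tensor ([5., 4., 3., 2., 1.]) yields a
    # scalar score per input, roughly in the range [1, 5].
    return (probs * model.weight_tensor).sum(dim=-1)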