Update README.md
README.md (CHANGED)
@@ -20,8 +20,6 @@ tags:
 datasets:
 - lamm-mit/Cephalo-Bioinspired-Mechanics-Materials
 - lamm-mit/Cephalo-Wikipedia-Materials
-- OleehyO/latex-formulas
-- lamm-mit/OleehyO-latex-formulas
 pipeline_tag: image-text-to-text
 inference:
   parameters:
@@ -146,7 +144,11 @@ for file_name in tqdm(py_files):
 print("Download completed.")
 ```
 
-Download models that will form the experts, as well as the base model
+Download models that will form the experts, as well as the base model. As a simple example, we use:
+
+1) A materials-science fine-tuned model: lamm-mit/Cephalo-Idefics-2-vision-8b-beta (model_1)
+2) A chatty version: HuggingFaceM4/idefics2-8b-chatty (model_2)
+3) A basic variant: HuggingFaceM4/idefics2-8b (model_3)
 
 ```python
 from transformers import AutoProcessor, Idefics2ForConditionalGeneration, AutoTokenizer
@@ -168,7 +170,7 @@ model_1 = Idefics2ForConditionalGeneration.from_pretrained( model_id_1,
     _attn_implementation="flash_attention_2",  # make sure Flash Attention 2 is installed
     trust_remote_code=True,
     #quantization_config=quantization_config,
-
+)
 processor = AutoProcessor.from_pretrained(
     f"{model_id_1}",
     do_image_splitting=True
@@ -189,7 +191,7 @@ model_2 = Idefics2ForConditionalGeneration.from_pretrained( model_id_2,
     _attn_implementation="flash_attention_2",  # make sure Flash Attention 2 is installed
     trust_remote_code=True,
     #quantization_config=quantization_config,
-
+)
 
 model_id_3='HuggingFaceM4/idefics2-8b'
 
@@ -198,7 +200,7 @@ model_3 = Idefics2ForConditionalGeneration.from_pretrained( model_id_3,
     _attn_implementation="flash_attention_2",  # make sure Flash Attention 2 is installed
     trust_remote_code=True,
     #quantization_config=quantization_config,
-
+)
 ```
 Put on device:
 ```python
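
Taken together, the last three hunks fix the same bug in three places: each `Idefics2ForConditionalGeneration.from_pretrained(` call in the README was left unclosed, a Python syntax error, and the `+)` lines add the missing closing parenthesis. For reference, a consolidated sketch of how the corrected loading code plausibly reads after this commit; the `load_expert` helper is introduced here only for brevity, and `import torch` plus the `torch_dtype` argument are assumptions, since the opening lines of each call fall outside the diff context. `quantization_config` stays commented out as in the README:

```python
import torch
from transformers import AutoProcessor, Idefics2ForConditionalGeneration

# The three source models named in the README: a materials-science expert,
# a chatty variant, and the base model.
model_id_1 = 'lamm-mit/Cephalo-Idefics-2-vision-8b-beta'
model_id_2 = 'HuggingFaceM4/idefics2-8b-chatty'
model_id_3 = 'HuggingFaceM4/idefics2-8b'

def load_expert(model_id):
    # Hypothetical helper collapsing the three near-identical README calls.
    # torch_dtype=torch.bfloat16 is an assumption: Flash Attention 2 needs a
    # half-precision dtype, but the exact choice is not shown in the diff.
    return Idefics2ForConditionalGeneration.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        _attn_implementation="flash_attention_2",  # make sure Flash Attention 2 is installed
        trust_remote_code=True,
        #quantization_config=quantization_config,
    )  # the closing parenthesis this commit adds

model_1 = load_expert(model_id_1)
model_2 = load_expert(model_id_2)
model_3 = load_expert(model_id_3)

# One processor serves all three models, since they share the Idefics2 format.
processor = AutoProcessor.from_pretrained(
    f"{model_id_1}",
    do_image_splitting=True
)
```

Flash Attention 2 itself ships as a separate package and is typically installed with `pip install flash-attn`.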
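
The diff cuts off where the "Put on device:" block opens. A minimal sketch of what that step usually looks like, assuming a single CUDA device; the README's actual code may differ:

```python
import torch

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Move all three source models onto the same device so they can be
# combined into the mixture-of-experts and queried together.
model_1.to(DEVICE)
model_2.to(DEVICE)
model_3.to(DEVICE)
```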