Update README.md
README.md CHANGED
---
license: apache-2.0
datasets:
- FreedomIntelligence/PubMedVision
language:
- en
- zh
pipeline_tag: text-generation
tags:
- vision
- image-text-to-text
---
<div align="center">
<h1>
HuatuoGPT-Vision-7B
</h1>
</div>

<div align="center">
<a href="https://github.com/FreedomIntelligence/HuatuoGPT-Vision" target="_blank">GitHub</a> | <a href="https://arxiv.org/abs/2406.19280" target="_blank">Paper</a>
</div>

## Introduction

We converted HuatuoGPT-Vision-7B into the Hugging Face LLaVA format, so the model can be served with vLLM or other frameworks. The original model is available here: [HuatuoGPT-Vision-7B](https://huggingface.co/FreedomIntelligence/HuatuoGPT-Vision-7B).

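Since the weights follow the standard LLaVA layout, they should also load directly with `transformers`. The snippet below is a minimal, untested sketch (not from the original model card): the placeholder path, generation settings, and the `<|user|>`/`<|assistant|>` prompt layout (taken from the vLLM chat template further down) are assumptions you may need to adjust.

```python
# Minimal sketch: loading the LLaVA-format checkpoint with Hugging Face
# transformers. The path is a placeholder; the prompt layout mirrors the
# vLLM chat template below and may need adjusting.
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_path = "huatuogpt_vision_model_path"  # placeholder: local path or repo id
model = LlavaForConditionalGeneration.from_pretrained(
    model_path, torch_dtype=torch.float16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(model_path)

image = Image.open("your_image_path").convert("RGB")
prompt = "<|user|>\n<image>\nWhat does the picture show?\n<|assistant|>\n"

inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device, torch.float16)
output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)
print(processor.decode(output_ids[0], skip_special_tokens=True))
```
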
# Quick Start

### 1. Deploy the model using [vLLM](https://github.com/vllm-project/vllm/tree/main)
```bash
python -m vllm.entrypoints.openai.api_server \
    --model huatuogpt_vision_model_path \
    --tensor_parallel_size 1 \
    --gpu_memory_utilization 0.8 \
    --served-model-name huatuogpt_vision_7b \
    --chat-template "{%- if messages[0]['role'] == 'system' -%}\n {%- set system_message = messages[0]['content'] -%}\n {%- set messages = messages[1:] -%}\n{%- else -%}\n {% set system_message = '' -%}\n{%- endif -%}\n\n{%- for message in messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif -%}\n\n {%- if message['role'] == 'user' -%}\n {{ '<|user|>\n' + message['content'] + '\n' }}\n {%- elif message['role'] == 'assistant' -%}\n {{ '<|assistant|>\n' + message['content'] + '\n' }}\n {%- endif -%}\n{%- endfor -%}\n\n{%- if add_generation_prompt -%}\n {{ '<|assistant|>' }}\n{% endif %}" \
    --port 9559 --max-model-len 2048 > vllm_openai_server.log 2>&1 &
```
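Once the server process is running, you can confirm it is ready before sending requests. A small check (not part of the original README) that lists the served models through the OpenAI-compatible endpoint:

```python
# Readiness check: assumes the server was started with the command above.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:9559/v1", api_key="token-abc123")
for model in client.models.list():
    print(model.id)  # should include 'huatuogpt_vision_7b' once loading finishes
```

If the model does not appear yet, loading progress is written to `vllm_openai_server.log`.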

### 2. Model inference
```python
from openai import OpenAI
from PIL import Image
import base64
import io


def get_image(image_path):
    # Read the format before convert(): Image.convert() returns a copy whose
    # .format attribute is None, so the fallback would otherwise always trigger.
    image = Image.open(image_path)
    img_type = image.format
    if not img_type:
        img_type = image_path.split('.')[-1].upper()
    if img_type.upper() == 'JPG':
        img_type = 'JPEG'  # Pillow only registers a 'JPEG' writer
    image = image.convert('RGB')
    byte_arr = io.BytesIO()
    image.save(byte_arr, format=img_type)
    # Return the base64-encoded bytes plus the image type for the data URL.
    encoded = base64.b64encode(byte_arr.getvalue()).decode()
    return encoded, img_type


client = OpenAI(
    base_url="http://localhost:9559/v1",
    api_key="token-abc123"
)
image_path = 'your_image_path'
image, img_type = get_image(image_path)

# The text part keeps the <image> placeholder expected by the model;
# the image itself is sent as a base64 data URL.
inputcontent = [{
    "type": "text",
    "text": '<image>\nWhat does the picture show?'
}]

inputcontent.append({
    "type": "image_url",
    "image_url": {
        "url": f"data:image/{img_type};base64,{image}"
    }
})

response = client.chat.completions.create(
    model="huatuogpt_vision_7b",
    messages=[
        {"role": "user", "content": inputcontent}
    ],
    temperature=0.2
)
print(response.choices[0].message.content)
```
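For a multi-turn exchange, append the assistant reply and the next question to the message list and call the endpoint again. A brief sketch continuing the example above; the follow-up question is only illustrative:

```python
# Hypothetical follow-up turn, reusing `client`, `inputcontent`, and `response`
# from the snippet above. The chat template enforces strict user/assistant alternation.
messages = [
    {"role": "user", "content": inputcontent},
    {"role": "assistant", "content": response.choices[0].message.content},
    {"role": "user", "content": "Are there any abnormal findings?"},
]
followup = client.chat.completions.create(
    model="huatuogpt_vision_7b",
    messages=messages,
    temperature=0.2
)
print(followup.choices[0].message.content)
```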

# Citation

```
@misc{chen2024huatuogptvisioninjectingmedicalvisual,
      title={HuatuoGPT-Vision, Towards Injecting Medical Visual Knowledge into Multimodal LLMs at Scale},
      author={Junying Chen and Ruyi Ouyang and Anningzhe Gao and Shunian Chen and Guiming Hardy Chen and Xidong Wang and Ruifei Zhang and Zhenyang Cai and Ke Ji and Guangjun Yu and Xiang Wan and Benyou Wang},
      year={2024},
      eprint={2406.19280},
      archivePrefix={arXiv},
      primaryClass={cs.CV},
      url={https://arxiv.org/abs/2406.19280},
}
```