Update README.md (#2)
- Update README.md (6cf334e9996110e15d3c27857922e3f8b21e5071)
README.md
CHANGED
@@ -31,6 +31,7 @@ This is the 4-bit version of InternLM-XComposer2, install the latest version of
 
 ```python
 import torch, auto_gptq
+from PIL import Image
 from transformers import AutoModel, AutoTokenizer
 from auto_gptq.modeling import BaseGPTQForCausalLM
 
@@ -62,12 +63,12 @@ img_path_list = [
 images = []
 for img_path in img_path_list:
     image = Image.open(img_path).convert("RGB")
-    image =
+    image = model.vis_processor(image)
     images.append(image)
 image = torch.stack(images)
 query = '<ImageHere> <ImageHere>please write an article based on the images. Title: my favorite animal.'
 with torch.cuda.amp.autocast():
-    response, history =
+    response, history = model.chat(tokenizer, query=query, image=image, history=[], do_sample=False)
 print(response)
 
 #My Favorite Animal: The Panda
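For reference, a minimal sketch of how the updated snippet reads end to end after this change. Only the lines touched in the hunks above are verbatim; the model ID, the loading calls, and the image file names are placeholders standing in for the parts of the README outside these hunks (the actual README loads the 4-bit weights through an auto_gptq `BaseGPTQForCausalLM` subclass, which these hunks do not show).

```python
# Minimal sketch, not the README's exact loading code.
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

torch.set_grad_enabled(False)

# Placeholder loading step: the README builds the model via an auto_gptq
# BaseGPTQForCausalLM subclass; the generic AutoModel call below is only a stand-in.
model_id = 'internlm/internlm-xcomposer2-vl-7b-4bit'  # assumed model ID
model = AutoModel.from_pretrained(model_id, trust_remote_code=True).cuda().eval()
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

img_path_list = ['panda1.jpg', 'panda2.jpg']  # placeholder image paths

# The lines fixed by this commit: preprocess each image, batch them, and run chat().
images = []
for img_path in img_path_list:
    image = Image.open(img_path).convert("RGB")
    image = model.vis_processor(image)   # PIL image -> preprocessed tensor
    images.append(image)
image = torch.stack(images)              # batch of image tensors

query = '<ImageHere> <ImageHere>please write an article based on the images. Title: my favorite animal.'
with torch.cuda.amp.autocast():
    response, history = model.chat(tokenizer, query=query, image=image, history=[], do_sample=False)
print(response)
```

The commit's two fixes show up here: `from PIL import Image` is needed for `Image.open`, and each PIL image must pass through `model.vis_processor` so that `torch.stack` has tensors to batch before `model.chat` is called.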