BK-Lee committed on
Commit 76c4f2d
1 Parent(s): 2953130

Update README.md

Files changed (1)
  1. README.md +78 -3
README.md CHANGED
@@ -1,3 +1,78 @@
- ---
- license: mit
- ---
+ ---
+ license: mit
+ ---
+
+ You should follow these two steps:
+
+ 1. Install the libraries and download the GitHub package [Meteor](https://github.com/ByungKwanLee/Meteor):
+ ```bash
+ bash install
+ conda activate meteor
+ pip install -r requirements.txt
+ ```
+
+ 2. Run the file demo.py in [Meteor](https://github.com/ByungKwanLee/Meteor).
+
+ You can choose the prompt type: text_only or with_image!
+ Enjoy Meteor!
+
+
+ ```python
+ import time
+ import torch
+ from config import *
+ from PIL import Image
+ from utils.utils import *
+ import torch.nn.functional as F
+ from meteor.load_mmamba import load_mmamba
+ from meteor.load_meteor import load_meteor
+ from torchvision.transforms.functional import pil_to_tensor
+
+ # User prompt
+ prompt_type = 'with_image' # text_only / with_image
+ img_path = 'figures/demo.png'
+ question = 'Provide the detail of the image'
+
+ # Loading meteor model
+ mmamba = load_mmamba('BK-Lee/Meteor-Mamba').cuda()
+ meteor, tok_meteor = load_meteor('BK-Lee/Meteor-MLM', bits=4)
+
+ # Freeze model
+ freeze_model(mmamba)
+ freeze_model(meteor)
+
+ # Device
+ device = torch.cuda.current_device()
+
+ # Prompt type -> input prompt
+ image_token_number = int((490/14)**2)
+ if prompt_type == 'with_image':
+     # Image load
+     image = F.interpolate(pil_to_tensor(Image.open(img_path).convert("RGB")).unsqueeze(0), size=(490, 490), mode='bicubic').squeeze(0)
+     inputs = [{'image': image, 'question': question}]
+ elif prompt_type == 'text_only':
+     inputs = [{'question': question}]
+
+ # Generate
+ with torch.inference_mode():
+
+     # Meteor Mamba
+     mmamba_inputs = mmamba.eval_process(inputs=inputs, tokenizer=tok_meteor, device=device, img_token_number=image_token_number)
+     if 'image' in mmamba_inputs.keys():
+         clip_features = meteor.clip_features(mmamba_inputs['image'])
+         mmamba_inputs.update({"image_features": clip_features})
+     mmamba_outputs = mmamba(**mmamba_inputs)
+
+     # Meteor
+     meteor_inputs = meteor.eval_process(inputs=inputs, data='demo', tokenizer=tok_meteor, device=device, img_token_number=image_token_number)
+     if 'image' in mmamba_inputs.keys():
+         meteor_inputs.update({"image_features": clip_features})
+     meteor_inputs.update({"tor_features": mmamba_outputs.tor_features})
+
+     # Generation
+     generate_ids = meteor.generate(**meteor_inputs, do_sample=True, max_new_tokens=128, top_p=0.95, temperature=0.9, use_cache=True)
+
+ # Text decoding
+ decoded_text = tok_meteor.batch_decode(generate_ids, skip_special_tokens=True)[0].split('assistant\n')[-1].split('[U')[0].strip()
+ print(decoded_text)
+ ```
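For reference, here is a minimal sketch of how the "User prompt" section of demo.py above changes for text_only mode. Only these assignments need to differ; the rest of the script stays as shown, and the question string below is just a hypothetical example:

```python
# Minimal text_only variant of the "User prompt" section in demo.py above.
# Only these assignments change; the rest of the script is unchanged.
prompt_type = 'text_only'        # the 'with_image' branch is skipped in this mode
question = 'Explain what the Meteor model does'  # hypothetical example question
# img_path is not needed here, since no image is loaded for text_only
```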