adamlu1 committed on
Commit
94dace7
1 Parent(s): 44bb293
Files changed (2)
  1. README.md +4 -1
  2. config.json +40 -0
README.md CHANGED
@@ -4,7 +4,10 @@ license: mit
 📢 [[Project Page](https://microsoft.github.io/OmniParser/)] [[Blog Post](https://www.microsoft.com/en-us/research/articles/omniparser-for-pure-vision-based-gui-agent/)]
 
 # Model Summary
-OmniParser is a general screen parsing tool, which interprets/converts UI screenshot to structured format, to improve existing LLM based UI agent. It includes a finetuned version of YOLOv8 and a finetuned BLIP-2 model. For more details of the models used and finetuning, please refer to the [paper](https://arxiv.org/abs/2408.00203).
+OmniParser is a general screen-parsing tool that interprets/converts UI screenshots into a structured format to improve existing LLM-based UI agents.
+Its training data comprises two datasets: 1) an interactable icon detection dataset, curated from 67k popular web pages and automatically annotated to highlight clickable and actionable regions, and 2) an icon description dataset, designed to associate each UI element with its corresponding function; the latter is a key component for training models to understand the semantics of detected elements.
+
+OmniParser includes a YOLOv8 model and a BLIP-2 model, finetuned on these two datasets respectively. For more details of the models and the finetuning, please refer to the [paper](https://arxiv.org/abs/2408.00203).
 
 # Responsible AI Considerations
 ## Intended Use
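
The added summary describes a two-stage pipeline: the finetuned YOLOv8 detector proposes interactable regions, and the finetuned BLIP-2 model captions each region. Below is a minimal sketch of that flow, assuming the standard `ultralytics` and `transformers` APIs; the detector weight path is a hypothetical placeholder, and the base `Salesforce/blip2-opt-2.7b` checkpoint stands in for the finetuned captioner weights.

```python
# Minimal sketch of the two-stage parse described above; "icon_detect/best.pt"
# is a hypothetical path, and the base BLIP-2 checkpoint stands in for the
# finetuned weights.
from PIL import Image
from ultralytics import YOLO
from transformers import Blip2Processor, Blip2ForConditionalGeneration

screenshot = Image.open("screenshot.png").convert("RGB")

# Stage 1: the finetuned YOLOv8 detector proposes interactable regions.
detector = YOLO("icon_detect/best.pt")  # hypothetical weight path
boxes = detector.predict(screenshot, conf=0.05)[0].boxes.xyxy.tolist()

# Stage 2: the finetuned BLIP-2 model describes each detected element.
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
captioner = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b"  # swap in the finetuned icon-caption weights
)

for x1, y1, x2, y2 in boxes:
    crop = screenshot.crop((x1, y1, x2, y2))
    inputs = processor(images=crop, return_tensors="pt")
    out = captioner.generate(**inputs, max_new_tokens=20)
    caption = processor.batch_decode(out, skip_special_tokens=True)[0].strip()
    print((x1, y1, x2, y2), caption)
```

The per-crop captions plus box coordinates form the structured screen representation that a downstream LLM agent consumes.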
config.json CHANGED
@@ -0,0 +1,40 @@
+{
+  "_name_or_path": "Salesforce/blip2-opt-2.7b",
+  "architectures": [
+    "Blip2ForConditionalGeneration"
+  ],
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "model_type": "blip-2",
+  "num_query_tokens": 32,
+  "qformer_config": {
+    "classifier_dropout": null,
+    "model_type": "blip_2_qformer"
+  },
+  "text_config": {
+    "_name_or_path": "facebook/opt-2.7b",
+    "activation_dropout": 0.0,
+    "architectures": [
+      "OPTForCausalLM"
+    ],
+    "eos_token_id": 50118,
+    "ffn_dim": 10240,
+    "hidden_size": 2560,
+    "model_type": "opt",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 32,
+    "prefix": "</s>",
+    "torch_dtype": "float16",
+    "word_embed_proj_dim": 2560
+  },
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.40.2",
+  "use_decoder_only_language_model": true,
+  "vision_config": {
+    "dropout": 0.0,
+    "initializer_factor": 1.0,
+    "model_type": "blip_2_vision_model",
+    "num_channels": 3,
+    "projection_dim": 512
+  }
+}
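
Adding this config.json lets the finetuned captioner be loaded through the standard `transformers` BLIP-2 classes. A minimal sketch, assuming the file sits at the root of this repo and using `microsoft/OmniParser` as the repo id (an assumption for illustration):

```python
# Minimal sketch: read the committed config via transformers; the repo id
# "microsoft/OmniParser" is an assumption for illustration.
from transformers import Blip2Config, Blip2ForConditionalGeneration

config = Blip2Config.from_pretrained("microsoft/OmniParser")
print(config.model_type)               # blip-2
print(config.num_query_tokens)         # 32
print(config.text_config.hidden_size)  # 2560 (OPT-2.7b language backbone)

# Once weights are present alongside the config, the same id loads the model:
# model = Blip2ForConditionalGeneration.from_pretrained("microsoft/OmniParser")
```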