Spaces:
Running
on
Zero
Running
on
Zero
initial commit
Browse files
app.py
CHANGED
@@ -1,10 +1,42 @@
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
-
from transformers import AutoModel, AutoTokenizer
|
4 |
import os
|
5 |
import base64
|
6 |
import spaces
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
|
9 |
config = AutoConfig.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
|
10 |
model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
|
@@ -61,7 +93,9 @@ def ocr_demo(image, task, ocr_type, ocr_box, ocr_color):
|
|
61 |
return res, None
|
62 |
|
63 |
with gr.Blocks() as demo:
|
64 |
-
gr.Markdown(
|
|
|
|
|
65 |
with gr.Row():
|
66 |
with gr.Column():
|
67 |
image_input = gr.Image(type="filepath", label="Input Image")
|
@@ -102,6 +136,16 @@ with gr.Blocks() as demo:
|
|
102 |
output_text = gr.Textbox(label="OCR Result")
|
103 |
output_html = gr.HTML(label="Rendered HTML Output")
|
104 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
105 |
task_dropdown.change(
|
106 |
update_inputs,
|
107 |
inputs=[task_dropdown],
|
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
+
from transformers import AutoModel, AutoTokenizer, AutoConfig
|
4 |
import os
|
5 |
import base64
|
6 |
import spaces
|
7 |
|
8 |
+
title = """# 🙋🏻♂️Welcome to Tonic's🫴🏻📸GOT-OCR"""
|
9 |
+
description = """
|
10 |
+
The GOT-OCR model is a revolutionary step in the evolution of OCR systems, boasting 580M parameters and the ability to process various forms of "characters." It features a high-compression encoder and a long-context decoder, making it well-suited for both scene- and document-style images. The model also supports multi-page and dynamic resolution OCR for added practicality.
|
11 |
+
|
12 |
+
The model can output results in a variety of formats, including plain text, markdown, and even complex outputs like TikZ diagrams or molecular SMILES strings. Interactive OCR allows users to specify regions of interest for OCR using coordinates or colors.
|
13 |
+
|
14 |
+
## Features
|
15 |
+
- **Plain Text OCR**: Recognizes and extracts plain text from images.
|
16 |
+
- **Formatted Text OCR**: Extracts text while preserving its formatting (tables, formulas, etc.).
|
17 |
+
- **Fine-grained OCR**: Box-based and color-based OCR for precise text extraction from specific regions.
|
18 |
+
- **Multi-crop OCR**: Processes multiple cropped regions within an image.
|
19 |
+
- **Rendered Formatted OCR Results**: Outputs OCR results in markdown, TikZ, SMILES, or other formats with rendered formatting.
|
20 |
+
|
21 |
+
GOT-OCR-2.0 can handle:
|
22 |
+
- Plain text
|
23 |
+
- Math/molecular formulas
|
24 |
+
- Tables
|
25 |
+
- Charts
|
26 |
+
- Sheet music
|
27 |
+
- Geometric shapes
|
28 |
+
|
29 |
+
## How to Use
|
30 |
+
1. Select a task from the dropdown menu.
|
31 |
+
2. Upload an image.
|
32 |
+
3. (Optional) Fill in additional parameters based on the task.
|
33 |
+
4. Click **Process** to see the results.
|
34 |
+
---
|
35 |
+
### Join us :
|
36 |
+
🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Build Tonic](https://git.tonic-ai.com/contribute)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
|
37 |
+
"""
|
38 |
+
|
39 |
+
|
40 |
tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
|
41 |
config = AutoConfig.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
|
42 |
model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
|
|
|
93 |
return res, None
|
94 |
|
95 |
with gr.Blocks() as demo:
|
96 |
+
gr.Markdown(title)
|
97 |
+
# gr.Markdown()
|
98 |
+
gr.Markdown(description)
|
99 |
with gr.Row():
|
100 |
with gr.Column():
|
101 |
image_input = gr.Image(type="filepath", label="Input Image")
|
|
|
136 |
output_text = gr.Textbox(label="OCR Result")
|
137 |
output_html = gr.HTML(label="Rendered HTML Output")
|
138 |
|
139 |
+
gr.Markdown("""## GOT-OCR 2.0
|
140 |
+
|
141 |
+
This powerful **580M-parameter** OCR model can handle various text recognition tasks with high accuracy.
|
142 |
+
|
143 |
+
### Model Information
|
144 |
+
- **Model Name**: GOT-OCR 2.0
|
145 |
+
- **Hugging Face Repository**: [ucaslcl/GOT-OCR2_0](https://huggingface.co/ucaslcl/GOT-OCR2_0)
|
146 |
+
- **Environment**: CUDA 11.8 + PyTorch 2.0.1
|
147 |
+
""")
|
148 |
+
|
149 |
task_dropdown.change(
|
150 |
update_inputs,
|
151 |
inputs=[task_dropdown],
|