victor30608 committed
Commit: d7df7ab
1 Parent(s): 7e017f3

Added project files

.gitignore ADDED
@@ -0,0 +1 @@
+ /.idea/
Dockerfile ADDED
@@ -0,0 +1,25 @@
+ FROM python:3.9.10-slim
+
+ RUN apt-get update -y && apt-get upgrade -y && apt-get install -y pkg-config build-essential libudev-dev libssl-dev
+ RUN apt-get install -y wget bzip2 ca-certificates nginx supervisor libglib2.0-0 libxext6 libsm6 libxrender1 libgl1-mesa-glx libzbar0 build-essential libpcre3 git mercurial subversion locales procps
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN useradd -m -u 1000 user
+ USER user
+
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ WORKDIR $HOME/app
+
+ COPY --chown=user . $HOME/app
+
+ RUN pip install paddlepaddle -i https://pypi.tuna.tsinghua.edu.cn/simple
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ RUN git clone https://github.com/PaddlePaddle/PaddleOCR
+
+ CMD ["python3", "app.py"]
app.py ADDED
@@ -0,0 +1,34 @@
+ import os
+ import glob
+ import gradio as gr
+ import yaml
+ from utils import load_config, get_args, download_model
+ from ocr import OCR
+ import PIL
+ import numpy as np
+
+ config = load_config()
+ download_model('inference.pdmodel', config['inference.pdmodel'])
+ download_model('inference.pdiparams', config['inference.pdiparams'])
+
+ model = OCR(get_args(config['svtr_nums']['params']))
+
+ def process_image(image):
+     img = image.convert('RGB')
+     img = np.array(img)
+     val, conf = model(img)
+     return val, conf
+
+ title = "Interactive demo: SVTR-Tiny"
+ description = "Demo for SVTR-Tiny. The model supports only digit recognition. To use it, upload a single-text-line image or use one of the example images below and click 'Submit'. Results will show up in a few seconds."
+ article = "<p style='text-align: center'><a href='https://arxiv.org/pdf/2205.00159.pdf'>SVTR: Scene Text Recognition with a Single Visual Model</a> | <a href='https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.7/doc/doc_en/algorithm_rec_svtr_en.md'>Github Repo</a></p>"
+
+ demo = gr.Interface(fn=process_image,
+                     inputs=gr.Image(type="pil"),
+                     outputs=gr.Textbox(),
+                     title=title,
+                     description=description,
+                     article=article,
+                     examples=glob.glob('examples/*.jpeg'))
+
+ demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
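Note: process_image returns two values (the recognized text and its confidence), while the interface declares a single Textbox output, so the pair may show up as one raw tuple. If the values should be displayed separately, the interface could be wired with two output components instead; a sketch of that variant (an illustration, not part of the commit):

    # hypothetical variant of the gr.Interface call: one output component per returned value
    demo = gr.Interface(fn=process_image,
                        inputs=gr.Image(type="pil"),
                        outputs=[gr.Textbox(label="recognized text"),
                                 gr.Textbox(label="confidence")],
                        title=title,
                        description=description,
                        article=article,
                        examples=glob.glob('examples/*.jpeg'))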
configs/model.yaml ADDED
@@ -0,0 +1,13 @@
+ svtr_nums:
+   params:
+     max_text_length: 13
+     rec_algorithm: SVTR
+     rec_image_shape: '3,64,256'
+     rec_char_dict_path: 'configs/nums_dict.txt'
+     use_space_char: False
+     rec_model_dir: 'weights'
+
+ inference.pdmodel: https://drive.google.com/file/d/1sTD0klxkin81MDMzXIsO-POWuESD3Ebh/view?usp=sharing
+ inference.pdiparams: https://drive.google.com/file/d/1fdRoRLFi1S0NnD3FLP7Mpc_PbrEJ4JSm/view?usp=sharing
+
+
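Note that 'inference.pdmodel' and 'inference.pdiparams' are flat top-level keys whose dots are part of the key name, not nesting; app.py reads them back verbatim. A small illustration, assuming the loader from utils.py:

    # illustration only: the dotted keys are plain strings at the top level
    from utils import load_config

    config = load_config('configs/model.yaml')
    print(config['inference.pdmodel'])                           # Google Drive URL of the model graph
    print(config['svtr_nums']['params']['rec_char_dict_path'])   # 'configs/nums_dict.txt'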
configs/nums_dict.txt ADDED
@@ -0,0 +1,10 @@
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
examples/0f0c616.jpeg ADDED
examples/109.jpeg ADDED
examples/31.jpeg ADDED
examples/37.jpeg ADDED
examples/4b3604e.jpeg ADDED
examples/53.jpeg ADDED
examples/657.jpeg ADDED
examples/c772bf2.jpeg ADDED
examples/e150862.jpeg ADDED
examples/f8d1b18.jpeg ADDED
examples/fc51fc7.jpeg ADDED
flagged/.gitkeep ADDED
File without changes
ocr.py ADDED
@@ -0,0 +1,22 @@
+ import os
+ import sys
+
+ # make the cloned PaddleOCR repo's internal packages (ppocr, tools) importable
+ sys.path.append('PaddleOCR')
+ os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
+
+ from PaddleOCR.tools.infer.predict_rec import TextRecognizer
+
+
+ class OCR:
+
+     def __init__(self, *args):
+         self.ocr_model = TextRecognizer(*args)
+
+     def __call__(self, img):
+         # TextRecognizer returns (results, elapsed time); each result is a (text, score) pair
+         [rec_res], _ = self.ocr_model([img])
+         return rec_res[0], rec_res[1]
+
+
+
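For reference, the wrapper can also be exercised outside Gradio once the PaddleOCR clone and the downloaded weights are in place; a sketch using one of the bundled example images:

    # hypothetical standalone check of the OCR wrapper (run from the project root)
    import numpy as np
    from PIL import Image
    from utils import load_config, get_args
    from ocr import OCR

    config = load_config()
    model = OCR(get_args(config['svtr_nums']['params']))
    img = np.array(Image.open('examples/31.jpeg').convert('RGB'))
    text, score = model(img)
    print(text, score)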
requirements.txt ADDED
@@ -0,0 +1,23 @@
+ shapely
+ scikit-image
+ imgaug
+ pyclipper
+ lmdb
+ tqdm
+ numpy
+ visualdl
+ rapidfuzz
+ opencv-python<=4.6.0.66
+ opencv-contrib-python<=4.6.0.66
+ cython
+ lxml
+ premailer
+ openpyxl
+ attrdict
+ PyMuPDF<1.21.0
+ Pillow>=10.0.0
+ pyyaml
+ gdown
+ gradio
+ paddleocr>=2.0.1
+
utils.py ADDED
@@ -0,0 +1,142 @@
+ import argparse
+ import yaml
+ import gdown
+ import os
+
+
+ def load_config(path='configs/model.yaml'):
+     with open(path, 'r', encoding='utf-8') as f:
+         return yaml.load(f, Loader=yaml.FullLoader)
+
+
+ def str2bool(v):
+     return v.lower() in ("true", "yes", "t", "y", "1")
+
+
+ def init_args():
+     parser = argparse.ArgumentParser()
+     # params for prediction engine
+     parser.add_argument("--use_gpu", type=str2bool, default=False)
+     parser.add_argument("--use_xpu", type=str2bool, default=False)
+     parser.add_argument("--use_npu", type=str2bool, default=False)
+     parser.add_argument("--ir_optim", type=str2bool, default=True)
+     parser.add_argument("--use_tensorrt", type=str2bool, default=False)
+     parser.add_argument("--min_subgraph_size", type=int, default=15)
+     parser.add_argument("--precision", type=str, default="fp32")
+     parser.add_argument("--gpu_mem", type=int, default=500)
+     parser.add_argument("--gpu_id", type=int, default=0)
+
+     # params for text detector
+     parser.add_argument("--image_dir", type=str)
+     parser.add_argument("--page_num", type=int, default=0)
+     parser.add_argument("--det_algorithm", type=str, default='DB')
+     parser.add_argument("--det_model_dir", type=str)
+     parser.add_argument("--det_limit_side_len", type=float, default=960)
+     parser.add_argument("--det_limit_type", type=str, default='max')
+     parser.add_argument("--det_box_type", type=str, default='quad')
+
+     # DB params
+     parser.add_argument("--det_db_thresh", type=float, default=0.3)
+     parser.add_argument("--det_db_box_thresh", type=float, default=0.6)
+     parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5)
+     parser.add_argument("--max_batch_size", type=int, default=10)
+     parser.add_argument("--use_dilation", type=str2bool, default=False)
+     parser.add_argument("--det_db_score_mode", type=str, default="fast")
+
+     # EAST params
+     parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
+     parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
+     parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)
+
+     # SAST params
+     parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
+     parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
+
+     # PSE params
+     parser.add_argument("--det_pse_thresh", type=float, default=0)
+     parser.add_argument("--det_pse_box_thresh", type=float, default=0.85)
+     parser.add_argument("--det_pse_min_area", type=float, default=16)
+     parser.add_argument("--det_pse_scale", type=int, default=1)
+
+     # FCE params
+     parser.add_argument("--scales", type=list, default=[8, 16, 32])
+     parser.add_argument("--alpha", type=float, default=1.0)
+     parser.add_argument("--beta", type=float, default=1.0)
+     parser.add_argument("--fourier_degree", type=int, default=5)
+
+     # params for text recognizer
+     parser.add_argument("--rec_algorithm", type=str, default='SVTR_LCNet')
+     parser.add_argument("--rec_model_dir", type=str)
+     parser.add_argument("--rec_image_inverse", type=str2bool, default=True)
+     parser.add_argument("--rec_image_shape", type=str, default="3, 48, 320")
+     parser.add_argument("--rec_batch_num", type=int, default=6)
+     parser.add_argument("--max_text_length", type=int, default=25)
+     parser.add_argument(
+         "--rec_char_dict_path",
+         type=str,
+         default="./ppocr/utils/ppocr_keys_v1.txt")
+     parser.add_argument("--use_space_char", type=str2bool, default=True)
+     parser.add_argument(
+         "--vis_font_path", type=str, default="./doc/fonts/simfang.ttf")
+     parser.add_argument("--drop_score", type=float, default=0.5)
+
+     # params for e2e
+     parser.add_argument("--e2e_algorithm", type=str, default='PGNet')
+     parser.add_argument("--e2e_model_dir", type=str)
+     parser.add_argument("--e2e_limit_side_len", type=float, default=768)
+     parser.add_argument("--e2e_limit_type", type=str, default='max')
+
+     # PGNet params
+     parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5)
+     parser.add_argument(
+         "--e2e_char_dict_path", type=str, default="./ppocr/utils/ic15_dict.txt")
+     parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext')
+     parser.add_argument("--e2e_pgnet_mode", type=str, default='fast')
+
+     # params for text classifier
+     parser.add_argument("--use_angle_cls", type=str2bool, default=False)
+     parser.add_argument("--cls_model_dir", type=str)
+     parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
+     parser.add_argument("--label_list", type=list, default=['0', '180'])
+     parser.add_argument("--cls_batch_num", type=int, default=6)
+     parser.add_argument("--cls_thresh", type=float, default=0.9)
+
+     parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
+     parser.add_argument("--cpu_threads", type=int, default=10)
+     parser.add_argument("--use_pdserving", type=str2bool, default=False)
+     parser.add_argument("--warmup", type=str2bool, default=False)
+
+     # SR params
+     parser.add_argument("--sr_model_dir", type=str)
+     parser.add_argument("--sr_image_shape", type=str, default="3, 32, 128")
+     parser.add_argument("--sr_batch_num", type=int, default=1)
+
+     # params for saving results
+     parser.add_argument(
+         "--draw_img_save_dir", type=str, default="./inference_results")
+     parser.add_argument("--save_crop_res", type=str2bool, default=False)
+     parser.add_argument("--crop_res_save_dir", type=str, default="./output")
+
+     # multi-process
+     parser.add_argument("--use_mp", type=str2bool, default=False)
+     parser.add_argument("--total_process_num", type=int, default=1)
+     parser.add_argument("--process_id", type=int, default=0)
+
+     parser.add_argument("--benchmark", type=str2bool, default=False)
+     parser.add_argument("--save_log_path", type=str, default="./log_output/")
+
+     parser.add_argument("--show_log", type=str2bool, default=True)
+     parser.add_argument("--use_onnx", type=str2bool, default=False)
+     return parser
+
+
+ def get_args(model_params):
+     print(model_params)
+     args, _ = init_args().parse_known_args()
+     for key, val in model_params.items():
+         setattr(args, key, val)
+     return args
+
+
+ def download_model(filename, url, save_path='weights'):
+     gdown.download(url=url, output=os.path.join(save_path, filename), quiet=False, fuzzy=True, use_cookies=False)
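get_args starts from PaddleOCR's argparse defaults and overrides only the keys supplied by the YAML config, leaving everything else untouched; for example (illustration only, with made-up overrides):

    # illustration: YAML values replace the matching argparse defaults
    from utils import get_args

    args = get_args({'rec_algorithm': 'SVTR', 'rec_image_shape': '3,64,256'})
    print(args.rec_algorithm)   # 'SVTR'  (overridden by the config)
    print(args.rec_batch_num)   # 6       (default from init_args)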
weights/.gitkeep ADDED
File without changes