import os

# Install the OCR dependencies at startup (typical for a hosted Gradio demo).
os.system('pip install paddlepaddle')
os.system('pip install paddleocr')

from paddleocr import PaddleOCR, draw_ocr
from PIL import Image
import gradio as gr
import torch

# Download a sample image used in the examples gallery below.
torch.hub.download_url_to_file('https://i.imgur.com/aqMBT0i.jpg', 'example.jpg')


def inference(img, lang):
    """Run PaddleOCR on the uploaded image and return the recognized text, one line per detection."""
    ocr = PaddleOCR(lang=lang, use_gpu=False)
    img_path = img.name
    result = ocr.ocr(img_path, cls=False)[0]
    # Keep only the recognized text from each detected line.
    txts = [line[1][0] for line in result]
    return "\n".join(txts)
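# A minimal sketch of the raw PaddleOCR output shape this demo relies on (an
# assumption inferred from how inference() indexes the result, not an official
# reference): ocr.ocr(path, cls=False) returns one list per page, and each
# detected line looks like [bounding_box_points, (recognized_text, confidence)],
# hence line[1][0] above. The flag and helper below are illustrative additions,
# not part of the original demo; flip the flag to print one raw result.
DEBUG_PRINT_RAW_RESULT = False
if DEBUG_PRINT_RAW_RESULT:
    _debug_ocr = PaddleOCR(lang='en', use_gpu=False)
    print(_debug_ocr.ocr('example.jpg', cls=False)[0])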
title = 'PaddleOCR Extract Text'
description = ('Gradio demo for PaddleOCR. This demo supports Chinese, English, French, German, Korean '
               'and Japanese. To use it, simply upload your image and choose a language from the dropdown '
               'menu, or click one of the examples to load them. Read more at the links below.')
article = ("Awesome multilingual OCR toolkits based on PaddlePaddle (practical ultra lightweight OCR "
           "system, support 80+ languages recognition, provide data annotation and synthesis tools, "
           "support training and deployment among server, mobile, embedded and IoT devices) | "
           "<a href='https://github.com/PaddlePaddle/PaddleOCR'>Github Repo</a>")

examples = [['example.jpg', 'en']]

# Enlarge the image areas so large documents remain readable.
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"

gr.Interface(
    inference,
    [
        gr.inputs.Image(type='file', label='Input'),
        # PaddleOCR language keys: some are ISO-style codes ('ch', 'en', 'fr'),
        # others are full names ('german', 'korean', 'japan').
        gr.inputs.Dropdown(choices=['ch', 'en', 'fr', 'german', 'korean', 'japan'],
                           type="value", default='en', label='language'),
    ],
    gr.outputs.Textbox(type="auto", label="Text extracted from image"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    css=css,
    enable_queue=True,
).launch(debug=True)