# Silence verbose transformers warnings
from transformers.utils import logging
from language_directions import *
logging.set_verbosity_error()

import warnings
warnings.filterwarnings("ignore",
                        message="Using the model-agnostic default `max_length`")

import os
import gradio as gr
from transformers import pipeline

# Image-captioning pipeline (BLIP base)
pipe = pipeline("image-to-text",
                model="Salesforce/blip-image-captioning-base")
def translate(input_text, source, target):
    """Translate input_text with the Helsinki-NLP OPUS-MT model for the given language pair."""
    try:
        # e.g. Helsinki-NLP/opus-mt-en-sq for English -> Albanian
        model = f"Helsinki-NLP/opus-mt-{source}-{target}"
        pipe = pipeline("translation", model=model)
        translation = pipe(input_text)
        return translation[0]['translation_text'], input_text
    except (KeyError, OSError):
        # pipeline() raises OSError when no opus-mt model exists for the requested pair
        return "", f"Error: Translation direction {source} to {target} is not supported by Helsinki Translation Models"
def launch(input):
    # Caption the uploaded image, then translate the caption from English ('en')
    # to Albanian ('sq'); returns (translated_caption, english_caption)
    out = pipe(input)
    context_str = out[0]['generated_text']
    translate_str = translate(context_str, 'en', 'sq')
    return translate_str
# Start the Gradio interface
iface = gr.Interface(launch,
                     inputs=gr.Image(type='pil'),
                     outputs="text")
iface.launch(share=True)
# iface.launch(share=True,
#              server_port=int(os.environ['PORT1']))
iface.close()
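# Running this file outside of a Space (assumed dependencies, not pinned here):
#   pip install gradio transformers torch pillow sentencepiece
#   python <this_file>.py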