import streamlit as st
import time
from transformers import pipeline
from datasets import load_dataset, Audio, Features

st.set_page_config(page_title="🤗 Transformers Library examples", layout="wide")
st.title('🤗 :rainbow[Transformers Library examples]')
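# NOTE: Streamlit re-runs this script on every widget interaction, so each
# "Run Test" click loads its pipeline from scratch. A cached loader is one way
# to avoid repeated downloads (a sketch, assuming Streamlit >= 1.18 for
# st.cache_resource; the examples below keep the simpler inline calls):
#
#   @st.cache_resource
#   def get_pipeline(task, model=None):
#       return pipeline(task, model=model)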
# Done
# function for Sentiment Analysis / Text Classification model
def sentiment_analysis():
    code = '''
from transformers import pipeline
classifier = pipeline("sentiment-analysis")
results = classifier("Transformers library is very helpful.")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        with st.spinner('Wait for it...'):
            time.sleep(5)
            classifier = pipeline("sentiment-analysis")
            results = classifier("Transformers library is very helpful.")
        st.write("Output:")
        st.success(results)

        st.divider()
        st.subheader("Example: Multiple statements analysis")
        code = '''
from transformers import pipeline
classifier = pipeline("sentiment-analysis")
results = classifier([
    "This is a quick tutorial site.",
    "I learnt new topics today.",
    "I do not like lengthy tutorials."
])
'''
        st.code(code, language='python')
        with st.spinner('Wait for it...'):
            time.sleep(5)
            results = classifier([
                "This is a quick tutorial site.",
                "I learnt new topics today.",
                "I do not like lengthy tutorials."
            ])
        st.write("Output:")
        st.success(results)
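# The sentiment-analysis pipeline (its default checkpoint is a DistilBERT model
# fine-tuned on SST-2) returns one {"label": "POSITIVE"/"NEGATIVE", "score": float}
# dict per input string, which is what st.success() renders above.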
# function for Text Generation model
def text_generation():
    code = '''
from transformers import pipeline
generator = pipeline("text-generation")
results = generator("The Transformers library is very helpful because")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        with st.spinner('Wait for it...'):
            time.sleep(5)
            generator = pipeline("text-generation")
            results = generator("The Transformers library is very helpful because")
        st.write("Output:")
        st.success(results)
# function for Summarization model
def summarization():
    text = (
        "The Transformers library provides thousands of pretrained models to "
        "perform tasks on different modalities such as text, vision, and audio. "
        "It offers a simple pipeline API that hides tokenization, model "
        "inference, and post-processing behind a single call."
    )
    code = '''
from transformers import pipeline
summarizer = pipeline("summarization")
results = summarizer(text, max_length=60, min_length=10)
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        with st.spinner('Wait for it...'):
            time.sleep(5)
            summarizer = pipeline("summarization")
            results = summarizer(text, max_length=60, min_length=10)
        st.write("Output:")
        st.success(results)
# DONE
# function for Image Classification model
def image_classification():
    code = '''
from transformers import pipeline
vision_classifier = pipeline(model="google/vit-base-patch16-224")
preds = vision_classifier(images="./data/dog.jpeg")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        st.image("./data/dog.jpeg", width=250)
        with st.spinner('Wait for it...'):
            time.sleep(8)
            vision_classifier = pipeline(model="google/vit-base-patch16-224")
            preds = vision_classifier(images="./data/dog.jpeg")
        st.success("Output:")
        st.json(preds)
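# The image-classification pipeline returns the top predictions (top_k=5 by
# default), each as a {"score": float, "label": str} dict, so st.json() gives a
# readable view of `preds`.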
# function for Image Segmentation model
def image_segmentation():
    code = '''
from transformers import pipeline
segmenter = pipeline("image-segmentation")
results = segmenter("./data/dog.jpeg")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        st.image("./data/dog.jpeg", width=250)
        with st.spinner('Wait for it...'):
            time.sleep(5)
            segmenter = pipeline("image-segmentation")
            results = segmenter("./data/dog.jpeg")
        st.write("Output:")
        # each result also carries a PIL mask; show just the detected labels
        st.success([r["label"] for r in results])
# function for Object Detection model
def object_detection():
    code = '''
from transformers import pipeline
detector = pipeline("object-detection")
results = detector("./data/dog.jpeg")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        st.image("./data/dog.jpeg", width=250)
        with st.spinner('Wait for it...'):
            time.sleep(5)
            detector = pipeline("object-detection")
            results = detector("./data/dog.jpeg")
        st.write("Output:")
        st.success(results)
# function for Audio Classification model
def audio_classification():
    code = '''
from transformers import pipeline
classifier = pipeline("audio-classification")
results = classifier("./data/mlk.flac")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        with st.spinner('Wait for it...'):
            time.sleep(5)
            classifier = pipeline("audio-classification")
            # reuse the audio clip referenced in the speech-recognition example
            results = classifier("./data/mlk.flac")
        st.write("Output:")
        st.success(results)
# function for Automatic Speech Recognition model
def automatic_speech_recognition():
    code = '''
from transformers import pipeline
transcriber = pipeline("automatic-speech-recognition")
results = transcriber("./data/mlk.flac")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        with st.spinner('Wait for it...'):
            time.sleep(5)
            speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
            dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
            dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))
            result = speech_recognizer(dataset[:4]["audio"])
        st.write("Output:")
        st.success([d["text"] for d in result])
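# A single local file can also be transcribed directly, as in the snippet shown
# above (assuming ./data/mlk.flac is bundled with the Space):
#
#   transcriber = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
#   transcriber("./data/mlk.flac")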
# function for Image Captioning model
def image_captioning():
    code = '''
from transformers import pipeline
captioner = pipeline("image-to-text")
results = captioner("./data/dog.jpeg")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        st.image("./data/dog.jpeg", width=250)
        with st.spinner('Wait for it...'):
            time.sleep(5)
            captioner = pipeline("image-to-text")
            results = captioner("./data/dog.jpeg")
        st.write("Output:")
        st.success(results)
# function for Mask Filling model
def mask_filling():
    code = '''
from transformers import pipeline
unmasker = pipeline("fill-mask")
mask = unmasker.tokenizer.mask_token  # e.g. <mask> or [MASK], model-dependent
results = unmasker(f"Transformers is the best {mask} library.")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        with st.spinner('Wait for it...'):
            time.sleep(5)
            unmasker = pipeline("fill-mask")
            mask = unmasker.tokenizer.mask_token
            results = unmasker(f"Transformers is the best {mask} library.")
        st.write("Output:")
        st.success([r["sequence"] for r in results])
# function for Document Question Answering model
def document_question_answering():
    code = '''
from transformers import pipeline
doc_qa = pipeline("document-question-answering")
# pass a document image (local path or PIL image) together with a question
results = doc_qa(image="path/to/document.png", question="What is the total amount?")
'''
    st.code(code, language='python')
    # live run omitted: the default document-QA checkpoint needs an OCR backend
    # (pytesseract), which may not be available in this Space
# function for Named Entity Recognition model
def named_entity_recognition():
    code = '''
from transformers import pipeline
ner = pipeline("ner", aggregation_strategy="simple")
results = ner("Hugging Face is a company based in New York City.")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        with st.spinner('Wait for it...'):
            time.sleep(5)
            ner = pipeline("ner", aggregation_strategy="simple")
            results = ner("Hugging Face is a company based in New York City.")
        st.write("Output:")
        st.success([(r["entity_group"], r["word"]) for r in results])
# function for Translation model
def translation():
    code = '''
from transformers import pipeline
translator = pipeline("translation_en_to_fr")
results = translator("Transformers library is very helpful.")
'''
    st.code(code, language='python')
    if st.button("Run Test", type="primary"):
        with st.spinner('Wait for it...'):
            time.sleep(5)
            translator = pipeline("translation_en_to_fr")
            results = translator("Transformers library is very helpful.")
        st.write("Output:")
        st.success(results)
col1, col2 = st.columns(2)

'''
- `"audio-classification"`: will return an [`AudioClassificationPipeline`].
- `"automatic-speech-recognition"`: will return an [`AutomaticSpeechRecognitionPipeline`].
- `"conversational"`: will return a [`ConversationalPipeline`].
- `"depth-estimation"`: will return a [`DepthEstimationPipeline`].
- `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`].
- `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
- `"fill-mask"`: will return a [`FillMaskPipeline`].
- `"image-classification"`: will return an [`ImageClassificationPipeline`].
- `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`].
- `"image-segmentation"`: will return an [`ImageSegmentationPipeline`].
- `"image-to-image"`: will return an [`ImageToImagePipeline`].
- `"image-to-text"`: will return an [`ImageToTextPipeline`].
- `"mask-generation"`: will return a [`MaskGenerationPipeline`].
- `"object-detection"`: will return an [`ObjectDetectionPipeline`].
- `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
- `"summarization"`: will return a [`SummarizationPipeline`].
- `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
- `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
- `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationPipeline`].
- `"text-generation"`: will return a [`TextGenerationPipeline`].
- `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`].
- `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
- `"translation"`: will return a [`TranslationPipeline`].
- `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
- `"video-classification"`: will return a [`VideoClassificationPipeline`].
- `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`].
- `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
- `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`].
- `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`].
- `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`].
'''
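# Any task string listed above can be passed straight to pipeline(); a quick
# sketch (not wired into the UI below):
#
#   zero_shot = pipeline("zero-shot-classification")
#   zero_shot("This app demos Transformers pipelines.",
#             candidate_labels=["education", "politics", "sports"])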
with col1:
    taskType = st.radio(
        "Select a type of task to perform",
        [
            "Sentiment Analysis or Text classification",
            "Text Generation",
            "Summarization",
            "Image Classification",
            "Image Segmentation",
            "Object Detection",
            "Audio Classification",
            "Automatic Speech Recognition",
            "Visual Question Answering",
            "Document Question Answering",
            "Image Captioning",
            "Mask Filling",
            "Named Entity Recognition",
            "Translation"
        ],
        captions=[
            "**pipeline(task=“sentiment-analysis”)**",
            "pipeline(task=“text-generation”)",
            "pipeline(task=“summarization”)",
            "pipeline(task=“image-classification”)",
            "pipeline(task=“image-segmentation”)",
            "pipeline(task=“object-detection”)",
            "pipeline(task=“audio-classification”)",
            "pipeline(task=“automatic-speech-recognition”)",
            "pipeline(task=“vqa”)",
            "pipeline(task=“document-question-answering”)",
            "pipeline(task=“image-to-text”)",
            "pipeline(task=“fill-mask”)",
            "pipeline(task=“ner”)",
            "pipeline(task=“translation_xx_to_yy”)"
        ],
        index=0)
with col2:
    st.subheader(f"Example: {taskType}")
    if taskType == "Sentiment Analysis or Text classification":
        sentiment_analysis()
    elif taskType == "Text Generation":
        text_generation()
    elif taskType == "Summarization":
        summarization()
    elif taskType == "Image Classification":
        image_classification()
    elif taskType == "Image Segmentation":
        image_segmentation()
    elif taskType == "Object Detection":
        object_detection()
    elif taskType == "Audio Classification":
        audio_classification()
    elif taskType == "Automatic Speech Recognition":
        automatic_speech_recognition()
    elif taskType == "Document Question Answering":
        document_question_answering()
    elif taskType == "Image Captioning":
        image_captioning()
    elif taskType == "Mask Filling":
        mask_filling()
    elif taskType == "Named Entity Recognition":
        named_entity_recognition()
    elif taskType == "Translation":
        translation()