# gpt_detect23 / app.py
import os
import subprocess

import gradio as gr
import numpy as np
import pandas as pd
from datasets import Dataset
from transformers import (
    RobertaForSequenceClassification,
    RobertaTokenizerFast,
    Trainer,
    TrainingArguments,
)

# Authenticate with the Hugging Face Hub using a token stored as a Space secret.
TOKEN = os.environ.get('token', None)
if TOKEN is not None:
    subprocess.run(["huggingface-cli", "login", "--token", TOKEN])
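
# Load the fine-tuned detection model and its tokenizer from the Hugging Face Hub.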
model = RobertaForSequenceClassification.from_pretrained('Prakhar618/Gptdetect')
tokenizer = RobertaTokenizerFast.from_pretrained('Prakhar618/Gptdetect', model_max_length=256)
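
# Tokenization helper: pad and truncate inputs to a 256-token window.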
def tokenize_function(examples):
    return tokenizer(examples['text'], padding=True, truncation=True,
                     max_length=256)

def predict(text):
    # Wrap the single input string in a one-row Hugging Face Dataset.
    test_dataset = Dataset.from_pandas(pd.DataFrame({'text': [text]}))
    # Tokenize the dataset before inference.
    test_dataset = test_dataset.map(tokenize_function, batched=True)
    # trainer.predict returns (predictions, label_ids, metrics); only the logits are needed.
    # The trainer itself is built below, at module level, before the interface launches.
    predictions, _, _ = trainer.predict(test_dataset)
    y_pred = np.argmax(predictions, axis=1)
    return str(y_pred[0])

# Create Gradio interface
text_input = gr.Textbox(lines=7, label="Input Text", placeholder="Enter your text here...")
output_text = gr.Textbox(label="Predicted Label")
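
# Inference-only Trainer setup: no training, just batched prediction.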
test_args = TrainingArguments(
    output_dir='Prakhar618/gpt_detect23/output/',
    do_train=False,
    do_predict=True,
    per_device_eval_batch_size=2,
)
trainer = Trainer(
    model=model,
    args=test_args,
)
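
# Wire the prediction function to the UI and launch the app.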
iface = gr.Interface(fn=predict, inputs=text_input, outputs=output_text)
iface.launch(share=True)