# detectapp/main.py
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import torch
from transformers import RobertaTokenizer, RobertaForSequenceClassification

app = FastAPI()

# Allow cross-origin requests so the API can be called from a browser front end
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],            # Allow all origins
    allow_methods=["GET", "POST"],  # Allow only GET and POST methods
    allow_headers=["*"],            # Allow all headers
)
# Load the tokenizer (the fine-tuned model reuses the base RoBERTa vocabulary)
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

# Load the fine-tuned classification model from a local directory
model_path = "model_ai_detection"
model = RobertaForSequenceClassification.from_pretrained(model_path)

# Use the GPU when available and put the model in inference mode
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
class TextData(BaseModel):
    text: str
@app.post("/predict")
async def predict(data: TextData):
inputs = tokenizer(data.text, return_tensors="pt", padding=True, truncation=True)
inputs = {k: v.to(device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs)
probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
ai_prob = probs[0][1].item() * 100 # Probability of the text being AI-generated
message = "The text is likely generated by AI." if ai_prob > 50 else "The text is likely generated by a human."
return {
"score": ai_prob,
"message": message
}
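
# A minimal client sketch for the endpoint above. Assumptions for illustration:
# the server is reachable at http://localhost:8000 and the `requests` package
# is installed; adjust the URL for your deployment.
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/predict",
#       json={"text": "Sample text to classify."},
#   )
#   print(resp.json())  # {"score": <0-100 float>, "message": "..."}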
@app.get("/")
async def read_root():
return {"message": "Ready to go"}
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
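
# For development, the same server can also be started with auto-reload
# (assumes uvicorn is installed):
#
#   uvicorn main:app --reload --host 0.0.0.0 --port 8000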