"""FastAPI endpoint | |
To run locally use 'uvicorn app:app --host localhost --port 7860' | |
or | |
`python -m uvicorn app:app --reload --host localhost --port 7860` | |
""" | |
import datetime as dt
import logging
import os
import random
from typing import Dict

import numpy as np
import pandas as pd
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates

import scripts.sentiment as sentiment
import scripts.twitter_scraper as ts
import scripts.utils as utils
from scripts import generative

# import nltk  # only needed if the sentence de-duplication in generate_text is re-enabled

logging.basicConfig(level=logging.INFO)
app = FastAPI()
templates = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")

# Construct absolute path to the models folder
models_path = os.path.abspath("models")
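# NOTE: each account is assumed to have its own fine-tuned model saved under
# models/<username>/ (e.g. models/elonmusk/), since generate_text() below
# resolves the model directory with os.path.join(models_path, data["account"]).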
username_list = [
    "alikarimi_ak8",
    "elonmusk",
    "BarackObama",
    "taylorlorenz",
    "cathiedwood",
    "ylecun",
]

## Static objects/paths
start_date = dt.date(year=2023, month=2, day=1)
end_date = dt.date(year=2023, month=3, day=22)
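# For reference, the scraping queries built from these constants render as,
# e.g., "from:elonmusk since:2023-02-01 until:2023-03-22".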
@app.get("/", response_class=HTMLResponse)  # route path assumed
async def webpage(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})
@app.get("/accounts", response_class=HTMLResponse)  # route path assumed
def get_accounts() -> HTMLResponse:
    logging.info(f"Pulling account information on {username_list}")
    account_info_list = [
        ts.get_twitter_account_info(twitter_handle=account) for account in username_list
    ]
    df_account = pd.DataFrame(account_info_list)
    df_account = df_account.style.bar(
        subset=["follower_count", "friends_count"], color="#d65f5f"
    )
    df_account = df_account.format(
        {"follower_count": "{:,.0f}", "friends_count": "{:,.0f}"}
    )
    # Styler.to_html has no `classes` argument; set the table class explicitly
    df_account = df_account.set_table_attributes('class="center"')
    return HTMLResponse(content=df_account.to_html(), status_code=200)
@app.get("/tweets/{username}")  # route path assumed
def get_tweets(username: str) -> dict:
    if username not in username_list:
        raise HTTPException(status_code=404, detail="Account not in scope of project.")
    query = f"from:{username} since:{start_date} until:{end_date}"
    return ts.get_tweets(query=query)
@app.get("/audience/{username}")  # route path assumed
def get_audience(username: str) -> dict:
    if username not in username_list:
        raise HTTPException(status_code=404, detail="Account not in scope of project.")
    query = f"from:{username} since:{start_date} until:{end_date}"
    tweets = ts.get_tweets(query=query)
    # Randomly sample tweets from the user (no more than are available)
    n_samples = min(5, len(tweets))
    tweets_sampled = random.sample(tweets, n_samples)
    # Get all replies to the sampled tweets
    tweet_threads = []
    for tweet in tweets_sampled:
        threads = ts.get_replies(
            username=tweet["username"],
            conversation_id=tweet["conversation_id"],
            max_tweets=100,
        )
        tweet_threads += threads
    # Get the usernames of the accounts replying in the sampled threads
    usernames = [t["username"] for t in tweet_threads]
    # Get account info for each replier, e.g.:
    # {"follower_count": 1, "friends_count": 20, "verified": False}
    info_accounts = [
        ts.get_twitter_account_info(twitter_handle=account) for account in usernames
    ]
    # Get stats for the followers/audience engaging with the tweets
    follower_counts = [account["follower_count"] for account in info_accounts]
    friends_counts = [account["friends_count"] for account in info_accounts]
    verified_counts = [1 if account["verified"] else 0 for account in info_accounts]
    return {
        "sample_size": len(info_accounts),
        "mean_follower_count": round(np.mean(follower_counts), 3),
        "mean_friends_count": round(np.mean(friends_counts), 3),
        "mean_verified": round(np.mean(verified_counts), 3),
    }
@app.get("/sentiment/{username}")  # route path assumed
async def get_sentiment(username: str) -> Dict[str, Dict[str, float]]:
    if username not in username_list:
        raise HTTPException(status_code=404, detail="Account not in scope of project.")
    query = f"from:{username} since:{start_date} until:{end_date}"
    tweets = ts.get_tweets(query=query)
    n_samples = min(5, len(tweets))
    tweets_sampled = random.sample(tweets, n_samples)
    # Gather all replies to the sampled tweets
    tweet_threads = []
    for tweet in tweets_sampled:
        threads = ts.get_replies(
            username=tweet["username"],
            conversation_id=tweet["conversation_id"],
            max_tweets=100,
        )
        tweet_threads += threads
    logging.info(
        f"Collected {len(tweet_threads)} replies to {n_samples} sampled tweets from username: {username}"
    )
    ## Sentiment scoring
    # Score the account's own tweets
    logging.info(f"Running tweet sentiment scoring on username: {username} tweets")
    tweets_scores = sentiment.get_tweets_sentiment(tweets=tweets)
    mean_tweets_score = round(np.mean(tweets_scores), 2)
    ci_tweets = utils.wilson_score_interval(tweets_scores)
    # Score the reply threads to the sampled tweets
    logging.info(f"Running reply-thread sentiment scoring on username: {username}")
    threads_scores = sentiment.get_tweets_sentiment(tweets=tweet_threads)
    mean_threads_score = round(np.mean(threads_scores), 2)
    ci_threads = utils.wilson_score_interval(threads_scores)
    return {
        # Sentiment of replies in the sampled threads
        "thread_level": {
            "mean": mean_threads_score,
            "confidence_interval": ci_threads,
        },
        # Sentiment of the account's own tweets
        "audience_level": {
            "mean": mean_tweets_score,
            "confidence_interval": ci_tweets,
        },
    }
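# For reference, utils.wilson_score_interval is assumed to compute a Wilson
# score interval for the proportion of positive-sentiment scores; a minimal
# sketch under that assumption (not necessarily the project's implementation,
# z=1.96 for a 95% interval):
#
#     def wilson_score_interval(scores, z=1.96):
#         n = len(scores)
#         p = np.mean(scores)
#         center = (p + z**2 / (2 * n)) / (1 + z**2 / n)
#         margin = (z / (1 + z**2 / n)) * np.sqrt(p * (1 - p) / n + z**2 / (4 * n**2))
#         return center - margin, center + margin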
@app.post("/api/generate")  # route path assumed
async def generate_text(request: Request):
    data = await request.json()
    logging.info(f"POST request to generate text for account: {data.get('account')}")
    # Generate text from the fine-tuned model for the requested account
    try:
        generated_text = generative.generate_account_text(
            prompt=data["text"], model_dir=os.path.join(models_path, data["account"])
        )
        logging.info("Successfully generated text from model.")
    except Exception as e:
        logging.error(f"Error generating text: {e}")
        return {"error": "Error generating text"}
    # Return one example
    generated_text = generated_text[0]["generated_text"]
    ## Clean up generated text (optional, currently disabled): de-duplicate
    ## sentences and drop the (often truncated) final one. Requires nltk.
    # sentences = nltk.sent_tokenize(generated_text)
    # unique_sentences = set()
    # non_duplicate_sentences = []
    # for sentence in sentences:
    #     if sentence not in unique_sentences:
    #         non_duplicate_sentences.append(sentence)
    #         unique_sentences.add(sentence)
    # final_text = " ".join(non_duplicate_sentences[:-1])
    final_text = generated_text
    return {"generated_text": final_text}
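# Example request body for the endpoint above (assumed shape, matching the
# keys read from the JSON payload; the prompt text is illustrative only):
#   {"account": "elonmusk", "text": "The future of AI is"}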
@app.get("/examples/breakdown", response_class=HTMLResponse)  # route path and name assumed
async def read_sentiment_breakdown():
    with open("templates/charts/handle_sentiment_breakdown.html") as f:
        html = f.read()
    return HTMLResponse(content=html)
@app.get("/examples/timesteps", response_class=HTMLResponse)  # route path and name assumed
async def read_sentiment_timesteps():
    with open("templates/charts/handle_sentiment_timesteps.html") as f:
        html = f.read()
    return HTMLResponse(content=html)
# Alternatively, from the shell: uvicorn --workers=2 app:app
if __name__ == "__main__":
    uvicorn.run("app:app", host="127.0.0.1", port=5049, reload=True)