import gradio as gr

import sqlite3
import huggingface_hub
import pandas as pd
import shutil
import os
import datetime
from apscheduler.schedulers.background import BackgroundScheduler

DB_FILE = "./reviews.db"

TOKEN = os.environ.get('HF_KEY')
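# The Space persists its SQLite data in a Hugging Face dataset repo; HF_KEY is
# assumed to be a write-scoped access token stored in the Space secrets.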

repo = huggingface_hub.Repository(
    local_dir="data",
    repo_type="dataset",
    clone_from="CognitiveScience/csdhdata",
    use_auth_token=TOKEN
)
repo.git_pull()

# Refresh the local DB from the latest copy in the dataset repo
shutil.copyfile("./data/reviews.db", DB_FILE)

# Create the reviews table if it doesn't already exist

db = sqlite3.connect(DB_FILE)
db.execute(
    '''
    CREATE TABLE IF NOT EXISTS reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                                        name TEXT, review INTEGER, comments TEXT)
    ''')
db.commit()
db.close()
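# Schema note: the column order (id, created_at, name, review, comments) must
# line up with the DataFrame columns built in get_latest_reviews below.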

def get_latest_reviews(db: sqlite3.Connection):
    reviews = db.execute("SELECT * FROM reviews ORDER BY id DESC limit 10").fetchall()
    total_reviews = db.execute("Select COUNT(id) from reviews").fetchone()[0]
    reviews = pd.DataFrame(reviews, columns=["id", "date_created", "name", "review", "comments"])
    return reviews, total_reviews
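# Illustrative usage (the caller owns the connection lifecycle):
#   conn = sqlite3.connect(DB_FILE)
#   latest_df, n_total = get_latest_reviews(conn)
#   conn.close()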


def add_review(name: str, review: int, comments: str):
    db = sqlite3.connect(DB_FILE)
    cursor = db.cursor()
    cursor.execute("INSERT INTO reviews(name, review, comments) VALUES(?,?,?)", [name, review, comments])
    db.commit()
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
    return reviews, total_reviews

def load_data():
    db = sqlite3.connect(DB_FILE)
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
    return reviews, total_reviews

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            data = gr.Dataframe()
            count = gr.Number(label="Rates!")
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="Name", placeholder="What is your name?")
            review = gr.Textbox(label="Review")
            comments = gr.Textbox(label="Comments")
            submit = gr.Button(value="Submit")

    submit.click(add_review, [name, review, comments], [data, count])
    demo.load(load_data, None, [data, count])
    
def backup_db():
    shutil.copyfile(DB_FILE, "./data/reviews.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    db.close()
    pd.DataFrame(reviews, columns=["id", "date_created", "name", "review", "comments"]).to_csv("./data/reviews.csv", index=False)
    print("updating db")
    repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.datetime.now()}")
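# blocking=False pushes in the background so the scheduler thread isn't held
# up; note that copying a SQLite file while a write is in flight can, in
# principle, snapshot a mid-transaction state.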

scheduler = BackgroundScheduler()
scheduler.add_job(func=backup_db, trigger="interval", seconds=60)
scheduler.start()
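# Optional, illustrative: shut the scheduler down cleanly on exit (the daemon
# default usually suffices on Spaces; this is a sketch, not required).
import atexit
atexit.register(lambda: scheduler.shutdown(wait=False))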

demo.launch()