import streamlit as st
import google.generativeai as genai
import ast
import time
import os
import re
from typing import List, Tuple, Optional


def extract_python_code(text: str) -> Optional[str]:
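    """Return the contents of the first ```python fenced block in text, or None if no fence is found."""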
    pattern = r"```python\n(.*?)```"
    match = re.search(pattern, text, re.DOTALL)
    return match.group(1).strip() if match else None


def configure_genai():
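    """Configure the Gemini client from the SECRET_KEY environment variable; halt the app if it is missing."""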
    secret_key = os.getenv("SECRET_KEY")
    if not secret_key:
        st.error("API key not found. Please set the SECRET_KEY environment variable.")
        st.stop()
    genai.configure(api_key=secret_key)


def parse_gemini_response(response_text: str) -> Tuple[str, str]:
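    """Parse the model's reply as a two-element Python list literal and return it as a (verdict, evidence) tuple."""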
    try:
        parsed = ast.literal_eval(response_text)
        if isinstance(parsed, list) and len(parsed) == 2:
            return parsed[0], parsed[1]
        raise ValueError("Unexpected response format")
    except Exception as e:
        return "Error", f"Failed to parse response: {str(e)}"


def get_gemini_response(input_text: str) -> Tuple[str, str]:
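    """Ask Gemini to fact-check a single statement; return (verdict, evidence), or ("Error", message) on failure."""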
prompt = """You are a fact checker. Given a text, respond with:
1. 'True', 'False', or 'Unsure' (if you are unsure or knowledge cutoff)
2. Evidence in support or 'knowledge cutoff'
Respond in this exact format: ['True/False/Unsure', 'evidence or knowledge cutoff']
Example input: 'Google was founded in 1998'
Example output: ['True', 'Google was indeed founded in September 1998 by Larry Page and Sergey Brin']
Now give a response in the exact described format for the following text:
"""
    model = genai.GenerativeModel('gemini-1.5-pro')
    try:
        response = model.generate_content(prompt + input_text)
        result, evidence = parse_gemini_response(response.text)
        return result, evidence
    except Exception as e:
        return "Error", f"Failed to get or parse the model's response: {str(e)}"


def generate_interesting_facts(topic: str) -> List[str]:
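    """Ask Gemini for up to 10 interesting facts about topic; return them as a list of strings (empty on failure)."""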
prompt = f"""Generate up to 10 interesting facts about the following topic.
Return only a Python list of strings, with each string being a single fact.
Topic: {topic}
"""
    model = genai.GenerativeModel('gemini-1.5-pro')
    try:
        response = model.generate_content(prompt)
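        # The model may wrap its answer in a ```python fence; pull out the list literal if so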
        code = extract_python_code(response.text)
        facts = ast.literal_eval(code) if code else []
        return facts if isinstance(facts, list) else []
    except Exception as e:
        st.error(f"Failed to generate facts: {str(e)}")
        return []


def main():
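    """Streamlit UI: take a topic, generate candidate facts, then fact-check each one in its own expander."""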
st.title("Verified Interesting Fact Generator")
configure_genai()
topic = st.text_input('Enter a topic to generate interesting facts about (e.g., "Elephants", "Mars")')
if st.button("Generate and Verify Facts"):
if not topic:
st.warning("Please enter a topic.")
return
with st.spinner('Generating facts...'):
facts = generate_interesting_facts(topic)
if not facts:
st.error("Failed to generate facts. Please try a different topic or try again later.")
return
st.subheader(f"Verified Interesting Facts about {topic}:")
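        # Verify each fact with its own model call, pausing between calls to stay under the API rate limit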
        for fact in facts:
            with st.expander(fact):
                with st.spinner('Verifying...'):
                    result, evidence = get_gemini_response(fact)
                    if result.lower() == "true":
                        st.success(f"Likely True: {evidence}")
                    elif result.lower() == "false":
                        st.error(f"Likely False: {evidence}")
                    elif result.lower() == "unsure":
                        st.warning(f"Uncertain: {evidence}")
                    else:
                        st.error(f"Error in fact-checking: {evidence}")
            time.sleep(4)  # Delay to avoid rate limiting


if __name__ == "__main__":
    main()