# Freedom of Information Act (FOIA) Datasets
The Freedom of Information Act (FOIA) is the federal law that gives the public the right to request records from U.S. government agencies; agencies must release those records unless they fall under one of the statute's exemptions. The small Streamlit app below lists a handful of well-known federal datasets, links each to a Wikipedia search, and scans the working directory for URLs embedded in XML files.
```python
import os
from xml.etree import ElementTree

import streamlit as st
```
First, a helper that builds a Wikipedia search URL from a dataset name:
```python
def create_search_url_wikipedia(dataset_name):
    base_url = "https://www.wikipedia.org/search-redirect.php?family=wikipedia&language=en&search="
    # Encode spaces as '+', percent-encode en dashes, and spell out ampersands.
    return base_url + dataset_name.replace(' ', '+').replace('–', '%E2%80%93').replace('&', 'and')
```
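As a quick sanity check, the helper turns a dataset name into Wikipedia's search-redirect link (output shown as a comment):

```python
# "College Scorecard" -> spaces become '+', yielding a search-redirect link.
print(create_search_url_wikipedia("College Scorecard"))
# https://www.wikipedia.org/search-redirect.php?family=wikipedia&language=en&search=College+Scorecard
```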
Next, a function that scans the current directory for XML files and extracts any URLs it finds:
```python
def scan_xml_for_urls():
    urls = []
    for file in os.listdir('.'):
        if file.endswith('.xml'):
            try:
                tree = ElementTree.parse(file)
                root = tree.getroot()
                # Assume URLs live in <url> elements; adjust for other schemas.
                for url in root.iter('url'):
                    urls.append(url.text)
            except ElementTree.ParseError:
                st.error(f"Error parsing {file}")
    return urls
```
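To try the scanner, you can drop a small file like this hypothetical `sources.xml` into the app's working directory; any text inside `<url>` elements, at any nesting depth, will be collected. A minimal sketch:

```python
# Hypothetical fixture: write a tiny XML file, then scan the directory.
sample = """<?xml version="1.0"?>
<sources>
    <dataset>
        <name>College Scorecard</name>
        <url>https://collegescorecard.ed.gov/data/</url>
    </dataset>
</sources>
"""
with open("sources.xml", "w") as f:
    f.write(sample)

# Prints the URL above (plus any URLs found in other local .xml files).
print(scan_xml_for_urls())
```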
The main application ties these together: a title, a short description of FOIA, a table of datasets with their search links, and the results of the XML scan:
```python
def main():
    st.title("Freedom of Information Act (FOIA) and Open Data")

    # Description of FOIA
    st.markdown("""
    The Freedom of Information Act (FOIA) is a law that keeps citizens in the know
    about their government. By allowing full or partial disclosure of previously
    unreleased information and documents controlled by the United States government,
    FOIA strengthens the principle of transparency and accountability. Datasets
    created or used by federal programs, and thus made publicly available under
    FOIA, are invaluable resources for researchers, developers, and curious minds
    alike!
    """)

    # List of datasets under FOIA with guessed Wikipedia URLs
    datasets = [
        "Provider Taxonomy",
        "Consumer Complaint Database",
        "National Bridge Inventory",
        "Medicare Provider Utilization and Payment Data",
        "College Scorecard",
        "Toxic Release Inventory",
        "Veterans Data",
        "Public Access to Court Electronic Records (PACER)",
    ]

    st.markdown("## FOIA Datasets and Their Wikipedia URLs")
    # Build the table as a single Markdown string; separate st.markdown()
    # calls each render independently and would break the table layout.
    rows = ["| Dataset | Wikipedia URL |", "| ------- | ------------- |"]
    for dataset in datasets:
        url = create_search_url_wikipedia(dataset)
        rows.append(f"| {dataset} | [Link]({url}) |")
    st.markdown("\n".join(rows))

    # Scan for XML files and display any URLs they contain
    st.markdown("## Detected URLs in Local XML Files")
    urls = scan_xml_for_urls()
    if urls:
        for url in urls:
            st.markdown(f"- [URL]({url})")
    else:
        st.markdown("No XML files with URLs found in the current directory.")
```
Finally, the standard entry-point guard:
```python
if __name__ == "__main__":
    main()
```
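Saved as, say, `app.py` (the filename is arbitrary), the script runs under Streamlit's standard CLI with `streamlit run app.py`; place any XML files you want scanned in the same directory before launching.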