# -*- coding: utf-8 -*-
"""Untitled3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1OPk27uLuoRSbYWcNdKG0Qtq0Ffb9RrcT
"""
import requests
import pandas as pd
urls = ['https://nascent.colorado.edu/samples/all_samples','https://nascent.colorado.edu/datasets']
for url in urls:
    html = requests.get(url).text
    dfs = pd.read_html(html)
    # Promote the first row to the header row and cast the data to strings
    dfs[0].columns = dfs[0].iloc[0]
    dfs[0] = dfs[0].iloc[1:].astype(str)
    # Pretty-print every table found on the page
    for i, df in enumerate(dfs):
        print(f"{i}: {df.shape}")
        print(df.head())
    # Write the main table to a Parquet file, named after the last URL segment
    dfs[0].to_parquet(f"{url.split('/')[-1]}.parquet")
    # Make a CSV version while we're here
    dfs[0].to_csv(f"{url.split('/')[-1]}.csv")
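
# A minimal sketch of reading a saved table back for downstream filtering;
# 'all_samples.parquet' matches a filename produced by the loop above. The
# table's fields aren't confirmed here, so inspect the columns before
# filtering on any of them.
samples = pd.read_parquet('all_samples.parquet')
print(samples.columns.tolist())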

# Scrape this website
# https://nascent.colorado.edu/
# TODO once we pick out some samples of interest
import re

import requests
from bs4 import BeautifulSoup

response = requests.get('https://nascent.colorado.edu/samples/SRZ7741175')
soup = BeautifulSoup(response.content, 'html.parser')
# Collect every link whose href ends in ".bed" (dot escaped so it matches literally)
bed_files = soup.find_all('a', href=re.compile(r'\.bed$'))
for bed_file in bed_files:
    print(bed_file['href'])
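
# A minimal sketch of downloading the .bed files found above, assuming the
# hrefs may be relative (urljoin handles both relative and absolute links);
# local filenames simply reuse the last path component of each URL.
import os
from urllib.parse import urljoin

base_url = 'https://nascent.colorado.edu/samples/SRZ7741175'
for bed_file in bed_files:
    bed_url = urljoin(base_url, bed_file['href'])
    local_name = os.path.basename(bed_url)
    with open(local_name, 'wb') as out:
        out.write(requests.get(bed_url).content)
    print(f"saved {local_name}")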