import datetime
import json

import nltk
import requests
from bs4 import BeautifulSoup
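
# nltk.sent_tokenize depends on the "punkt" tokenizer data. This guard is an
# addition (not in the original script): it fetches the data on first run so
# the sent_tokenize calls below do not raise LookupError. Adjust or remove if
# the data is already installed in your environment.
try:
    nltk.data.find("tokenizers/punkt")
except LookupError:
    nltk.download("punkt")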

class WebScraper:
    def __init__(self, url):
        self.URL = url
        # Initialize to None so attribute checks below cannot raise
        # AttributeError when the request fails.
        self.webpage = None
        self.soup = None
        try:
            self.webpage = requests.get(self.URL)
        except requests.exceptions.RequestException as e:
            print(f"Error: {e}")
        if self.webpage is not None:
            try:
                self.soup = BeautifulSoup(self.webpage.text, "html.parser")
            except Exception:
                print("Error: Failed to create BeautifulSoup object.")

    def remove_unicode(self, string):
        # Drop any non-ASCII characters from the string.
        return string.encode('ascii', 'ignore').decode('ascii')

    def get_page_title(self):
        try:
            div_element = self.soup.find('div', class_='m-statement__quote')
            title = div_element.get_text(strip=True)
            cleaned_title = self.remove_unicode(title)
            cleaned_title = cleaned_title.replace('"', '')
        except AttributeError:
            return None  # Error: Failed to get page title.
        return cleaned_title

    def get_page_author(self):
        try:
            author_element = self.soup.find('div', class_='m-author__content').find('a')
            author = author_element.get_text(strip=True)
        except AttributeError:
            return None  # Error: Failed to get page author.
        return author

    def get_page_posted_date(self):
        try:
            date_element = self.soup.find('span', class_='m-author__date')
            date = date_element.get_text(strip=True)
            # Reformat e.g. "January 5, 2023" as "01/05/2023".
            date_obj = datetime.datetime.strptime(date, "%B %d, %Y")
            formatted_date = date_obj.strftime("%m/%d/%Y")
        except (AttributeError, ValueError):
            return None  # Error: Failed to get page posted date.
        return formatted_date

    def get_sci_check_digest(self):
        try:
            div_element = self.soup.find('div', class_='short-on-time')
            li_tags = div_element.find_all('li') if div_element else []
            sci_digest_list = [li.get_text(strip=True) for li in li_tags]
            final_sci_digest = ", ".join(sci_digest_list)
            cleaned_sci_digest = self.remove_unicode(final_sci_digest)
            cleaned_sci_digest = cleaned_sci_digest.replace('"', '')
            tokenised_sci_digest = nltk.sent_tokenize(cleaned_sci_digest)
        except AttributeError:
            return None  # Error: Failed to get SciCheck digest.
        return tokenised_sci_digest

    def get_paragraph_list(self):
        try:
            article_element = self.soup.find('article', class_='m-textblock')
            p_elements = article_element.find_all('p')
            paragraph_list = [p.get_text(strip=True) for p in p_elements]
            final_paragraphs = " ".join(paragraph_list)
            cleaned_paragraphs = final_paragraphs.replace('\u00a0', ' ')
            cleaned_paragraphs = self.remove_unicode(cleaned_paragraphs)
            cleaned_paragraphs = cleaned_paragraphs.replace('"', '')
            tokenized_paragraphs = nltk.sent_tokenize(cleaned_paragraphs)
        except AttributeError:
            return None, None  # Error: Failed to get paragraphs.
        return paragraph_list, tokenized_paragraphs

    def get_sentences_citations(self):
        try:
            p_elements = self.soup.select('article.m-textblock p')
            citation_list = []
            for p in p_elements:
                anchor = p.find('a')
                if anchor and 'href' in anchor.attrs:
                    href_text = anchor['href']
                    sentence = p.get_text(strip=True)
                    cleaned_sentence = sentence.replace('\u00a0', ' ')
                    cleaned_sentence = self.remove_unicode(cleaned_sentence)
                    cleaned_sentence = cleaned_sentence.replace('"', '')
                    citation_list.append({"sentence": cleaned_sentence, "hrefs": href_text})
        except AttributeError:
            return None  # Error: Failed to get citation list.
        return citation_list

    def get_issue_list(self):
        issue_list = []
        try:
            ul_element = self.soup.find('ul', class_='m-list--horizontal')
            li_elements = ul_element.find_all('li', class_='m-list__item')
            # The final list item is intentionally skipped.
            for li in li_elements[:-1]:
                category = li.a['title']
                issue_list.append(category)
        except (AttributeError, TypeError, KeyError):
            return None  # Error: Failed to get issue list.
        return issue_list

    def get_image_info(self):
        try:
            article_element = self.soup.find('article', class_='m-textblock')
            p_elements = article_element.find_all('p')
            em_elements = article_element.find_all('em')
            img_count = 0
            image_captions = []
            for p in p_elements:
                img_tag = p.find('img')
                if img_tag:
                    img_src = img_tag.get('src')
                    if img_src:
                        img_count += 1
                        # Pair each image with the <em> caption at the same index.
                        if img_count <= len(em_elements):
                            image_caption = em_elements[img_count - 1].get_text(strip=True)
                            cleaned_captions = image_caption.replace('\u00a0', ' ')
                            cleaned_captions = self.remove_unicode(cleaned_captions)
                            cleaned_captions = cleaned_captions.replace('"', '')
                            image_captions.append({"image_src": img_src, "image_caption": cleaned_captions})
        except AttributeError:
            return None  # Error: Failed to get image info.
        return image_captions

    def get_label(self):
        # Initialize so an empty result set cannot raise NameError below.
        label = None
        try:
            target = self.soup.find_all('div', attrs={'class': 'm-statement__meter'})
            for i in target:
                label = i.find('div', attrs={'class': 'c-image'}).find('img').get('alt')
                # if label == 'pants-fire':
                #     label = 'false'
                # elif label == 'mostly-true':
                #     label = 'true'
        except AttributeError:
            return None  # Error: Failed to get label.
        return label
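
# The driver below expects ./income.json to hold parallel lists of article
# URLs (apparently PolitiFact pages, given the CSS selectors above) and their
# labels. The exact shape is inferred from the keys used below, an assumption
# based on this script alone, e.g.:
#   {"url": ["https://www.politifact.com/...", ...], "label": ["false", ...]}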
with open("./income.json", "r") as infile:
    data = json.load(infile)

urls = data["url"]
labels = data["label"]

scraped_data = []
for url, label in zip(urls, labels):
    print(url)
    scraper = WebScraper(url)
    record = {
        "url": url,
        "title": scraper.get_page_title(),
        "author": scraper.get_page_author(),
        "posted": scraper.get_page_posted_date(),
        "sci_digest": scraper.get_sci_check_digest(),
        "paragraphs": scraper.get_paragraph_list()[1],  # tokenized sentences
        "issues": scraper.get_issue_list(),
        "image_data": scraper.get_image_info(),
        "data": scraper.get_sentences_citations(),
        "label": label,
    }
    scraped_data.append(record)

with open("./json_new/income.json", "w") as outfile:
    json.dump(scraped_data, outfile)