Dmitry Chaplinsky committed on
Commit 77f91c1
1 Parent(s): 8b11c67

Code and amendments to the README

.gitignore ADDED
@@ -0,0 +1,12 @@
+ .DS_Store
+ .scrapy/
+ venv/
+ *.csv
+ *.xml
+ *.json
+ *.jsonlines
+ *.jsonl
+ *.pyc
+ test_spiders/
+ .hg/
+ .hgignore
README.md CHANGED
@@ -14,7 +14,7 @@ Every Prompt is a data-driven approach to mining instructions from the web.
  It contains over a million FAQs and HowTos from around the world in a structured format.
  It also has basic pre-processing to calculate the length of the useful text and identify the language of that text with the help of [GCLD3](https://github.com/google/cld3)
 
- It relies on the [Web Data Commons](http://webdatacommons.org) dataset to find the seed list of sites with [**HowTo**](https://schema.org/HowTo) and [**FAQPage**](https://schema.org/FAQPage) items.
+ It relies on the [Web Data Commons](http://webdatacommons.org) dataset (from October 2022) to find the seed list of sites with [**HowTo**](https://schema.org/HowTo) and [**FAQPage**](https://schema.org/FAQPage) items.
  The general pipeline looks like this:
  * Download 1.6TB of structured data from webdatacommons to identify the pages with the structured data we need (wget/parallel). That gives us 1,985,925 seed pages
  * Crawls the seed pages and tries to extract structured data using [extruct](https://pypi.org/project/extruct/#description) package. That left around 1,358,638 pages which are alive and well-formed.
@@ -24,5 +24,14 @@ The general pipeline looks like this:
  You can use the resulting dataset by filtering for the language and amount of the text. You need to convert the structured data into instructions yourself.
  You'll need to apply extra cleansing/evaluation of the instructions you've got because, you know, the internet is still full of crap.
 
+ ## Recreating the results
+ 1. Clone the repo without the LFS files
+ 2. Install requirements from `requirements.txt`
+ 3. Install `pv` and `parallel`
+ 4. Run `bin/get_seed_urls.sh` to filter URLs of interest out of 1.6TB of compressed data. Don't worry about disk space. Worry about the traffic
+ 5. Run the scrapy spider like this: `scrapy crawl webdatacommons_org -s WEB_DATA_COMMONS=web_data_commons_urls_sample.txt -L INFO -o webdatacommons.jsonlines`, with `WEB_DATA_COMMONS` pointing to the list of seed URLs from step 4.
+ 6. Run `python extract_relevant_structured_data.py --num-threads 12 webdatacommons.jsonlines relevant.jsonlines.bz2`
+ 7. Run `python export_structured_data.py relevant.jsonlines.bz2 extruct_out.jsonlines.bz2` to obtain the final version of the dataset
+
  ## License
  **Code** of the project has an MIT license.
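Not part of the commit: a minimal sketch of the "filtering for the language and amount of the text" step that the README leaves to the reader. It assumes the `extruct_out.jsonlines.bz2` file from step 7 and the `language`, `language_is_reliable` and `text_length` fields written by `bin/export_structured_data.py` below; the English-only filter, the 500-character threshold and the output file name are arbitrary choices for the example.

import json

import smart_open  # already in requirements.txt; handles .bz2 transparently

# Keep only records that are reliably detected as English and carry a
# reasonable amount of useful text.
with smart_open.open("extruct_out.jsonlines.bz2", "rt", encoding="utf-8") as reader, \
        smart_open.open("english_long.jsonlines", "wt", encoding="utf-8") as writer:
    for line in reader:
        record = json.loads(line)
        if (
            record.get("language") == "en"
            and record.get("language_is_reliable")
            and record.get("text_length", 0) >= 500  # arbitrary threshold
        ):
            writer.write(json.dumps(record, ensure_ascii=False) + "\n")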
bin/export_structured_data.py ADDED
@@ -0,0 +1,124 @@
+ from typing import Iterator, List, Dict, TypeVar
+ import json
+ import argparse
+ from concurrent.futures import ThreadPoolExecutor
+ from itertools import islice
+
+ import smart_open
+ from tqdm import tqdm
+ import jmespath
+ import gcld3
+
+ T = TypeVar("T")
+
+
+ detector = gcld3.NNetLanguageIdentifier(min_num_bytes=20, max_num_bytes=1000)
+
+ def batch_iterator(iterator: Iterator[T], batch_size: int = 50) -> Iterator[List[T]]:
+     """
+     Batch an iterator into an iterator over lists of batch size.
+     """
+     iterator = iter(iterator)
+     while True:
+         batch = list(islice(iterator, batch_size))
+         if not batch:
+             return
+         yield batch
+
+
+ def process_chunk(chunk: List, extractor: jmespath.parser.ParsedResult) -> List[Dict]:
+     """
+     Apply JMESPath to a chunk of JSONL data.
+     """
+     results: List[Dict] = []
+     for line in chunk:
+         data = json.loads(line)
+         extracted = extractor.search(data)
+         if extracted is not None:
+             if not isinstance(extracted, list):
+                 extracted = [extracted]
+
+             try:
+                 extracted_str = " ".join(set([ex for ex in extracted if isinstance(ex, str)])).strip()
+             except:
+                 print(json.dumps(data, ensure_ascii=False))
+                 raise
+
+             lang = detector.FindLanguage(extracted_str)
+             data["language"] = lang.language
+             data["language_is_reliable"] = lang.is_reliable
+             data["text_length"] = len(extracted_str)
+             data["data_length"] = len(line)
+             data["text_to_data_ratio"] = len(extracted_str) / len(line)
+
+             results.append(data)
+
+     return results
+
+
+ def process_file(
+     input_files: str,
+     output_file: str,
+     chunk_size: int,
+     num_threads: int,
+     extractor: jmespath.parser.ParsedResult,
+ ):
+     """
+     Apply JMESPath to a large JSONL file in parallel.
+     input_file: path to input JSONL file
+     output_file: path to output JSONL file
+     chunk_size: number of lines to process at a time
+     num_threads: number of threads to use
+     extractor: compiled JMESPath expression to apply
+     """
+     with smart_open.open(output_file, "wt", encoding="utf-8") as writer:
+         for input_file in input_files:
+             with smart_open.open(
+                 input_file, "rt", encoding="utf-8"
+             ) as reader:
+                 with ThreadPoolExecutor(max_workers=num_threads) as executor:
+                     for chunk in batch_iterator(tqdm(reader), batch_size=chunk_size):
+                         if not chunk:
+                             break
+
+                         results = executor.map(process_chunk, [chunk], [extractor])
+                         for result in results:
+                             for item in result:
+                                 writer.write(json.dumps(item, ensure_ascii=False))
+                                 writer.write("\n")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(
+         description="Apply JMESPath to a large JSONL files in parallel."
+     )
+     parser.add_argument("input_files", help="path to input JSONL files", nargs="+")
+     parser.add_argument("output_file", help="path to output JSONL file")
+     parser.add_argument(
+         "--chunk-size",
+         type=int,
+         default=10000,
+         help="number of lines to process at a time (default: 10000)",
+     )
+     parser.add_argument(
+         "--num-threads",
+         type=int,
+         default=4,
+         help="number of threads to use (default: 4)",
+     )
+     args = parser.parse_args()
+     # TODO: itemListElement as text or Thing?
+     jmespath_expression: jmespath.parser.ParsedResult = jmespath.compile(
+         "[name, description, headline, about, tool, supply, keywords, step[].name, step[].text, step[].itemListElement[].text, "
+         + "step[].itemListElement[].itemListElement[].text, mainEntity[].name, mainEntity[].acceptedAnswer.text, "
+         + "mainEntity[].acceptedAnswer.name, mainEntity.name, mainEntity.acceptedAnswer.text, "
+         + "mainEntity.*.acceptedAnswer[].text, mainEntity[].acceptedAnswer[].text, step.itemListElement[].text, step.itemListElement[].itemListElement[].text][][]"
+     )
+
+     process_file(
+         args.input_files,
+         args.output_file,
+         args.chunk_size,
+         args.num_threads,
+         jmespath_expression,
+     )
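To illustrate what the JMESPath expression compiled at the bottom of this script pulls out of one record, here is a small sketch that is not part of the repo: it uses a trimmed version of the same expression on a made-up HowTo-like record, and keeps only the string values the way `process_chunk` does (minus the `set()` de-duplication) before handing them to the language detector.

import jmespath

# Trimmed version of the expression compiled in export_structured_data.py.
expression = jmespath.compile(
    "[name, description, step[].name, step[].text, "
    "mainEntity[].name, mainEntity[].acceptedAnswer.text][][]"
)

# Hypothetical HowTo-like record, as it might appear in relevant.jsonlines.
record = {
    "name": "How to sharpen a knife",
    "step": [
        {"name": "Get a whetstone", "text": "Soak the stone for ten minutes."}
    ],
}

values = [v for v in expression.search(record) if isinstance(v, str)]
print(" ".join(values))
# Expected: How to sharpen a knife Get a whetstone Soak the stone for ten minutes.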
bin/extract_relevant_structured_data.py ADDED
@@ -0,0 +1,102 @@
+ from typing import Iterator, List, Dict, TypeVar
+ import json
+ import argparse
+ from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
+ from itertools import islice
+
+ import smart_open
+ from tqdm import tqdm
+ import jmespath
+
+ T = TypeVar("T")
+
+
+ def batch_iterator(iterator: Iterator[T], batch_size: int = 50) -> Iterator[List[T]]:
+     iterator = iter(iterator)
+     while True:
+         batch = list(islice(iterator, batch_size))
+         if not batch:
+             return
+         yield batch
+
+
+ def process_chunk(chunk: List, extractor: jmespath.parser.ParsedResult) -> List[Dict]:
+     """
+     Apply JMESPath to a chunk of JSONL data.
+     """
+     results: List[Dict] = []
+     for line in chunk:
+         data = json.loads(line)
+         extracted = extractor.search(data)
+         if extracted is not None:
+             if not isinstance(extracted, list):
+                 extracted = [extracted]
+
+             for item in extracted:
+                 item["url"] = data["url"]
+                 item["schema_type"] = data["schema_type"]
+                 results.append(item)
+     return results
+
+
+ def process_file(
+     input_file: str,
+     output_file: str,
+     chunk_size: int,
+     num_threads: int,
+     extractor: jmespath.parser.ParsedResult,
+ ):
+     """
+     Apply JMESPath to a large JSONL file in parallel.
+     input_file: path to input JSONL file
+     output_file: path to output JSONL file
+     chunk_size: number of lines to process at a time
+     num_threads: number of threads to use
+     extractor: compiled JMESPath expression to apply
+     """
+     with smart_open.open(input_file, "rt", encoding="utf-8") as reader, smart_open.open(
+         output_file, "wt", encoding="utf-8"
+     ) as writer:
+         with ThreadPoolExecutor(max_workers=num_threads) as executor:
+             for chunk in batch_iterator(tqdm(reader), batch_size=chunk_size):
+                 if not chunk:
+                     break
+
+                 results = executor.map(process_chunk, [chunk], [extractor])
+                 for result in results:
+                     for item in result:
+                         writer.write(json.dumps(item, ensure_ascii=False))
+                         writer.write("\n")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(
+         description="Apply JMESPath to a large JSONL file in parallel."
+     )
+     parser.add_argument("input_file", help="path to input JSONL file")
+     parser.add_argument("output_file", help="path to output JSONL file")
+     parser.add_argument(
+         "--chunk-size",
+         type=int,
+         default=10000,
+         help="number of lines to process at a time (default: 10000)",
+     )
+     parser.add_argument(
+         "--num-threads",
+         type=int,
+         default=4,
+         help="number of threads to use (default: 4)",
+     )
+     args = parser.parse_args()
+
+     jmespath_expression: jmespath.parser.ParsedResult = jmespath.compile(
+         """metadata.*[?"@type"=='FAQPage' || "@type"=='HowTo'][]"""
+     )
+
+     process_file(
+         args.input_file,
+         args.output_file,
+         args.chunk_size,
+         args.num_threads,
+         jmespath_expression,
+     )
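For orientation, another sketch that is not part of the repo: the filter expression above keeps only the FAQPage/HowTo items out of everything extruct found on a page. The record below is invented but shaped like the output of the `webdatacommons_org` spider.

import json

import jmespath

# The same expression the script compiles.
expression = jmespath.compile(
    """metadata.*[?"@type"=='FAQPage' || "@type"=='HowTo'][]"""
)

# Made-up spider record: one JSON-LD FAQPage plus an unrelated BreadcrumbList.
record = {
    "url": "https://example.com/faq/",
    "schema_type": "http://schema.org/FAQPage",
    "metadata": {
        "json-ld": [
            {"@type": "FAQPage", "mainEntity": [{"@type": "Question", "name": "Is this a demo?"}]},
            {"@type": "BreadcrumbList", "itemListElement": []},
        ],
        "microdata": [],
    },
}

# Only the FAQPage item should survive; process_chunk then copies "url" and
# "schema_type" from the parent record onto it.
print(json.dumps(expression.search(record), indent=2))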
bin/get_seed_urls.sh ADDED
@@ -0,0 +1,10 @@
+ #!/bin/bash
+
+ # download file containing urls
+ curl http://webdatacommons.org/structureddata/2022-12/files/file.list > urls.txt
+
+ # create output file
+ touch output.txt
+
+ # use parallel command to download/grep in parallel
+ cat urls.txt | pv -cN Input | parallel -j 4 "curl -s {} | zcat | grep -e '<http://schema.org/FAQPage>' -e '<http://schema.org/HowTo>'" | pv -cN Output > output.txt
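The grep above keeps raw N-Quad lines from the webdatacommons dumps; the spider's `start_requests()` later takes the schema.org type from the third field and the page URL from the fourth. Below is a hypothetical matched line (the subject and URLs are made up) together with the same unpacking the spider uses.

# Hypothetical matched line: subject, predicate, object, page URL, trailing dot.
line = (
    "_:node1 <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> "
    "<http://schema.org/FAQPage> <https://example.com/faq/> ."
)

_, _, schema_type, url, _ = line.split(" ", 4)
print(schema_type.strip("<>"))  # http://schema.org/FAQPage
print(url.strip("<>"))          # https://example.com/faq/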
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ smart-open
+ jmespath
+ tqdm
+ Scrapy
+ extruct
+ gcld3
scraper/scraper/__init__.py ADDED
File without changes
scraper/scraper/items.py ADDED
@@ -0,0 +1,12 @@
+ # Define here the models for your scraped items
+ #
+ # See documentation in:
+ # https://docs.scrapy.org/en/latest/topics/items.html
+
+ import scrapy
+
+
+ class ScraperItem(scrapy.Item):
+     # define the fields for your item here like:
+     # name = scrapy.Field()
+     pass
scraper/scraper/middlewares.py ADDED
@@ -0,0 +1,103 @@
+ # Define here the models for your spider middleware
+ #
+ # See documentation in:
+ # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+ from scrapy import signals
+
+ # useful for handling different item types with a single interface
+ from itemadapter import is_item, ItemAdapter
+
+
+ class ScraperSpiderMiddleware:
+     # Not all methods need to be defined. If a method is not defined,
+     # scrapy acts as if the spider middleware does not modify the
+     # passed objects.
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         # This method is used by Scrapy to create your spiders.
+         s = cls()
+         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+         return s
+
+     def process_spider_input(self, response, spider):
+         # Called for each response that goes through the spider
+         # middleware and into the spider.
+
+         # Should return None or raise an exception.
+         return None
+
+     def process_spider_output(self, response, result, spider):
+         # Called with the results returned from the Spider, after
+         # it has processed the response.
+
+         # Must return an iterable of Request, or item objects.
+         for i in result:
+             yield i
+
+     def process_spider_exception(self, response, exception, spider):
+         # Called when a spider or process_spider_input() method
+         # (from other spider middleware) raises an exception.
+
+         # Should return either None or an iterable of Request or item objects.
+         pass
+
+     def process_start_requests(self, start_requests, spider):
+         # Called with the start requests of the spider, and works
+         # similarly to the process_spider_output() method, except
+         # that it doesn’t have a response associated.
+
+         # Must return only requests (not items).
+         for r in start_requests:
+             yield r
+
+     def spider_opened(self, spider):
+         spider.logger.info('Spider opened: %s' % spider.name)
+
+
+ class ScraperDownloaderMiddleware:
+     # Not all methods need to be defined. If a method is not defined,
+     # scrapy acts as if the downloader middleware does not modify the
+     # passed objects.
+
+     @classmethod
+     def from_crawler(cls, crawler):
+         # This method is used by Scrapy to create your spiders.
+         s = cls()
+         crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+         return s
+
+     def process_request(self, request, spider):
+         # Called for each request that goes through the downloader
+         # middleware.
+
+         # Must either:
+         # - return None: continue processing this request
+         # - or return a Response object
+         # - or return a Request object
+         # - or raise IgnoreRequest: process_exception() methods of
+         #   installed downloader middleware will be called
+         return None
+
+     def process_response(self, request, response, spider):
+         # Called with the response returned from the downloader.
+
+         # Must either;
+         # - return a Response object
+         # - return a Request object
+         # - or raise IgnoreRequest
+         return response
+
+     def process_exception(self, request, exception, spider):
+         # Called when a download handler or a process_request()
+         # (from other downloader middleware) raises an exception.
+
+         # Must either:
+         # - return None: continue processing this exception
+         # - return a Response object: stops process_exception() chain
+         # - return a Request object: stops process_exception() chain
+         pass
+
+     def spider_opened(self, spider):
+         spider.logger.info('Spider opened: %s' % spider.name)
scraper/scraper/pipelines.py ADDED
@@ -0,0 +1,13 @@
+ # Define your item pipelines here
+ #
+ # Don't forget to add your pipeline to the ITEM_PIPELINES setting
+ # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+ # useful for handling different item types with a single interface
+ from itemadapter import ItemAdapter
+
+
+ class ScraperPipeline:
+     def process_item(self, item, spider):
+         return item
scraper/scraper/settings.py ADDED
@@ -0,0 +1,92 @@
+ # Scrapy settings for scraper project
+ #
+ # For simplicity, this file contains only settings considered important or
+ # commonly used. You can find more settings consulting the documentation:
+ #
+ # https://docs.scrapy.org/en/latest/topics/settings.html
+ # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+ # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+ BOT_NAME = 'scraper'
+
+ SPIDER_MODULES = ['scraper.spiders']
+ NEWSPIDER_MODULE = 'scraper.spiders'
+
+
+ # Crawl responsibly by identifying yourself (and your website) on the user-agent
+ #USER_AGENT = 'scraper (+http://www.yourdomain.com)'
+
+ # Obey robots.txt rules
+ ROBOTSTXT_OBEY = True
+
+ # Configure maximum concurrent requests performed by Scrapy (default: 16)
+ #CONCURRENT_REQUESTS = 32
+
+ # Configure a delay for requests for the same website (default: 0)
+ # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+ # See also autothrottle settings and docs
+ #DOWNLOAD_DELAY = 3
+ # The download delay setting will honor only one of:
+ #CONCURRENT_REQUESTS_PER_DOMAIN = 16
+ #CONCURRENT_REQUESTS_PER_IP = 16
+
+ # Disable cookies (enabled by default)
+ #COOKIES_ENABLED = False
+
+ # Disable Telnet Console (enabled by default)
+ #TELNETCONSOLE_ENABLED = False
+
+ # Override the default request headers:
+ #DEFAULT_REQUEST_HEADERS = {
+ #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+ #   'Accept-Language': 'en',
+ #}
+
+ # Enable or disable spider middlewares
+ # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+ #SPIDER_MIDDLEWARES = {
+ #    'scraper.middlewares.ScraperSpiderMiddleware': 543,
+ #}
+
+ # Enable or disable downloader middlewares
+ # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+ #DOWNLOADER_MIDDLEWARES = {
+ #    'scraper.middlewares.ScraperDownloaderMiddleware': 543,
+ #}
+
+ # Enable or disable extensions
+ # See https://docs.scrapy.org/en/latest/topics/extensions.html
+ #EXTENSIONS = {
+ #    'scrapy.extensions.telnet.TelnetConsole': None,
+ #}
+
+ # Configure item pipelines
+ # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ #ITEM_PIPELINES = {
+ #    'scraper.pipelines.ScraperPipeline': 300,
+ #}
+
+ # Enable and configure the AutoThrottle extension (disabled by default)
+ # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+ #AUTOTHROTTLE_ENABLED = True
+ # The initial download delay
+ #AUTOTHROTTLE_START_DELAY = 5
+ # The maximum download delay to be set in case of high latencies
+ #AUTOTHROTTLE_MAX_DELAY = 60
+ # The average number of requests Scrapy should be sending in parallel to
+ # each remote server
+ #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+ # Enable showing throttling stats for every response received:
+ #AUTOTHROTTLE_DEBUG = False
+
+ # Enable and configure HTTP caching (disabled by default)
+ # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+ #HTTPCACHE_ENABLED = True
+ #HTTPCACHE_EXPIRATION_SECS = 0
+ #HTTPCACHE_DIR = 'httpcache'
+ #HTTPCACHE_IGNORE_HTTP_CODES = []
+ #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+
+ # Set settings whose default value is deprecated to a future-proof value
+ REQUEST_FINGERPRINTER_IMPLEMENTATION = '2.7'
+ TWISTED_REACTOR = 'twisted.internet.asyncioreactor.AsyncioSelectorReactor'
scraper/scraper/spiders/__init__.py ADDED
@@ -0,0 +1,4 @@
+ # This package will contain the spiders of your Scrapy project
+ #
+ # Please refer to the documentation for information on how to create and manage
+ # your spiders.
scraper/scraper/spiders/webdatacommons_org.py ADDED
@@ -0,0 +1,81 @@
+ from typing import Dict
+ import pathlib
+ import logging
+
+ import scrapy
+ import extruct
+ from w3lib.html import get_base_url
+
+
+ class WebDataCommonsSpider(scrapy.spiders.Spider):
+     """
+     Spider for crawling pages with structured data found in the web-data-commons project.
+     http://webdatacommons.org
+
+     You can later extract the required structured information using
+     metadata.*[?"@type"=='FAQPage' || "@type"=='HowTo'][] code in jmespath
+     """
+
+     allowed_domains = ["*"]
+     name = "webdatacommons_org"
+     custom_settings = {
+         "HTTPCACHE_ENABLED": True,
+         "CONCURRENT_REQUESTS": 100,
+         "CONCURRENT_ITEMS": 100,
+         "CONCURRENT_REQUESTS_PER_DOMAIN": 3,
+         "AUTOTHROTTLE_ENABLED": False,
+         "RETRY_TIMES": 1,
+         "DOWNLOAD_DELAY": 0,
+     }
+
+     def parse(self, response):
+         base_url: str = get_base_url(response.text, response.url)
+         try:
+             metadata: Dict = extruct.extract(
+                 htmlstring=response.text, base_url=base_url, uniform=True
+             )
+             yield {
+                 "url": response.url,
+                 "metadata": metadata,
+                 "schema_type": response.meta["schema_type"],
+             }
+         except Exception as e:
+             logging.error(f"Error while parsing {response.url}: {e}")
+
+     def start_requests(self):
+         """
+         Start crawling from the index file, created with the following command (be careful, this command downloads 1.6TB of data):
+         #!/bin/bash
+
+         # download file containing urls
+         curl http://webdatacommons.org/structureddata/2022-12/files/file.list > urls.txt
+
+         # create output file
+         touch output.txt
+
+         # use parallel command to download/grep in parallel
+         cat urls.txt | pv -cN Input | parallel -j 4 "curl -s {} | zcat | grep -e '<http://schema.org/FAQPage>' -e '<http://schema.org/HowTo>'" | pv -cN Output > output.txt
+
+         """
+
+         assert self.settings.get(
+             "WEB_DATA_COMMONS"
+         ), "You should specify WEB_DATA_COMMONS setting as a path to a file or url in order run that spider"
+
+         index_file = pathlib.Path(self.settings["WEB_DATA_COMMONS"])
+
+         assert (
+             index_file.exists()
+         ), f"Local file with index '{index_file}' doesn't exist"
+
+         with index_file.open("r") as fp:
+             for l in map(str.strip, fp):
+                 _, _, schema_type, url, _ = l.split(" ", 4)
+                 schema_type = schema_type.strip("<>")
+                 url = url.strip("<>")
+                 if "://" not in url:
+                     url = "https://" + url
+
+                 yield scrapy.Request(
+                     url, callback=self.parse, meta={"schema_type": schema_type}
+                 )
scraper/scrapy.cfg ADDED
@@ -0,0 +1,11 @@
+ # Automatically created by: scrapy startproject
+ #
+ # For more information about the [deploy] section see:
+ # https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+ [settings]
+ default = scraper.settings
+
+ [deploy]
+ #url = http://localhost:6800/
+ project = scraper