#!/usr/bin/env python
# coding: utf-8

# If the `arxiv` package is not installed:
# 
# ```bash
# pip install arxiv
# ```

# In[1]:


import json
import arxiv
import logging
from tqdm import tqdm

def get_info(result):
    d = {'title': result.title, 'published': str(result.published.date()), 
         'authors': [a.name for a in result.authors],
         'summary': result.summary, 'link': result.entry_id}
    return d
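

# A quick sanity check (illustrative only, not part of the bulk download):
# fetch a single result and look at the dictionary `get_info` builds.

# In[ ]:


probe = next(arxiv.Client().results(arxiv.Search(query="Generative modeling", max_results=1)))
print(json.dumps(get_info(probe), indent=2))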


# In[2]:


num_papers = 50000  # note: the arXiv API documents a ceiling of 30000 results per query, so the actual yield may fall short
# logging.basicConfig(level=logging.INFO)  # uncomment to see the client's request/retry details

client = arxiv.Client(
    page_size=10000,    # the API itself serves at most ~2000 entries per request, so this is effectively capped
    delay_seconds=60,   # pause between consecutive API requests
    num_retries=5       # retry a failed or empty page this many times before raising
)

search = arxiv.Search(
    query="Generative modeling",
    max_results=num_papers,
    sort_by=arxiv.SortCriterion.Relevance,
    sort_order=arxiv.SortOrder.Descending
)
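

# `client.results(search)` is a lazy generator: pages are fetched from the API
# only as results are consumed. A peek at the first few titles (illustrative;
# with the page_size above even this triggers a full-page request, so it may
# take a moment):

# In[ ]:


from itertools import islice

for r in islice(client.results(search), 3):
    print(r.published.date(), r.title)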


# In[3]:


data = []
while len(data) < num_papers:
    try:
        for result in tqdm(client.results(search), total=num_papers):
            data.append(get_info(result))
            if len(data) >= num_papers:
                break
        else:
            # the feed was exhausted before num_papers results were collected
            break
    except arxiv.UnexpectedEmptyPageError:
        # arXiv occasionally returns an empty page mid-stream; report and retry
        print(f'Exception on {len(data)} records')


# In[4]:


print(f'Collected {len(data)} entries in total')
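

# Because a retry after an empty-page error restarts the result stream from the
# beginning, `data` may contain duplicates. An optional dedup keyed on the entry
# link (a small extra step, not part of the original run):

# In[ ]:


data = list({d['link']: d for d in data}.values())
print(f'{len(data)} unique entries')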


# In[5]:


with open('arxiv_sample.json', 'w') as fp:
    json.dump(data, fp)
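

# To double-check the dump, the file can be read back and a record inspected
# (a minimal sketch):

# In[ ]:


with open('arxiv_sample.json') as fp:
    reloaded = json.load(fp)
print(len(reloaded), reloaded[0]['title'])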


# Pack the JSON dump and check the resulting file sizes
# 
# ```bash
# tar -zcvf arxiv_sample.tar.gz arxiv_sample.json
# ls -lh arxiv*
# ```
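

# The same packing step can also be done from Python with the standard
# `tarfile` module (an equivalent sketch of the shell commands above):

# In[ ]:


import os
import tarfile

with tarfile.open('arxiv_sample.tar.gz', 'w:gz') as tar:
    tar.add('arxiv_sample.json')
print(f"{os.path.getsize('arxiv_sample.tar.gz') / 2**20:.1f} MiB")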