summaryrefslogtreecommitdiff
path: root/s2-papers.py
blob: 7320c095f278d7b1971a261889ec9170bba64ff4 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import os
import sys
import csv
import subprocess
import time
import random
import re
import json
import click
from s2 import SemanticScholarAPI

'''
s2 search API format:
results
matchedAuthors
matchedPresentations
query
querySuggestions
results
stats
totalPages
totalResults
'''

# Shared Semantic Scholar API client; module-level so every helper below
# reuses one instance. NOTE(review): `s2` is a project-local module — its
# rate-limiting behavior (if any) is not visible here.
s2 = SemanticScholarAPI()

@click.command()
@click.option('--index', '-n', default=0, help='Index of CSV.')
@click.option('--depth', '-d', default=1, help='Depth to recurse.')
def fetch_papers(index, depth):
  """CLI entry point: fetch the S2 paper record for every row of a citation CSV.

  index: which numbered citations CSV to read (0 = ./datasets/citations.csv).
  depth: intended recursion depth into citations — currently unused; the
         recursion step is still a TODO (see the comment at the loop's end).
  """
  keys, lines = read_citation_list(index)
  for line in lines:
    label = line[0]  # NOTE(review): label is read but never used below
    # Sanitize the title the same way the entry files were named: keep only
    # alphanumerics, spaces and '-'.
    title = re.sub(r'[^-0-9a-zA-Z ]+', '', line[1])
    entry_fn = './datasets/s2/entries/{}.json'.format(title)
    if not os.path.exists(entry_fn):
      print('not found: {}'.format(entry_fn))
      continue
    result = read_json(entry_fn)
    paper_id = result['id']
    paper = fetch_paper(paper_id)
    # get all of the paper's citations

def fetch_paper(paper_id):
  """Return the S2 record for paper_id, using an on-disk cache.

  Records are cached at datasets/s2/papers/<first-2-chars>/<id>/paper.json.
  On a cache miss the API is queried; a None response triggers one retry
  after a long randomized back-off. Successful fetches are written to the
  cache and followed by a short sleep to rate-limit the next request.
  Returns None when the paper cannot be fetched at all.
  """
  prefix = paper_id[0:2]
  paper_dir = './datasets/s2/papers/{}/{}'.format(prefix, paper_id)
  os.makedirs(paper_dir, exist_ok=True)
  paper_fn = '{}/paper.json'.format(paper_dir)

  # Cache hit: no network call at all.
  if os.path.exists(paper_fn):
    return read_json(paper_fn)

  print(paper_id)
  record = s2.paper(paper_id)
  if record is None:
    # Possibly rate-limited — back off hard, then try exactly once more.
    print("Got none paper??")
    time.sleep(random.randint(20, 30))
    record = s2.paper(paper_id)
    if record is None:
      print("Paper not found")
      return None
  write_json(paper_fn, record)
  time.sleep(random.randint(5, 10))
  return record

def read_citation_list(index=0):
  """Load a citations CSV and return (header_row, data_rows).

  index == 0 reads ./datasets/citations.csv; any positive index reads the
  numbered variant ./datasets/citations-<index>.csv.
  """
  filename = './datasets/citations.csv'
  if index > 0:
    stem, ext = os.path.splitext(filename)
    filename = '{}-{}{}'.format(stem, index, ext)
  with open(filename, 'r') as f:
    rows = list(csv.reader(f))
  # First row is the header; everything after it is data.
  return rows[0], rows[1:]

def read_json(fn):
  """Parse the JSON file at fn and return the decoded object."""
  with open(fn, 'r') as fh:
    data = json.load(fh)
  return data
def write_json(fn, data):
  """Serialize data as JSON to the file at fn, overwriting any existing file."""
  with open(fn, 'w') as fh:
    fh.write(json.dumps(data))
def write_csv(fn, keys, rows):
  """Write rows to a CSV file at fn.

  keys: optional header row, written first when not None.
  rows: iterable of row sequences.
  """
  # newline='' is required by the csv module: without it, the writer's own
  # '\r\n' terminators get translated again on Windows, producing a blank
  # line after every record.
  with open(fn, 'w', newline='') as f:
    writer = csv.writer(f)
    if keys is not None:
      writer.writerow(keys)
    writer.writerows(rows)

if __name__ == '__main__':
  # Click parses sys.argv for --index/--depth and invokes fetch_papers.
  fetch_papers()