summaryrefslogtreecommitdiff
path: root/scraper/s2-search.py
blob: 77800e329f55e76320face96a3dc08f1e630224e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import os
import sys
import csv
import subprocess
import time
import random
import re
import simplejson as json
import click
from s2 import SemanticScholarAPI
from util import *

'''
s2 search API format:
results
matchedAuthors
matchedPresentations
query
querySuggestions
results
stats
totalPages
totalResults
'''

@click.command()
@click.option('--index', '-n', default=0, help='Index of CSV (query,)')
@click.option('--refresh/--no-refresh', '-f', help='Force it to query the paper API again')
def fetch_entries(index, refresh):
  """Resolve citation titles to Semantic Scholar paper ids.

  Reads the citation list selected by ``index``, searches the S2 API for
  each cleaned title (reusing a cached JSON dump unless ``--refresh``),
  stores the top search hit per match, and writes a
  key/name/title/paper_id lookup CSV for all resolved entries.
  """
  keys, lines = read_citation_list(index)
  citation_lookup = []
  s2 = SemanticScholarAPI()
  for line in lines:
    key = line[0]
    name = line[1]
    title = line[2].strip()
    # Keep only alphanumerics, spaces and hyphens so the title is safe
    # to use as a search query.
    clean_title = re.sub(r'[^-0-9a-zA-Z ]+', '', line[2])
    if len(clean_title) < 2:
      # Nothing meaningful left to search on; skip this entry.
      continue
    dump_fn = './datasets/s2/dumps/{}.json'.format(key)
    if not refresh and os.path.exists(dump_fn):
      # Cached search response exists; avoid re-hitting the API.
      results = read_json(dump_fn)
    else:
      results = s2.search(clean_title)
      write_json(dump_fn, results)

    if not results['results']:
      print("- {}".format(title))
      continue
    print("+ {}".format(title))
    result = results['results'][0]
    paper_id = result['id']
    # fetch_paper is called for its side effects (fetching/caching the
    # full paper record); its return value was never used here.
    # NOTE(review): the entry file is written from the search hit
    # (`result`), not the fetched paper — presumably intentional, confirm.
    fetch_paper(s2, paper_id)
    entry_fn = './datasets/s2/entries/{}.json'.format(paper_id)
    write_json(entry_fn, result)
    citation_lookup.append([key, name, title, paper_id])
  write_csv("datasets/citation_lookup.csv", keys=['key', 'name', 'title', 'paper_id'], rows=citation_lookup)

if __name__ == '__main__':
  # Click parses CLI arguments (--index/--refresh) when run as a script.
  fetch_entries()