summaryrefslogtreecommitdiff
path: root/scraper/s2-search.py
diff options
context:
space:
mode:
authoradamhrv <adam@ahprojects.com>2018-12-15 19:57:49 +0100
committeradamhrv <adam@ahprojects.com>2018-12-15 19:57:49 +0100
commit82b2c0b5d6d7baccbe4d574d96e18fe2078047d7 (patch)
treea8784b7ec2bc5a0451c252f66a6b786f3a2504f5 /scraper/s2-search.py
parent8e978af21c2b29f678a09701afb3ec7d65d0a6ab (diff)
parentc5b02ffab8d388e8a2925e51736b902a48a95e71 (diff)
Merge branch 'master' of github.com:adamhrv/megapixels_dev
Diffstat (limited to 'scraper/s2-search.py')
-rw-r--r--scraper/s2-search.py78
1 files changed, 78 insertions, 0 deletions
diff --git a/scraper/s2-search.py b/scraper/s2-search.py
new file mode 100644
index 00000000..e943053a
--- /dev/null
+++ b/scraper/s2-search.py
@@ -0,0 +1,78 @@
+import os
+import sys
+import csv
+import subprocess
+import time
+import random
+import re
+import simplejson as json
+import click
+from s2 import SemanticScholarAPI
+from util import *
+
+'''
+s2 search API format (top-level keys of a search response):
+matchedAuthors
+matchedPresentations
+query
+querySuggestions
+results
+stats
+totalPages
+totalResults
+'''
+
@click.command()
@click.option('--index', '-n', default=0, help='Index of CSV (query,)')
def fetch_entries(index):
    """Resolve citation titles to Semantic Scholar papers.

    Reads the citation list selected by ``--index``, searches the S2 API
    for each title, caches the raw search dump and the best-match entry
    under ``./datasets/s2/``, fetches the full paper record as a side
    effect, and writes a key -> paper_id lookup CSV.
    """
    keys, lines = read_citation_list(index)
    citation_lookup = []
    s2 = SemanticScholarAPI()
    for line in lines:
        key = line[0]
        name = line[1]
        title = line[2].strip()
        # Drop punctuation the search endpoint handles poorly; keep only
        # letters, digits, hyphens and spaces. Cleaning the *stripped*
        # title ensures whitespace-only rows are skipped by the guard
        # below instead of being sent to the API.
        clean_title = re.sub(r'[^-0-9a-zA-Z ]+', '', title)
        if len(clean_title) < 2:
            continue
        dump_fn = './datasets/s2/dumps/{}.json'.format(key)
        entry_fn = './datasets/s2/entries/{}.json'.format(key)
        result = None
        if os.path.exists(entry_fn):
            # Entry already resolved on a previous run; reuse the cache.
            result = read_json(entry_fn)
        else:
            results = s2.search(clean_title)
            write_json(dump_fn, results)  # keep the raw dump for debugging
            if len(results['results']) == 0:
                print("- {}".format(title))
            else:
                print("+ {}".format(title))
                result = results['results'][0]  # take the top hit
                write_json(entry_fn, result)
        if result:
            paper_id = result['id']
            # Fetch (and cache) the full paper record; only the on-disk
            # side effect is needed here, so the return value is discarded.
            fetch_paper(s2, paper_id)
            citation_lookup.append([key, name, title, paper_id])
    write_csv("datasets/citation_lookup.csv", keys=['key', 'name', 'title', 'paper_id'], rows=citation_lookup)
+
def fetch_paper(s2, paper_id):
    """Fetch a paper record from the S2 API, caching it on disk.

    Looks for a cached ``paper.json`` under a two-level directory keyed
    by the first two characters of ``paper_id``; on a cache miss it
    queries the API, retrying once on a ``None`` response.

    Returns the parsed paper dict, or ``None`` if the API returned
    nothing after the retry.
    """
    paper_dir = './datasets/s2/papers/{}/{}'.format(paper_id[0:2], paper_id)
    os.makedirs(paper_dir, exist_ok=True)
    paper_fn = '{}/paper.json'.format(paper_dir)
    if os.path.exists(paper_fn):
        return read_json(paper_fn)
    print(paper_id)
    # The API occasionally returns None transiently; retry once before
    # giving up (replaces the previous copy-pasted fetch/check pair).
    for attempt in range(2):
        paper = s2.paper(paper_id)
        if paper is not None:
            write_json(paper_fn, paper)
            # time.sleep(random.randint(1, 2))
            return paper
        if attempt == 0:
            print("Got none paper??")
            # time.sleep(random.randint(1, 2))
    print("Paper not found")
    return None
+
# Script entry point: invoke the click command (click parses sys.argv).
if __name__ == '__main__':
    fetch_entries()