 s2-dump-missing-paper-ids.py | 40 ++++++++
 s2-raw-papers.py             | 44 +++++++++
 s2.py                        |  7 ++
 util.py                      | 10 +++
 4 files changed, 101 insertions(+), 0 deletions(-)
diff --git a/s2-dump-missing-paper-ids.py b/s2-dump-missing-paper-ids.py
new file mode 100644
index 00000000..72ff1c44
--- /dev/null
+++ b/s2-dump-missing-paper-ids.py
@@ -0,0 +1,40 @@
+import os
+import gzip
+import glob
+import click
+from util import *
+
+DB_PAPER_DIR = './datasets/s2/db_papers'
+RAW_PAPER_DIR = './datasets/s2/raw_papers'
+
+@click.command()
+@click.option('--fn', '-f', default='ids.json', help='List of IDs to extract from the big dataset.')
+def fetch_missing_entries(fn):
+    missing_ids = load_missing_ids(fn)
+    write_csv('./missing.csv', [[paper_id] for paper_id in missing_ids])
+
+def load_missing_ids(fn):
+    lookup = {}
+    missing_lookup = {}
+    ids = read_json(fn)
+    found_count = 0
+    missing_count = 0
+    for paper_id in ids:
+        db_paper_path = make_db_paper_path(paper_id)
+        raw_paper_path = make_raw_paper_path(paper_id)
+        if os.path.exists(db_paper_path) or os.path.exists(raw_paper_path):
+            lookup[paper_id] = True
+            found_count += 1
+        else:
+            missing_lookup[paper_id] = True
+            missing_count += 1
+    print("{} papers found, {} must be fetched".format(found_count, missing_count))
+    return missing_lookup.keys()
+
+def make_db_paper_path(paper_id):
+    return '{}/{}/{}'.format(DB_PAPER_DIR, paper_id[0:2], paper_id)
+def make_raw_paper_path(paper_id):
+    return '{}/{}/{}'.format(RAW_PAPER_DIR, paper_id[0:2], paper_id)
+
+if __name__ == '__main__':
+    fetch_missing_entries()
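
Note: this script leans on helpers from util.py that the commit only partially shows. read_json appears in the util.py hunk below, but write_csv does not. A minimal sketch of what write_csv presumably looks like, an assumption rather than part of this diff:

import csv

def write_csv(fn, rows):
    # Assumed shape: write each row (a list of fields) to fn.
    with open(fn, 'w', newline='') as f:
        csv.writer(f).writerows(rows)
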
diff --git a/s2-raw-papers.py b/s2-raw-papers.py
new file mode 100644
index 00000000..86ec8710
--- /dev/null
+++ b/s2-raw-papers.py
@@ -0,0 +1,44 @@
+import os
+import sys
+import csv
+import subprocess
+import time
+import random
+import re
+import json
+import click
+from s2 import SemanticScholarAPI
+from util import *
+
+s2 = SemanticScholarAPI()
+
+@click.command()
+@click.option('--fn', '-i', default='missing.csv', help='CSV of paper IDs to fetch, one per row.')
+def fetch_raw_papers(fn):
+    lines = read_csv(fn, keys=False)
+    for line in lines:
+        paper_id = line[0]
+        fetch_raw_paper(paper_id)
+
+def fetch_raw_paper(paper_id):
+    os.makedirs(make_raw_paper_path(paper_id), exist_ok=True)
+    paper_fn = make_raw_paper_fn(paper_id)
+    if os.path.exists(paper_fn):
+        return read_json(paper_fn)
+    print(paper_id)
+    paper = s2.raw_paper(paper_id)
+    if paper is None:
+        print("Got empty paper?? {}".format(paper_id))
+        time.sleep(random.randint(5, 10))
+        return None
+    write_json(paper_fn, paper)
+    time.sleep(random.randint(5, 10))
+    return paper
+
+def make_raw_paper_path(paper_id):
+    return './datasets/s2/raw_papers/{}/{}'.format(paper_id[0:2], paper_id)
+def make_raw_paper_fn(paper_id):
+    return './datasets/s2/raw_papers/{}/{}/paper.json'.format(paper_id[0:2], paper_id)
+
+if __name__ == '__main__':
+    fetch_raw_papers()
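
Note: fetch_raw_paper also calls a write_json helper that is absent from this diff; presumably it mirrors read_json in util.py. A hedged sketch:

import json

def write_json(fn, data):
    # Assumed counterpart to util.read_json.
    with open(fn, 'w') as json_file:
        json.dump(data, json_file)
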
diff --git a/s2.py b/s2.py
index 5ebe507b..c9de75e6 100644
--- a/s2.py
+++ b/s2.py
@@ -115,6 +115,7 @@ class SemanticScholarAPI(object):
     AUTHOR_ENDPOINT = "{}/{}".format(BASE_URL, "author")
     PAPER_ENDPOINT = "{}/{}".format(BASE_URL, "paper")
     SEARCH_ENDPOINT = "https://www.semanticscholar.org/api/1/search"
+    RAW_PAPER_ENDPOINT = "https://www.semanticscholar.org/api/1/paper"
 
     @staticmethod
     def paper(paper_id, **kwargs):
@@ -133,6 +134,12 @@ class SemanticScholarAPI(object):
return "http://pdfs.semanticscholar.org/{}/{}.pdf".format(paper_id[:4], paper_id[4:])
@staticmethod
+ def raw_paper(paper_id, **kwargs):
+ url = "{}/{}".format(SemanticScholarAPI.RAW_PAPER_ENDPOINT, paper_id)
+ resp = requests.get(url, params=kwargs)
+ return None if resp.status_code != 200 else resp.json() # Paper(**resp.json())
+
+ @staticmethod
def search(q):
resp = requests.post(SemanticScholarAPI.SEARCH_ENDPOINT, json={
'authors': [],
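
Note: the new raw_paper method can be exercised directly; a minimal sketch (the paper ID below is a placeholder, and the returned dict's shape is whatever the unofficial semanticscholar.org endpoint serves):

from s2 import SemanticScholarAPI

# Placeholder 40-char SHA-style paper ID; substitute a real one.
paper = SemanticScholarAPI.raw_paper('0123456789abcdef0123456789abcdef01234567')
if paper is not None:
    print(sorted(paper.keys()))
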
diff --git a/util.py b/util.py
index dae3f67c..9f321465 100644
--- a/util.py
+++ b/util.py
@@ -14,6 +14,16 @@ def read_citation_list(index=0):
     lines = lines[1:]
     return keys, lines
 
+def read_csv(fn, keys=True):
+    with open(fn, 'r') as f:
+        reader = csv.reader(f)
+        lines = list(reader)
+    if keys:
+        keys = lines[0]
+        lines = lines[1:]
+        return keys, lines
+    return lines
+
 def read_json(fn):
     with open(fn, 'r') as json_file:
         return json.load(json_file)
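
Note: read_csv returns two different shapes depending on keys, so callers must unpack accordingly; for example (filenames here are illustrative):

keys, rows = read_csv('citations.csv')       # header row split off into keys
rows = read_csv('missing.csv', keys=False)   # headerless file: rows only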