diff options
| author | Jules Laplace <julescarbon@gmail.com> | 2018-11-03 17:22:47 +0100 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-11-03 17:22:47 +0100 |
| commit | 17b049ccc6e5c3a3b5dd7e4dd787bfe984e55fd6 (patch) | |
| tree | d84fbf608f2dd6836f59e649d4fb6e90f4960a92 | |
| parent | 8ec3acdb68f0107ae2520516817aeda66c6ed9a6 (diff) | |
dump pdf urls
| -rw-r--r-- | s2-dump-pdf-urls.py | 37 |
1 file changed, 37 insertions, 0 deletions
import os
import glob
import simplejson as json
import click
from util import *

# Root directory holding one paper.json per paper (nested arbitrarily deep).
PAPER_JSON_DIR = 'datasets/s2/db_papers'

@click.command()
def s2_dump_pdf_urls():
    """Collect paper ids from every paper.json under PAPER_JSON_DIR and dump them to CSV.

    Walks the directory tree recursively, letting process_paper() (from util)
    pick the best PDF URL for each paper and accumulate entries into `ids`,
    then writes the id list to pdf_list.csv. A separate script fetches the
    URLs produced by this process.
    """
    ids = {}
    # '**' with recursive=True matches paper.json at any nesting depth.
    for fn in glob.iglob('{}/**/paper.json'.format(PAPER_JSON_DIR), recursive=True):
        process_paper(fn, ids)
    id_list = list(ids.keys())
    print("Wrote {} ids".format(len(id_list)))
    write_csv('pdf_list.csv', id_list)
    # NOTE(review): removed a leftover search/fetch loop that followed here.
    # It iterated an undefined local `lines` and wrote to an undefined local
    # `dump_fn`, so it would have raised NameError immediately after the CSV
    # was written — it appears copy-pasted from a different script.

if __name__ == '__main__':
    s2_dump_pdf_urls()
