From 2278adead1ff16115f8b989dc316bdf9efe9e37d Mon Sep 17 00:00:00 2001
From: Jules Laplace
Date: Sat, 3 Nov 2018 18:41:23 +0100
Subject: s2-dump-db-pdf-urls.py

---
 s2-dump-db-pdf-urls.py | 37 +++++++++++++++++++++++++++++++++++++
 s2-dump-pdf-urls.py    | 32 --------------------------------
 2 files changed, 37 insertions(+), 32 deletions(-)
 create mode 100644 s2-dump-db-pdf-urls.py
 delete mode 100644 s2-dump-pdf-urls.py

diff --git a/s2-dump-db-pdf-urls.py b/s2-dump-db-pdf-urls.py
new file mode 100644
index 00000000..520b513e
--- /dev/null
+++ b/s2-dump-db-pdf-urls.py
@@ -0,0 +1,37 @@
+import os
+import glob
+import simplejson as json
+import click
+from util import *
+
+PAPER_JSON_DIR = 'datasets/s2/db_papers'
+
+@click.command()
+def s2_dump_pdf_urls():
+    # loop over all the papers in db_papers
+    # get all the PDF urls, pick the best one
+    # store it and the paper id
+    # another script will fetch the urls from this process
+    rows = [process_paper(fn) for fn in glob.iglob('{}/**/paper.json'.format(PAPER_JSON_DIR), recursive=True)]
+    print("Wrote {} rows".format(len(rows)))
+    write_csv('db_paper_pdf_list.csv', keys=['Paper ID', 'PDF URL', 'IEEE URL', 'Extra URL'], rows=rows)
+
+def process_paper(fn):
+    paper = read_json(fn)
+    paper_id = paper['id']
+    pdf_url = None
+    ieee_url = None
+    extra_url = None
+    if paper['s2PdfUrl']:
+        pdf_url = paper['s2PdfUrl']
+    for url in paper['pdfUrls']:
+        if 'ieeexplore.ieee.org' in url:
+            ieee_url = url
+        elif pdf_url is None and 'pdf' in url:
+            pdf_url = url
+        else:
+            extra_url = url
+    return [paper_id, pdf_url, ieee_url, extra_url]
+
+if __name__ == '__main__':
+    s2_dump_pdf_urls()
diff --git a/s2-dump-pdf-urls.py b/s2-dump-pdf-urls.py
deleted file mode 100644
index b833d0fc..00000000
--- a/s2-dump-pdf-urls.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-import glob
-import simplejson as json
-import click
-from util import *
-
-PAPER_JSON_DIR = 'datasets/s2/db_papers'
-
-@click.command()
-def s2_dump_pdf_urls():
-    # loop over all the papers in db_papers
-    # get all the PDF urls, pick the best one
-    # store it and the paper id
-    # another script will fetch the urls from this process
-    lookups = {}
-    for fn in glob.iglob('{}/**/paper.json'.format(PAPER_JSON_DIR), recursive=True):
-        process_paper(fn, lookups)
-    lookups_list = list(lookups.keys())
-    print("Wrote {} ids".format(len(id_list)))
-    write_csv('pdf_list.csv', id_list)
-
-def process_paper(fn, lookups):
-    paper = read_json(fn)
-    paper_id = paper['id']
-    pdf_url = None
-    if paper['s2PdfUrl']:
-        pdf_url = paper['s2PdfUrl']
-    elif len(paper['pdfUrls']):
-        pdf_url = paper['pdfUrls'][0]
-
-if __name__ == '__main__':
-    s2_dump_pdf_urls()
--
cgit v1.2.3-70-g09d2
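
Note: read_json and write_csv are imported from the local util module, which
this patch does not touch. A minimal sketch of what those helpers might look
like, inferred from the call sites above — the names match the imports, but
the bodies here are assumptions, not the repo's actual implementation:

    import csv
    import simplejson as json

    def read_json(fn):
        # Assumed helper: parse a single paper.json file into a dict.
        with open(fn) as f:
            return json.load(f)

    def write_csv(fn, keys, rows):
        # Assumed helper: write a header row (keys) followed by the data rows,
        # matching the keyword call write_csv(fn, keys=[...], rows=[...]).
        with open(fn, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(keys)
            writer.writerows(rows)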