diff options
| author | Jules Laplace <julescarbon@gmail.com> | 2018-11-25 22:19:15 +0100 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2018-11-25 22:19:15 +0100 |
| commit | ee3d0d98e19f1d8177d85af1866fd0ee431fe9ea (patch) | |
| tree | 41372528e78d4328bc2a47bbbabac7e809c58894 /scraper/s2-fetch-doi.py | |
| parent | 255b8178af1e25a71fd23703d30c0d1f74911f47 (diff) | |
moving stuff
Diffstat (limited to 'scraper/s2-fetch-doi.py')
| -rw-r--r-- | scraper/s2-fetch-doi.py | 69 |
1 file changed, 69 insertions, 0 deletions
import os
import sys
import csv
import subprocess
import time
import random
import re
import json
import click
from urllib.parse import urlparse
from s2 import SemanticScholarAPI
from util import *

# Shared Semantic Scholar client; fetch_doi() uses it to download DOI pages.
s2 = SemanticScholarAPI()


@click.command()
@click.option('--fn', '-i', default='db_paper_doi.csv', help='Filename of CSV (id, url,)')
def fetch_doi_list(fn):
    """Fetch the DOI landing page for every (paper_id, url) row in the CSV.

    Rows with an empty url are skipped. Prints the resolved domain for each
    fetched paper and a summary count at the end.
    """
    lines = read_csv(fn, keys=False)
    for line in lines:
        paper_id, url = line
        if url:
            domain = fetch_doi(paper_id, url)
            print(domain)
    print("{} papers processed".format(len(lines)))


def fetch_doi(paper_id, url, replace=False):
    """Download the DOI page for one paper and record its final domain.

    Writes the fetched body to the per-paper ``paper.doi`` file and a small
    JSON record ({paper_id, domain}) to ``paper.url``. When ``replace`` is
    True, any existing .doi/.url files are renamed aside first so a fresh
    copy is fetched.

    Returns the resolved domain (netloc of the final URL after redirects),
    or None when the paper was already fetched or the download was empty.
    """
    os.makedirs(make_doi_path(paper_id), exist_ok=True)
    doi_fn = make_doi_fn(paper_id)
    url_fn = make_url_fn(paper_id)
    txt_fn = make_txt_fn(paper_id)
    if replace and os.path.exists(doi_fn):
        os.rename(doi_fn, old_doi_fn(paper_id))
        os.rename(url_fn, old_url_fn(paper_id))
    if os.path.exists(doi_fn) or os.path.exists(txt_fn):
        # Already fetched (or PDF text already extracted) -- skip.
        # BUG FIX: was `return None, None`, which made the caller print a
        # (None, None) tuple; every exit now returns a single value.
        return None
    size, final_url = s2.fetch_doi(url, doi_fn)
    if size is None:
        print("{} empty?".format(paper_id))
        # Randomized pause between requests to avoid hammering the server.
        time.sleep(random.randint(2, 5))
        return None  # BUG FIX: was `return None, None` (inconsistent arity)
    print("{} {} kb".format(paper_id, int(size / 1024)))
    domain = urlparse(final_url).netloc
    write_json(url_fn, {
        'paper_id': paper_id,
        'domain': domain
    })
    time.sleep(random.randint(2, 5))
    return domain


# Per-paper file layout helpers. Papers are sharded by the first two
# characters of their id: ./datasets/s2/<kind>/<id[:2]>/<id>/...
def make_doi_path(paper_id):
    return './datasets/s2/doi/{}/{}'.format(paper_id[0:2], paper_id)

def make_doi_fn(paper_id):
    return './datasets/s2/doi/{}/{}/paper.doi'.format(paper_id[0:2], paper_id)

def make_url_fn(paper_id):
    return './datasets/s2/doi/{}/{}/paper.url'.format(paper_id[0:2], paper_id)

def make_txt_fn(paper_id):
    return './datasets/s2/pdf/{}/{}/paper.txt'.format(paper_id[0:2], paper_id)

# Backup names used when replace=True renames an existing fetch aside.
def old_doi_fn(paper_id):
    return './datasets/s2/doi/{}/{}/paper.doi2'.format(paper_id[0:2], paper_id)

def old_url_fn(paper_id):
    return './datasets/s2/doi/{}/{}/paper.url2'.format(paper_id[0:2], paper_id)


if __name__ == '__main__':
    fetch_doi_list()
