| author | Jules Laplace <julescarbon@gmail.com> | 2019-03-08 21:02:58 +0100 |
|---|---|---|
| committer | Jules Laplace <julescarbon@gmail.com> | 2019-03-08 21:02:58 +0100 |
| commit | 8f0d59a5f44c71aeb4eecf60cb323d2fe0306a3e (patch) | |
| tree | 397f8b4b738217fd87f3460b44e06c1fb21d03ac /scraper | |
| parent | 1b086936a927aed44e505b12239c78fefa1e058c (diff) | |
possibly freshen raw papers
Diffstat (limited to 'scraper')
| -rw-r--r-- | scraper/s2-geocode-spreadsheet.py | 4 |
|---|---|---|
| -rw-r--r-- | scraper/s2-papers.py | 5 |
| -rw-r--r-- | scraper/util.py | 4 |
3 files changed, 9 insertions, 4 deletions
```diff
diff --git a/scraper/s2-geocode-spreadsheet.py b/scraper/s2-geocode-spreadsheet.py
index 32d7c669..c36625a6 100644
--- a/scraper/s2-geocode-spreadsheet.py
+++ b/scraper/s2-geocode-spreadsheet.py
@@ -110,6 +110,8 @@ def update_country_from_address(address, i, countries, worksheet):
     country = None
     if possible_country in countries:
         country = countries[possible_country]
+    elif "CHINA" in address:
+        country = "China"
     elif "China" in address:
         country = "China"
     elif "Hong Kong" in address:
@@ -118,6 +120,8 @@ def update_country_from_address(address, i, countries, worksheet):
         country = "Singapore"
     elif "Taiwan" in address:
         country = "Taiwan"
+    elif "Saudi Arabia" in address:
+        country = "Saudi Arabia"
     elif "Russia" in address:
         country = "Russia"
     elif "Ukraine" in address:
diff --git a/scraper/s2-papers.py b/scraper/s2-papers.py
index 782dc198..40489e75 100644
--- a/scraper/s2-papers.py
+++ b/scraper/s2-papers.py
@@ -13,7 +13,8 @@ from util import *
 s2 = SemanticScholarAPI()
 
 @click.command()
-def fetch_papers():
+@click.option('--freshen/--no-freshen', '-f', help='Force it to query the paper API again')
+def fetch_papers(freshen):
     addresses = AddressBook()
     lookup_keys, lines = fetch_google_sheet('citation_lookup')
     report_keys = [
@@ -30,7 +31,7 @@ def fetch_papers():
         paper_id = line[3]
         if paper_id == '':
             continue
-        paper = fetch_paper(s2, paper_id)
+        paper = fetch_paper(s2, paper_id, freshen)
         if paper is None:
             continue
         db_paper = load_paper(paper_id)
diff --git a/scraper/util.py b/scraper/util.py
index fdbc0534..830dbe8b 100644
--- a/scraper/util.py
+++ b/scraper/util.py
@@ -386,10 +386,10 @@ def parallelize(func, rows):
     with Pool(processes=processCount) as pool:
         pool.starmap(func, rows, chunksize)
 
-def fetch_paper(s2, paper_id):
+def fetch_paper(s2, paper_id, freshen=False):
     os.makedirs('./datasets/s2/papers/{}/{}'.format(paper_id[0:2], paper_id), exist_ok=True)
     paper_fn = './datasets/s2/papers/{}/{}/paper.json'.format(paper_id[0:2], paper_id)
-    if os.path.exists(paper_fn):
+    if os.path.exists(paper_fn) and not freshen:
        return read_json(paper_fn)
     print(paper_id)
     paper = s2.paper(paper_id)
```
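For context, a minimal Python sketch of the cache-or-refetch behaviour this commit introduces: with the default `--no-freshen`, an existing `paper.json` on disk is reused, while `--freshen` forces another call to the paper API. The function name `cached_fetch`, the stand-in `api` object, and the JSON write-back step are illustrative assumptions; only the path layout and the `freshen` check mirror `fetch_paper()` in `scraper/util.py`.

```python
import json
import os


def cached_fetch(api, paper_id, freshen=False):
    """Sketch of fetch_paper()'s caching: reuse paper.json unless freshen is set.

    `api` stands in for the SemanticScholarAPI wrapper; the write-back of the
    fetched paper is an assumption, not shown in the diff above.
    """
    cache_dir = './datasets/s2/papers/{}/{}'.format(paper_id[0:2], paper_id)
    cache_fn = os.path.join(cache_dir, 'paper.json')
    os.makedirs(cache_dir, exist_ok=True)

    # Default path: an existing cached paper.json is trusted and returned.
    if os.path.exists(cache_fn) and not freshen:
        with open(cache_fn) as f:
            return json.load(f)

    # Freshen path: always re-query the API, then refresh the on-disk cache.
    paper = api.paper(paper_id)
    if paper is not None:
        with open(cache_fn, 'w') as f:
            json.dump(paper, f)
    return paper
```

Declaring the option as `--freshen/--no-freshen` is Click's standard boolean-flag pattern, so `freshen` defaults to `False` and cached papers are only re-fetched when the flag is passed explicitly.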
