import os
import operator

import click
import simplejson as json

from util import *

# Geocoded citation reports are published to the site; citations that could not
# be geocoded are written to a separate directory for review.
DIR_PUBLIC_CITATIONS = "../site/datasets/final"
DIR_UNKNOWN_CITATIONS = "../site/datasets/unknown"

addresses = AddressBook()
paper_location_lookup = fetch_google_lookup('paper_locations', item_key='paper_id')


@click.command()
def s2_final_report():
    """Build a citation report for every dataset marked as shared."""
    megapixels = load_megapixels_lookup()
    items = []
    for item in megapixels.values():
        if item['dataset'].get('ft_share') == 'Y':
            items.append((item,))
    parallelize(process_paper, items)


def process_paper(row):
    """Collect geocoded and unknown citations across all papers for one dataset
    and write the two JSON report files."""
    aggregate_citations = {}
    unknown_citations = {}
    address_list = []
    papers = []
    for paper_id in row['paper_ids']:
        res = process_single_paper(row, paper_id, addresses, aggregate_citations, unknown_citations)
        if res:
            papers.append(res)
            if res['address']:
                address_list.append(res['address'])
    if not papers:
        return

    with open('{}/{}.json'.format(DIR_PUBLIC_CITATIONS, row['key']), 'w') as f:
        json.dump({
            'id': papers[0]['paper_id'],
            'dataset': row['dataset'],
            'paper': papers[0],
            'addresses': address_list,
            'additional_papers': papers[1:],
            'citations': list(aggregate_citations.values()),
        }, f)

    with open('{}/{}.json'.format(DIR_UNKNOWN_CITATIONS, row['key']), 'w') as f:
        json.dump({
            'id': papers[0]['paper_id'],
            'citations': list(unknown_citations.values()),
        }, f)
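
# `parallelize` is imported from util and not shown here. Since `items` is built
# as a list of 1-tuples, it is assumed to unpack each tuple into the worker call,
# i.e. process_paper(*item). A minimal sketch of that assumed contract using
# multiprocessing (hypothetical; the real util helper may differ):
#
#     from multiprocessing import Pool
#
#     def parallelize(func, items, processes=8):
#         # each entry in items is an argument tuple; starmap calls func(*entry)
#         with Pool(processes=processes) as pool:
#             return pool.starmap(func, items)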


def process_single_paper(row, paper_id, addresses, aggregate_citations, unknown_citations):
    """Load one paper, geocode it, and classify each of its citations as
    geocoded (aggregate_citations) or unknown (unknown_citations)."""
    res = {
        'paper_id': '',
        'key': '',
        'title': '',
        'year': '',
        'pdf': '',
        'address': '',
    }
    fn = file_path('papers', paper_id, 'paper.json')
    with open(fn, 'r') as f:
        data = json.load(f)
    print('>> {} {}'.format(data['paperId'], row['key']))

    paper = load_paper(data['paperId'])
    if paper is None:
        print("Paper missing! {}".format(data['paperId']))
        return None

    res['key'] = row['key']
    res['name'] = row['name']
    res['paper_id'] = paper.paper_id
    res['title'] = paper.title
    res['year'] = paper.year
    res['pdf'] = paper.pdf_links()
    res['doi'] = paper.doi_links()

    # Geocode the paper itself: use the first institution that resolves to an address.
    paper_institutions = load_institutions(paper.paper_id, paper_location_lookup)
    paper_address = None
    for inst in sorted(paper_institutions, key=operator.itemgetter(1)):
        institution = inst[1]
        if paper_address is None:
            paper_address = addresses.findObject(institution)
    if paper_address:
        res['address'] = paper_address

    for cite in data['citations']:
        citationId = cite['paperId']
        if citationId in aggregate_citations or citationId in unknown_citations:
            continue  # already classified while processing another paper

        seen_here = {}
        citation = load_paper(citationId)
        has_pdf = os.path.exists(file_path('pdf', citationId, 'paper.txt'))
        has_doi = os.path.exists(file_path('doi', citationId, 'paper.doi'))
        if citation is None or citation.data is None:
            print("Citation missing! {}".format(citationId))
            continue

        # First pass: geocode through the institutions already resolved for this citation.
        institutions = load_institutions(citationId, paper_location_lookup)
        geocoded_addresses = []
        address = None
        for inst in sorted(institutions, key=operator.itemgetter(1)):
            institution = inst[1]
            next_address = addresses.findObject(institution)
            if next_address and next_address['name'] not in seen_here:
                seen_here[next_address['name']] = True
                address = next_address
                geocoded_addresses.append(next_address)

        # Fallback: scan headings extracted from the citation's PDF text for
        # anything the address book recognizes.
        if not address and has_pdf:
            headings, found_abstract = read_headings(file_path('pdf', citationId, 'paper.txt'), citation)
            for heading in headings:
                possible_address = heading.lower().strip()
                if possible_address:
                    next_address = addresses.findObject(possible_address)
                    if next_address and next_address['name'] not in seen_here:
                        seen_here[next_address['name']] = True
                        address = next_address
                        geocoded_addresses.append(next_address)

        if address:
            aggregate_citations[citationId] = {
                'id': citationId,
                'title': citation.title,
                'addresses': geocoded_addresses,
                'year': citation.year,
                'pdf': citation.pdf_links(),
            }
        else:
            unknown_citations[citationId] = {
                'id': citationId,
                'title': citation.title,
                'year': citation.year,
                'pdf': citation.pdf_links(),
            }
    return res


def load_megapixels_lookup():
    """Read the citation_lookup sheet and group verified paper ids by dataset key."""
    keys, rows = fetch_google_sheet('citation_lookup')
    dataset_lookup = fetch_google_lookup('datasets')
    lookup = {}
    for row in rows:
        rec = dict(zip(keys, row))
        # Skip rows without a paper id or that have not been verified.
        if rec['paper_id'] == "" or rec['verified'] not in (1, '1'):
            continue
        paper_key = rec['key']
        if paper_key not in lookup:
            rec['paper_ids'] = []
            lookup[paper_key] = rec
        lookup[paper_key]['paper_ids'].append(rec['paper_id'])
        if paper_key in dataset_lookup:
            lookup[paper_key]['dataset'] = dataset_lookup[paper_key]
        else:
            print("not in datasets lookup:", paper_key)
            lookup[paper_key]['dataset'] = {}
    return lookup


if __name__ == '__main__':
    s2_final_report()
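
# Usage sketch (assumes util's Google Sheets helpers and credentials are
# configured; the filename is illustrative):
#
#     $ python s2_final_report.py
#
# For each shared dataset this writes <key>.json with its geocoded citations to
# ../site/datasets/final, and the citations that could not be geocoded to
# ../site/datasets/unknown.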