")
if keys is not None:
for key in keys:
f.write("
{}
".format(key))
for row in rows:
if row is None:
return
count += 1
f.write("
")
for cell in row:
if isinstance(cell, list) or isinstance(cell, tuple):
f.write("
{}
".format(' '.join(str(x) for x in cell)))
else:
f.write("
{}
".format(cell))
f.write("
")
f.write("
")
return count
def paper_path(key='papers', paper_id=''):
return '{}/{}/{}/{}/paper.json'.format('./datasets/s2', key, paper_id[0:2], paper_id)
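# Papers are sharded into subdirectories named after the first two characters
# of the id, e.g.
#   paper_path('raw_papers', '02ccd5f0eb9a48a6af088197b950fb30a8e3abcc')
#   -> './datasets/s2/raw_papers/02/02ccd5f0eb9a48a6af088197b950fb30a8e3abcc/paper.json'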
class DbPaper(object):
def __init__(self, paper_id):
self.paper_id = paper_id
self.data = read_json(paper_path('db_papers', paper_id))
@property
def title(self):
return self.data['title']
@property
def journal(self):
return self.data['journalName']
@property
def year(self):
        return self.data.get('year', '')
@property
def authors(self):
return [ (author['ids'][0] if len(author['ids']) else '', author['name']) for author in self.data['authors'] ]
@property
def pdf_link(self):
        link = None
        if self.data.get('s2PdfUrl'):
            link = self.data['s2PdfUrl']
        elif len(self.data.get('pdfUrls', [])):
            link = self.data['pdfUrls'][0]
        if link is None:
            return None
        if isinstance(link, dict) and 'url' in link:
            return link['url']
        return link
def record(self):
return [ self.paper_id, self.title, self.journal, self.year ]
class RawPaper(object):
def __init__(self, paper_id):
self.paper_id = paper_id
data = read_json(paper_path('raw_papers', paper_id))
# {'responseType': 'CANONICAL', 'canonicalId': '02ccd5f0eb9a48a6af088197b950fb30a8e3abcc', 'canonicalSlug': 'Scaling-for-Multimodal-3-D-Object-Detection-Stanford'}
if 'responseType' in data and data['responseType'] == 'CANONICAL':
canonical_id = data['canonicalId']
canonical_path = paper_path('raw_papers', canonical_id)
if os.path.exists(canonical_path):
data = read_json(canonical_path)
else:
# print('fetching canonical paper {}'.format(canonical_id))
# os.makedirs(paper_path('raw_papers', canonical_id).replace('paper.json', ''), exist_ok=True)
# data = s2.raw_paper(canonical_id)
data = None
if data is None:
# print("Got empty canonical paper?? {}".format(canonical_id))
self.data = None
return None
#else:
#write_json(canonical_path, data)
# print(data)
if 'paper' not in data:
print(data)
self.data = None
return None
self.data = data['paper']
@property
def title(self):
return self.data['title']['text']
@property
def year(self):
return self.data['year']['text']
@property
def journal(self):
        journal = self.data.get('journal')
        if journal and 'name' in journal:
            return journal['name']
        return 'Unknown'
@property
def authors(self):
return [ (author[0]['ids'][0] if len(author[0]['ids']) else '', author[0]['name']) for author in self.data['authors'] ]
def paper_links(self):
if self.data is None:
return []
def url_part(link):
            if isinstance(link, dict) and 'url' in link:
return link['url']
return link
paper_links = []
if 'primaryPaperLink' in self.data:
paper_links.append(url_part(self.data['primaryPaperLink']))
if 'alternatePaperLinks' in self.data:
for link in self.data['alternatePaperLinks']:
paper_links.append(url_part(link))
return paper_links
def pdf_links(self):
return [ link for link in self.paper_links() if 'pdf' in link ]
def doi_links(self):
return [ link for link in self.paper_links() if 'pdf' not in link ]
@property
def pdf_link(self):
links = self.pdf_links()
return links[0] if len(links) else None
def record(self):
return [ self.paper_id, self.title, self.journal, self.year ]
def load_paper(paper_id):
# no longer using DB papers :p
# if os.path.exists(paper_path('db_papers', paper_id))
# print('db paper')
# return DbPaper(paper_id)
if os.path.exists(paper_path('raw_papers', paper_id)):
# print('raw paper')
return RawPaper(paper_id)
print('no raw paper: {}'.format(paper_id))
return None
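# Example usage (illustrative; assumes the raw paper has already been fetched to disk):
#   paper = load_paper('02ccd5f0eb9a48a6af088197b950fb30a8e3abcc')
#   if paper is not None and paper.data is not None:
#       print(paper.record())  # [paper_id, title, journal, year]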
def dedupe(a):
    # unique values, sorted
    return sorted(set(a))
def read_headings(fn, paper):
headings = []
found_abstract = False
found_authors = []
journal = paper.journal.lower()
authors = [ (a[0], a[1], a[1].lower(),) for a in paper.authors ]
with open(fn, 'r') as f:
for line in f.readlines():
line = re.sub(r"\S*@\S*\s?", '', line)
l = line.lower().strip()
if len(l) < 5:
continue
            # drop a single leading affiliation marker (a, b, c or 1-4)
            if line[0] in ('a', 'b', 'c', '1', '2', '3', '4'):
                line = line[1:]
            line = line.strip("∗†‡")
            # expand typographic ligatures left over from PDF extraction
            line = line.replace("ﬂ", "fl").replace('ﬀ', 'ff').replace('ﬃ', 'ffi').replace('ﬄ', 'ffl')
line = line.strip()
if 'abstract' in l:
found_abstract = True
break
if journal and journal in l:
continue
names = [s.strip() for s in re.split(',| and ', l)]
            was_found = False
            for name in names:
                found = find_authors(authors, name)
                if found:
                    was_found = True
                    # print("found {}".format(found[1]))
                    if found[0]:
                        found_authors.append(found)
            if was_found:
                # author line: record it, but don't treat it as a heading
                continue
            headings.append(line.strip())
return headings, found_abstract
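# Example usage (illustrative; 'text_fn' is assumed to be a plain-text
# extraction of the paper's PDF):
#   paper = load_paper(paper_id)
#   headings, found_abstract = read_headings(text_fn, paper)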
def find_authors(authors, line):
for a in authors:
if a[2] in line:
return a
return None
class AddressBook (object):
def __init__(self):
entities = {}
lookup = {}
keys, data = fetch_google_sheet('institutions')
# keys, data = read_csv('reports/pdf_institutions_deduped.csv', keys=True)
for index, line in enumerate(data):
if line[0] == line[1] or line[0] not in entities:
entities[line[0]] = index
name = line[1].lower().strip()
if name not in lookup:
lookup[name] = line[0]
self.data = data
self.lookup = lookup
self.entities = entities
def find(self, address):
address = address.lower().strip().strip(string.digits)
if address in self.lookup:
entity = self.lookup[address]
index = self.entities[entity]
return self.data[index]
for part in address.split(','):
            part = part.strip().replace('  ', ' ')  # collapse double spaces
if part in self.lookup:
entity = self.lookup[part]
index = self.entities[entity]
return self.data[index]
return None
def findObject(self, address):
row = self.find(address)
if row is not None:
return {
'name': row[0],
'source_name': row[1],
                'street_address': row[2],
'lat': row[3],
'lng': row[4],
'type': row[5],
'country': row[7],
}
return None
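# Example usage (illustrative; the address string is hypothetical):
#   book = AddressBook()
#   match = book.findObject('Computer Science Department, Stanford University')
#   if match is not None:
#       print(match['name'], match['country'])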
def load_institutions(paperId, paper_location_lookup={}):
    if paperId in paper_location_lookup:
        items = paper_location_lookup[paperId]
        result = []
        # validation of institution_5 .. institution_8 is currently disabled,
        # so only the first four columns are checked
        for i in range(1, 5):
            if items['validate_{}'.format(i)] == 'TRUE':
                result.append(['', items['institution_{}'.format(i)], '', ''])
        return result
if os.path.exists(file_path('pdf', paperId, 'institutions.json')):
return read_json(file_path('pdf', paperId, 'institutions.json'))['institutions']
elif os.path.exists(file_path('doi', paperId, 'institutions.json')):
return read_json(file_path('doi', paperId, 'institutions.json'))['institutions']
else:
return []
def data_path(key, paper_id):
return 'datasets/s2/{}/{}/{}'.format(key, paper_id[0:2], paper_id)
def file_path(key, paper_id, fn):
return os.path.join(data_path(key, paper_id), fn)
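# e.g. file_path('pdf', '02ccd5f0eb9a48a6af088197b950fb30a8e3abcc', 'institutions.json')
# -> 'datasets/s2/pdf/02/02ccd5f0eb9a48a6af088197b950fb30a8e3abcc/institutions.json'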
def parallelize(func, rows):
print("Processing {} items".format(len(rows)))
if hasattr(os, 'sched_getaffinity'):
processCount = len(os.sched_getaffinity(0))
else:
processCount = 4
chunksize = 3
with Pool(processes=processCount) as pool:
pool.starmap(func, rows, chunksize)
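# starmap unpacks each row as positional arguments, so rows must be an
# iterable of tuples. Example (illustrative):
#   parallelize(fetch_paper, [(s2, paper_id) for paper_id in paper_ids])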
def fetch_paper(s2, paper_id, freshen=False):
    paper_fn = paper_path('papers', paper_id)
    os.makedirs(os.path.dirname(paper_fn), exist_ok=True)
if os.path.exists(paper_fn) and not freshen:
return read_json(paper_fn)
print(paper_id)
paper = s2.paper(paper_id)
if paper is None:
print("Paper not found: {}".format(paper_id))
# time.sleep(random.randint(1, 2))
paper = s2.paper(paper_id)
if paper is None:
# print("Paper not found")
return None
write_json(paper_fn, paper)
# time.sleep(random.randint(1, 2))
return paper
def fetch_spreadsheet():
"""Open the Google Spreadsheet, which contains the individual worksheets"""
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
path = os.path.dirname(os.path.abspath(__file__))
credentials = ServiceAccountCredentials.from_json_keyfile_name(os.path.join(path, '.creds/Megapixels-ef28f91112a9.json'), scope)
docid = "1denb7TjYsN9igHyvYah7fQ0daABW32Z30lwV7QrDJQc"
client = gspread.authorize(credentials)
spreadsheet = client.open_by_key(docid)
return spreadsheet
def fetch_worksheet(name="institutions"):
"""Get a reference to a particular "worksheet" from the Google Spreadsheet"""
spreadsheet = fetch_spreadsheet()
return spreadsheet.worksheet(name)
def fetch_google_sheet(name="institutions"):
"""Get all the values from a particular worksheet as a list of lists.
Returns:
:keys - the first row of the document
:lines - a list of lists with the rest of the rows"""
rows = fetch_worksheet(name).get_all_values()
keys = rows[0]
lines = rows[1:]
return keys, lines
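# Example:
#   keys, lines = fetch_google_sheet('institutions')
#   # keys  -> header row (column names)
#   # lines -> remaining rows, one list per spreadsheet row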
def fetch_google_sheet_objects(name):
    """Get all the values from a worksheet as a list of dictionaries"""
    keys, rows = fetch_google_sheet(name)
    return [dict(zip(keys, row)) for row in rows]
def fetch_google_lookup(name, item_key='key'):
    """Get all the values from a worksheet as a dictionary of dictionaries.
    Specify which field you want to use as the dictionary key."""
    keys, rows = fetch_google_sheet(name)
    lookup = {}
    for row in rows:
        rec = dict(zip(keys, row))
        lookup[rec[item_key]] = rec
    return lookup
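# Example (illustrative; assumes the worksheet has a 'key' column):
#   lookup = fetch_google_lookup('institutions', item_key='key')
#   row = lookup.get('some-key')  # full record for that key, or None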
def load_countries():
countries = read_json('countries.json')
lookup = {}
for country in countries:
name = country['name']
lookup[name] = name
if 'alt' in country:
for alt_name in country['alt']:
lookup[alt_name] = name
return lookup
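# Example (illustrative; alternate names come from the 'alt' lists in countries.json):
#   countries = load_countries()
#   canonical = countries.get('USA')  # -> canonical name, if 'USA' is listed as an alt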