import re
import os
import csv
import string
import gspread
from multiprocessing import Pool
import simplejson as json
from oauth2client.service_account import ServiceAccountCredentials
def read_citation_list(index=0):
    # index > 0 selects a numbered shard, e.g. './datasets/citations-1.csv'.
    filename = './datasets/citations.csv'
if index > 0:
fn, ext = os.path.splitext(filename)
filename = fn + '-' + str(index) + ext
    with open(filename, 'r', newline='', encoding='utf-8') as f:
reader = csv.reader(f)
lines = list(reader)
keys = lines[0]
lines = lines[1:]
return keys, lines
def unfussy_reader(reader):
    while True:
        try:
            yield next(reader)
        except StopIteration:
            return
        except csv.Error as e:
            # Log the malformed row and keep going.
            print(e)
            continue
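
# Illustrative sketch: unfussy_reader skips rows that csv.reader rejects
# instead of aborting the whole file. The path below is hypothetical.
def _example_unfussy_reader():
    with open('./datasets/messy.csv', 'r', newline='', encoding='utf-8') as f:
        for row in unfussy_reader(csv.reader(f)):
            print(row)
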
def read_csv(fn, keys=True, create=False):
    try:
        with open(fn, 'r', newline='', encoding='utf-8') as f:
            # reader = csv.reader( (line.replace('\0','') for line in f) )
            reader = csv.reader(f)
            lines = list(unfussy_reader(reader))
            if keys:
                keys = lines[0]
                lines = lines[1:]
                return keys, lines
            return lines
    except Exception:
        # With create=True, a missing or unreadable file yields an empty dataset.
        if create:
            return []
        raise
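
# Illustrative sketch: read a CSV and view each row as a dict keyed by the
# header row. The path is hypothetical; create=True returns [] when the file
# is missing.
def _example_read_csv():
    keys, rows = read_csv('./datasets/example.csv')
    for row in rows:
        print(dict(zip(keys, row)))
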
def csv_writer(fn):
    # Open without a context manager so the file outlives this function;
    # otherwise the returned writer would point at a closed file. The caller
    # owns the handle's lifetime.
    f = open(fn, 'w', newline='', encoding='utf-8')
    return csv.writer(f)
def write_csv(fn, keys, rows):
with open(fn, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
if keys is not None:
writer.writerow(keys)
for row in rows:
writer.writerow(row)
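
# Illustrative sketch: write_csv/read_csv round-trip with a header row. The
# output path is hypothetical.
def _example_write_csv():
    keys = ['paper_id', 'title']
    rows = [['abc123', 'An Example Paper']]
    write_csv('/tmp/example.csv', keys, rows)
    assert read_csv('/tmp/example.csv') == (keys, rows)
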
def read_text(fn):
with open(fn, 'r') as f:
return f.read()
def read_json(fn):
with open(fn, 'r') as json_file:
return json.load(json_file)
def write_json(fn, data):
with open(fn, 'w') as outfile:
json.dump(data, outfile)
def write_report(fn, title=None, keys=None, rows=()):
    with open(fn, 'w') as f:
        f.write("<html>")
        f.write("<head>")
        f.write('<meta charset="utf-8">\n')
        if title is not None:
            f.write("<title>{}</title>".format(title))
        f.write("</head>")
        f.write("<body>")
        if title is not None:
            f.write("<h1>{}</h1>\n".format(title))
        count = write_table(f, keys=keys, rows=rows)
        f.write("</body>")
        f.write("</html>")
    print("{} {}".format(fn, count))
def percent(m, n):
    if n == 0:
        # Guard against division by zero; report an empty total as 100%.
        return 100
    return round(m / n * 100)
class NameLine(object):
    def __init__(self, s):
        self.s = s.strip()
    def __str__(self):
        return '<i>' + self.s + '</i>'

class BoldLine(object):
    def __init__(self, s):
        self.s = s.strip()
    def __str__(self):
        return '<b>' + self.s + '</b>'

class LinkLine(object):
    def __init__(self, href, txt):
        self.href = href
        self.txt = txt.strip()
    def __str__(self):
        if self.href:
            return '<a href="{}">{}</a>'.format(self.href, self.txt)
        else:
            return '{}'.format(self.txt)
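
# The *Line helpers above render to HTML strings, so instances can be placed
# directly in report cells, e.g.:
#   str(BoldLine('MegaFace'))                    -> '<b>MegaFace</b>'
#   str(LinkLine('https://example.org', 'site')) -> '<a href="https://example.org">site</a>'
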
def write_table(f, keys, rows):
    count = 0
    f.write("<table>")
    if keys is not None:
        f.write("<tr>")
        for key in keys:
            f.write("<th>{}</th>".format(key))
        f.write("</tr>")
    for row in rows:
        if row is None:
            break
        count += 1
        f.write("<tr>")
        for cell in row:
            if isinstance(cell, (list, tuple)):
                f.write("<td>{}</td>".format('<br>'.join(str(x) for x in cell)))
            else:
                f.write("<td>{}</td>".format(cell))
        f.write("</tr>\n")
    f.write("</table>\n")
    return count
def paper_path(key='papers', paper_id=''):
return '{}/{}/{}/{}/paper.json'.format('./datasets/s2', key, paper_id[0:2], paper_id)
class DbPaper(object):
def __init__(self, paper_id):
self.paper_id = paper_id
self.data = read_json(paper_path('db_papers', paper_id))
@property
def title(self):
return self.data['title']
@property
def journal(self):
return self.data['journalName']
@property
def year(self):
        return self.data.get('year', '')
@property
def authors(self):
return [ (author['ids'][0] if len(author['ids']) else '', author['name']) for author in self.data['authors'] ]
@property
    def pdf_link(self):
        link = None
        if self.data.get('s2PdfUrl'):
            link = self.data['s2PdfUrl']
        elif self.data.get('pdfUrls'):
            link = self.data['pdfUrls'][0]
        if link is None:
            return None
        if isinstance(link, dict) and 'url' in link:
            return link['url']
        return link
def record(self):
return [ self.paper_id, self.title, self.journal, self.year ]
class RawPaper(object):
    def __init__(self, paper_id):
        self.paper_id = paper_id
        data = read_json(paper_path('raw_papers', paper_id))
        if 'paper' not in data:
            # Malformed record: keep data=None so callers can detect it.
            print(data)
            self.data = None
            return
        self.data = data['paper']
@property
def title(self):
return self.data['title']['text']
@property
def year(self):
return self.data['year']['text']
@property
def journal(self):
if 'journal' in self.data and 'name' in self.data['journal']:
return self.data['journal']['name']
else:
return 'Unknown'
@property
def authors(self):
return [ (author[0]['ids'][0] if len(author[0]['ids']) else '', author[0]['name']) for author in self.data['authors'] ]
@property
def pdf_link(self):
if 'primaryPaperLink' in self.data:
link = self.data['primaryPaperLink']
            if isinstance(link, dict) and 'url' in link:
return link['url']
return link
return None
def record(self):
return [ self.paper_id, self.title, self.journal, self.year ]
def load_paper(paper_id):
if os.path.exists(paper_path('db_papers', paper_id)):
# print('db paper')
return DbPaper(paper_id)
if os.path.exists(paper_path('raw_papers', paper_id)):
# print('raw paper')
return RawPaper(paper_id)
    print('no paper: {}'.format(paper_id))
return None
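
# Illustrative sketch: load_paper prefers the richer db_papers record and
# falls back to raw_papers. The paper id below is hypothetical.
def _example_load_paper():
    paper = load_paper('0123456789abcdef0123456789abcdef01234567')
    if paper is not None:
        print(paper.record())   # [paper_id, title, journal, year]
        print(paper.pdf_link)
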
def dedupe(a):
    # Unique values, sorted.
    return sorted(set(a))
def read_headings(fn, paper):
    headings = []
    found_abstract = False
    found_authors = []
    journal = paper.journal.lower()
    authors = [(a[0], a[1], a[1].lower()) for a in paper.authors]
    with open(fn, 'r') as f:
        for line in f.readlines():
            # Strip email addresses.
            line = re.sub(r"\S*@\S*\s?", '', line)
            l = line.lower().strip()
            if len(l) < 5:
                continue
            # Drop leading footnote markers (a/b/c or 1-4).
            if line[0] in 'abc1234':
                line = line[1:]
            line = line.strip("∗†‡")
            # Normalize typographic ligatures back to plain ASCII.
            line = line.replace("ﬂ", "fl").replace("ﬀ", "ff").replace("ﬃ", "ffi").replace("ﬄ", "ffl")
            line = line.strip()
            if 'abstract' in l:
                found_abstract = True
                break
            if journal and journal in l:
                continue
            names = [s.strip() for s in re.split(',| and ', l)]
            was_found = False
            for name in names:
                found = find_authors(authors, name)
                if found:
                    was_found = True
                    if found[0]:
                        found_authors.append(found)
            # Author lines are not institution headings.
            if was_found:
                continue
            headings.append(line.strip())
    return headings, found_abstract
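
# Illustrative sketch: scan an extracted first page for institution headings,
# stopping at the abstract. The text filename and paper id are hypothetical.
def _example_read_headings():
    paper = load_paper('0123456789abcdef0123456789abcdef01234567')
    if paper is not None:
        headings, found_abstract = read_headings(
            file_path('pdf', paper.paper_id, 'text.txt'), paper)
        print(found_abstract, headings)
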
def find_authors(authors, line):
for a in authors:
if a[2] in line:
return a
return None
class AddressBook(object):
def __init__(self):
entities = {}
lookup = {}
keys, data = fetch_google_sheet('institutions')
# keys, data = read_csv('reports/pdf_institutions_deduped.csv', keys=True)
for index, line in enumerate(data):
if line[0] == line[1] or line[0] not in entities:
entities[line[0]] = index
lookup[line[1].lower().strip()] = line[0]
self.data = data
self.lookup = lookup
self.entities = entities
def find(self, address):
address = address.lower().strip().strip(string.digits)
if address in self.lookup:
entity = self.lookup[address]
index = self.entities[entity]
return self.data[index]
for part in address.split(','):
            part = part.strip().replace('  ', ' ')  # collapse doubled spaces
if part in self.lookup:
entity = self.lookup[part]
index = self.entities[entity]
return self.data[index]
return None
def findObject(self, address):
row = self.find(address)
if row is not None:
return {
'address': row[0],
'lat': row[3],
'lng': row[4],
'type': row[5],
}
return None
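
# Illustrative sketch: resolve a raw affiliation string to its canonical
# institution row from the Google Sheet. The address below is hypothetical.
def _example_address_book():
    book = AddressBook()
    hit = book.findObject('Dept. of Computer Science, Example University')
    if hit is not None:
        print(hit['address'], hit['lat'], hit['lng'], hit['type'])
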
def load_institutions(paperId):
    # Prefer the PDF-derived record, fall back to the DOI-derived one.
    for key in ('pdf', 'doi'):
        fn = file_path(key, paperId, 'institutions.json')
        if os.path.exists(fn):
            return read_json(fn)['institutions']
    return []
def data_path(key, paper_id):
return 'datasets/s2/{}/{}/{}'.format(key, paper_id[0:2], paper_id)
def file_path(key, paper_id, fn):
return os.path.join(data_path(key, paper_id), fn)
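
# Example layout produced by these helpers (the paper id is hypothetical):
#   file_path('pdf', 'ab12cd34', 'institutions.json')
#     -> 'datasets/s2/pdf/ab/ab12cd34/institutions.json'
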
def parallelize(func, rows):
    print("Fetching {} items".format(len(rows)))
    chunksize = 3
    # sched_getaffinity (Linux-only) respects CPU pinning; fall back to
    # cpu_count on other platforms.
    try:
        processes = len(os.sched_getaffinity(0))
    except AttributeError:
        processes = os.cpu_count()
    with Pool(processes=processes) as pool:
        pool.starmap(func, rows, chunksize)
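
# Illustrative sketch: parallelize calls func(*row) for each tuple of
# arguments. Run it from a __main__ guard, since multiprocessing re-imports
# this module in workers. The worker and rows below are hypothetical.
def _example_parallelize():
    parallelize(print, [('abc123',), ('def456',)])
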
def fetch_paper(s2, paper_id):
    paper_fn = paper_path('papers', paper_id)
    os.makedirs(os.path.dirname(paper_fn), exist_ok=True)
    if os.path.exists(paper_fn):
        return read_json(paper_fn)
    print(paper_id)
    paper = s2.paper(paper_id)
    if paper is None:
        print("Paper not found: {}".format(paper_id))
        # Retry once; consider sleeping here to rate-limit the API.
        paper = s2.paper(paper_id)
        if paper is None:
            return None
    write_json(paper_fn, paper)
    return paper
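
# Illustrative sketch: fetch_paper only needs a client exposing .paper(id),
# and caches each response under datasets/s2/papers/. The client and paper id
# below are hypothetical.
def _example_fetch_paper(s2_client):
    paper = fetch_paper(s2_client, '0123456789abcdef0123456789abcdef01234567')
    if paper is not None:
        print(paper.get('title'))  # assuming a dict-shaped response
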
def fetch_spreadsheet():
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('./.creds/Megapixels-ef28f91112a9.json', scope)
docid = "1denb7TjYsN9igHyvYah7fQ0daABW32Z30lwV7QrDJQc"
client = gspread.authorize(credentials)
spreadsheet = client.open_by_key(docid)
return spreadsheet
def fetch_worksheet(name="institutions"):
spreadsheet = fetch_spreadsheet()
return spreadsheet.worksheet(name)
def fetch_google_sheet(name="institutions"):
rows = fetch_worksheet(name).get_all_values()
keys = rows[0]
lines = rows[1:]
return keys, lines
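
# Illustrative sketch: fetch a worksheet and view each row as a dict keyed by
# the header row, mirroring how AddressBook consumes the institutions sheet.
def _example_fetch_google_sheet():
    keys, lines = fetch_google_sheet('institutions')
    for line in lines:
        print(dict(zip(keys, line)))
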