")
if keys is not None:
for key in keys:
f.write("
{}
".format(key))
for row in rows:
if row is None:
return
count += 1
f.write("
")
for cell in row:
if isinstance(cell, list) or isinstance(cell, tuple):
f.write("
{}
".format(' '.join(str(x) for x in cell)))
else:
f.write("
{}
".format(cell))
f.write("
")
f.write("
")
return count
def paper_path(key='papers', paper_id=''):
    """Build the on-disk path of a cached paper JSON file.

    Layout: ./datasets/s2/<key>/<first two chars of id>/<id>/paper.json
    """
    parts = ['./datasets/s2', key, paper_id[:2], paper_id, 'paper.json']
    return '/'.join(parts)
class DbPaper(object):
    """Wrapper around a cached Semantic Scholar 'db_papers' JSON record.

    Exposes the record's fields as read-only properties and a flat
    `record()` row for report output.
    """

    def __init__(self, paper_id):
        self.paper_id = paper_id
        # read_json / paper_path are module-level helpers; the record lives at
        # ./datasets/s2/db_papers/<id[:2]>/<id>/paper.json
        self.data = read_json(paper_path('db_papers', paper_id))

    @property
    def title(self):
        return self.data['title']

    @property
    def journal(self):
        return self.data['journalName']

    @property
    def year(self):
        # .get replaces the 'key in dict' double lookup; '' when absent, as before.
        return self.data.get('year', '')

    @property
    def authors(self):
        """List of (author_id, author_name); id is '' when S2 has no id for the author."""
        return [(a['ids'][0] if a['ids'] else '', a['name'])
                for a in self.data['authors']]

    @property
    def pdf_link(self):
        """Best-effort PDF URL, or None.

        Prefers a truthy 's2PdfUrl', then falls back to the first entry of
        'pdfUrls'. Some records store the link as {'url': ...}; unwrap it.
        Uses .get so sparse records no longer raise KeyError (robustness;
        behavior for well-formed records is unchanged).
        """
        link = self.data.get('s2PdfUrl')
        if not link:
            urls = self.data.get('pdfUrls')
            link = urls[0] if urls else None
        if not link:
            return None
        # isinstance instead of type(...) == dict (handles subclasses too)
        if isinstance(link, dict) and 'url' in link:
            return link['url']
        return link

    def record(self):
        """Flat row [id, title, journal, year] for CSV/report output."""
        return [self.paper_id, self.title, self.journal, self.year]
class RawPaper(object):
    """Wrapper around a cached 'raw_papers' JSON record.

    Raw records nest the payload under a 'paper' key; when that key is
    missing the record is malformed and `self.data` is left as None
    (callers must check before using the properties).
    """

    def __init__(self, paper_id):
        self.paper_id = paper_id
        data = read_json(paper_path('raw_papers', paper_id))
        if 'paper' not in data:
            # Malformed/empty record: log it and flag via data=None.
            # (A bare return — `return None` in __init__ has no effect on
            # construction; the instance is still created either way.)
            print(data)
            self.data = None
            return
        self.data = data['paper']

    @property
    def title(self):
        return self.data['title']['text']

    @property
    def year(self):
        return self.data['year']['text']

    @property
    def journal(self):
        # Raw records sometimes omit the journal block entirely.
        if 'journal' in self.data and 'name' in self.data['journal']:
            return self.data['journal']['name']
        return 'Unknown'

    @property
    def authors(self):
        """List of (author_id, author_name); raw author entries are 1-element lists."""
        return [(a[0]['ids'][0] if a[0]['ids'] else '', a[0]['name'])
                for a in self.data['authors']]

    @property
    def pdf_link(self):
        """URL of the primary paper link, or None when absent.

        Links may be stored as {'url': ...} dicts; unwrap them.
        """
        if 'primaryPaperLink' not in self.data:
            return None
        link = self.data['primaryPaperLink']
        # isinstance instead of type(...) == dict (handles subclasses too)
        if isinstance(link, dict) and 'url' in link:
            return link['url']
        return link

    def record(self):
        """Flat row [id, title, journal, year] for CSV/report output."""
        return [self.paper_id, self.title, self.journal, self.year]
def load_paper(paper_id):
    """Load a paper by id, preferring the db_papers store over raw_papers.

    Returns a DbPaper or RawPaper, or None when neither cache file exists.
    """
    stores = (('db_papers', DbPaper), ('raw_papers', RawPaper))
    for store_key, wrapper in stores:
        if os.path.exists(paper_path(store_key, paper_id)):
            return wrapper(paper_id)
    print('no paper')
    return None
def dedupe(a):
    """Return the unique items of *a* as a sorted list.

    Equivalent to the original dict-keyed implementation: items must be
    hashable (they were dict keys) and mutually comparable (they were
    sorted). `sorted(set(...))` is the idiomatic stdlib form.
    """
    return sorted(set(a))
def read_headings(fn, paper):
    """Collect candidate heading lines from text file *fn*, stopping at the abstract.

    Returns (headings, found_abstract): the lines gathered before a line
    containing 'abstract', and whether such a line was seen. Lines matching
    the paper's journal name are skipped; lines matching author names are
    recorded into found_authors (currently local-only, never returned).
    """
    headings = []
    found_abstract = False
    found_authors = []
    journal = paper.journal.lower()
    # (id, name, lowercased name) triples for case-insensitive matching
    authors = [ (a[0], a[1], a[1].lower(),) for a in paper.authors ]
    with open(fn, 'r') as f:
        for line in f.readlines():
            # strip email addresses (and a trailing whitespace char)
            line = re.sub(r"\S*@\S*\s?", '', line)
            l = line.lower().strip()
            # skip very short lines (page numbers, fragments)
            if len(l) < 5:
                continue
            # drop a stray leading footnote/affiliation marker character
            if line[0] == 'a' or line[0] == 'b' or line[0] == 'c' or line[0] == '1' or line[0] == '2' or line[0] == '3' or line[0] == '4':
                line = line[1:]
            # strip footnote symbols from both ends
            line = line.strip("∗†‡")
            # NOTE(review): these replace() calls look like no-ops as displayed —
            # the left-hand args are presumably PDF ligature glyphs (ﬂ, ﬀ, ﬃ, ﬄ)
            # being expanded to ASCII; confirm the literals survived copy/paste.
            line = line.replace("fl", "fl").replace('ff', 'ff').replace('ffi', 'ffi').replace('ffl', 'ffl')
            line = line.strip()
            # the abstract marks the end of the front-matter we care about
            if 'abstract' in l:
                found_abstract = True
                break
            # skip the journal-name line itself
            if journal and journal in l:
                continue
            # split a possible author list: "A, B and C"
            names = [s.strip() for s in re.split(',| and ', l)]
            was_found = False
            for name in names:
                found = find_authors(authors, name)
                if found:
                    was_found = True
                    # print("found {}".format(found[1]))
                    # only record authors that have a non-empty id
                    if found[0]:
                        found_authors.append(found)
                    # NOTE(review): this continue only advances the inner name
                    # loop (it is the last statement, so it is a no-op); it may
                    # have been intended to skip the headings.append below.
                    continue
            # NOTE(review): was_found is computed but never read — author lines
            # still end up in headings. Verify this is intentional.
            headings.append(line.strip())
    return headings, found_abstract
def find_authors(authors, line):
    """Return the first author triple whose lowercased name (index 2) occurs
    as a substring of *line*, or None when no author matches."""
    return next((entry for entry in authors if entry[2] in line), None)
class AddressBook (object):
    """Lookup from institution address strings to rows of the 'institutions'
    Google Sheet.

    Builds two indexes over the sheet: `entities` maps an entity name
    (column 0) to its row index, and `lookup` maps a normalized address
    string (column 1, lowercased) to its entity name.
    """
    def __init__(self):
        entities = {}
        lookup = {}
        # each row: [entity, address, ?, lat, lng, type, ...] per find/findObject below
        keys, data = fetch_google_sheet('institutions')
        # keys, data = read_csv('reports/pdf_institutions_deduped.csv', keys=True)
        for index, line in enumerate(data):
            # prefer the row where entity == address as the canonical row,
            # otherwise keep the first row seen for this entity
            if line[0] == line[1] or line[0] not in entities:
                entities[line[0]] = index
            lookup[line[1].lower().strip()] = line[0]
        self.data = data
        self.lookup = lookup
        self.entities = entities
    def find(self, address):
        """Return the sheet row for *address*, or None.

        Tries the whole normalized address first, then each comma-separated
        part of it.
        """
        # strip leading/trailing digits (house/zip numbers) after lowercasing
        address = address.lower().strip().strip(string.digits)
        if address in self.lookup:
            entity = self.lookup[address]
            index = self.entities[entity]
            return self.data[index]
        for part in address.split(','):
            # NOTE(review): the replace() args display as identical spaces —
            # likely a non-breaking space or double space being normalized;
            # confirm the original characters survived copy/paste.
            part = part.strip().replace(' ', ' ')
            if part in self.lookup:
                entity = self.lookup[part]
                index = self.entities[entity]
                return self.data[index]
        return None
    def findObject(self, address):
        """Return {'address', 'lat', 'lng', 'type'} for *address*, or None.

        Column positions (0, 3, 4, 5) must match the sheet layout —
        TODO confirm against the 'institutions' sheet header row.
        """
        row = self.find(address)
        if row is not None:
            return {
                'address': row[0],
                'lat': row[3],
                'lng': row[4],
                'type': row[5],
            }
        return None
def parallelize(func, rows):
    """Apply *func* to each argument tuple in *rows* using a process pool
    sized to the CPUs this process may run on (chunks of 3 per worker)."""
    print("Fetching {} items".format(len(rows)))
    workers = len(os.sched_getaffinity(0))
    with Pool(processes=workers) as pool:
        pool.starmap(func, rows, 3)
def fetch_paper(s2, paper_id):
    """Fetch a paper from the S2 API client *s2*, caching the JSON on disk.

    Returns the cached record when present; otherwise queries the API
    (retrying once on a None response), writes the result to disk, and
    returns it. Returns None when the paper cannot be fetched.
    """
    os.makedirs('./datasets/s2/papers/{}/{}'.format(paper_id[0:2], paper_id), exist_ok=True)
    paper_fn = './datasets/s2/papers/{}/{}/paper.json'.format(paper_id[0:2], paper_id)
    # cache hit: no API call needed
    if os.path.exists(paper_fn):
        return read_json(paper_fn)
    print(paper_id)
    paper = s2.paper(paper_id)
    if paper is None:
        # single retry — the API occasionally returns None transiently
        print("Got none paper??")
        paper = s2.paper(paper_id)
        if paper is None:
            print("Paper not found")
            return None
    write_json(paper_fn, paper)
    return paper
def fetch_spreadsheet():
    """Open the project Google Sheet via a service-account credential."""
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive',
    ]
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        './.creds/Megapixels-ef28f91112a9.json', scope)
    client = gspread.authorize(credentials)
    return client.open_by_key("1denb7TjYsN9igHyvYah7fQ0daABW32Z30lwV7QrDJQc")
def fetch_worksheet(name="institutions"):
    """Return the worksheet called *name* from the project spreadsheet."""
    return fetch_spreadsheet().worksheet(name)
def fetch_google_sheet(name="institutions"):
    """Return (header_keys, data_rows) for the named worksheet: the first
    row as column keys and every remaining row as data."""
    all_rows = fetch_worksheet(name).get_all_values()
    return all_rows[0], all_rows[1:]