summaryrefslogtreecommitdiff
path: root/megapixels/commands/datasets/citations_to_csv.py
diff options
context:
space:
mode:
Diffstat (limited to 'megapixels/commands/datasets/citations_to_csv.py')
-rw-r--r--  megapixels/commands/datasets/citations_to_csv.py  35
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/megapixels/commands/datasets/citations_to_csv.py b/megapixels/commands/datasets/citations_to_csv.py
index c6a04bd4..f3277d7e 100644
--- a/megapixels/commands/datasets/citations_to_csv.py
+++ b/megapixels/commands/datasets/citations_to_csv.py
@@ -35,9 +35,12 @@ def cli(ctx, opt_fp_in, opt_dir_out):
else:
fps_in = [opt_fp_in]
- log.info(f'{fps_in}')
+ log.info(f'Converting {len(fps_in)} JSON files to CSV')
for fp_in in fps_in:
+
+ log.info(f'Processing: {Path(fp_in).name}')
+
with open(fp_in, 'r') as fp:
json_data = json.load(fp)
@@ -45,18 +48,22 @@ def cli(ctx, opt_fp_in, opt_dir_out):
papers = []
dataset_key = json_data['paper']['key']
dataset_name = json_data['paper']['name']
- papers_main = get_orig_paper(json_data)
- papers += papers_main
- papers_citations = get_citations(dataset_key, dataset_name, json_data)
- papers += papers_citations
- papers = [p.to_dict() for p in papers]
+ try:
+ papers_main = get_orig_paper(json_data)
+ papers += papers_main
+ papers_citations = get_citations(dataset_key, dataset_name, json_data)
+ papers += papers_citations
+ papers = [p.to_dict() for p in papers]
+ except Exception as e:
+ log.error(f'{e} on {Path(fp_in).name}')
+ continue
# save
if not opt_dir_out:
# save to same directory replacing ext
fp_out = fp_in.replace('.json','.csv')
else:
- fp_out = join(opt_dir_out, Path(fp_in).name)
+ fp_out = join(opt_dir_out, f'{Path(fp_in).stem}.csv')
df_papers = pd.DataFrame.from_dict(papers)
df_papers.index.name = 'id'
@@ -76,13 +83,13 @@ def get_citations(dataset_key, dataset_name, json_data):
addresses = p.get('addresses', '')
if addresses:
for a in addresses:
- pdf_url = '' if not p['pdf'] else p['pdf'][0]
+ pdf_url = '' if not p.get('pdf') else p.get('pdf')[0]
paper = Paper(dataset_key, dataset_name, p['id'], p['title'], d_type,
year, pdf_url,
a['name'], a['type'], a['lat'], a['lng'], a['country'])
papers.append(paper)
else:
- pdf_url = '' if not p['pdf'] else p['pdf'][0]
+ pdf_url = '' if not p.get('pdf') else p.get('pdf')[0]
paper = Paper(p['key'], p['name'], d['id'], p['title'], 'main', year, pdf_url)
papers.append(paper)
return papers
@@ -98,13 +105,13 @@ def get_orig_paper(json_data):
for a in addresses:
if type(a) == str or a is None:
continue
- pdf_url = '' if not p['pdf'] else p['pdf'][0]
- paper = Paper(p['key'], p['name'], p['paper_id'], p['title'], d_type, year,
+ pdf_url = '' if not p.get('pdf') else p.get('pdf')[0]
+ paper = Paper(p.get('key'), p.get('name'), p.get('paper_id'), p.get('title'), d_type, year,
pdf_url,
- a['name'], a['type'], a['lat'], a['lng'], a['country'])
+ a.get('name'), a.get('type'), a.get('lat'), a.get('lng'), a.get('country'))
papers.append(paper)
else:
- pdf_url = '' if not p['pdf'] else p['pdf'][0]
- paper = Paper(p['key'], p['name'], p['paper_id'], p['title'], d_type, year, pdf_url)
+ pdf_url = '' if not p.get('pdf') else p.get('pdf')[0]
+ paper = Paper(p.get('key'), p.get('name'), p.get('paper_id'), p.get('title'), d_type, year, pdf_url)
papers.append(paper)
return papers