Diffstat (limited to 'megapixels/commands/datasets')
-rw-r--r--   megapixels/commands/datasets/citations_to_csv.py    35
-rw-r--r--   megapixels/commands/datasets/pull_spreadsheet.py    25

2 files changed, 39 insertions, 21 deletions
diff --git a/megapixels/commands/datasets/citations_to_csv.py b/megapixels/commands/datasets/citations_to_csv.py
index c6a04bd4..f3277d7e 100644
--- a/megapixels/commands/datasets/citations_to_csv.py
+++ b/megapixels/commands/datasets/citations_to_csv.py
@@ -35,9 +35,12 @@ def cli(ctx, opt_fp_in, opt_dir_out):
   else:
     fps_in = [opt_fp_in]
 
-  log.info(f'{fps_in}')
+  log.info(f'Converting {len(fps_in)} JSON files to CSV')
 
   for fp_in in fps_in:
+
+    log.info(f'Processing: {Path(fp_in).name}')
+
     with open(fp_in, 'r') as fp:
       json_data = json.load(fp)
 
@@ -45,18 +48,22 @@ def cli(ctx, opt_fp_in, opt_dir_out):
     papers = []
     dataset_key = json_data['paper']['key']
     dataset_name = json_data['paper']['name']
-    papers_main = get_orig_paper(json_data)
-    papers += papers_main
-    papers_citations = get_citations(dataset_key, dataset_name, json_data)
-    papers += papers_citations
-    papers = [p.to_dict() for p in papers]
+    try:
+      papers_main = get_orig_paper(json_data)
+      papers += papers_main
+      papers_citations = get_citations(dataset_key, dataset_name, json_data)
+      papers += papers_citations
+      papers = [p.to_dict() for p in papers]
+    except Exception as e:
+      log.error(f'{e} on {Path(fp_in).name}')
+      continue
 
     # save
     if not opt_dir_out:
       # save to same directory replacing ext
       fp_out = fp_in.replace('.json','.csv')
     else:
-      fp_out = join(opt_dir_out, Path(fp_in).name)
+      fp_out = join(opt_dir_out, f'{Path(fp_in).stem}.csv')
 
     df_papers = pd.DataFrame.from_dict(papers)
     df_papers.index.name = 'id'
@@ -76,13 +83,13 @@ def get_citations(dataset_key, dataset_name, json_data):
     addresses = p.get('addresses', '')
     if addresses:
       for a in addresses:
-        pdf_url = '' if not p['pdf'] else p['pdf'][0]
+        pdf_url = '' if not p.get('pdf') else p.get('pdf')[0]
         paper = Paper(dataset_key, dataset_name, p['id'], p['title'], d_type, year,
           pdf_url,
           a['name'], a['type'], a['lat'], a['lng'], a['country'])
         papers.append(paper)
     else:
-      pdf_url = '' if not p['pdf'] else p['pdf'][0]
+      pdf_url = '' if not p.get('pdf') else p.get('pdf')[0]
       paper = Paper(p['key'], p['name'], d['id'], p['title'], 'main', year, pdf_url)
       papers.append(paper)
   return papers
@@ -98,13 +105,13 @@ def get_orig_paper(json_data):
     for a in addresses:
       if type(a) == str or a is None:
         continue
-      pdf_url = '' if not p['pdf'] else p['pdf'][0]
-      paper = Paper(p['key'], p['name'], p['paper_id'], p['title'], d_type, year,
+      pdf_url = '' if not p.get('pdf') else p.get('pdf')[0]
+      paper = Paper(p.get('key'), p.get('name'), p.get('paper_id'), p.get('title'), d_type, year,
         pdf_url,
-        a['name'], a['type'], a['lat'], a['lng'], a['country'])
+        a.get('name'), a.get('type'), a.get('lat'), a.get('lng'), a.get('country'))
       papers.append(paper)
   else:
-    pdf_url = '' if not p['pdf'] else p['pdf'][0]
-    paper = Paper(p['key'], p['name'], p['paper_id'], p['title'], d_type, year, pdf_url)
+    pdf_url = '' if not p.get('pdf') else p.get('pdf')[0]
+    paper = Paper(p.get('key'), p.get('name'), p.get('paper_id'), p.get('title'), d_type, year, pdf_url)
     papers.append(paper)
   return papers
diff --git a/megapixels/commands/datasets/pull_spreadsheet.py b/megapixels/commands/datasets/pull_spreadsheet.py
index b8b68094..caf5eb43 100644
--- a/megapixels/commands/datasets/pull_spreadsheet.py
+++ b/megapixels/commands/datasets/pull_spreadsheet.py
@@ -21,6 +21,10 @@ from app.utils.logger_utils import Logger
 log = Logger.getLogger()
 
 opt_sheets = ['datasets', 'relationships', 'funding', 'references', 'sources', 'tags', 'citations', 'legal']
+dataset_sheet_keys = ['key', 'name_short', 'name_full', 'url', 'dl_im', 'purpose', 'funded_by',
+  'year_start', 'year_end', 'year_published', 'images', 'videos', 'identities',
+  'faces_or_persons', 'campus', 'youtube', 'flickr', 'google', 'bing', 'comment']
+
 
 @click.command()
 @click.option('-n', '--name', 'opt_spreadsheets', multiple=True,
@@ -30,11 +34,15 @@ opt_sheets = ['datasets', 'relationships', 'funding', 'references', 'sources', '
 @click.option('--all', 'opt_all', is_flag=True,
   help='Get all sheets')
 @click.option('-o', '--output', 'opt_fp_out', required=True,
+  type=click.Path(file_okay=False, dir_okay=True),
   help='Path to directory or filename')
+@click.option('--share', 'opt_share', required=True,
+  type=click.Choice(['nyt', 'ft']),
+  help='Share filter')
 @click.option('-f', '--force', 'opt_force', is_flag=True,
   help='Force overwrite')
 @click.pass_context
-def cli(ctx, opt_spreadsheets, opt_fp_out, opt_all, opt_force):
+def cli(ctx, opt_spreadsheets, opt_fp_out, opt_all, opt_share, opt_force):
   """Fetch Google spreadsheet"""
 
   import sys
@@ -47,6 +55,12 @@ def cli(ctx, opt_spreadsheets, opt_fp_out, opt_all, opt_share, opt_force):
   for sheet_name in opt_spreadsheets:
     log.info(f'Get spreadsheet: {sheet_name}')
+    fp_out = join(opt_fp_out, f'{sheet_name}.csv')
+    fpp_out = Path(fp_out)
+    if fpp_out.exists() and not opt_force:
+      log.error(f'File "{fpp_out}" exists. Use "-f" to overwrite')
+      return
+
 
     sheet_data = fetch_google_sheet_objects(name=sheet_name)
     df_sheet = pd.DataFrame.from_dict(sheet_data)
     if sheet_name == 'datasets':
@@ -58,22 +72,19 @@ def cli(ctx, opt_spreadsheets, opt_fp_out, opt_all, opt_share, opt_force):
       fpp_out = fpp_out.parent
     else:
       fpp_out = join(opt_fp_out, f'{sheet_name}.csv')
+    log.info(f'Writing file: {fpp_out}')
     df_sheet.to_csv(fpp_out)
 
 
 def clean_datasets_sheet_ft(df):
   # clean data for FT
   df = df[df['ft_share'] == 'Y']
-  keys = ['key', 'name_short', 'name_full', 'url', 'downloaded', 'purpose', 'wild']
-  keys += ['campus', 'year_start', 'year_end', 'year_published', 'images', 'videos', 'identities', 'faces_or_persons', 'youtube', 'flickr', 'google', 'bing', 'comment']
-  return df[keys]
+  return df[dataset_sheet_keys]
 
 def clean_datasets_sheet_nyt(df):
   # clean data for FT
   df = df[df['ft_share'] == 'Y']
-  keys = ['key', 'name_short', 'name_full', 'url', 'downloaded', 'purpose', 'wild']
-  keys += ['campus', 'year_start', 'year_end', 'year_published', 'images', 'videos', 'identities', 'faces_or_persons', 'youtube', 'flickr', 'google', 'bing', 'comment']
-  return df[keys]
+  return df[dataset_sheet_keys]
 
 def fetch_spreadsheet():
   """Open the Google Spreadsheet, which contains the individual worksheets"""
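Note on the citations_to_csv.py changes: the commit swaps direct indexing (p['pdf']) for dict.get() on optional keys, and wraps each file's conversion in a try/except so one malformed JSON file is logged and skipped instead of aborting the whole batch. A minimal standalone sketch of that pattern, using only the stdlib; the field name 'pdf' matches the diff, but the convert_all() helper and its logging setup are illustrative, not from the repo:

import json
import logging
from pathlib import Path

log = logging.getLogger(__name__)

def convert_all(fps_in):
  for fp_in in fps_in:
    try:
      with open(fp_in, 'r') as fp:
        data = json.load(fp)
      # .get() returns None for a missing key instead of raising KeyError;
      # the falsy check also guards the [0] index, as in the diff's
      # p.get('pdf')[0] pattern
      pdf = data.get('pdf')
      pdf_url = '' if not pdf else pdf[0]
      print(pdf_url)
    except Exception as e:
      # log and continue: the remaining files still get processed
      log.error(f'{e} on {Path(fp_in).name}')
      continue

The trade-off of the broad "except Exception" is that genuine bugs surface only as log lines, but for a batch converter over scraped citation data, skip-and-log is usually the intended behavior.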
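Similarly, the pull_spreadsheet.py hunks add an overwrite guard: the output path is computed up front, and the command bails if the file exists unless -f/--force is set. A rough sketch of just that guard, assuming click is installed; the datasets.csv name and the stubbed write stand in for the real Google Sheets fetch:

import click
from pathlib import Path

@click.command()
@click.option('-o', '--output', 'opt_dir_out', required=True,
  type=click.Path(file_okay=False, dir_okay=True),
  help='Output directory')
@click.option('-f', '--force', 'opt_force', is_flag=True,
  help='Force overwrite')
def cli(opt_dir_out, opt_force):
  fp_out = Path(opt_dir_out) / 'datasets.csv'  # hypothetical sheet name
  if fp_out.exists() and not opt_force:
    # refuse to clobber an existing export unless -f is passed
    raise click.ClickException(f'{fp_out} exists. Use -f to overwrite')
  fp_out.parent.mkdir(parents=True, exist_ok=True)
  fp_out.write_text('id,key\n')  # placeholder for the real CSV export

if __name__ == '__main__':
  cli()

Raising click.ClickException (rather than log.error plus return, as in the diff) sets a nonzero exit code, which is friendlier to shell scripts; the diff's approach keeps the command's existing logger output instead.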
