summaryrefslogtreecommitdiff
path: root/megapixels/notebooks/datasets/msc
diff options
context:
space:
mode:
Diffstat (limited to 'megapixels/notebooks/datasets/msc')
-rw-r--r--megapixels/notebooks/datasets/msc/fix_embassies.ipynb215
-rw-r--r--megapixels/notebooks/datasets/msc/html2csv.ipynb189
2 files changed, 404 insertions, 0 deletions
diff --git a/megapixels/notebooks/datasets/msc/fix_embassies.ipynb b/megapixels/notebooks/datasets/msc/fix_embassies.ipynb
new file mode 100644
index 00000000..a48bbc48
--- /dev/null
+++ b/megapixels/notebooks/datasets/msc/fix_embassies.ipynb
@@ -0,0 +1,215 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Fix MSC Embassy CSV"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 82,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The dotenv extension is already loaded. To reload it, use:\n",
+ " %reload_ext dotenv\n"
+ ]
+ }
+ ],
+ "source": [
+ "%reload_ext autoreload\n",
+ "%autoreload 2\n",
+ "%load_ext dotenv\n",
+ "#%dotenv /work/megapixels_dev/env/flickr.env\n",
+ "\n",
+ "import sys, os\n",
+ "from os.path import join\n",
+ "from glob import glob, iglob\n",
+ "from pathlib import Path\n",
+ "from random import randint\n",
+ "import urllib\n",
+ "\n",
+ "from tqdm import tqdm_notebook as tqdm\n",
+ "import pandas as pd"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 76,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Paths: base embassy CSV, per-NSID account metadata, and extended output\n",
+    "fp_in = '/data_store/datasets/msc/embassies/embassies_on_flickr.csv'\n",
+    "fp_in_metadata = '/data_store/datasets/msc/embassies/embassy_meta_nsid.csv'\n",
+    "fp_out = '/data_store/datasets/msc/embassies/embassies_on_flickr_ext.csv'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 80,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Load the embassy list; blank NaNs so every field is a plain string\n",
+    "df_embassies = pd.read_csv(fp_in).fillna('')\n",
+    "embassy_records = df_embassies.to_dict('records')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 83,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Index metadata rows by Flickr NSID for constant-time lookup in the merge\n",
+    "df_meta = pd.read_csv(fp_in_metadata).fillna('')\n",
+    "meta_records = df_meta.to_dict('records')\n",
+    "meta_records_nsid = {record['nsid']: record for record in meta_records}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 86,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Merge per-account metadata into each embassy record, keyed on Flickr NSID\n",
+    "META_FIELDS = ['first_name', 'last_name', 'occupation', 'city', 'country',\n",
+    "               'email', 'facebook', 'instagram', 'join_date', 'twitter',\n",
+    "               'profile_description', 'website']\n",
+    "for embassy_record in embassy_records:\n",
+    "    nsid = embassy_record.get('nsid')\n",
+    "    if not nsid:\n",
+    "        continue\n",
+    "    meta = meta_records_nsid.get(nsid)\n",
+    "    if meta:\n",
+    "        # Copy each metadata column onto the embassy record\n",
+    "        for field in META_FIELDS:\n",
+    "            embassy_record[field] = meta[field]\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 89,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Rebuild a DataFrame from the (now extended) records\n",
+    "df_embassies_ext = pd.DataFrame.from_dict(embassy_records)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 90,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Persist the extended embassy table\n",
+    "df_embassies_ext.to_csv(fp_out, index=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Fix country"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 91,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Paths for the country-repair pass: original, extended, and fixed output\n",
+    "fp_in = '/data_store/datasets/msc/embassies/embassies_on_flickr.csv'\n",
+    "fp_in_ext = '/data_store/datasets/msc/embassies/embassies_on_flickr_ext.csv'\n",
+    "fp_out = '/data_store/datasets/msc/embassies/embassies_on_flickr_ext_02.csv'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 102,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Reload both the base and extended CSVs, then index the base rows by NSID\n",
+    "df_embassies = pd.read_csv(fp_in).fillna('')\n",
+    "embassy_records = df_embassies.to_dict('records')\n",
+    "\n",
+    "df_embassies_ext = pd.read_csv(fp_in_ext).fillna('')\n",
+    "embassy_records_ext = df_embassies_ext.to_dict('records')\n",
+    "\n",
+    "embassy_records_nsid = {record['nsid']: record for record in embassy_records}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 100,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# NOTE(review): dead scratch code experimenting with .loc/.at in-place\n",
+    "# updates — kept for reference only; nothing below executes\n",
+    "# df_embassies_copy.loc[(df_embassies['nsid'] == '124109311@N07').idxmax(),'country']\n",
+    "# df_embassies_copy.at[df_embassies_copy['nsid'] == '124109311@N07'] = 'Test'\n",
+    "# df_embassies_copy.loc[(df_embassies['nsid'] == '124109311@N07').idxmax(),'country']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 105,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Compare the country field against the original (unextended) file and\n",
+    "# report any mismatches; no record is mutated here.\n",
+    "for embassy_record_ext in embassy_records_ext:\n",
+    "    nsid = embassy_record_ext['nsid']\n",
+    "    base_record = embassy_records_nsid.get(nsid)\n",
+    "    if base_record:\n",
+    "        country = base_record.get('country')\n",
+    "        if country:\n",
+    "            country_ext = embassy_record_ext['country']\n",
+    "            if country_ext != country:\n",
+    "                print(f'set ext: {country_ext} to {country}')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "megapixels",
+ "language": "python",
+ "name": "megapixels"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/megapixels/notebooks/datasets/msc/html2csv.ipynb b/megapixels/notebooks/datasets/msc/html2csv.ipynb
new file mode 100644
index 00000000..aa819214
--- /dev/null
+++ b/megapixels/notebooks/datasets/msc/html2csv.ipynb
@@ -0,0 +1,189 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Convert MSC HTML to CSV\n",
+ "\n",
+ "- create name lists"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%reload_ext autoreload\n",
+ "%autoreload 2\n",
+ "\n",
+ "from os.path import join\n",
+ "from pathlib import Path\n",
+ "from functools import partial\n",
+ "from multiprocessing.dummy import Pool as ThreadPool\n",
+ "\n",
+ "import lxml\n",
+ "from bs4 import BeautifulSoup\n",
+ "import urllib.request\n",
+ "from tqdm import tqdm_notebook as tqdm\n",
+ "import pandas as pd\n",
+ "\n",
+ "import sys\n",
+ "sys.path.append('/work/megapixels_dev/megapixels/')\n",
+ "from app.settings import app_cfg as cfg\n",
+ "from app.utils import file_utils, im_utils"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 147,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# Input/output directories for the MSC participant HTML pages and CSVs\n",
+    "fp_dir_in = '/data_store/datasets/munich_security_conference/participants/'\n",
+    "fp_dir_out = '/data_store/datasets/munich_security_conference/participants/'\n",
+    "fp_out_all_csv = join(fp_dir_out, 'participants.csv')  # combined list across years\n",
+    "years = ['2009', '2010', '2011', '2014']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 143,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "def parse_name(name):\n",
+    "    \"\"\"Split a 'Last, First' participant name into (first, last).\n",
+    "\n",
+    "    Known names written without a comma are normalized first; any other\n",
+    "    comma-less name is reported and then raises ValueError via rindex\n",
+    "    so the gap is noticed.\n",
+    "    \"\"\"\n",
+    "    # Names like \"Ban Ki-moon\" have no comma; map them to 'Last, First'\n",
+    "    no_comma_names = {\n",
+    "        'Ban Ki-moon': 'Ki-moon, Ban',\n",
+    "        'Fu Ying': 'Ying, Fu',\n",
+    "        # unclear: https://en.wikipedia.org/wiki/Ng_Eng_Hen\n",
+    "        'Dr. Ng Eng Hen': 'Ng, Dr. Eng Hen',\n",
+    "        'Seok-soo Lee': 'Lee, Seok-soo',\n",
+    "    }\n",
+    "    if ',' not in name:\n",
+    "        if name in no_comma_names:\n",
+    "            name = no_comma_names[name]\n",
+    "        else:\n",
+    "            print(f'Could not handle: \"{name}\"')\n",
+    "    ridx = name.rindex(',')  # raises ValueError for unhandled comma-less names\n",
+    "    name_last = name[:ridx].strip()\n",
+    "    name_first = name[(ridx + 1):].strip()\n",
+    "    return name_first, name_last\n",
+    "\n",
+    "def parse_year(fp_in_html, year):\n",
+    "    \"\"\"Parse one year's participant HTML table into a list of record dicts.\n",
+    "\n",
+    "    Column layout differs per year: 2009/2014 use 'name, description';\n",
+    "    2010 uses 'first, last, description'; 2011 wraps cells in <p> tags.\n",
+    "    Only the years in the config cell are handled.\n",
+    "    \"\"\"\n",
+    "    # create soup\n",
+    "    with open(fp_in_html, 'r') as fp:\n",
+    "        data = fp.read()\n",
+    "    soup = BeautifulSoup(data, 'lxml')\n",
+    "\n",
+    "    # get rows; trows[1:] below skips the header row\n",
+    "    table = soup.find('table', attrs={'class': 'contenttable'})\n",
+    "    tbody = table.find('tbody')\n",
+    "    trows = tbody.find_all('tr')\n",
+    "\n",
+    "    # parse by year\n",
+    "    participants = []\n",
+    "    for trow in trows[1:]:\n",
+    "        tds = trow.find_all('td')  # hoisted: identical in every branch\n",
+    "        if year == '2009' or year == '2014':\n",
+    "            name = tds[0].text.strip()\n",
+    "            name_first, name_last = parse_name(name)\n",
+    "            desc = tds[1].text.strip()\n",
+    "        elif year == '2010':\n",
+    "            name_first = tds[0].text.strip()\n",
+    "            name_last = tds[1].text.strip()\n",
+    "            desc = tds[2].text.strip()\n",
+    "        elif year == '2011':\n",
+    "            name = tds[0].find_all('p')[0].text.strip()\n",
+    "            name_first, name_last = parse_name(name)\n",
+    "            desc = tds[1].find_all('p')[0].text.strip()\n",
+    "\n",
+    "        obj = {'name_first': name_first, 'name_last': name_last, 'description': desc, 'year': year}\n",
+    "        participants.append(obj)\n",
+    "\n",
+    "    return participants"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 148,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2009\n",
+ "Wrote: /data_store/datasets/munich_security_conference/participants/2009.csv with 346 items\n",
+ "2010\n",
+ "Wrote: /data_store/datasets/munich_security_conference/participants/2010.csv with 317 items\n",
+ "2011\n",
+ "Wrote: /data_store/datasets/munich_security_conference/participants/2011.csv with 341 items\n",
+ "2014\n",
+ "Wrote: /data_store/datasets/munich_security_conference/participants/2014.csv with 467 items\n",
+ "Wrote: /data_store/datasets/munich_security_conference/participants/participants.csv with 1471 items\n"
+ ]
+ }
+ ],
+ "source": [
+    "# Parse each year's HTML into a per-year CSV, accumulating a combined list\n",
+    "participants_all = []\n",
+    "for year in years:\n",
+    "    # Read HTML from the input dir; write CSVs to the output dir\n",
+    "    fp_in_html = join(fp_dir_in, f'{year}.html')\n",
+    "    fp_out_csv = join(fp_dir_out, f'{year}.csv')\n",
+    "    participants = parse_year(fp_in_html, year)\n",
+    "    participants_all += participants\n",
+    "    df = pd.DataFrame.from_dict(participants)\n",
+    "    df.to_csv(fp_out_csv, index=False)\n",
+    "    print(f'Wrote: {fp_out_csv} with {len(participants)} items')\n",
+    "\n",
+    "# write total list across all years\n",
+    "df = pd.DataFrame.from_dict(participants_all)\n",
+    "df.to_csv(fp_out_all_csv, index=False)\n",
+    "print(f'Wrote: {fp_out_all_csv} with {len(participants_all)} items')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 94,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 95,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "megapixels",
+ "language": "python",
+ "name": "megapixels"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}