id,country,dataset_name,key,lat,lng,loc,loc_type,paper_id,paper_type,paper_url,title,year
0,,YFCC100M,yfcc_100m,0.0,0.0,,,,main,,YFCC100M: the new data in multimedia research,2016
1,United States,YFCC100M,yfcc_100m,38.7768106,-94.9442982,Amazon,company,d2067c7d31bebf89249966c3d8ee9395dd8531b8,citation,http://skamalas.com/docs/ICPR_2016.pdf,Visual congruent ads for image search,2016
2,Netherlands,YFCC100M,yfcc_100m,52.356678,4.95187,"Centrum Wiskunde & Informatica (CWI), The Netherlands",edu,d2067c7d31bebf89249966c3d8ee9395dd8531b8,citation,http://skamalas.com/docs/ICPR_2016.pdf,Visual congruent ads for image search,2016
3,Spain,YFCC100M,yfcc_100m,41.3789689,2.1797941,"DTIC, Universitat Pompeu Fabra & DCC, Universidad de Chile, Chile",edu,d2067c7d31bebf89249966c3d8ee9395dd8531b8,citation,http://skamalas.com/docs/ICPR_2016.pdf,Visual congruent ads for image search,2016
4,United States,YFCC100M,yfcc_100m,33.0723372,-96.810299,"Futurewei Technologies Inc., USA",company,d2067c7d31bebf89249966c3d8ee9395dd8531b8,citation,http://skamalas.com/docs/ICPR_2016.pdf,Visual congruent ads for image search,2016
5,United States,YFCC100M,yfcc_100m,40.7574714,-73.9877318,Yahoo,company,d2067c7d31bebf89249966c3d8ee9395dd8531b8,citation,http://skamalas.com/docs/ICPR_2016.pdf,Visual congruent ads for image search,2016
6,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,010f0f4929e6a6644fb01f0e43820f91d0fad292,citation,,YFCC100M: the new data in multimedia research,2016
7,United States,YFCC100M,yfcc_100m,37.4523809,-122.1797586,In-Q-Tel,mil,010f0f4929e6a6644fb01f0e43820f91d0fad292,citation,,YFCC100M: the new data in multimedia research,2016
8,United States,YFCC100M,yfcc_100m,40.7574714,-73.9877318,Yahoo,company,010f0f4929e6a6644fb01f0e43820f91d0fad292,citation,,YFCC100M: the new data in multimedia research,2016
9,United States,YFCC100M,yfcc_100m,39.1254938,-77.22293475,National Institute of Standards and Technology,edu,36631dcbb9452ea3d35b19b2de6ef709022531a6,citation,https://pdfs.semanticscholar.org/0109/93ae9742f7f4c40763a25ded237723de60b5.pdf,"TRECVID 2016 : Evaluating Video Search , Video Event Detection , Localization , and Hyperlinking",2016
10,Ireland,YFCC100M,yfcc_100m,53.38522185,-6.25740874,Dublin City University,edu,36631dcbb9452ea3d35b19b2de6ef709022531a6,citation,https://pdfs.semanticscholar.org/0109/93ae9742f7f4c40763a25ded237723de60b5.pdf,"TRECVID 2016 : Evaluating Video Search , Video Event Detection , Localization , and Hyperlinking",2016
11,Netherlands,YFCC100M,yfcc_100m,51.816701,5.865272,Radboud University,edu,36631dcbb9452ea3d35b19b2de6ef709022531a6,citation,https://pdfs.semanticscholar.org/0109/93ae9742f7f4c40763a25ded237723de60b5.pdf,"TRECVID 2016 : Evaluating Video Search , Video Event Detection , Localization , and Hyperlinking",2016
12,Netherlands,YFCC100M,yfcc_100m,52.2380139,6.8566761,University of Twente,edu,36631dcbb9452ea3d35b19b2de6ef709022531a6,citation,https://pdfs.semanticscholar.org/0109/93ae9742f7f4c40763a25ded237723de60b5.pdf,"TRECVID 2016 : Evaluating Video Search , Video Event Detection , Localization , and Hyperlinking",2016
13,France,YFCC100M,yfcc_100m,43.614386,7.071125,EURECOM,edu,36631dcbb9452ea3d35b19b2de6ef709022531a6,citation,https://pdfs.semanticscholar.org/0109/93ae9742f7f4c40763a25ded237723de60b5.pdf,"TRECVID 2016 : Evaluating Video Search , Video Event Detection , Localization , and Hyperlinking",2016
14,China,YFCC100M,yfcc_100m,40.00229045,116.32098908,Tsinghua University,edu,788da403d220e2cc08dca9cffbe1f84b3c68469a,citation,https://arxiv.org/pdf/1708.06656.pdf,Causally Regularized Learning with Agnostic Data Selection Bias.,2018
15,China,YFCC100M,yfcc_100m,22.5447154,113.9357164,Tencent,company,788da403d220e2cc08dca9cffbe1f84b3c68469a,citation,https://arxiv.org/pdf/1708.06656.pdf,Causally Regularized Learning with Agnostic Data Selection Bias.,2018
16,Italy,YFCC100M,yfcc_100m,45.069428,7.6889006,University of Turin,edu,61b17f719bab899dd50bcc3be9d55673255fe102,citation,https://arxiv.org/pdf/1608.02289.pdf,Detecting Sarcasm in Multimodal Social Platforms,2016
17,United States,YFCC100M,yfcc_100m,40.7574714,-73.9877318,Yahoo,company,61b17f719bab899dd50bcc3be9d55673255fe102,citation,https://arxiv.org/pdf/1608.02289.pdf,Detecting Sarcasm in Multimodal Social Platforms,2016
18,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,2577211aeaaa1f2245ddc379564813bee3d46c06,citation,https://arxiv.org/pdf/1512.06974.pdf,Seeing through the Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels,2016
19,United States,YFCC100M,yfcc_100m,47.6423318,-122.1369302,Microsoft,company,2577211aeaaa1f2245ddc379564813bee3d46c06,citation,https://arxiv.org/pdf/1512.06974.pdf,Seeing through the Human Reporting Bias: Visual Classifiers from Noisy Human-Centric Labels,2016
20,United States,YFCC100M,yfcc_100m,37.3936717,-122.0807262,Facebook,company,b6397f818f67faad6a36de8480212f6e7e82e71c,citation,,Tag Prediction at Flickr: A View from the Darkroom,2017
21,United States,YFCC100M,yfcc_100m,47.6543238,-122.30800894,University of Washington,edu,b6397f818f67faad6a36de8480212f6e7e82e71c,citation,,Tag Prediction at Flickr: A View from the Darkroom,2017
22,United States,YFCC100M,yfcc_100m,37.7749295,-122.4194155,"Yahoo Research, San Francisco, CA",company,b6397f818f67faad6a36de8480212f6e7e82e71c,citation,,Tag Prediction at Flickr: A View from the Darkroom,2017
23,United States,YFCC100M,yfcc_100m,37.36883,-122.0363496,"Yahoo Research, Sunnyvale, CA, USA",company,b6397f818f67faad6a36de8480212f6e7e82e71c,citation,,Tag Prediction at Flickr: A View from the Darkroom,2017
24,Germany,YFCC100M,yfcc_100m,53.1474921,8.1817645,University of Oldenburg,edu,d3dae5c4f47a0457ebe2297d7e70432521c82cc6,citation,https://pdfs.semanticscholar.org/d3da/e5c4f47a0457ebe2297d7e70432521c82cc6.pdf,The Benchmarking Initiative for Multimedia Evaluation: MediaEval 2016,2017
25,Netherlands,YFCC100M,yfcc_100m,51.816701,5.865272,Radboud University,edu,d3dae5c4f47a0457ebe2297d7e70432521c82cc6,citation,https://pdfs.semanticscholar.org/d3da/e5c4f47a0457ebe2297d7e70432521c82cc6.pdf,The Benchmarking Initiative for Multimedia Evaluation: MediaEval 2016,2017
26,United States,YFCC100M,yfcc_100m,42.57054745,-88.55578627,University of Geneva,edu,d3dae5c4f47a0457ebe2297d7e70432521c82cc6,citation,https://pdfs.semanticscholar.org/d3da/e5c4f47a0457ebe2297d7e70432521c82cc6.pdf,The Benchmarking Initiative for Multimedia Evaluation: MediaEval 2016,2017
27,Ireland,YFCC100M,yfcc_100m,53.38522185,-6.25740874,Dublin City University,edu,d3dae5c4f47a0457ebe2297d7e70432521c82cc6,citation,https://pdfs.semanticscholar.org/d3da/e5c4f47a0457ebe2297d7e70432521c82cc6.pdf,The Benchmarking Initiative for Multimedia Evaluation: MediaEval 2016,2017
28,United Kingdom,YFCC100M,yfcc_100m,51.24303255,-0.59001382,University of Surrey,edu,8a5be2b370c5a1df06e1063b306b2874706c24dc,citation,http://epubs.surrey.ac.uk/814067/1/konstanz-natural-video.pdf,The Konstanz natural video database (KoNViD-1k),2017
29,Germany,YFCC100M,yfcc_100m,47.689426,9.1868777,University of Konstanz,edu,8a5be2b370c5a1df06e1063b306b2874706c24dc,citation,http://epubs.surrey.ac.uk/814067/1/konstanz-natural-video.pdf,The Konstanz natural video database (KoNViD-1k),2017
30,Hungary,YFCC100M,yfcc_100m,47.4782828,19.0521075,Hungarian Academy of Sciences,edu,8a5be2b370c5a1df06e1063b306b2874706c24dc,citation,http://epubs.surrey.ac.uk/814067/1/konstanz-natural-video.pdf,The Konstanz natural video database (KoNViD-1k),2017
31,Australia,YFCC100M,yfcc_100m,-33.8809651,151.20107299,University of Technology Sydney,edu,062d67af7677db086ef35186dc936b4511f155d7,citation,http://openaccess.thecvf.com/content_cvpr_2016/papers/Chang_They_Are_Not_CVPR_2016_paper.pdf,They are Not Equally Reliable: Semantic Event Search Using Differentiated Concept Classifiers,2016
32,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,062d67af7677db086ef35186dc936b4511f155d7,citation,http://openaccess.thecvf.com/content_cvpr_2016/papers/Chang_They_Are_Not_CVPR_2016_paper.pdf,They are Not Equally Reliable: Semantic Event Search Using Differentiated Concept Classifiers,2016
33,United States,YFCC100M,yfcc_100m,47.6543238,-122.30800894,University of Washington,edu,697f0e24f24b016cef9474db485fe61a667f07b8,citation,https://arxiv.org/pdf/1802.02568.pdf,VISER: Visual Self-Regularization,2018
34,United States,YFCC100M,yfcc_100m,32.970001,-96.7054311,Yahoo Research,company,697f0e24f24b016cef9474db485fe61a667f07b8,citation,https://arxiv.org/pdf/1802.02568.pdf,VISER: Visual Self-Regularization,2018
35,United States,YFCC100M,yfcc_100m,40.4439789,-79.9464634,Intel Labs,company,5f96af88dfef2bff4ed8a49ceca909efb701d1d5,citation,https://pdfs.semanticscholar.org/6d3f/b3ef83a5d5a905250a1ec986e720ae422ed4.pdf,Addressing the Dark Side of Vision Research: Storage,2017
36,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,db989600b1857cea9abd14dba9c10808030c7d33,citation,,Delving Deep into Personal Photo and Video Search,2017
37,United States,YFCC100M,yfcc_100m,42.718568,-84.47791571,Michigan State University,edu,db989600b1857cea9abd14dba9c10808030c7d33,citation,,Delving Deep into Personal Photo and Video Search,2017
38,United States,YFCC100M,yfcc_100m,40.7127753,-74.0059728,"Yahoo Research, New York City, NY, USA",company,db989600b1857cea9abd14dba9c10808030c7d33,citation,,Delving Deep into Personal Photo and Video Search,2017
39,United States,YFCC100M,yfcc_100m,37.7749295,-122.4194155,"Yahoo Research, San Francisco, CA",company,db989600b1857cea9abd14dba9c10808030c7d33,citation,,Delving Deep into Personal Photo and Video Search,2017
40,United States,YFCC100M,yfcc_100m,37.36883,-122.0363496,"Yahoo Research, Sunnyvale, CA, USA",company,db989600b1857cea9abd14dba9c10808030c7d33,citation,,Delving Deep into Personal Photo and Video Search,2017
41,United States,YFCC100M,yfcc_100m,41.2097516,-73.8026467,IBM T.J. Watson Research Center,company,9e1b0f50417867317a8cb8fe35c6b2617ad9641e,citation,https://arxiv.org/pdf/1901.10436.pdf,Diversity in Faces,2019
42,United States,YFCC100M,yfcc_100m,32.87935255,-117.23110049,"University of California, San Diego",edu,a9be20954e9177d8b2bc39747acdea4f5496f394,citation,http://acsweb.ucsd.edu/~yuw176/report/cvpr_2016.pdf,Event-Specific Image Importance,2016
43,United States,YFCC100M,yfcc_100m,47.6423318,-122.1369302,Microsoft,company,9bbc952adb3e3c6091d45d800e806d3373a52bac,citation,https://pdfs.semanticscholar.org/9bbc/952adb3e3c6091d45d800e806d3373a52bac.pdf,Learning Visual Classifiers using Human-centric Annotations,2015
44,Singapore,YFCC100M,yfcc_100m,1.29500195,103.84909214,Singapore Management University,edu,c8b4beb3dd4d6594fcad58de0394c731d112780f,citation,https://pdfs.semanticscholar.org/c8b4/beb3dd4d6594fcad58de0394c731d112780f.pdf,Leveraging Multimodal Semantics and Sentiments Information in Event Understanding and Summarization,2017
45,Canada,YFCC100M,yfcc_100m,43.6129484,-79.5590303,Samsung Electronics,company,c8b4beb3dd4d6594fcad58de0394c731d112780f,citation,https://pdfs.semanticscholar.org/c8b4/beb3dd4d6594fcad58de0394c731d112780f.pdf,Leveraging Multimodal Semantics and Sentiments Information in Event Understanding and Summarization,2017
46,Singapore,YFCC100M,yfcc_100m,1.2962018,103.77689944,National University of Singapore,edu,c8b4beb3dd4d6594fcad58de0394c731d112780f,citation,https://pdfs.semanticscholar.org/c8b4/beb3dd4d6594fcad58de0394c731d112780f.pdf,Leveraging Multimodal Semantics and Sentiments Information in Event Understanding and Summarization,2017
47,United States,YFCC100M,yfcc_100m,40.7574714,-73.9877318,Yahoo,company,f0f876b5bf3d442ef9eb017a6fa873bc5d5830c8,citation,https://arxiv.org/pdf/1604.06480.pdf,"LOH and behold: Web-scale visual search, recommendation and clustering using Locally Optimized Hashing",2016
48,Australia,YFCC100M,yfcc_100m,-37.7963689,144.9611738,The University of Melbourne,edu,3ad6bd5c34b0866019b54f5976d644326069cb3d,citation,http://people.eng.unimelb.edu.au/limk2/2016-ICAPS-groupTourRec.pdf,Towards next generation touring: personalized group tours,2016
49,Australia,YFCC100M,yfcc_100m,-33.917347,151.2312675,National ICT Australia,edu,3ad6bd5c34b0866019b54f5976d644326069cb3d,citation,http://people.eng.unimelb.edu.au/limk2/2016-ICAPS-groupTourRec.pdf,Towards next generation touring: personalized group tours,2016
50,Australia,YFCC100M,yfcc_100m,-37.8087465,144.9638875,RMIT University,edu,3ad6bd5c34b0866019b54f5976d644326069cb3d,citation,http://people.eng.unimelb.edu.au/limk2/2016-ICAPS-groupTourRec.pdf,Towards next generation touring: personalized group tours,2016
51,Denmark,YFCC100M,yfcc_100m,55.659635,12.590958,IT University of Copenhagen,edu,92fb2cb7f9a54360ea4442f902472aded5e88c74,citation,https://pure.itu.dk/portal/files/82406569/tmm_2017_blackthorn.pdf,Blackthorn: Large-Scale Interactive Multimodal Learning,2018
52,Netherlands,YFCC100M,yfcc_100m,52.3553655,4.9501644,University of Amsterdam,edu,92fb2cb7f9a54360ea4442f902472aded5e88c74,citation,https://pure.itu.dk/portal/files/82406569/tmm_2017_blackthorn.pdf,Blackthorn: Large-Scale Interactive Multimodal Learning,2018
53,Singapore,YFCC100M,yfcc_100m,1.2962018,103.77689944,National University of Singapore,edu,f2cbdd5f24c2d6a4f33734636cc220f0825042f0,citation,https://arxiv.org/pdf/1708.00634.pdf,Dual-Glance Model for Deciphering Social Relationships,2017
54,United States,YFCC100M,yfcc_100m,44.97308605,-93.23708813,University of Minnesota,edu,f2cbdd5f24c2d6a4f33734636cc220f0825042f0,citation,https://arxiv.org/pdf/1708.00634.pdf,Dual-Glance Model for Deciphering Social Relationships,2017
55,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,d0ac9913a3b1784f94446db2f1fb4cf3afda151f,citation,https://arxiv.org/pdf/1607.04780.pdf,Exploiting Multi-modal Curriculum in Noisy Web Data for Large-scale Concept Learning,2016
56,China,YFCC100M,yfcc_100m,34.250803,108.983693,Xi’an Jiaotong University,edu,d0ac9913a3b1784f94446db2f1fb4cf3afda151f,citation,https://arxiv.org/pdf/1607.04780.pdf,Exploiting Multi-modal Curriculum in Noisy Web Data for Large-scale Concept Learning,2016
57,Netherlands,YFCC100M,yfcc_100m,52.0021256,4.3732982,"Delft University of Technology, Netherlands",edu,5674ace2c666f6af53a2a58279ade6ebd271e8c7,citation,https://pdfs.semanticscholar.org/5e11/24345969a536fd5fa78db05b6149ea262a69.pdf,Exploiting Visual-based Intent Classification for Diverse Social Image Retrieval,2017
58,Netherlands,YFCC100M,yfcc_100m,51.816701,5.865272,Radboud University,edu,5674ace2c666f6af53a2a58279ade6ebd271e8c7,citation,https://pdfs.semanticscholar.org/5e11/24345969a536fd5fa78db05b6149ea262a69.pdf,Exploiting Visual-based Intent Classification for Diverse Social Image Retrieval,2017
59,China,YFCC100M,yfcc_100m,34.2469152,108.91061982,Northwestern Polytechnical University,edu,5ed63317cdef429f77499d9de0e58402ed1f687e,citation,https://arxiv.org/pdf/1702.05878.pdf,From Photo Streams to Evolving Situations,2017
60,Thailand,YFCC100M,yfcc_100m,13.7972777,100.3263216,Mahidol University,edu,5ed63317cdef429f77499d9de0e58402ed1f687e,citation,https://arxiv.org/pdf/1702.05878.pdf,From Photo Streams to Evolving Situations,2017
61,United States,YFCC100M,yfcc_100m,38.0333742,-84.5017758,University of Kentucky,edu,a851f32d4a4bffd6f95ac67c2ef1b25b8c4e5480,citation,http://bmvc2018.org/contents/papers/0586.pdf,Learning Geo-Temporal Image Features.,2018
62,United States,YFCC100M,yfcc_100m,38.6480445,-90.3099667,Washington University,edu,a851f32d4a4bffd6f95ac67c2ef1b25b8c4e5480,citation,http://bmvc2018.org/contents/papers/0586.pdf,Learning Geo-Temporal Image Features.,2018
63,Canada,YFCC100M,yfcc_100m,48.4634067,-123.3116935,University of Victoria,edu,8a2e3453d5f88ce6ce73cc7731800cd512f95e64,citation,https://arxiv.org/pdf/1711.05971.pdf,Learning to Find Good Correspondences,2018
64,Austria,YFCC100M,yfcc_100m,47.05821,15.46019568,Graz University of Technology,edu,8a2e3453d5f88ce6ce73cc7731800cd512f95e64,citation,https://arxiv.org/pdf/1711.05971.pdf,Learning to Find Good Correspondences,2018
65,Netherlands,YFCC100M,yfcc_100m,52.356678,4.95187,"Centrum Wiskunde & Informatica, Amsterdam, Netherlands",edu,cbd0f4006df1b2661f2c3a711d95727d61756afe,citation,,Multimodal Classification of Moderated Online Pro-Eating Disorder Content,2017
66,United States,YFCC100M,yfcc_100m,33.776033,-84.39884086,Georgia Institute of Technology,edu,cbd0f4006df1b2661f2c3a711d95727d61756afe,citation,,Multimodal Classification of Moderated Online Pro-Eating Disorder Content,2017
67,United States,YFCC100M,yfcc_100m,33.7756178,-84.396285,Georgia Tech,edu,cbd0f4006df1b2661f2c3a711d95727d61756afe,citation,,Multimodal Classification of Moderated Online Pro-Eating Disorder Content,2017
68,United States,YFCC100M,yfcc_100m,37.7749295,-122.4194155,"Yahoo Research, San Francisco, CA",company,cbd0f4006df1b2661f2c3a711d95727d61756afe,citation,,Multimodal Classification of Moderated Online Pro-Eating Disorder Content,2017
69,Australia,YFCC100M,yfcc_100m,-37.7963689,144.9611738,The University of Melbourne,edu,26861e41e5b44774a2801e1cd76fd56126bbe257,citation,https://pdfs.semanticscholar.org/2686/1e41e5b44774a2801e1cd76fd56126bbe257.pdf,Personalized Tour Recommendation Based on User Interests and Points of Interest Visit Durations,2015
70,Australia,YFCC100M,yfcc_100m,-33.917347,151.2312675,National ICT Australia,edu,26861e41e5b44774a2801e1cd76fd56126bbe257,citation,https://pdfs.semanticscholar.org/2686/1e41e5b44774a2801e1cd76fd56126bbe257.pdf,Personalized Tour Recommendation Based on User Interests and Points of Interest Visit Durations,2015
71,Australia,YFCC100M,yfcc_100m,-33.8809651,151.20107299,University of Technology Sydney,edu,fc8fb68a7e3b79c37108588671c0e1abf374f501,citation,https://cs.uwaterloo.ca/~y328yu/mypapers/pami17.pdf,Semantic Pooling for Complex Event Analysis in Untrimmed Videos,2017
72,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,fc8fb68a7e3b79c37108588671c0e1abf374f501,citation,https://cs.uwaterloo.ca/~y328yu/mypapers/pami17.pdf,Semantic Pooling for Complex Event Analysis in Untrimmed Videos,2017
73,United States,YFCC100M,yfcc_100m,37.4585796,-122.17560525,SRI International,edu,33737f966cca541d5dbfb72906da2794c692b65b,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w28/papers/Mensink_Spotting_Audio-Visual_Inconsistencies_CVPR_2017_paper.pdf,Spotting Audio-Visual Inconsistencies (SAVI) in Manipulated Video,2017
74,Netherlands,YFCC100M,yfcc_100m,52.3553655,4.9501644,University of Amsterdam,edu,33737f966cca541d5dbfb72906da2794c692b65b,citation,http://openaccess.thecvf.com/content_cvpr_2017_workshops/w28/papers/Mensink_Spotting_Audio-Visual_Inconsistencies_CVPR_2017_paper.pdf,Spotting Audio-Visual Inconsistencies (SAVI) in Manipulated Video,2017
75,Australia,YFCC100M,yfcc_100m,-35.2776999,149.118527,Australian National University,edu,2ef0adfaf84def97e88ae77f887f4497ddc9ccbb,citation,https://arxiv.org/pdf/1706.09067.pdf,Structured Recommendation,2017
76,Australia,YFCC100M,yfcc_100m,-35.2776999,149.118527,CSIRO,edu,2ef0adfaf84def97e88ae77f887f4497ddc9ccbb,citation,https://arxiv.org/pdf/1706.09067.pdf,Structured Recommendation,2017
77,Singapore,YFCC100M,yfcc_100m,1.2962018,103.77689944,National University of Singapore,edu,6e50c32f7244e3556eb879f24b7de8410f3177f6,citation,https://arxiv.org/pdf/1812.05917.pdf,Visual Social Relationship Recognition,2018
78,United States,YFCC100M,yfcc_100m,44.97399,-93.2277285,University of Minnesota-Twin Cities,edu,6e50c32f7244e3556eb879f24b7de8410f3177f6,citation,https://arxiv.org/pdf/1812.05917.pdf,Visual Social Relationship Recognition,2018
79,Australia,YFCC100M,yfcc_100m,-37.7963689,144.9611738,The University of Melbourne,edu,24301df85a669c86ae58962b5645b04a66c63cb1,citation,https://arxiv.org/pdf/1808.08023.pdf,A Jointly Learned Context-Aware Place of Interest Embedding for Trip Recommendations,2018
80,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,f1b35a675017b9eabd70a4bb4ec90a61117e4ad2,citation,http://www.cs.cmu.edu/~yunwang/papers/interspeech17.pdf,A Transfer Learning Based Feature Extractor for Polyphonic Sound Event Detection Using Connectionist Temporal Classification.,2017
81,Australia,YFCC100M,yfcc_100m,-34.9189226,138.60423668,University of Adelaide,edu,86973c8c9adef3b6a36c31c2682f2179e3013ae1,citation,https://pdfs.semanticscholar.org/8697/3c8c9adef3b6a36c31c2682f2179e3013ae1.pdf,Active Learning from Noisy Tagged Images,2018
82,Italy,YFCC100M,yfcc_100m,45.069428,7.6889006,University of Turin,edu,06d10f906ac9023b5566c70a2600384b8c1b24c3,citation,https://arxiv.org/pdf/1711.00536.pdf,Beautiful and Damned. Combined Effect of Content Quality and Social Ties on User Engagement,2017
83,Japan,YFCC100M,yfcc_100m,33.5934539,130.3557837,Information Technologies Institute,edu,ea985e35b36f05156f82ac2025ad3fe8037be0cd,citation,https://pdfs.semanticscholar.org/ea98/5e35b36f05156f82ac2025ad3fe8037be0cd.pdf,CERTH/CEA LIST at MediaEval Placing Task 2015,2015
84,Japan,YFCC100M,yfcc_100m,35.6572957,139.54255868,Tokyo Denki University,edu,666300af8ffb8c903223f32f1fcc5c4674e2430b,citation,https://arxiv.org/pdf/1703.07920.pdf,Changing Fashion Cultures,2017
85,China,YFCC100M,yfcc_100m,24.4399419,118.09301781,Xiamen University,edu,b3e50a64709a62628105546e392cf796f95ea0fb,citation,https://arxiv.org/pdf/1804.04312.pdf,Clustering via Boundary Erosion,2018
86,Thailand,YFCC100M,yfcc_100m,13.65450525,100.49423171,Robotics Institute,edu,b1398234454ee3c9bc5a20f6d2d00232cb79622c,citation,https://pdfs.semanticscholar.org/b139/8234454ee3c9bc5a20f6d2d00232cb79622c.pdf,Combining Low-Density Separators with CNNs,2016
87,Switzerland,YFCC100M,yfcc_100m,46.5190557,6.5667576,EPFL,edu,e8dbdd936c132a1cfb0ecdffce05292ee282263f,citation,http://wp.internetsociety.org/ndss/wp-content/uploads/sites/25/2018/03/NDSS2018_06B-1_Olteanu_Slides.pdf,Consensual and Privacy-Preserving Sharing of Multi-Subject and Interdependent Data,2018
88,United Kingdom,YFCC100M,yfcc_100m,51.7534538,-1.25400997,University of Oxford,edu,20a1350815c4588a2380414bc78a7e215a2e3955,citation,https://arxiv.org/pdf/1807.05636.pdf,Cross Pixel Optical Flow Similarity for Self-Supervised Learning,2018
89,United States,YFCC100M,yfcc_100m,37.43131385,-122.16936535,Stanford University,edu,1e54025a6b399bfc210a52a8c3314e8f570c2204,citation,https://arxiv.org/pdf/1511.07571.pdf,DenseCap: Fully Convolutional Localization Networks for Dense Captioning,2016
90,Ireland,YFCC100M,yfcc_100m,53.308244,-6.2241652,University College Dublin,edu,cc45fb67772898c36519de565c9bd0d1d11f1435,citation,https://forensicsandsecurity.com/papers/EvaluatingFacialAgeEstimation.pdf,Evaluating Automated Facial Age Estimation Techniques for Digital Forensics,2018
91,Italy,YFCC100M,yfcc_100m,46.0658836,11.1159894,University of Trento,edu,27f8b01e628f20ebfcb58d14ea40573d351bbaad,citation,https://pdfs.semanticscholar.org/27f8/b01e628f20ebfcb58d14ea40573d351bbaad.pdf,Events based Multimedia Indexing and Retrieval,2017
92,Germany,YFCC100M,yfcc_100m,47.689426,9.1868777,University of Konstanz,edu,da30d5e0cf214c1d86f629081493fa55e5a27efc,citation,https://www.uni-konstanz.de/mmsp/pubsys/publishedFiles/HoLiSa18.pdf,Expertise screening in crowdsourcing image quality,2018
93,United States,YFCC100M,yfcc_100m,45.51181205,-122.68492999,Portland State University,edu,90eb833df9614da495712f4c1fbb65f8e7d9b356,citation,https://pdfs.semanticscholar.org/c12d/09f36feaa03a533d87eb3ceef5bc76989f05.pdf,Improved Scoring Models for Semantic Image Retrieval Using Scene Graphs,2017
94,Australia,YFCC100M,yfcc_100m,-37.7963689,144.9611738,University of Melbourne,edu,c82840923eeded245a8dab2dd102d8b0cf96758a,citation,https://pdfs.semanticscholar.org/c828/40923eeded245a8dab2dd102d8b0cf96758a.pdf,KDGAN: Knowledge Distillation with Generative Adversarial Networks,2018
95,Germany,YFCC100M,yfcc_100m,47.689426,9.1868777,University of Konstanz,edu,a1ff747cf512c8156620d9c17cb6ed8d21a76ad6,citation,https://arxiv.org/pdf/1803.08489.pdf,KonIQ-10k: Towards an ecologically valid and large-scale IQA database,2018
96,United States,YFCC100M,yfcc_100m,37.8687126,-122.25586815,"University of California, Berkeley",edu,35d181da0b939bdf3bdf579969e5fe69e277e03e,citation,https://arxiv.org/pdf/1612.06370.pdf,Learning Features by Watching Objects Move,2017
97,Thailand,YFCC100M,yfcc_100m,13.65450525,100.49423171,Robotics Institute,edu,774ae9c6b2a83c6891b5aeeb169cfd462d45f715,citation,https://pdfs.semanticscholar.org/774a/e9c6b2a83c6891b5aeeb169cfd462d45f715.pdf,Learning from Small Sample Sets by Combining Unsupervised Meta-Training with CNNs,2016
98,Australia,YFCC100M,yfcc_100m,-35.2776999,149.118527,Australian National University,edu,2ce4e06a9fe107ff29a34ed4a8771222cbaacc9c,citation,https://arxiv.org/pdf/1608.07051.pdf,Learning Points and Routes to Recommend Trajectories,2016
99,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,6eb5f375d67dd690ec3b134de7caecde461e8c72,citation,http://ijcai.org/Proceedings/16/Papers/250.pdf,Learning to detect concepts from webly-labeled video data,2016
100,China,YFCC100M,yfcc_100m,34.250803,108.983693,Xi’an Jiaotong University,edu,6eb5f375d67dd690ec3b134de7caecde461e8c72,citation,http://ijcai.org/Proceedings/16/Papers/250.pdf,Learning to detect concepts from webly-labeled video data,2016
101,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,22954dd92a795d7f381465d1b353bcc41901430d,citation,https://arxiv.org/pdf/1604.04279.pdf,Learning Visual Storylines with Skipping Recurrent Neural Networks,2016
102,United States,YFCC100M,yfcc_100m,40.4441619,-79.94272826,Carnegie Mellon University,edu,ceac30061d8f7985987448f4712c49eeb98efad2,citation,https://arxiv.org/pdf/1708.01336.pdf,MemexQA: Visual Memex Question Answering,2017