Diffstat (limited to 'scraper/samples/s2-paper-detail.json')
| -rw-r--r-- | scraper/samples/s2-paper-detail.json | 3114 |
1 file changed, 3114 insertions, 0 deletions
diff --git a/scraper/samples/s2-paper-detail.json b/scraper/samples/s2-paper-detail.json new file mode 100644 index 00000000..5273fd54 --- /dev/null +++ b/scraper/samples/s2-paper-detail.json @@ -0,0 +1,3114 @@ +{ + "responseType": "PAPER_DETAIL", + "paper": { + "id": "e4754afaa15b1b53e70743880484b8d0736990ff", + "title": { + "text": "300 Faces In-The-Wild Challenge: database and results", + "fragments": [] + }, + "slug": "300-Faces-In-The-Wild-Challenge:-database-and-Sagonas-Antonakos", + "paperAbstract": { + "text": "Computer Vision has recently witnessed great research advance towards automatic facial points detection. Numerous methodologies have been proposed during the last few years that achieve accurate and efficient performance. However, fair comparison between these methodologies is infeasible mainly due to two issues. (a) Most existing databases, captured under both constrained and unconstrained (in-the-wild) conditions have been annotated using different mark-ups and, in most cases, the accuracy of the annotations is low. (b) Most published works report experimental results using different training/testing sets, different error metrics and, of course, landmark points with semantically different locations. In this paper, we aim to overcome the aforementioned problems by (a) proposing a semi-automatic annotation technique that was employed to re-annotate most existing facial databases under a unified protocol, and (b) presenting the 300 Faces In-The-Wild Challenge (300-W), the first facial landmark localization challenge that was organized twice, in 2013 and 2015. To the best of our knowledge, this is the first effort towards a unified annotation scheme of massive databases and a fair experimental comparison of existing facial landmark localization systems. The images and annotations of the new testing database that was used in the 300-W challenge are available from http://ibug.doc.ic.ac.uk/resources/facial-point-annotations/.", + "fragments": [] + }, + "authors": [ + [ + { + "name": "Christos Sagonas", + "ids": [ + "3320415" + ], + "slug": "Christos-Sagonas" + }, + { + "text": "Christos Sagonas", + "fragments": [] + } + ], + [ + { + "name": "Epameinondas Antonakos", + "ids": [ + "2788012" + ], + "slug": "Epameinondas-Antonakos" + }, + { + "text": "Epameinondas Antonakos", + "fragments": [] + } + ], + [ + { + "name": "Georgios Tzimiropoulos", + "ids": [ + "2610880" + ], + "slug": "Georgios-Tzimiropoulos" + }, + { + "text": "Georgios Tzimiropoulos", + "fragments": [] + } + ], + [ + { + "name": "Stefanos P. Zafeiriou", + "ids": [ + "1776444" + ], + "slug": "Stefanos-P.-Zafeiriou" + }, + { + "text": "Stefanos P. Zafeiriou", + "fragments": [] + } + ], + [ + { + "name": "Maja Pantic", + "ids": [ + "1694605" + ], + "slug": "Maja-Pantic" + }, + { + "text": "Maja Pantic", + "fragments": [] + } + ] + ], + "structuredAuthors": [ + { + "firstName": "Christos", + "middleNames": [], + "lastName": "Sagonas" + }, + { + "firstName": "Epameinondas", + "middleNames": [], + "lastName": "Antonakos" + }, + { + "firstName": "Georgios", + "middleNames": [], + "lastName": "Tzimiropoulos" + }, + { + "firstName": "Stefanos", + "middleNames": [ + "P." 
+ ], + "lastName": "Zafeiriou" + }, + { + "firstName": "Maja", + "middleNames": [], + "lastName": "Pantic" + } + ], + "year": { + "text": "2016", + "fragments": [] + }, + "venue": { + "text": "Image Vision Comput.", + "fragments": [] + }, + "citationContexts": [], + "citationStats": { + "citedByBuckets": [ + { + "startKey": 2015, + "endKey": 2015, + "count": 2, + "estimate": { + "min": 2.2728870038174662, + "value": 2.7194889455650393, + "max": 3.3020132174096997, + "confidence": 0.9 + } + }, + { + "startKey": 2016, + "endKey": 2016, + "count": 17, + "estimate": { + "min": 19.319539532448463, + "value": 23.115656037302834, + "max": 28.067112347982448, + "confidence": 0.9 + } + }, + { + "startKey": 2017, + "endKey": 2017, + "count": 45, + "estimate": { + "min": 51.139957585892994, + "value": 61.18850127521338, + "max": 74.29529739171824, + "confidence": 0.9 + } + }, + { + "startKey": 2018, + "endKey": 2018, + "count": 32, + "estimate": { + "min": 43.63943047329535, + "value": 52.21418775484875, + "max": 63.39865377426623, + "confidence": 0.9 + } + } + ], + "keyCitedByBuckets": [ + { + "startKey": 2016, + "endKey": 2016, + "count": 2 + }, + { + "startKey": 2017, + "endKey": 2017, + "count": 6 + }, + { + "startKey": 2018, + "endKey": 2018, + "count": 2 + } + ], + "numCitations": 103, + "estNumCitations": { + "min": 117.05368069659951, + "value": 140.0536806965995, + "max": 170.0536806965995, + "confidence": 0.9 + }, + "numReferences": 49, + "numKeyCitations": 11, + "numKeyReferences": 19, + "numViewableReferences": 49, + "keyCitationRate": 0.10679611650485436, + "estCitationVelocity": { + "estimate": { + "min": 42.65391605341533, + "value": 47.439973828190126, + "max": 53.68265788224421, + "confidence": 0.9 + }, + "estCitationsByRange": [ + { + "value": { + "min": 19.319539532448463, + "value": 23.115656037302834, + "max": 28.067112347982448, + "confidence": 0.9 + }, + "start": [ + 2016, + 1, + 1 + ], + "end": [ + 2016, + 12, + 31 + ] + }, + { + "value": { + "min": 51.139957585892994, + "value": 61.18850127521338, + "max": 74.29529739171824, + "confidence": 0.9 + }, + "start": [ + 2017, + 1, + 1 + ], + "end": [ + 2017, + 12, + 31 + ] + }, + { + "value": { + "min": 48.488256081439275, + "value": 58.01576417205417, + "max": 70.44294863807359, + "confidence": 0.9 + }, + "start": [ + 2018, + 1, + 1 + ], + "end": [ + 2018, + 12, + 31 + ] + } + ] + }, + "estCitationAcceleration": { + "estimate": { + "min": -0.026615969581749135, + "value": -0.026615969581749065, + "max": -0.026615969581749045, + "confidence": 0.9 + }, + "estCitationsByRange": [ + { + "value": { + "min": 51.139957585892994, + "value": 61.18850127521338, + "max": 74.29529739171824, + "confidence": 0.9 + }, + "start": [ + 2017, + 1, + 1 + ], + "end": [ + 2017, + 12, + 31 + ] + }, + { + "value": { + "min": 48.488256081439275, + "value": 58.01576417205417, + "max": 70.44294863807359, + "confidence": 0.9 + }, + "start": [ + 2018, + 1, + 1 + ], + "end": [ + 2018, + 12, + 31 + ] + } + ] + } + }, + "sources": [ + "DBLP", + "Grobid", + "Anansi", + "Crawler", + "ScienceParse", + "SPv2" + ], + "journal": { + "name": "Image Vision Comput.", + "volume": "47", + "pages": "3-18" + }, + "socialLinks": [], + "presentationUrls": [], + "doiInfo": { + "doi": "10.1016/j.imavis.2016.01.002", + "doiUrl": "http://doi.org/10.1016/j.imavis.2016.01.002" + }, + "links": [ + { + "url": "http://doi.org/10.1016/j.imavis.2016.01.002", + "linkType": "doi" + } + ], + "primaryPaperLink": { + "url": "http://doi.org/10.1016/j.imavis.2016.01.002", + "linkType": "doi" + 
}, + "alternatePaperLinks": [ + { + "url": "http://ibug.doc.ic.ac.uk/media/uploads/documents/1-s2.0-s0262885616000147-main.pdf", + "linkType": "crawler" + }, + { + "url": "https://doi.org/10.1016/j.imavis.2016.01.002", + "linkType": "dblp" + }, + { + "url": "https://spiral.imperial.ac.uk:8443/bitstream/10044/1/32322/2/300w.pdf", + "linkType": "anansi" + } + ], + "entities": [ + { + "id": "5332", + "name": "Computer vision", + "slug": "Computer-vision" + }, + { + "id": "55238", + "name": "Emoticon", + "slug": "Emoticon" + }, + { + "id": "539730", + "name": "Landmark point", + "slug": "Landmark-point" + }, + { + "id": "76540", + "name": "Semiconductor industry", + "slug": "Semiconductor-industry" + }, + { + "id": "468", + "name": "Malignant Fibrous Histiocytoma", + "slug": "Malignant-Fibrous-Histiocytoma" + } + ], + "entityRelations": [], + "blogs": [], + "videos": [], + "githubReferences": [], + "faqs": [], + "scorecardStats": [ + { + "typeKey": "highly_influential", + "score": 50, + "keyCitationCount": 11 + }, + { + "typeKey": "cited_by", + "citationRankPercent": 99, + "citationCount": 140, + "score": 10 + } + ], + "hasPdf": false + }, + "citingPapers": { + "citationType": "citingPapers", + "citations": [ + { + "id": "6742c0a26315d7354ab6b1fa62a5fffaea06da14", + "title": { + "text": "What does 2 D geometric information really tell us about 3 D face shape ?", + "fragments": [] + }, + "slug": "What-does-2-D-geometric-information-really-tell-us-Bas-Smith", + "venue": { + "text": "", + "fragments": [] + }, + "year": 2017, + "authors": [ + [ + { + "name": "Anil Bas", + "ids": [ + "39180407" + ], + "slug": "Anil-Bas" + }, + { + "text": "Anil Bas", + "fragments": [] + } + ], + [ + { + "name": "William A. P. Smith", + "ids": [ + "1687021" + ], + "slug": "William-A.-P.-Smith" + }, + { + "text": "William A. P. Smith", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "For this reason, there has been sustained interest in building feature detectors capable of accurately labelling face landmarks in uncontrolled images (Sagonas et al. 2016).", + "fragments": [ + { + "start": 152, + "end": 171 + } + ] + }, + { + "text": "Despite the small sample size, we find a pair of faces whose mean landmark error is 2.48% (i.e. they are within the expected accuracy of a landmark detector (Sagonas et al. 2016)) when they are viewed at 61cm and 488cm respectively (second and fourth image in the figure).", + "fragments": [ + { + "start": 158, + "end": 177 + } + ] + }, + { + "text": "Landmark detection on highly uncontrolled face images is now a mature research field with benchmarks (Sagonas et al. 2016) providing an indication of likely accuracy.", + "fragments": [ + { + "start": 102, + "end": 121 + } + ] + }, + { + "text": "they are within the expected accuracy of a landmark detector (Sagonas et al. 2016)) when they are viewed at 61cm and 488cm respectively (second and fourth image in the figure).", + "fragments": [ + { + "start": 61, + "end": 82 + } + ] + }, + { + "text": "Similarly, the 300 faces in the wild challenge (Sagonas et al. 2016) found that even the best methods did not obtain better than 5% accuracy for more than 50% of the landmarks.", + "fragments": [ + { + "start": 48, + "end": 67 + } + ] + }, + { + "text": "For example, state-of-the-art automatic face landmarking provides a mean landmark error under 4.5% of interocular distance for only 50% of images (according to the second conduct of the 300 Faces in the Wild challenge (Sagonas et al. 
2016)).", + "fragments": [ + { + "start": 219, + "end": 238 + } + ] + }, + { + "text": "5% of interocular distance for only 50% of images (according to the second conduct of the 300 Faces in the Wild challenge (Sagonas et al. 2016)).", + "fragments": [ + { + "start": 122, + "end": 143 + } + ] + } + ], + "isKey": true + }, + { + "id": "014e3d0fa5248e6f4634dc237e2398160294edce", + "title": { + "text": "What does 2D geometric information really tell us about 3D face shape?", + "fragments": [] + }, + "slug": "What-does-2D-geometric-information-really-tell-us-Bas-Smith", + "venue": { + "text": "ArXiv", + "fragments": [] + }, + "year": 2017, + "authors": [ + [ + { + "name": "Anil Bas", + "ids": [ + "39180407" + ], + "slug": "Anil-Bas" + }, + { + "text": "Anil Bas", + "fragments": [] + } + ], + [ + { + "name": "William A. P. Smith", + "ids": [ + "1687021" + ], + "slug": "William-A.-P.-Smith" + }, + { + "text": "William A. P. Smith", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "For this reason, there has been sustained interest in building feature detectors capable of accurately labelling face landmarks in uncontrolled images (Sagonas et al. 2016).", + "fragments": [ + { + "start": 152, + "end": 171 + } + ] + }, + { + "text": "Despite the small sample size, we find a pair of faces whose mean landmark error is 2.48% (i.e. they are within the expected accuracy of a landmark detector (Sagonas et al. 2016)) when they are viewed at 61cm and 488cm respectively (second and fourth image in the figure).", + "fragments": [ + { + "start": 158, + "end": 177 + } + ] + }, + { + "text": "Landmark detection on highly uncontrolled face images is now a mature research field with benchmarks (Sagonas et al. 2016) providing an indication of likely accuracy.", + "fragments": [ + { + "start": 102, + "end": 121 + } + ] + }, + { + "text": "they are within the expected accuracy of a landmark detector (Sagonas et al. 2016)) when they are viewed at 61cm and 488cm respectively (second and fourth image in the figure).", + "fragments": [ + { + "start": 61, + "end": 82 + } + ] + }, + { + "text": "Similarly, the 300 faces in the wild challenge (Sagonas et al. 2016) found that even the best methods did not obtain better than 5% accuracy for more than 50% of the landmarks.", + "fragments": [ + { + "start": 48, + "end": 67 + } + ] + }, + { + "text": "For example, state-of-the-art automatic face landmarking provides a mean landmark error under 4.5% of interocular distance for only 50% of images (according to the second conduct of the 300 Faces in the Wild challenge (Sagonas et al. 2016)).", + "fragments": [ + { + "start": 219, + "end": 238 + } + ] + }, + { + "text": "5% of interocular distance for only 50% of images (according to the second conduct of the 300 Faces in the Wild challenge (Sagonas et al. 
2016)).", + "fragments": [ + { + "start": 122, + "end": 143 + } + ] + } + ], + "isKey": true + }, + { + "id": "8f772d9ce324b2ef5857d6e0b2a420bc93961196", + "title": { + "text": "Facial Landmark Point Localization using Coarse-to-Fine Deep Recurrent Neural Network", + "fragments": [] + }, + "slug": "Facial-Landmark-Point-Localization-using-Deep-Mahpod-Das", + "venue": { + "text": "ArXiv", + "fragments": [] + }, + "year": 2018, + "authors": [ + [ + { + "name": "Shahar Mahpod", + "ids": [ + "2748312" + ], + "slug": "Shahar-Mahpod" + }, + { + "text": "Shahar Mahpod", + "fragments": [] + } + ], + [ + { + "name": "Rig Das", + "ids": [ + "3001038" + ], + "slug": "Rig-Das" + }, + { + "text": "Rig Das", + "fragments": [] + } + ], + [ + { + "name": "Emanuele Maiorana", + "ids": [ + "1767715" + ], + "slug": "Emanuele-Maiorana" + }, + { + "text": "Emanuele Maiorana", + "fragments": [] + } + ], + [ + { + "name": "Yosi Keller", + "ids": [ + "1926432" + ], + "slug": "Yosi-Keller" + }, + { + "text": "Yosi Keller", + "fragments": [] + } + ], + [ + { + "name": "Patrizio Campisi", + "ids": [ + "1682433" + ], + "slug": "Patrizio-Campisi" + }, + { + "text": "Patrizio Campisi", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "In order to evaluate the performance of our proposed CFDRNN framework we perform some exhaustive experiments on the data released for the 300-W competition [23].", + "fragments": [ + { + "start": 156, + "end": 160 + } + ] + }, + { + "text": "300-W, MENPO 300-W private test set [23] 3.", + "fragments": [ + { + "start": 36, + "end": 40 + } + ] + }, + { + "text": "The authors have also tested their proposed framework’s performance against the 300-W private test set [23], which consists of 300 indoor and 300 outdoor images.", + "fragments": [ + { + "start": 103, + "end": 107 + } + ] + }, + { + "text": "This dataset [23] contains 135 complicated facial images with different facial expression, poses, illumination, and multiple faces in a single image.", + "fragments": [ + { + "start": 13, + "end": 17 + } + ] + }, + { + "text": "bug [21], [22], 300-W [23], and Menpo [19], [24] leaves a significant scope for further improvement.", + "fragments": [ + { + "start": 22, + "end": 26 + } + ] + } + ], + "isKey": true + }, + { + "id": "7789a5d87884f8bafec8a82085292e87d4e2866f", + "title": { + "text": "A Unified Tensor-based Active Appearance Face Model", + "fragments": [] + }, + "slug": "A-Unified-Tensor-based-Active-Appearance-Face-Model-Feng-Kittler", + "venue": { + "text": "ArXiv", + "fragments": [] + }, + "year": 2016, + "authors": [ + [ + { + "name": "Zhen-Hua Feng", + "ids": [ + "2976854" + ], + "slug": "Zhen-Hua-Feng" + }, + { + "text": "Zhen-Hua Feng", + "fragments": [] + } + ], + [ + { + "name": "Josef Kittler", + "ids": [ + "1748684" + ], + "slug": "Josef-Kittler" + }, + { + "text": "Josef Kittler", + "fragments": [] + } + ], + [ + { + "name": "William J. Christmas", + "ids": [ + "1942955" + ], + "slug": "William-J.-Christmas" + }, + { + "text": "William J. 
Christmas", + "fragments": [] + } + ], + [ + { + "name": "Xiaojun Wu", + "ids": [ + "50171811" + ], + "slug": "Xiaojun-Wu" + }, + { + "text": "Xiaojun Wu", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "W face dataset (Sagonas et al., 2016).", + "fragments": [ + { + "start": 16, + "end": 36 + } + ] + }, + { + "text": "The 300-W dataset has been widely used for benchmarking a facial landmark detection algorithm (Sagonas et al., 2016).", + "fragments": [ + { + "start": 94, + "end": 116 + } + ] + }, + { + "text": "Then we demonstrate the capacity of the proposed UT-AAM to synthesise a large number of virtual faces and examine how these synthesised faces can improve the training of a facial landmark detector, using the 300-W face dataset (Sagonas et al., 2016).", + "fragments": [ + { + "start": 227, + "end": 249 + } + ] + }, + { + "text": "W dataset has been widely used for benchmarking a facial landmark detection algorithm (Sagonas et al., 2016).", + "fragments": [ + { + "start": 87, + "end": 107 + } + ] + }, + { + "text": "7 A comparison of the cumulative error distribution curves of SDM and CCR, as well as a set of state-of-the-art methods from Baltrusaitis, Hasan, Jaiswal, Miborrow, Yan and Zhou (Sagonas et al., 2016), on the 300-W face dataset: (a) results on the 300 outdoor face images; (b) results on the 300 indoor faces.", + "fragments": [ + { + "start": 178, + "end": 200 + } + ] + } + ], + "isKey": true + }, + { + "id": "0f21a39fa4c0a19c4a5b4733579e393cb1d04f71", + "title": { + "text": "Evaluation of optimization components of a 3D to 2D landmark fitting algorithm for head pose estimation", + "fragments": [] + }, + "slug": "Evaluation-of-optimization-components-of-a-3D-to-2D-Haan", + "venue": { + "text": "", + "fragments": [] + }, + "year": 2018, + "authors": [ + [ + { + "name": "Tim de Haan", + "ids": [], + "slug": "Tim-de-Haan" + }, + { + "text": "Tim de Haan", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "Landmark Localization: Landmark localization has been extensively researched [1], [17], [18], which has led to the development of Dlib [7], a machine learning toolkit that has its usage in many complex applications, such as image processing.", + "fragments": [ + { + "start": 77, + "end": 80 + } + ] + }, + { + "text": "This thesis uses the new 300-W dataset [1, 17, 18], this set consists of 600 photographs that can be used to project the 3DMM onto the landmarks detected by dlib.", + "fragments": [ + { + "start": 39, + "end": 50 + } + ] + }, + { + "text": "histogram of oriented gradients combined with a linear classifier [7], trained on the iBUG 300-W dataset from [1], the landmark detection is an implementation of [19].", + "fragments": [ + { + "start": 110, + "end": 113 + } + ] + }, + { + "text": "Table 8 shows how Dlib compares with other landmarks detection algorithms, which are the winners of the two contests that were organized by [1].", + "fragments": [ + { + "start": 140, + "end": 143 + } + ] + }, + { + "text": "Additionally, the Oracle curve is added, which is the minimum error that can be achieved [1].", + "fragments": [ + { + "start": 89, + "end": 92 + } + ] + } + ], + "isKey": true + }, + { + "id": "1885acea0d24e7b953485f78ec57b2f04e946eaf", + "title": { + "text": "Combining Local and Global Features for 3D Face Tracking", + "fragments": [] + }, + "slug": "Combining-Local-and-Global-Features-for-3D-Face-Xiong-Li", + "venue": { + "text": "2017 IEEE International Conference on Computer Vision Workshops (ICCVW)", + "fragments": 
[] + }, + "year": 2017, + "authors": [ + [ + { + "name": "Pengfei Xiong", + "ids": [ + "40448951" + ], + "slug": "Pengfei-Xiong" + }, + { + "text": "Pengfei Xiong", + "fragments": [] + } + ], + [ + { + "name": "Guoqing Li", + "ids": [ + "1775836" + ], + "slug": "Guoqing-Li" + }, + { + "text": "Guoqing Li", + "fragments": [] + } + ], + [ + { + "name": "Yuhang Sun", + "ids": [ + "48186289" + ], + "slug": "Yuhang-Sun" + }, + { + "text": "Yuhang Sun", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "In the past few decades, extensive studies [39, 40, 31, 11, 26, 41, 33, 43] have been proposed and singnificant improvements have been achieved, especially since a comprehensice benchmark [32] was made public, and deep convolution neural networks [33, 43] were applied in face shape regression.", + "fragments": [ + { + "start": 188, + "end": 192 + } + ] + }, + { + "text": "Images from AFLW [28], FDDB[22], 300W, 300W-Test [32] are collected and fitted with 3D facial morphable model [4] to generate 84 3D point annotations.", + "fragments": [ + { + "start": 49, + "end": 53 + } + ] + }, + { + "text": "300W-LP: 300W-LP(300W across Large Poses) [45] contains 61225 samples (1786 from IBUG, 5207 from AFW, and 16556 from LFPW) sysnthetical generated from 300-W [32].", + "fragments": [ + { + "start": 157, + "end": 161 + } + ] + }, + { + "text": "[45] sysnthetically generated a series of datasets based on 300W [32], which gradually become the benchmark of 3D shape regression methods.", + "fragments": [ + { + "start": 65, + "end": 69 + } + ] + } + ], + "isKey": true + }, + { + "id": "0a34fe39e9938ae8c813a81ae6d2d3a325600e5c", + "title": { + "text": "FacePoseNet: Making a Case for Landmark-Free Face Alignment", + "fragments": [] + }, + "slug": "FacePoseNet:-Making-a-Case-for-Landmark-Free-Face-Chang-Tran", + "venue": { + "text": "2017 IEEE International Conference on Computer Vision Workshops (ICCVW)", + "fragments": [] + }, + "year": 2017, + "authors": [ + [ + { + "name": "Feng-Ju Chang", + "ids": [ + "1752756" + ], + "slug": "Feng-Ju-Chang" + }, + { + "text": "Feng-Ju Chang", + "fragments": [] + } + ], + [ + { + "name": "Anh Tuan Tran", + "ids": [ + "46634688" + ], + "slug": "Anh-Tuan-Tran" + }, + { + "text": "Anh Tuan Tran", + "fragments": [] + } + ], + [ + { + "name": "Tal Hassner", + "ids": [ + "1756099" + ], + "slug": "Tal-Hassner" + }, + { + "text": "Tal Hassner", + "fragments": [] + } + ], + [ + { + "name": "Iacopo Masi", + "ids": [ + "11269472" + ], + "slug": "Iacopo-Masi" + }, + { + "text": "Iacopo Masi", + "fragments": [] + } + ], + [ + { + "name": "Ramakant Nevatia", + "ids": [ + "1694832" + ], + "slug": "Ramakant-Nevatia" + }, + { + "text": "Ramakant Nevatia", + "fragments": [] + } + ], + [ + { + "name": "Gérard G. Medioni", + "ids": [ + "3463966" + ], + "slug": "Gérard-G.-Medioni" + }, + { + "text": "Gérard G. 
Medioni", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "Facial landmark detection is big business, as reflected by the numerous citation to relevant papers, the many facial landmark detection benchmarks [5, 23, 26, 40, 53], and popular international events dedicated to this problem.", + "fragments": [ + { + "start": 147, + "end": 166 + } + ] + }, + { + "text": "We evaluate performance on the 300W data set [40], the most challenging benchmark of its kind [45], using 68 landmarks.", + "fragments": [ + { + "start": 45, + "end": 49 + } + ] + }, + { + "text": "Landmarks detected in 300W [40] images by projecting an unmodified 3D face shape,", + "fragments": [ + { + "start": 27, + "end": 31 + } + ] + }, + { + "text": "Finally, (3), we test our FPN extensively and report that better landmark detection accuracy on the widely used 300W benchmark [40] does not imply better alignment and recognition on the highly challenging IJB-A [22] and IJB-B benchmarks [44].", + "fragments": [ + { + "start": 127, + "end": 131 + } + ] + } + ], + "isKey": true + }, + { + "id": "cf736f596bf881ca97ec4b29776baaa493b9d50e", + "title": { + "text": "Low Dimensional Deep Features for facial landmark alignment", + "fragments": [] + }, + "slug": "Low-Dimensional-Deep-Features-for-facial-landmark-Jalan-Mynepalli", + "venue": { + "text": "2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)", + "fragments": [] + }, + "year": 2017, + "authors": [ + [ + { + "name": "Ankit Jalan", + "ids": [ + "18090725" + ], + "slug": "Ankit-Jalan" + }, + { + "text": "Ankit Jalan", + "fragments": [] + } + ], + [ + { + "name": "Siva Chaitanya Mynepalli", + "ids": [ + "18091255" + ], + "slug": "Siva-Chaitanya-Mynepalli" + }, + { + "text": "Siva Chaitanya Mynepalli", + "fragments": [] + } + ], + [ + { + "name": "Viswanath Veera", + "ids": [ + "18178676" + ], + "slug": "Viswanath-Veera" + }, + { + "text": "Viswanath Veera", + "fragments": [] + } + ], + [ + { + "name": "Shankar M. Venkatesan", + "ids": [ + "3210146" + ], + "slug": "Shankar-M.-Venkatesan" + }, + { + "text": "Shankar M. 
Venkatesan", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "300-W dataset [28, 31]: This dataset is created from", + "fragments": [ + { + "start": 14, + "end": 22 + } + ] + }, + { + "text": "Experimental analysis has demonstrated that LDFFA outperforms other state-of-the-art algorithms on Helen, LFPW benchmark datasets while giving comparable performance on subsets of 300-W database [26, 27, 28, 31] with 68 fiducial landmarks.", + "fragments": [ + { + "start": 195, + "end": 211 + } + ] + }, + { + "text": "We follow the method of [31] where the average L2 distance of the estimated landmark position from the ground truth is normalized by the standard definition of inter-ocular distance (douter,) to give the error (Eqn.", + "fragments": [ + { + "start": 24, + "end": 28 + } + ] + }, + { + "text": "The bounding boxes for 300-W images were provided by [28, 31] using their in-house face detector.", + "fragments": [ + { + "start": 53, + "end": 61 + } + ] + }, + { + "text": "3 (b & c) illustrate the Normalized Mean Error using definition from [31].", + "fragments": [ + { + "start": 69, + "end": 73 + } + ] + } + ], + "isKey": true + }, + { + "id": "7fcfd72ba6bc14bbb90b31fe14c2c77a8b220ab2", + "title": { + "text": "Robust FEC-CNN: A High Accuracy Facial Landmark Detection System", + "fragments": [] + }, + "slug": "Robust-FEC-CNN:-A-High-Accuracy-Facial-Landmark-He-Zhang", + "venue": { + "text": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", + "fragments": [] + }, + "year": 2017, + "authors": [ + [ + { + "name": "Zhenliang He", + "ids": [ + "3469114" + ], + "slug": "Zhenliang-He" + }, + { + "text": "Zhenliang He", + "fragments": [] + } + ], + [ + { + "name": "Jie Zhang", + "ids": [ + "49050482" + ], + "slug": "Jie-Zhang" + }, + { + "text": "Jie Zhang", + "fragments": [] + } + ], + [ + { + "name": "Meina Kan", + "ids": [ + "1693589" + ], + "slug": "Meina-Kan" + }, + { + "text": "Meina Kan", + "fragments": [] + } + ], + [ + { + "name": "Shiguang Shan", + "ids": [ + "1685914" + ], + "slug": "Shiguang-Shan" + }, + { + "text": "Shiguang Shan", + "fragments": [] + } + ], + [ + { + "name": "Xilin Chen", + "ids": [ + "1710220" + ], + "slug": "Xilin-Chen" + }, + { + "text": "Xilin Chen", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "Impressive works [5, 4, 6, 3, 23, 14, 20, 27, 21, 22, 9] and benchmarks [11, 29, 2, 16, 17, 10, 26] were proposed to tackle this task in the the past few decades.", + "fragments": [ + { + "start": 72, + "end": 99 + } + ] + }, + { + "text": "LFPW, HELEN, AFW, 300W Competition and Menpo 68 point subset are used as training set while IBUG is used as testing set.", + "fragments": [ + { + "start": 23, + "end": 34 + } + ] + }, + { + "text": "We employ 300W [16, 18], 300W Competition [16, 17] and Menpo dataset [26] for evaluating RFC.", + "fragments": [ + { + "start": 15, + "end": 23 + } + ] + }, + { + "text": "The 300W Competition dataset consists of indoor and outdoor subset.", + "fragments": [ + { + "start": 9, + "end": 20 + } + ] + } + ], + "isKey": true + }, + { + "id": "4ac3cd8b6c50f7a26f27eefc64855134932b39be", + "title": { + "text": "Robust Facial Landmark Detection via a Fully-Convolutional Local-Global Context Network", + "fragments": [] + }, + "slug": "Robust-Facial-Landmark-Detection-via-a-Local-Global-Merget-Rock", + "venue": { + "text": "", + "fragments": [] + }, + "authors": [ + [ + { + "name": "Daniel Merget", + "ids": [ + "3044182" + ], + "slug": "Daniel-Merget" + }, + { + "text": "Daniel 
Merget", + "fragments": [] + } + ], + [ + { + "name": "Matthias Rock", + "ids": [ + "28096417" + ], + "slug": "Matthias-Rock" + }, + { + "text": "Matthias Rock", + "fragments": [] + } + ], + [ + { + "name": "Gerhard Rigoll", + "ids": [ + "46343645" + ], + "slug": "Gerhard-Rigoll" + }, + { + "text": "Gerhard Rigoll", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "Some qualitative results on the 300-W challenge data set [28] are presented in Figure 5.", + "fragments": [ + { + "start": 57, + "end": 61 + } + ] + }, + { + "text": "Figure 4 illustrates the results for inter-ocular distance (IOD) normalized mean absolute error (MAE) on the 300-W benchmark [28] and on a cross-data set test with Menpo [41].", + "fragments": [ + { + "start": 125, + "end": 129 + } + ] + }, + { + "text": "The overall prediction performance on 300-W [28] was worse using cross entropy.", + "fragments": [ + { + "start": 44, + "end": 48 + } + ] + }, + { + "text": "Our approach beats the state of the art on 300-W [28] and on a cross-data set test with Menpo [41].", + "fragments": [ + { + "start": 49, + "end": 53 + } + ] + }, + { + "text": "From left to right: 300-W [28], iBUG [29], LFPW [5] + HELEN [20], Menpo frontal train set [41].", + "fragments": [ + { + "start": 26, + "end": 30 + } + ] + }, + { + "text": "• We demonstrate the effectiveness of our approach by improving upon the state of the art on 300-W [28] and a cross-data set test on Menpo [41].", + "fragments": [ + { + "start": 99, + "end": 103 + } + ] + } + ], + "isKey": true + } + ], + "requestedPageSize": 10, + "pageNumber": 1, + "totalPages": 11, + "sort": "is-influential" + }, + "citedPapers": { + "citationType": "citedPapers", + "citations": [ + { + "id": "0a6d344112b5af7d1abbd712f83c0d70105211d0", + "title": { + "text": "Constrained Local Neural Fields for Robust Facial Landmark Detection in the Wild", + "fragments": [] + }, + "slug": "Constrained-Local-Neural-Fields-for-Robust-Facial-Baltrusaitis-Robinson", + "venue": { + "text": "2013 IEEE International Conference on Computer Vision Workshops", + "fragments": [] + }, + "year": 2013, + "authors": [ + [ + { + "name": "Tadas Baltrusaitis", + "ids": [ + "1756344" + ], + "slug": "Tadas-Baltrusaitis" + }, + { + "text": "Tadas Baltrusaitis", + "fragments": [] + } + ], + [ + { + "name": "Peter Robinson", + "ids": [ + "39626495" + ], + "slug": "Peter-Robinson" + }, + { + "text": "Peter Robinson", + "fragments": [] + } + ], + [ + { + "name": "Louis-Philippe Morency", + "ids": [ + "49933077" + ], + "slug": "Louis-Philippe-Morency" + }, + { + "text": "Louis-Philippe Morency", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "[40] propose a probabilistic patch expert technique that learns non-linear and spatial relationships between the pixels and the probability of a landmark being aligned.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[40]", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[40] T.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[40] 0.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + } + ], + "isKey": true + }, + { + "id": "1a8ccc23ed73db64748e31c61c69fe23c48a2bb1", + "title": { + "text": "Extensive Facial Landmark Localization with Coarse-to-Fine Convolutional Network Cascade", + "fragments": [] + }, + "slug": "Extensive-Facial-Landmark-Localization-with-Network-Zhou-Fan", + "venue": { + "text": "2013 IEEE International Conference on Computer Vision Workshops", + 
"fragments": [] + }, + "year": 2013, + "authors": [ + [ + { + "name": "Erjin Zhou", + "ids": [ + "1848243" + ], + "slug": "Erjin-Zhou" + }, + { + "text": "Erjin Zhou", + "fragments": [] + } + ], + [ + { + "name": "Haoqiang Fan", + "ids": [ + "1934546" + ], + "slug": "Haoqiang-Fan" + }, + { + "text": "Haoqiang Fan", + "fragments": [] + } + ], + [ + { + "name": "Zhimin Cao", + "ids": [ + "2695115" + ], + "slug": "Zhimin-Cao" + }, + { + "text": "Zhimin Cao", + "fragments": [] + } + ], + [ + { + "name": "Yuning Jiang", + "ids": [ + "1691963" + ], + "slug": "Yuning-Jiang" + }, + { + "text": "Yuning Jiang", + "fragments": [] + } + ], + [ + { + "name": "Qi Yin", + "ids": [ + "2274228" + ], + "slug": "Qi-Yin" + }, + { + "text": "Qi Yin", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "[47] 0% 2.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[47] 0.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[47] propose a four-level convolutional network cascade, where each level is trained to locally refine the outputs of the previous network levels.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[47]", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[47] (convolutional network framework), can continually achieve better results with continuous rise in the amount of training data.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[47] E.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[47] from Megvii company.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[47]) and second (J.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + } + ], + "isKey": true + }, + { + "id": "1c1a98df3d0d5e2034ea723994bdc85af45934db", + "title": { + "text": "Guided Unsupervised Learning of Mode Specific Models for Facial Point Detection in the Wild", + "fragments": [] + }, + "slug": "Guided-Unsupervised-Learning-of-Mode-Specific-for-Jaiswal-Almaev", + "venue": { + "text": "2013 IEEE International Conference on Computer Vision Workshops", + "fragments": [] + }, + "year": 2013, + "authors": [ + [ + { + "name": "Shashank Jaiswal", + "ids": [ + "2736086" + ], + "slug": "Shashank-Jaiswal" + }, + { + "text": "Shashank Jaiswal", + "fragments": [] + } + ], + [ + { + "name": "Timur R. Almaev", + "ids": [ + "2449665" + ], + "slug": "Timur-R.-Almaev" + }, + { + "text": "Timur R. Almaev", + "fragments": [] + } + ], + [ + { + "name": "Michel F. Valstar", + "ids": [ + "1795528" + ], + "slug": "Michel-F.-Valstar" + }, + { + "text": "Michel F. 
Valstar", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "[41]", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[41] 0.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[41] S.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[41] use Local Evidence Aggregated Regression [42], in which local patches provide evidence of the location of the target facial point using Support Vector Regressors.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + } + ], + "isKey": true + }, + { + "id": "321c8ba38db118d8b02c0ba209be709e6792a2c7", + "title": { + "text": "Learn to Combine Multiple Hypotheses for Accurate Face Alignment", + "fragments": [] + }, + "slug": "Learn-to-Combine-Multiple-Hypotheses-for-Accurate-Yan-Lei", + "venue": { + "text": "2013 IEEE International Conference on Computer Vision Workshops", + "fragments": [] + }, + "year": 2013, + "authors": [ + [ + { + "name": "Junjie Yan", + "ids": [ + "48270105" + ], + "slug": "Junjie-Yan" + }, + { + "text": "Junjie Yan", + "fragments": [] + } + ], + [ + { + "name": "Zhen Lei", + "ids": [ + "1718623" + ], + "slug": "Zhen-Lei" + }, + { + "text": "Zhen Lei", + "fragments": [] + } + ], + [ + { + "name": "Dong Yi", + "ids": [ + "1716143" + ], + "slug": "Dong-Yi" + }, + { + "text": "Dong Yi", + "fragments": [] + } + ], + [ + { + "name": "Stan Z. Li", + "ids": [ + "34679741" + ], + "slug": "Stan-Z.-Li" + }, + { + "text": "Stan Z. Li", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "[46]", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[46], Zhou et al.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[46] J.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[46] from The National Laboratory of Pattern Recognition at the Institute of Automation of the Chinese Academy of Sciences, and (b) Zhou et al.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[46] (cascade regression framework) and", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[46] 0.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[46] employ a cascade regression framework, where a series of regressors are utilized to progressively refine the shape initialized by the face detector.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + } + ], + "isKey": true + }, + { + "id": "4f77a37753c03886ca9c9349723ec3bbfe4ee967", + "title": { + "text": "Localizing Facial Keypoints with Global Descriptor Search, Neighbour Alignment and Locally Linear Models", + "fragments": [] + }, + "slug": "Localizing-Facial-Keypoints-with-Global-Descriptor-Hasan-Pal", + "venue": { + "text": "2013 IEEE International Conference on Computer Vision Workshops", + "fragments": [] + }, + "year": 2013, + "authors": [ + [ + { + "name": "Md. Kamrul Hasan", + "ids": [ + "2811524" + ], + "slug": "Md.-Kamrul-Hasan" + }, + { + "text": "Md. 
Kamrul Hasan", + "fragments": [] + } + ], + [ + { + "name": "Christopher Joseph Pal", + "ids": [ + "1972076" + ], + "slug": "Christopher-Joseph-Pal" + }, + { + "text": "Christopher Joseph Pal", + "fragments": [] + } + ], + [ + { + "name": "Sharon Moalem", + "ids": [ + "9422894" + ], + "slug": "Sharon-Moalem" + }, + { + "text": "Sharon Moalem", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "[43] M.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[43]", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[43] first apply a nearest neighbour search using global descriptors and, then, aim to align local neighbours by dynamically fitting a locally linear model to the global keypoint configurations of the returned neighbours.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[43] 0.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + } + ], + "isKey": true + }, + { + "id": "36e8ef2e5d52a78dddf0002e03918b101dcdb326", + "title": { + "text": "Multiview Active Shape Models with SIFT Descriptors for the 300-W Face Landmark Challenge", + "fragments": [] + }, + "slug": "Multiview-Active-Shape-Models-with-SIFT-Descriptors-Milborrow-Bishop", + "venue": { + "text": "2013 IEEE International Conference on Computer Vision Workshops", + "fragments": [] + }, + "year": 2013, + "authors": [ + [ + { + "name": "Stephen Milborrow", + "ids": [ + "2822258" + ], + "slug": "Stephen-Milborrow" + }, + { + "text": "Stephen Milborrow", + "fragments": [] + } + ], + [ + { + "name": "Tom E. Bishop", + "ids": [ + "1823550" + ], + "slug": "Tom-E.-Bishop" + }, + { + "text": "Tom E. Bishop", + "fragments": [] + } + ], + [ + { + "name": "Fred Nicolls", + "ids": [ + "2537623" + ], + "slug": "Fred-Nicolls" + }, + { + "text": "Fred Nicolls", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "[44] 0.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[44] S.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[44] approach the problem with Active Shape Models (ASMs) that incorporate a modified version of SIFT descriptors [45].", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "[44]", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + } + ], + "isKey": true + }, + { + "title": { + "text": "Face detection", + "fragments": [] + }, + "slug": "Face-detection-Zhu-Ramanan", + "venue": { + "text": "pose estimation, and landmark localization in the wild, in: Proceedings of IEEE International Conference on Computer Vision & Pattern Recognition (CVPR), IEEE", + "fragments": [] + }, + "year": 2012, + "authors": [ + [ + { + "name": "X. Zhu", + "ids": [], + "slug": "X.-Zhu" + }, + { + "text": "X. Zhu", + "fragments": [] + } + ], + [ + { + "name": "D. Ramanan", + "ids": [], + "slug": "D.-Ramanan" + }, + { + "text": "D. Ramanan", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "The most notable databases of this category are LFPW [28], HELEN [29], AFW [17], AFLW [30] and IBUG [31] (all used for facial landmark points localization).", + "fragments": [ + { + "start": 75, + "end": 79 + } + ] + }, + { + "text": "Note that we employ DPMs [17] to estimate the initial landmarks locations for the first iteration of the above procedure.", + "fragments": [ + { + "start": 25, + "end": 29 + } + ] + }, + { + "text": "These databases can be separated in two major categories: (a) those captured under controlled conditions, e.g. 
Multi-PIE [24], XM2VTS [25], FRGCV2 [26], AR [27], and those captured under totally unconstrained conditions (in-the-wild), e.g. LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].", + "fragments": [ + { + "start": 263, + "end": 266 + } + ] + }, + { + "text": "Furthermore, we computed the bounding boxes of all the aforementioned databases by using our in-house face detector, the one that is also employed in [16], which is a variant of [17].", + "fragments": [ + { + "start": 178, + "end": 182 + } + ] + }, + { + "text": "W), the first facial landmark localization challenge, that was\n1The annotations of XM2VTS, FRGC-V2, LFPW, HELEN, AFW and IBUG are publicly available from http://ibug.doc.ic.ac.uk/resources/facial-point-annotations/.", + "fragments": [ + { + "start": 113, + "end": 116 + } + ] + }, + { + "text": "LFPW, AFW, HELEN, XM2VTS and FRGC-V2 were provided for training, along with the corrected annotations produced with the semi-automatic annotation tool (Sec 3).", + "fragments": [ + { + "start": 6, + "end": 9 + } + ] + }, + { + "text": ", and outperform discriminative methodologies, such as CLMs [15], DPMs [17] and SDM [18].", + "fragments": [ + { + "start": 71, + "end": 75 + } + ] + }, + { + "text": "For each subject, there are available images for 15 different poses, 19 illumination conditions and 6\n5\nAC C\nEP\nTE\nD M\nAN U\nSC R\nIP T\n(a) MultiPIE/IBUG (b) XM2VTS (c) FRGC-V2 (d) AR\n(e) LFPW (f) HELEN (g) AFW (h) AFLW\nFigure 1: Landmarks configurations of existing databases.", + "fragments": [ + { + "start": 205, + "end": 208 + } + ] + }, + { + "text": "Multi-PIE [24], XM2VTS [25], FRGC-V2 [26], AR [27], LFPW [28], HELEN [29] and AFW [17].", + "fragments": [ + { + "start": 82, + "end": 86 + } + ] + }, + { + "text": "We employed the proposed tool to re-annotate all the widely used databases, i.e. 
Multi-PIE [24], XM2VTS [25], FRGC-V2 [26], AR [27], LFPW [28], HELEN [29] and AFW [17].", + "fragments": [ + { + "start": 159, + "end": 162 + } + ] + }, + { + "text": "The discriminative techniques can be further divided to those that use discriminative response map functions, such as Active Shape Models (ASMs) [13], Constrained Local Models (CLMs) [14, 15, 16] and Deformable Part Models (DPMs) [17], those that learn a cascade of regression functions, such as Supervised Descent Method (SDM) [18] and others [19, 20, 21], and, finally, those that employ random forests [22, 23].", + "fragments": [ + { + "start": 230, + "end": 234 + } + ] + }, + { + "text": "The accuracy of the fitting results was measured by the point-to-point RMS error between each fitted shape and the ground truth annotations, normalized by the face’s interoccular distance, as proposed in [17].", + "fragments": [ + { + "start": 204, + "end": 208 + } + ] + }, + { + "text": "HELEN, AFW, IBUG: The rest of in-the-wild databases were annotated\n11\nAC C\nEP TE\nD M\nAN U\nSC R\nIP T\n(a) Multi-PIE (b) XM2VTS\n(c) FRGC-V2 (d) LFPW\n(e) HELEN (f) AFW\nFigure 3: Examples of the annotated images.", + "fragments": [ + { + "start": 7, + "end": 10 + } + ] + }, + { + "text": "In both conducts, the training set consisted of the XM2VTS, FRGC-V2, LFPW, HELEN, AFW and IBUG databases that were annotated using the proposed semi-automatic procedure.", + "fragments": [ + { + "start": 82, + "end": 85 + } + ] + }, + { + "text": "LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].", + "fragments": [ + { + "start": 27, + "end": 31 + } + ] + }, + { + "text": "AFW: The Annotated Faces in-the-wild (AFW) [17] database consists of 250 images with 468 faces, that is, more than one faces are annotated in each image.", + "fragments": [ + { + "start": 43, + "end": 47 + } + ] + }, + { + "text": "AR: The AR Face Database [27] contains over 4000 images corresponding to\n6\nAC C\nEP TE\nD M\nAN U\nSC\nR\nIP T\nDatabase conditions # faces # subjects # points pose\nMulti-PIE controlled ∼ 750000 337 68 [−45◦, 45◦] XM2VTS 2360 295 68 0◦ FRGC-V2 4950 466 5 0◦ AR ∼ 4000 126 22 0◦ LFPW\nin-the-wild\n1035\n−\n35\n[−45◦, 45◦] HELEN 2330 194 AFW 468 6 AFLW 25993 21 IBUG 135 68\nTable 1: Overview of the characteristics of existing facial databases.", + "fragments": [ + { + "start": 310, + "end": 313 + } + ] + }, + { + "text": "For example, DPMs [17] tend to return bounding boxes that only include facial texture and not any of the subject’s hair, as usually done by the Viola-Jones detector [48].", + "fragments": [ + { + "start": 18, + "end": 22 + } + ] + }, + { + "text": "The authors were encouraged, but not restricted, to use LFPW, AFW, HELEN, IBUG, FRGC-V2 and XM2VTS databases with the provided annotations.", + "fragments": [ + { + "start": 62, + "end": 65 + } + ] + } + ], + "isKey": true + }, + { + "id": "a74251efa970b92925b89eeef50a5e37d9281ad0", + "title": { + "text": "Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization", + "fragments": [] + }, + "slug": "Annotated-Facial-Landmarks-in-the-Wild:-A-database-Köstinger-Wohlhart", + "venue": { + "text": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", + "fragments": [] + }, + "year": 2011, + "authors": [ + [ + { + "name": "Martin Köstinger", + "ids": [ + "1993853" + ], + "slug": "Martin-Köstinger" + }, + { + "text": "Martin Köstinger", + "fragments": [] + } + ], + [ + { + "name": "Paul Wohlhart", + "ids": [ + "3202367" + ], + 
"slug": "Paul-Wohlhart" + }, + { + "text": "Paul Wohlhart", + "fragments": [] + } + ], + [ + { + "name": "Peter M. Roth", + "ids": [ + "1791182" + ], + "slug": "Peter-M.-Roth" + }, + { + "text": "Peter M. Roth", + "fragments": [] + } + ], + [ + { + "name": "Horst Bischof", + "ids": [ + "3628150" + ], + "slug": "Horst-Bischof" + }, + { + "text": "Horst Bischof", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "The most notable databases of this category are LFPW [28], HELEN [29], AFW [17], AFLW [30] and IBUG [31] (all used for facial landmark points localization).", + "fragments": [ + { + "start": 86, + "end": 90 + } + ] + }, + { + "text": "These databases can be separated in two major categories: (a) those captured under controlled conditions, e.g. Multi-PIE [24], XM2VTS [25], FRGCV2 [26], AR [27], and those captured under totally unconstrained conditions (in-the-wild), e.g. LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].", + "fragments": [ + { + "start": 273, + "end": 277 + } + ] + }, + { + "text": "AFLW: The Annotated Facial Landmarks in theWild (AFLW) [30] database consists of 25993 images gathered from Flickr, exhibiting a large variety in appearance (e.g., pose, expression, ethnicity, age, gender) as well as general imaging and environmental conditions.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "For each subject, there are available images for 15 different poses, 19 illumination conditions and 6\n5\nAC C\nEP\nTE\nD M\nAN U\nSC R\nIP T\n(a) MultiPIE/IBUG (b) XM2VTS (c) FRGC-V2 (d) AR\n(e) LFPW (f) HELEN (g) AFW (h) AFLW\nFigure 1: Landmarks configurations of existing databases.", + "fragments": [ + { + "start": 213, + "end": 217 + } + ] + }, + { + "text": "AFLW: The Annotated Facial Landmarks in theWild (AFLW) [30] database consists of 25993 images gathered from Flickr, exhibiting a large variety in appearance (e.", + "fragments": [ + { + "start": 55, + "end": 59 + } + ] + }, + { + "text": "[30] M.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].", + "fragments": [ + { + "start": 38, + "end": 42 + } + ] + }, + { + "text": "AR: The AR Face Database [27] contains over 4000 images corresponding to\n6\nAC C\nEP TE\nD M\nAN U\nSC\nR\nIP T\nDatabase conditions # faces # subjects # points pose\nMulti-PIE controlled ∼ 750000 337 68 [−45◦, 45◦] XM2VTS 2360 295 68 0◦ FRGC-V2 4950 466 5 0◦ AR ∼ 4000 126 22 0◦ LFPW\nin-the-wild\n1035\n−\n35\n[−45◦, 45◦] HELEN 2330 194 AFW 468 6 AFLW 25993 21 IBUG 135 68\nTable 1: Overview of the characteristics of existing facial databases.", + "fragments": [ + { + "start": 320, + "end": 324 + } + ] + } + ], + "isKey": true + }, + { + "title": { + "text": "Fddb: A benchmark for face detection in unconstrained settings", + "fragments": [] + }, + "slug": "Fddb:-A-benchmark-for-face-detection-in-settings-Jain-Learned-Miller", + "venue": { + "text": "Tech. Rep. UM-CS-2010-009, University of Massachusetts, Amherst", + "fragments": [] + }, + "year": 2010, + "authors": [ + [ + { + "name": "V. Jain", + "ids": [], + "slug": "V.-Jain" + }, + { + "text": "V. Jain", + "fragments": [] + } + ], + [ + { + "name": "E. Learned-Miller", + "ids": [], + "slug": "E.-Learned-Miller" + }, + { + "text": "E. 
Learned-Miller", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "Consequently, in order to facilitate the participants and make the competition less dependent to a face\n20\ndetector’s performance, we suggested them to use one of the face detection methods that took part in the Face Detection Data Set and Benchmark (FDDB) [49].", + "fragments": [ + { + "start": 250, + "end": 254 + } + ] + }, + { + "text": "[49] V.", + "fragments": [ + { + "start": 0, + "end": 4 + } + ] + }, + { + "text": "detector’s performance, we suggested them to use one of the face detection methods that took part in the Face Detection Data Set and Benchmark (FDDB) [49].", + "fragments": [ + { + "start": 150, + "end": 154 + } + ] + }, + { + "text": "The results presented in the Face Detection Data Set and Benchmark (FDDB) [49] show that current stateof-the-art techniques achieve very good true positive rates.", + "fragments": [ + { + "start": 74, + "end": 78 + } + ] + } + ], + "isKey": true + }, + { + "title": { + "text": "Multi-pie", + "fragments": [] + }, + "slug": "Multi-pie-Gross-Matthews", + "venue": { + "text": "Image and Vision Computing 28 (5)", + "fragments": [] + }, + "year": 2010, + "authors": [ + [ + { + "name": "R. Gross", + "ids": [], + "slug": "R.-Gross" + }, + { + "text": "R. Gross", + "fragments": [] + } + ], + [ + { + "name": "I. Matthews", + "ids": [], + "slug": "I.-Matthews" + }, + { + "text": "I. Matthews", + "fragments": [] + } + ], + [ + { + "name": "J. Cohn", + "ids": [], + "slug": "J.-Cohn" + }, + { + "text": "J. Cohn", + "fragments": [] + } + ], + [ + { + "name": "T. Kanade", + "ids": [], + "slug": "T.-Kanade" + }, + { + "text": "T. Kanade", + "fragments": [] + } + ], + [ + { + "name": "S. Baker", + "ids": [], + "slug": "S.-Baker" + }, + { + "text": "S. Baker", + "fragments": [] + } + ] + ], + "citationContexts": [ + { + "text": "The most popular such databases are Multi-PIE [24] (used for face recognition, expressions recognition, landmark points localization), FRGC-V2 [26] (used for face recognition), XM2VTS [25] and AR [27] (both used for face recognition and landmark points localization).", + "fragments": [ + { + "start": 46, + "end": 50 + } + ] + }, + { + "text": "The provided facial landmark annotations are produced by employing the annotation scheme of Multi-PIE (Fig.", + "fragments": [ + { + "start": 92, + "end": 101 + } + ] + }, + { + "text": "These databases can be separated in two major categories: (a) those captured under controlled conditions, e.g. Multi-PIE [24], XM2VTS [25], FRGCV2 [26], AR [27], and those captured under totally unconstrained conditions (in-the-wild), e.g. LFPW [28], HELEN [29], AFW [17], AFLW [30], IBUG [31].", + "fragments": [ + { + "start": 111, + "end": 120 + } + ] + }, + { + "text": "Note that in the case of Multi-PIE, even though the original and generated annotations have the same configuration, the generated ones are more accurate.\nusing a common procedure.", + "fragments": [ + { + "start": 25, + "end": 34 + } + ] + }, + { + "text": "The advantages of the generated annotations1 are twofold: (1) They all have the same landmarks configuration, i.e. the one employed in Multi-PIE (Fig.", + "fragments": [ + { + "start": 135, + "end": 144 + } + ] + }, + { + "text": "16: Fit the person-specific AOM to the image i. 
17: end for 18: end for 19: end if 20: Check and manually correct, if necessary, the generated annotations of Q.\nMulti-PIE: The available Multi-PIE annotations cover only the neutral expression with pose [−45◦, 45◦] and multiple non-neutral expressions with pose 0◦.", + "fragments": [ + { + "start": 161, + "end": 170 + } + ] + }, + { + "text": "Facial databases under controlled conditions Multi-PIE: The CMU Multi Pose Illumination, and Expression (MultiPIE) Database [24] contains around 750000 images of 337 subjects captured under laboratory conditions in four different sessions.", + "fragments": [ + { + "start": 124, + "end": 128 + } + ] + }, + { + "text": "Multi-PIE [24], XM2VTS [25], FRGC-V2 [26], AR [27], LFPW [28], HELEN [29] and AFW [17].", + "fragments": [ + { + "start": 10, + "end": 14 + } + ] + }, + { + "text": "We employed the proposed tool to re-annotate all the widely used databases, i.e. Multi-PIE [24], XM2VTS [25], FRGC-V2 [26], AR [27], LFPW [28], HELEN [29] and AFW [17].", + "fragments": [ + { + "start": 81, + "end": 90 + } + ] + }, + { + "text": "For example, in Multi-PIE, the annotations for subjects with expressions “disgust” at 0◦ and “neutral” at 15◦ are provided and we want to produce the annotations for subjects with expression “disgust” at 15◦.", + "fragments": [ + { + "start": 16, + "end": 25 + } + ] + }, + { + "text": "The images of each such pose cluster were semi-automatically annotated using images from Multi-PIE with the same pose.", + "fragments": [ + { + "start": 89, + "end": 98 + } + ] + }, + { + "text": "However, the accuracy of the annotations in some cases is limited and the locations of the provided points do not correspond to ones of Multi-PIE.", + "fragments": [ + { + "start": 136, + "end": 145 + } + ] + }, + { + "text": "Multi-PIE: The CMU Multi Pose Illumination, and Expression (MultiPIE) Database [24] contains around 750000 images of 337 subjects captured under laboratory conditions in four different sessions.", + "fragments": [ + { + "start": 0, + "end": 9 + } + ] + }, + { + "text": "XM2VTS: The images of XM2VTS’s first session were semi-automatically annotated by setting V to be the subjects of Multi-PIE with neutral expression and [−15◦, 15◦] poses.", + "fragments": [ + { + "start": 113, + "end": 122 + } + ] + }, + { + "text": "HELEN, AFW, IBUG: The rest of in-the-wild databases were annotated\n11\nAC C\nEP TE\nD M\nAN U\nSC R\nIP T\n(a) Multi-PIE (b) XM2VTS\n(c) FRGC-V2 (d) LFPW\n(e) HELEN (f) AFW\nFigure 3: Examples of the annotated images.", + "fragments": [ + { + "start": 104, + "end": 113 + } + ] + }, + { + "text": "This subset was annotated by employing images from Multi-PIE with six expressions and [−15◦, 15◦] poses as V .", + "fragments": [ + { + "start": 51, + "end": 60 + } + ] + }, + { + "text": "Multi-PIE [24], XM2VTS [25], FRGCV2 [26], AR [27], and those captured under totally unconstrained conditions (in-the-wild), e.", + "fragments": [ + { + "start": 10, + "end": 14 + } + ] + }, + { + "text": "In case Q has multiple images per subject (e.g. 
Multi-PIE, XM2VTS, FRGC-V2, AR), the above method can be extended to further improve the generated annotations.", + "fragments": [ + { + "start": 48, + "end": 57 + } + ] + }, + { + "text": "To this end, we selected such images of N = 80 different subjects with frontal pose from the Multi-PIE database.", + "fragments": [ + { + "start": 93, + "end": 102 + } + ] + }, + { + "text": "AR: The AR Face Database [27] contains over 4000 images corresponding to\n6\nAC C\nEP TE\nD M\nAN U\nSC\nR\nIP T\nDatabase conditions # faces # subjects # points pose\nMulti-PIE controlled ∼ 750000 337 68 [−45◦, 45◦] XM2VTS 2360 295 68 0◦ FRGC-V2 4950 466 5 0◦ AR ∼ 4000 126 22 0◦ LFPW\nin-the-wild\n1035\n−\n35\n[−45◦, 45◦] HELEN 2330 194 AFW 468 6 AFLW 25993 21 IBUG 135 68\nTable 1: Overview of the characteristics of existing facial databases.", + "fragments": [ + { + "start": 158, + "end": 167 + } + ] + } + ], + "isKey": true + } + ], + "requestedPageSize": 10, + "pageNumber": 1, + "totalPages": 5, + "sort": "is-influential" + }, + "figureExtractions": { + "figures": [ + { + "name": "1", + "figureType": "figure", + "caption": "Figure 1: Landmarks configurations of existing databases. Note they all have different number of landmark points with semantically different locations.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/7-Figure1-1.png", + "width": 449, + "height": 449 + }, + { + "name": "1", + "figureType": "table", + "caption": "Table 1: Overview of the characteristics of existing facial databases.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/8-Table1-1.png", + "width": 439, + "height": 439 + }, + { + "name": "2", + "figureType": "figure", + "caption": "Figure 2: Flowchart of the proposed tool. Given a set of landmarked images V with various poses and expressions, we aim to annotate a set of non-annotated images Q (1) with the same subjects and different poses and expressions, or (2) with different subjects but similar pose and expressions.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/11-Figure2-1.png", + "width": 449, + "height": 449 + }, + { + "name": "2", + "figureType": "table", + "caption": "Table 2: Overview of the characteristics of the 300-W database.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/16-Table2-1.png", + "width": 445, + "height": 445 + }, + { + "name": "3", + "figureType": "figure", + "caption": "Figure 3: Examples of the annotated images. For each database, the image on the left has the original annotations and the one on the right shows the annotations generated by the proposed tool. 
Note that in the case of Multi-PIE, even though the original and generated annotations have the same configuration, the generated ones are more accurate.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/13-Figure3-1.png", + "width": 445, + "height": 445 + }, + { + "name": "3", + "figureType": "table", + "caption": "Table 3: Median absolute deviation of the fitting results of the first conduct of 300-W challenge in 2013, reported for both 68 and 51 points.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/19-Table3-1.png", + "width": 422, + "height": 422 + }, + { + "name": "4", + "figureType": "figure", + "caption": "Figure 4: The cardinality of W and V per iteration.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/14-Figure4-1.png", + "width": 321, + "height": 321 + }, + { + "name": "4", + "figureType": "table", + "caption": "Table 4: Overview of the characteristics of the cropped images of the 300-W database.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/20-Table4-1.png", + "width": 448, + "height": 448 + }, + { + "name": "5", + "figureType": "figure", + "caption": "Figure 5: Each ellipse denotes the variance of each landmark point with regards to three expert human annotators. The colours of the points rank them with respect to their standard deviation normalized by the face size.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/15-Figure5-1.png", + "width": 413, + "height": 413 + }, + { + "name": "5", + "figureType": "table", + "caption": "Table 5: Second conduct of the 300-W challenge. 2nd column: Number of images for which an estimation of the landmarks was returned. 3rd and 4th columns: The mean absolute deviation of the fitting results for both 68 and 51 points. 5th column: Mean computational cost per method.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/23-Table5-1.png", + "width": 449, + "height": 449 + }, + { + "name": "6", + "figureType": "figure", + "caption": "Figure 6: The 51-points mark-up is a subset of the 68-points one after removing the 17 points of the face’s boundary. The interoccular distance is defined between the outer points of the eyes.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/17-Figure6-1.png", + "width": 231, + "height": 231 + }, + { + "name": "6", + "figureType": "table", + "caption": "Table 6: Percentage of images with fitting error less than the specified values for the winners of the first (Yan et al. [46], Zhou et al. [47]) and second (J. Deng, H. Fan) 300-W challenges, and Oracle. The error is based on 68 points using both indoor and oudoor images.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/27-Table6-1.png", + "width": 445, + "height": 445 + }, + { + "name": "7", + "figureType": "figure", + "caption": "Figure 7: Fitting results of the first conduct of the 300-W challenge in 2013. 
The plots show the Cumulative Error Distribution (CED) curves with respect to the landmarks (68 and 51 points) and the condtions (indoor, outdoor or both).", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/18-Figure7-1.png", + "width": 477, + "height": 477 + }, + { + "name": "8", + "figureType": "figure", + "caption": "Figure 8: Indicative examples of the way the images were cropped for the second conduct of the 300-W challenge.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/21-Figure8-1.png", + "width": 449, + "height": 449 + }, + { + "name": "9", + "figureType": "figure", + "caption": "Figure 9: Fitting results of the second conduct of the 300-W challenge in 2015. The plots show the Cumulative Error Distribution (CED) curves with respect to the landmarks (68 and 51 points) and the condtions (indoor, outdoor or both).", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/22-Figure9-1.png", + "width": 477, + "height": 477 + }, + { + "name": "10", + "figureType": "figure", + "caption": "Figure 10: Fitting examples of the first conduct of the 300-W challenge in 2013. Each row shows the fitted landmarks for each participating method.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/24-Figure10-1.png", + "width": 449, + "height": 449 + }, + { + "name": "11", + "figureType": "figure", + "caption": "Figure 11: Fitting examples of the second conduct of the 300-W challenge in 2015. Each row shows the fitted landmarks for each participating method.", + "uri": "https://ai2-s2-public.s3.amazonaws.com/figures/2017-08-08/e4754afaa15b1b53e70743880484b8d0736990ff/26-Figure11-1.png", + "width": 455, + "height": 455 + } + ] + }, + "presentations": [ + { + "contentType": "PRESENTATION", + "id": "df1e22c9a92e6c506c228ef23049ab3b8c908e80", + "title": "Face detection, pose estimation, and landmark localization in the wild", + "authors": [ + "Xiangxin Zhu", + "Deva Ramanan" + ], + "published": 1412121600, + "url": "https://pdfs.semanticscholar.org/presentation/df1e/22c9a92e6c506c228ef23049ab3b8c908e80.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "b4d2151e29fb12dbe5d164b430273de65103d39b", + "title": "Annotated Facial Landmarks in the Wild: A large-scale, real-world database for facial landmark localization", + "authors": [ + "Martin Köstinger", + "Paul Wohlhart", + "Peter M. Roth", + "Horst Bischof" + ], + "published": 1412121600, + "url": "https://pdfs.semanticscholar.org/presentation/b4d2/151e29fb12dbe5d164b430273de65103d39b.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "d5c22cb54bd23f17289c31abb84baaf0cd439540", + "title": "Face Recognition by Humans: Nineteen Results All Computer Vision Researchers Should Know About", + "authors": [ + "Pawan Sinha", + "Benjamin J. Balas", + "Yuri Ostrovsky", + "Richard Russell" + ], + "published": 1412121600, + "url": "https://pdfs.semanticscholar.org/presentation/d5c2/2cb54bd23f17289c31abb84baaf0cd439540.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "8e1f99812078e91a0d5722eaf54f20c6fc65df4c", + "title": "Challenges of the Open Source Component Marketplace in the Industry", + "authors": [ + "Claudia P. 
Ayala", + "Øyvind Hauge", + "Reidar Conradi", + "Xavier Franch", + "Jingyue Li", + "Ketil Sandanger Velle" + ], + "published": 1412121600, + "url": "https://pdfs.semanticscholar.org/presentation/8e1f/99812078e91a0d5722eaf54f20c6fc65df4c.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "47bae254f82bdf4b66ea493afebf5a12c7291db2", + "title": "Pervasive computing: vision and challenges", + "authors": [ + "Mahadev Satyanarayanan" + ], + "published": 1412121600, + "url": "https://pdfs.semanticscholar.org/presentation/47ba/e254f82bdf4b66ea493afebf5a12c7291db2.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "cf9f048a00f14ec5348a1dfe76a2e23b51e0b26a", + "title": "Challenges and opportunities in enterprise-wide optimization in the pharmaceutical industry", + "authors": [ + "José Miguel Laínez", + "E. Schaefer", + "Gintaras V. Reklaitis" + ], + "published": 1511903839.603717, + "url": "https://pdfs.semanticscholar.org/presentation/cf9f/048a00f14ec5348a1dfe76a2e23b51e0b26a.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "cf0e634e608cf1d5574266d8c0110b75b3e615fd", + "title": "Industry 4.0 - Challenges in Anti-Counterfeiting", + "authors": [ + "Christian Thiel", + "Christoph Thiel" + ], + "published": 1511948094.409062, + "url": "https://pdfs.semanticscholar.org/presentation/cf0e/634e608cf1d5574266d8c0110b75b3e615fd.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "6b9267cc7b4277e4abdefb52157bc69d318412c9", + "title": "Energy demand forecasting: industry practices and challenges", + "authors": [ + "Mathieu Sinn" + ], + "published": 1447968149.539, + "url": "https://pdfs.semanticscholar.org/presentation/6b92/67cc7b4277e4abdefb52157bc69d318412c9.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "09e47a1b9abcce753ce124eab586d1cb03abdde4", + "title": "Hardware Acceleration Technologies in Computer Algebra : Challenges and Impact", + "authors": [ + "Sardar Anisul Haque" + ], + "published": 1427429814, + "url": "https://pdfs.semanticscholar.org/presentation/09e4/7a1b9abcce753ce124eab586d1cb03abdde4.pdf" + }, + { + "contentType": "PRESENTATION", + "id": "6442a7dc149de6763b0959d924ec5a3dbe09ec1f", + "title": "New results on the coarseness of bicolored point sets", + "authors": [ + "José Miguel Díaz-Báñez", + "Ruy Fabila Monroy", + "Pablo Pérez-Lantero", + "Inmaculada Ventura" + ], + "published": 1385693873, + "url": "https://pdfs.semanticscholar.org/presentation/6442/a7dc149de6763b0959d924ec5a3dbe09ec1f.pdf" + } + ], + "featuredContent": [] +}
\ No newline at end of file
