@article{Kulkarni:2009:Biomed-Eng-Online:19656402,
  abstract  = {Facial expressions are important in facilitating human communication and interactions. Also, they are used as an important tool in behavioural studies and in medical rehabilitation. Facial image based mood detection techniques may provide a fast and practical approach for non-invasive mood detection. The purpose of the present study was to develop an intelligent system for facial image based expression classification using committee neural networks. Several facial parameters were extracted from a facial image and were used to train several generalized and specialized neural networks. Based on initial testing, the best performing generalized and specialized neural networks were recruited into decision making committees which formed an integrated committee neural network system. The integrated committee neural network system was then evaluated using data obtained from subjects not used in training or in initial testing. The system correctly identified the correct facial expression in 255 of the 282 images (90.43% of the cases), from 62 subjects not used in training or in initial testing. Committee neural networks offer a potential tool for image based mood detection.},
  author    = {Kulkarni, S. S. and Reddy, N. P. and Hariharan, S. I.},
  doi       = {10.1186/1475-925X-8-16},
  interhash = {9bcd872ea86213a2f7d3271b0e6eb7d1},
  intrahash = {14c48c03f40a1c8bdc22314fcdf292bf},
  journal   = {BioMedical Engineering OnLine},
  pages     = {16},
  pmid      = {19656402},
  title     = {Facial expression (mood) recognition from facial images using committee neural networks},
  url       = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2731770/},
  volume    = 8,
  year      = 2009,
}

@inproceedings{taigman2014deepface,
  author    = {Taigman, Yaniv and Yang, Ming and Ranzato, Marc'Aurelio and Wolf, Lior},
  booktitle = {Conference on Computer Vision and Pattern Recognition (CVPR)},
  interhash = {19f4a0ed09dd182d9a45466f323efa72},
  intrahash = {5704a9ab4c0abe0c34b34b7d564b9401},
  title     = {{DeepFace}: Closing the Gap to Human-Level Performance in Face Verification},
  year      = 2014,
}

@article{phillips2000introduction,
  abstract  = {On the basis of media hype alone, you might conclude that biometric passwords will soon replace their alphanumeric counterparts with versions that cannot be stolen, forgotten, lost, or given to another person. But what if the actual performance of these systems falls short of the estimates? The authors designed this article to provide sufficient information to know what questions to ask when evaluating a biometric system, and to assist in determining whether performance levels meet the requirements of an application. For example, a low-performance biometric is probably sufficient for reducing-as opposed to eliminating-fraud. Likewise, completely replacing an existing security system with a biometric-based one may require a high-performance biometric system, or the required performance may be beyond what current technology can provide. Of the biometrics that give the user some control over data acquisition, voice, face, and fingerprint systems have undergone the most study and testing-and therefore occupy the bulk of this discussion. This article also covers the tools and techniques of biometric testing.},
  author    = {Phillips, P. J. and Martin, A. and Wilson, C. L. and Przybocki, M.},
  doi       = {10.1109/2.820040},
  interhash = {566f7a9e16fbd8437907e9ba2a34bb4b},
  intrahash = {b84d37310fed293b821c56b00b3bfb36},
  issn      = {0018-9162},
  journal   = {Computer},
  month     = feb,
  number    = 2,
  pages     = {56--63},
  title     = {An introduction evaluating biometric systems},
  volume    = 33,
  year      = 2000,
}

@inproceedings{chrupala2010named,
  author    = {Chrupala, Grzegorz and Klakow, Dietrich},
  booktitle = {LREC},
  crossref  = {conf/lrec/2010},
  editor    = {Calzolari, Nicoletta and Choukri, Khalid and Maegaard, Bente and Mariani, Joseph and Odijk, Jan and Piperidis, Stelios and Rosner, Mike and Tapias, Daniel},
  ee        = {http://www.lrec-conf.org/proceedings/lrec2010/summaries/538.html},
  interhash = {85b8f5e04b66df3fe9411fc8f81ae43a},
  intrahash = {68b98f37dc2dd0a89f580d9e6b65c780},
  isbn      = {2-9517408-6-7},
  publisher = {European Language Resources Association},
  title     = {A Named Entity Labeler for {German}: Exploiting {Wikipedia} and Distributional Clusters},
  url       = {http://lexitron.nectec.or.th/public/LREC-2010_Malta/pdf/538_Paper.pdf},
  year      = 2010,
}

@inproceedings{gunes2012eager,
  abstract     = {Key to named entity recognition, the manual gazetteering of entity lists is a costly, errorprone process that often yields results that are incomplete and suffer from sampling bias. Exploiting current sources of structured information, we propose a novel method for extending minimal seed lists into complete gazetteers. Like previous approaches, we value Wikipedia as a huge, well-curated, and relatively unbiased source of entities. However, in contrast to previous work, we exploit not only its content, but also its structure, as exposed in DBpedia. We extend gazetteers through Wikipedia categories, carefully limiting the impact of noisy categorizations. The resulting gazetteers easily outperform previous approaches on named entity recognition.},
  author       = {Gunes, Omer and Schallhart, Christian and Furche, Tim and Lehmann, Jens and Ngomo, Axel-Cyrille Ngonga},
  booktitle    = {Proceedings of the 3rd Workshop on the People's Web Meets NLP: Collaboratively Constructed Semantic Resources and their Applications to NLP},
  interhash    = {20c47a41c89ff6c2a8f7bb524185b8ac},
  intrahash    = {3eac4c009268cd4f2c264dd24053f8a6},
  month        = jul,
  organization = {Association for Computational Linguistics},
  pages        = {29--33},
  title        = {{EAGER}: extending automatically gazetteers for entity recognition},
  url          = {http://acl.eldoc.ub.rug.nl/mirror/W/W12/W12-4005.pdf},
  year         = 2012,
}

@inproceedings{finin2010annotating,
  abstract  = {We describe our experience using both Amazon Mechanical Turk (MTurk) and Crowd-Flower to collect simple named entity annotations for Twitter status updates. Unlike most genres that have traditionally been the focus of named entity experiments, Twitter is far more informal and abbreviated. The collected annotations and annotation techniques will provide a first step towards the full study of named entity recognition in domains like Facebook and Twitter. We also briefly describe how to use MTurk to collect judgements on the quality of "word clouds."},
  acmid     = {1866709},
  address   = {Stroudsburg, PA, USA},
  author    = {Finin, Tim and Murnane, Will and Karandikar, Anand and Keller, Nicholas and Martineau, Justin and Dredze, Mark},
  booktitle = {Proceedings of the NAACL HLT 2010 Workshop on Creating Speech and Language Data with Amazon's Mechanical Turk},
  interhash = {0fa9636e69f2f516cdb6e11fffd8079b},
  intrahash = {f3ce5c15752dab9487220a3aae963655},
  location  = {Los Angeles, California},
  numpages  = {9},
  pages     = {80--88},
  publisher = {Association for Computational Linguistics},
  title     = {Annotating named entities in {Twitter} data with crowdsourcing},
  url       = {http://dl.acm.org/citation.cfm?id=1866696.1866709},
  year      = 2010,
}

@inproceedings{Keally:2011:PTP:2070942.2070968,
  abstract  = {The vast array of small wireless sensors is a boon to body sensor network applications, especially in the context awareness and activity recognition arena. However, most activity recognition deployments and applications are challenged to provide personal control and practical functionality for everyday use. We argue that activity recognition for mobile devices must meet several goals in order to provide a practical solution: user friendly hardware and software, accurate and efficient classification, and reduced reliance on ground truth. To meet these challenges, we present PBN: Practical Body Networking. Through the unification of TinyOS motes and Android smartphones, we combine the sensing power of on-body wireless sensors with the additional sensing power, computational resources, and user-friendly interface of an Android smartphone. We provide an accurate and efficient classification approach through the use of ensemble learning. We explore the properties of different sensors and sensor data to further improve classification efficiency and reduce reliance on user annotated ground truth. We evaluate our PBN system with multiple subjects over a two week period and demonstrate that the system is easy to use, accurate, and appropriate for mobile devices.},
  acmid     = {2070968},
  address   = {New York, NY, USA},
  author    = {Keally, Matthew and Zhou, Gang and Xing, Guoliang and Wu, Jianxin and Pyles, Andrew},
  booktitle = {Proceedings of the 9th ACM Conference on Embedded Networked Sensor Systems},
  doi       = {10.1145/2070942.2070968},
  interhash = {5e6a13d34026f65338cfa619054822c8},
  intrahash = {61e5e4559d031c4152b3f316c0aa5209},
  isbn      = {978-1-4503-0718-5},
  location  = {Seattle, Washington},
  numpages  = {14},
  pages     = {246--259},
  publisher = {ACM},
  series    = {SenSys '11},
  title     = {{PBN}: towards practical activity recognition using smartphone-based body sensor networks},
  url       = {http://doi.acm.org/10.1145/2070942.2070968},
  year      = 2011,
}

@techreport{elahmad2011robustness,
  abstract    = {We report a novel attack on two CAPTCHAs that have been widely deployed on the Internet, one being Google's home design and the other acquired by Google (i.e. reCAPTCHA). With a minor change, our attack program also works well on the latest ReCAPTCHA version, which uses a new defence mechanism that was unknown to us when we designed our attack. This suggests that our attack works in a fundamental level. Our attack appears to be applicable to a whole family of text CAPTCHAs that build on top of the popular segmentation-resistant mechanism of "crowding character together" for security. Next, we propose a novel framework that guides the application of our well-tested security engineering methodology for evaluating CAPTCHA robustness, and we propose a new general principle for CAPTCHA design.},
  author      = {El Ahmad, Ahmad S. and Yan, Jeff and Tayara, Mohamad},
  institution = {School of Computer Science, Newcastle University, UK},
  interhash   = {2d6bb0b3bad1f6a01c15e1bbd8bd7158},
  intrahash   = {3516bc8c24b04f63927808e82824004d},
  month       = may,
  title       = {The Robustness of {Google} {CAPTCHAs}},
  url         = {http://homepages.cs.ncl.ac.uk/jeff.yan/google.pdf},
  year        = 2011,
}

@inproceedings{zhu2010attacks,
  abstract  = {We systematically study the design of image recognition CAPTCHAs (IRCs) in this paper. We first review and examine all existing IRCs schemes and evaluate each scheme against the practical requirements in CAPTCHA applications, particularly in large-scale real-life applications such as Gmail and Hotmail. Then we present a security analysis of the representative schemes we have identified. For the schemes that remain unbroken, we present our novel attacks. For the schemes for which known attacks are available, we propose a theoretical explanation why those schemes have failed. Next, we provide a simple but novel framework for guiding the design of robust IRCs. Then we propose an innovative IRC called Cortcha that is scalable to meet the requirements of large-scale applications. It relies on recognizing objects by exploiting the surrounding context, a task that humans can perform well but computers cannot. An infinite number of types of objects can be used to generate challenges, which can effectively disable the learning process in machine learning attacks. Cortcha does not require the images in its image database to be labeled. Image collection and CAPTCHA generation can be fully automated. Our usability studies indicate that, compared with Google's text CAPTCHA, Cortcha allows a slightly higher human accuracy rate but on average takes more time to solve a challenge.},
  address   = {New York, NY, USA},
  author    = {Zhu, Bin B. and Yan, Jeff and Li, Qiujie and Yang, Chao and Liu, Jia and Xu, Ning and Yi, Meng and Cai, Kaiwei},
  booktitle = {CCS '10: Proceedings of the 17th ACM conference on Computer and communications security},
  doi       = {10.1145/1866307.1866329},
  ee        = {http://homepages.cs.ncl.ac.uk/jeff.yan/ccs10.pdf},
  interhash = {e95b041b4b155f5ff44977827e8680cd},
  intrahash = {3c8aa0e647903603ddce90c1642b89b2},
  isbn      = {978-1-4503-0245-6},
  location  = {Chicago, Illinois, USA},
  month     = oct,
  pages     = {187--200},
  publisher = {ACM},
  title     = {Attacks and design of image recognition {CAPTCHAs}},
  url       = {http://portal.acm.org/citation.cfm?id=1866307.1866329},
  year      = 2010,
}

@article{954342,
  abstract  = {As one of the most successful applications of image analysis and understanding, face recognition has recently received significant attention, especially during the past several years. At least two reasons account for this trend: the first is the wide range of commercial and law enforcement applications, and the second is the availability of feasible technologies after 30 years of research. Even though current machine recognition systems have reached a certain level of maturity, their success is limited by the conditions imposed by many real applications. For example, recognition of face images acquired in an outdoor environment with changes in illumination and/or pose remains a largely unsolved problem. In other words, current systems are still far away from the capability of the human perception system. This paper provides an up-to-date critical survey of still- and video-based face recognition research. There are two underlying motivations for us to write this survey paper: the first is to provide an up-to-date review of the existing literature, and the second is to offer some insights into the studies of machine recognition of faces. To provide a comprehensive survey, we not only categorize existing recognition techniques but also present detailed descriptions of representative methods within each category. In addition, relevant topics such as psychophysical studies, system evaluation, and issues of illumination and pose variation are covered.},
  address   = {New York, NY, USA},
  author    = {Zhao, W. and Chellappa, R. and Phillips, P. J. and Rosenfeld, A.},
  interhash = {0a8e3859cc7f6ea7bd51af1ee9ca1b36},
  intrahash = {549485a2446eef075c15c9a4ad00c64a},
  issn      = {0360-0300},
  journal   = {ACM Comput. Surv.},
  number    = 4,
  pages     = {399--458},
  publisher = {ACM Press},
  title     = {Face recognition: A literature survey},
  url       = {http://doi.acm.org/10.1145/954339.954342},
  volume    = 35,
  year      = 2003,
}