@article{1621452,
  abstract = {This paper considers how we feel about the content we see or hear. As opposed to the cognitive content information composed of the facts about the genre, temporal content structures and spatiotemporal content elements, we are interested in obtaining the information about the feelings, emotions, and moods evoked by a speech, audio, or video clip. We refer to the latter as the affective content, and to the terms such as happy or exciting as the affective labels of an audiovisual signal. In the first part of the paper, we explore the possibilities for representing and modeling the affective content of an audiovisual signal to effectively bridge the affective gap. Without losing generality, we refer to this signal simply as video, which we see as an image sequence with an accompanying soundtrack. Then, we show the high potential of the affective video content analysis for enhancing the content recommendation functionalities of the future PVRs and VOD systems. We conclude this paper by outlining some interesting research challenges in the field.},
  author = {Hanjalic, A.},
  doi = {10.1109/MSP.2006.1621452},
  interhash = {86afbc088a73b1bdcb2d509f2f41c711},
  intrahash = {ebd87b66c699f7ae166e1224030c6200},
  issn = {1053-5888},
  journal = {IEEE Signal Processing Magazine},
  month = {March},
  number = 2,
  pages = {90--100},
  title = {Extracting moods from pictures and sounds: towards truly personalized TV},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=1621452},
  volume = 23,
  year = 2006
}

@article{Kulkarni:2009:Biomed-Eng-Online:19656402,
  abstract = {Facial expressions are important in facilitating human communication and interactions. Also, they are used as an important tool in behavioural studies and in medical rehabilitation. Facial image based mood detection techniques may provide a fast and practical approach for non-invasive mood detection. The purpose of the present study was to develop an intelligent system for facial image based expression classification using committee neural networks. Several facial parameters were extracted from a facial image and were used to train several generalized and specialized neural networks. Based on initial testing, the best performing generalized and specialized neural networks were recruited into decision making committees, which formed an integrated committee neural network system. The integrated committee neural network system was then evaluated using data obtained from subjects not used in training or in initial testing. The system correctly identified the facial expression in 255 of the 282 images (90.43% of the cases) from 62 subjects not used in training or in initial testing. Committee neural networks offer a potential tool for image based mood detection.},
  author = {Kulkarni, S. S. and Reddy, N. P. and Hariharan, S. I.},
  doi = {10.1186/1475-925X-8-16},
  interhash = {9bcd872ea86213a2f7d3271b0e6eb7d1},
  intrahash = {14c48c03f40a1c8bdc22314fcdf292bf},
  journal = {BioMedical Engineering OnLine},
  pages = {16},
  pmid = {19656402},
  title = {Facial expression (mood) recognition from facial images using committee neural networks},
  url = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2731770/},
  volume = 8,
  year = 2009
}

@inproceedings{Suchanek2009SOFIE,
  address = {New York, NY, USA},
  author = {Suchanek, Fabian M. and Sozio, Mauro and Weikum, Gerhard},
  booktitle = {International World Wide Web Conference (WWW 2009)},
  interhash = {0d9712def2e938caecd7a5c151d13ed1},
  intrahash = {70263a32bd2c3cf69b0b7b25d875c6d4},
  location = {Madrid, Spain},
  publisher = {ACM Press},
  title = {{SOFIE: A Self-Organizing Framework for Information Extraction}},
  year = 2009
}

@article{voelker2008aeon,
  abstract = {OntoClean is an approach towards the formal evaluation of taxonomic relations in ontologies. The application of OntoClean consists of two main steps. First, concepts are tagged according to meta-properties known as rigidity, unity, dependency and identity. Second, the tagged concepts are checked according to predefined constraints to discover taxonomic errors. Although OntoClean is well documented in numerous publications, it is still used rather infrequently due to the high costs of application. Especially, the manual tagging of concepts with the correct meta-properties requires substantial efforts of highly experienced ontology engineers. In order to facilitate the use of OntoClean and to enable the evaluation of real-world ontologies, we provide AEON, a tool which automatically tags concepts with appropriate OntoClean meta-properties and performs the constraint checking. We use the Web as an embodiment of world knowledge, where we search for patterns that indicate how to properly tag concepts. We thoroughly evaluated our approach against a manually created gold standard. The evaluation shows the competitiveness of our approach while at the same time significantly lowering the costs. All of our results, i.e. the tool AEON as well as the experiment data, are publicly available.},
  address = {Amsterdam, The Netherlands},
  author = {Völker, Johanna and Vrandečić, Denny and Sure, York and Hotho, Andreas},
  interhash = {f14794f4961d0127dc50c1938eaef7ea},
  intrahash = {f8f0bb3e3495e7627770b470d1a5f1a3},
  issn = {1570-5838},
  journal = {Applied Ontology},
  number = {1-2},
  pages = {41--62},
  publisher = {IOS Press},
  title = {AEON - An approach to the automatic evaluation of ontologies},
  url = {http://portal.acm.org/citation.cfm?id=1412422},
  volume = 3,
  year = 2008
}

@article{4563045,
  abstract = {Image annotation is hard to do in an automatic way. In this paper, we propose a framework for image annotation that combines the benefits of three paradigms: automatic annotation, human intervention and entertainment activities. We also describe our proposal inside this framework, the ASAA (application for semi-automatic annotation) interface, a new computer game for image tagging. The application has a 3D game interface, and is supported by a game engine that uses a system for automatic image classification and gestural input to play the game. We present results of the performance of semantic models obtained with a training set enlarged by images annotated during the game activity as well as usability tests of the application.},
  author = {Jesus, R. and Goncalves, D. and Abrantes, A. J. and Correia, N.},
  doi = {10.1109/CVPRW.2008.4563045},
  interhash = {cdd13517badd0fdd5d6455a6ea971cb4},
  intrahash = {0190ab8d7a4603a19c795e205fdf87ca},
  journal = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops (CVPR Workshops 2008)},
  month = {June},
  pages = {1--8},
  title = {Playing games as a way to improve automatic image annotation},
  year = 2008
}

@inproceedings{mladenic98turning,
  author = {Mladenic, Dunja},
  booktitle = {European Conference on Artificial Intelligence (ECAI 1998)},
  interhash = {08b68484e893529a536357659e933230},
  intrahash = {e2f7ae71f46593b0feda6c5dc6755990},
  pages = {473--474},
  title = {Turning Yahoo into an Automatic Web-Page Classifier},
  url = {http://citeseer.nj.nec.com/mladenic98turning.html},
  year = 1998
}

@inproceedings{Chakrabartietal98,
  author = {Chakrabarti, S. and Dom, B. and Gibson, D. and Kleinberg, J. and Raghavan, P. and Rajagopalan, S.},
  booktitle = {Proceedings of the 7th International World Wide Web Conference (WWW7)},
  interhash = {911034f92829572cb2c211ad26849bda},
  intrahash = {9333fd6bd5864b94c9644e979c7a9fec},
  location = {Brisbane, Australia},
  pages = {65--74},
  title = {Automatic resource compilation by analyzing hyperlink structure and associated text},
  url = {http://citeseer.nj.nec.com/chakrabarti98automatic.html},
  year = 1998
}

@article{MobasheretalCACM,
  author = {Mobasher, B. and Cooley, R. and Srivastava, J.},
  interhash = {98d5090dafb39596483c75dc4a6846c3},
  intrahash = {a7a6cdb6e0790b276d7f0642991e734e},
  journal = {Communications of the ACM},
  number = 8,
  pages = {142--151},
  title = {Automatic personalization based on Web usage mining},
  volume = 43,
  year = 2000
}

@book{Man2001a,
  address = {Amsterdam/Philadelphia},
  author = {Mani, Inderjeet},
  interhash = {af9f39887357cd72ad547dfe4e6345ef},
  intrahash = {67eff892b9728c22b51b108deafb2d6b},
  publisher = {John Benjamins Publishing Company},
  series = {Natural Language Processing},
  title = {Automatic Summarization},
  volume = 3,
  year = 2001
}