@article{1621452,
  abstract = {This paper considers how we feel about the content we see or hear. As opposed to the cognitive content information composed of the facts about the genre, temporal content structures, and spatiotemporal content elements, we are interested in obtaining information about the feelings, emotions, and moods evoked by a speech, audio, or video clip. We refer to the latter as the affective content, and to terms such as happy or exciting as the affective labels of an audiovisual signal. In the first part of the paper, we explore the possibilities for representing and modeling the affective content of an audiovisual signal to effectively bridge the affective gap. Without losing generality, we refer to this signal simply as video, which we see as an image sequence with an accompanying soundtrack. Then, we show the high potential of affective video content analysis for enhancing the content recommendation functionalities of future PVRs and VOD systems. We conclude this paper by outlining some interesting research challenges in the field.},
  author = {Hanjalic, A.},
  doi = {10.1109/MSP.2006.1621452},
  interhash = {86afbc088a73b1bdcb2d509f2f41c711},
  intrahash = {ebd87b66c699f7ae166e1224030c6200},
  issn = {1053-5888},
  journal = {IEEE Signal Processing Magazine},
  month = {March},
  number = {2},
  pages = {90--100},
  title = {Extracting moods from pictures and sounds: towards truly personalized TV},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=1621452},
  volume = {23},
  year = {2006}
}