@incollection{agrawal2014mining,
  abstract = {We propose a system for mining videos from the web for supplementing the content of electronic textbooks in order to enhance their utility. Textbooks are generally organized into sections such that each section explains very few concepts and every concept is primarily explained in one section. Building upon these principles from the education literature and drawing upon the theory of },
  author = {Agrawal, Rakesh and Christoforaki, Maria and Gollapudi, Sreenivas and Kannan, Anitha and Kenthapadi, Krishnaram and Swaminathan, Adith},
  booktitle = {Formal Concept Analysis},
  doi = {10.1007/978-3-319-07248-7_16},
  editor = {Glodeanu, Cynthia Vera and Kaytoue, Mehdi and Sacarea, Christian},
  interhash = {fc34406f5a91a9561ba0e12b98830f28},
  intrahash = {76ac675e26647d14199da79c3467bc90},
  isbn = {978-3-319-07247-0},
  language = {English},
  pages = {219--234},
  publisher = {Springer International Publishing},
  series = {Lecture Notes in Computer Science},
  title = {Mining Videos from the Web for Electronic Textbooks},
  url = {http://dx.doi.org/10.1007/978-3-319-07248-7_16},
  volume = 8478,
  year = 2014
}

@mastersthesis{bottger2012konzept,
  abstract = {Collaborative tagging systems give users the ability to freely annotate resources on the World Wide Web. They allow users to attach freely chosen keywords, so-called tags, to arbitrary resources (social tagging). In a broader sense, social tagging is nothing other than the indexing of resources by the users themselves. These tag assignments are helpful in many ways, both for the individual user and for the community as a whole: tags can express personal ideas about or assessments of a resource, they can serve as a means of communication among users or user groups, and they support navigation, search, and the serendipitous discovery of new resources. Tagging resources is a cognitively demanding task for inexperienced users; tag recommenders, which suggest suitable tags, can provide support. UniVideo is the video portal of the Universität Kassel, which allows every member of the university to publish videos and make them accessible worldwide via the WWW. Uploaded videos must be tagged by their owners, and the resulting structure in turn serves as the basis for navigation within UniVideo. This thesis discusses four different tag-recommender approaches theoretically and examines and evaluates their practical implementation for UniVideo. It first introduces the foundations of social tagging and the architecture of UniVideo before describing the implementation of the four individual tag recommenders. Finally, it shows how these individual tag recommenders can be merged into a hybrid tag recommender.},
  address = {Kassel},
  author = {Böttger, Sebastian},
  interhash = {8fd8ce9278d61f8bd5292d7aeab9aacd},
  intrahash = {3c2ffd52e7081b66bf420f993d9144bb},
  month = apr,
  school = {Universität Kassel},
  title = {Konzept und Umsetzung eines Tag-Recommenders für Video-Ressourcen am Beispiel UniVideo},
  type = {Bachelor Thesis},
  url = {http://www.uni-kassel.de/~seboettg/ba-thesis.pdf},
  year = 2012
}

@article{song2012video,
  abstract = {This paper considers the problem of web video geolocation: determining where on the Earth a web video was taken. By analyzing a 6.5-million geotagged web video dataset, we observe that there exist inherent geographic ties between a video and its relevant videos (related videos and same-author videos). This social relationship supplies a direct and effective cue for locating the video in a particular region on the Earth. Based on this observation, we propose an effective web video geolocation algorithm that propagates geotags along the web video social relationship graph. For videos that have no geotagged relevant videos, we collect geotagged relevant images that are content-similar to the video (sharing some visual or textual information with it) as cues for inferring its location. Experiments demonstrate the effectiveness of both methods, with geolocation accuracy considerably better than state-of-the-art approaches. Finally, an online web video geolocation system, Video2Location (V2L), is developed to provide public access to our algorithm.},
  author = {Song, Yi-Cheng and Zhang, Yong-Dong and Cao, Juan and Xia, Tian and Liu, Wu and Li, Jin-Tao},
  doi = {10.1109/TMM.2011.2172937},
  interhash = {090791b9f4e0737f35e40af91c4475d2},
  intrahash = {40d777e2e4a83e28c75a1c8ba0554153},
  issn = {1520-9210},
  journal = {IEEE Transactions on Multimedia},
  month = apr,
  number = 2,
  pages = {456--470},
  publisher = {IEEE},
  title = {Web Video Geolocation by Geotagged Social Resources},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6054059},
  volume = 14,
  year = 2012
}

@article{halvey2010asynchronous,
  abstract = {A number of multimedia tasks and environments are collaborative in nature and involve contributions from more than one individual. Examples of such tasks include organising photographs or videos from multiple people at a large event, students working together to complete a class project, or artists and/or animators working on a production. Despite this, current state-of-the-art applications created to assist in multimedia search and organisation focus on a single user searching alone and do not take into consideration the collaborative nature of a large number of multimedia tasks. The limited work on collaborative search for multimedia applications has concentrated mostly on synchronous, and quite often co-located, collaboration between persons. However, these collaborative scenarios are not always practical or feasible. To overcome these shortcomings, we have created an innovative system for online video search that provides mechanisms for groups of users to collaborate both asynchronously and remotely on video search tasks. To evaluate our system, a user evaluation was conducted that simulated multiple conditions and scenarios for collaboration, varying awareness, division of labour, sense making and persistence. The outcome of this evaluation demonstrates the benefit and usability of our system for asynchronous and remote collaboration between users. In addition, the results of this evaluation provide a comparison between implicit and explicit collaboration in the same search system.},
  author = {Halvey, Martin and Vallet, David and Hannah, David and Feng, Yue and Jose, Joemon M.},
  doi = {10.1016/j.ipm.2009.11.007},
  interhash = {11f10c5e6d01e3256edc0eb01feebda0},
  intrahash = {d042cdee618b3b362368ccc29d0c35ad},
  issn = {0306-4573},
  journal = {Information Processing \& Management},
  number = 6,
  pages = {733--748},
  title = {An asynchronous collaborative search system for online video search},
  url = {http://www.sciencedirect.com/science/article/pii/S0306457309001447},
  volume = 46,
  year = 2010
}

@inproceedings{siersdorfer2009automatic,
  abstract = {The analysis of the leading social video sharing platform YouTube reveals a high amount of redundancy, in the form of videos with overlapping or duplicated content. In this paper, we show that this redundancy can provide useful information about connections between videos. We reveal these links using robust content-based video analysis techniques and exploit them for generating new tag assignments. To this end, we propose different tag propagation methods for automatically obtaining richer video annotations. Our techniques provide the user with additional information about videos, and lead to enhanced feature representations for applications such as automatic data organization and search. Experiments on video clustering and classification as well as a user evaluation demonstrate the viability of our approach.},
  acmid = {1572010},
  address = {New York, NY, USA},
  author = {Siersdorfer, Stefan and San Pedro, Jose and Sanderson, Mark},
  booktitle = {Proceedings of the 32nd International ACM SIGIR Conference on Research and Development in Information Retrieval},
  doi = {10.1145/1571941.1572010},
  interhash = {276b49e417d441ba50bfc6e4b85be1f3},
  intrahash = {71c3a120e154ed135408292eb4b96278},
  isbn = {978-1-60558-483-6},
  location = {Boston, MA, USA},
  numpages = {8},
  pages = {395--402},
  publisher = {ACM},
  series = {SIGIR '09},
  title = {Automatic video tagging using content redundancy},
  url = {http://doi.acm.org/10.1145/1571941.1572010},
  year = 2009
}