@inproceedings{gaugaz2012predicting,
  author    = {Gaugaz, Julien and Siehndel, Patrick and Demartini, Gianluca and Iofciu, Tereza and Georgescu, Mihai and Henze, Nicola},
  title     = {Predicting the Future Impact of News Events},
  booktitle = {Proceedings of the 34th European Conference on Information Retrieval ({ECIR} 2012)},
  location  = {Barcelona, Spain},
  month     = apr,
  year      = 2012,
  url       = {http://www.l3s.de/web/page25g.do?kcond12g.att1=1833},
  abstract  = {The amount of news content on the Web is increasing: Users can access news articles coming from a variety of sources on the Web: from newswires, news agencies, blogs, and at various places, e.g. even within Web search engines result pages. Anyhow, it still is a challenge for current search engines to decide which news events are worth being shown to the user (either for a newsworthy query or in a news portal). In this paper we define the task of predicting the future impact of news events. Being able to predict event impact will, for example, enable a newspaper to decide whether to follow a specific event or not, or a news search engine which stories to display. We define a flexible framework that, given some definition of impact, can predict its future development at the beginning of the event. We evaluate several possible definitions of event impact and experimentally identify the best features for each of them.},
  interhash = {dc898856b5a18bf1cb9307d1bd9b5268},
  intrahash = {f29c05f9a4fc3bb2189a965d95f622f9},
}

@inproceedings{1557077,
  author    = {Leskovec, Jure and Backstrom, Lars and Kleinberg, Jon},
  title     = {Meme-tracking and the dynamics of the news cycle},
  booktitle = {{KDD} '09: Proceedings of the 15th {ACM} {SIGKDD} international conference on Knowledge discovery and data mining},
  location  = {Paris, France},
  pages     = {497--506},
  publisher = {ACM},
  address   = {New York, NY, USA},
  year      = 2009,
  isbn      = {978-1-60558-495-9},
  doi       = {10.1145/1557019.1557077},
  url       = {http://portal.acm.org/citation.cfm?id=1557077},
  abstract  = {Tracking new topics, ideas, and "memes" across the Web has been an issue of considerable interest. Recent work has developed methods for tracking topic shifts over long time scales, as well as abrupt spikes in the appearance of particular named entities. However, these approaches are less well suited to the identification of content that spreads widely and then fades over time scales on the order of days - the time scale at which we perceive news and events. We develop a framework for tracking short, distinctive phrases that travel relatively intact through on-line text; developing scalable algorithms for clustering textual variants of such phrases, we identify a broad class of memes that exhibit wide spread and rich variation on a daily basis. As our principal domain of study, we show how such a meme-tracking approach can provide a coherent representation of the news cycle - the daily rhythms in the news media that have long been the subject of qualitative interpretation but have never been captured accurately enough to permit actual quantitative analysis. We tracked 1.6 million mainstream media sites and blogs over a period of three months with the total of 90 million articles and we find a set of novel and persistent temporal patterns in the news cycle. In particular, we observe a typical lag of 2.5 hours between the peaks of attention to a phrase in the news media and in blogs respectively, with divergent behavior around the overall peak and a "heartbeat"-like pattern in the handoff between news and blogs. We also develop and analyze a mathematical model for the kinds of temporal variation that the system exhibits.},
  interhash = {f60a96f8adb340b62bacbc90fdb3e069},
  intrahash = {051df7b09db1d7806909cc22c1a362c8},
}

@inproceedings{garbin2005disambiguating,
  author    = {Garbin, Eric and Mani, Inderjeet},
  title     = {Disambiguating toponyms in news},
  booktitle = {Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing},
  location  = {Vancouver, British Columbia, Canada},
  pages     = {363--370},
  numpages  = {8},
  publisher = {Association for Computational Linguistics},
  address   = {Stroudsburg, PA, USA},
  year      = 2005,
  doi       = {10.3115/1220575.1220621},
  url       = {http://dx.doi.org/10.3115/1220575.1220621},
  acmid     = {1220621},
  abstract  = {This research is aimed at the problem of disambiguating toponyms (place names) in terms of a classification derived by merging information from two publicly available gazetteers. To establish the difficulty of the problem, we measured the degree of ambiguity, with respect to a gazetteer, for toponyms in news. We found that 67.82% of the toponyms found in a corpus that were ambiguous in a gazetteer lacked a local discriminator in the text. Given the scarcity of human-annotated data, our method used unsupervised machine learning to develop disambiguation rules. Toponyms were automatically tagged with information about them found in a gazetteer. A toponym that was ambiguous in the gazetteer was automatically disambiguated based on preference heuristics. This automatically tagged data was used to train a machine learner, which disambiguated toponyms in a human-annotated news corpus at 78.5% accuracy.},
  interhash = {566910cb6e9745ee70da19d2ccafaffa},
  intrahash = {de574cf3bff3a3748fcd9bd5a9a0f3d1},
}