@comment{
  Cleaned BibSonomy/PUMA export of ECML PKDD 2009 accepted-paper pages.
  All six entries previously shared the duplicate key "www.ecmlpkdd2009.net"
  (classic BibTeX keeps only the first and warns "repeated entry"); each entry
  now has a unique descriptive key. Scraped page-title prefixes containing the
  non-ASCII character U+00BB were normalised, acronyms brace-protected, and
  empty description fields removed. URLs, hashes, keywords, and timestamps are
  preserved verbatim from the export.
}

@electronic{ecmlpkdd2009-nary-coclustering,
  title     = {{ECML} {PKDD} 2009: Parameter-free Hierarchical Co-Clustering by {$n$}-Ary Splits},
  url       = {http://www.ecmlpkdd2009.net/program/accepted-papers/parameter-free-hierarchical-co-clustering-by-n-ary-splits/},
  biburl    = {https://puma.uni-kassel.de/url/472d0c841f44e889865ba5595300bdeb/benz},
  keywords  = {attended clustering co-clustering ecmlpkdd hierarchical},
  added-at  = {2011-02-04T16:08:06.000+0100},
  interhash = {472d0c841f44e889865ba5595300bdeb},
  intrahash = {472d0c841f44e889865ba5595300bdeb},
}

@electronic{ecmlpkdd2009-matrix-factorization,
  title     = {{ECML} {PKDD} 2009: A Matrix Factorization Approach for Integrating Multiple Data Views},
  url       = {http://www.ecmlpkdd2009.net/program/accepted-papers/a-matrix-factorization-approach-for-integrating-multiple-data-views/},
  biburl    = {https://puma.uni-kassel.de/url/1b8a9ace62c84a0136d9f4908c362526/benz},
  keywords  = {attended clustering ecmlpkdd matrix_factorization},
  added-at  = {2011-02-04T16:08:06.000+0100},
  interhash = {1b8a9ace62c84a0136d9f4908c362526},
  intrahash = {1b8a9ace62c84a0136d9f4908c362526},
}

@electronic{ecmlpkdd2009-event-logs,
  title     = {{ECML} {PKDD} 2009: One Graph is Worth a Thousand Logs: Uncovering Hidden Structures in Massive System Event Logs},
  url       = {http://www.ecmlpkdd2009.net/program/accepted-papers/one-graph-is-worth-a-thousand-logs-uncovering-hidden-structures-in-massive-system-event-logs/},
  biburl    = {https://puma.uni-kassel.de/url/35eb2d07f4ffbc279cb7fff29b61a1bd/benz},
  keywords  = {attended ecmlpkdd text_mining},
  added-at  = {2011-02-04T16:08:06.000+0100},
  interhash = {35eb2d07f4ffbc279cb7fff29b61a1bd},
  intrahash = {35eb2d07f4ffbc279cb7fff29b61a1bd},
}

@electronic{ecmlpkdd2009-higher-order-features,
  title       = {{ECML} {PKDD} 2009: Leveraging Higher Order Dependencies Between Features for Text Classification},
  url         = {http://www.ecmlpkdd2009.net/program/accepted-papers/leveraging-higher-order-dependencies-between-features-for-text-classification/},
  biburl      = {https://puma.uni-kassel.de/url/a96fcbe9564294423bb142b59281a805/benz},
  keywords    = {attended ecmlpkdd text_mining},
  added-at    = {2011-02-04T16:08:06.000+0100},
  description = {Traditional machine learning methods only consider relationships between feature values within individual data instances while disregarding the dependencies that link features across instances. In this work, we develop a general approach to supervised learning by leveraging higher-order dependencies between features. We introduce a novel Bayesian framework for classification named Higher Order Naive Bayes (HONB). Unlike approaches that assume data instances are independent, HONB leverages co-occurrence relations between feature values across different instances. Additionally, we generalize our framework by developing a novel data-driven space transformation that allows any classifier operating in vector spaces to take advantage of these higher-order co-occurrence relations. Results obtained on several benchmark text corpora demonstrate that higher-order approaches achieve significant improvements in classification accuracy over the baseline (first-order) methods.},
  interhash   = {a96fcbe9564294423bb142b59281a805},
  intrahash   = {a96fcbe9564294423bb142b59281a805},
}

@electronic{ecmlpkdd2009-peculiar-compositions,
  title       = {{ECML} {PKDD} 2009: Mining Peculiar Compositions of Frequent Substrings from Sparse Text Data Using Background Texts},
  url         = {http://www.ecmlpkdd2009.net/program/accepted-papers/mining-peculiar-compositions-of-frequent-substrings-from-sparse-text-data-using-background-texts/},
  biburl      = {https://puma.uni-kassel.de/url/107e82a7ff5a3e48da2f2a3fc4c487a0/benz},
  keywords    = {attended ecmlpkdd text_mining},
  added-at    = {2011-02-04T16:08:06.000+0100},
  description = {We consider mining unusual patterns from text T. Unlike existing methods which assume probabilistic models and use simple estimation methods, we employ a set B of background text in addition to T and compositions w=xy of x and y as patterns. A string w is peculiar if there exist x and y such that w=xy, each of x and y is more frequent in B than in T, and conversely w=xy is more frequent in T. The frequency of xy in T is very small since x and y are infrequent in T, but xy is relatively abundant in T compared to xy in B. Despite these complex conditions for peculiar compositions, we develop a fast algorithm to find peculiar compositions using the suffix tree. Experiments using DNA sequences show scalability of our algorithm due to our pruning techniques and the superiority of the concept of the peculiar composition.},
  interhash   = {107e82a7ff5a3e48da2f2a3fc4c487a0},
  intrahash   = {107e82a7ff5a3e48da2f2a3fc4c487a0},
}

@electronic{ecmlpkdd2009-query-disambiguation,
  title       = {{ECML} {PKDD} 2009: Learning to Disambiguate Search Queries from Short Sessions},
  url         = {http://www.ecmlpkdd2009.net/program/accepted-papers/learning-to-disambiguate-search-queries-from-short-sessions/},
  biburl      = {https://puma.uni-kassel.de/url/aff09346f189c700f41aff786dcb8a63/benz},
  keywords    = {attended ecmlpkdd text_mining},
  added-at    = {2011-02-04T16:08:06.000+0100},
  description = {Web searches tend to be short and ambiguous. It is therefore not surprising that Web query disambiguation is an actively researched topic. To provide a personalized experience for a user, most existing work relies on search engine log data in which the search activities of that particular user, as well as other users, are recorded over long periods of time. Such approaches may raise privacy concerns and may be difficult to implement for pragmatic reasons. We present an approach to Web query disambiguation that bases its predictions only on a short glimpse of user search activity, captured in a brief session of 4-6 previous searches on average. Our method exploits the relations of the current search session to previous similarly short sessions of other users in order to predict the user's intentions and is based on Markov logic, a statistical relational learning model that has been successfully applied to challenging language problems in the past. We present empirical results that demonstrate the effectiveness of our proposed approach on data collected from a commercial general-purpose search engine.},
  interhash   = {aff09346f189c700f41aff786dcb8a63},
  intrahash   = {aff09346f189c700f41aff786dcb8a63},
}