@inproceedings{conf/cla/BorchmannH16, author = {Borchmann, Daniel and Hanika, Tom}, booktitle = {CLA}, crossref = {conf/cla/2016}, editor = {Huchard, Marianne and Kuznetsov, Sergei}, ee = {http://ceur-ws.org/Vol-1624/paper5.pdf}, interhash = {e8ddef8aeb9b874f97a1a8230332b7c4}, intrahash = {8af414e45f306527e8316ac681fe7f08}, pages = {57-69}, publisher = {CEUR-WS.org}, series = {CEUR Workshop Proceedings}, title = {Some Experimental Results on Randomly Generating Formal Contexts.}, url = {http://dblp.uni-trier.de/db/conf/cla/cla2016.html#BorchmannH16}, volume = 1624, year = 2016 } @article{breiman2001random, abstract = {Random forests are a combination of tree predictors such that each tree depends on the values of a random vector sampled independently and with the same distribution for all trees in the forest. The generalization error for forests converges a.s. to a limit as the number of trees in the forest becomes large. The generalization error of a forest of tree classifiers depends on the strength of the individual trees in the forest and the correlation between them. Using a random selection of features to split each node yields error rates that compare favorably to }, author = {Breiman, Leo}, doi = {10.1023/A:1010933404324}, interhash = {4450d2e56555e7cb8f3817578e1dd4da}, intrahash = {b8187107bf870043f2f93669958858f1}, issn = {0885-6125}, journal = {Machine Learning}, language = {English}, number = 1, pages = {5-32}, publisher = {Kluwer Academic Publishers}, title = {Random Forests}, url = {http://dx.doi.org/10.1023/A%3A1010933404324}, volume = 45, year = 2001 } @book{janson2000theory, address = {New York; Chichester}, author = {Janson, Svante and Luczak, Tomasz and Rucinski, Andrzej}, interhash = {929294638db37c413b283ac468bbdade}, intrahash = {7bb074240f72009f515123f15afecefd}, isbn = {0471175412 9780471175414}, publisher = {John Wiley & Sons}, refid = {43340250}, title = {Theory of random graphs}, url = {http://www.amazon.com/Random-Graphs-Svante-Janson/dp/0471175412}, year = 2000 } @inproceedings{Yeh:2009:WRW:1708124.1708133, abstract = {Computing semantic relatedness of natural language texts is a key component of tasks such as information retrieval and summarization, and often depends on knowledge of a broad range of real-world concepts and relationships. We address this knowledge integration issue by computing semantic relatedness using personalized PageRank (random walks) on a graph derived from Wikipedia. This paper evaluates methods for building the graph, including link selection strategies, and two methods for representing input texts as distributions over the graph nodes: one based on a dictionary lookup, the other based on Explicit Semantic Analysis. We evaluate our techniques on standard word relatedness and text similarity datasets, finding that they capture similarity information complementary to existing Wikipedia-based relatedness measures, resulting in small improvements on a state-of-the-art measure.}, acmid = {1708133}, address = {Stroudsburg, PA, USA}, author = {Yeh, Eric and Ramage, Daniel and Manning, Christopher D. 
and Agirre, Eneko and Soroa, Aitor}, booktitle = {Proceedings of the 2009 Workshop on Graph-based Methods for Natural Language Processing}, interhash = {8b28cd800b6ad3929eef3b45de997e51}, intrahash = {ffd20a7357ca8e87d46e516589a7769e}, isbn = {978-1-932432-54-1}, location = {Suntec, Singapore}, numpages = {9}, pages = {41--49}, publisher = {Association for Computational Linguistics}, series = {TextGraphs-4}, title = {WikiWalk: random walks on Wikipedia for semantic relatedness}, url = {http://dl.acm.org/citation.cfm?id=1708124.1708133}, year = 2009 } @inproceedings{backstrom2011supervised, abstract = {Predicting the occurrence of links is a fundamental problem in networks. In the link prediction problem we are given a snapshot of a network and would like to infer which interactions among existing members are likely to occur in the near future or which existing interactions are we missing. Although this problem has been extensively studied, the challenge of how to effectively combine the information from the network structure with rich node and edge attribute data remains largely open.

We develop an algorithm based on Supervised Random Walks that naturally combines the information from the network structure with node and edge level attributes. We achieve this by using these attributes to guide a random walk on the graph. We formulate a supervised learning task where the goal is to learn a function that assigns strengths to edges in the network such that a random walker is more likely to visit the nodes to which new links will be created in the future. We develop an efficient training algorithm to directly learn the edge strength estimation function.

Our experiments on the Facebook social graph and large collaboration networks show that our approach outperforms state-of-the-art unsupervised approaches as well as approaches that are based on feature extraction.}, acmid = {1935914}, address = {New York, NY, USA}, author = {Backstrom, Lars and Leskovec, Jure}, booktitle = {Proceedings of the fourth ACM international conference on Web search and data mining}, doi = {10.1145/1935826.1935914}, interhash = {94f21249839cf875da4ad8842cd37d15}, intrahash = {999a159de862039db86fe74f808526e3}, isbn = {978-1-4503-0493-1}, location = {Hong Kong, China}, numpages = {10}, pages = {635--644}, publisher = {ACM}, series = {WSDM '11}, title = {Supervised random walks: predicting and recommending links in social networks}, url = {http://doi.acm.org/10.1145/1935826.1935914}, year = 2011 } @misc{backstrom2010supervised, abstract = {Predicting the occurrence of links is a fundamental problem in networks. In the link prediction problem we are given a snapshot of a network and would like to infer which interactions among existing members are likely to occur in the near future or which existing interactions are we missing. Although this problem has been extensively studied, the challenge of how to effectively combine the information from the network structure with rich node and edge attribute data remains largely open. We develop an algorithm based on Supervised Random Walks that naturally combines the information from the network structure with node and edge level attributes. We achieve this by using these attributes to guide a random walk on the graph. We formulate a supervised learning task where the goal is to learn a function that assigns strengths to edges in the network such that a random walker is more likely to visit the nodes to which new links will be created in the future. We develop an efficient training algorithm to directly learn the edge strength estimation function. Our experiments on the Facebook social graph and large collaboration networks show that our approach outperforms state-of-the-art unsupervised approaches as well as approaches that are based on feature extraction.}, author = {Backstrom, L. and Leskovec, J.}, interhash = {970b02221d407c64c1c35f997d4fe345}, intrahash = {c5cc52fa016b384f9d7b5ae4da841d44}, note = {cite arxiv:1011.4071}, title = {Supervised Random Walks: Predicting and Recommending Links in Social Networks}, url = {http://arxiv.org/abs/1011.4071}, year = 2010 } @inproceedings{kluegl2012stacked, abstract = {Conditional Random Fields CRF are popular methods for labeling unstructured or textual data. Like many machine learning approaches these undirected graphical models assume the instances to be independently distributed. However, in real world applications data is grouped in a natural way, e.g., by its creation context. The instances in each group often share additional structural consistencies. This paper proposes a domain-independent method for exploiting these consistencies by combining two CRFs in a stacked learning framework. The approach incorporates three successive steps of inference: First, an initial CRF processes single instances as usual. Next, we apply rule learning collectively on all labeled outputs of one context to acquire descriptions of its specific properties. Finally, we utilize these descriptions as dynamic and high quality features in an additional stacked CRF. 
The presented approach is evaluated with a real-world dataset for the segmentation of references and achieves a significant reduction of the labeling error.}, address = {Vilamoura, Algarve, Portugal}, author = {Klügl, Peter and Toepfer, Martin and Lemmerich, Florian and Hotho, Andreas and Puppe, Frank}, booktitle = {Proceedings of 1st International Conference on Pattern Recognition Applications and Methods ICPRAM}, editor = {Carmona, Pedro Latorre and Sánchez, J. Salvador and Fred, Ana}, interhash = {74969e59c5637d192021e35bbd02bece}, intrahash = {7920d13d4fce68bb9a4947585083986e}, pages = {240-248}, publisher = {SciTePress}, title = {Stacked Conditional Random Fields Exploiting Structural Consistencies}, url = {http://ki.informatik.uni-wuerzburg.de/papers/pkluegl/2012-ICPRAM-StackedCRF.pdf}, year = 2012 } @misc{Sutton2010, abstract = { Often we wish to predict a large number of variables that depend on each other as well as on other observed variables. Structured prediction methods are essentially a combination of classification and graphical modeling, combining the ability of graphical models to compactly model multivariate data with the ability of classification methods to perform prediction using large sets of input features. This tutorial describes conditional random fields, a popular probabilistic method for structured prediction. CRFs have seen wide application in natural language processing, computer vision, and bioinformatics. We describe methods for inference and parameter estimation for CRFs, including practical issues for implementing large scale CRFs. We do not assume previous knowledge of graphical modeling, so this tutorial is intended to be useful to practitioners in a wide variety of fields. }, author = {Sutton, Charles and McCallum, Andrew}, interhash = {05e1b6859124c5bf51c7aafd63f779b0}, intrahash = {49d8c9beb76a8b88739aa9eece7446ee}, note = {cite arxiv:1011.4088Comment: 90 pages}, title = {An Introduction to Conditional Random Fields}, url = {http://arxiv.org/abs/1011.4088}, year = 2010 } @inproceedings{konstas2009social, abstract = {Social network systems, like last.fm, play a significant role in Web 2.0, containing large amounts of multimedia-enriched data that are enhanced both by explicit user-provided annotations and implicit aggregated feedback describing the personal preferences of each user. It is also a common tendency for these systems to encourage the creation of virtual networks among their users by allowing them to establish bonds of friendship and thus provide a novel and direct medium for the exchange of data. We investigate the role of these additional relationships in developing a track recommendation system. Taking into account both the social annotation and friendships inherent in the social graph established among users, items and tags, we created a collaborative recommendation system that effectively adapts to the personal information needs of each user. We adopt the generic framework of Random Walk with Restarts in order to provide with a more natural and efficient way to represent social networks. In this work we collected a representative enough portion of the music social network last.fm, capturing explicitly expressed bonds of friendship of the user as well as social tags. We performed a series of comparison experiments between the Random Walk with Restarts model and a user-based collaborative filtering method using the Pearson Correlation similarity. 
The results show that the graph model system benefits from the additional information embedded in social knowledge. In addition, the graph model outperforms the standard collaborative filtering method.}, acmid = {1571977}, address = {New York, NY, USA}, author = {Konstas, Ioannis and Stathopoulos, Vassilios and Jose, Joemon M.}, booktitle = {Proceedings of the 32nd international ACM SIGIR conference on Research and development in information retrieval}, doi = {10.1145/1571941.1571977}, interhash = {9dde0442dfcf24151811f301fb7fa3cb}, intrahash = {3a2c3898216376eab27848a7f147ee51}, isbn = {978-1-60558-483-6}, location = {Boston, MA, USA}, numpages = {8}, pages = {195--202}, publisher = {ACM}, series = {SIGIR '09}, title = {On social networks and collaborative recommendation}, url = {http://doi.acm.org/10.1145/1571941.1571977}, year = 2009 } @misc{ghoshal2009random, abstract = {In the last few years we have witnessed the emergence, primarily in on-line communities, of new types of social networks that require for their representation more complex graph structures than have been employed in the past. One example is the folksonomy, a tripartite structure of users, resources, and tags -- labels collaboratively applied by the users to the resources in order to impart meaningful structure on an otherwise undifferentiated database. Here we propose a mathematical model of such tripartite structures which represents them as random hypergraphs. We show that it is possible to calculate many properties of this model exactly in the limit of large network size and we compare the results against observations of a real folksonomy, that of the on-line photography web site Flickr. We show that in some cases the model matches the properties of the observed network well, while in others there are significant differences, which we find to be attributable to the practice of multiple tagging, i.e., the application by a single user of many tags to one resource, or one tag to many resources.}, author = {Ghoshal, Gourab and Zlatic, Vinko and Caldarelli, Guido and Newman, M. E. J.}, interhash = {06e785ad79729e23e326b9c572aa7c56}, intrahash = {a1533c3b12096f71a2b6b6970eb9934d}, note = {cite arxiv:0903.0419; Comment: 11 pages, 7 figures}, title = {Random hypergraphs and their applications}, url = {http://arxiv.org/abs/0903.0419}, year = 2009 } @article{bollobas2004diameter, abstract = {We consider a random graph process in which vertices are added to the graph one at a time and joined to a fixed number m of earlier vertices, where each earlier vertex is chosen with probability proportional to its degree. This process was introduced by Barabási and Albert [3], as a simple model of the growth of real-world graphs such as the world-wide web. Computer experiments presented by Barabási, Albert and Jeong [1,5] and heuristic arguments given by Newman, Strogatz and Watts [23] suggest that after n steps the resulting graph should have diameter approximately log n. We show that while this holds for m=1, for m=2 the diameter is asymptotically log n/log log n.
}, author = {Bollobás, Béla and Riordan, Oliver}, interhash = {16beb80dcc792cb525ee07429731149e}, intrahash = {6a79d370bf4979548ace69b97a61b2d3}, journal = {Combinatorica}, month = jan, number = 1, pages = {5--34}, title = {The Diameter of a Scale-Free Random Graph}, url = {http://dx.doi.org/10.1007/s00493-004-0002-2}, volume = 24, year = 2004 } @article{karonski1982rrg, author = {Karonski, M.}, interhash = {262f8c6536b05069f54f68cc2d37e65e}, intrahash = {38decbe2634cb152b3cb6bd17379392e}, journal = {Journal of Graph Theory}, number = 4, publisher = {Wiley Subscription Services, Inc., A Wiley Company New York}, title = {{A review of random graphs}}, volume = 6, year = 1982 } @article{frank1988rsa, author = {Frank, O.}, interhash = {1761045c6709543b29d880bbb0a45cab}, intrahash = {2d15b32284dc31868096f5220dec0e94}, journal = {Math. Sci. Humaines}, pages = {19--33}, title = {{Random sampling and social networks: a survey of various approaches}}, volume = 104, year = 1988 } @article{bollobas1981drg, author = {Bollobás, B.}, interhash = {fd6bc8a1ec25e68c8a9b941eb930e3d0}, intrahash = {9322d8b8af9747bbdef53b6a481c5ed8}, journal = {Transactions of the American Mathematical Society}, pages = {41--52}, publisher = {American Mathematical Society}, title = {{The diameter of random graphs}}, year = 1981 } @article{newman2001rga, author = {Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.}, interhash = {706d572ebbb2408b5a4ffa6978579dec}, intrahash = {08a607a8657ec747029ecbaf8d9f224f}, journal = {arXiv preprint cond-mat/0007235}, title = {{Random graphs with arbitrary degree distributions and their applications}}, year = 2001 } @article{molloy_reed95, author = {Molloy, M. and Reed, B.}, interhash = {0998c00ecea7c5a7ea384898aa6d137c}, intrahash = {69645e07736cf5cb96efa1401a815cb0}, journal = {Random Structures \& Algorithms}, pages = {161--179}, title = {A critical point for random graphs with a given degree sequence}, volume = 6, year = 1995 } @article{soderberg2002gfi, author = {Soderberg, B.}, interhash = {bff19170c78c4f4b01fe6cd4fef7a9e0}, intrahash = {50374bc391847bb097c46bc38364a7a9}, journal = {Phys. Rev. E}, number = 6, pages = 066121, publisher = {APS}, title = {{General formalism for inhomogeneous random graphs}}, volume = 66, year = 2002 } @article{anderson1999ppl, author = {Anderson, C.J. and Wasserman, S. and Crouch, B.}, interhash = {bc2bb58cfd833af662976fa8b73f4607}, intrahash = {b2e086ec820f42183555e14de772f695}, journal = {Social Networks}, number = 1, pages = {37--66}, publisher = {Elsevier}, title = {{A p* primer: Logit models for social networks}}, volume = 21, year = 1999 } @inproceedings{chebolu2008pagerank, author = {Chebolu, P. and Melsted, P.}, booktitle = {Proceedings of the nineteenth annual ACM-SIAM symposium on Discrete algorithms}, interhash = {b186427a40b0af4a6414d82f0040613f}, intrahash = {742b675a09d540687fc2c352a883d501}, organization = {Society for Industrial and Applied Mathematics Philadelphia, PA, USA}, pages = {1010--1018}, title = {{PageRank and the random surfer model}}, year = 2008 } @inproceedings{aiello2000random, author = {Aiello, W. and Chung, F.
and Lu, L.}, booktitle = {Proceedings of the thirty-second annual ACM symposium on Theory of computing}, interhash = {fc01c77b63e8bc0367d4281e6f7722fc}, intrahash = {bbf5b80f716eacb6e82ffc4e389e8d2b}, organization = {ACM New York, NY, USA}, pages = {171--180}, title = {{A random graph model for massive graphs}}, year = 2000 }