@article{obiedkov2009building,
  abstract  = {The use of lattice-based access control models has been somewhat restricted by their complexity. We argue that attribute exploration from formal concept analysis can help create lattice models of manageable size, while making it possible for the system designer to better understand dependencies between different security categories in the domain and, thus, providing certain guarantees for the relevance of the constructed model to a particular application. In this paper, we introduce the method through an example.},
  author    = {Obiedkov, Sergei and Kourie, Derrick G. and Eloff, J. H. P.},
  doi       = {10.1016/j.cose.2008.07.011},
  interhash = {367ceb95cd5e3964aa2d7d00ad21da09},
  intrahash = {7be2b4bf0987c4d18adf7243eae690c0},
  issn      = {0167-4048},
  journal   = {Computers and Security},
  number    = {1--2},
  pages     = {2--7},
  title     = {Building access control models with attribute exploration},
  url       = {http://www.sciencedirect.com/science/article/pii/S0167404808000497},
  volume    = 28,
  year      = 2009
}

@article{guigues1986familles,
  author    = {Guigues, J.-L. and Duquenne, V.},
  interhash = {3671be91dd80e5c415ede85e94ada3d7},
  intrahash = {fee9525a2e89b5ebced886b3a9f0194d},
  journal   = {Mathématiques et Sciences Humaines},
  pages     = {5--18},
  title     = {Familles minimales d'implications informatives résultant d'un tableau de données binaires},
  volume    = 95,
  year      = 1986
}

@inproceedings{baader2007completing,
  abstract  = {We propose an approach for extending both the terminological and the assertional part of a Description Logic knowledge base by using information provided by the knowledge base and by a domain expert. The use of techniques from Formal Concept Analysis ensures that, on the one hand, the interaction with the expert is kept to a minimum, and, on the other hand, we can show that the extended knowledge base is complete in a certain, well-defined sense.},
  acmid     = {1625311},
  address   = {San Francisco, CA, USA},
  author    = {Baader, Franz and Ganter, Bernhard and Sertkaya, Baris and Sattler, Ulrike},
  booktitle = {Proceedings of the 20th international joint conference on Artificial intelligence},
  interhash = {8ab382f3aa141674412ba7ad33316a9b},
  intrahash = {87f98ae486014ba78690ffa314b67da8},
  location  = {Hyderabad, India},
  numpages  = {6},
  pages     = {230--235},
  publisher = {Morgan Kaufmann Publishers Inc.},
  title     = {Completing description logic knowledge bases using formal concept analysis},
  url       = {http://dl.acm.org/citation.cfm?id=1625275.1625311},
  year      = 2007
}

@book{koester2006fooca,
  abstract  = {This book deals with Formal Concept Analysis (FCA) and its application to Web Information Retrieval. It explains how Web search results retrieved by major Web search engines such as Google or Yahoo can be conceptualized leading to a human-oriented form of representation. A generalization of Web search results is conducted, leading to an FCA-based introduction of FooCA. FooCA is an application in the field of Conceptual Knowledge Processing and supports the idea of a holistic representation of Web Information Retrieval.},
  address   = {Mühltal},
  author    = {Koester, Bjoern},
  interhash = {fe53b2b1fa6be34259647954fca36bf8},
  intrahash = {5571d950ada3ee1892e5c043ac438271},
  publisher = {Verlag Allgemeine Wissenschaft},
  series    = {Beiträge zur begrifflichen Wissensverarbeitung},
  title     = {{FooCA}: web information retrieval with formal concept analysis},
  url       = {http://www.bjoern-koester.de/fooca/web_information_retrieval_with_formal_concept_analysis.html},
  year      = 2006
}

@article{poelmans2012semiautomated,
  abstract  = {We propose an iterative and human-centred knowledge discovery methodology based on formal concept analysis. The proposed approach recognizes the important role of the domain expert in mining real-world enterprise applications and makes use of specific domain knowledge, including human intelligence and domain-specific constraints. Our approach was empirically validated at the Amsterdam-Amstelland police to identify suspects and victims of human trafficking in 266,157 suspicious activity reports. Based on guidelines of the Attorney Generals of the Netherlands, we first defined multiple early warning indicators that were used to index the police reports. Using concept lattices, we revealed numerous unknown human trafficking and loverboy suspects. In-depth investigation by the police resulted in a confirmation of their involvement in illegal activities resulting in actual arrestments been made. Our human-centred approach was embedded into operational policing practice and is now successfully used on a daily basis to cope with the vastly growing amount of unstructured information.},
  author    = {Poelmans, Jonas and Elzinga, Paul and Ignatov, Dmitry I. and Kuznetsov, Sergei O.},
  doi       = {10.1080/03081079.2012.721662},
  eprint    = {http://www.tandfonline.com/doi/pdf/10.1080/03081079.2012.721662},
  interhash = {18d6f6312af57cc72d7e26de4903dc9f},
  intrahash = {9bb41c50dd5333f94a807482489c0732},
  journal   = {International Journal of General Systems},
  number    = 8,
  pages     = {774--804},
  title     = {Semi-automated knowledge discovery: identifying and profiling human trafficking},
  url       = {http://www.tandfonline.com/doi/abs/10.1080/03081079.2012.721662},
  volume    = 41,
  year      = 2012
}

@incollection{becker2000conceptual,
  abstract    = {Conceptual Information Systems are based on a formalization of the concept of ‘concept’ as it is discussed in traditional philosophical logic. This formalization supports a human-centered approach to the development of Information Systems. We discuss this approach by means of an implemented Conceptual Information System for supporting IT security management in companies and organizations.},
  address     = {Berlin/Heidelberg},
  affiliation = {Entrust Technologies (Switzerland) Ltd liab. Co Glatt Tower CH-8301 Glattzentrum Switzerland},
  author      = {Becker, Klaus and Stumme, Gerd and Wille, Rudolf and Wille, Uta and Zickwolff, Monika},
  booktitle   = {Knowledge Engineering and Knowledge Management Methods, Models, and Tools},
  doi         = {10.1007/3-540-39967-4_27},
  editor      = {Dieng, Rose and Corby, Olivier},
  interhash   = {dacb08013d9496d41d4f9f39bce7ecd1},
  intrahash   = {283f8a780ac47746cc3031ad47bfdf9c},
  isbn        = {978-3-540-41119-2},
  keyword     = {Computer Science},
  pages       = {352--365},
  publisher   = {Springer},
  series      = {Lecture Notes in Computer Science},
  title       = {Conceptual Information Systems Discussed through an {IT-Security} Tool},
  url         = {http://dx.doi.org/10.1007/3-540-39967-4_27},
  volume      = 1937,
  year        = 2000
}

@incollection{stumme1998conceptual,
  abstract    = {In this paper we discuss Conceptual Knowledge Discovery in Databases (CKDD) as it is developing in the field of Conceptual Knowledge Processing (cf. [29],[30]). Conceptual Knowledge Processing is based on the mathematical theory of Formal Concept Analysis which has become a successful theory for data analysis during the last 18 years. This approach relies on the pragmatic philosophy of Ch.S. Peirce [15] who claims that we can only analyze and argue within restricted contexts where we always rely on pre-knowledge and common sense. The development of Formal Concept Analysis led to the software system TOSCANA, which is presented as a CKDD tool in this paper. TOSCANA is a flexible navigation tool that allows dynamic browsing through and zooming into the data. It supports the exploration of large databases by visualizing conceptual aspects inherent to the data. We want to clarify that CKDD can be understood as a human-centered approach of Knowledge Discovery in Databases. The actual discussion about human-centered Knowledge Discovery is therefore briefly summarized in Section 1.},
  address     = {Berlin/Heidelberg},
  affiliation = {Technische Universität Darmstadt Fachbereich Mathematik D-64289 Darmstadt Germany D-64289 Darmstadt Germany},
  author      = {Stumme, Gerd and Wille, Rudolf and Wille, Uta},
  booktitle   = {Principles of Data Mining and Knowledge Discovery},
  doi         = {10.1007/BFb0094849},
  editor      = {Zytkow, Jan and Quafafou, Mohamed},
  interhash   = {5ef89b6f8fb22f9d24eda7da71b8bdb1},
  intrahash   = {a9859c988f19684b76dc5a3f24e8278e},
  isbn        = {978-3-540-65068-3},
  keyword     = {Computer Science},
  pages       = {450--458},
  publisher   = {Springer},
  series      = {Lecture Notes in Computer Science},
  title       = {Conceptual Knowledge Discovery in Databases using formal concept analysis methods},
  url         = {http://dx.doi.org/10.1007/BFb0094849},
  volume      = 1510,
  year        = 1998
}

@incollection{hereth2000conceptual,
  abstract    = {In this paper, we discuss Conceptual Knowledge Discovery in Databases (CKDD) in its connection with Data Analysis. Our approach is based on Formal Concept Analysis, a mathematical theory which has been developed and proven useful during the last 20 years. Formal Concept Analysis has led to a theory of conceptual information systems which has been applied by using the management system TOSCANA in a wide range of domains. In this paper, we use such an application in database marketing to demonstrate how methods and procedures of CKDD can be applied in Data Analysis. In particular, we show the interplay and integration of data mining and data analysis techniques based on Formal Concept Analysis. The main concern of this paper is to explain how the transition from data to knowledge can be supported by a TOSCANA system. To clarify the transition steps we discuss their correspondence to the five levels of knowledge representation established by R. Brachman and to the steps of empirically grounded theory building proposed by A. Strauss and J. Corbin.},
  address     = {Berlin/Heidelberg},
  affiliation = {Fachbereich Mathematik, Technische Universität Darmstadt, Schloßgartenstr. 7, D-64289 Darmstadt, Germany},
  author      = {Hereth, Joachim and Stumme, Gerd and Wille, Rudolf and Wille, Uta},
  booktitle   = {Conceptual Structures: Logical, Linguistic, and Computational Issues},
  doi         = {10.1007/10722280_29},
  editor      = {Ganter, Bernhard and Mineau, Guy},
  interhash   = {8a4c0c21d83c25bb78f80e89dd36a89a},
  intrahash   = {89154e5d16a6533280b612e9d3ab8aa6},
  isbn        = {978-3-540-67859-5},
  keyword     = {Computer Science},
  pages       = {421--437},
  publisher   = {Springer},
  series      = {Lecture Notes in Computer Science},
  title       = {Conceptual Knowledge Discovery and Data Analysis},
  url         = {http://dx.doi.org/10.1007/10722280_29},
  volume      = 1867,
  year        = 2000
}

@inproceedings{pavlovic2012quantitative,
  abstract  = {Formal Concept Analysis (FCA) begins from a context, given as a binary relation between some objects and some attributes, and derives a lattice of concepts, where each concept is given as a set of objects and a set of attributes, such that the first set consists of all objects that satisfy all attributes in the second, and vice versa. Many applications, though, provide contexts with quantitative information, telling not just whether an object satisfies an attribute, but also quantifying this satisfaction. Contexts in this form arise as rating matrices in recommender systems, as occurrence matrices in text analysis, as pixel intensity matrices in digital image processing, etc. Such applications have attracted a lot of attention, and several numeric extensions of FCA have been proposed. We propose the framework of proximity sets (proxets), which subsume partially ordered sets (posets) as well as metric spaces. One feature of this approach is that it extracts from quantified contexts quantified concepts, and thus allows full use of the available information. Another feature is that the categorical approach allows analyzing any universal properties that the classical FCA and the new versions may have, and thus provides structural guidance for aligning and combining the approaches.},
  address   = {Berlin/Heidelberg},
  author    = {Pavlovic, Dusko},
  booktitle = {ICFCA 2012},
  editor    = {Domenach, F. and Ignatov, D. I. and Poelmans, J.},
  ee        = {http://arxiv.org/abs/1204.5802},
  interhash = {601aaf1dbcb15e8872109be6f4a1a5d8},
  intrahash = {a0c8122fe1a490e82129a24e042b371d},
  issn      = {0302-9743},
  pages     = {260--277},
  publisher = {Springer},
  series    = {Lecture Notes in Artificial Intelligence},
  title     = {Quantitative Concept Analysis},
  volume    = 7278,
  year      = 2012
}

@inproceedings{doerfel2012publication,
  abstract  = {We present an analysis of the publication and citation networks of all previous editions of the three conferences most relevant to the FCA community: ICFCA, ICCS and CLA. Using data mining methods from FCA and graph analysis, we investigate patterns and communities among authors, we identify and visualize influential publications and authors, and we give a statistical summary of the conferences’ history.},
  address   = {Berlin/Heidelberg},
  author    = {Doerfel, Stephan and Jäschke, Robert and Stumme, Gerd},
  booktitle = {Formal Concept Analysis},
  doi       = {10.1007/978-3-642-29892-9_12},
  editor    = {Domenach, F. and Ignatov, D. I. and Poelmans, J.},
  interhash = {f34f31e8dd1e07b1b0a5ab688f10084a},
  intrahash = {9207cd4b1cf7d87c9ae959ac780e152c},
  isbn      = {978-3-642-29891-2},
  month     = may,
  pages     = {77--95},
  publisher = {Springer},
  series    = {Lecture Notes in Artificial Intelligence},
  title     = {Publication Analysis of the {Formal Concept Analysis} Community},
  url       = {http://link.springer.com/chapter/10.1007/978-3-642-29892-9_12},
  volume    = 7278,
  year      = 2012
}

@inproceedings{poelmans2011mining,
  abstract  = {Formal Concept Analysis (FCA) is an unsupervised clustering technique and many scientific papers are devoted to applying FCA in Information Retrieval (IR) research. We collected 103 papers published between 2003-2009 which mention FCA and information retrieval in the abstract, title or keywords. Using a prototype of our FCA-based toolset CORDIET, we converted the pdf-files containing the papers to plain text, indexed them with Lucene using a thesaurus containing terms related to FCA research and then created the concept lattice shown in this paper. We visualized, analyzed and explored the literature with concept lattices and discovered multiple interesting research streams in IR of which we give an extensive overview. The core contributions of this paper are the innovative application of FCA to the text mining of scientific papers and the survey of the FCA-based IR research.},
  author    = {Poelmans, Jonas and Elzinga, Paul and Viaene, Stijn and Dedene, Guido and Kuznetsov, Sergei O.},
  booktitle = {Industrial Conference on Data Mining - Poster and Industry Proceedings},
  editor    = {Perner, Petra},
  interhash = {b44d11ea5b5a4df8ee30a9c572d82051},
  intrahash = {164c37be60c1a47d1727ad9b82f01237},
  isbn      = {978-3-942954-06-4},
  pages     = {82--96},
  publisher = {IBaI Publishing},
  title     = {Text Mining Scientific Papers: a Survey on {FCA}-based Information Retrieval Research.},
  url       = {http://dblp.uni-trier.de/db/conf/incdm/incdm2011p.html#PoelmansEVDK11},
  year      = 2011
}

@incollection{poelmans2010formal,
  abstract  = {In this paper, we analyze the literature on Formal Concept Analysis (FCA) using FCA. We collected 702 papers published between 2003-2009 mentioning Formal Concept Analysis in the abstract. We developed a knowledge browsing environment to support our literature analysis process. The pdf-files containing the papers were converted to plain text and indexed by Lucene using a thesaurus containing terms related to FCA research. We use the visualization capabilities of FCA to explore the literature, to discover and conceptually represent the main research topics in the FCA community. As a case study, we zoom in on the 140 papers on using FCA in knowledge discovery and data mining and give an extensive overview of the contents of this literature.},
  address   = {Berlin/Heidelberg},
  author    = {Poelmans, Jonas and Elzinga, Paul and Viaene, Stijn and Dedene, Guido},
  booktitle = {Conceptual Structures: From Information to Intelligence},
  doi       = {10.1007/978-3-642-14197-3_15},
  editor    = {Croitoru, Madalina and Ferré, Sébastien and Lukose, Dickson},
  interhash = {713d63f847ff4b2cbf613fc0508eb31b},
  intrahash = {9694689a034cc02aae1e27114ca26a94},
  isbn      = {978-3-642-14196-6},
  pages     = {139--153},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  title     = {{Formal Concept Analysis} in Knowledge Discovery: A Survey},
  url       = {http://dx.doi.org/10.1007/978-3-642-14197-3_15},
  volume    = 6208,
  year      = 2010
}

@article{wille1995basic,
  abstract    = {Experiences with applications of concept lattices and the pragmatic philosophy founded by Ch. S. Peirce have suggested a triadic approach to formal concept analysis. It starts with the notion of a triadic context combining objects, attributes, and conditions under which objects may have certain attributes. The Basic Theorem of triadic concept analysis clarifies the class of structures which are formed by the triadic concepts of triadic contexts: These structures are exactly the complete trilattices up to isomorphism.},
  affiliation = {Fachbereich Mathematik Technishe Hochschule Darmstadt 64289 Darmstadt Germany},
  author      = {Wille, Rudolf},
  doi         = {10.1007/BF01108624},
  interhash   = {c5223bed0a0995c5f3cd8962a9d54212},
  intrahash   = {23959c014b9b6d46c93f45cf68e52294},
  issn        = {0167-8094},
  journal     = {Order},
  keyword     = {Mathematics and Statistics},
  number      = 2,
  pages       = {149--158},
  publisher   = {Springer Netherlands},
  title       = {The Basic Theorem of triadic concept analysis},
  url         = {http://dx.doi.org/10.1007/BF01108624},
  volume      = 12,
  year        = 1995
}

@inproceedings{daquin2011extracting,
  abstract  = {With the rise of linked data, more and more semantically described information is being published online according to the principles and technologies of the Semantic Web (especially, RDF and SPARQL). The use of such standard technologies means that this data should be exploitable, integrable and reusable straight away. However, once a potentially interesting dataset has been discovered, significant efforts are currently required in order to understand its schema, its content, the way to query it and what it can answer. In this paper, we propose a method and a tool to automatically discover questions that can be answered by an RDF dataset. We use formal concept analysis to build a hierarchy of meaningful sets of entities from a dataset. These sets of entities represent answers, which common characteristics represent the clauses of the corresponding questions. This hierarchy can then be used as a querying interface, proposing questions of varying levels of granularity and specificity to the user. A major issue is however that thousands of questions can be included in this hierarchy. Based on an empirical analysis and using metrics inspired both from formal concept analysis and from ontology summarization, we devise an approach for identifying relevant questions to act as a starting point to the navigation in the question hierarchy.},
  acmid     = {1999698},
  address   = {New York, NY, USA},
  author    = {{d'Aquin}, Mathieu and Motta, Enrico},
  booktitle = {Proceedings of the sixth international conference on Knowledge capture},
  doi       = {10.1145/1999676.1999698},
  interhash = {7794150f2b42c21956eb7fb419ca0248},
  intrahash = {45374b975834248c0cd87022fc854e25},
  isbn      = {978-1-4503-0396-5},
  location  = {Banff, Alberta, Canada},
  numpages  = {8},
  pages     = {121--128},
  publisher = {ACM},
  title     = {Extracting relevant questions to an {RDF} dataset using formal concept analysis},
  url       = {http://doi.acm.org/10.1145/1999676.1999698},
  year      = 2011
}

@inproceedings{tilley2007citation,
  abstract  = {In this paper formal concept analysis (FCA) is used as a means to analyse afield of research using published academic papers as its input. In particular, results are presented based on a case study of 47 academic papers in a scientific field of study. The analysis includes inferences about the field of study based on the domain background knowledge derived from the ISO12207 software engineering standard. Additionally, a number of alternative classifications based on the target application language and the reported application size are introduced. FCA reveals useful insights about the nature of the subject matter: identifying fruitful areas of research as well as producing details about characteristics of the community under examination.},
  author    = {Tilley, T. and Eklund, P.},
  booktitle = {18th International Workshop on Database and Expert Systems Applications (DEXA)},
  doi       = {10.1109/DEXA.2007.59},
  interhash = {52fc9589299b48707ab9f22f995ecd17},
  intrahash = {5544eac0fd55b4862dde20cad8edc11a},
  issn      = {1529-4188},
  month     = sep,
  pages     = {545--550},
  publisher = {IEEE Computer Society},
  title     = {Citation Analysis using {Formal Concept Analysis}: A case study in Software Engineering},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4312953&tag=1},
  year      = 2007
}

@book{ganter2005formal,
  address   = {Berlin/Heidelberg},
  doi       = {10.1007/978-3-540-31881-1},
  editor    = {Ganter, Bernhard and Stumme, Gerd and Wille, Rudolf},
  interhash = {171ebaf9a115bc54c00bf293d4fa75ed},
  intrahash = {18ace92c8d892a1d0f0e0fb72bd71832},
  isbn      = {978-3-540-27891-7},
  issn      = {1611-3349},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  title     = {{Formal Concept Analysis}: Foundations and Applications},
  url       = {http://dx.doi.org/10.1007/978-3-540-31881-1},
  volume    = 3626,
  year      = 2005
}

@proceedings{valtchev2011formal,
  abstract  = {The present volume features a selection of the papers presented at the 9th International Conference on Formal Concept Analysis (ICFCA 2011). Over the years, the ICFCA conference series has grown into the premier forum for dissemination of research on topics from formal concept analysis (FCA) theory and applications, as well as from the related fields of lattices and partially ordered structures. FCA is a multi-disciplinary field with strong roots in the mathematical theory of partial orders and lattices, with tools originating in computer science and artificial intelligence. FCA emerged in the early 1980s from efforts to restructure lattice theory to promote better communication between lattice theorists and potential users of lattice-based methods for data management. Initially, the central theme was the mathematical formalization of concept and conceptual hierarchy. Since then, the field has developed into a constantly growing research area in its own right with a thriving theoretical community and an increasing number of applications in data and knowledge processing including disciplines such as data visualization, information retrieval, machine learning, software engineering, data analysis, data mining, social networks analysis, etc. ICFCA 2011 was held from May 2 to May 6, 2011, in Nicosia, Cyprus. The program committee received 49 high-quality submissions that were subjected to a highly competitive selection process. Each paper was reviewed by three referees (exceptionally two or four). After a first round, some papers got a definitive acceptance status, while others got accepted conditionally to improvements in their content. The latter got to a second round of reviewing. The overall outcome was the acceptance of 16 papers as regular ones for presentation at the conference and publication in this volume. Another seven papers have still been assessed as valuable for discussion at the conference and were therefore collected in the supplementary proceedings. The regular papers presented hereafter cover advances on a wide range of subjects from FCA and related fields. A first group of papers tackled mathematical problems within the FCA field. A subset thereof focused on factor identification within the incidence relation or its lattice representation (papers by Glodeanu and by Krupka). The remainder of the group proposed characterizations of particular classes of ordered structures (papers by Doerfel and by Meschke et al.). A second group of papers addressed algorithmic problems from FCA and related fields. Two papers approached their problems from an algorithmic complexity viewpoint (papers by Distel and by Babin and Kuznetsov) while the final paper in this group addressed algorithmic problems for general lattices, i.e., not represented as formal contexts, with an FCA-based approach (work by Balcázar and Tîrnăucă). A third group studied alternative approaches for extending the expressive power of the core FCA, e.g., by generalizing the standard one-valued attributes to attributes valued in algebraic rings (work by González Calabozo et al.), by introducing pointer-like attributes, a.k.a. links (paper by Kötters), or by substituting set-shaped concept intents with modal logic expressions (paper by Soldano and Ventos). A fourth group focused on data mining-oriented aspects of FCA: agreement lattices in structured data mining (paper by Nedjar et al.), triadic association rule mining (work by Missaoui and Kwuida) and bi-clustering of numerical data (Kaytoue et al.). An additional paper shed some initial light on a key aspect of FCA-based data analysis and mining, i.e., the filtering of interesting concepts (paper by Belohlavek and Macko). Finally, a set of exciting applications of both basic and enhanced FCA frameworks to practical problems have been described: in analysis of gene expression data (the already mentioned work by González Calabozo et al.), in web services composition (paper by Azmeh et al.) and in browsing and retrieval of structured data (work by Wray and Eklund). This volume also contains three keynote papers submitted by the invited speakers of the conference. All these contributions constitute a volume of high quality which is the result of the hard work done by the authors, the invited speakers and the reviewers. We therefore wish to thank the members of the Program Committee and of the Editorial Board whose steady involvement and professionalism helped a lot. We would also like to acknowledge the participation of all the external reviewers who sent many valuable comments. Kudos also go to EasyChair for having made the reviewing/editing process a real pleasure. Special thanks go to the Cyprus Tourism Organisation for sponsoring the conference and to the University of Nicosia for hosting it. Finally we wish to thank the Conference Chair Florent Domenach and his colleagues from the Organization Committee for the mountains of energy they put behind the conference organization process right from the beginning in order to make it a total success. We would also like to express our gratitude towards Dr. Peristianis, President of the University of Nicosia, for his personal support.},
  address   = {Berlin/Heidelberg},
  doi       = {10.1007/978-3-642-20514-9_2},
  editor    = {Valtchev, Petko and Jäschke, Robert},
  interhash = {a7fd7ebbb14eacc605ff61cf2759cb06},
  intrahash = {afd54a24a2eeca1a07f811bd89800d28},
  isbn      = {978-3-642-20513-2},
  month     = may,
  publisher = {Springer},
  series    = {Lecture Notes in Artificial Intelligence},
  title     = {Formal Concept Analysis},
  url       = {http://www.springer.com/computer/ai/book/978-3-642-20513-2},
  vgwort    = {452},
  volume    = 6628,
  year      = 2011
}

@article{ganter2005pseudomodels,
  abstract  = {A well-known result is that the inference problem for propositional Horn formulae can be solved in linear time. We show that this remains true even in the presence of arbitrary (static) propositional background knowledge. Our main tool is the notion of a cumulated clause, a slight generalization of the usual clauses in Propositional Logic. We show that each propositional theory has a canonical irredundant base of cumulated clauses, and present an algorithm to compute this base.},
  address   = {Amsterdam, The Netherlands},
  author    = {Ganter, Bernhard and Krauße, Rüdiger},
  doi       = {10.1016/j.dam.2004.06.019},
  interhash = {75396eab79cd8133f85c88794ac80a61},
  intrahash = {bd3394df55a858ed1a516a6999788c25},
  issn      = {0166-218X},
  journal   = {Discrete Applied Mathematics},
  month     = apr,
  number    = 1,
  pages     = {43--55},
  publisher = {Elsevier Science Publishers B. V.},
  title     = {Pseudo-models and propositional Horn inference},
  url       = {http://www.math.tu-dresden.de/~ganter/psfiles/pseudo.ps},
  volume    = 147,
  year      = 2005
}

@book{jaeschke2011formal,
  abstract  = {One of the most noticeable innovation that emerged with the advent of the Web 2.0 and the focal point of this thesis are collaborative tagging systems. They allow users to annotate arbitrary resources with freely chosen keywords, so called tags. The tags are used for navigation, finding resources, and serendipitous browsing and thus provide an immediate benefit for the user. By now, several systems for tagging photos, web links, publication references, videos, etc. have attracted millions of users which in turn annotated countless resources. Tagging gained so much popularity that it spread into other applications like web browsers, software packet managers, and even file systems. Therefore, the relevance of the methods presented in this thesis goes beyond the Web 2.0. The conceptual structure underlying collaborative tagging systems is called folksonomy. It can be represented as a tripartite hypergraph with user, tag, and resource nodes. Each edge of the graph expresses the fact that a user annotated a resource with a tag. This social network constitutes a lightweight conceptual structure that is not formalized, but rather implicit and thus needs to be extracted with knowledge discovery methods. In this thesis a new data mining task – the mining of all frequent tri-concepts – is presented, together with an efficient algorithm for discovering such implicit shared conceptualizations. Our approach extends the data mining task of discovering all closed itemsets to three-dimensional data structures to allow for mining folksonomies. Extending the theory of triadic Formal Concept Analysis, we provide a formal definition of the problem, and present an efficient algorithm for its solution. We show the applicability of our approach on three large real-world examples and thereby perform a conceptual clustering of two collaborative tagging systems. Finally, we introduce neighborhoods of triadic concepts as basis for a lightweight visualization of tri-lattices. The social bookmark and publication sharing system BibSonomy, which is currently among the three most popular systems of its kind, has been developed by our research group. Besides being a useful tool for many scientists, it provides interested researchers a basis for the evaluation and integration of their knowledge discovery methods. This thesis introduces BibSonomy as an exemplary collaborative tagging system and gives an overview of its architecture and some of its features. Furthermore, BibSonomy is used as foundation for evaluating and integrating some of the discussed approaches. Collaborative tagging systems usually include tag recommendation mechanisms easing the process of finding good tags for a resource, but also consolidating the tag vocabulary across users. In this thesis we evaluate and compare several recommendation algorithms on large-scale real-world datasets: an adaptation of user-based Collaborative Filtering, a graph-based recommender built on top of the FolkRank algorithm, and simple methods based on counting tag co-occurrences. We show that both FolkRank and Collaborative Filtering provide better results than non-personalized baseline methods. Moreover, since methods based on counting tag co-occurrences are computationally cheap, and thus usually preferable for real time scenarios, we discuss simple approaches for improving the performance of such methods. We demonstrate how a simple recommender based on counting tags from users and resources can perform almost as good as the best recommender. Furthermore, we show how to integrate recommendation methods into a real tagging system, record and evaluate their performance by describing the tag recommendation framework we developed for BibSonomy. With the intention to develop, test, and evaluate recommendation algorithms and supporting cooperation with researchers, we designed the framework to be easily extensible, open for a variety of methods, and usable independent from BibSonomy. We also present an evaluation of the framework which demonstrates its power. The folksonomy graph shows specific structural properties that explain its growth and the possibility of serendipitous exploration. Clicklogs of web search engines can be represented as a folksonomy in which queries are descriptions of clicked URLs. The resulting network structure, which we will term logsonomy is very similar to the one of folksonomies. In order to find out about its properties, we analyze the topological characteristics of the tripartite hypergraph of queries, users and bookmarks on a large folksonomy snapshot and on query logs of two large search engines. We find that all of the three datasets exhibit similar structural properties and thus conclude that the clicking behaviour of search engine users based on the displayed search results and the tagging behaviour of collaborative tagging users is driven by similar dynamics. In this thesis we further transfer the folksonomy paradigm to the Social Semantic Desktop – a new model of computer desktop that uses Semantic Web technologies to better link information items. There we apply community support methods to the folksonomy found in the network of social semantic desktops. Thus, we connect knowledge discovery for folksonomies with semantic technologies. Altogether, the research in this thesis is centered around collaborative tagging systems and their underlying datastructure – folksonomies – and thereby paves the way for the further dissemination of this successful knowledge management paradigm.},
  address   = {Heidelberg, Germany},
  author    = {Jäschke, Robert},
  interhash = {dcb2cd1cd72ae45d77c4d8755d199405},
  intrahash = {9db90c2ff04f514ada9f6b50fde46065},
  isbn      = {978-3-89838-332-5},
  month     = jan,
  publisher = {Akademische Verlagsgesellschaft AKA},
  series    = {Dissertationen zur Künstlichen Intelligenz},
  title     = {{Formal Concept Analysis} and Tag Recommendations in Collaborative Tagging Systems},
  url       = {http://www.aka-verlag.com/de/detail?ean=978-3-89838-332-5},
  vgwort    = {413},
  volume    = 332,
  year      = 2011
}

@book{ganter1999formal,
  abstract  = {This is the first textbook on formal concept analysis. It gives a systematic presentation of the mathematical foundations and their relation to applications in computer science, especially in data analysis and knowledge processing. Above all, it presents graphical methods for representing conceptual systems that have proved themselves in communicating knowledge. Theory and graphical representation are thus closely coupled together. The mathematical foundations are treated thoroughly and illuminated by means of numerous examples.},
  address   = {Berlin/Heidelberg},
  author    = {Ganter, Bernhard and Wille, Rudolf},
  interhash = {1b0bf49069eadcdfac42e52addf4eb9d},
  intrahash = {ae14b00b5489de8da6e4578ac3062bfc},
  publisher = {Springer},
  title     = {{Formal Concept Analysis}: Mathematical Foundations},
  year      = 1999
}