@incollection{pol_introduction,
  author    = {Lehmann, Jens and Voelker, Johanna},
  title     = {An Introduction to Ontology Learning},
  booktitle = {Perspectives on Ontology Learning},
  editor    = {Lehmann, Jens and Voelker, Johanna},
  publisher = {AKA / IOS Press},
  pages     = {ix--xvi},
  year      = {2014},
  url       = {http://jens-lehmann.org/files/2014/pol_introduction.pdf},
  interhash = {a53a9f1796f71f2f1c5ec646961f8924},
  intrahash = {cf6a6785f5cab0525632a003c47ef5f7},
  owner     = {jl},
}

@inproceedings{mitchell2015,
  author    = {Mitchell, T. and Cohen, W. and Hruscha, E. and Talukdar, P. and Betteridge, J. and Carlson, A. and Dalvi, B. and Gardner, M. and Kisiel, B. and Krishnamurthy, J. and Lao, N. and Mazaitis, K. and Mohammad, T. and Nakashole, N. and Platanios, E. and Ritter, A. and Samadi, M. and Settles, B. and Wang, R. and Wijaya, D. and Gupta, A. and Chen, X. and Saparov, A. and Greaves, M. and Welling, J.},
  title     = {Never-Ending Learning},
  booktitle = {AAAI},
  year      = {2015},
  note      = {Never-Ending Learning in AAAI-2015},
  url       = {http://www.cs.cmu.edu/~wcohen/pubs.html},
  interhash = {52d0d71f6f5b332dabc1412f18e3a93d},
  intrahash = {63070703e6bb812852cca56574aed093},
}

@book{staab2009handbook,
  editor    = {Staab, Steffen and Studer, Rudi},
  title     = {Handbook on Ontologies},
  publisher = {Springer},
  address   = {Berlin},
  year      = {2009},
  isbn      = {9783540926733 3540926739},
  url       = {http://public.eblib.com/choice/publicfullrecord.aspx?p=571805},
  abstract  = {An ontology is a formal description of concepts and relationships that can exist for a community of human and/or machine agents. This book considers ontology languages, ontology engineering methods, example ontologies, infrastructures and technologies for ontologies, and how to bring this all into ontology-based infrastructures and applications.},
  refid     = {569892085},
  interhash = {c2e7c401bef2cee2bb8b12334d3c7a88},
  intrahash = {be122d99dc6dd20cb58a55d62d8eca6c},
}

@book{pan2013ontologydriven,
  editor         = {Pan, Jeff Z.},
  title          = {Ontology-Driven Software Development},
  shorttitle     = {Ontology-Driven Software Development},
  publisher      = {Springer},
  address        = {Berlin [u.a.]},
  year           = {2013},
  isbn           = {9783642312250},
  url            = {http://scans.hebis.de/HEBCGI/show.pl?30954859_cov.jpg},
  format         = {book},
  partauthors    = {Pan, Jeff Z. (Hrsg.)},
  subtitle       = {Jeff Z. Pan ... eds.},
  titlestatement = {Jeff Z. Pan ... eds.},
  uniqueid       = {HEB309548594},
  interhash      = {b227c90d573b8bbe06380a07d797612e},
  intrahash      = {b88adb2114769033172f3974ad1aaaac},
}

@inproceedings{suchanek2007semantic,
  author    = {Suchanek, Fabian M. and Kasneci, Gjergji and Weikum, Gerhard},
  title     = {{YAGO}: A Core of Semantic Knowledge},
  booktitle = {Proceedings of the 16th International Conference on World Wide Web},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Banff, Alberta, Canada},
  pages     = {697--706},
  numpages  = {10},
  year      = {2007},
  isbn      = {978-1-59593-654-7},
  doi       = {10.1145/1242572.1242667},
  url       = {http://doi.acm.org/10.1145/1242572.1242667},
  acmid     = {1242667},
  abstract  = {We present YAGO, a light-weight and extensible ontology with high coverage and quality. YAGO builds on entities and relations and currently contains more than 1 million entities and 5 million facts. This includes the Is-A hierarchy as well as non-taxonomic relations between entities (such as HASONEPRIZE). The facts have been automatically extracted from Wikipedia and unified with WordNet, using a carefully designed combination of rule-based and heuristic methods described in this paper. The resulting knowledge base is a major step beyond WordNet: in quality by adding knowledge about individuals like persons, organizations, products, etc. with their semantic relationships - and in quantity by increasing the number of facts by more than an order of magnitude. Our empirical evaluation of fact correctness shows an accuracy of about 95%. YAGO is based on a logically clean model, which is decidable, extensible, and compatible with RDFS. Finally, we show how YAGO can be further extended by state-of-the-art information extraction techniques.},
  interhash = {1d2c2b23ce2a6754d12c4364e19c574c},
  intrahash = {84ae693c0a6dfb6d4b051b0b6dbd3668},
}

@inproceedings{baader2007completing,
  author    = {Baader, Franz and Ganter, Bernhard and Sertkaya, Baris and Sattler, Ulrike},
  title     = {Completing Description Logic Knowledge Bases Using Formal Concept Analysis},
  booktitle = {Proceedings of the 20th International Joint Conference on Artificial Intelligence},
  publisher = {Morgan Kaufmann Publishers Inc.},
  address   = {San Francisco, CA, USA},
  location  = {Hyderabad, India},
  pages     = {230--235},
  numpages  = {6},
  year      = {2007},
  url       = {http://dl.acm.org/citation.cfm?id=1625275.1625311},
  acmid     = {1625311},
  abstract  = {We propose an approach for extending both the terminological and the assertional part of a Description Logic knowledge base by using information provided by the knowledge base and by a domain expert. The use of techniques from Formal Concept Analysis ensures that, on the one hand, the interaction with the expert is kept to a minimum, and, on the other hand, we can show that the extended knowledge base is complete in a certain, well-defined sense.},
  interhash = {8ab382f3aa141674412ba7ad33316a9b},
  intrahash = {87f98ae486014ba78690ffa314b67da8},
}

@inproceedings{hearst1992automatic,
  author    = {Hearst, Marti A.},
  title     = {Automatic Acquisition of Hyponyms from Large Text Corpora},
  booktitle = {Proceedings of the 14th Conference on Computational Linguistics},
  publisher = {Association for Computational Linguistics},
  address   = {Stroudsburg, PA, USA},
  location  = {Nantes, France},
  volume    = {2},
  pages     = {539--545},
  numpages  = {7},
  year      = {1992},
  doi       = {10.3115/992133.992154},
  url       = {http://dx.doi.org/10.3115/992133.992154},
  acmid     = {992154},
  abstract  = {We describe a method for the automatic acquisition of the hyponymy lexical relation from unrestricted text. Two goals motivate the approach: (i) avoidance of the need for pre-encoded knowledge and (ii) applicability across a wide range of text. We identify a set of lexico-syntactic patterns that are easily recognizable, that occur frequently and across text genre boundaries, and that indisputably indicate the lexical relation of interest. We describe a method for discovering these patterns and suggest that other lexical relations will also be acquirable in this way. A subset of the acquisition algorithm is implemented and the results are used to augment and critique the structure of a large hand-built thesaurus. Extensions and applications to areas such as information retrieval are suggested.},
  interhash = {8c1e90c6cc76625c34f20370a1af7ea2},
  intrahash = {2c49ad19ac6977bd806b6687e4dcc550},
}

@article{noy2004ontology,
  author      = {Noy, Natalya F. and Klein, Michel},
  title       = {Ontology Evolution: Not the Same as Schema Evolution},
  journal     = {Knowledge and Information Systems},
  publisher   = {Springer},
  address     = {London},
  volume      = {6},
  number      = {4},
  pages       = {428--440},
  year        = {2004},
  issn        = {0219-1377},
  doi         = {10.1007/s10115-003-0137-2},
  url         = {http://dx.doi.org/10.1007/s10115-003-0137-2},
  affiliation = {Stanford Medical Informatics Stanford University Stanford CA 94305 USA},
  keyword     = {Computer Science},
  abstract    = {As ontology development becomes a more ubiquitous and collaborative process, ontology versioning and evolution becomes an important area of ontology research. The many similarities between database-schema evolution and ontology evolution will allow us to build on the extensive research in schema evolution. However, there are also important differences between database schemas and ontologies. The differences stem from different usage paradigms, the presence of explicit semantics and different knowledge models. A lot of problems that existed only in theory in database research come to the forefront as practical problems in ontology evolution. These differences have important implications for the development of ontology-evolution frameworks: The traditional distinction between versioning and evolution is not applicable to ontologies. There are several dimensions along which compatibility between versions must be considered. The set of change operations for ontologies is different. We must develop automatic techniques for finding similarities and differences between versions.},
  interhash   = {4b4ee2090ba5356a3d0e853192968662},
  intrahash   = {08ee0381e240c3ee414e0eefc7fe1a83},
}

@inproceedings{daquin2011extracting,
  author    = {d'Aquin, Mathieu and Motta, Enrico},
  title     = {Extracting Relevant Questions to an {RDF} Dataset Using Formal Concept Analysis},
  booktitle = {Proceedings of the Sixth International Conference on Knowledge Capture},
  publisher = {ACM},
  address   = {New York, NY, USA},
  location  = {Banff, Alberta, Canada},
  pages     = {121--128},
  numpages  = {8},
  year      = {2011},
  isbn      = {978-1-4503-0396-5},
  doi       = {10.1145/1999676.1999698},
  url       = {http://doi.acm.org/10.1145/1999676.1999698},
  acmid     = {1999698},
  abstract  = {With the rise of linked data, more and more semantically described information is being published online according to the principles and technologies of the Semantic Web (especially, RDF and SPARQL). The use of such standard technologies means that this data should be exploitable, integrable and reusable straight away. However, once a potentially interesting dataset has been discovered, significant efforts are currently required in order to understand its schema, its content, the way to query it and what it can answer. In this paper, we propose a method and a tool to automatically discover questions that can be answered by an RDF dataset. We use formal concept analysis to build a hierarchy of meaningful sets of entities from a dataset. These sets of entities represent answers, which common characteristics represent the clauses of the corresponding questions. This hierarchy can then be used as a querying interface, proposing questions of varying levels of granularity and specificity to the user. A major issue is however that thousands of questions can be included in this hierarchy. Based on an empirical analysis and using metrics inspired both from formal concept analysis and from ontology summarization, we devise an approach for identifying relevant questions to act as a starting point to the navigation in the question hierarchy.},
  interhash = {7794150f2b42c21956eb7fb419ca0248},
  intrahash = {45374b975834248c0cd87022fc854e25},
}

@inproceedings{conf/dagstuhl/Stumme05,
  author    = {Stumme, Gerd},
  title     = {Ontology Merging with Formal Concept Analysis},
  booktitle = {Semantic Interoperability and Integration},
  editor    = {Kalfoglou, Yannis and Schorlemmer, W. Marco and Sheth, Amit P. and Staab, Steffen and Uschold, Michael},
  series    = {Dagstuhl Seminar Proceedings},
  volume    = {04391},
  publisher = {IBFI, Schloss Dagstuhl, Germany},
  year      = {2005},
  url       = {http://www.kde.cs.uni-kassel.de/stumme/papers/2005/stumme2005ontology.pdf},
  ee        = {http://drops.dagstuhl.de/opus/volltexte/2005/49},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  interhash = {9206884ea0e91905062366300cfc4870},
  intrahash = {225d908cff3ee338f7595032f236fd07},
}