@article{wegener2012virtual,
  author = {Wegener, R. and Leimeister, J. M.},
  journal = {International Journal of Technology Enhanced Learning (IJTEL)},
  note = {JML_390},
  number = {5/6},
  pages = {383--397},
  title = {Virtual Learning Communities: Success Factors and Challenges},
  volume = 4,
  year = 2012
}

@inproceedings{nivarthi2023towards,
  abstract = {Anomaly detection plays a pivotal role in diverse real-world applications such as cybersecurity, fault detection, network monitoring, predictive maintenance, and highly automated driving. However, obtaining labeled anomalous data can be a formidable challenge, especially when anomalies exhibit temporal evolution. This paper introduces LATAM (Long short-term memory Autoencoder with Temporal Attention Mechanism) for few-shot anomaly detection, with the aim of enhancing detection performance in scenarios with limited labeled anomaly data. LATAM effectively captures temporal dependencies and emphasizes significant patterns in multivariate time series data. In our investigation, we comprehensively evaluate LATAM against other anomaly detection models, particularly assessing its capability in few-shot learning scenarios where we have minimal examples from the normal class and none from the anomalous class in the training data. Our experimental results, derived from real-world photovoltaic inverter data, highlight LATAM's superiority, showcasing a substantial 27% mean F1 score improvement, even when trained on a mere two-week dataset. Furthermore, LATAM demonstrates remarkable results on the open-source SWaT dataset, achieving a 12% boost in accuracy with only two days of training data. Moreover, we introduce a simple yet effective dynamic thresholding mechanism, further enhancing the anomaly detection capabilities of LATAM. This underscores LATAM's efficacy in addressing the challenges posed by limited labeled anomalies in practical scenarios, and it proves valuable for downstream tasks involving temporal representation and time series prediction, extending its utility beyond anomaly detection applications.},
  author = {Nivarthi, Chandana Priya and Sick, Bernhard},
  booktitle = {International Conference on Machine Learning and Applications (ICMLA)},
  doi = {10.1109/ICMLA58977.2023.00218},
  pages = {1444--1450},
  publisher = {IEEE},
  title = {Towards Few-Shot Time Series Anomaly Detection with Temporal Attention and Dynamic Thresholding},
  year = 2023
}

@inproceedings{bitzer2013reference,
  address = {Helsinki, Finland},
  author = {Bitzer, Philipp and Weiß, Frank and Leimeister, Jan Marco},
  booktitle = {Eighth International Conference on Design Science Research in Information Systems and Technology (DESRIST)},
  note = {Accepted for publication},
  title = {Towards a Reference Model for a Productivity-optimized Delivery of Technology Mediated Learning Services},
  year = 2013
}

@inproceedings{bitzer2013productivity,
  address = {Utrecht, Netherlands},
  author = {Bitzer, Philipp and Söllner, Matthias},
  booktitle = {European Conference on Information Systems (ECIS)},
  note = {Accepted for publication},
  title = {Towards a Productivity Measurement Model for Technology Mediated Learning Services},
  year = 2013
}

@inproceedings{Carlson10,
  author = {Carlson, A. and Betteridge, J. and Kisiel, B. and Settles, B. and Hruschka Jr., E. R. and Mitchell, T. M.},
  booktitle = {Proceedings of the Conference on Artificial Intelligence (AAAI)},
  pages = {1306--1313},
  publisher = {AAAI Press},
  title = {Toward an Architecture for Never-Ending Language Learning},
  year = 2010
}
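A minimal sketch of the "simple yet effective dynamic thresholding mechanism" mentioned in the nivarthi2023towards abstract above: flag a point as anomalous when its reconstruction error exceeds a rolling mean plus a multiple of the rolling standard deviation. The window size, the multiplier k, and the function name are illustrative assumptions, not details taken from the paper.

import numpy as np

def dynamic_threshold(errors, window=288, k=3.0):
    # errors: per-timestep reconstruction errors, e.g. from an LSTM autoencoder.
    # A point is flagged when it exceeds mean + k * std of the preceding window,
    # so the threshold adapts as the error distribution drifts over time.
    errors = np.asarray(errors, dtype=float)
    flags = np.zeros(len(errors), dtype=bool)
    for i in range(window, len(errors)):
        hist = errors[i - window:i]
        flags[i] = errors[i] > hist.mean() + k * hist.std()
    return flags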
@inproceedings{coates2011detection,
  abstract = {Reading text from photographs is a challenging problem that has received a significant amount of attention. Two key components of most systems are (i) text detection from images and (ii) character recognition, and many recent methods have been proposed to design better feature representations and models for both. In this paper, we apply methods recently developed in machine learning -- specifically, large-scale algorithms for learning the features automatically from unlabeled data -- and show that they allow us to construct highly effective classifiers for both detection and recognition to be used in a high accuracy end-to-end system.},
  author = {Coates, A. and Carpenter, B. and Case, C. and Satheesh, S. and Suresh, B. and Wang, Tao and Wu, D. J. and Ng, A. Y.},
  booktitle = {International Conference on Document Analysis and Recognition (ICDAR)},
  doi = {10.1109/ICDAR.2011.95},
  issn = {1520-5363},
  month = sep,
  pages = {440--445},
  title = {Text Detection and Character Recognition in Scene Images with Unsupervised Feature Learning},
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6065350&tag=1},
  year = 2011
}

@article{breiman2001random,
  abstract = {Random forests are a combination of tree predictors such that each tree depends on the values of a random vector sampled independently and with the same distribution for all trees in the forest. The generalization error for forests converges a.s. to a limit as the number of trees in the forest becomes large. The generalization error of a forest of tree classifiers depends on the strength of the individual trees in the forest and the correlation between them. Using a random selection of features to split each node yields error rates that compare favorably to Adaboost, but are more robust with respect to noise.},
  author = {Breiman, Leo},
  doi = {10.1023/A:1010933404324},
  issn = {0885-6125},
  journal = {Machine Learning},
  language = {English},
  number = 1,
  pages = {5--32},
  publisher = {Kluwer Academic Publishers},
  title = {Random Forests},
  volume = 45,
  year = 2001
}

@inproceedings{mitchell2015,
  author = {Mitchell, T. and Cohen, W. and Hruschka, E. and Talukdar, P. and Betteridge, J. and Carlson, A. and Dalvi, B. and Gardner, M. and Kisiel, B. and Krishnamurthy, J. and Lao, N. and Mazaitis, K. and Mohammad, T. and Nakashole, N. and Platanios, E. and Ritter, A. and Samadi, M. and Settles, B. and Wang, R. and Wijaya, D. and Gupta, A. and Chen, X. and Saparov, A. and Greaves, M. and Welling, J.},
  booktitle = {Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence (AAAI)},
  title = {Never-Ending Learning},
  url = {http://www.cs.cmu.edu/~wcohen/pubs.html},
  year = 2015
}
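The breiman2001random abstract above names the two sources of randomness that define the method: bootstrapped trees and a random subset of features tried at each split. A short illustration using scikit-learn's off-the-shelf implementation (the dataset and hyperparameters are arbitrary choices for the example, not Breiman's setup):

from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# max_features="sqrt" is the per-split random feature selection the
# abstract credits for error rates that compare favorably to boosting.
clf = RandomForestClassifier(n_estimators=100, max_features="sqrt", random_state=0)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))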
@inproceedings{joachims99,
  address = {Cambridge, MA, USA},
  author = {Joachims, Thorsten},
  booktitle = {Advances in Kernel Methods -- Support Vector Learning},
  editor = {Sch\"olkopf, Bernhard and Burges, Christopher J. C. and Smola, A.},
  publisher = {MIT Press},
  title = {{Making Large-Scale SVM Learning Practical}},
  year = 1999
}

@book{mitchell2010machine,
  address = {New York, NY},
  author = {Mitchell, Tom M.},
  isbn = {9780071154673},
  publisher = {McGraw-Hill},
  title = {Machine Learning},
  year = 2010
}

@article{cimiano05learning,
  author = {Cimiano, Philipp and Hotho, Andreas and Staab, Steffen},
  journal = {Journal of Artificial Intelligence Research},
  pages = {305--339},
  title = {Learning Concept Hierarchies from Text Corpora using Formal Concept Analysis},
  url = {http://www.jair.org/papers/paper1648.html},
  volume = 24,
  year = 2005
}
@misc{yu2013largescale,
  abstract = {The multi-label classification problem has generated significant interest in recent years. However, existing approaches do not adequately address two key challenges: (a) the ability to tackle problems with a large number (say millions) of labels, and (b) the ability to handle data with missing labels. In this paper, we directly address both these problems by studying the multi-label problem in a generic empirical risk minimization (ERM) framework. Our framework, despite being simple, is surprisingly able to encompass several recent label-compression based methods which can be derived as special cases of our method. To optimize the ERM problem, we develop techniques that exploit the structure of specific loss functions -- such as the squared loss function -- to offer efficient algorithms. We further show that our learning framework admits formal excess risk bounds even in the presence of missing labels. Our risk bounds are tight and demonstrate better generalization performance for low-rank promoting trace-norm regularization when compared to (rank insensitive) Frobenius norm regularization. Finally, we present extensive empirical results on a variety of benchmark datasets and show that our methods perform significantly better than existing label compression based methods and can scale up to very large datasets such as the Wikipedia dataset.},
  author = {Yu, Hsiang-Fu and Jain, Prateek and Kar, Purushottam and Dhillon, Inderjit S.},
  note = {cite arxiv:1307.5101},
  title = {Large-scale Multi-label Learning with Missing Labels},
  url = {http://arxiv.org/abs/1307.5101},
  year = 2013
}

@inproceedings{DBLP:conf/dsaa/KrompassNT14,
  author = {Krompass, Denis and Nickel, Maximilian and Tresp, Volker},
  booktitle = {International Conference on Data Science and Advanced Analytics (DSAA), Shanghai, China},
  doi = {10.1109/DSAA.2014.7058046},
  isbn = {978-1-4799-6991-3},
  pages = {18--24},
  publisher = {IEEE},
  title = {Large-scale factorization of type-constrained multi-relational data},
  year = 2014
}

@article{mnih2015humanlevel,
  author = {Mnih, Volodymyr and Kavukcuoglu, Koray and Silver, David and Rusu, Andrei A. and Veness, Joel and Bellemare, Marc G. and Graves, Alex and Riedmiller, Martin and Fidjeland, Andreas K. and Ostrovski, Georg and Petersen, Stig and Beattie, Charles and Sadik, Amir and Antonoglou, Ioannis and King, Helen and Kumaran, Dharshan and Wierstra, Daan and Legg, Shane and Hassabis, Demis},
  issn = {00280836},
  journal = {Nature},
  month = feb,
  number = 7540,
  pages = {529--533},
  publisher = {Nature Publishing Group},
  title = {Human-level control through deep reinforcement learning},
  url = {http://dx.doi.org/10.1038/nature14236},
  volume = 518,
  year = 2015
}

@inproceedings{conf/pkdd/BalasubramanyanDC13,
  author = {Balasubramanyan, Ramnath and Dalvi, Bhavana Bharat and Cohen, William W.},
  booktitle = {ECML/PKDD (2)},
  editor = {Blockeel, Hendrik and Kersting, Kristian and Nijssen, Siegfried and Zelezný, Filip},
  isbn = {978-3-642-40990-5},
  pages = {628--642},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  title = {From Topic Models to Semi-supervised Learning: Biasing Mixed-Membership Models to Exploit Topic-Indicative Features in Entity Clustering},
  url = {http://dx.doi.org/10.1007/978-3-642-40991-2_40},
  volume = 8189,
  year = 2013
}
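A toy sketch of the squared-loss, label-compression corner of the ERM framework summarized in the yu2013largescale abstract above: fit a linear map from features to labels, then truncate it to low rank as a crude stand-in for the trace-norm regularization the paper analyzes. All shapes, the ridge term, and the target rank are illustrative assumptions.

import numpy as np

rng = np.random.default_rng(0)
n, d, L, r = 1000, 50, 200, 10            # samples, features, labels, target rank
X = rng.normal(size=(n, d))
Y = (X @ rng.normal(size=(d, L)) > 1.0).astype(float)   # sparse synthetic labels

# Squared-loss ERM: ridge-regularized least squares for the label map W ...
W = np.linalg.solve(X.T @ X + 1e-2 * np.eye(d), X.T @ Y)
# ... then a rank-r truncation via SVD, compressing the label space.
U, s, Vt = np.linalg.svd(W, full_matrices=False)
W_lowrank = (U[:, :r] * s[:r]) @ Vt[:r]
print((X @ W_lowrank).shape)              # per-instance scores over all L labels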
@incollection{kluegl2013exploiting,
  abstract = {Conditional Random Fields (CRF) are popular methods for labeling unstructured or textual data. Like many machine learning approaches, these undirected graphical models assume the instances to be independently distributed. However, in real-world applications data is grouped in a natural way, e.g., by its creation context. The instances in each group often share additional structural consistencies. This paper proposes a domain-independent method for exploiting these consistencies by combining two CRFs in a stacked learning framework. We apply rule learning collectively on the predictions of an initial CRF for one context to acquire descriptions of its specific properties. Then, we utilize these descriptions as dynamic and high-quality features in an additional (stacked) CRF. The presented approach is evaluated with a real-world dataset for the segmentation of references and achieves a significant reduction of the labeling error.},
  author = {Kluegl, Peter and Toepfer, Martin and Lemmerich, Florian and Hotho, Andreas and Puppe, Frank},
  booktitle = {Mathematical Methodologies in Pattern Recognition and Machine Learning},
  pages = {111--125},
  publisher = {Springer},
  series = {Springer Proceedings in Mathematics \& Statistics},
  title = {Exploiting Structural Consistencies with Stacked Conditional Random Fields},
  volume = 30,
  year = 2013
}

@inproceedings{mirowski2010dynamic,
  author = {Mirowski, Piotr and Ranzato, Marc'Aurelio and LeCun, Yann},
  booktitle = {Proceedings of the NIPS 2010 Workshop on Deep Learning},
  title = {Dynamic Auto-Encoders for Semantic Indexing},
  url = {http://yann.lecun.com/exdb/publis/pdf/mirowski-nipsdl-10.pdf},
  year = 2010
}

@inproceedings{ring2015condist,
  author = {Ring, Markus and Otto, Florian and Becker, Martin and Niebler, Thomas and Landes, Dieter and Hotho, Andreas},
  booktitle = {European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML PKDD)},
  title = {ConDist: A Context-Driven Categorical Distance Measure},
  year = 2015
}

@incollection{pol_introduction,
  author = {Lehmann, Jens and Voelker, Johanna},
  booktitle = {Perspectives on Ontology Learning},
  editor = {Lehmann, Jens and Voelker, Johanna},
  pages = {ix--xvi},
  publisher = {AKA / IOS Press},
  title = {An Introduction to Ontology Learning},
  url = {http://jens-lehmann.org/files/2014/pol_introduction.pdf},
  year = 2014
}
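A generic sketch of the stacked-learning pattern described in the kluegl2013exploiting abstract above: a second-stage model receives the first stage's predictions as additional features. Plain logistic regression stands in for both CRFs (scikit-learn has no CRF), and the data is synthetic, so this shows the stacking pattern only, not the paper's rule-learning step.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 10))
y = (X[:, 0] + X[:, 1] > 0).astype(int)

base = LogisticRegression()
# Out-of-fold predictions avoid leaking the base model's training fit
# into the stacked stage.
probs = cross_val_predict(base, X, y, cv=5, method="predict_proba")
X_stacked = np.hstack([X, probs])          # original plus dynamic features

stacked = LogisticRegression().fit(X_stacked, y)
print(stacked.score(X_stacked, y))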
@inproceedings{coates2011analysis,
  abstract = {A great deal of research has focused on algorithms for learning features from unlabeled data. Indeed, much progress has been made on benchmark datasets like NORB and CIFAR-10 by employing increasingly complex unsupervised learning algorithms and deep models. In this paper, however, we show that several simple factors, such as the number of hidden nodes in the model, may be more important to achieving high performance than the learning algorithm or the depth of the model. Specifically, we will apply several off-the-shelf feature learning algorithms (sparse auto-encoders, sparse RBMs, K-means clustering, and Gaussian mixtures) to CIFAR-10, NORB, and STL datasets using only single-layer networks. We then present a detailed analysis of the effect of changes in the model setup: the receptive field size, number of hidden nodes (features), the step-size ("stride") between extracted features, and the effect of whitening. Our results show that large numbers of hidden nodes and dense feature extraction are critical to achieving high performance -- so critical, in fact, that when these parameters are pushed to their limits, we achieve state-of-the-art performance on both CIFAR-10 and NORB using only a single layer of features. More surprisingly, our best performance is based on K-means clustering, which is extremely fast, has no hyper-parameters to tune beyond the model structure itself, and is very easy to implement. Despite the simplicity of our system, we achieve accuracy beyond all previously published results on the CIFAR-10 and NORB datasets (79.6% and 97.2% respectively).},
  author = {Coates, A. and Lee, H. and Ng, A. Y.},
  booktitle = {Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics},
  editor = {Gordon, Geoffrey and Dunson, David and Dudík, Miroslav},
  pages = {215--223},
  publisher = {JMLR W\&CP},
  series = {JMLR Workshop and Conference Proceedings},
  title = {An analysis of single-layer networks in unsupervised feature learning},
  url = {http://jmlr.csail.mit.edu/proceedings/papers/v15/coates11a.html},
  volume = 15,
  year = 2011
}
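A condensed sketch of the single-layer pipeline the coates2011analysis abstract above reports as its best performer: whiten inputs, learn a dictionary with K-means, then encode with a soft "triangle" activation. Patch extraction, pooling, and the final classifier are omitted, and all sizes here are illustrative assumptions.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
patches = rng.normal(size=(10000, 36))      # stand-in for 6x6 grayscale patches

# Whitening, which the paper singles out as important for performance.
white = PCA(whiten=True).fit_transform(patches)

# K-means learns the feature dictionary (one centroid per feature).
km = KMeans(n_clusters=200, n_init=10, random_state=0).fit(white)

# Triangle encoding: a feature activates by how much closer the patch is
# to its centroid than the mean centroid distance; the rest are zeroed.
dists = km.transform(white)                  # distances to each centroid
features = np.maximum(0.0, dists.mean(axis=1, keepdims=True) - dists)
print(features.shape)                        # (10000, 200)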