@article{noauthororeditor,
  author    = {Wieder, Thomas},
  doi       = {10.12988/ams.2019.9798},
  interhash = {6bb521dabb76f4cbdafc5a4d28d6b860},
  intrahash = {33008e86620b05a2eca14924b040702a},
  issn      = {1314-7552},
  journal   = {Applied Mathematical Sciences},
  number    = 15,
  pages     = {685--695},
  title     = {A Simple Matrix Alteration Method},
  volume    = 13,
  year      = 2019
}

@article{thurau2012descriptive,
  abstract    = {Climate change, the global energy footprint, and strategies for sustainable development have become topics of considerable political and public interest. The public debate is informed by an exponentially growing amount of data and there are diverse partisan interest when it comes to interpretation. We therefore believe that data analysis methods are called for that provide results which are intuitively understandable even to non-experts. Moreover, such methods should be efficient so that non-experts users can perform their own analysis at low expense in order to understand the effects of different parameters and influential factors. In this paper, we discuss a new technique for factorizing data matrices that meets both these requirements. The basic idea is to represent a set of data by means of convex combinations of extreme data points. This often accommodates human cognition. In contrast to established factorization methods, the approach presented in this paper can also determine over-complete bases. At the same time, convex combinations allow for highly efficient matrix factorization. Based on techniques adopted from the field of distance geometry, we derive a linear time algorithm to determine suitable basis vectors for factorization. By means of the example of several environmental and developmental data sets we discuss the performance and characteristics of the proposed approach and validate that significant efficiency gains are obtainable without performance decreases compared to existing convexity constrained approaches.},
  affiliation = {Fraunhofer Institute for Intelligent Analysis and Information Systems IAIS, Sankt Augustin, Germany},
  author      = {Thurau, Christian and Kersting, Kristian and Wahabzada, Mirwaes and Bauckhage, Christian},
  doi         = {10.1007/s10618-011-0216-z},
  interhash   = {457c57f054fea45dcbc8447263591d97},
  intrahash   = {387f4e1711d7065bd5a94455aeae1957},
  issn        = {1384-5810},
  journal     = {Data Mining and Knowledge Discovery},
  keywords    = {Computer Science},
  number      = 2,
  pages       = {325--354},
  publisher   = {Springer Netherlands},
  title       = {Descriptive Matrix Factorization for Sustainability: Adopting the Principle of Opposites},
  volume      = 24,
  year        = 2012
}

@article{koren2009matrix,
  abstract  = {As the Netflix Prize competition has demonstrated, matrix factorization models are superior to classic nearest neighbor techniques for producing product recommendations, allowing the incorporation of additional information such as implicit feedback, temporal effects, and confidence levels.},
  author    = {Koren, Y. and Bell, R. and Volinsky, C.},
  doi       = {10.1109/MC.2009.263},
  interhash = {cface72aeba6ee8c561ccd15035d0ead},
  intrahash = {59ab9b2678949949c04b0fe2a431585a},
  issn      = {0018-9162},
  journal   = {Computer},
  month     = aug,
  number    = 8,
  pages     = {30--37},
  title     = {Matrix Factorization Techniques for Recommender Systems},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5197422&tag=1},
  volume    = 42,
  year      = 2009
}

@inproceedings{Rudolph:2010:CMM:1858681.1858774,
  abstract  = {We propose CMSMs, a novel type of generic compositional models for syntactic and semantic aspects of natural language, based on matrix multiplication. We argue for the structural and cognitive plausibility of this model and show that it is able to cover and combine various common compositional NLP approaches ranging from statistical word space models to symbolic grammar formalisms.},
  acmid     = {1858774},
  address   = {Stroudsburg, PA, USA},
  author    = {Rudolph, Sebastian and Giesbrecht, Eugenie},
  booktitle = {Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics},
  interhash = {6594500d38a361829aeb3ef7889a1709},
  intrahash = {05ec57c39e9b945deb674c3b616eac8f},
  location  = {Uppsala, Sweden},
  numpages  = {10},
  pages     = {907--916},
  publisher = {Association for Computational Linguistics},
  series    = {ACL '10},
  title     = {Compositional Matrix-Space Models of Language},
  url       = {http://dl.acm.org/citation.cfm?id=1858681.1858774},
  year      = 2010
}

@inproceedings{lau2010c,
  author    = {Lau, Sian Lun and K{\"o}nig, Immanuel and David, Klaus and Parandian, Baback and Carius-D{\"u}ssel, Christine and Schultz, Martin},
  booktitle = {The Seventh International Symposium on Wireless Communication Systems (ISWCS'10)},
  interhash = {661643ee5ec02911c136b17456f84b09},
  intrahash = {21f5529596c5c9672e7614e94202766f},
  month     = sep,
  title     = {Supporting Patient Monitoring using Activity Recognition with a Smartphone},
  year      = 2010
}

@inproceedings{lau2010a,
  address   = {Florence, Italy},
  author    = {Lau, Sian Lun and David, Klaus},
  booktitle = {Future Network and Mobile Summit 2010},
  interhash = {0f743dc2c0e74deb93a57ab62570bc5a},
  intrahash = {b8f7902f68f6b5bc682c24916f9d21ab},
  month     = jun,
  note      = {June 16--18},
  pages     = {1--9},
  title     = {Movement Recognition using the Accelerometer in Smartphones},
  year      = 2010
}

@inproceedings{Parandian2010,
  address      = {Berlin, Germany},
  author       = {Parandian, Baback and Dewitz, Karl and Schultz, Martin and Carius-D{\"u}ssel, Christine and Lau, Sian Lun and K{\"o}nig, Immanuel and David, Klaus and Maaser, Michael and Ortmann, Steffen},
  booktitle    = {1. Nationaler Fachkongress Telemedizin},
  interhash    = {db7d9829902ded38f0ddb5a95cf7274c},
  intrahash    = {812059a1edd2254a8f786f7098ca25ec},
  month        = nov,
  note         = {November 3--5},
  organization = {Deutsche Gesellschaft f{\"u}r Telemedizin},
  title        = {{MATRIX}-Middleware f{\"u}r die Realisierung Internet-basierter telemedizinischer Dienste},
  year         = 2010
}

@inproceedings{voigtmann2011a,
  address   = {Seattle, USA},
  author    = {Voigtmann, Christian and Lau, Sian Lun and David, Klaus},
  booktitle = {2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops)},
  interhash = {458f9e64c984aa4a88355169febfa08e},
  intrahash = {b2cf800a5485bec8b65164924f5e1b56},
  month     = mar,
  note      = {March 21--25},
  pages     = {403--408},
  publisher = {IEEE},
  title     = {An Approach to Collaborative Context Prediction},
  year      = 2011
}

@incollection{lau2011_aac,
  author    = {Lau, Sian Lun and David, Klaus},
  booktitle = {Assistive and Augmentive Communication for the Disabled: Intelligent Technologies for Communication, Learning and Teaching},
  chapter   = 6,
  editor    = {Theng, Lau Bee},
  interhash = {5201fe2cfa5396fa8fb855011a1bed08},
  intrahash = {711bc44e01889f0b456431772581a9af},
  month     = may,
  pages     = {159--192},
  publisher = {IGI Global},
  title     = {Enabling Context Aware Services in the Area of {AAC}},
  url       = {http://www.igi-global.com/bookstore/titledetails.aspx?titleid=47433&detailstype=description},
  year      = 2011
}

@article{miettinen2008discrete,
  abstract  = {Matrix decomposition methods represent a data matrix as a product of two factor matrices: one containing basis vectors that represent meaningful concepts in the data and another describing how the observed data can be expressed as combinations of the basis vectors. Decomposition methods have been studied extensively, but many methods return real-valued matrices. Interpreting real-valued factor matrices is hard if the original data is Boolean. In this paper, we describe a matrix decomposition formulation for Boolean data, the Discrete Basis Problem. The problem seeks for a Boolean decomposition of a binary matrix, thus allowing the user to easily interpret the basis vectors. We also describe a variation of the problem, the Discrete Basis Partitioning Problem. We show that both problems are NP-hard. For the Discrete Basis Problem, we give a simple greedy algorithm for solving it; for the Discrete Basis Partitioning Problem, we show how it can be solved using existing methods. We present experimental results for the greedy algorithm and compare it against other well-known methods. Our algorithm gives intuitive basis vectors, but its reconstruction error is usually larger than with the real-valued methods. We discuss the reasons for this behavior.},
  author    = {Miettinen, Pauli and Mielik{\"a}inen, Taneli and Gionis, Aristides and Das, Gautam and Mannila, Heikki},
  doi       = {10.1109/TKDE.2008.53},
  interhash = {1799269370f8bbb6860151b13145ad7f},
  intrahash = {b9e0638656d2fcd0c2965aff0ee0112e},
  journal   = {IEEE Transactions on Knowledge and Data Engineering},
  month     = oct,
  number    = 10,
  pages     = {1348--1362},
  publisher = {IEEE},
  title     = {The Discrete Basis Problem},
  volume    = 20,
  year      = 2008
}

@inproceedings{tatti2006dimension,
  abstract     = {Many 0/1 datasets have a very large number of variables; however, they are sparse and the dependency structure of the variables is simpler than the number of variables would suggest. Defining the effective dimensionality of such a dataset is a nontrivial problem. We consider the problem of defining a robust measure of dimension for 0/1 datasets, and show that the basic idea of fractal dimension can be adapted for binary data. However, as such the fractal dimension is difficult to interpret. Hence we introduce the concept of normalized fractal dimension. For a dataset D, its normalized fractal dimension counts the number of independent columns needed to achieve the unnormalized fractal dimension of D. The normalized fractal dimension measures the degree of dependency structure of the data. We study the properties of the normalized fractal dimension and discuss its computation. We give empirical results on the normalized fractal dimension, comparing it against PCA.},
  author       = {Tatti, N. and Mielik{\"a}inen, T. and Gionis, A. and Mannila, H.},
  booktitle    = {Proceedings of the Sixth IEEE International Conference on Data Mining (ICDM 2006)},
  doi          = {10.1109/ICDM.2006.167},
  interhash    = {5164cd6a09b802d14dce6d3947df60cd},
  intrahash    = {0a8ad03bc7d2d0d7d77ee73eede4ecc0},
  issn         = {1550-4786},
  month        = dec,
  organization = {IEEE},
  pages        = {603--612},
  title        = {What is the Dimension of Your Binary Data?},
  year         = 2006
}

@inproceedings{kersting2010convex,
  abstract  = {We present an extension of convex-hull nonnegative matrix factorization (CH-NMF) which was recently proposed as a large scale variant of convex non-negative matrix factorization (CNMF) or Archetypal Analysis (AA). CH-NMF factorizes a non-negative data matrix V into two non-negative matrix factors V = WH such that the columns of W are convex combinations of certain data points so that they are readily interpretable to data analysts. There is, however, no free lunch: imposing convexity constraints on W typically prevents adaptation to intrinsic, low dimensional structures in the data. Alas, in cases where the data is distributed in a nonconvex manner or consists of mixtures of lower dimensional convex distributions, the cluster representatives obtained from CH-NMF will be less meaningful. In this paper, we present a hierarchical CH-NMF that automatically adapts to internal structures of a data set, hence it yields meaningful and interpretable clusters for non-convex data sets. This is also confirmed by our extensive evaluation on DBLP publication records of 760,000 authors, 4,000,000 images harvested from the web, and 150,000,000 votes on World of Warcraft guilds.},
  address   = {Kassel, Germany},
  author    = {Kersting, Kristian and Wahabzada, Mirwaes and Thurau, Christian and Bauckhage, Christian},
  booktitle = {Proceedings of LWA2010 - Workshop-Woche: Lernen, Wissen {\&} Adaptivitaet},
  crossref  = {lwa2010},
  editor    = {Atzm{\"u}ller, Martin and Benz, Dominik and Hotho, Andreas and Stumme, Gerd},
  end       = {2010-10-06 09:45:00},
  interhash = {9513ef5606b53314806fa4ad6507e819},
  intrahash = {5a46921f58af4fc313d8b60f38859a57},
  room      = {0446},
  session   = {kdml3},
  start     = {2010-10-06 09:22:30},
  title     = {Convex {NMF} on Non-Convex Massiv Data},
  track     = {kdml},
  url       = {http://www.kde.cs.uni-kassel.de/conf/lwa10/papers/kdml5.pdf},
  year      = 2010
}

@book{Gentle:2007,
  abstract  = {Bibliogr. S. [505] - 518},
  author    = {Gentle, James E.},
  interhash = {2156bd85da160d6baf88b187fd1e6230},
  intrahash = {fccc8b26fcc1912304600c6410f241e5},
  isbn      = {978-0-387-70872-0},
  opac      = {http://opac.bibliothek.uni-kassel.de/DB=1/PPN?PPN=190806516},
  publisher = {Springer New York},
  title     = {Matrix Algebra},
  url       = {http://opac.bibliothek.uni-kassel.de/DB=1/PPN?PPN=190806516},
  year      = 2007
}

@book{Golub1996,
  author    = {Golub, Gene H. and Van Loan, Charles F.},
  edition   = {Third},
  interhash = {e4e3160da299a446f86da948f6745ecb},
  intrahash = {f1bb0caf439b5f67f1d388f1298601d8},
  publisher = {The Johns Hopkins University Press},
  title     = {Matrix Computations},
  year      = 1996
}

@techreport{vogt2004numerik,
  author        = {Vogt, Werner},
  institution   = {Technische Universit{\"a}t Ilmenau},
  internal-note = {institution added by reviewer -- confirm against original report},
  interhash     = {a61b21a3fab04530a95f9d6feb18b33c},
  intrahash     = {41b678fc2b9d8b58c703f508c9f66bce},
  title         = {Numerik gro{\ss}er Gleichungssysteme},
  year          = 2004
}

@book{meyer2000maa,
  author    = {Meyer, C. D.},
  interhash = {2284a83d92499fafc3efb4defa41a125},
  intrahash = {c6d7ebc83d5cdbcece39293201a68d41},
  publisher = {Society for Industrial and Applied Mathematics},
  title     = {Matrix Analysis and Applied Linear Algebra},
  url       = {http://www.matrixanalysis.com/DownloadChapters.html},
  year      = 2000
}

@article{355946, abstract = {
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
|
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
REFERENCES
INDEX TERMS
Primary Classification:
Additional Classification:
Collaborative Colleagues:
Peer to Peer - Readers of this Article have also read:
|