@article{noauthororeditor,
  author    = {Wieder, Thomas},
  title     = {A simple matrix alteration method},
  journal   = {Applied Mathematical Sciences},
  volume    = {13},
  number    = {15},
  pages     = {685--695},
  year      = {2019},
  issn      = {1314-7552},
  doi       = {10.12988/ams.2019.9798},
  interhash = {6bb521dabb76f4cbdafc5a4d28d6b860},
  intrahash = {33008e86620b05a2eca14924b040702a},
}

@article{thurau2012descriptive,
  abstract    = {Climate change, the global energy footprint, and strategies for sustainable development have become topics of considerable political and public interest. The public debate is informed by an exponentially growing amount of data and there are diverse partisan interest when it comes to interpretation. We therefore believe that data analysis methods are called for that provide results which are intuitively understandable even to non-experts. Moreover, such methods should be efficient so that non-experts users can perform their own analysis at low expense in order to understand the effects of different parameters and influential factors. In this paper, we discuss a new technique for factorizing data matrices that meets both these requirements. The basic idea is to represent a set of data by means of convex combinations of extreme data points. This often accommodates human cognition. In contrast to established factorization methods, the approach presented in this paper can also determine over-complete bases. At the same time, convex combinations allow for highly efficient matrix factorization. Based on techniques adopted from the field of distance geometry, we derive a linear time algorithm to determine suitable basis vectors for factorization. By means of the example of several environmental and developmental data sets we discuss the performance and characteristics of the proposed approach and validate that significant efficiency gains are obtainable without performance decreases compared to existing convexity constrained approaches.},
  affiliation = {Fraunhofer Institute for Intelligent Analysis and Information Systems IAIS, Sankt Augustin, Germany},
  author      = {Thurau, Christian and Kersting, Kristian and Wahabzada, Mirwaes and Bauckhage, Christian},
  title       = {Descriptive matrix factorization for sustainability: Adopting the principle of opposites},
  journal     = {Data Mining and Knowledge Discovery},
  volume      = {24},
  number      = {2},
  pages       = {325--354},
  year        = {2012},
  publisher   = {Springer Netherlands},
  issn        = {1384-5810},
  doi         = {10.1007/s10618-011-0216-z},
  url         = {http://dx.doi.org/10.1007/s10618-011-0216-z},
  keywords    = {Computer Science},
  interhash   = {457c57f054fea45dcbc8447263591d97},
  intrahash   = {387f4e1711d7065bd5a94455aeae1957},
}

@article{koren2009matrix,
  abstract  = {As the Netflix Prize competition has demonstrated, matrix factorization models are superior to classic nearest neighbor techniques for producing product recommendations, allowing the incorporation of additional information such as implicit feedback, temporal effects, and confidence levels.},
  author    = {Koren, Y. and Bell, R. and Volinsky, C.},
  title     = {Matrix Factorization Techniques for Recommender Systems},
  journal   = {Computer},
  volume    = {42},
  number    = {8},
  pages     = {30--37},
  month     = aug,
  year      = {2009},
  issn      = {0018-9162},
  doi       = {10.1109/MC.2009.263},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5197422&tag=1},
  interhash = {cface72aeba6ee8c561ccd15035d0ead},
  intrahash = {59ab9b2678949949c04b0fe2a431585a},
}

@inproceedings{Rudolph:2010:CMM:1858681.1858774,
  abstract  = {We propose CMSMs, a novel type of generic compositional models for syntactic and semantic aspects of natural language, based on matrix multiplication. We argue for the structural and cognitive plausibility of this model and show that it is able to cover and combine various common compositional NLP approaches ranging from statistical word space models to symbolic grammar formalisms.},
  author    = {Rudolph, Sebastian and Giesbrecht, Eugenie},
  title     = {Compositional matrix-space models of language},
  booktitle = {Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics},
  series    = {ACL '10},
  pages     = {907--916},
  year      = {2010},
  publisher = {Association for Computational Linguistics},
  address   = {Stroudsburg, PA, USA},
  location  = {Uppsala, Sweden},
  numpages  = {10},
  acmid     = {1858774},
  url       = {http://dl.acm.org/citation.cfm?id=1858681.1858774},
  interhash = {6594500d38a361829aeb3ef7889a1709},
  intrahash = {05ec57c39e9b945deb674c3b616eac8f},
}

@inproceedings{lau2010c,
  author    = {Lau, Sian Lun and K{\"o}nig, Immanuel and David, Klaus and Parandian, Baback and Carius-D{\"u}ssel, Christine and Schultz, Martin},
  title     = {Supporting Patient Monitoring using Activity Recognition with a Smartphone},
  booktitle = {The Seventh International Symposium on Wireless Communication Systems (ISWCS'10)},
  month     = sep,
  year      = {2010},
  interhash = {661643ee5ec02911c136b17456f84b09},
  intrahash = {21f5529596c5c9672e7614e94202766f},
}

@inproceedings{lau2010a,
  author    = {Lau, Sian Lun and David, Klaus},
  title     = {Movement recognition using the accelerometer in smartphones},
  booktitle = {Future Network and Mobile Summit 2010},
  pages     = {1--9},
  month     = jun,
  year      = {2010},
  address   = {Florence, Italy},
  interhash = {0f743dc2c0e74deb93a57ab62570bc5a},
  intrahash = {b8f7902f68f6b5bc682c24916f9d21ab},
}

@inproceedings{Parandian2010,
  author       = {Parandian, Baback and Dewitz, Karl and Schultz, Martin and Carius-D{\"u}ssel, Christine and Lau, Sian Lun and K{\"o}nig, Immanuel and David, Klaus and Maaser, Michael and Ortmann, Steffen},
  title        = {MATRIX-Middleware f{\"u}r die Realisierung Internet-basierter telemedizinischer Dienste},
  booktitle    = {1. Nationaler Fachkongress Telemedizin},
  organization = {Deutsche Gesellschaft f{\"u}r Telemedizin},
  month        = nov,
  year         = {2010},
  address      = {Berlin, Germany},
  interhash    = {db7d9829902ded38f0ddb5a95cf7274c},
  intrahash    = {812059a1edd2254a8f786f7098ca25ec},
}

@inproceedings{voigtmann2011a,
  author    = {Voigtmann, Christian and Lau, Sian Lun and David, Klaus},
  title     = {An Approach to Collaborative Context Prediction},
  booktitle = {2011 IEEE International Conference on Pervasive Computing and Communications Workshops (PERCOM Workshops)},
  pages     = {403--408},
  month     = mar,
  year      = {2011},
  publisher = {IEEE},
  address   = {Seattle, USA},
  interhash = {458f9e64c984aa4a88355169febfa08e},
  intrahash = {b2cf800a5485bec8b65164924f5e1b56},
}

@incollection{lau2011_aac,
  author    = {Lau, Sian Lun and David, Klaus},
  title     = {Enabling Context Aware Services in the Area of AAC},
  booktitle = {Assistive and Augmentive Communication for the Disabled: Intelligent Technologies for Communication, Learning and Teaching},
  editor    = {Theng, Lau Bee},
  chapter   = {6},
  pages     = {159--192},
  month     = may,
  year      = {2011},
  publisher = {IGI Global},
  url       = {http://www.igi-global.com/bookstore/titledetails.aspx?titleid=47433&detailstype=description},
  interhash = {5201fe2cfa5396fa8fb855011a1bed08},
  intrahash = {711bc44e01889f0b456431772581a9af},
}

@article{miettinen2008discrete,
  abstract  = {Matrix decomposition methods represent a data matrix as a product of two factor matrices: one containing basis vectors that represent meaningful concepts in the data and another describing how the observed data can be expressed as combinations of the basis vectors. Decomposition methods have been studied extensively, but many methods return real-valued matrices. Interpreting real-valued factor matrices is hard if the original data is Boolean. In this paper, we describe a matrix decomposition formulation for Boolean data, the Discrete Basis Problem. The problem seeks for a Boolean decomposition of a binary matrix, thus allowing the user to easily interpret the basis vectors. We also describe a variation of the problem, the Discrete Basis Partitioning Problem. We show that both problems are NP-hard. For the Discrete Basis Problem, we give a simple greedy algorithm for solving it; for the Discrete Basis Partitioning Problem, we show how it can be solved using existing methods. We present experimental results for the greedy algorithm and compare it against other well-known methods. Our algorithm gives intuitive basis vectors, but its reconstruction error is usually larger than with the real-valued methods. We discuss the reasons for this behavior.},
  author    = {Miettinen, Pauli and Mielik{\"a}inen, Taneli and Gionis, Aristides and Das, Gautam and Mannila, Heikki},
  title     = {The Discrete Basis Problem},
  journal   = {IEEE Transactions on Knowledge and Data Engineering},
  volume    = {20},
  number    = {10},
  pages     = {1348--1362},
  month     = oct,
  year      = {2008},
  publisher = {IEEE},
  doi       = {10.1109/TKDE.2008.53},
  url       = {http://dx.doi.org/10.1109/TKDE.2008.53},
  interhash = {1799269370f8bbb6860151b13145ad7f},
  intrahash = {b9e0638656d2fcd0c2965aff0ee0112e},
}

@inproceedings{tatti2006dimension,
  abstract     = {Many 0/1 datasets have a very large number of variables; however, they are sparse and the dependency structure of the variables is simpler than the number of variables would suggest. Defining the effective dimensionality of such a dataset is a nontrivial problem. We consider the problem of defining a robust measure of dimension for 0/1 datasets, and show that the basic idea of fractal dimension can be adapted for binary data. However, as such the fractal dimension is difficult to interpret. Hence we introduce the concept of normalized fractal dimension. For a dataset D, its normalized fractal dimension counts the number of independent columns needed to achieve the unnormalized fractal dimension of D. The normalized fractal dimension measures the degree of dependency structure of the data. We study the properties of the normalized fractal dimension and discuss its computation. We give empirical results on the normalized fractal dimension, comparing it against PCA.},
  author       = {Tatti, N. and Mielikainen, T. and Gionis, A. and Mannila, H.},
  title        = {What is the Dimension of Your Binary Data?},
  booktitle    = {Proceedings of the Sixth IEEE International Conference on Data Mining (ICDM 2006)},
  pages        = {603--612},
  month        = dec,
  year         = {2006},
  organization = {IEEE},
  issn         = {1550-4786},
  doi          = {10.1109/ICDM.2006.167},
  url          = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4053086},
  interhash    = {5164cd6a09b802d14dce6d3947df60cd},
  intrahash    = {0a8ad03bc7d2d0d7d77ee73eede4ecc0},
}

@inproceedings{kersting2010convex,
  abstract  = {We present an extension of convex-hull nonnegative matrix factorization (CH-NMF) which was recently proposed as a large scale variant of convex non-negative matrix factorization (CNMF) or Archetypal Analysis (AA). CH-NMF factorizes a non-negative data matrix V into two non-negative matrix factors V = WH such that the columns of W are convex combinations of certain data points so that they are readily interpretable to data analysts. There is, however, no free lunch: imposing convexity constraints on W typically prevents adaptation to intrinsic, low dimensional structures in the data. Alas, in cases where the data is distributed in a nonconvex manner or consists of mixtures of lower dimensional convex distributions, the cluster representatives obtained from CH-NMF will be less meaningful. In this paper, we present a hierarchical CH-NMF that automatically adapts to internal structures of a data set, hence it yields meaningful and interpretable clusters for non-convex data sets. This is also conformed by our extensive evaluation on DBLP publication records of 760,000 authors, 4,000,000 images harvested from the web, and 150,000,000 votes on World of Warcraft guilds.},
  author    = {Kersting, Kristian and Wahabzada, Mirwaes and Thurau, Christian and Bauckhage, Christian},
  title     = {Convex NMF on Non-Convex Massiv Data},
  booktitle = {Proceedings of LWA2010 - Workshop-Woche: Lernen, Wissen {\&} Adaptivitaet},
  editor    = {Atzm{\"u}ller, Martin and Benz, Dominik and Hotho, Andreas and Stumme, Gerd},
  crossref  = {lwa2010},
  year      = {2010},
  address   = {Kassel, Germany},
  url       = {http://www.kde.cs.uni-kassel.de/conf/lwa10/papers/kdml5.pdf},
  track     = {kdml},
  session   = {kdml3},
  room      = {0446},
  start     = {2010-10-06 09:22:30},
  end       = {2010-10-06 09:45:00},
  interhash = {9513ef5606b53314806fa4ad6507e819},
  intrahash = {5a46921f58af4fc313d8b60f38859a57},
}

@book{Gentle:2007,
  abstract  = {Bibliogr. S. [505] - 518},
  author    = {Gentle, James E.},
  title     = {Matrix algebra},
  publisher = {Springer New York},
  year      = {2007},
  isbn      = {978-0-387-70872-0},
  opac      = {http://opac.bibliothek.uni-kassel.de/DB=1/PPN?PPN=190806516},
  url       = {http://opac.bibliothek.uni-kassel.de/DB=1/PPN?PPN=190806516},
  interhash = {2156bd85da160d6baf88b187fd1e6230},
  intrahash = {fccc8b26fcc1912304600c6410f241e5},
}

@book{Golub1996,
  author    = {Golub, Gene H. and Van Loan, Charles F.},
  title     = {Matrix Computations},
  edition   = {Third},
  publisher = {The Johns Hopkins University Press},
  year      = {1996},
  interhash = {e4e3160da299a446f86da948f6745ecb},
  intrahash = {f1bb0caf439b5f67f1d388f1298601d8},
}

@techreport{vogt2004numerik,
  author    = {Vogt, Werner},
  title     = {Numerik gro{\ss}er Gleichungssysteme},
  year      = {2004},
  interhash = {a61b21a3fab04530a95f9d6feb18b33c},
  intrahash = {41b678fc2b9d8b58c703f508c9f66bce},
}

@book{meyer2000maa,
  author    = {Meyer, C. D.},
  title     = {Matrix Analysis and Applied Linear Algebra},
  publisher = {Society for Industrial Mathematics},
  year      = {2000},
  url       = {http://www.matrixanalysis.com/DownloadChapters.html},
  interhash = {2284a83d92499fafc3efb4defa41a125},
  intrahash = {c6d7ebc83d5cdbcece39293201a68d41},
}

@article{355946,
  author    = {Golub, Gene H. and Luk, Franklin T. and Overton, Michael L.},
  title     = {A Block Lanczos Method for Computing the Singular Values and Corresponding Singular Vectors of a Matrix},
  journal   = {ACM Trans. Math. Softw.},
  volume    = {7},
  number    = {2},
  pages     = {149--169},
  month     = jun,
  year      = {1981},
  publisher = {ACM},
  address   = {New York, NY, USA},
  issn      = {0098-3500},
  doi       = {10.1145/355945.355946},
  url       = {http://portal.acm.org/citation.cfm?id=355945.355946},
  interhash = {b7d43137dfaa6351fc838c5c7a459a9f},
  intrahash = {b6f719ab6d3027ab9967e80edd140959},
}

@article{mikel2005storage,
  abstract  = {Many storage formats (or data structures) have been proposed to represent sparse matrices. This paper presents a performance evaluation in Java comparing eight of the most popular formats plus one recently proposed specifically for Java (by Gundersen and Steihaug [6] -- Java Sparse Array) using the matrix-vector multiplication operation.},
  author    = {Luj{\'a}n, Mikel and Usman, Anila and Hardie, Patrick and Freeman, T. L. and Gurd, John},
  title     = {Storage Formats for Sparse Matrices in Java},
  journal   = {Computational Science -- ICCS 2005},
  pages     = {364--371},
  year      = {2005},
  url       = {http://dx.doi.org/10.1007/11428831_45},
  interhash = {b8bf03d4eba50d021e57b044cf7bb743},
  intrahash = {ba051dc3799456ac8b6ae74b75f7e54b},
}