@conference{11902,
  title        = {Approximate kernel matrix computation on GPUs for large scale learning applications},
  booktitle    = {Proceedings of the 23rd international conference on Supercomputing},
  series       = {ICS {\textquoteright}09},
  year         = {2009},
  pages        = {511--512},
  publisher    = {ACM},
  organization = {ACM},
  address      = {Yorktown Heights, NY, USA},
  abstract     = {Kernel-based learning methods require quadratic space and time complexities to compute the kernel matrix. These complexities limit the applicability of kernel methods to large scale problems with millions of data points. In this paper, we introduce a novel representation of kernel matrices on Graphics Processing Units (GPU). The novel representation exploits the sparseness of the kernel matrix to address the space complexity problem. It also respects the guidelines for memory access on GPUs, which are critical for good performance, to address the time complexity problem. Our representation utilizes the locality preserving properties of space filling curves to obtain a band approximation of the kernel matrix. To prove the validity of the representation, we use Affinity Propagation, an unsupervised clustering algorithm, as an example of kernel methods. Experimental results show a 40x speedup of AP using our representation without degradation in clustering performance.},
  keywords     = {affinity propagation, algorithms, arrays, gpu, kernel methods, parallel programming, performance, space filling curves, sparse matrices},
  isbn         = {978-1-60558-498-0},
  doi          = {10.1145/1542275.1542355},
  author       = {Hussein, Mohamed E and Abd-Almageed, Wael}
}