@article {13066, title = {Face Identification Using Large Feature Sets}, journal = {Image Processing, IEEE Transactions on}, volume = {21}, year = {2012}, month = {2012/04//}, pages = {2245 - 2255}, abstract = {With the goal of matching unknown faces against a gallery of known people, the face identification task has been studied for several decades. There are very accurate techniques to perform face identification in controlled environments, particularly when large numbers of samples are available for each face. However, face identification under uncontrolled environments or with a lack of training data is still an unsolved problem. We employ a large and rich set of feature descriptors (with more than 70 000 descriptors) for face identification using partial least squares to perform multichannel feature weighting. Then, we extend the method to a tree-based discriminative structure to reduce the time required to evaluate probe samples. The method is evaluated on Facial Recognition Technology (FERET) and Face Recognition Grand Challenge (FRGC) data sets. Experiments show that our identification method outperforms current state-of-the-art results, particularly for identifying faces acquired across varying conditions.}, keywords = {(mathematics);, approximations;trees, challenge, data, data;tree-based, discriminative, environments;face, feature, FERET;FRGC, grand, identification, least, recognition, recognition;least, sets;face, sets;facial, squares, squares;training, structure;uncontrolled, task;face, technology;multichannel, weighting;partial}, isbn = {1057-7149}, doi = {10.1109/TIP.2011.2176951}, author = {Schwartz, W.R. and Guo,Huimin and Choi,Jonghyun and Davis, Larry S.} } @conference {14926, title = {A Fast Algorithm for Constructing Inverted Files on Heterogeneous Platforms}, booktitle = {Parallel Distributed Processing Symposium (IPDPS), 2011 IEEE International}, year = {2011}, month = {2011/05//}, pages = {1124 - 1134}, abstract = {Given a collection of documents residing on a disk, we develop a new strategy for processing these documents and building the inverted files extremely fast. Our approach is tailored for a heterogeneous platform consisting of a multicore CPU and a highly multithreaded GPU. Our algorithm is based on a number of novel techniques including: (i) a high-throughput pipelined strategy that produces parallel parsed streams that are consumed at the same rate by parallel indexers, (ii) a hybrid trie and B-tree dictionary data structure in which the trie is represented by a table for fast look-up and each B-tree node contains string caches, (iii) allocation of parsed streams with frequent terms to CPU threads and the rest to GPU threads so as to match the throughput of parsed streams, and (iv) optimized CUDA indexer implementation that ensures coalesced memory accesses and effective use of shared memory. We have performed extensive tests of our algorithm on a single node (two Intel Xeon X5560 Quad-core) with two NVIDIA Tesla C1060 attached to it, and were able to achieve a throughput of more than 262 MB/s on the ClueWeb09 dataset. Similar results were obtained for widely different datasets. 
The throughput of our algorithm is superior to the best known algorithms reported in the literature even when compared to those run on large clusters.}, keywords = {architecture;graphics, B-tree, C1060;central, construction;multicore, CPU;multithreaded, data, device, dictionary, equipment;coprocessors;data, files, GPU;computer, graphic, indexer;Intel, pipelined, platform;high-throughput, PROCESSING, Quad-core;NVIDIA, strategy;hybrid, structure;CUDA, structure;inverted, structures;multiprocessing, systems;, Tesla, trie, unified, unit;computer, unit;heterogeneous, X5560, Xeon}, doi = {10.1109/IPDPS.2011.107}, author = {Wei, Zheng and JaJa, Joseph F.} } @conference {13940, title = {Scalable fast multipole methods on distributed heterogeneous architectures}, booktitle = {High Performance Computing, Networking, Storage and Analysis (SC), 2011 International Conference for}, year = {2011}, month = {2011/11//}, pages = {1 - 12}, abstract = {We fundamentally reconsider implementation of the Fast Multipole Method (FMM) on a computing node with a heterogeneous CPU-GPU architecture with multicore CPU(s) and one or more GPU accelerators, as well as on an interconnected cluster of such nodes. The FMM is a divide- and-conquer algorithm that performs a fast N-body sum using a spatial decomposition and is often used in a time- stepping or iterative loop. Using the observation that the local summation and the analysis-based translation parts of the FMM are independent, we map these respectively to the GPUs and CPUs. Careful analysis of the FMM is performed to distribute work optimally between the multicore CPUs and the GPU accelerators. We first develop a single node version where the CPU part is parallelized using OpenMP and the GPU version via CUDA. New parallel algorithms for creating FMM data structures are presented together with load balancing strategies for the single node and distributed multiple-node versions. Our implementation can perform the N-body sum for 128M particles on 16 nodes in 4.23 seconds, a performance not achieved by others in the literature on such clusters.}, keywords = {accelerators;OpenMP;analysis, algorithm;iterative, and, architecture;CUDA;FMM, architectures;, architectures;divide-and-conquer, based, conquer, CPU-GPU, CPU;scalable, data, fast, heterogeneous, loop;data, loop;multicore, methods;graphics, methods;multiprocessing, methods;time, multipole, parts;distributed, PROCESSING, stepping, structures;divide, structures;GPU, systems;parallel, translation, units;iterative}, author = {Hu,Qi and Gumerov, Nail A. and Duraiswami, Ramani} } @article {17909, title = {Social Snapshot: A System for Temporally Coupled Social Photography}, journal = {Computer Graphics and Applications, IEEE}, volume = {31}, year = {2011}, month = {2011/02//jan}, pages = {74 - 84}, abstract = {Social Snapshot actively acquires and reconstructs temporally dynamic data. The system enables spatiotemporal 3D photography using commodity devices, assisted by their auxiliary sensors and network functionality. It engages users, making them active rather than passive participants in data acquisition.}, keywords = {3D, acquisition;data, acquisition;photography;social, computing;, coupled, data, photography;data, photography;temporally, reconstruction;social, sciences, snapshot;spatiotemporal, social}, isbn = {0272-1716}, doi = {10.1109/MCG.2010.107}, author = {Patro,R. and Ip, Cheuk Yiu and Bista,S. 
and Varshney, Amitabh} } @conference {14850, title = {Trainable 3D recognition using stereo matching}, booktitle = {Computer Vision Workshops (ICCV Workshops), 2011 IEEE International Conference on}, year = {2011}, month = {2011///}, pages = {625 - 631}, abstract = {Stereo matching has been used for face recognition in the presence of pose variation. In this approach, stereo matching is used to compare two 2-D images based on correspondences that reflect the effects of viewpoint variation and allow for occlusion. We show how to use stereo matching to derive image descriptors that can be used to train a classifier. This improves face recognition performance, producing the best published results on the CMU PIE dataset. We also demonstrate that classification based on stereo matching can be used for general object classification in the presence of pose variation. In preliminary experiments we show promising results on the 3D object class dataset, a standard, challenging 3D classification data set.}, keywords = {2D, 3D, class, classification, classification;image, data, dataset;CMU, dataset;face, descriptor;occlusion;pose, estimation;solid, image, image;3D, matching;pose, matching;trainable, modelling;stereo, object, PIE, processing;, recognition;face, recognition;image, set;3D, variation;stereo}, doi = {10.1109/ICCVW.2011.6130301}, author = {Castillo,C. D and Jacobs, David W.} } @conference {12471, title = {Automatic target recognition based on simultaneous sparse representation}, booktitle = {Image Processing (ICIP), 2010 17th IEEE International Conference on}, year = {2010}, month = {2010/09//}, pages = {1377 - 1380}, abstract = {In this paper, an automatic target recognition algorithm is presented based on a framework for learning dictionaries for simultaneous sparse signal representation and feature extraction. The dictionary learning algorithm is based on class supervised simultaneous orthogonal matching pursuit while a matching pursuit-based similarity measure is used for classification. We show how the proposed framework can be helpful for efficient utilization of data, with the possibility of developing real-time, robust target classification. We verify the efficacy of the proposed algorithm using confusion matrices on the well known Comanche forward-looking infrared data set consisting of ten different military targets at different orientations.}, keywords = {(artificial, algorithm;feature, based, classification;iterative, classification;learning, Comanche, data, dictionary;matching, extraction;image, forward-looking, infrared, intelligence);military, learning, MATCHING, matrix;dictionary, measure;military, methods;learning, orthogonal, pursuit, pursuit;confusion, recognition;class, recognition;target, representation;feature, representation;sparse, set;automatic, signal, similarity, simultaneous, sparse, supervised, systems;object, target, target;simultaneous, tracking;}, doi = {10.1109/ICIP.2010.5652306}, author = {Patel, Vishal M. and Nasrabadi,N.M. 
and Chellapa, Rama} } @conference {15283, title = {Scaling Populations of a Genetic Algorithm for Job Shop Scheduling Problems Using MapReduce}, booktitle = {Cloud Computing Technology and Science (CloudCom), 2010 IEEE Second International Conference on}, year = {2010}, month = {2010///}, pages = {780 - 785}, abstract = {Inspired by Darwinian evolution, a genetic algorithm (GA) approach is one popular heuristic method for solving hard problems such as the Job Shop Scheduling Problem (JSSP), which is one of the hardest problems lacking efficient exact solutions today. It is intuitive that the population size of a GA may greatly affect the quality of the solution, but it is unclear what are the effects of having population sizes that are significantly greater than typical experiments. The emergence of MapReduce, a framework running on a cluster of computers that aims to provide large-scale data processing, offers great opportunities to investigate this issue. In this paper, a GA is implemented to scale the population using MapReduce. Experiments are conducted on a large cluster, and population sizes up to 10^7 are inspected. It is shown that larger population sizes not only tend to yield better solutions, but also require fewer generations. Therefore, it is clear that when dealing with a hard problem such as JSSP, an existing GA can be improved by massively scaling up populations with MapReduce, so that the solution can be parallelized and completed in reasonable time.}, keywords = {algorithm;job, algorithms;job, computing;genetic, data, large-scale, MapReduce;cloud, problems;parallel, processing;, processing;cloud, scheduling, scheduling;parallel, shop}, doi = {10.1109/CloudCom.2010.18}, author = {Di-Wei Huang and Jimmy Lin} } @conference {16047, title = {VAST 2010 Challenge: Arms dealings and pandemics}, booktitle = {Visual Analytics Science and Technology (VAST), 2010 IEEE Symposium on}, year = {2010}, month = {2010/10//}, pages = {263 - 264}, abstract = {The 5th VAST Challenge consisted of three mini-challenges that involved both intelligence analysis and bioinformatics. Teams could solve one, two or all three mini-challenges and assess the overall situation to enter the Grand Challenge. Mini-challenge one involved text reports about people and events giving information about arms dealers, situations in various countries and linkages between different countries. Mini-challenge two involved hospital admission and death records from various countries providing information about the spread of a world wide pandemic. Mini-challenge three involved genetic data to be used to identify the origin of the pandemic and the most dangerous viral mutations. The Grand Challenge was to determine how these various mini-challenges were connected. As always the goal was to analyze the data and provide novel interactive visualizations useful in the analytic process. We received 58 submissions in total and gave 15 awards.}, keywords = {2010, administrative, admission;intelligence, analysis;interactive, analysis;medical, challenge;arms, challenge;hospital, data, data;grand, dealings;bioinformatics;dangerous, mutations;death, processing;weapons;, records;genetic, reports;bioinformatics;data, VAST, viral, visualizations;minichallenge;pandemics;text}, doi = {10.1109/VAST.2010.5649054}, author = {Grinstein,G. and Konecni,S. and Scholtz,J. and Whiting,M. and Plaisant, Catherine} } @conference {16829, title = {Distance Oracles for Spatial Networks}, booktitle = {Data Engineering, 2009. ICDE {\textquoteright}09.
IEEE 25th International Conference on}, year = {2009}, month = {2009/04/29/2}, pages = {652 - 663}, abstract = {The popularity of location-based services and the need to do real-time processing on them has led to an interest in performing queries on transportation networks, such as finding shortest paths and finding nearest neighbors. The challenge is that these operations involve the computation of distance along a spatial network rather than "as the crow flies." In many applications an estimate of the distance is sufficient, which can be achieved by use of an oracle. An approximate distance oracle is proposed for spatial networks that exploits the coherence between the spatial position of vertices and the network distance between them. Using this observation, a distance oracle is introduced that is able to obtain the ε-approximate network distance between two vertices of the spatial network. The network distance between every pair of vertices in the spatial network is efficiently represented by adapting the well-separated pair technique to spatial networks. Initially, use is made of an ε-approximate distance oracle of size O(n/ε^d) that is capable of retrieving the approximate network distance in O(log n) time using a B-tree. The retrieval time can be theoretically reduced further to O(1) time by proposing another ε-approximate distance oracle of size O(n log n/ε^d) that uses a hash table. Experimental results indicate that the proposed technique is scalable and can be applied to sufficiently large road networks. A 10\%-approximate oracle (ε = 0.1) on a large network yielded an average error of 0.9\% with 90\% of the answers making an error of 2\% or less and an average retrieval time of 68 microseconds. Finally, a strategy for the integration of the distance oracle into any relational database system as well as using it to perform a variety of spatial queries such as region search, k-nearest neighbor search, and spatial joins on spatial networks is discussed.}, keywords = {B-tree;distance, data, database, databases;tree, distance;hash, languages;query, neighbor, network, network;transportation, networks;spatial, networks;well-separated, oracles;ε-approximate, pair, processing;region, processing;relational, search;location-based, search;relational, services;real-time, structures;, system;road, table;k-nearest, technique;programming}, doi = {10.1109/ICDE.2009.53}, author = {Sankaranarayanan,J. and Samet, Hanan} } @conference {13984, title = {Efficient Kriging via Fast Matrix-Vector Products}, booktitle = {Aerospace Conference, 2008 IEEE}, year = {2008}, month = {2008/03//}, pages = {1 - 7}, abstract = {Interpolating scattered data points is a problem of wide ranging interest. Ordinary kriging is an optimal scattered data estimator, widely used in geosciences and remote sensing. A generalized version of this technique, called cokriging, can be used for image fusion of remotely sensed data. However, it is computationally very expensive for large data sets. We demonstrate the time efficiency and accuracy of approximating ordinary kriging through the use of fast matrix-vector products combined with iterative methods.
We used methods based on the fast Multipole methods and nearest neighbor searching techniques for implementations of the fast matrix-vector products.}, keywords = {cokriging, data, data;scattered, efficiency;geophysical, estimator;remotely, fusion;, fusion;interpolation;iterative, matrix-vector, methods;image, methods;nearest, methods;remote, multipole, neighbor, points;time, products;fast, scattered, searching;optimal, sensed, sensing;sensor, technique;fast, techniques;iterative}, doi = {10.1109/AERO.2008.4526433}, author = {Memarsadeghi,N. and Raykar,V.C. and Duraiswami, Ramani and Mount, Dave} } @conference {18212, title = {Extended abstract: A hardware-assisted data hiding based approach in building high performance secure execution systems}, booktitle = {Hardware-Oriented Security and Trust, 2008. HOST 2008. IEEE International Workshop on}, year = {2008}, month = {2008/06//}, pages = {93 - 96}, abstract = {Recently, a novel data hiding technique was proposed to embed information into compiled binary codes in order to enhance system performance. Using this technique as a vehicle, we propose a framework to build high-performance secure execution systems by embedding data for security and trust purposes. We illustrate this approach in a mobile computing environment by an instruction-block level data hiding technique, which can be used to defend against malicious attacks (such as Trojan injection). This improves the trustworthiness of mobile codes. It also protects the code provider{\textquoteright}s intellectual property because the code can be executed only on the designated device. When combined with the existing approach in [4], the proposed data hiding framework can provide trust and high-performance simultaneously. Finally, we conduct a proof-of-the-concept FPGA prototyping to validate the data hiding technique and evaluate the hardware cost in terms of gate count, power consumption, and gate delay.}, keywords = {arrays;invasive, attacks;mobile, codes;data, codes;mobile, computing;, computing;power, consumption;proof-of-the-concept, count;gate, data, delay;hardware-assisted, embedding;gate, embedding;instruction-block, encapsulation;distributed, execution, FPGA, gate, hiding;information, injection;binary, level;malicious, programmable, programming;field, prototyping;secure, software;mobile, systems;binary, Trojan}, doi = {10.1109/HST.2008.4559062}, author = {Taylor,M. and Yin,Chi-En and M. Wu and Gang Qu} } @conference {16836, title = {High-Dimensional Similarity Retrieval Using Dimensional Choice}, booktitle = {Similarity Search and Applications, 2008. SISAP 2008. First International Workshop on}, year = {2008}, month = {2008/04//}, pages = {35 - 42}, abstract = {There are several pieces of information that can be utilized in order to improve the efficiency of similarity searches on high-dimensional data. The most commonly used information is the distribution of the data itself but the use of dimensional choice based on the information in the query as well as the parameters of the distribution can provide an effective improvement in the query processing speed and storage. The use of this method can produce dimension reduction by as much as a factor of n, the number of data points in the database, over sequential search. We demonstrate that the curse of dimensionality is not based on the dimension of the data itself, but primarily upon the effective dimension of the distance function.
We also introduce a new distance function that utilizes fewer dimensions of the higher dimensional space to produce a maximal lower bound distance in order to approximate the full distance function. This work has demonstrated significant dimension reduction, up to 70\% reduction with an improvement in accuracy or over 99\% with only a 6\% loss in accuracy on a prostate cancer data set.}, keywords = {data, database, function;high-dimensional, management, processing;, processing;sequential, reduction;database, reduction;distance, retrieval;query, search;data, search;similarity, similarity, system;dimension, systems;query}, doi = {10.1109/SISAP.2008.20}, author = {Tahmoush,D. and Samet, Hanan} } @article {12545, title = {Object Detection, Tracking and Recognition for Multiple Smart Cameras}, journal = {Proceedings of the IEEE}, volume = {96}, year = {2008}, month = {2008/10//}, pages = {1606 - 1624}, abstract = {Video cameras are among the most commonly used sensors in a large number of applications, ranging from surveillance to smart rooms for videoconferencing. There is a need to develop algorithms for tasks such as detection, tracking, and recognition of objects, specifically using distributed networks of cameras. The projective nature of imaging sensors provides ample challenges for data association across cameras. We first discuss the nature of these challenges in the context of visual sensor networks. Then, we show how real-world constraints can be favorably exploited in order to tackle these challenges. Examples of real-world constraints are (a) the presence of a world plane, (b) the presence of a three-dimiensional scene model, (c) consistency of motion across cameras, and (d) color and texture properties. In this regard, the main focus of this paper is towards highlighting the efficient use of the geometric constraints induced by the imaging devices to derive distributed algorithms for target detection, tracking, and recognition. Our discussions are supported by several examples drawn from real applications. Lastly, we also describe several potential research problems that remain to be addressed.}, keywords = {algorithm;geometric, analysis;image, association;distributed, camera;visual, cameras;, cameras;object, colour, constraints;imaging, data, detection;object, detection;sensor, detection;three-dimiensional, fusion;target, model;video, network;distributed, recognition;object, scene, sensor, sensor;multiple, sensors;geometry;image, sensors;object, smart, texture;intelligent, tracking;target, tracking;video}, isbn = {0018-9219}, doi = {10.1109/JPROC.2008.928758}, author = {Sankaranarayanan,A. C and Veeraraghavan,A. and Chellapa, Rama} } @conference {16064, title = {VAST 2008 Challenge: Introducing mini-challenges}, booktitle = {Visual Analytics Science and Technology, 2008. VAST {\textquoteright}08. IEEE Symposium on}, year = {2008}, month = {2008/10//}, pages = {195 - 196}, abstract = {Visual analytics experts realize that one effective way to push the field forward and to develop metrics for measuring the performance of various visual analytics components is to hold an annual competition. The VAST 2008 Challenge is the third year that such a competition was held in conjunction with the IEEE Visual Analytics Science and Technology (VAST) symposium. The authors restructured the contest format used in 2006 and 2007 to reduce the barriers to participation and offered four mini-challenges and a Grand Challenge. 
Mini Challenge participants were to use visual analytic tools to explore one of four heterogeneous data collections to analyze specific activities of a fictitious, controversial movement. Questions asked in the Grand Challenge required the participants to synthesize data from all four data sets. In this paper we give a brief overview of the data sets, the tasks, the participation, the judging, and the results.}, keywords = {2008, analytics;data, Challenge;heterogeneous, collections;visual, data, VAST, visualisation;}, doi = {10.1109/VAST.2008.4677383}, author = {Grinstein,G. and Plaisant, Catherine and Laskowski,S. and O{\textquoteright}Connell,T. and Scholtz,J. and Whiting,M.} } @conference {18150, title = {A Component Estimation Framework for Information Forensics}, booktitle = {Multimedia Signal Processing, 2007. MMSP 2007. IEEE 9th Workshop on}, year = {2007}, month = {2007/10//}, pages = {397 - 400}, abstract = {With a rapid growth of imaging technologies and an increasingly widespread usage of digital images and videos for a large number of high security and forensic applications, there is a strong need for techniques to verify the source and integrity of digital data. Component forensics is a new approach for forensic analysis that aims to estimate the algorithms and parameters in each component of the digital device. In this paper, we develop a novel theoretical foundation to understand the fundamental performance limits of component forensics. We define formal notions of identifiability of components in the information processing chain, and present methods to quantify the accuracies at which the component parameters can be estimated. Building upon the proposed theoretical framework, we devise methods to improve the accuracies of component parameter estimation for a wide range of forensic applications.}, keywords = {component, data, data;, Estimation, estimation;image, forensics;parameter, framework;digital, integrity;imaging, of, processing;security, technologies;information}, doi = {10.1109/MMSP.2007.4412900}, author = {Swaminathan,A. and M. Wu and Liu,K. J.R} } @conference {14951, title = {Component-based Data Layout for Efficient Slicing of Very Large Multidimensional Volumetric Data}, booktitle = {Scientific and Statistical Database Management, 2007. SSDBM {\textquoteright}07. 19th International Conference on}, year = {2007}, month = {2007/07//}, pages = {8 - 8}, abstract = {In this paper, we introduce a new efficient data layout scheme to efficiently handle out-of-core axis-aligned slicing queries of very large multidimensional volumetric data. Slicing is a very useful dimension reduction tool that removes or reduces occlusion problems in visualizing 3D/4D volumetric data sets and that enables fast visual exploration of such data sets. We show that the data layouts based on typical space-filling curves are not optimal for the out-of-core slicing queries and present a novel component-based data layout scheme for a specialized problem domain, in which it is only required to provide fast slicing at every k-th value, for any k > 1. Our component-based data layout scheme provides much faster processing time for any axis-aligned slicing direction at every k-th value, k > 1, requiring less cache memory size and without any replication of data.
In addition, the data layout can be generalized to any high dimension.}, keywords = {axis-aligned, cache, curves;very, data, data;data, databases;, handling;query, large, layout;data, memory, multidimensional, processing;very, queries;space-filling, size;component-based, slicing, slicing;out-of-core, volumetric}, doi = {10.1109/SSDBM.2007.7}, author = {Kim,Jusub and JaJa, Joseph F.} } @article {15507, title = {Developing a Computer Science Department at the University of Maryland}, journal = {Annals of the History of Computing, IEEE}, volume = {29}, year = {2007}, month = {2007/12//oct}, pages = {64 - 75}, abstract = {This article describes the first six years of the Computer Science Department, established in 1973 at the University of Maryland. The department evolved out of the Computer Science Center, which had been instituted in February 1962. In 1980, the National Academy of Sciences judged the department as being among the leading computer science departments in the US.}, keywords = {administrative, data, department;educational, MARYLAND, processing;, Science, University;computer}, isbn = {1058-6180}, doi = {10.1109/MAHC.2007.4407446}, author = {Minker, Jack} } @conference {14404, title = {GeoDDupe: A Novel Interface for Interactive Entity Resolution in Geospatial Data}, booktitle = {Information Visualization, 2007. IV {\textquoteright}07. 11th International Conference}, year = {2007}, month = {2007/07//}, pages = {489 - 496}, abstract = {Due to the growing interest in geospatial data mining and analysis, data cleaning and integration in geospatial data is becoming an important issue. Geospatial entity resolution is the process of reconciling multiple location references to the same real world location within a single data source (deduplication) or across multiple data sources (integration). In this paper, we introduce an interactive tool called GeoDDupe which effectively combines automatic data mining algorithms for geospatial entity resolution with a novel network visualization supporting users{\textquoteright} resolution analysis and decisions. We illustrate the GeoDDupe interface with an example geospatial dataset and show how users can efficiently and accurately resolve location entities. Finally, the case study with two real-world geospatial datasets demonstrates the potential of GeoDDupe.}, keywords = {algorithms;data, cleaning;geospatial, data, entity, GeoDDupe;automatic, interfaces;, mining, mining;geography;user, mining;geospatial, resolution;interactive, tool;network, visualization;data}, doi = {10.1109/IV.2007.55}, author = {Kang,Hyunmo and Sehgal,V. and Getoor, Lise} } @article {12587, title = {Target Tracking Using a Joint Acoustic Video System}, journal = {Multimedia, IEEE Transactions on}, volume = {9}, year = {2007}, month = {2007/06//}, pages = {715 - 727}, abstract = {In this paper, a multitarget tracking system for collocated video and acoustic sensors is presented. We formulate the tracking problem using a particle filter based on a state-space approach. We first discuss the acoustic state-space formulation whose observations use a sliding window of direction-of-arrival estimates. We then present the video state space that tracks a target{\textquoteright}s position on the image plane based on online adaptive appearance models. For the joint operation of the filter, we combine the state vectors of the individual modalities and also introduce a time-delay variable to handle the acoustic-video data synchronization issue, caused by acoustic propagation delays. 
A novel particle filter proposal strategy for joint state-space tracking is introduced, which places the random support of the joint filter where the final posterior is likely to lie. By using the Kullback-Leibler divergence measure, it is shown that the joint operation of the filter decreases the worst case divergence of the individual modalities. The resulting joint tracking filter is quite robust against video and acoustic occlusions due to our proposal strategy. Computer simulations are presented with synthetic and field data to demonstrate the filter{\textquoteright}s performance}, keywords = {(numerical, acoustic, adaptive, appearance, approach;synchronization;time-delay, data, delay;acoustic, divergence;acoustic, estimate;joint, estimation;hidden, feature, filter;sliding, Filtering, fusion;multitarget, fusion;synchronisation;target, highways;direction-of-arrival, Kullback-Leibler, methods);sensor, model;particle, processing;, processing;automated, propagation, removal;optical, signal, system;multimodal, tracking;acoustic, tracking;direction-of-arrival, tracking;occlusion;online, tracking;particle, tracking;video, variable;visual, video, window;state-space}, isbn = {1520-9210}, doi = {10.1109/TMM.2007.893340}, author = {Cevher, V. and Sankaranarayanan,A. C and McClellan, J.H. and Chellapa, Rama} } @conference {16074, title = {VAST 2007 Contest - Blue Iguanodon}, booktitle = {Visual Analytics Science and Technology, 2007. VAST 2007. IEEE Symposium on}, year = {2007}, month = {2007/11/30/1}, pages = {231 - 232}, abstract = {Visual analytics experts realize that one effective way to push the field forward and to develop metrics for measuring the performance of various visual analytics components is to hold an annual competition. The second visual analytics science and technology (VAST) contest was held in conjunction with the 2007 IEEE VAST symposium. In this contest participants were to use visual analytic tools to explore a large heterogeneous data collection to construct a scenario and find evidence buried in the data of illegal and terrorist activities that were occurring. A synthetic data set was made available as well as tasks. In this paper we describe some of the advances we have made from the first competition held in 2006.}, keywords = {activities;large, activities;visual, analytic, collection;terrorist, data, heterogeneous, illegal, interfaces;, tools;data, user, visualisation;graphical}, doi = {10.1109/VAST.2007.4389032}, author = {Grinstein,G. and Plaisant, Catherine and Laskowski,S. and O{\textquoteright}Connell,T. and Scholtz,J. and Whiting,M.} } @conference {16085, title = {Exploring content-actor paired network data using iterative query refinement with NetLens}, booktitle = {Digital Libraries, 2006. JCDL {\textquoteright}06. Proceedings of the 6th ACM/IEEE-CS Joint Conference on}, year = {2006}, month = {2006/06//}, pages = {372 - 372}, abstract = {Networks have remained a challenge for information retrieval and visualization because of the rich set of tasks that users want to accomplish. This paper demonstrates a tool, NetLens, to explore a content-actor paired network data model. The NetLens interface was designed to allow users to pose a series of elementary queries and iteratively refine visual overviews and sorted lists. This enables the support of complex queries that are traditionally hard to specify in node-link visualizations. 
NetLens is general and scalable in that it applies to any dataset that can be represented with our abstract content-actor data model}, keywords = {data, interface, libraries;query, management, model;data, models;data, NetLens;content-actor, network, pair, processing;user, query, refinement;data, retrieval;iterative, systems;, visualisation;digital, visualization;information}, doi = {10.1145/1141753.1141868}, author = {Lee,Bongshin and Kang,Hyunmo and Plaisant, Catherine and Bederson, Benjamin B.} } @article {18191, title = {Data hiding in curves with application to fingerprinting maps}, journal = {Signal Processing, IEEE Transactions on}, volume = {53}, year = {2005}, month = {2005/10//}, pages = {3988 - 4005}, abstract = {This paper presents a new data hiding method for curves. The proposed algorithm parameterizes a curve using the B-spline model and adds a spread spectrum sequence to the coordinates of the B-spline control points. In order to achieve robust fingerprint detection, an iterative alignment-minimization algorithm is proposed to perform curve registration and to deal with the nonuniqueness of B-spline control points. Through experiments, we demonstrate the robustness of the proposed data-hiding algorithm against various attacks, such as collusion, cropping, geometric transformations, vector/raster-raster/vector conversions, printing-and-scanning, and some of their combinations. We also show the feasibility of our method for fingerprinting topographic maps as well as writings and drawings.}, keywords = {(mathematics);, algorithm;, alignment-minimization, B-spline, CONTROL, curve, data, detection;, edge, embedding;, encapsulation;, fingerprint, geospatial, hiding, identification;, image, iterative, method;, methods;, minimisation;, point;, protection;, registration;, sequence;, spectrum, splines, spread, watermarking;}, isbn = {1053-587X}, doi = {10.1109/TSP.2005.855411}, author = {Gou,Hongmei and M. Wu} } @conference {13335, title = {The half-edge tree: a compact data structure for level-of-detail tetrahedral meshes}, booktitle = {Shape Modeling and Applications, 2005 International Conference}, year = {2005}, month = {2005/06//}, pages = {332 - 337}, abstract = {We propose a new data structure for the compact encoding of a level-of detail (LOD) model of a three-dimensional scalar field based on unstructured tetrahedral meshes. Such data structure, called a half-edge tree (HET), is built through the iterative application of a half-edge collapse, i.e. by contracting an edge to one of its endpoints. We also show that selective refined meshes extracted from an HET contain on average about 34\% and up to 75\% less tetrahedra than those extracted from an LOD model built through a general edge collapse.}, keywords = {application;, compact, computational, data, detection;, edge, encoding;, generation;, geometry;, half-edge, iterative, level-of-detail, mesh, meshes;, methods;, model;, structure;, structures;, tetrahedral, tree, tree;}, doi = {10.1109/SMI.2005.47}, author = {Danovaro,E. and De Floriani, Leila and Magillo,P. and Puppo,E. and Sobrero,D. and Sokolovsky,N.} } @conference {14986, title = {Mitigating risk of data loss in preservation environments}, booktitle = {Mass Storage Systems and Technologies, 2005. Proceedings. 22nd IEEE / 13th NASA Goddard Conference on}, year = {2005}, month = {2005/04//}, pages = {39 - 48}, abstract = {Preservation environments manage digital records for time periods that are much longer than that of a single vendor product. 
A primary requirement is the preservation of the authenticity and integrity of the digital records while simultaneously minimizing the cost of long-term storage, as the data is migrated onto successive generations of technology. The emergence of low-cost storage hardware has made it possible to implement innovative software systems that minimize risk of data loss and preserve authenticity and integrity. This paper describes software mechanisms in use in current persistent archives and presents an example based upon the NARA research prototype persistent archive.}, keywords = {archives;, authentication;, authenticity;, computing;, data, databases;, digital, distributed, environment;, Grid, integrity;, management;, message, objects;, persistent, preservation, record, risk, storage}, doi = {10.1109/MSST.2005.20}, author = {Moore,R.W. and JaJa, Joseph F. and Chadduck,R.} } @conference {12296, title = {Adaptive replication in peer-to-peer systems}, booktitle = {Distributed Computing Systems, 2004. Proceedings. 24th International Conference on}, year = {2004}, month = {2004///}, pages = {360 - 369}, abstract = {Peer-to-peer systems can be used to form a low-latency decentralized data delivery system. Structured peer-to-peer systems provide both low latency and excellent load balance with uniform query and data distributions. Under the more common skewed access distributions, however, individual nodes are easily overloaded, resulting in poor global performance and lost messages. This paper describes a lightweight, adaptive, and system-neutral replication protocol, called LAR, that maintains low access latencies and good load balance even under highly skewed demand. We apply LAR to Chord and show that it has lower overhead and better performance than existing replication strategies.}, keywords = {adaptive, allocation;, data, databases;, decentralized, delivery, distributed, LAR, low-latency, peer-to-peer, processing;, protocol;, replicated, replication, resource, strategies;, structured, system-neutral, system;, systems;}, doi = {10.1109/ICDCS.2004.1281601}, author = {Gopalakrishnan,V. and Silaghi,B. and Bhattacharjee, Bobby and Keleher,P.} } @conference {18017, title = {Arbitrate-and-move primitives for high throughput on-chip interconnection networks}, booktitle = {Circuits and Systems, 2004. ISCAS {\textquoteright}04. Proceedings of the 2004 International Symposium on}, volume = {2}, year = {2004}, month = {2004/05//}, pages = {II - 441-4 Vol.2}, abstract = {An n-leaf pipelined balanced binary tree is used for arbitration of order and movement of data from n input ports to one output port. A novel arbitrate-and-move primitive circuit for every node of the tree, which is based on a concept of reduced synchrony that benefits from attractive features of both asynchronous and synchronous designs, is presented. The design objective of the pipelined binary tree is to provide a key building block in a high-throughput mesh-of-trees interconnection network for Explicit Multi Threading (XMT) architecture, a recently introduced parallel computation framework. The proposed reduced synchrony circuit was compared with asynchronous and synchronous designs of arbitrate-and-move primitives. Simulations with 0.18 μm technology show that compared to an asynchronous design, the proposed reduced synchrony implementation achieves a higher throughput, up to 2 Giga-Requests per second on an 8-leaf binary tree.
Our circuit also consumes less power than the synchronous design, and requires less silicon area than both the synchronous and asynchronous designs.}, keywords = {8, arbiter, arbitrate-and-move, architecture;, asynchronous, balanced, binary, circuit, circuit;, circuits;, consumption;, data, explicit, interconnection, interconnections;, leaf, mesh-of-trees, multi-threading;, Multithreading, n-leaf, network;, pipeline, pipelined, power, primitive, processing;, reduced, simulation;, structures;, synchronous, synchrony, system-on-chip;, tree, tree;}, doi = {10.1109/ISCAS.2004.1329303}, author = {Balkan,A.O. and Gang Qu and Vishkin, Uzi} } @conference {13213, title = {Background modeling and subtraction by codebook construction}, booktitle = {Image Processing, 2004. ICIP {\textquoteright}04. 2004 International Conference on}, volume = {5}, year = {2004}, month = {2004/10//}, pages = {3061 - 3064 Vol. 5 - 3061 - 3064 Vol. 5}, abstract = {We present a new fast algorithm for background modeling and subtraction. Sample background values at each pixel are quantized into codebooks which represent a compressed form of background model for a long image sequence. This allows us to capture structural background variation due to periodic-like motion over a long period of time under limited memory. Our method can handle scenes containing moving backgrounds or illumination variations (shadows and highlights), and it achieves robust detection for compressed videos. We compared our method with other multimode modeling techniques.}, keywords = {(signal);, analysis;, background, codebook, coding;, colour, compression;, construction;, data, image, modeling, modeling;, MOTION, multimode, quantisation, representation;, sequence;, sequences;, subtraction;, technique;, video}, doi = {10.1109/ICIP.2004.1421759}, author = {Kim,Kyungnam and Chalidabhongse,T.H. and Harwood,D. and Davis, Larry S.} } @article {18187, title = {Data hiding in binary image for authentication and annotation}, journal = {Multimedia, IEEE Transactions on}, volume = {6}, year = {2004}, month = {2004/08//}, pages = {528 - 538}, abstract = {This paper proposes a new method to embed data in binary images, including scanned text, figures, and signatures. The method manipulates "flippable" pixels to enforce specific block-based relationship in order to embed a significant amount of data without causing noticeable artifacts. Shuffling is applied before embedding to equalize the uneven embedding capacity from region to region. The hidden data can be extracted without using the original image, and can also be accurately extracted after high quality printing and scanning with the help of a few registration marks. The proposed data embedding method can be used to detect unauthorized use of a digitized signature, and annotate or authenticate binary documents. The paper also presents analysis and discussions on robustness and security issues.}, keywords = {annotation;, authentication;, binary, coding;, data, digital, digitized, document, EMBEDDING, encapsulation;, extraction;, feature, hiding;, image, image;, method;, signature;, unauthorized, user;, watermarking;}, isbn = {1520-9210}, doi = {10.1109/TMM.2004.830814}, author = {M. Wu and Liu,Bede} } @conference {18190, title = {Data hiding in curves for collusion-resistant digital fingerprinting}, booktitle = {Image Processing, 2004. ICIP {\textquoteright}04. 2004 International Conference on}, volume = {1}, year = {2004}, month = {2004/10//}, pages = {51 - 54 Vol. 1 - 51 - 54 Vol. 
1}, abstract = {This paper presents a new data hiding method for curves. The proposed algorithm parameterizes a curve using the B-spline model and adds a spread spectrum sequence in the coordinates of the B-spline control points. We demonstrate through experiments the robustness of the proposed data hiding algorithm against printing-and-scanning and collusions, and show its feasibility for collusion-resistant fingerprinting of topographic maps as well as writings/drawings from pen-based input devices.}, keywords = {(mathematics);, B-spline, coding;, collusion-resistant, CONTROL, data, devices;, digital, document, encapsulation;, extraction;, feature, fingerprinting;, hiding;, image, INPUT, maps;, model;, pen-based, points;, printing-and-scanning, processing;, robustness;, sequence;, spectrum, splines, spread, topographic, watermarking;}, doi = {10.1109/ICIP.2004.1418687}, author = {Gou,Hongmei and M. Wu} } @conference {18200, title = {Distortion management of real-time MPEG-4 video over downlink multicode CDMA networks}, booktitle = {Communications, 2004 IEEE International Conference on}, volume = {5}, year = {2004}, month = {2004/06//}, pages = {3071 - 3075 Vol.5 - 3071 - 3075 Vol.5}, abstract = {In this paper, a protocol is designed to manage source rate/channel coding rate adaptation, code allocation, and power control to transmit real-time MPEG-4 FGS video over downlink multicode CDMA networks. We develop a fast adaptive scheme of distortion management to reduce the overall distortion received by all users subject, to the limited number of codes and maximal transmitted power. Compared with a modified greedy method in literature, our proposed algorithm can reduce the overall system{\textquoteright}s distortion by at least 45\%.}, keywords = {access;, adaptation;, allocation;, CDMA, channel, code, coding, coding;, combined, communication;, compression;, control;, data, distortion, division, downlink, links;, management;, MPEG-4, multicode, multiple, networks;, power, radio, rate, real-time, resource, source, source-channel, video, video;, visual}, doi = {10.1109/ICC.2004.1313096}, author = {Su,Guan-Ming and Han,Zhu and Kwasinski,A. and M. Wu and Liu,K. J.R and Farvardin,N.} } @conference {12704, title = {Fusion of gait and face for human identification}, booktitle = {Acoustics, Speech, and Signal Processing, 2004. Proceedings. (ICASSP {\textquoteright}04). IEEE International Conference on}, volume = {5}, year = {2004}, month = {2004/05//}, pages = {V - 901-4 vol.5 - V - 901-4 vol.5}, abstract = {Identification of humans from arbitrary view points is an important requirement for different tasks including perceptual interfaces for intelligent environments, covert security and access control etc. For optimal performance, the system must use as many cues as possible and combine them in meaningful ways. In this paper, we discuss fusion of face and gait cues for the single camera case. We present a view invariant gait recognition algorithm for gait recognition. We employ decision fusion to combine the results of our gait recognition algorithm and a face recognition algorithm based on sequential importance sampling. We consider two fusion scenarios: hierarchical and holistic. The first involves using the gait recognition algorithm as a filter to pass on a smaller set of candidates to the face recognition algorithm. The second involves combining the similarity scores obtained individually from the face and gait recognition algorithms. 
Simple rules like the SUM, MIN and PRODUCT are used for combining the scores. The results of fusion experiments are demonstrated on the NIST database which has outdoor gait and face data of 30 subjects.}, keywords = {access, algorithm;, analysis;, combining, control;, covert, cues;, data, decision, Environment, Face, fusion;, Gait, hierarchical, holistic, human, identification;, importance, intelligent, interfaces;, invariant, perceptual, recognition, recognition;, rules;, sampling;, score, scores;, security;, sensor, sequential, similarity, view}, doi = {10.1109/ICASSP.2004.1327257}, author = {Kale, A. and Roy Chowdhury, A.K. and Chellapa, Rama} } @article {13215, title = {Rendering localized spatial audio in a virtual auditory space}, journal = {Multimedia, IEEE Transactions on}, volume = {6}, year = {2004}, month = {2004/08//}, pages = {553 - 564}, abstract = {High-quality virtual audio scene rendering is required for emerging virtual and augmented reality applications, perceptual user interfaces, and sonification of data. We describe algorithms for creation of virtual auditory spaces by rendering cues that arise from anatomical scattering, environmental scattering, and dynamical effects. We use a novel way of personalizing the head related transfer functions (HRTFs) from a database, based on anatomical measurements. Details of algorithms for HRTF interpolation, room impulse response creation, HRTF selection from a database, and audio scene presentation are presented. Our system runs in real time on an office PC without specialized DSP hardware.}, keywords = {(computer, 3-D, audio, audio;, auditory, augmented, data, environments;, functions;, graphics);, Head, interfaces;, perceptual, processing;, reality, reality;, related, rendering, rendering;, scene, signal, sonification;, spaces;, spatial, transfer, user, virtual}, isbn = {1520-9210}, doi = {10.1109/TMM.2004.827516}, author = {Zotkin,Dmitry N and Duraiswami, Ramani and Davis, Larry S.} } @conference {18278, title = {Security evaluation for communication-friendly encryption of multimedia}, booktitle = {Image Processing, 2004. ICIP {\textquoteright}04. 2004 International Conference on}, volume = {1}, year = {2004}, month = {2004/10//}, pages = {569 - 572 Vol. 1 - 569 - 572 Vol. 1}, abstract = {This paper addresses the access control issues unique to multimedia, by using a joint signal processing and cryptographic approach to multimedia encryption. Based on three atomic encryption primitives, we present a systematic study on how to strategically integrate different atomic operations to build a video encryption system. We also propose a set of multimedia-specific security metrics to quantify the security against approximation attacks and to complement the existing notion of generic data security. The resulting system can provide superior performance to both generic encryption and its simple adaptation to video in terms of a joint consideration of security, bitrate overhead, and communication friendliness.}, keywords = {access, approximation, atomic, attacks;, bitrate, coding;, communication-friendly, communication;, control;, cryptography;, data, encryption, encryption;, generic, joint, method;, metrics;, multimedia, multimedia-specific, overhead;, primitives;, processing/cryptographic, Security, security;, signal, system;, Telecommunication, video}, doi = {10.1109/ICIP.2004.1418818}, author = {Mao,Yinian and M. Wu} } @conference {12288, title = {Slurpie: a cooperative bulk data transfer protocol}, booktitle = {INFOCOM 2004. 
Twenty-third AnnualJoint Conference of the IEEE Computer and Communications Societies}, volume = {2}, year = {2004}, month = {2004/03//}, pages = {941 - 951 vol.2 - 941 - 951 vol.2}, abstract = {We present Slurpie: a peer-to-peer protocol for bulk data transfer. Slurpie is specifically designed to reduce client download times for large, popular files, and to reduce load on servers that serve these files. Slurpie employs a novel adaptive downloading strategy to increase client performance, and employs a randomized backoff strategy to precisely control load on the server. We describe a full implementation of the Slurpie protocol, and present results from both controlled local-area and wide-area testbeds. Our results show that Slurpie clients improve performance as the size of the network increases, and the server is completely insulated from large flash crowds entering the Slurpie network.}, keywords = {adaptive, bulk, client-server, clients;, computing;, cooperative, data, data;, downloading, network;, peer-to-peer, protocol;, protocols;, Slurpie, strategy;, systems;, transfer, transport}, doi = {10.1109/INFCOM.2004.1356981}, author = {Sherwood,R. and Braud,R. and Bhattacharjee, Bobby} } @conference {15029, title = {Strategies for exploring large scale data}, booktitle = {Parallel Architectures, Algorithms and Networks, 2004. Proceedings. 7th International Symposium on}, year = {2004}, month = {2004/05//}, pages = {2 - 2}, abstract = {We consider the problem of querying large scale multidimensional time series data to discover events of interest, test and validate hypotheses, or to associate temporal patterns with specific events. This type of data currently dominates most other types of available data, and will very likely become even more prevalent in the future given the current trends in collecting time series of business, scientific, demographic, and simulation data. The ability to explore such collections interactively, even at a coarse level, will be critical in discovering the information and knowledge embedded in such collections. We develop indexing techniques and search algorithms to efficiently handle temporal range value querying of multidimensional time series data. Our indexing uses linear space data structures that enable the handling of queries in I/O time that is essentially the same as that of handling a single time slice, assuming the availability of a logarithmic number of processors as a function of the temporal window. A data structure with provably almost optimal asymptotic bounds is also presented for the case when the number of multidimensional objects is relatively small. 
These techniques improve significantly over standard techniques for either serial or parallel processing, and are evaluated by extensive experimental results that confirm their superior performance.}, keywords = {algorithms;, association;, asymptotic, bounds;, business, data, data;, database, databases;, demographic, discovery;, Indexing, indexing;, information, knowledge, large, linear, mining;, multidimensional, objects;, optimal, Parallel, pattern, processing;, query, querying;, range, scale, scientific, search, serial, series, series;, simulation, space, structure;, structures;, techniques;, temporal, TIME, value, very, window;}, doi = {10.1109/ISPAN.2004.1300447}, author = {JaJa, Joseph F.} } @article {18167, title = {Anti-collusion fingerprinting for multimedia}, journal = {Signal Processing, IEEE Transactions on}, volume = {51}, year = {2003}, month = {2003/04//}, pages = {1069 - 1087}, abstract = {Digital fingerprinting is a technique for identifying users who use multimedia content for unintended purposes, such as redistribution. These fingerprints are typically embedded into the content using watermarking techniques that are designed to be robust to a variety of attacks. A cost-effective attack against such digital fingerprints is collusion, where several differently marked copies of the same content are combined to disrupt the underlying fingerprints. We investigate the problem of designing fingerprints that can withstand collusion and allow for the identification of colluders. We begin by introducing the collusion problem for additive embedding. We then study the effect that averaging collusion has on orthogonal modulation. We introduce a tree-structured detection algorithm for identifying the fingerprints associated with K colluders that requires O(K log(n/K)) correlations for a group of n users. We next develop a fingerprinting scheme based on code modulation that does not require as many basis signals as orthogonal modulation. We propose a new class of codes, called anti-collusion codes (ACCs), which have the property that the composition of any subset of K or fewer codevectors is unique. Using this property, we can therefore identify groups of K or fewer colluders. We present a construction of binary-valued ACC under the logical AND operation that uses the theory of combinatorial designs and is suitable for both the on-off keying and antipodal form of binary code modulation. In order to accommodate n users, our code construction requires only O(√n) orthogonal signals for a given number of colluders. We introduce three different detection strategies that can be used with our ACC for identifying a suspect set of colluders. We demonstrate the performance of our ACC for fingerprinting multimedia and identifying colluders through experiments using Gaussian signals and real images.}, keywords = {(mathematics);, additive, algorithm;, and, anti-collusion, attack;, averaging, binary, code, codes;, codevectors;, coding;, colluders, collusion;, combinatorial, communication;, compression;, correlation;, cost-effective, data, data;, design, DETECTION, detection;, digital, embedding;, fingerprinting;, Gaussian, identification;, image, images;, keying;, logical, mathematics;, Modulation, modulation;, multimedia, multimedia;, of, on-off, operation;, orthogonal, processes;, real, redistribution;, Security, signal, signals;, theory;, tree-structured, TREES, watermarking;}, isbn = {1053-587X}, doi = {10.1109/TSP.2003.809378}, author = {Trappe,W. and M. Wu and Wang,Z.J. and Liu,K.
J.R} } @article {18193, title = {Data hiding in image and video .I. Fundamental issues and solutions}, journal = {Image Processing, IEEE Transactions on}, volume = {12}, year = {2003}, month = {2003/06//}, pages = {685 - 695}, abstract = {We address a number of fundamental issues of data hiding in image and video and propose general solutions to them. We begin with a review of two major types of embedding, based on which we propose a new multilevel embedding framework to allow the amount of extractable data to be adaptive according to the actual noise condition. We then study the issues of hiding multiple bits through a comparison of various modulation and multiplexing techniques. Finally, the nonstationary nature of visual signals leads to highly uneven distribution of embedding capacity and causes difficulty in data hiding. We propose an adaptive solution switching between using constant embedding rate with shuffling and using variable embedding rate with embedded control bits. We verify the effectiveness of our proposed solutions through analysis and simulation.}, keywords = {adaptive, analysis;, bits;, colour, condition;, constant, CONTROL, data, embedded, EMBEDDING, embedding;, encapsulation;, extractable, hiding;, image, Modulation, modulation;, multilevel, multiplexing, multiplexing;, NOISE, nonstationary, processing;, rate;, reviews;, shuffling;, signal, signals;, simulation;, solution;, techniques;, variable, video, visual}, isbn = {1057-7149}, doi = {10.1109/TIP.2003.810588}, author = {M. Wu and Liu,Bede} } @article {18194, title = {Data hiding in image and video .II. Designs and applications}, journal = {Image Processing, IEEE Transactions on}, volume = {12}, year = {2003}, month = {2003/06//}, pages = {696 - 705}, abstract = {For pt. I see ibid., vol.12, no.6, p.685-95 (2003). This paper applies the solutions to the fundamental issues addressed in Part I to specific design problems of embedding data in image and video. We apply multilevel embedding to allow the amount of embedded information that can be reliably extracted to be adaptive with respect to the actual noise conditions. When extending the multilevel embedding to video, we propose strategies for handling uneven embedding capacity from region to region within a frame as well as from frame to frame. We also embed control information to facilitate the accurate extraction of the user data payload and to combat such distortions as frame jitter. The proposed algorithm can be used for a variety of applications such as copy control, access control, robust annotation, and content-based authentication.}, keywords = {access, annotation;, authentication;, capacity;, conditions;, content-based, CONTROL, control;, copy, data, distortions;, EMBEDDING, embedding;, encapsulation;, extraction;, frame, hiding;, image, information;, jitter;, message, multilevel, NOISE, noise;, payload, processing;, robust, signal, uneven, user, video}, isbn = {1057-7149}, doi = {10.1109/TIP.2003.810589}, author = {M. Wu and Yu,H. and Liu,Bede} } @article {12301, title = {Deno: a decentralized, peer-to-peer object-replication system for weakly connected environments}, journal = {Computers, IEEE Transactions on}, volume = {52}, year = {2003}, month = {2003/07//}, pages = {943 - 959}, abstract = {This paper presents the design, implementation, and evaluation of the replication framework of Deno, a decentralized, peer-to-peer object-replication system targeted for weakly connected environments. 
Deno uses weighted voting for availability and pair-wise, epidemic information flow for flexibility. This combination allows the protocols to operate with less than full connectivity, to easily adapt to changes in group membership, and to make few assumptions about the underlying network topology. We present two versions of Deno{\textquoteright}s protocol that differ in the consistency levels they support. We also propose security extensions to handle a class of malicious actions that involve misrepresentation of protocol information. Deno has been implemented and runs on top of Linux and Win32 platforms. We use the Deno prototype to characterize the performance of the Deno protocols and extensions. Our study reveals several interesting results that provide fundamental insight into the benefits of decentralization and the mechanics of epidemic protocols.}, keywords = {actions;, connected, consistency, data, data;, databases;, decentralized, Deno;, distributed, environments;, epidemic, group, levels;, Linux;, malicious, management;, membership;, network, object, object-replication, of, operating, peer-to-peer, protocols;, replicated, replication;, Security, security;, synchronisation;, system;, systems;, topology;, Unix;, voting;, weakly, weighted, Win32;}, isbn = {0018-9340}, doi = {10.1109/TC.2003.1214342}, author = {Cetintemel,U. and Keleher,P. J and Bhattacharjee, Bobby and Franklin,M.J.} } @conference {16894, title = {Depth-first k-nearest neighbor finding using the MaxNearestDist estimator}, booktitle = {Image Analysis and Processing, 2003. Proceedings. 12th International Conference on}, year = {2003}, month = {2003/09//}, pages = {486 - 491}, abstract = {Similarity searching is an important task when trying to find patterns in applications which involve mining different types of data such as images, video, time series, text documents, DNA sequences, etc. Similarity searching often reduces to finding the k nearest neighbors to a query object. A description is given of how to use an estimate of the maximum possible distance at which a nearest neighbor can be found to prune the search process in a depth-first branch-and-bound k-nearest neighbor finding algorithm. Using the MaxNearestDist estimator (Larsen, S. and Kanal, L.N., 1986) in the depth-first k-nearest neighbor algorithm provides a middle ground between a pure depth-first and a best-first k-nearest neighbor algorithm.}, keywords = {branch-and-bound, data, depth-first, distance;, DNA, documents;, estimation;, estimator;, finding;, images;, k-nearest, matching;, maximum, MaxNearestDist, mining;, neighbor, parameter, pattern, possible, process;, processing;, query, search, searching;, sequences;, series;, similarity, text, TIME, tree, video;}, doi = {10.1109/ICIAP.2003.1234097}, author = {Samet, Hanan} } @conference {11911, title = {Energy-efficient broadcast and multicast trees for reliable wireless communication}, booktitle = {Wireless Communications and Networking, 2003. WCNC 2003. 2003 IEEE}, volume = {1}, year = {2003}, month = {2003/03//}, pages = {660 - 667 vol.1}, abstract = {We define energy-efficient broadcast and multicast schemes for reliable communication in multi-hop wireless networks. Unlike previous techniques, the choice of neighbors in the broadcast and multicast trees in these schemes is based not only on the link distance, but also on the error rates associated with the link.
Our schemes can be implemented using both positive and negative acknowledgement based reliable broadcast techniques in the link layer. Through simulations, we show that our scheme achieves up to 45\% improvement over previous schemes on realistic 100-node network topologies. While a positive acknowledgement based implementation is preferred, our simulations show that its additional benefit is marginal (1-2\%). Therefore, a negative acknowledgement based implementation of our schemes is equally applicable in constructing energy-efficient reliable broadcast and multicast data delivery paths.}, keywords = {broadcast, channels;multicast, communication;broadcast, communication;network, data, delivery, energy-efficient, layer;multicast, network, networks;network, networks;telecommunication, paths;multicast, rates;link, reliability;, topologies;wireless, topology;radio, trees;error, trees;multihop, wireless}, doi = {10.1109/WCNC.2003.1200429}, author = {Banerjee,S. and Misra,A. and Yeo,Jihwang and Agrawala, Ashok K.} } @article {18234, title = {Joint security and robustness enhancement for quantization based data embedding}, journal = {Circuits and Systems for Video Technology, IEEE Transactions on}, volume = {13}, year = {2003}, month = {2003/08//}, pages = {831 - 841}, abstract = {The paper studies joint security and robustness enhancement of quantization-based data embedding for multimedia authentication applications. We present an analysis showing that through a nontrivial run lookup table (LUT) that maps quantized multimedia features randomly to binary data, the probability of detection error can be considerably smaller than the traditional quantization embedding. We quantify the security strength of LUT embedding and enhance its robustness through distortion compensation. Introducing a joint security and capacity measure, we show that the proposed distortion-compensated LUT embedding provides joint enhancement of security and robustness over the traditional quantization embedding.}, keywords = {(signal);, authentication;, binary, compensation;, data, data;, DETECTION, digital, distortion, distortion;, embedding;, encapsulation;, enhancement;, error, features;, hiding;, lookup, lookup;, LUT;, message, multimedia, nontrivial, probability;, quantisation, quantization, quantized, Robustness, run, Security, statistics;, systems;, table, table;, watermarking;}, isbn = {1051-8215}, doi = {10.1109/TCSVT.2003.815951}, author = {Wu,M.} } @conference {18235, title = {Joint security robustness enhancement for quantization embedding}, booktitle = {Image Processing, 2003. ICIP 2003. Proceedings. 2003 International Conference on}, volume = {2}, year = {2003}, month = {2003/09//}, pages = {II - 483-6 vol.3}, abstract = {This paper studies joint security and robustness enhancement of quantization based data embedding for multimedia authentication applications. We present an analysis showing that through a lookup table (LUT) of nontrivial run that maps quantized multimedia features randomly to binary data, the detection error probability can be considerably smaller than the traditional quantization embedding. We quantify the security strength of LUT embedding and enhance its robustness through distortion compensation.
Introducing a joint security and capacity measure, we show that the proposed distortion compensated LUT embedding provides joint enhancement of security and robustness over the traditional quantization embedding.}, keywords = {applications;, Authentication, authentication;, binary, communication;, data, data;, embedding;, error, joint, lookup, lookup;, message, multimedia, probability;, security;, statistics;, table, table;, watermarking;}, doi = {10.1109/ICIP.2003.1246722}, author = {Wu,M.} } @article {16897, title = {Properties of embedding methods for similarity searching in metric spaces}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {25}, year = {2003}, month = {2003/05//}, pages = {530 - 549}, abstract = {Complex data types-such as images, documents, DNA sequences, etc.-are becoming increasingly important in modern database applications. A typical query in many of these applications seeks to find objects that are similar to some target object, where (dis)similarity is defined by some distance function. Often, the cost of evaluating the distance between two objects is very high. Thus, the number of distance evaluations should be kept at a minimum, while (ideally) maintaining the quality of the result. One way to approach this goal is to embed the data objects in a vector space so that the distances of the embedded objects approximates the actual distances. Thus, queries can be performed (for the most part) on the embedded objects. We are especially interested in examining the issue of whether or not the embedding methods will ensure that no relevant objects are left out. Particular attention is paid to the SparseMap, FastMap, and MetricMap embedding methods. SparseMap is a variant of Lipschitz embeddings, while FastMap and MetricMap are inspired by dimension reduction methods for Euclidean spaces. We show that, in general, none of these embedding methods guarantee that queries on the embedded objects have no false dismissals, while also demonstrating the limited cases in which the guarantee does hold. Moreover, we describe a variant of SparseMap that allows queries with no false dismissals. In addition, we show that with FastMap and MetricMap, the distances of the embedded objects can be much greater than the actual distances. This makes it impossible (or at least impractical) to modify FastMap and MetricMap to guarantee no false dismissals.}, keywords = {complex, contractiveness;, data, databases;, decomposition;, dimension, distance, distortion;, DNA, documents;, EMBEDDING, embeddings;, Euclidean, evaluations;, FastMap;, images;, Lipschitz, methods;, metric, MetricMap;, multimedia, processing;, query, reduction, search;, searching;, sequences;, similarity, singular, spaces;, SparseMap;, types;, value}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2003.1195989}, author = {Hjaltason,G. R and Samet, Hanan} } @conference {14387, title = {PXML: a probabilistic semistructured data model and algebra}, booktitle = {Data Engineering, 2003. Proceedings. 19th International Conference on}, year = {2003}, month = {2003/03//}, pages = {467 - 478}, abstract = {Despite the recent proliferation of work on semistructured data models, there has been little work to date on supporting uncertainty in these models. We propose a model for probabilistic semistructured data (PSD). The advantage of our approach is that it supports a flexible representation that allows the specification of a wide class of distributions over semistructured instances. 
We provide two semantics for the model and show that the semantics are probabilistically coherent. Next, we develop an extension of the relational algebra to handle probabilistic semistructured data and describe efficient algorithms for answering queries that use this algebra. Finally, we present experimental results showing the efficiency of our algorithms.}, keywords = {algebra;, data, databases;, instances;, model;, models;, probabilistic, processing;, PXML;, query, relational, semistructured, structures;, tree, XML;}, doi = {10.1109/ICDE.2003.1260814}, author = {Hung,E. and Getoor, Lise and V.S. Subrahmanian} } @conference {18266, title = {Resistance of orthogonal Gaussian fingerprints to collusion attacks}, booktitle = {Multimedia and Expo, 2003. ICME {\textquoteright}03. Proceedings. 2003 International Conference on}, volume = {1}, year = {2003}, month = {2003/07//}, pages = {I - 617-20 vol.1}, abstract = {Digital fingerprinting is a means to offer protection to digital data by which fingerprints embedded in the multimedia are capable of identifying unauthorized use of digital content. A powerful attack that can be employed to reduce this tracing capability is collusion. In this paper, we study the collusion resistance of a fingerprinting system employing Gaussian distributed fingerprints and orthogonal modulation. We propose a likelihood-based approach to estimate the number of colluders, and introduce the thresholding detector for colluder identification. We first analyze the collusion resistance of a system to the average attack by considering the probability of a false negative and the probability of a false positive when identifying colluders. Lower and upper bounds for the maximum number of colluders Kmax are derived. We then show that the detectors are robust to different attacks. We further study different sets of performance criteria.}, keywords = {approach;, attacks;, capability;, collusion, data, data;, digital, distributed, embedded, fingerprinting;, fingerprints;, Gaussian, likelihood-based, modulation;, multimedia, of, orthogonal, probability;, processes;, protection;, Security, systems;, tracing}, doi = {10.1109/ICME.2003.1220993}, author = {Wang,Z.J. and M. Wu and Zhao,Hong and Liu,K. J.R and Trappe,W.} } @article {17809, title = {Temporal probabilistic object bases}, journal = {Knowledge and Data Engineering, IEEE Transactions on}, volume = {15}, year = {2003}, month = {2003/08//july}, pages = {921 - 939}, abstract = {There are numerous applications where we have to deal with temporal uncertainty associated with objects. The ability to automatically store and manipulate time, probabilities, and objects is important. We propose a data model and algebra for temporal probabilistic object bases (TPOBs), which allows us to specify the probability with which an event occurs at a given time point. In explicit TPOB-instances, the sets of time points along with their probability intervals are explicitly enumerated. In implicit TPOB-instances, sets of time points are expressed by constraints and their probability intervals by probability distribution functions. Thus, implicit object base instances are succinct representations of explicit ones; they allow for an efficient implementation of algebraic operations, while their explicit counterparts make defining algebraic operations easy.
We extend the relational algebra to both explicit and implicit instances and prove that the operations on implicit instances correctly implement their counterpart on explicit instances.}, keywords = {algebra;, algebraic, bases;, constraints;, data, database, database;, databases;, distribution, explicit, functions;, handling;, implicit, instances;, integrity;, intervals;, management;, model;, models;, object, object-oriented, operations;, probabilistic, probability, probability;, relational, temporal, theory;, Uncertainty, uncertainty;}, isbn = {1041-4347}, doi = {10.1109/TKDE.2003.1209009}, author = {Biazzo,V. and Giugno,R. and Lukasiewicz,T. and V.S. Subrahmanian} } @article {12165, title = {User interface evaluation and empirically-based evolution of a prototype experience management tool}, journal = {Software Engineering, IEEE Transactions on}, volume = {29}, year = {2003}, month = {2003/09//}, pages = {838 - 850}, abstract = {Experience management refers to the capture, structuring, analysis, synthesis, and reuse of an organization{\textquoteright}s experience in the form of documents, plans, templates, processes, data, etc. The problem of managing experience effectively is not unique to software development, but the field of software engineering has had a high-level approach to this problem for some time. The Experience Factory is an organizational infrastructure whose goal is to produce, store, and reuse experiences gained in a software development organization. This paper describes The Q-Labs Experience Management System (Q-Labs EMS), which is based on the Experience Factory concept and was developed for use in a multinational software engineering consultancy. A critical aspect of the Q-Labs EMS project is its emphasis on empirical evaluation as a major driver of its development and evolution. The initial prototype requirements were grounded in the organizational needs and vision of Q-Labs, as were the goals and evaluation criteria later used to evaluate the prototype. However, the Q-Labs EMS architecture, data model, and user interface were designed to evolve, based on evolving user needs. This paper describes this approach, including the evaluation that was conducted of the initial prototype and its implications for the further development of systems to support software experience management.}, keywords = {data, development;, empirical, EMS;, engineering;, evaluation;, experience, experience;, Factory;, interface, interfaces;, knowledge, management, management;, model;, models;, organization, performance, prototype, Q-Labs, re-engineering;, reusability;, reuse;, software, system;, systems, tool;, user}, isbn = {0098-5589}, doi = {10.1109/TSE.2003.1232288}, author = {Seaman,C.B and Mendonca,M.G. and Basili, Victor R. and Kim,Y. M} } @conference {18166, title = {Anti-collusion codes: multi-user and multimedia perspectives}, booktitle = {Image Processing. 2002. Proceedings. 2002 International Conference on}, volume = {2}, year = {2002}, month = {2002///}, pages = {II-149 - II-152 vol.2}, abstract = {Digital fingerprinting is an effective method to identify users who might try to redistribute multimedia content, such as images and video. These fingerprints are typically embedded into the content using watermarking techniques that are designed to be robust to a variety of attacks.
A cheap and effective attack against such digital fingerprints is collusion, where several differently marked copies of the same content are averaged or combined to disrupt the underlying fingerprint. We present a construction of collusion-resistant fingerprints based upon anti-collusion codes (ACC) and binary code modulation. ACC have the property that the composition of any subset of K or fewer codevectors is unique. Using this property, we build fingerprints that allow for the identification of groups of K or fewer colluders. We present a construction of binary-valued ACC under the logical AND operation using the theory of combinatorial designs. Our code construction requires only O($\sqrt{n}$) orthogonal signals to accommodate n users. We demonstrate the performance of our ACC for fingerprinting multimedia by identifying colluders through experiments using real images.}, keywords = {and, anti-collusion, authentication;, binary, code, codes;, coding;, combinatorial, computing;, content;, data, designs;, digital, embedding;, encapsulation;, fingerprinting;, image, images;, logical, mathematics;, message, Modulation, modulation;, multimedia, operation;, performance;, watermarking;}, doi = {10.1109/ICIP.2002.1039909}, author = {Trappe,W. and M. Wu and Liu,K. J.R} } @conference {18182, title = {Communication-friendly encryption of multimedia}, booktitle = {Multimedia Signal Processing, 2002 IEEE Workshop on}, year = {2002}, month = {2002/12//}, pages = {292 - 295}, abstract = {This paper discusses encryption operations that selectively encrypt content-carrying segments of a multimedia data stream. We propose and analyze three techniques that work in different domains, namely, a syntax-aware selective bitstream encryption tool with bit stuffing, a generalized index mapping encryption tool with controlled overhead and an intra-bitplane encryption tool compatible with fine granularity scalable coding. The designs of these proposed encryption operations take into consideration the inherent structure and syntax of multimedia sources and have improved friendliness to communications, compression and computation.}, keywords = {bit, bitstream, coding;, Communication, communication;, compression, compression;, COMPUTATION, controlled, cryptography;, data, encryption, encryption;, fine, friendliness;, granularity, image, index, intrabitplane, MAPPING, multimedia, operation;, overhead;, scalable, selective, sources, stream;, stuffing;, syntax-aware, syntax;, tool;}, doi = {10.1109/MMSP.2002.1203303}, author = {M. Wu and Mao,Yinian} } @conference {14961, title = {Efficient techniques for range search queries on earth science data}, booktitle = {Scientific and Statistical Database Management, 2002. Proceedings. 14th International Conference on}, year = {2002}, month = {2002///}, pages = {142 - 151}, abstract = {We consider the problem of organizing large scale earth science raster data to efficiently handle queries for identifying regions whose parameters fall within certain range values specified by the queries. This problem seems to be critical to enabling basic data mining tasks such as determining associations between physical phenomena and spatial factors, detecting changes and trends, and content based retrieval. We assume that the input is too large to fit in internal memory and hence focus on data structures and algorithms that minimize the I/O bounds.
A new data structure, called a tree-of-regions (ToR), is introduced and involves a combination of an R-tree and efficient representation of regions. It is shown that such a data structure enables the handling of range queries in an optimal I/O time, under certain reasonable assumptions. We also show that updates to the ToR can be handled efficiently. Experimental results for a variety of multi-valued earth science data illustrate the fast execution times of a wide range of queries, as predicted by our theoretical analysis.}, keywords = {based, computing;, content, data, data;, databases;, Earth, factors;, large, mining, mining;, natural, processing;, queries;, query, range, raster, retrieval;, scale, Science, sciences, search, spatial, structures;, tasks;, temporal, tree, tree-of-regions;, visual}, doi = {10.1109/SSDM.2002.1029714}, author = {Shi,Qingmin and JaJa, Joseph F.} } @conference {16153, title = {Interactive information visualization of a million items}, booktitle = {Information Visualization, 2002. INFOVIS 2002. IEEE Symposium on}, year = {2002}, month = {2002///}, pages = {117 - 124}, abstract = {Existing information visualization techniques are usually limited to the display of a few thousand items. This article describes new interactive techniques capable of handling a million items (effectively visible and manageable on screen). We evaluate the use of hardware-based techniques available with newer graphics cards, as well as new animation techniques and non-standard graphical features such as stereovision and overlap count. These techniques have been applied to two popular information visualizations: treemaps and scatter plot diagrams; but are generic enough to be applied to other 2D representations as well.}, keywords = {animation, animation;, cards;, Computer, count;, data, diagrams;, Graphics, hardware-based, information, interactive, interpolation;, overlap, plot, scatter, stereovision;, systems;, techniques;, treemaps;, visualisation;, visualization;}, doi = {10.1109/INFVIS.2002.1173156}, author = {Fekete,J.-D. and Plaisant, Catherine} } @conference {13317, title = {Multiresolution tetrahedral meshes: an analysis and a comparison}, booktitle = {Shape Modeling International, 2002. Proceedings}, year = {2002}, month = {2002///}, pages = {83 - 91}, abstract = {We deal with the problem of analyzing and visualizing large-size volume data sets. To this aim, we consider multiresolution representations based on a decomposition of the field domain into tetrahedral cells. We compare two types of multiresolution representations that differ on the rule applied to refine an initial coarse mesh: one is based on tetrahedron bisection, and one based on vertex split. The two representations can be viewed as instances of a common multiresolution model, that we call a multiresolution mesh. Encoding data structures for the two representations are briefly described. An experimental comparison on structured volume data sets is presented}, keywords = {(computer, bisection;vertex, cells;tetrahedron, data, generation;rendering, geometry;data, graphics);solid, meshes;rendering;tetrahedral, modelling;, refinement;multiresolution, representations;multiresolution, sets;mesh, split;computational, structures;data, tetrahedral, visualisation;mesh, visualization;experiment;large-size, volume}, doi = {10.1109/SMI.2002.1003532}, author = {Danovaro,E. and De Floriani, Leila and Lee,M. 
and Samet, Hanan} } @conference {13623, title = {Performance evaluation of object detection algorithms}, booktitle = {Pattern Recognition, 2002. Proceedings. 16th International Conference on}, volume = {3}, year = {2002}, month = {2002///}, pages = {965 - 969 vol.3}, abstract = {The continuous development of object detection algorithms is ushering in the need for evaluation tools to quantify algorithm performance. In this paper a set of seven metrics is proposed for quantifying different aspects of a detection algorithm{\textquoteright}s performance. The strengths and weaknesses of these metrics are described. They are implemented in the Video Performance Evaluation Resource (ViPER) system and will be used to evaluate algorithms for detecting text, faces, moving people and vehicles. Results for running two previous text-detection algorithms on a common data set are presented.}, keywords = {algorithms;, common, data, DETECTION, detection;, Evaluation, evaluation;, image, object, performance, recognition;, resource, set;, system;, text-detection, video}, doi = {10.1109/ICPR.2002.1048198}, author = {Mariano,V.Y. and Min,Junghye and Park,Jin-Hyeong and Kasturi,R. and Mihalcik,D. and Huiping Li and David Doermann and Drayer,T.} } @article {11913, title = {Rover: scalable location-aware computing}, journal = {Computer}, volume = {35}, year = {2002}, month = {2002/10//}, pages = {46 - 53}, abstract = {All the components necessary for realizing location-aware computing are available in the marketplace today. What has hindered the widespread deployment of location-based systems is the lack of an integration architecture that scales with user populations. The authors have completed the initial implementation of Rover, a system designed to achieve this sort of integration and to automatically tailor information and services to a mobile user{\textquoteright}s location. Their studies have validated Rover{\textquoteright}s underlying software architecture, which achieves system scalability through high-resolution, application-specific resource scheduling at the servers and network. The authors believe that this technology will greatly enhance the user experience in many places, including museums, amusement and theme parks, shopping malls, game fields, offices, and business centers. They designed the system specifically to scale to large user populations and expect its benefits to increase with them.}, keywords = {amusement, application-specific, architecture;, automation;, business, business;, computing;, data, entertainment;, handheld, humanities;, integration, LAN;, location-aware, malls;, mobile, museums;, office, parks;, processing;, resource, Rover;, scalability;, scalable, scheduling;, shopping, software, system, theme, units;, user;, wireless}, isbn = {0018-9162}, doi = {10.1109/MC.2002.1039517}, author = {Banerjee,S. and Agarwal,S. and Kamel,K. and Kochut, A. and Kommareddy,C. and Nadeem,T. and Thakkar,P. and Trinh,Bao and Youssef,A. and Youssef, M. and Larsen,R.L. and Udaya Shankar,A. and Agrawala, Ashok K.} } @conference {13272, title = {A Smale-like decomposition for discrete scalar fields}, booktitle = {Pattern Recognition, 2002. Proceedings.
16th International Conference on}, volume = {1}, year = {2002}, month = {2002///}, pages = {184 - 187 vol.1}, abstract = {In this paper we address the problem of representing the structure of the topology of a d-dimensional scalar field as a basis for constructing a multiresolution representation of the structure of such a field. To this aim, we define a discrete decomposition of a triangulated d-dimensional domain, on whose vertices the values of the field are given. We extend a Smale decomposition, defined by Thom (1949) and Smale (1960) for differentiable functions, to the discrete case, to what we call a Smale-like decomposition. We introduce the notion of discrete gradient vector field, which indicates the growth of the scalar field and matches with our decomposition. We sketch an algorithm for building a Smale-like decomposition and a graph-based representation of this decomposition. We present results for the case of two-dimensional fields.}, keywords = {data, decomposition;, differentiable, discrete, domain;, field;, fields;, functions;, gradient, graph-based, methods;, multidimensional, multiresolution, representation;, scalar, Smale-like, structure, Topology, triangulated, vector, visualisation;}, doi = {10.1109/ICPR.2002.1044644}, author = {De Floriani, Leila and Mesmoudi,M. M. and Danovaro,E.} } @conference {16150, title = {SpaceTree: supporting exploration in large node link tree, design evolution and empirical evaluation}, booktitle = {Information Visualization, 2002. INFOVIS 2002. IEEE Symposium on}, year = {2002}, month = {2002///}, pages = {57 - 64}, abstract = {We present a novel tree browser that builds on the conventional node link tree diagrams. It adds dynamic rescaling of branches of the tree to best fit the available screen space, optimized camera movement, and the use of preview icons summarizing the topology of the branches that cannot be expanded. In addition, it includes integrated search and filter functions. This paper reflects on the evolution of the design and highlights the principles that emerged from it. A controlled experiment showed benefits for navigation to already previously visited nodes and estimation of overall tree topology.}, keywords = {browser;, camera, data, design, diagrams;, dynamic, evolution;, experiment;, exploration;, filter, functions;, graphical, icons;, integrated, interfaces;, large, link, movement;, node, novel, optimized, rescaling;, search;, SpaceTree;, structures;, topology;, tree, user, visualisation;, visualization;}, doi = {10.1109/INFVIS.2002.1173148}, author = {Plaisant, Catherine and Grosjean,J. and Bederson, Benjamin B.} } @conference {12120, title = {An experience management system for a software engineering research organization}, booktitle = {Software Engineering Workshop, 2001. Proceedings. 26th Annual NASA Goddard}, year = {2001}, month = {2001///}, pages = {29 - 35}, abstract = {Most businesses rely on the fact that their employees possess relevant knowledge and that they can apply it to the task at hand. The problem is that this knowledge is not owned by the organization. It is owned and controlled by its employees. Maintaining an appropriate level of knowledge in the organization is a very important issue. It is, however, not an easy task for most organizations, and it is particularly problematic for software organizations, which are human- and knowledge-intensive. Knowledge management is a relatively new area that has attempted to address these problems.
This paper introduces an approach called the "knowledge dust-to-pearls" approach. This approach addresses some of the issues with knowledge management by providing low-barrier mechanisms to "jump start" the experience base. This approach allows the experience base to become more useful more quickly than traditional approaches. This paper describes the approach and gives an example of its use at the Fraunhofer Center for Experimental Software Engineering, Maryland, USA.}, keywords = {and, approach;knowledge, base;experience, Center, control;knowledge, data, development, dust-to-pearls, engineering, Engineering;business, engineering;software, experimental, for, Fraunhofer, houses;, knowledge;employee, knowledge;experience, level, maintenance;knowledge, management, management;knowledge, management;software, mechanisms;software, organization;software, organizational, organizations;administrative, organizations;knowledge, organizations;low-barrier, ownership;knowledge-intensive, processing;personnel;research, research, software, system;human-intensive}, doi = {10.1109/SEW.2001.992652}, author = {Basili, Victor R. and Costa,P. and Lindvall,M. and Mendonca,M. and Seaman,C. and Tesoriero,R. and Zelkowitz, Marvin V} } @conference {18288, title = {Video access control via multi-level data hiding}, booktitle = {Multimedia and Expo, 2000. ICME 2000. 2000 IEEE International Conference on}, volume = {1}, year = {2000}, month = {2000///}, pages = {381 - 384 vol.1}, abstract = {The paper proposes novel data hiding algorithms and system design for high quality digital video. Instead of targeting a single degree of robustness, which results in overestimation and/or underestimation of the noise conditions, we apply multi-level embedding to digital video to achieve more than one level of robustness-capacity tradeoff. In addition, an adaptive technique is proposed to determine how many bits are embedded in each part of the video. Besides user data, control information such as synchronization and the number of hidden user bits are embedded as well. The algorithm can be used for applications such as access control.}, keywords = {access, adaptive, algorithms;hidden, bits;high, conditions;robustness;robustness-capacity, control;adaptive, data, data;video, design;user, digital, embedding;noise, encapsulation;multimedia, hiding, hiding;multi-level, information;data, processing;, QUALITY, signal, systems;authorisation;data, systems;video, technique;control, tradeoff;system, user, video;multi-level}, doi = {10.1109/ICME.2000.869620}, author = {M. Wu and Yu,Hong Heather} } @conference {18198, title = {Digital watermarking using shuffling}, booktitle = {Image Processing, 1999. ICIP 99. Proceedings. 1999 International Conference on}, volume = {1}, year = {1999}, month = {1999///}, pages = {291 - 295 vol.1}, abstract = {This paper applies shuffling to digital watermarking and data hiding. The data embedding capacity in the multimedia source generally varies significantly from one part of the source to another. Sequential embedding is very sensitive to noise which may cause a synchronization problem; the common but conservative solution via partitioning an image into large segments and embedding only one bit per segment is wasteful of the data embedding capacity. This paper shows how random shuffling can be used to equalize the uneven distribution of embedding capacity.
The effectiveness of random shuffling is demonstrated by analysis and experiments.}, keywords = {coding;, data, embeddin;data, encapsulation;document, hiding;digital, image, processing;image, shuffling;shuffling;synchronization;data, source;random, watermarking;multimedia}, doi = {10.1109/ICIP.1999.821616}, author = {M. Wu and Liu,B.} } @conference {14927, title = {A hierarchical data archiving and processing system to generate custom tailored products from AVHRR data}, booktitle = {Geoscience and Remote Sensing Symposium, 1999. IGARSS {\textquoteright}99 Proceedings. IEEE 1999 International}, volume = {5}, year = {1999}, month = {1999///}, pages = {2374 - 2376 vol.5}, abstract = {A novel indexing scheme is described to catalogue satellite data on a pixel basis. The objective of this research is to develop an efficient methodology to archive, retrieve and process satellite data, so that data products can be generated to meet the specific needs of individual scientists. When requesting data, users can specify the spatial and temporal resolution, geographic projection, choice of atmospheric correction, and the data selection methodology. The data processing is done in two stages. Satellite data is calibrated, navigated and quality flags are appended in the initial processing. This processed data is then indexed and stored. Secondary processing such as atmospheric correction and projection are done after a user requests the data to create custom made products. Dividing the processing into two stages saves time, since the basic processing tasks such as navigation and calibration which are common to all requests are not repeated when different users request satellite data. The indexing scheme described can be extended to allow fusion of data sets from different sensors.}, keywords = {archiving;image, AVHRR;GIS;PACS;custom, data, image;land, image;remote, mapping;, mapping;PACS;geophysical, measurement, PROCESSING, processing;geophysical, product;data, remote, scheme;infrared, sensing;optical, sensing;terrain, signal, surface;multispectral, system;indexing, tailored, technique;hierarchical, techniques;remote}, doi = {10.1109/IGARSS.1999.771514}, author = {Kalluri, SNV and Zhang,Z. and JaJa, Joseph F. and Bader, D.A. and Song,H. and El Saleous,N. and Vermote,E. and Townshend,J.R.G.} } @conference {16167, title = {Refining query previews techniques for data with multivalued attributes: the case of NASA EOSDIS}, booktitle = {Research and Technology Advances in Digital Libraries, 1999. ADL {\textquoteright}99. Proceedings. IEEE Forum on}, year = {1999}, month = {1999///}, pages = {50 - 59}, abstract = {Query Previews allow users to rapidly gain an understanding of the content and scope of a digital data collection. These previews present overviews of abstracted metadata, enabling users to rapidly and dynamically avoid undesired data. We present our recent work on developing query previews for a variety of NASA EOSDIS situations. We focus on approaches that successfully address the challenge of multi-valued attribute data. Memory requirements and processing time associated with running these new solutions remain independent of the number of records in the dataset.
We describe two techniques and their respective prototypes used to preview NASA Earth science data}, keywords = {attribute, attributes;processing, collection;memory, computing;meta, data, data;abstracted, data;digital, data;multivalued, data;query, Earth, EOSDIS;NASA, libraries;geophysics, metadata;dataset;digital, NASA, previews, processing;, requirements;multi-valued, Science, techniques;undesired, time;query}, doi = {10.1109/ADL.1999.777690}, author = {Plaisant, Catherine and Venkatraman,M. and Ngamkajorwiwat,K. and Barth,R. and Harberts,B. and Feng,Wenlan} } @conference {17962, title = {Skip Strips: maintaining triangle strips for view-dependent rendering}, booktitle = {Visualization {\textquoteright}99. Proceedings}, year = {1999}, month = {1999/10//}, pages = {131 - 518}, abstract = {View-dependent simplification has emerged as a powerful tool for graphics acceleration in visualization of complex environments. However, view-dependent simplification techniques have not been able to take full advantage of the underlying graphics hardware. Specifically, triangle strips are a widely used hardware-supported mechanism to compactly represent and efficiently render static triangle meshes. However, in a view-dependent framework, the triangle mesh connectivity changes at every frame, making it difficult to use triangle strips. We present a novel data structure, Skip Strip, that efficiently maintains triangle strips during such view-dependent changes. A Skip Strip stores the vertex hierarchy nodes in a skip-list-like manner with path compression. We anticipate that Skip Strips will provide a road map to combine rendering acceleration techniques for static datasets, typical of retained-mode graphics applications, with those for dynamic datasets found in immediate-mode applications.}, keywords = {(computer, Acceleration, acceleration;graphics, applications;path, applications;skip-list-like, changes;view-dependent, compression;rendering, connectivity;triangle, data, datasets;graphics, datasets;static, environments;data, equipment;data, graphic, Graphics, graphics);spatial, hardware;hardware-supported, hierarchy, manner;static, mechanism;immediate-mode, mesh, meshes;triangle, nodes;view-dependent, rendering;view-dependent, simplification;visualization;computer, Skip, Strips;complex, strips;vertex, structure;dynamic, structures;, techniques;retained-mode, triangle, visualisation;rendering}, doi = {10.1109/VISUAL.1999.809877}, author = {El-Sana,J. and Azanli,E. and Varshney, Amitabh} } @conference {18038, title = {Efficient approximate and dynamic matching of patterns using a labeling paradigm}, booktitle = {Foundations of Computer Science, 1996. Proceedings., 37th Annual Symposium on}, year = {1996}, month = {1996/10//}, pages = {320 - 328}, abstract = {A key approach in string processing algorithmics has been the labeling paradigm which is based on assigning labels to some of the substrings of a given string. If these labels are chosen consistently, they can enable fast comparisons of substrings. Until the first optimal parallel algorithm for suffix tree construction was given by the authors in 1994 the labeling paradigm was considered not to be competitive with other approaches. They show that this general method is also useful for several central problems in the area of string processing: approximate string matching, dynamic dictionary matching, and dynamic text indexing. 
The approximate string matching problem deals with finding all substrings of a text which match a pattern ``approximately'', i.e., with at most m differences. The differences can be in the form of inserted, deleted, or replaced characters. The text indexing problem deals with finding all occurrences of a pattern in a text, after the text is preprocessed. In the dynamic text indexing problem, updates to the text in the form of insertions and deletions of substrings are permitted. The dictionary matching problem deals with finding all occurrences of each pattern of a set of patterns in a text, after the pattern set is preprocessed. In the dynamic dictionary matching problem, insertions and deletions of patterns to the pattern set are permitted.}, keywords = {algorithm;replaced, algorithmics;substrings;suffix, algorithms;pattern, approximate, characters;dynamic, characters;labeling, characters;string, complexity;indexing;parallel, construction;computational, data, dictionary, dynamic, indexing;efficient, matching;deleted, matching;dynamic, matching;efficient, matching;inserted, matching;string, matching;tree, paradigm;optimal, Parallel, pattern, PROCESSING, string, structures;, text, tree}, doi = {10.1109/SFCS.1996.548491}, author = {Sahinalp,S. C and Vishkin, Uzi} } @conference {17928, title = {FINESSE: a financial information spreadsheet}, booktitle = {Information Visualization {\textquoteright}96, Proceedings IEEE Symposium on}, year = {1996}, month = {1996/10//}, pages = {70 - 71, 125}, abstract = {We outline a spreadsheet-based system for visualization of real-time financial information. Our system permits the user to define arithmetic and presentation relationships amongst the various cells of the spreadsheet. The cells contain primitives that can be numbers, text, images, functions and graphics. Presenting financial information in this format allows its intended clients, the financial analysts, to work in the familiar environment of a spreadsheet and allows them the flexibility afforded by the powerful interface of the spreadsheet paradigm. In addition, our system permits real-time visualization of the financial data stream allowing its user to visually track the changing market trends in two and three dimensions.}, keywords = {cells;text;three, data, dimensions;two, dimensions;user, financial, FINESSE;arithmetic;data, information, information;spreadsheet, interface;data, interfaces;, presentation;market, processing;real-time, programs;user, spreadsheet;functions;graphics;images;information, systems;spreadsheet, trends;numbers;presentation;real-time, visualisation;financial, visualization;financial}, doi = {10.1109/INFVIS.1996.559222}, author = {Varshney, Amitabh and Kaufman,A.} } @conference {15016, title = {Practical parallel algorithms for dynamic data redistribution, median finding, and selection}, booktitle = {Parallel Processing Symposium, 1996., Proceedings of IPPS {\textquoteright}96, The 10th International}, year = {1996}, month = {1996/04//}, pages = {292 - 301}, abstract = {A common statistical problem is that of finding the median element in a set of data. This paper presents a fast and portable parallel algorithm for finding the median given a set of elements distributed across a parallel machine. In fact, our algorithm solves the general selection problem that requires the determination of the element of rank i, for an arbitrarily given integer i.
Practical algorithms needed by our selection algorithm for the dynamic redistribution of data are also discussed. Our general framework is a distributed memory programming model enhanced by a set of communication primitives. We use efficient techniques for distributing, coalescing, and load balancing data as well as efficient combinations of task and data parallelism. The algorithms have been coded in SPLIT-C and run on a variety of platforms, including the Thinking Machines CM-5, IBM SP-1 and SP-2, Cray Research T3D, Meiko Scientific CS-2, Intel Paragon, and workstation clusters. Our experimental results illustrate the scalability and efficiency of our algorithms across different platforms and improve upon all the related experimental results known to the authors.}, keywords = {algorithms;performance, algorithms;scalability;statistical, allocation;, balancing, clusters;distributed, CM-5;communication, CS-2;SPLIT-C;Thinking, data, data;median, evaluation;resource, finding;parallel, Gray, Machines, memory, model;dynamic, Paragon;Meiko, primitives;distributed, problem;workstation, Programming, redistribution;load, research, scientific, SP-1;Intel, systems;parallel, T3D;IBM}, doi = {10.1109/IPPS.1996.508072}, author = {Bader, D.A. and JaJa, Joseph F.} } @conference {16201, title = {Query previews in networked information systems}, booktitle = {Research and Technology Advances in Digital Libraries, 1996. ADL {\textquoteright}96., Proceedings of the Third Forum on}, year = {1996}, month = {1996/05//}, pages = {120 - 129}, abstract = {In a networked information system (such as the NASA Earth Observing System-Data Information System (EOS-DIS)), there are three major obstacles facing users in a querying process: network performance, data volume and data complexity. In order to overcome these obstacles, we propose a two phase approach to query formulation. The two phases are the Query Preview and the Query Refinement. In the Query Preview phase, users formulate an initial query by selecting rough attribute values. The estimated number of matching data sets is shown graphically on preview bars, which allows users to rapidly focus on a manageable number of relevant data sets. Query previews also prevent wasted steps by eliminating zero hit queries. When the estimated number of data sets is low enough, the initial query is submitted to the network, which returns the metadata of the data sets for further refinement in the Query Refinement phase. The two phase approach to query formulation overcomes slow network performance, and reduces the data volume and data complexity problems. This approach is especially appropriate for users who do not have extensive knowledge about the data and who prefer an exploratory method to discover data patterns and exceptions. Using this approach, we have developed dynamic query user interfaces to allow users to formulate their queries across a networked environment.}, keywords = {attribute, complexity;data, data, Earth, environment;networked, EOS-DIS;NASA, formulation;querying, formulation;user, hit, information, interfaces;, interfaces;exploratory, method;matching, networks;information, Observing, patterns;data, performance;networked, Preview;Query, process;rough, queries;computer, query, refinement;data, retrieval;information, services;interactive, sets;network, System-Data, System;Query, systems;query, user, values;zero, volume;dynamic}, doi = {10.1109/ADL.1996.502522}, author = {Donn,K.
and Plaisant, Catherine and Shneiderman, Ben} } @article {15022, title = {Scalable data parallel algorithms for texture synthesis using Gibbs random fields}, journal = {Image Processing, IEEE Transactions on}, volume = {4}, year = {1995}, month = {1995/10//}, pages = {1456 - 1460}, abstract = {This article introduces scalable data parallel algorithms for image processing. Focusing on Gibbs and Markov random field model representation for textures, we present parallel algorithms for texture synthesis, compression, and maximum likelihood parameter estimation, currently implemented on Thinking Machines CM-2 and CM-5. The use of fine-grained, data parallel processing techniques yields real-time algorithms for texture synthesis and compression that are substantially faster than the previously known sequential implementations. Although current implementations are on Connection Machines, the methodology presented enables machine-independent scalable algorithms for a number of problems in image processing and analysis}, keywords = {algorithms;maximum, algorithms;parallel, algorithms;scalable, algorithms;texture, analysis;image, CM-2;Thinking, CM-5;fine-grained, compression;image, compression;texture, Connection, data, estimation;model, estimation;parallel, field;Thinking, fields;Markov, likelihood, machine, Machines;Gibbs, machines;random, Parallel, parameter, processes;, processes;data, processing;image, processing;machine-independent, random, representation;real-time, scalable, synthesis;Markov, texture;maximum}, isbn = {1057-7149}, doi = {10.1109/83.465111}, author = {Bader, D.A. and JaJa, Joseph F. and Chellapa, Rama} } @conference {15039, title = {The block distributed memory model for shared memory multiprocessors}, booktitle = {Parallel Processing Symposium, 1994. Proceedings., Eighth International}, year = {1994}, month = {1994/04//}, pages = {752 - 756}, abstract = {Introduces a computation model for developing and analyzing parallel algorithms on distributed memory machines. The model allows the design of algorithms using a single address space and does not assume any particular interconnection topology. We capture performance by incorporating a cost measure for interprocessor communication induced by remote memory accesses. The cost measure includes parameters reflecting memory latency, communication bandwidth, and spatial locality. Our model allows the initial placement of the input data and pipelined prefetching. We use our model to develop parallel algorithms for various data rearrangement problems, load balancing, sorting, FFT, and matrix multiplication. We show that most of these algorithms achieve optimal or near optimal communication complexity while simultaneously guaranteeing an optimal speed-up in computational complexity}, keywords = {accesses;shared, address, algebra;parallel, algorithms;performance, algorithms;performance;pipelined, allocation;shared, balancing;matrix, bandwidth;computation, block, Communication, communication;load, complexity;computational, complexity;cost, complexity;distributed, complexity;optimal, data, distributed, evaluation;resource, Fourier, latency;optimal, locality;communication, measure;data, memory, model;communication, model;computational, multiplication;memory, multiprocessors;single, placement;interprocessor, prefetching;remote, problems;fast, rearrangement, space;sorting;spatial, speedup;parallel, systems;fast, systems;sorting;, transforms;input, transforms;matrix}, doi = {10.1109/IPPS.1994.288220}, author = {JaJa, Joseph F. 
and Ryu,Kwan Woo} } @conference {13571, title = {Image based typographic analysis of documents}, booktitle = {Document Analysis and Recognition, 1993., Proceedings of the Second International Conference on}, year = {1993}, month = {1993/10//}, pages = {769 - 773}, abstract = {An approach to image based typographic analysis of documents is provided. The problem requires a spatial understanding of the document layout as well as knowledge of the proper syntax. The system performs a page synthesis from the stream of formatting commands defined in a DVI file. Since the two-dimensional relationships between document components are not explicit in the page language, the authors develop a representation which preserves the two-dimensional layout, the read-order and the attributes of document components. From this hierarchical representation of the page layout we extract and analyze relevant typographic features such as margins, line and character spacing, and figure placement.}, keywords = {2D, analysis;, attributes;, based, character, commands;, component, data, description, document, DVI, extraction;, feature, figure, file;, formatting, hierarchical, image, language;, languages;, layout;, line, margins;, page, placement;, processing;, read-order;, relationships;, representation;, spacing;, spatial, structures;, syntax;, synthesis;, typographic, understanding;}, doi = {10.1109/ICDAR.1993.395624}, author = {David Doermann and Furuta,R.} } @article {15049, title = {VLSI implementation of a tree searched vector quantizer}, journal = {Signal Processing, IEEE Transactions on}, volume = {41}, year = {1993}, month = {1993/02//}, pages = {901 - 905}, abstract = {The VLSI design and implementation of a tree-searched vector quantizer is presented. The number of processors needed is equal to the depth of the tree. All processors are identical, and data flow between processors is regular. No global control signals are needed. The processors have been fabricated using a 2 $\mu$m N-well process on a 7.9 $\times$ 9.2 mm die. Each processor chip contains 25000 transistors and has 84 pins. The processors have been thoroughly tested at a clock frequency of 20 MHz.}, keywords = {(mathematics);, 2, 20, chips;, coding;, compression;, data, design;, digital, image, implementation;, MHz;, micron;, PROCESSING, quantisation;, quantizer;, searched, signal, tree, TREES, vector, VLSI, VLSI;}, isbn = {1053-587X}, doi = {10.1109/78.193225}, author = {Kolagotla,R. K. and Yu,S.-S. and JaJa, Joseph F.} }
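
The anti-collusion code entries above (Trappe, Wu, Wang, and Liu) rest on the observation that, under logical-AND collusion, the surviving fingerprint is the bitwise AND of the colluders' codevectors, and a suitably designed code makes that composition unique for any K or fewer colluders. The Python sketch below is a minimal illustration that uses the trivial length-n code (user i's codevector has a single 0 in position i), an assumption chosen only for brevity; the cited papers use combinatorial designs to cut the number of basis signals to roughly O(sqrt(n)).

def trivial_acc(n_users):
    """Codevector matrix: row i is user i's binary code (all ones except a 0 at position i)."""
    return [[0 if j == i else 1 for j in range(n_users)] for i in range(n_users)]

def collude(codes, colluders):
    """Logical-AND collusion model: only the bits shared by every colluder survive."""
    out = [1] * len(codes[0])
    for u in colluders:
        out = [a & b for a, b in zip(out, codes[u])]
    return out

def identify(extracted):
    """For this particular code, the AND of a colluder set has zeros exactly at the
    colluders' positions, so identification is a direct scan."""
    return [i for i, bit in enumerate(extracted) if bit == 0]

if __name__ == "__main__":
    codes = trivial_acc(8)
    print(identify(collude(codes, [1, 4, 6])))    # -> [1, 4, 6]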
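
The depth-first k-nearest neighbor entry above (Samet, ICIAP 2003) prunes a branch-and-bound search using distance bounds derived from the index. The following minimal Python sketch shows the general branch-and-bound pattern over an assumed toy bounding-ball tree; it prunes with the standard MinDist lower bound only and does not reproduce the paper's tighter MaxNearestDist estimator.

import heapq
import math
import random

def dist(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

class Node:
    """Bounding-ball tree node over 2D points (an assumption made for this sketch)."""
    def __init__(self, points):
        self.points = points
        cx = sum(p[0] for p in points) / len(points)
        cy = sum(p[1] for p in points) / len(points)
        self.center = (cx, cy)
        self.radius = max(dist(self.center, p) for p in points)
        self.left = self.right = None
        if len(points) > 8:                        # split larger nodes in half
            points.sort()
            mid = len(points) // 2
            self.left, self.right = Node(points[:mid]), Node(points[mid:])

def knn(root, q, k):
    """Depth-first k-NN: skip a subtree when even its closest possible point
    (distance to the center minus the radius) cannot beat the current k-th best."""
    best = []                                      # max-heap of (-distance, point)
    def visit(node):
        min_dist = max(0.0, dist(q, node.center) - node.radius)
        if len(best) == k and min_dist >= -best[0][0]:
            return                                 # prune this subtree
        if node.left is None:                      # leaf: scan its points
            for p in node.points:
                d = dist(q, p)
                if len(best) < k:
                    heapq.heappush(best, (-d, p))
                elif d < -best[0][0]:
                    heapq.heapreplace(best, (-d, p))
        else:                                      # descend into the closer child first
            for child in sorted((node.left, node.right), key=lambda n: dist(q, n.center)):
                visit(child)
    visit(root)
    return sorted((-d, p) for d, p in best)

if __name__ == "__main__":
    random.seed(0)
    pts = [(random.random(), random.random()) for _ in range(200)]
    print(knn(Node(list(pts)), (0.5, 0.5), k=3))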
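
Several entries above (the quantization-based LUT embedding papers by Wu) hide a bit by quantizing a feature and forcing it into a quantizer bin whose keyed lookup-table entry equals that bit. The toy sketch below uses an assumed uniform scalar quantizer and an assumed 8-entry table purely to show the embed/extract mechanics; it does not attempt the nontrivial-run table or the security and distortion-compensation analysis of the cited papers.

import random

STEP = 8                                   # assumed quantization step for the feature values
TABLE = [0, 1, 1, 0, 1, 0, 0, 1]           # assumed keyed LUT mapping quantizer bins to bits

def embed(feature, bit):
    """Quantize the feature, then move it to the nearest bin whose LUT entry equals `bit`."""
    q = round(feature / STEP)
    offset = 0
    while TABLE[(q + offset) % len(TABLE)] != bit and TABLE[(q - offset) % len(TABLE)] != bit:
        offset += 1
    q = q + offset if TABLE[(q + offset) % len(TABLE)] == bit else q - offset
    return q * STEP

def extract(feature):
    """Re-quantize and read the bit back out of the table."""
    return TABLE[round(feature / STEP) % len(TABLE)]

if __name__ == "__main__":
    random.seed(1)
    bits = [random.randint(0, 1) for _ in range(10)]
    feats = [random.uniform(0, 255) for _ in range(10)]
    marked = [embed(f, b) for f, b in zip(feats, bits)]
    noisy = [m + random.uniform(-STEP / 3, STEP / 3) for m in marked]   # mild distortion
    assert [extract(x) for x in noisy] == bits
    print("recovered:", [extract(x) for x in noisy])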
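
The shuffling entry above (Wu and Liu, ICIP 1999) argues that a key-dependent random permutation of block indices equalizes an image's uneven embedding capacity, so every group of hidden bits sees roughly the average number of embeddable blocks. The sketch below demonstrates that effect on an assumed toy capacity map; the paper derives the equalization analytically and works on real image blocks.

import random

def segment_capacities(capacity, seg_len, order):
    """Total embeddable bits in each consecutive segment of blocks, visited in `order`."""
    segs = [order[i:i + seg_len] for i in range(0, len(order), seg_len)]
    return [sum(capacity[b] for b in seg) for seg in segs]

if __name__ == "__main__":
    n_blocks, seg_len = 1024, 64
    # Toy map: only the first quarter of the blocks (a textured region) can hide bits.
    capacity = [1 if b < n_blocks // 4 else 0 for b in range(n_blocks)]

    sequential = list(range(n_blocks))
    shuffled = sequential[:]
    random.Random(12345).shuffle(shuffled)        # key-dependent permutation

    print("sequential:", segment_capacities(capacity, seg_len, sequential))
    print("shuffled:  ", segment_capacities(capacity, seg_len, shuffled))
    # Sequential order leaves most segments with zero capacity; the shuffled order
    # gives every segment roughly 256 / 16 = 16 embeddable blocks.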
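
The median-finding and selection entry above (Bader and JaJa, IPPS 1996) solves rank-i selection over data spread across processors without globally sorting it. The following serial Python simulation, in which plain lists stand in for processors and the pivot is the median of the local medians, is an assumption-laden sketch of that general idea rather than the paper's SPLIT-C algorithm.

import random

def select(parts, i):
    """Return the element of rank i (0-based) in the union of the lists in `parts`."""
    while True:
        total = sum(len(p) for p in parts)
        if total <= 32:                            # small enough: finish directly
            return sorted(x for p in parts for x in p)[i]
        # Pivot: median of the non-empty parts' local medians.
        local_medians = [sorted(p)[len(p) // 2] for p in parts if p]
        pivot = sorted(local_medians)[len(local_medians) // 2]
        below = [[x for x in p if x < pivot] for p in parts]
        n_below = sum(len(p) for p in below)
        n_equal = sum(1 for p in parts for x in p if x == pivot)
        if i < n_below:
            parts = below                          # the target rank lies below the pivot
        elif i < n_below + n_equal:
            return pivot
        else:
            parts = [[x for x in p if x > pivot] for p in parts]
            i -= n_below + n_equal

if __name__ == "__main__":
    random.seed(7)
    data = [random.randrange(10**6) for _ in range(10000)]
    parts = [data[j::8] for j in range(8)]         # distribute across 8 "processors"
    assert select(parts, len(data) // 2) == sorted(data)[len(data) // 2]
    print("median:", select(parts, len(data) // 2))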