@conference {13068, title = {Face verification using large feature sets and one shot similarity}, booktitle = {Biometrics (IJCB), 2011 International Joint Conference on}, year = {2011}, month = {2011/10//}, pages = {1 - 8}, abstract = {We present a method for face verification that combines Partial Least Squares (PLS) and the One-Shot similarity model [28]. First, a large feature set combining shape, texture and color information is used to describe a face. Then PLS is applied to reduce the dimensionality of the feature set with multi-channel feature weighting. This provides a discriminative facial descriptor. PLS regression is used to compute the similarity score of an image pair by One-Shot learning. Given two feature vectors representing face images, the One-Shot algorithm learns discriminative models exclusively for the vectors being compared. A small set of unlabeled images, not containing images belonging to the people being compared, is used as a reference (negative) set. The approach is evaluated on the Labeled Faces in the Wild (LFW) benchmark and shows results comparable to state-of-the-art methods (achieving 86.12\% classification accuracy) while maintaining simplicity and good generalization ability.}, keywords = {analysis;set, approximations;regression, descriptor;labeled, Face, feature, in, information;face, information;texture, least, LFW;PLS;PLS, recognition;least, regression;color, sets;one, shot, similarity;partial, squares, squares;shape, the, theory;, verification;facial, wild;large}, doi = {10.1109/IJCB.2011.6117498}, author = {Guo,Huimin and Robson Schwartz,W. and Davis, Larry S.} } @article {15391, title = {Nonlinear Resonant and Chaotic Dynamics in Microwave Assisted Magnetization Switching}, journal = {Magnetics, IEEE Transactions on}, volume = {45}, year = {2009}, month = {2009/10//}, pages = {3950 - 3953}, abstract = {The switching process of a uniformly magnetized magnetic nanoparticle is considered. The particle is subject to applied fields having both dc and linearly-polarized radio-frequency (RF) components. The possibility of using the RF power to obtain a reduced coercivity of the particle is related to the onset of chaotic magnetization dynamics for moderately low values of the RF field amplitude. A perturbation technique for the evaluation of the reduced coercive field is developed and applied to the microwave-assisted switching of the particle. Numerical simulations confirm the predictions of the theory.}, keywords = {amplitude;chaotic, components;magnetic, dynamical, dynamics;coercive, dynamics;numerical, field, field;coercivity;linearly-polarized, force;magnetic, magnetization, nanoparticle;microwave-assisted, particles;magnetic, radiofrequency, resonant, RF, simulations;perturbation, switching;nanoparticles;nonlinear, switching;nonlinear, systems;perturbation, technique;chaos;coercive, theory;}, isbn = {0018-9464}, doi = {10.1109/TMAG.2009.2023242}, author = {d{\textquoteright}Aquino,M. and Serpico,C. and Bertotti,G. and Mayergoyz, Issak D and Bonin,R.} } @article {18267, title = {Robust and secure image hashing}, journal = {Information Forensics and Security, IEEE Transactions on}, volume = {1}, year = {2006}, month = {2006/06//}, pages = {215 - 230}, abstract = {Image hash functions find extensive applications in content authentication, database search, and watermarking. This paper develops a novel algorithm for generating an image hash based on Fourier transform features and controlled randomization.
We formulate the robustness of image hashing as a hypothesis testing problem and evaluate the performance under various image processing operations. We show that the proposed hash function is resilient to content-preserving modifications, such as moderate geometric and filtering distortions. We introduce a general framework to study and evaluate the security of image hashing systems. Under this new framework, we model the hash values as random variables and quantify their uncertainty in terms of differential entropy. Using this security framework, we analyze the security of the proposed schemes and several existing representative methods for image hashing. We then examine the security versus robustness tradeoff and show that the proposed hashing methods can provide excellent security and robustness.}, keywords = {content-preserving, cryptography;, differential, distortions;, entropy;, Filtering, Fourier, functions;, hash, hashing;, image, modifications;, processing;, secure, theory;, transform;, transforms;}, isbn = {1556-6013}, doi = {10.1109/TIFS.2006.873601}, author = {Swaminathan,A. and Mao,Yinian and M. Wu} } @conference {12650, title = {An algebraic approach to surface reconstruction from gradient fields}, booktitle = {Computer Vision, 2005. ICCV 2005. Tenth IEEE International Conference on}, volume = {1}, year = {2005}, month = {2005/10//}, pages = {174 - 181 Vol. 1}, abstract = {Several important problems in computer vision such as shape from shading (SFS) and photometric stereo (PS) require reconstructing a surface from an estimated gradient field, which is usually non-integrable, i.e., has non-zero curl. We propose a purely algebraic approach to enforce integrability in the discrete domain. We first show that enforcing integrability can be formulated as solving a single linear system Ax = b over the image. In general, this system is under-determined. We show conditions under which the system can be solved and a method, based on graph theory, for reaching those conditions. The proposed approach is non-iterative, has the important property of local error confinement, and can be applied to several problems. Results on SFS and PS demonstrate the applicability of our method.}, keywords = {algebra;, algebraic, approach;, Computer, confinement;, discrete, domain, error, field;, from, gradient, graph, image, integrability;, linear, local, methods;, photometric, reconstruction;, shading;, SHAPE, stereo;, surface, system;, theory;, vision;}, doi = {10.1109/ICCV.2005.31}, author = {Agrawal,A. and Chellapa, Rama and Raskar, R.} } @conference {14024, title = {Approximate expressions for the mean and the covariance of the maximum likelihood estimator for acoustic source localization}, booktitle = {Acoustics, Speech, and Signal Processing, 2005. Proceedings. (ICASSP {\textquoteright}05). IEEE International Conference on}, volume = {3}, year = {2005}, month = {2005/03//}, pages = {iii/73 - iii/76 Vol. 3}, abstract = {Acoustic source localization using multiple microphones can be formulated as a maximum likelihood estimation problem. The estimator is implicitly defined as the minimum of a certain objective function. As a result, we cannot obtain explicit expressions for the mean and the covariance of the estimator. We derive approximate expressions for the mean vector and covariance matrix of the estimator using Taylor{\textquoteright}s series expansion of the implicitly defined estimator. The validity of our expressions is verified by Monte-Carlo simulations.
We also study the performance of the estimator for different microphone array configurations.}, keywords = {(mathematics);, acoustic, approximate, approximation, array, array;, covariance, estimation;, expansion;, expressions;, function;, likelihood, localization;, matrices;, matrix;, maximum, mean, microphone, objective, processing;, series, signal, source, Taylor, theory;, vector;, vectors;}, doi = {10.1109/ICASSP.2005.1415649}, author = {Raykar,V.C. and Duraiswami, Ramani} } @conference {13188, title = {Detecting rotational symmetries}, booktitle = {Computer Vision, 2005. ICCV 2005. Tenth IEEE International Conference on}, volume = {2}, year = {2005}, month = {2005/10//}, pages = {954 - 961 Vol. 2}, abstract = {We present an algorithm for detecting multiple rotational symmetries in natural images. Given an image, its gradient magnitude field is computed, and information from the gradients is spread using a diffusion process in the form of a gradient vector flow (GVF) field. We construct a graph whose nodes correspond to pixels in the image, connecting points that are likely to be rotated versions of one another. The n-cycles present in the graph are made to vote for $C_n$ symmetries, their votes being weighted by the errors in transformation between GVF in the neighborhood of the voting points, and the irregularity of the n-sided polygons formed by the voters. The votes are accumulated at the centroids of possible rotational symmetries, generating a confidence map for each order of symmetry. We tested the method with several natural images.}, keywords = {axial, computational, detection;, field;, flow;, geometry;, gradient, graph, graph;, image, image;, magnitude, methods;, multiple, n-sided, object, polygons;, recognition;, rotational, symmetries;, symmetry;, theory;, tire, tyres;, vector}, doi = {10.1109/ICCV.2005.71}, author = {Shiv Naga Prasad,V. and Davis, Larry S.} } @conference {12280, title = {Measurement-based multipath multicast}, booktitle = {INFOCOM 2005. 24th Annual Joint Conference of the IEEE Computer and Communications Societies. Proceedings IEEE}, volume = {4}, year = {2005}, month = {2005/03//}, pages = {2803 - 2808 vol. 4}, abstract = {We propose a measurement-based routing algorithm to load balance intradomain traffic along multiple paths for multiple multicast sources. Multiple paths are established using application-layer overlaying. The proposed algorithm is able to converge under different network models, where each model reflects a different set of assumptions about the multicasting capabilities of the network. The algorithm is derived from simultaneous perturbation stochastic approximation and relies only on noisy estimates from measurements. Simulation results are presented to demonstrate the additional benefits obtained by incrementally increasing the multicasting capabilities.}, keywords = {algorithm;, allocation;, application-layer, approximation, approximation;, balance, communication;, Convergence, convergence;, intradomain, load, measurement-based, methods;, Multicast, multipath, network, numerical, of, overlaying;, perturbation, processes;, resource, Routing, routing;, source;, Stochastic, techniques;, Telecommunication, theory;, traffic;}, doi = {10.1109/INFCOM.2005.1498566}, author = {Guven,T. and La,R.J. and Shayman,M.A.
and Bhattacharjee, Bobby} } @conference {18201, title = {Dynamic distortion control for 3-D embedded wavelet video over multiuser OFDM networks}, booktitle = {Global Telecommunications Conference, 2004. GLOBECOM {\textquoteright}04. IEEE}, volume = {2}, year = {2004}, month = {2004/12/03/nov}, pages = {650 - 654 Vol.2}, abstract = {In this paper, we propose a system to transmit multiple 3D embedded wavelet video programs over downlink multiuser OFDM. We consider the fairness among users and formulate the problem as minimizing the users{\textquoteright} maximal distortion subject to power, rate, and subcarrier constraints. By exploring frequency, time, and multiuser diversity in OFDM and the flexibility of the 3D embedded wavelet video codec, the proposed algorithm can achieve fair video qualities among all users. Compared to a scheme similar to the current multiuser OFDM standard (IEEE 802.11a), the proposed scheme outperforms it by 1-5 dB on the worst received PSNR among all users and has much smaller PSNR deviation.}, keywords = {3D, 802.11a;, channels;, codec;, codecs;, communication;, control;, deviation;, distortion, diversity, diversity;, downlink, dynamic, embedded, fairness;, Frequency, IEEE, LAN;, maximal, minimax, minimization;, modulation;, multimedia, multiuser, OFDM, OFDM;, PSNR, rate, reception;, streaming;, systems;, techniques;, theory;, TIME, transforms;, video, video;, wavelet, wireless}, doi = {10.1109/GLOCOM.2004.1378042}, author = {Su,Guan-Ming and Han,Zhu and M. Wu and Liu,K. J.R} } @conference {18225, title = {Image hashing resilient to geometric and filtering operations}, booktitle = {Multimedia Signal Processing, 2004 IEEE 6th Workshop on}, year = {2004}, month = {2004/10/01/sept}, pages = {355 - 358}, abstract = {Image hash functions provide compact representations of images, which are useful for search and authentication applications. In this work, we have identified a general three-step framework and proposed a new image hashing scheme that achieves a better overall performance than the existing approaches under various kinds of image processing distortions. By exploiting the properties of the discrete polar Fourier transform and incorporating cryptographic keys, the proposed image hash is resilient to geometric and filtering operations, and is secure against guessing and forgery attacks.}, keywords = {compact, cryptographic, cryptography;, discrete, distortion;, Filtering, Fourier, function;, geometric, hash, image, key, key;, operation;, polar, PROCESSING, public, representation;, theory;, transform;, transforms;}, doi = {10.1109/MMSP.2004.1436566}, author = {Swaminathan,A. and Mao,Yinian and M. Wu} } @conference {13216, title = {Iterative figure-ground discrimination}, booktitle = {Pattern Recognition, 2004. ICPR 2004. Proceedings of the 17th International Conference on}, volume = {1}, year = {2004}, month = {2004/08//}, pages = {67 - 70 Vol.1}, abstract = {Figure-ground discrimination is an important problem in computer vision. Previous work usually assumes that the color distribution of the figure can be described by a low dimensional parametric model such as a mixture of Gaussians. However, such an approach has difficulty selecting the number of mixture components and is sensitive to the initialization of the model parameters. In this paper, we employ non-parametric kernel estimation for color distributions of both the figure and background.
We derive an iterative sampling-expectation (SE) algorithm for estimating the color distribution and segmentation. There are several advantages of kernel-density estimation. First, it enables automatic selection of weights of different cues based on the bandwidth calculation from the image itself. Second, it does not require model parameter initialization and estimation. The experimental results on images of cluttered scenes demonstrate the effectiveness of the proposed algorithm.}, keywords = {algorithm;, analysis;, Bandwidth, calculation;, Color, colour, Computer, density, dimensional, discrimination;, distribution;, distributions;, Estimation, estimation;, expectation, figure, Gaussian, ground, image, initialization;, iterative, Kernel, low, methods;, mixture;, model, model;, nonparametric, parameter, parametric, processes;, sampling, sampling;, segmentation, segmentation;, statistics;, theory;, vision;}, doi = {10.1109/ICPR.2004.1334006}, author = {Zhao, L. and Davis, Larry S.} } @conference {12289, title = {Trust-preserving set operations}, booktitle = {INFOCOM 2004. Twenty-third Annual Joint Conference of the IEEE Computer and Communications Societies}, volume = {4}, year = {2004}, month = {2004/03//}, pages = {2231 - 2241 vol.4}, abstract = {We describe a method for performing trust-preserving set operations by untrusted parties. Our motivation for this is the problem of securely reusing content-based search results in peer-to-peer networks. We model search results and indexes as data sets. Such sets have value for answering a new query only if they are trusted. In the absence of any system-wide security mechanism, a data set is trusted by a node a only if it was generated by some node which is trusted by a. Our main contributions are a formal definition of the problem as well as an efficient scheme that solves this problem by allowing untrusted peers to perform set operations on trusted data sets while also producing unforgeable proofs of correctness. This is accomplished by requiring trusted nodes to sign appropriately-defined digests of generated sets; each such digest consists of an RSA accumulator and a Bloom filter. The scheme is general, and has other applications as well. We give an analysis demonstrating the low overhead of the scheme, and we include experimental data which confirm the analysis.}, keywords = {accumulator;, Bloom, computing;, filter;, mechanism;, network;, operation;, peer-to-peer, RSA, Security, security;, set, system-wide, Telecommunication, theory;, trust-preserving}, doi = {10.1109/INFCOM.2004.1354646}, author = {Morselli,R. and Bhattacharjee, Bobby and Katz, Jonathan and Keleher,P.} } @conference {13217, title = {Window-based, discontinuity preserving stereo}, booktitle = {Computer Vision and Pattern Recognition, 2004. CVPR 2004. Proceedings of the 2004 IEEE Computer Society Conference on}, volume = {1}, year = {2004}, month = {2004/07/02/june}, pages = {I-66 - I-73 Vol.1}, abstract = {Traditionally, the problem of stereo matching has been addressed either by a local window-based approach or a dense pixel-based approach using global optimization. In this paper we present an algorithm which combines window-based local matching into a global optimization framework. Our local matching algorithm assumes that local windows can have at most two disparities. Under this assumption, the local matching can be performed very efficiently using graph cuts.
The global matching is formulated as minimization of an energy term that takes into account the matching constraints induced by the local stereo algorithm. Fast, approximate minimization of this energy is achieved through graph cuts. The key feature of our algorithm is that it preserves discontinuities during both the local and the global matching phases.}, keywords = {algorithm;, approach;, based, cuts;, dense, discontinuity, global, graph, image, local, MATCHING, matching;, minimisation;, optimization;, Pixel, preserving, processing;, stereo, theory;, window}, doi = {10.1109/CVPR.2004.1315015}, author = {Agrawal,M. and Davis, Larry S.} } @conference {14055, title = {A 2D profile reconstruction in a multilayered waveguide structure}, booktitle = {Antennas and Propagation Society International Symposium, 2003. IEEE}, volume = {1}, year = {2003}, month = {2003/06//}, pages = {531 - 534 vol.1}, abstract = {We discuss the problem of finding a profile or its location for the 2D scattering of electromagnetic waves with fixed frequencies in a multilayered waveguide domain. We use the dual space method (DSM) of Colton and Monk. Our goal is to extend our previous work on the TE and TM cases to the more complicated case of a waveguide. We emphasize the frequency range used in the reconstruction.}, keywords = {(mathematics);, 2D, dual, duality, electromagnetic, equations;, inhomogeneous, Maxwell, media;, method;, multilayered, multilayers;, profile, reconstruction;, scattering;, space, structure;, theory;, wave, waveguide, waveguides;}, doi = {10.1109/APS.2003.1217513}, author = {Seydou,F. and Duraiswami, Ramani and Seppanen,T.} } @conference {12723, title = {Adaptive visual tracking and recognition using particle filters}, booktitle = {Multimedia and Expo, 2003. ICME {\textquoteright}03. Proceedings. 2003 International Conference on}, volume = {2}, year = {2003}, month = {2003/07//}, pages = {II - 349-52 vol.2}, abstract = {This paper presents an improved method for simultaneous tracking and recognition of human faces from video, where a time series model is used to resolve the uncertainties in tracking and recognition. The improvements mainly arise from three aspects: (i) modeling the inter-frame appearance changes within the video sequence using an adaptive appearance model and an adaptive-velocity motion model; (ii) modeling the appearance changes between the video frames and gallery images by constructing intra- and extra-personal spaces; and (iii) utilization of the fact that the gallery images are in frontal views. By embedding them in a particle filter, we are able to achieve a stabilized tracker and an accurate recognizer when confronted by pose and illumination variations.}, keywords = {adaptive, adaptive-velocity, appearance, extra-personal, Filtering, filters;, image, intra-personal, model;, MOTION, particle, processing;, recognition;, sequence;, sequences;, series, signal, spaces;, theory;, TIME, tracking;, video, visual}, doi = {10.1109/ICME.2003.1221625}, author = {Zhou,Shaohua and Chellapa, Rama and Moghaddam, B.} } @article {18167, title = {Anti-collusion fingerprinting for multimedia}, journal = {Signal Processing, IEEE Transactions on}, volume = {51}, year = {2003}, month = {2003/04//}, pages = {1069 - 1087}, abstract = {Digital fingerprinting is a technique for identifying users who use multimedia content for unintended purposes, such as redistribution.
These fingerprints are typically embedded into the content using watermarking techniques that are designed to be robust to a variety of attacks. A cost-effective attack against such digital fingerprints is collusion, where several differently marked copies of the same content are combined to disrupt the underlying fingerprints. We investigate the problem of designing fingerprints that can withstand collusion and allow for the identification of colluders. We begin by introducing the collusion problem for additive embedding. We then study the effect that averaging collusion has on orthogonal modulation. We introduce a tree-structured detection algorithm for identifying the fingerprints associated with K colluders that requires O(K log(n/K)) correlations for a group of n users. We next develop a fingerprinting scheme based on code modulation that does not require as many basis signals as orthogonal modulation. We propose a new class of codes, called anti-collusion codes (ACCs), which have the property that the composition of any subset of K or fewer codevectors is unique. Using this property, we can therefore identify groups of K or fewer colluders. We present a construction of binary-valued ACC under the logical AND operation that uses the theory of combinatorial designs and is suitable for both the on-off keying and antipodal form of binary code modulation. In order to accommodate n users, our code construction requires only O($\sqrt{n}$) orthogonal signals for a given number of colluders. We introduce three different detection strategies that can be used with our ACC for identifying a suspect set of colluders. We demonstrate the performance of our ACC for fingerprinting multimedia and identifying colluders through experiments using Gaussian signals and real images.}, keywords = {(mathematics);, additive, algorithm;, and, anti-collusion, attack;, averaging, binary, code, codes;, codevectors;, coding;, colluders, collusion;, combinatorial, communication;, compression;, correlation;, cost-effective, data, data;, design, DETECTION, detection;, digital, embedding;, fingerprinting;, Gaussian, identification;, image, images;, keying;, logical, mathematics;, Modulation, modulation;, multimedia, multimedia;, of, on-off, operation;, orthogonal, processes;, real, redistribution;, Security, signal, signals;, theory;, tree-structured, TREES, watermarking;}, isbn = {1053-587X}, doi = {10.1109/TSP.2003.809378}, author = {Trappe,W. and M. Wu and Wang,Z.J. and Liu,K. J.R} } @conference {14051, title = {Electromagnetic scattering from a multilayered cylindrical waveguide}, booktitle = {Antennas and Propagation Society International Symposium, 2003. IEEE}, volume = {3}, year = {2003}, month = {2003/06//}, pages = {332 - 335 vol.3}, abstract = {This paper is devoted to electromagnetic scattering from an N multilayered circular cylinder. We consider waveguides in the z direction; that is, we look for the solution of Maxwell equations along the z direction. We assume a dielectric core and derive a mode matching approach for solving the problem. A numerical result is presented that illustrates the algorithm.}, keywords = {circular, core;, cylinder;, cylindrical, dielectric, dielectric-loaded, electromagnetic, EM, equations;, matching;, Maxwell, mode, multilayered, scattering;, theory;, wave, waveguide, waveguide;, waveguides;}, doi = {10.1109/APS.2003.1219855}, author = {Seydou,F.
and Duraiswami, Ramani and Seppanen,T.} } @conference {13237, title = {Improved fast Gauss transform and efficient kernel density estimation}, booktitle = {Computer Vision, 2003. Proceedings. Ninth IEEE International Conference on}, year = {2003}, month = {2003/10//}, pages = {664 - 671 vol.1}, abstract = {Evaluating sums of multivariate Gaussians is a common computational task in computer vision and pattern recognition, including in the general and powerful kernel density estimation technique. The quadratic computational complexity of the summation is a significant barrier to the scalability of this algorithm to practical applications. The fast Gauss transform (FGT) has successfully accelerated the kernel density estimation to linear running time for low-dimensional problems. Unfortunately, the cost of a direct extension of the FGT to higher-dimensional problems grows exponentially with dimension, making it impractical for dimensions above 3. We develop an improved fast Gauss transform to efficiently estimate sums of Gaussians in higher dimensions, where a new multivariate expansion scheme and an adaptive space subdivision technique dramatically improve the performance. The improved FGT has been applied to the mean shift algorithm, achieving linear computational complexity. Experimental results demonstrate the efficiency and effectiveness of our algorithm.}, keywords = {adaptive, algorithm;multivariate, complexity;computer, complexity;Gaussian, computational, density, estimation;mean, expansion, Gauss, processes;computational, recognition;quadratic, scheme;pattern, shift, space, subdivision, technique;computer, theory;, transform;kernel, vision;estimation, vision;fast}, doi = {10.1109/ICCV.2003.1238383}, author = {Yang,C. and Duraiswami, Ramani and Gumerov, Nail A. and Davis, Larry S.} } @conference {14053, title = {Integral equation solution of electromagnetic scattering from a multilayered cylindrical waveguide}, booktitle = {Antennas and Propagation Society International Symposium, 2003. IEEE}, volume = {3}, year = {2003}, month = {2003/06//}, pages = {524 - 527 vol.3}, abstract = {This paper is devoted to the electromagnetic scattering from an N multilayered cylinder. We consider waveguides in the z direction; that is, we look for the solution of Maxwell equations along the z direction. We assume a dielectric core and discuss the problem for the case of general domains. We use an integral equation approach to solve the problem and the Nystrom method for the numerical approximation.}, keywords = {approximation, circular, core;, cylinder;, cylindrical, dielectric, dielectric-loaded, electromagnetic, EM, equations;, integral, Maxwell, method;, multilayered, numerical, Nystrom, scattering;, theory;, wave, waveguide, waveguides;}, doi = {10.1109/APS.2003.1219901}, author = {Seydou,F. and Duraiswami, Ramani and Seppanen,T.} } @conference {12725, title = {Shape and motion driven particle filtering for human body tracking}, booktitle = {Multimedia and Expo, 2003. ICME {\textquoteright}03. Proceedings. 2003 International Conference on}, volume = {3}, year = {2003}, month = {2003/07//}, pages = {III - 61-4 vol.3}, abstract = {In this paper, we propose a method to recover 3D human body motion from a video acquired by a single static camera. In order to estimate the complex state distribution of a human body, we adopt the particle filtering framework.
We represent the human body using several layers of representation and compose the whole body step by step. In this way, more effective particles are generated and ineffective particles are removed as we process each layer. In order to deal with the rotational motion, the frequency of rotation is obtained using a preprocessing operation. In the preprocessing step, the variance of the motion field at each image is computed, and the frequency of rotation is estimated. The estimated frequency is used for the state update in the algorithm. We successfully track the movement of figure skaters in a TV broadcast image sequence, and recover the 3D shape and motion of the skater.}, keywords = {3D, body, broadcast, camera;, cameras;, estimation;, Filtering, framework;, human, image, MOTION, motion;, particle, processing;, rotational, sequence;, sequences;, signal, single, static, theory;, tracking;, TV, video}, doi = {10.1109/ICME.2003.1221248}, author = {Yamamoto, T. and Chellapa, Rama} } @conference {12733, title = {Statistical shape theory for activity modeling}, booktitle = {Acoustics, Speech, and Signal Processing, 2003. Proceedings. (ICASSP {\textquoteright}03). 2003 IEEE International Conference on}, volume = {3}, year = {2003}, month = {2003/04//}, pages = {III - 493-6 vol.3}, abstract = {Monitoring activities in a certain region from video data is an important surveillance problem. The goal is to learn the pattern of normal activities and detect unusual ones by identifying activities that deviate appreciably from the typical ones. We propose an approach using statistical shape theory based on the shape model of D.G. Kendall et al. (see "Shape and Shape Theory", John Wiley and Sons, 1999). In a low resolution video, each moving object is best represented as a moving point mass or particle. In this case, an activity can be defined by the interactions of all or some of these moving particles over time. We model this configuration of the particles by a polygonal shape formed from the locations of the points in a frame and the activity by the deformation of the polygons in time. These parameters are learned for each typical activity. Given a test video sequence, an activity is classified as abnormal if the probability for the sequence (represented by the mean shape and the dynamics of the deviations), given the model, is below a certain threshold. The approach gives very encouraging results in surveillance applications using a single camera and is able to identify various kinds of abnormal behavior.}, keywords = {abnormal, activities, activity, analysis;, behavior;, classification;, data;, image, mass;, matching;, modeling;, monitoring;, moving, normal, particle;, pattern, pattern;, point, polygonal, probability;, problem;, processing;, sequence;, sequences;, SHAPE, shape;, signal, statistical, Surveillance, surveillance;, theory;, video}, doi = {10.1109/ICASSP.2003.1199519}, author = {Vaswani, N. and Chowdhury, A.R. and Chellapa, Rama} } @article {17809, title = {Temporal probabilistic object bases}, journal = {Knowledge and Data Engineering, IEEE Transactions on}, volume = {15}, year = {2003}, month = {2003/08//july}, pages = {921 - 939}, abstract = {There are numerous applications where we have to deal with temporal uncertainty associated with objects. The ability to automatically store and manipulate time, probabilities, and objects is important.
We propose a data model and algebra for temporal probabilistic object bases (TPOBs), which allows us to specify the probability with which an event occurs at a given time point. In explicit TPOB-instances, the sets of time points along with their probability intervals are explicitly enumerated. In implicit TPOB-instances, sets of time points are expressed by constraints and their probability intervals by probability distribution functions. Thus, implicit object base instances are succinct representations of explicit ones; they allow for an efficient implementation of algebraic operations, while their explicit counterparts make defining algebraic operations easy. We extend the relational algebra to both explicit and implicit instances and prove that the operations on implicit instances correctly implement their counterparts on explicit instances.}, keywords = {algebra;, algebraic, bases;, constraints;, data, database, database;, databases;, distribution, explicit, functions;, handling;, implicit, instances;, integrity;, intervals;, management;, model;, models;, object, object-oriented, operations;, probabilistic, probability, probability;, relational, temporal, theory;, Uncertainty, uncertainty;}, isbn = {1041-4347}, doi = {10.1109/TKDE.2003.1209009}, author = {Biazzo,V. and Giugno,R. and Lukasiewicz,T. and V.S. Subrahmanian} } @conference {16914, title = {Content-based image retrieval using Fourier descriptors on a logo database}, booktitle = {Pattern Recognition, 2002. Proceedings. 16th International Conference on}, volume = {3}, year = {2002}, month = {2002///}, pages = {521 - 524 vol.3}, abstract = {A system that enables the pictorial specification of queries in an image database is described. The queries are composed of rectangle, polygon, ellipse, and B-spline shapes. The queries specify which shapes should appear in the target image as well as spatial constraints on the distance between them and their relative position. The retrieval process makes use of an abstraction of the contour of the shape, based on Fourier descriptors, that is invariant to translation, scale, rotation, and starting point. These abstractions are used in a system to locate logos in an image database. The utility of this approach is illustrated using some sample queries.}, keywords = {abstraction;, analysis;, constraints;, content-based, contour, database, database;, databases;, descriptors;, detection;, edge, Fourier, image, logos;, pictorial, processing;, query, retrieval;, SHAPE, spatial, specification;, theory;, visual}, doi = {10.1109/ICPR.2002.1047991}, author = {Folkers,A. and Samet, Hanan} } @article {12748, title = {Optimal edge-based shape detection}, journal = {Image Processing, IEEE Transactions on}, volume = {11}, year = {2002}, month = {2002/11//}, pages = {1209 - 1227}, abstract = {We propose an approach to accurately detecting two-dimensional (2-D) shapes. The cross section of the shape boundary is modeled as a step function. We first derive a one-dimensional (1-D) optimal step edge operator, which minimizes both the noise power and the mean squared error between the input and the filter output. This operator is found to be the derivative of the double exponential (DODE) function, originally derived by Ben-Arie and Rao (1994). We define an operator for shape detection by extending the DODE filter along the shape{\textquoteright}s boundary contour.
The responses are accumulated at the centroid of the operator to estimate the likelihood of the presence of the given shape. This method of detecting a shape is in fact a natural extension of the task of edge detection at the pixel level to the problem of global contour detection. This simple filtering scheme also provides a tool for a systematic analysis of edge-based shape detection. We investigate how the error is propagated by the shape geometry. We have found that, under general assumptions, the operator is locally linear at the peak of the response. We compute the expected shape of the response and derive some of its statistical properties. This enables us to predict both its localization and detection performance and adjust its parameters according to imaging conditions and given performance specifications. Applications to the problem of vehicle detection in aerial images, human facial feature detection, and contour tracking in video are presented.}, keywords = {1D, 2D, aerial, analysis;, boundary, conditions;, contour, cross, detection;, DODE, double, edge, edge-based, error, error;, exponential, extraction;, facial, feature, filter, filter;, Filtering, function;, geometry;, global, human, images;, imaging, localization, mean, methods;, NOISE, operator;, optimal, optimisation;, output;, performance;, pixel;, power;, propagation;, properties;, section;, SHAPE, square, squared, statistical, step, theory;, tracking;, two-dimensional, vehicle, video;}, isbn = {1057-7149}, doi = {10.1109/TIP.2002.800896}, author = {Moon, H. and Chellapa, Rama and Rosenfeld, A.} } @conference {13618, title = {Page classification through logical labelling}, booktitle = {Pattern Recognition, 2002. Proceedings. 16th International Conference on}, volume = {3}, year = {2002}, month = {2002///}, pages = {477 - 480 vol.3}, abstract = {We propose an integrated approach to page classification and logical labelling. Layout is represented by a fully connected attributed relational graph that is matched to the graph of an unknown document, achieving classification and labelling simultaneously. By incorporating global constraints in an integrated fashion, ambiguity at the zone level can be reduced, providing robustness to noise and variation. Models are automatically trained from sample documents. Experimental results show promise for the classification and labelling of technical article title pages, and support the idea of a hierarchical model base.}, keywords = {article, attributed, base;, character, classification;, constraints;, document, document;, experimental, global, graph, graph;, hierarchical, image, images;, labelling;, logical, model, noise;, OCR;, optical, page, pages;, processing;, recognition;, relational, results;, technical, theory;, title, unknown}, doi = {10.1109/ICPR.2002.1047980}, author = {Liang,Jian and David Doermann and Ma,M. and Guo,J. K} } @conference {13612, title = {OCR-based rate-distortion analysis of residual coding}, booktitle = {Image Processing, 1997. Proceedings., International Conference on}, volume = {3}, year = {1997}, month = {1997/10//}, pages = {690 - 693 vol.3}, abstract = {Symbolic compression of document images provides access to symbols found in document images and exploits the redundancy found within them. Document images are highly structured and contain large numbers of repetitive symbols. We have shown that while symbolically compressing a document image we are able to perform compressed-domain processing.
Symbolic compression forms representative prototypes for symbols and encodes the image by the location of these prototypes and a residual (the difference between symbol and prototype). We analyze the rate-distortion tradeoff by varying the amount of residual used in compression for both distance- and row-order coding. A measure of distortion is based on the performance of an OCR system on the resulting image. The University of Washington document database images, ground truth, and OCR evaluation software are used for experiments.}, keywords = {analysis;redundancy;representative, character, coding;distortion, coding;image, coding;lossy, coding;row-order, coding;symbolic, compression;data, compression;document, compression;lossy, database, distortion, Evaluation, image, images;document, images;experiments;ground, measure;document, OCR, of, performance;University, processing;distance-order, processing;image, prototypes;residual, recognition;rate, representation;optical, representation;progressive, software;OCR, system, theory;, transmission;rate-distortion, truth;image, Washington;compressed-domain}, doi = {10.1109/ICIP.1997.632215}, author = {Kia,O. E and David Doermann} } @conference {14897, title = {Space/time trade-offs for associative memory}, booktitle = {Pattern Recognition, 1996., Proceedings of the 13th International Conference on}, volume = {4}, year = {1996}, month = {1996/08//}, pages = {296 - 302 vol.4}, abstract = {In any storage scheme, there is some trade-off between the space used (size of memory) and access time. However, the nature of this trade-off seems to depend on more than just what is being stored; it also depends on the types of queries we consider. We justify this claim by considering a particular memory model and contrasting recognition (membership queries) with associative recall. We show that the latter task can require exponentially larger memories even when identical information is stored.}, keywords = {access, matching;set, memory;associative, nets;pattern, processing;content-addressable, query;memory, recall;membership, scheme;associative, space;set, storage;neural, theory;, theory;storage, time;associative}, doi = {10.1109/ICPR.1996.547434}, author = {GROVE,A. J and Jacobs, David W.} } @conference {15207, title = {Efficient minimum cost matching using quadrangle inequality}, booktitle = {Foundations of Computer Science, 1992. Proceedings., 33rd Annual Symposium on}, year = {1992}, month = {1992/10//}, pages = {583 - 592}, abstract = {The authors present efficient algorithms for finding a minimum cost perfect matching, and for solving the transportation problem in bipartite graphs, G = (Red $\cup$ Blue, Red $\times$ Blue), where |Red| = n, |Blue| = m, n $\leq$ m, and the cost function obeys the quadrangle inequality. The first results assume that all the red points and all the blue points lie on a curve that is homeomorphic to either a line or a circle and the cost function is given by the Euclidean distance along the curve. They present a linear time algorithm for the matching problem. They generalize the method to solve the corresponding transportation problem in O((m+n)log(m+n)) time. The next result is an O(n log m) algorithm for minimum cost matching when the cost array is a bitonic Monge array. An example of this is when the red points lie on one straight line and the blue points lie on another straight line (that is not necessarily parallel to the first one).
Finally, they provide a weakly polynomial algorithm for the transportation problem in which the associated cost array is a bitonic Monge array.}, keywords = {algorithm;, array;, bipartite, bitonic, blue, complexity;, computational, cost, distance;, Euclidean, function;, geometry;, graph, graphs;, inequality;, linear, MATCHING, matching;, minimisation;, minimum, Monge, perfect, points;, polynomial, problem;, quadrangle, red, theory;, TIME, transportation, transportation;, weakly}, doi = {10.1109/SFCS.1992.267793}, author = {Aggarwal,A. and Bar-Noy,A. and Khuller, Samir and Kravets,D. and Schieber,B.} } @article {14930, title = {A new approach to realizing partially symmetric functions}, journal = {Computers, IEEE Transactions on}, volume = {38}, year = {1989}, month = {1989/06//}, pages = {896 - 898}, abstract = {Consideration is given to the class of partially symmetric functions, and a method for realizing them is outlined. Each such function can be expressed as a sum of totally symmetric functions such that a circuit can be designed with its complexity dependent on the size of such a symmetric cover. The authors compare the sizes of symmetric and sum-of-product covers and show that the symmetric cover will be substantially smaller for this class of functions.}, keywords = {class, complexity;logic, cover;symmetric, covers;switching, design;switching, functions;Boolean, functions;complexity;partially, functions;computational, functions;sum-of-product, of, symmetric, theory;, theory;symmetric}, isbn = {0018-9340}, doi = {10.1109/12.24302}, author = {JaJa, Joseph F. and Wu,S.-M.} }