@article{14317,
  title    = {Disaggregated End-Use Energy Sensing for the Smart Grid},
  journal  = {IEEE Pervasive Computing},
  volume   = {10},
  year     = {2011},
  month    = jan # "--" # mar,
  pages    = {28--39},
  abstract = {This article surveys existing and emerging disaggregation techniques for energy-consumption data and highlights signal features that might be used to sense disaggregated data in an easily installed and cost-effective manner.},
  keywords = {Calibration, disaggregated end-use energy sensing, Disaggregated energy sensing, disaggregation data techniques, Electricity, Energy consumption, Energy efficiency, energy-consumption data, Gas, Home appliances, Sensors, Smart grid, Smart grids, smart power grids, Sustainability, Water},
  issn     = {1536-1268},
  doi      = {10.1109/MPRV.2010.74},
  author   = {Froehlich, Jon and Larson, E. and Gupta, S. and Cohn, G. and Reynolds, M. and Patel, S.},
}

@inproceedings{16431,
  title     = {Estimating Functional Agent-Based Models: An Application to Bid Shading in Online Markets Format},
  booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference (GECCO 2011)},
  year      = {2011},
  abstract  = {Bid shading is a common strategy in online auctions to avoid the "winner{\textquoteright}s curse". While almost all bidders shade their bids, at least to some degree, it is impossible to infer the degree and volume of shaded bids directly from observed bidding data. In fact, most bidding data only allows us to observe the resulting price process, i.e. whether prices increase fast (due to little shading) or whether they slow down (when all bidders shade their bids). In this work, we propose an agent-based model that simulates bidders with different bidding strategies and their interaction with one another. We calibrate that model (and hence estimate properties about the propensity and degree of shaded bids) by matching the emerging simulated price process with that of the observed auction data using genetic algorithms. From a statistical point of view, this is challenging because we match functional draws from simulated and real price processes. We propose several competing fitness functions and explore how the choice alters the resulting ABM calibration. We apply our model to the context of eBay auctions for digital cameras and show that a balanced fitness function yields the best results.},
  keywords  = {Agent-based modeling, business, Calibration, Genetic algorithms, internet auctions, simulation},
  url       = {http://papers.ssrn.com/sol3/papers.cfm?abstract_id=1846639},
  author    = {Guo, Wei and Jank, Wolfgang and Rand, William},
}

@inproceedings{12453,
  title        = {Recent advances in age and height estimation from still images and video},
  booktitle    = {2011 IEEE International Conference on Automatic Face \& Gesture Recognition and Workshops (FG 2011)},
  year         = {2011},
  month        = mar,
  pages        = {91--96},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {Soft-biometrics such as gender, age, race, etc have been found to be useful characterizations that enable fast pre-filtering and organization of data for biometric applications. In this paper, we focus on two useful soft-biometrics - age and height. We discuss their utility and the factors involved in their estimation from images and videos. In this context, we highlight the role that geometric constraints such as multiview-geometry, and shape-space geometry play. Then, we present methods based on these geometric constraints for age and height-estimation. These methods provide a principled means by fusing image-formation models, multi-view geometric constraints, and robust statistical methods for inference.},
  keywords     = {age estimation, biometrics (access control), Calibration, Estimation, Geometry, height estimation, HUMANS, image fusion, image-formation model fusion, Legged locomotion, multiview-geometry, Robustness, SHAPE, shape-space geometry, soft-biometrics, statistical analysis, statistical methods, video signal processing},
  isbn         = {978-1-4244-9140-7},
  doi          = {10.1109/FG.2011.5771367},
  author       = {Chellapa, Rama and Turaga, P.},
}

@inproceedings{16417,
  title        = {When does simulated data match real data?},
  booktitle    = {Proceedings of the 13th annual conference companion on Genetic and evolutionary computation},
  series       = {GECCO {\textquoteright}11},
  year         = {2011},
  pages        = {231--232},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  abstract     = {Agent-based models can replicate real-world patterns, but finding parameters that achieve the best match can be difficult. To validate a model, a real-world dataset is often divided into a training set (to calibrate the parameters) and a test set (to validate the calibrated model). The difference between the training and test data and the simulated data is determined using an error measure. In the context of evolutionary computation techniques, the error measure also serves as a fitness function, and thus affects evolutionary search dynamics. We survey the effect of five different error measures on both a toy problem and a real world problem of matching a model to empirical online news consumption behavior. We use each error measure separately for calibration on the training dataset, and then examine the results of all five error measures on both the training and testing datasets. We show that certain error measures sometimes serve as better fitness functions than others, and in fact using one error measure may result in better calibration (on a different measure) than using the different measure directly. For the toy problem, the Pearson{\textquoteright}s correlation measure dominated all other measures, but no single error measure was Pareto dominant for the real world problem.},
  keywords     = {Agent-based modeling, business, Calibration, Genetic algorithms, information search, network analysis},
  isbn         = {978-1-4503-0690-4},
  doi          = {10.1145/2001858.2001988},
  url          = {http://doi.acm.org/10.1145/2001858.2001988},
  author       = {Stonedahl, Forrest and Anderson, David and Rand, William},
}

@article{12475,
  title    = {Robust Height Estimation of Moving Objects From Uncalibrated Videos},
  journal  = {IEEE Transactions on Image Processing},
  volume   = {19},
  year     = {2010},
  month    = aug,
  pages    = {2221--2232},
  abstract = {This paper presents an approach for video metrology. From videos acquired by an uncalibrated stationary camera, we first recover the vanishing line and the vertical point of the scene based upon tracking moving objects that primarily lie on a ground plane. Using geometric properties of moving objects, a probabilistic model is constructed for simultaneously grouping trajectories and estimating vanishing points. Then we apply a single view mensuration algorithm to each of the frames to obtain height measurements. We finally fuse the multiframe measurements using the least median of squares (LMedS) as a robust cost function and the Robbins-Monro stochastic approximation (RMSA) technique. This method enables less human supervision, more flexibility and improved robustness. From the uncertainty analysis, we conclude that the method with auto-calibration is robust in practice. Results are shown based upon realistic tracking data from a variety of scenes.},
  keywords = {algorithms, Biometry, Calibration, EM algorithm, geometric properties, Geometry, Image Enhancement, Image Interpretation, Computer-Assisted, Imaging, Three-Dimensional, least median of squares, least squares approximations, MOTION, motion information, multiframe measurements, Pattern Recognition, Automated, Reproducibility of results, Robbins-Monro stochastic approximation, robust height estimation, Sensitivity and Specificity, Signal Processing, Computer-Assisted, stochastic approximation, Subtraction Technique, tracking data, uncalibrated stationary camera, uncalibrated videos, uncertainty analysis, vanishing point, video metrology, Video Recording, video signal processing},
  issn     = {1057-7149},
  doi      = {10.1109/TIP.2010.2046368},
  author   = {Shao, Jie and Zhou, S. K. and Chellapa, Rama},
}

@inproceedings{12167,
  title        = {Using uncertainty as a model selection and comparison criterion},
  booktitle    = {Proceedings of the 5th International Conference on Predictor Models in Software Engineering},
  series       = {PROMISE {\textquoteright}09},
  year         = {2009},
  pages        = {18:1--18:9},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  abstract     = {Over the last 25+ years, software estimation research has been searching for the best model for estimating variables of interest (e.g., cost, defects, and fault proneness). This research effort has not lead to a common agreement. One problem is that, they have been using accuracy as the basis for selection and comparison. But accuracy is not invariant; it depends on the test sample, the error measure, and the chosen error statistics (e.g., MMRE, PRED, Mean and Standard Deviation of error samples). Ideally, we would like an invariant criterion. In this paper, we show that uncertainty can be used as an invariant criterion to figure out which estimation model should be preferred over others. The majority of this work is empirically based, applying Bayesian prediction intervals to some COCOMO model variations with respect to a publicly available cost estimation data set coming from the PROMISE repository.},
  keywords     = {accuracy, Bayesian prediction intervals, Calibration, cost estimation, cost model, model evaluation, model selection, prediction interval, Uncertainty},
  isbn         = {978-1-60558-634-2},
  doi          = {10.1145/1540438.1540464},
  url          = {http://doi.acm.org/10.1145/1540438.1540464},
  author       = {Sarcia{\textquoteright}, Salvatore Alessandro and Basili, Victor R. and Cantone, Giovanni},
}

@inproceedings{11975,
  title        = {Robust Contrast Invariant Stereo Correspondence},
  booktitle    = {Proceedings of the 2005 IEEE International Conference on Robotics and Automation, 2005. ICRA 2005},
  year         = {2005},
  month        = apr,
  pages        = {819--824},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {A stereo pair of cameras attached to a robot will inevitably yield images with different contrast. Even if we assume that the camera hardware is identical, due to slightly different points of view, the amount of light entering the two cameras is also different, causing dynamically adjusted internal parameters such as aperture, exposure and gain to be different. Due to the difficulty of obtaining and maintaining precise intensity or color calibration between the two cameras, contrast invariance becomes an extremely desirable property of stereo correspondence algorithms. The problem of achieving point correspondence between a stereo pair of images is often addressed by using the intensity or color differences as a local matching metric, which is sensitive to contrast changes. We present an algorithm for contrast invariant stereo matching which relies on multiple spatial frequency channels for local matching. A fast global framework uses the local matching to compute the correspondences and find the occlusions. We demonstrate that the use of multiple frequency channels allows the algorithm to yield good results even in the presence of significant amounts of noise.},
  keywords     = {Apertures, Calibration, CAMERAS, Computer science, contrast invariance, diffusion, Educational institutions, Frequency, gabor, Hardware, occlusions, Robot vision systems, Robotics and automation, Robustness, stereo},
  isbn         = {0-7803-8914-X},
  doi          = {10.1109/ROBOT.2005.1570218},
  author       = {Ogale, A. S. and Aloimonos, J.},
}

@article{14263,
  title    = {The Argus eye: a new imaging system designed to facilitate robotic tasks of motion},
  journal  = {IEEE Robotics \& Automation Magazine},
  volume   = {11},
  year     = {2004},
  month    = dec,
  pages    = {31--38},
  abstract = {This article describes an imaging system that has been designed to facilitate robotic tasks of motion. The system consists of a number of cameras in a network, arranged so that they sample different parts of the visual sphere. This geometric configuration has provable advantages compared to small field of view cameras for the estimation of the system{\textquoteright}s own motion and, consequently, the estimation of shape models from the individual cameras. The reason is, inherent ambiguities of confusion between translation and rotation disappear. Pairs of cameras may also be arranged in multiple stereo configurations, which provide additional advantages for segmentation. Algorithms for the calibration of the system and the three-dimensional (3-D) motion estimation are provided.},
  keywords = {Argus eye, Calibration, CAMERAS, computational geometry, Design automation, Eyes, image formation, imaging system, Information geometry, Layout, Motion estimation, multiple stereo configurations, panoramic robots, robot vision, Robot vision systems, robotic motion tasks, Robotics and automation, SHAPE, shape model estimation, system calibration},
  issn     = {1070-9932},
  doi      = {10.1109/MRA.2004.1371606},
  author   = {Baker, P. and Ogale, A. S. and Ferm{\"u}ller, Cornelia},
}

@inproceedings{14235,
  title        = {New eyes for robotics},
  booktitle    = {2003 IEEE/RSJ International Conference on Intelligent Robots and Systems, 2003. (IROS 2003). Proceedings},
  volume       = {1},
  year         = {2003},
  month        = oct,
  pages        = {1018--1023 vol.1},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {This paper describes an imaging system that has been designed to facilitate robotic tasks of motion. The system consists of a number of cameras in a network arranged so that they sample different parts of the visual sphere. This geometric configuration has provable advantages compared to small field of view cameras for the estimation of the system{\textquoteright}s own motion and consequently the estimation of shape models from the individual cameras. The reason is that inherent ambiguities of confusion between translation and rotation disappear. Pairs of cameras may also be arranged in multiple stereo configurations which provide additional advantages for segmentation. Algorithms for the calibration of the system and the 3D motion estimation are provided.},
  keywords     = {3D motion estimation, Argus eye, array signal processing, Birds, Calibration, CAMERAS, Control systems, Eyes, geometric configuration, imaging, imaging system, Layout, Motion estimation, multiple stereo configurations, Robot kinematics, robot vision, Robot vision systems, ROBOTICS, Robotics and automation, SHAPE, shape models},
  isbn         = {0-7803-7860-1},
  doi          = {10.1109/IROS.2003.1250761},
  author       = {Baker, P. and Ogale, A. S. and Ferm{\"u}ller, Cornelia and Aloimonos, J.},
}

@inproceedings{12728,
  title     = {Towards a view invariant gait recognition algorithm},
  booktitle = {Proceedings. IEEE Conference on Advanced Video and Signal Based Surveillance, 2003.},
  year      = {2003},
  month     = jul,
  pages     = {143--150},
  abstract  = {Human gait is a spatio-temporal phenomenon and typifies the motion characteristics of an individual. The gait of a person is easily recognizable when extracted from a side-view of the person. Accordingly, gait-recognition algorithms work best when presented with images where the person walks parallel to the camera image plane. However, it is not realistic to expect this assumption to be valid in most real-life scenarios. Hence, it is important to develop methods whereby the side-view can be generated from any other arbitrary view in a simple, yet accurate, manner. This is the main theme of the paper. We show that if the person is far enough from the camera, it is possible to synthesize a side view (referred to as canonical view) from any other arbitrary view using a single camera. Two methods are proposed for doing this: (i) using the perspective projection model; (ii) using the optical flow based structure from motion equations. A simple camera calibration scheme for this method is also proposed. Examples of synthesized views are presented. Preliminary testing with gait recognition algorithms gives encouraging results. A by-product of this method is a simple algorithm for synthesizing novel views of a planar scene.},
  keywords  = {(access, algorithm;, analysis;, Biometrics, biometrics;, Calibration, calibration;, camera, canonical, control);, equations;, flow;, Gait, gait;, human, image, invariant, model;, MOTION, optical, perspective, phenomenon;, projection, recognition, scheme;, sequences;, spatio-temporal, view, view;},
  doi       = {10.1109/AVSS.2003.1217914},
  author    = {Kale, A. and Chowdhury, A. K. R. and Chellapa, Rama},
}

@inproceedings{13343,
  title        = {Using state modules for adaptive query processing},
  booktitle    = {19th International Conference on Data Engineering, 2003. Proceedings},
  year         = {2003},
  month        = mar,
  pages        = {353--364},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {We present a query architecture in which join operators are decomposed into their constituent data structures (State Modules, or SteMs), and dataflow among these SteMs is managed adaptively by an eddy routing operator [R. Avnur et al., (2000)]. Breaking the encapsulation of joins serves two purposes. First, it allows the eddy to observe multiple physical operations embedded in a join algorithm, allowing for better calibration and control of these operations. Second, the SteM on a relation serves as a shared materialization point, enabling multiple competing access methods to share results, which can be leveraged by multiple competing join algorithms. Our architecture extends prior work significantly, allowing continuously adaptive decisions for most major aspects of traditional query optimization: choice of access methods and join algorithms, ordering of operators, and choice of a query spanning tree. SteMs introduce significant routing flexibility to the eddy, enabling more opportunities for adaptation, but also introducing the possibility of incorrect query results. We present constraints on eddy routing through SteMs that ensure correctness while preserving a great deal of flexibility. We also demonstrate the benefits of our architecture via experiments in the Telegraph dataflow system. We show that even a simple routing policy allows significant flexibility in adaptation, including novel effects like automatic "hybridization " of multiple algorithms for a single join.},
  keywords     = {adaptive query processing, Bandwidth, Calibration, data encapsulation, data structure, Data structures, Databases, Dictionaries, eddy routing, eddy routing operator, Encapsulation, join operator, multiple algorithm automatic hybridization, multiple competing join algorithm, query architecture, Query processing, query spanning tree, Routing, routing policy, Runtime, shared materialization point, State Module, SteMs, Telegraph dataflow system, Telegraphy, Tree data structures},
  isbn         = {0-7803-7665-X},
  doi          = {10.1109/ICDE.2003.1260805},
  author       = {Raman, Vijayshankar and Deshpande, Amol and Hellerstein, J. M.},
}

@inproceedings{14188,
  title        = {A spherical eye from multiple cameras (makes better models of the world)},
  booktitle    = {Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2001. CVPR 2001},
  volume       = {1},
  year         = {2001},
  pages        = {I-576--I-583 vol.1},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {The paper describes an imaging system that has been designed specifically for the purpose of recovering egomotion and structure from video. The system consists of six cameras in a network arranged so that they sample different parts of the visual sphere. This geometric configuration has provable advantages compared to small field of view cameras for the estimation of the system{\textquoteright}s own motion and consequently the estimation of shape models from the individual cameras. The reason is that inherent ambiguities of confusion between translation and rotation disappear. We provide algorithms for the calibration of the system and 3D motion estimation. The calibration is based on a new geometric constraint that relates the images of lines parallel in space to the rotation between the cameras. The 3D motion estimation uses a constraint relating structure directly to image gradients.},
  keywords     = {3D motion estimation, Calibration, camera network, CAMERAS, Computer vision, egomotion recovery, geometric configuration, geometric constraint, image gradients, image sampling, imaging system, Laboratories, Layout, Motion estimation, multiple cameras, Pixel, Robot vision systems, SHAPE, shape models, Space technology, spherical eye, system calibration, video, video cameras, video signal processing, visual sphere sampling},
  isbn         = {0-7695-1272-0},
  doi          = {10.1109/CVPR.2001.990525},
  author       = {Baker, P. and Ferm{\"u}ller, Cornelia and Aloimonos, J. and Pless, R.},
}

@inproceedings{11993,
  title        = {Self-calibration from image derivatives},
  booktitle    = {Sixth International Conference on Computer Vision, 1998},
  year         = {1998},
  month        = jan,
  pages        = {83--89},
  publisher    = {IEEE},
  organization = {IEEE},
  abstract     = {This study investigates the problem of estimating the calibration parameters from image motion fields induced by a rigidly moving camera with unknown calibration parameters, where the image formation is modeled with a linear pinhole-camera model. The equations obtained show the flow to be clearly separated into a component due to the translation and the calibration parameters and a component due to the rotation and the calibration parameters. A set of parameters encoding the latter component are linearly related to the flow, and from these parameters the calibration can be determined. However, as for discrete motion, in the general case it is not possible, to decouple image measurements from two frames only into their translational and rotational component. Geometrically, the ambiguity takes the form of a part of the rotational component being parallel to the translational component, and thus the scene can be reconstructed only up to a projective transformation. In general, for a full calibration at least four successive image frames are necessary with the 3D-rotation changing between the measurements. The geometric analysis gives rise to a direct self-calibration method that avoids computation of optical flow or point correspondences and uses only normal flow measurements. In this technique the direction of translation is estimated employing in a novel way smoothness constraints. Then the calibration parameters are estimated from the rotational components of several flow fields using Levenberg-Marquardt parameter estimation, iterative in the calibration parameters only. The technique proposed does not require calibration objects in the scene or special camera motions and it also avoids the computation of exact correspondence. This makes it suitable for the calibration of active vision systems which have to acquire knowledge about their intrinsic parameters while they perform other tasks, or as a tool for analyzing image sequences in large video databases},
  keywords     = {3D-rotation, active vision, Calibration, CAMERAS, discrete motion, Encoding, Equations, image derivatives, image formation, image measurements, Image motion analysis, image motion fields, Image reconstruction, Image sequences, large video databases, Layout, Levenberg-Marquardt parameter estimation, linear pinhole-camera model, Motion estimation, Motion measurement, Optical computing, parameter estimation, projective transformation, rigidly moving camera, self-calibration, smoothness constraints, unknown calibration parameters},
  isbn         = {81-7319-221-9},
  doi          = {10.1109/ICCV.1998.710704},
  author       = {Brodsky, T. and Ferm{\"u}ller, Cornelia and Aloimonos, J.},
}