@article {12463, title = {Compressive Video Acquisition, Fusion and Processing}, year = {2010}, month = {2010/12/14/}, abstract = {Modern developments in sensor technology, signal processing, and wireless communications have enabled the conception and deployment of large-scale networked sensing systems spanning numerous collection platforms and varied modalities. These systems have the potential to make intelligent decisions by integrating information from massive amounts of sensor data. Before such benefits can be achieved, significant advances must be made in methods for communicating, fusing, and processing this ever-growing volume of diverse data. In this one-year research project, we aimed to expose the fundamental issues and pave the way for further careful study of compressive approaches to video acquisition, fusion, and processing. In doing so, we developed a theoretical definition of video temporal bandwidth and applied the theory to compressive sampling and reconstruction. We created a new framework for compressive video sensing based on linear dynamical systems, lowering the compressive measurement rate. Finally, we applied our own joint manifold model to a variety of relevant image processing problems, demonstrating the model{\textquoteright}s effectiveness and ability to overcome noise and occlusion obstacles. We also showed how joint manifold models can discover an object{\textquoteright}s trajectory, an important step towards video fusion.}, keywords = {*DATA FUSION, *DETECTORS, *SIGNAL PROCESSING, *VIDEO SIGNALS, ACQUISITION, COLLECTION, COMMUNICATION AND RADIO SYSTEMS, COMPRESSIVE PROPERTIES, COMPRESSIVE SAMPLING, compressive sensing, COMPRESSIVE VIDEO, decision making, DEPLOYMENT, DETECTION, DYNAMICS, IMAGE PROCESSING, Linear systems, Linearity, MANIFOLDS(ENGINES), measurement, MISCELLANEOUS DETECTION AND DETECTORS, MODELS, sampling, THEORY, TRAJECTORIES}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA533703}, author = {Baraniuk,Richard G. and Chellappa, Rama and Wakin,Michael} } @article {18183, title = {Component forensics}, journal = {Signal Processing Magazine, IEEE}, volume = {26}, year = {2009}, month = {2009/03//}, pages = {38 - 48}, abstract = {Visual sensor technologies have experienced tremendous growth in recent decades, and digital devices are becoming ubiquitous. Digital images taken by various imaging devices have been used in a growing number of applications, from military and reconnaissance to medical diagnosis and consumer photography. Consequently, a series of new forensic issues arises amidst such rapid advancement and widespread adoption of imaging technologies. For example, one can readily ask: what kinds of hardware and software components, as well as their parameters, have been employed inside these devices? Given a digital image, which imaging sensor or which brand of sensor was used to acquire the image? How was the image acquired? Was it captured using a digital camera, cell phone camera, image scanner, or was it created artificially using image-editing software? Has the image undergone any manipulation after capture? Is it authentic, or has it been tampered with in any way? Does it contain any hidden information or steganographic data? Many of these forensic questions are related to tracing the origin of the digital image to its creation process. Evidence obtained from such analysis would provide useful forensic information to law enforcement, security, and intelligence agencies. 
Knowledge of image acquisition techniques can also help answer further forensic questions regarding the nature of additional processing that the image might have undergone after capture.}, keywords = {component forensics, digital image processing, image acquisition technique, visual sensor technology, image sensors, security of data}, isbn = {1053-5888}, doi = {10.1109/MSP.2008.931076}, author = {Swaminathan,A. and M. Wu and Liu,K. J.R} } @article {18197, title = {Digital image forensics via intrinsic fingerprints}, journal = {Information Forensics and Security, IEEE Transactions on}, volume = {3}, year = {2008}, month = {2008/03//}, pages = {101 - 117}, abstract = {Digital imaging has experienced tremendous growth in recent decades, and digital camera images have been used in a growing number of applications. With such increasing popularity and the availability of low-cost image editing software, the integrity of digital image content can no longer be taken for granted. This paper introduces a new methodology for the forensic analysis of digital camera images. The proposed method is based on the observation that many processing operations, both inside and outside acquisition devices, leave distinct intrinsic traces on digital images, and these intrinsic fingerprints can be identified and employed to verify the integrity of digital data. The intrinsic fingerprints of the various in-camera processing operations can be estimated through a detailed imaging model and its component analysis. Further processing applied to the camera-captured image is modelled as a manipulation filter, for which a blind deconvolution technique is applied to obtain a linear time-invariant approximation and to estimate the intrinsic fingerprints associated with these postcamera operations. The absence of camera-imposed fingerprints from a test image indicates that the test image is not a camera output and is possibly generated by other image production processes. Any changes or inconsistencies among the estimated camera-imposed fingerprints, or the presence of new types of fingerprints, suggest that the image has undergone some kind of processing after the initial capture, such as tampering or steganographic embedding. Through analysis and extensive experimental studies, this paper demonstrates the effectiveness of the proposed framework for nonintrusive digital image forensics.}, keywords = {digital image forensics, forensic analysis, intrinsic fingerprints, acquisition devices, blind deconvolution, time invariant approximation, digital camera images, digital photography, fingerprint identification, cameras, image sensors}, isbn = {1556-6013}, doi = {10.1109/TIFS.2007.916010}, author = {Swaminathan,A. and M. Wu and Liu,K. J.R} } @conference {18224, title = {Image acquisition forensics: Forensic analysis to identify imaging source}, booktitle = {Acoustics, Speech and Signal Processing, 2008. ICASSP 2008. IEEE International Conference on}, year = {2008}, month = {2008/04/31/4}, pages = {1657 - 1660}, abstract = {With widespread availability of digital images and easy-to-use image editing software, the origin and integrity of digital images have become a serious concern. This paper introduces the problem of image acquisition forensics and proposes a fusion of a set of signal processing features to identify the source of digital images. 
Our results show that the devices{\textquoteright} color interpolation coefficients and noise statistics can jointly serve as good forensic features to help accurately trace the origin of the input image to its production process and to differentiate between images produced by cameras, cell phone cameras, scanners, and computer graphics. Further, the proposed features can also be extended to determine the brand and model of the device. Thus, the techniques introduced in this work provide a unified framework for image acquisition forensics.}, keywords = {image acquisition forensics, forensic analysis, color interpolation coefficients, noise statistics, imaging source identification, cell phone cameras, scanners, computer graphics, digital images, image editing softwares, data acquisition, signal processing, image colour analysis, statistical analysis, interpolation}, doi = {10.1109/ICASSP.2008.4517945}, author = {McKay,C. and Swaminathan,A. and Gou,Hongmei and M. Wu} } @article {18257, title = {Nonintrusive component forensics of visual sensors using output images}, journal = {Information Forensics and Security, IEEE Transactions on}, volume = {2}, year = {2007}, month = {2007/03//}, pages = {91 - 106}, abstract = {Rapid technology development and the widespread use of visual sensors have led to a number of new problems related to protecting intellectual property rights, handling patent infringements, authenticating acquisition sources, and identifying content manipulations. This paper introduces nonintrusive component forensics as a new methodology for the forensic analysis of visual sensing information, aiming to identify the algorithms and parameters employed inside various processing modules of a digital device by only using the device output data without breaking the device apart. We propose techniques to estimate the algorithms and parameters employed by important camera components, such as color filter array and color interpolation modules. The estimated interpolation coefficients provide useful features to construct an efficient camera identifier to determine the brand and model from which an image was captured. The results obtained from such component analysis are also useful to examine the similarities between the technologies employed by different camera models to identify potential infringement/licensing and to facilitate studies on technology evolution.}, keywords = {nonintrusive component forensics, authenticating acquisition sources, color filter array, color interpolation modules, content manipulations, patent infringements, intellectual property rights protection, industrial property, image sensors, visual sensors}, isbn = {1556-6013}, doi = {10.1109/TIFS.2006.890307}, author = {Swaminathan,A. and M. Wu and Liu,K. J.R} } @conference {13245, title = {Scalable image-based multi-camera visual surveillance system}, booktitle = {Proceedings. IEEE Conference on Advanced Video and Signal Based Surveillance, 2003.}, year = {2003}, month = {2003/07//}, pages = {205 - 212}, abstract = {We describe the design of a scalable and wide-coverage visual surveillance system. Scalability (the ability to add and remove cameras easily during system operation with minimal overhead and system degradation) is achieved by utilizing only image-based information for camera control. We show that when a pan-tilt-zoom camera pans and tilts, a given image point moves in a circular and a linear trajectory, respectively. We create a scene model using a plan view of the scene. 
The scene model makes it easy for us to handle occlusion prediction and schedule video acquisition tasks subject to visibility constraints. We describe a maximum weight matching algorithm to assign cameras to tasks that meet the visibility constraints. The system is illustrated through both simulations and real video from a 6-camera configuration.}, keywords = {ACQUISITION, algorithm;, camera;, constraints;, feature, hidden, image-based, MATCHING, maximum, multi-camera, occlusion, pan-tilt-zoom, PLAN, prediction;, processing;, removal;, scalable, scheduling;, signal, Surveillance, surveillance;, system;, task, video, view;, visibility, visual, weight}, doi = {10.1109/AVSS.2003.1217923}, author = {Lim,Ser-Nam and Davis, Larry S. and Elgammal,A.} } @article {12133, title = {Experience in implementing a learning software organization}, journal = {Software, IEEE}, volume = {19}, year = {2002}, month = {2002/06//may}, pages = {46 - 49}, abstract = {In an effort to improve software development and acquisition processes and explicitly reuse knowledge from previous software projects, DaimlerChrysler created a software experience center (SEC). The authors report on the challenges the company faced in creating the SEC.}, keywords = {ACQUISITION, center;software, centres;project, DaimlerChrysler;explicit, development, experience, improvement;, improvement;automobile, industry;computer, knowledge, management;software, organization;previous, process, processes;software, projects;software, reuse;learning, software}, isbn = {0740-7459}, doi = {10.1109/MS.2002.1003453}, author = {Schneider,K. and von Hunnius,J.-P. and Basili, Victor R.} } @article {13790, title = {Improved Word-Level Alignment: Injecting Knowledge about MT Divergences}, year = {2002}, month = {2002/02/14/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Word-level alignments of bilingual text (bitexts) are not only an integral part of statistical machine translation models, but are also useful for lexical acquisition, treebank construction, and part-of-speech tagging. The frequent occurrence of divergences, structural differences between languages, presents a great challenge to the alignment task. We resolve some of the most prevalent divergence cases by using syntactic parse information to transform the sentence structure of one language to bear a closer resemblance to that of the other language. In this paper, we show that common divergence types can be found in multiple language pairs (in particular, we focus on English-Spanish and English-Arabic) and systematically identified. We describe our techniques for modifying English parse trees to form resulting sentences that share more similarity with the sentences in the other languages; finally, we present an empirical analysis comparing the complexities of performing word-level alignments with and without divergence handling. 
Our results suggest that divergence handling can improve word-level alignment.}, keywords = {*LEXICOGRAPHY, *MACHINE TRANSLATION, *STATISTICAL ANALYSIS, *WORDS(LANGUAGE), ACQUISITION, ALIGNMENT, EXPERIMENTAL DATA, LANGUAGE, linguistics, MATHEMATICAL MODELS, STATISTICS AND PROBABILITY, TREES}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA458774}, author = {Dorr, Bonnie J and Pearl,Lisa and Hwa,Rebecca and Habash,Nizar} } @article {16720, title = {Word-level Alignment for Multilingual Resource Acquisition}, year = {2002}, month = {2002/04//}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {We present a simple, one-pass word alignment algorithm for parallel text. Our algorithm utilizes synchronous parsing and takes advantage of existing syntactic annotations. In our experiments, the performance of this model is comparable to that of more complicated iterative methods. We discuss the challenges and potential benefits of using the model to train syntactic parsers for new languages.}, keywords = {*LEARNING MACHINES, *MULTILINGUAL RESOURCES, *WORDS(LANGUAGE), ACQUISITION, algorithms, ALIGNMENT, BENEFITS, ITERATIONS, LANGUAGE, linguistics}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA458782}, author = {Lopez,Adam and Nossal,Michael and Hwa,Rebecca and Resnik, Philip} }