@article {20548, title = {Rapid, quantitative therapeutic screening for Alzheimer{\textquoteright}s enzymes enabled by optimal signal transduction with transistors}, journal = {The Analyst}, volume = {145}, year = {2020}, pages = {2925 - 2936}, issn = {0003-2654}, doi = {10.1039/C9AN01804B}, url = {http://xlink.rsc.org/?DOI=C9AN01804B}, author = {Le, Son T. and Morris, Michelle A. and Cardone, Antonio and Guros, Nicholas B. and Klauda, Jeffery B. and Sperling, Brent A. and Richter, Curt A. and Pant, Harish C. and Balijepalli, Arvind} } @article {20327, title = {Recognition of achievement {\textendash} priorities and process}, journal = {Materials Today}, volume = {19}, year = {2016}, month = {Jan-12-2016}, pages = {547 - 549}, issn = {1369-7021}, doi = {10.1016/j.mattod.2016.08.001}, url = {https://linkinghub.elsevier.com/retrieve/pii/S1369702116301055}, author = {Madsen, Lynnette D. and Rita R Colwell} } @article {20326, title = {Reduced Susceptibility to Extended-Spectrum β-Lactams in Vibrio cholerae Isolated in Bangladesh}, journal = {Frontiers in Public Health}, year = {2016}, doi = {10.3389/fpubh.2016.00231}, url = {http://journal.frontiersin.org/article/10.3389/fpubh.2016.00231/full}, author = {Ceccarelli, Daniela and Alam, Munirul and Huq, Anwar and Rita R Colwell} } @article {20344, title = {Rapid Proliferation of Vibrio parahaemolyticus, Vibrio vulnificus, and Vibrio cholerae during Freshwater Flash Floods in French Mediterranean Coastal Lagoons}, journal = {Applied and Environmental Microbiology}, year = {2015}, month = {Jan-11-2015}, pages = {7600 - 7609}, abstract = {Vibrio parahaemolyticus, Vibrio vulnificus, and Vibrio cholerae of the non-O1/non-O139 serotype are present in coastal lagoons of southern France. In these Mediterranean regions, the rivers have long low-flow periods followed by short-duration or flash floods during and after heavy intense rainstorms, particularly at the end of the summer and in autumn. These floods bring large volumes of freshwater into the lagoons, reducing their salinity. Water temperatures recorded during sampling (15 to 24{\textdegree}C) were favorable for the presence and multiplication of vibrios. In autumn 2011, before heavy rainfalls and flash floods, salinities ranged from 31.4 to 36.1{\textperthousand} and concentrations of V. parahaemolyticus, V. vulnificus, and V. cholerae varied from 0 to 1.5 {\texttimes} 10\textsuperscript{3} most probable number (MPN)/liter, 0.7 to 2.1 {\texttimes} 10\textsuperscript{3} MPN/liter, and 0 to 93 MPN/liter, respectively. Following heavy rainstorms that generated severe flash flooding and heavy discharge of freshwater, salinity decreased, reaching 2.2 to 16.4{\textperthousand} within 15 days, depending on the site, with a concomitant increase in Vibrio concentration to ca. 10\textsuperscript{4} MPN/liter. The highest concentrations were reached with salinities between 10 and 20{\textperthousand} for V. parahaemolyticus, 10 and 15{\textperthousand} for V. vulnificus, and 5 and 12{\textperthousand} for V. cholerae. Thus, an abrupt decrease in salinity caused by heavy rainfall and major flooding favored growth of human-pathogenic Vibrio spp. and their proliferation in the Languedocian lagoons. Based on these results, it is recommended that temperature and salinity monitoring be done to predict the presence of these Vibrio spp.
in shellfish-harvesting areas of the lagoons.}, issn = {0099-2240}, doi = {10.1128/AEM.01848-15}, url = {http://aem.asm.org/lookup/doi/10.1128/AEM.01848-15}, author = {Esteves, Kevin and Hervio-Heath, Dominique and Mosser, Thomas and Rodier, Claire and Tournoud, Marie-George and Jumas-Bilak, Estelle and Rita R Colwell and Monfort, Patrick}, editor = {Wommack, K. E.} } @conference {19308, title = {Real-time No-Reference Image Quality Assessment based on Filter Learning}, booktitle = {International Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2013}, author = {Ye,Peng and Kumar,Jayant and Kang,Le and David Doermann} } @conference {19318, title = {A Random Forest System Combination Approach for Error Detection in Digital Dictionaries}, booktitle = {Innovative hybrid approaches to the processing of textual data, EACL 2012 Workshop}, year = {2012}, month = {04/2012}, pages = {78-86}, abstract = {When digitizing a print bilingual dictionary, whether via optical character recognition or manual entry, it is inevitable that errors are introduced into the electronic version that is created. We investigate automating the process of detecting errors in an XML representation of a digitized print dictionary using a hybrid approach that combines rule-based, feature-based, and language model-based methods. We investigate combining methods and show that using random forests is a promising approach. We find that in isolation, unsupervised methods rival the performance of supervised methods. Random forests typically require training data so we investigate how we can apply random forests to combine individual base methods that are themselves unsupervised without requiring large amounts of training data. Experiments reveal empirically that a relatively small amount of data is sufficient and can potentially be further reduced through specific selection criteria. }, author = {Bloodgood,Michael and Ye,Peng and Rodrigues,Paul and Zajic, David and David Doermann} } @article {15067, title = {Recognizing Human Actions by Learning and Matching Shape-Motion Prototype Trees}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {34}, year = {2012}, month = {2012/03//}, pages = {533 - 547}, abstract = {A shape-motion prototype-based approach is introduced for action recognition. The approach represents an action as a sequence of prototypes for efficient and flexible action matching in long video sequences. During training, an action prototype tree is learned in a joint shape and motion space via hierarchical K-means clustering and each training sequence is represented as a labeled prototype sequence; then a look-up table of prototype-to-prototype distances is generated. During testing, based on a joint probability model of the actor location and action prototype, the actor is tracked while a frame-to-prototype correspondence is established by maximizing the joint probability, which is efficiently performed by searching the learned prototype tree; then actions are recognized using dynamic prototype sequence matching. Distance measures used for sequence matching are rapidly obtained by look-up table indexing, which is an order of magnitude faster than brute-force computation of frame-to-frame distances. Our approach enables robust action matching in challenging situations (such as moving cameras, dynamic backgrounds) and allows automatic alignment of action sequences. 
Experimental results demonstrate that our approach achieves recognition rates of 92.86 percent on a large gesture data set (with dynamic backgrounds), 100 percent on the Weizmann action data set, 95.77 percent on the KTH action data set, 88 percent on the UCF sports data set, and 87.27 percent on the CMU action data set.}, keywords = {action prototype, actor location, brute-force computation, CMU action data set, distance measures, dynamic backgrounds, dynamic prototype sequence matching, flexible action matching, frame-to-frame distances, frame-to-prototype correspondence, hierarchical k-means clustering, human action recognition, Image matching, image recognition, Image sequences, joint probability model, joint shape, KTH action data set, large gesture data set, learning, learning (artificial intelligence), look-up table indexing, motion space, moving cameras, pattern clustering, prototype-to-prototype distances, shape-motion prototype-based approach, table lookup, training sequence, UCF sports data set, Video sequences, video signal processing, Weizmann action data set}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2011.147}, author = {Zhuolin Jiang and Zhe Lin and Davis, Larry S.} } @article {19458, title = {Refactoring Network Infrastructure to Improve Manageability: A Case Study of Home Networking}, journal = {SIGCOMM Comput. Commun. Rev.}, volume = {42}, year = {2012}, month = {2012/06//}, pages = {54 - 61}, abstract = {Managing a home network is challenging because the underlying infrastructure is so complex. Existing interfaces either hide or expose the network{\textquoteright}s underlying complexity, but in both cases, the information that is shown does not necessarily allow a user to complete desired tasks. Recent advances in software defined networking, however, permit a redesign of the underlying network and protocols, potentially allowing designers to move complexity further from the user and, in some cases, eliminating it entirely. In this paper, we explore whether the choices of what to make visible to the user in the design of today{\textquoteright}s home network infrastructure, performance, and policies make sense. We also examine whether new capabilities for refactoring the network infrastructure - changing the underlying system without compromising existing functionality - should cause us to revisit some of these choices. 
Our work represents a case study of how co-designing an interface and its underlying infrastructure could ultimately improve interfaces for that infrastructure.}, keywords = {home networking, management, Monitoring, software defined networking}, isbn = {0146-4833}, url = {http://doi.acm.org/10.1145/2317307.2317318}, author = {Marshini Chetty and Feamster, Nick} } @article {15163, title = {Review of efficient secure two-party protocols: techniques and constructions by Carmit Hazay and Yehuda Lindell}, journal = {SIGACT News}, volume = {43}, year = {2012}, month = {2012/03//}, pages = {21 - 23}, isbn = {0163-5700}, doi = {10.1145/2160649.2160656}, url = {http://doi.acm.org/10.1145/2160649.2160656}, author = {Katz, Jonathan} } @inbook {18597, title = {Re-wiring Activity of Malicious Networks}, booktitle = {Passive and Active Measurement}, series = {Lecture Notes in Computer Science}, volume = {7192}, year = {2012}, month = {2012///}, pages = {116 - 125}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper studies the AS-level re-wiring dynamics (changes in the connectivity) of malicious networks. Anecdotal evidence suggests that some malicious ASes that are primarily involved in nefarious activities on the Internet were sequentially de-peered by providers before their final cut-off (as occurred in the well-publicized cases of Atrivo/Intercage). We present the first systematic study of the re-wiring dynamics of malicious ASes. We tracked the ASes that were listed by Hostexploit over the last two years and compared their AS-level re-wiring dynamics with non-reported ASes. Using a publicly available dataset of Customer-Provider (CP) relations in the Internet{\textquoteright}s AS graph, we studied how interconnection between autonomous systems evolves, both for ASes that provide connectivity for attackers and ASes that were not reported as malicious. We find that malicious networks are more aggressive both in forming links with providers and changing their upstream connectivity than other ASes. Our results indicate that the re-wiring dynamics of the networks that host attacks are stable over time, despite the evolving nature of the attacks themselves, which suggests that existing defense mechanisms could benefit from incorporating these features.}, keywords = {Computer science}, isbn = {978-3-642-28536-3}, url = {http://www.springerlink.com/content/95vn833728404877/abstract/}, author = {Konte,Maria and Feamster, Nick}, editor = {Taft,Nina and Ricciato,Fabio} } @article {20385, title = {Role of GbpA protein, an important virulence-related colonization factor, for Vibrio cholerae{\textquoteright}s survival in the aquatic environment}, journal = {Environmental Microbiology Reports}, year = {2012}, month = {Jan-08-2012}, pages = {439 - 445}, abstract = {Vibrio cholerae N-acetyl glucosamine-binding protein A (GbpA) is a chitin binding protein and a virulence factor involved in the colonization of the human intestine. We investigated the distribution and genetic variations of gbpA in 488 V. cholerae strains of environmental and clinical origin, belonging to different serogroups and biotypes. We found that the gene is consistently present and highly conserved, including in an environmental V. cholerae-related strain of ancestral origin. The gene was also consistently expressed in a number of representative V.
cholerae strains cultured in laboratory aquatic microcosms under conditions simulating those found in temperate marine environments. Functional analysis carried out on V. cholerae O1 El Tor N16961 showed that GbpA is not involved in adhesion to inorganic surfaces but promotes interaction with environmental biotic substrates (plankton and bivalve hepatopancreas cells) representing known marine reservoirs or hosts for the bacterium. It is suggested that the ability of GbpA to colonize human intestinal cells most probably originated from its primary function in the aquatic environment.}, doi = {10.1111/j.1758-2229.2012.00356.x}, url = {http://doi.wiley.com/10.1111/j.1758-2229.2012.00356.x}, author = {Stauder, Monica and Huq, Anwar and Pezzati, Elisabetta and Grim, Christopher J. and Ramoino, Paola and Pane, Luigi and Rita R Colwell and Pruzzo, Carla and Vezzulli, Luigi} } @article {12855, title = {Role of Shrimp Chitin in the Ecology of Toxigenic Vibrio cholerae and Cholera Transmission}, journal = {Frontiers in Microbiology}, volume = {2}, pages = {260}, year = {2012}, month = {01/2012}, abstract = {Seasonal plankton blooms correlate with occurrence of cholera in Bangladesh, although the mechanism of how dormant Vibrio cholerae, enduring the interepidemic period in biofilms and plankton, initiates seasonal cholera is not fully understood. In this study, laboratory microcosms prepared with estuarine Mathbaria water (MW) samples supported active growth of toxigenic V. cholerae O1 up to 7 weeks, as opposed to 6 months when microcosms were supplemented with dehydrated shrimp chitin chips (CC) as the single source of nutrient. Bacterial counting and detection of wbe and ctxA genes were done employing culture, direct fluorescent antibody (DFA) assay, and multiplex-polymerase chain reaction methods. In the MW microcosm, the aqueous phase became clear as the non-culturable cells settled, whereas the aqueous phase of the MW{\textendash}CC microcosm became turbid from bacterial growth stimulated by chitin. Bacterial chitin degradation and biofilm formation proceeded from an initial steady state to a gradually declining bacterial culturable count. V. cholerae within the microenvironments of chitin and chitin-associated biofilms remained metabolically active even in a highly acidic environment without losing either viability or virulence. It is concluded that the abundance of chitin that occurs during blooms plays an important role in the aquatic life cycle of V. cholerae and, ultimately, in the seasonal transmission of cholera.}, isbn = {1664-302X}, doi = {10.3389/fmicb.2011.00260}, author = {Nahar,Shamsun and Sultana,Marzia and Naser,M. Niamul and Nair,Gopinath B. and Watanabe,Haruo and Ohnishi,Makoto and Yamamoto,Shouji and Endtz,Hubert and Cravioto,Alejandro and Sack,R. Bradley and Hasan,Nur A. and Sadique,Abdus and Huq,Anwar and Rita R Colwell and Alam,Munirul} } @article {18712, title = {Rpn1 and Rpn2 coordinate ubiquitin processing factors at the proteasome}, journal = {Journal of Biological Chemistry}, year = {2012}, month = {2012/02/08/}, abstract = {Substrates tagged with (poly)ubiquitin for degradation can be targeted directly to the 26S proteasome where they are proteolysed. Independently, ubiquitin-conjugates may also be delivered by bivalent shuttles. The majority of shuttles attach to the proteasome through a ubiquitin-like domain (UBL) while anchoring cargo at a C-terminal polyubiquitin-binding domain(s).
We found that two shuttles of this class, Rad23 and Dsk2, dock at two different receptors embedded within a single subunit of the 19S proteasome regulatory particle (RP), Rpn1. Their association/dissociation constants and affinities for Rpn1 are similar. In contrast, another UBL-containing protein, the deubiquitinase Ubp6, is also anchored by Rpn1, yet dissociates more slowly, thus behaving as a sometimes proteasome subunit distinct from transiently-associated shuttles. Two neighboring subunits, Rpn10 and Rpn13, show a marked preference for polyubiquitin over UBLs. Rpn10 attaches to the central solenoid portion of Rpn1, although this association is stabilized by the presence of a third subunit, Rpn2. Rpn13 binds directly to the C-terminal portion of Rpn2. These intrinsic polyubiquitin receptors may compete with substrate shuttles for their polyubiquitin-conjugates, thereby aiding release of the emptied shuttles. By binding multiple ubiquitin-processing factors simultaneously, Rpn1 is uniquely suited to coordinate substrate recruitment, deubiquitination, and movement towards the catalytic core. The broad range of affinities for ubiquitin, ubiquitin-like, and non-ubiquitin signals by adjacent yet non-overlapping sites all within the Base illustrates a hub of activity that coordinates the intricate relay of substrates within the proteasome, and consequently influences substrate residency time and commitment to degradation.}, keywords = {deubiquitination, Proteasome, solenoid, Surface plasmon resonance (SPR), ubiquitin, Ubiquitin-dependent protease}, isbn = {0021-9258, 1083-351X}, doi = {10.1074/jbc.M111.316323}, url = {http://www.jbc.org/content/early/2012/02/08/jbc.M111.316323}, author = {Rosenzweig,Rina and Bronner,Vered and Zhang,Daoning and Fushman, David and Glickman,Michael H.} } @article {13847, title = {Rapid understanding of scientific paper collections: integrating statistics, text analysis, and visualization}, journal = {University of Maryland, Human-Computer Interaction Lab Tech Report HCIL-2011}, year = {2011}, month = {2011///}, abstract = {Keeping up with rapidly growing research fields, especially when there are multiple interdisciplinary sources, requires substantial effort for researchers, program managers, or venture capital investors. Current theories and tools are directed at finding a paper or website, not gaining an understanding of the key papers, authors, controversies, and hypotheses. This report presents an effort to integrate statistics, text analytics, and visualization in a multiple coordinated window environment that supports exploration. Our prototype system, Action Science Explorer (ASE), provides an environment for demonstrating principles of coordination and conducting iterative usability tests of them with interested and knowledgeable users. We developed an understanding of the value of reference management, statistics, citation context extraction, natural language summarization for single and multiple documents, filters to interactively select key papers, and network visualization to see citation patterns and identify clusters. The three-phase usability study guided our revisions to ASE and led us to improve the testing methods.}, author = {Dunne,C. and Shneiderman, Ben and Gove,R. and Klavans,J.
and Dorr, Bonnie J.} } @article {17340, title = {Realizing the value of social media requires innovative computing research}, journal = {Communications of the ACM}, volume = {54}, year = {2011}, month = {2011/09//}, pages = {34 - 37}, abstract = {How social media are expanding traditional research and development topics for computer and information scientists.}, isbn = {0001-0782}, doi = {10.1145/1995376.1995389}, url = {http://doi.acm.org/10.1145/1995376.1995389}, author = {Shneiderman, Ben and Preece,Jennifer and Pirolli,Peter} } @article {18783, title = {Real-time dynamics simulation of unmanned sea surface vehicle for virtual environments}, journal = {Journal of Computing and Information Science in Engineering}, volume = {11}, year = {2011}, month = {2011///}, pages = {031005 - 031005}, author = {Thakur,A. and Gupta,S.K.} } @conference {18891, title = {Real-Time Planning for Covering an Initially-Unknown Spatial Environment}, year = {2011}, month = {2011///}, abstract = {We consider the problem of planning, on the fly, a path whereby a robotic vehicle will cover every point in an initially unknown spatial environment. We describe four strategies (Iterated WaveFront, Greedy-Scan, Delayed Greedy-Scan and Closest-First Scan) for generating cost-effective coverage plans in real time for unknown environments. We give theorems showing the correctness of our planning strategies. Our experiments demonstrate that some of these strategies work significantly better than others, and that the best ones work very well; e.g., in environments having an average of 64,000 locations for the robot to cover, the best strategy returned plans with less than 6\% redundant coverage, and took only an average of 0.1 milliseconds per action.}, url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS11/paper/download/2566/2992}, author = {Shivashankar,V. and Jain, R. and Kuter,U. and Nau, Dana S.} } @article {18793, title = {Recent advances and future challenges in automated manufacturing planning}, journal = {Journal of Computing and Information Science in Engineering}, volume = {11}, year = {2011}, month = {2011///}, pages = {021006 - 021006}, author = {Bourne,D. and Corney,J. and Gupta,S.K.} } @conference {12453, title = {Recent advances in age and height estimation from still images and video}, booktitle = {2011 IEEE International Conference on Automatic Face \& Gesture Recognition and Workshops (FG 2011)}, year = {2011}, month = {2011/03/21/25}, pages = {91 - 96}, publisher = {IEEE}, organization = {IEEE}, abstract = {Soft-biometrics such as gender, age, race, etc. have been found to be useful characterizations that enable fast pre-filtering and organization of data for biometric applications. In this paper, we focus on two useful soft-biometrics - age and height. We discuss their utility and the factors involved in their estimation from images and videos. In this context, we highlight the role that geometric constraints such as multiview-geometry and shape-space geometry play. Then, we present methods based on these geometric constraints for age and height estimation.
These methods provide a principled means of fusing image-formation models, multi-view geometric constraints, and robust statistical methods for inference.}, keywords = {age estimation, biometrics (access control), Calibration, Estimation, Geometry, height estimation, HUMANS, image fusion, image-formation model fusion, Legged locomotion, multiview-geometry, Robustness, SHAPE, shape-space geometry, soft-biometrics, statistical analysis, statistical methods, video signal processing}, isbn = {978-1-4244-9140-7}, doi = {10.1109/FG.2011.5771367}, author = {Chellappa, Rama and Turaga,P.} } @conference {16428, title = {Recommendations in social media for brand monitoring}, booktitle = {Proceedings of the fifth ACM conference on Recommender systems}, series = {RecSys {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {345 - 348}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We present a recommendation system for social media that draws upon monitoring and prediction methods. We use historical posts on some focal topic or historical links to a focal blog channel to recommend a set of authors to follow. Such a system would be useful for brand managers interested in monitoring conversations about their products. Our recommendations are based on a prediction system that trains a ranking Support Vector Machine (RSVM) using multiple features including the content of a post, similarity between posts, links between posts and/or blog channels, and links to external websites. We solve two problems, Future Author Prediction (FAP) and Future Link Prediction (FLP), and apply the prediction outcome to make recommendations. Using an extensive experimental evaluation on a blog dataset, we demonstrate the quality and value of our recommendations.}, keywords = {blog, brand monitoring, recommendation, social media}, isbn = {978-1-4503-0683-6}, doi = {10.1145/2043932.2043999}, url = {http://doi.acm.org/10.1145/2043932.2043999}, author = {Wu,Shanchan and Rand, William and Raschid, Louiqa} } @article {16038, title = {Reducing Missed Laboratory Results: Defining Temporal Responsibility, Generating User Interfaces for Test Process Tracking, and Retrospective Analyses to Identify Problems}, journal = {AMIA Annual Symposium Proceedings}, volume = {2011}, year = {2011}, month = {2011///}, pages = {1382 - 1391}, abstract = {Researchers have conducted numerous case studies reporting the details on how laboratory test results of patients were missed by the ordering medical providers. Given the importance of timely test results in an outpatient setting, there is limited discussion of electronic versions of test result management tools to help clinicians and medical staff with this complex process. This paper presents three ideas to reduce missed results with a system that facilitates tracking laboratory tests from order to completion as well as during follow-up: (1) define a workflow management model that clarifies responsible agents and associated time frame, (2) generate a user interface for tracking that could eventually be integrated into current electronic health record (EHR) systems, (3) help identify common problems in past orders through retrospective analyses.}, isbn = {1942-597X}, author = {Tarkan,Sureyya and Plaisant, Catherine and Shneiderman, Ben and Hettinger,A.
Zachary} } @conference {17338, title = {Re-engineering health care with information technology: the role of computer-human interaction}, booktitle = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {451 - 454}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {There is a critical, nation-wide need to improve health care and its cost. Health information technology has great promise that is yet to be realized. In this panel, four noted experts will discuss key issues that should drive health IT, and the challenges for the CHI community to play a leading role.}, keywords = {clinical workflow, electronic medical records, health information technology visualization, healthcare informatics, participatory design, usability standards \& evaluation}, doi = {10.1145/1979482.1979490}, url = {http://doi.acm.org/10.1145/1979482.1979490}, author = {Butler,Keith and Payne,Thomas and Shneiderman, Ben and Brennan,Patricia and Zhang,Jiajie} } @article {14639, title = {Regulation of Lung Endoderm Progenitor Cell Behavior by miR302/367}, journal = {Development}, volume = {138}, year = {2011}, month = {2011/04/01/}, pages = {1235 - 1245}, abstract = {The temporal and spatial control of organ-specific endoderm progenitor development is poorly understood. miRNAs affect cell function by regulating programmatic changes in protein expression levels. We show that the miR302/367 cluster is a target of the transcription factor Gata6 in mouse lung endoderm and regulates multiple aspects of early lung endoderm progenitor development. miR302/367 is expressed at early stages of lung development, but its levels decline rapidly as development proceeds. Gain- and loss-of-function studies show that altering miR302/367 expression disrupts the balance of lung endoderm progenitor proliferation and differentiation, as well as apical-basal polarity. Increased miR302/367 expression results in the formation of an undifferentiated multi-layered lung endoderm, whereas loss of miR302/367 activity results in decreased proliferation and enhanced lung endoderm differentiation. miR302/367 coordinates the balance between proliferation and differentiation, in part, through direct regulation of Rbl2 and Cdkn1a, whereas apical-basal polarity is controlled by regulation of Tiam1 and Lis1. Thus, miR302/367 directs lung endoderm development by coordinating multiple aspects of progenitor cell behavior, including proliferation, differentiation and apical-basal polarity.}, keywords = {Lung, MicroRNA, mouse, Progenitor}, isbn = {0950-1991, 1477-9129}, doi = {10.1242/dev.061762}, url = {http://dev.biologists.org/content/138/7/1235}, author = {Tian,Ying and Zhang,Yuzhen and Hurd,Laura and Hannenhalli, Sridhar and Liu,Feiyan and Lu,Min Min and Morrisey,Edward E.} } @article {12434, title = {Remote identification of faces: Problems, prospects, and progress}, journal = {Pattern Recognition Letters}, year = {2011}, month = {2011/12//}, abstract = {Face recognition in unconstrained acquisition conditions is one of the most challenging problems that has been actively researched in recent years. It is well known that many state-of-the-art still face recognition algorithms perform well when constrained (frontal, well illuminated, high-resolution, sharp, and full) face images are acquired.
However, their performance degrades significantly when the test images contain variations that are not present in the training images. In this paper, we highlight some of the key issues in remote face recognition. We define remote face recognition as one where faces are several tens of meters (10{\textendash}250 m) from the cameras. We then describe a remote face database which has been acquired in an unconstrained outdoor maritime environment. Recognition performance of a subset of existing still image-based face recognition algorithms is evaluated on the remote face data set. Further, we define the remote re-identification problem as matching a subject at one location with candidate sets acquired at a different location and over time in remote conditions. We provide preliminary experimental results on remote re-identification. It is demonstrated that in addition to applying a good classification algorithm, finding features that are robust to variations mentioned above and developing statistical models which can account for these variations are very important for remote face recognition.}, keywords = {Blur, illumination, low-resolution, pose variation, Re-identification, Remote face recognition}, isbn = {0167-8655}, doi = {10.1016/j.patrec.2011.11.020}, url = {http://www.sciencedirect.com/science/article/pii/S0167865511004107}, author = {Chellappa, Rama and Ni,Jie and Patel, Vishal M.} } @article {16033, title = {Research Directions in Data Wrangling: Visualizations and Transformations for Usable and Credible Data}, journal = {Information Visualization}, volume = {10}, year = {2011}, month = {2011/10/01/}, pages = {271 - 288}, abstract = {In spite of advances in technologies for working with data, analysts still spend an inordinate amount of time diagnosing data quality issues and manipulating data into a usable form. This process of {\textquoteleft}data wrangling{\textquoteright} often constitutes the most tedious and time-consuming aspect of analysis. Though data cleaning and integration are longstanding issues in the database community, relatively little research has explored how interactive visualization can advance the state of the art. In this article, we review the challenges and opportunities associated with addressing data quality issues. We argue that analysts might more effectively wrangle data through new interactive systems that integrate data verification, transformation, and visualization.
We identify a number of outstanding research questions, including how appropriate visual encodings can facilitate apprehension of missing data, discrepant values, and uncertainty; how interactive visualizations might facilitate data transform specification; and how recorded provenance and social interaction might enable wider reuse, verification, and modification of data transformations.}, keywords = {data cleaning, data quality, data transformation, Uncertainty, Visualization}, isbn = {1473-8716, 1473-8724}, doi = {10.1177/1473871611415994}, url = {http://ivi.sagepub.com/content/10/4/271}, author = {Kandel,Sean and Heer,Jeffrey and Plaisant, Catherine and Kennedy,Jessie and Van Ham,Frank and Riche,Nathalie Henry and Weaver,Chris and Lee,Bongshin and Brodbeck,Dominique and Buono,Paolo} } @article {15993, title = {Robotson Crusoe{\textendash}or{\textendash}What Is Common Sense?}, journal = {Logical Formalizations of Commonsense Reasoning {\textemdash} Papers from the AAAI 2011 Spring Symposium}, year = {2011}, month = {2011///}, abstract = {I will present a perspective on human-level commonsense behavior (HLCSB) that differs from commonsense reasoning (CSR) as the latter is often characterized in AI. I will argue that HLCSB is not far beyond the reach of current technology, and that it also provides solutions to some of the problems that plague CSR, most notably the brittleness problem. A key is the judicious use of metacognitive monitoring and control, especially in the area of automated learning.}, author = {Perlis, Don} } @article {17920, title = {A robust and rotationally invariant local surface descriptor with applications to non-local mesh processing}, journal = {Graphical Models}, volume = {73}, year = {2011}, month = {2011/09//}, pages = {231 - 242}, abstract = {In recent years, we have witnessed a striking increase in research concerning how to describe a meshed surface. These descriptors are commonly used to encode mesh properties or guide mesh processing, not to augment existing computations by replication. In this work, we first define a robust surface descriptor based on a local height field representation, and present a transformation via the extraction of Zernike moments. Unlike previous work, our local surface descriptor is innately rotationally invariant. Second, equipped with this novel descriptor, we present SAMPLE {\textendash} similarity augmented mesh processing using local exemplars {\textendash} a method which uses feature neighbourhoods to propagate mesh processing done in one part of the mesh, the local exemplar, to many others. Finally, we show that SAMPLE can be used in a number of applications, such as detail transfer and parameterization.}, keywords = {Local descriptors, Non-local mesh processing, shape analysis, Similarity processing}, isbn = {1524-0703}, doi = {10.1016/j.gmod.2011.05.002}, url = {http://www.sciencedirect.com/science/article/pii/S1524070311000166}, author = {Maximo, A. and Patro,R. and Varshney, Amitabh and Farias, R.} } @article {12861, title = {Role of Zooplankton Diversity in Vibrio Cholerae Population Dynamics and in the Incidence of Cholera in the Bangladesh Sundarbans}, journal = {Applied and Environmental Microbiology}, volume = {77}, year = {2011}, month = {09/2011}, pages = {6125 - 6132}, abstract = {Vibrio cholerae, a bacterium autochthonous to the aquatic environment, is the causative agent of cholera, a severe watery, life-threatening diarrheal disease occurring predominantly in developing countries. V. 
cholerae, including both serogroups O1 and O139, is found in association with crustacean zooplankton, mainly copepods, and notably in ponds, rivers, and estuarine systems globally. The incidence of cholera and occurrence of pathogenic V. cholerae strains with zooplankton were studied in two areas of Bangladesh: Bakerganj and Mathbaria. Chitinous zooplankton communities of several bodies of water were analyzed in order to understand the interaction of the zooplankton population composition with the population dynamics of pathogenic V. cholerae and incidence of cholera. Two dominant zooplankton groups were found to be consistently associated with detection of V. cholerae and/or occurrence of cholera cases, namely, rotifers and cladocerans, in addition to copepods. Local differences indicate there are subtle ecological factors that can influence interactions between V. cholerae, its plankton hosts, and the incidence of cholera.}, isbn = {0099-2240, 1098-5336}, doi = {10.1128/AEM.01472-10}, url = {http://aem.asm.org/content/77/17/6125}, author = {De Magny,Guillaume Constantin and Mozumder,Pronob K. and Grim,Christopher J. and Hasan,Nur A. and Naser,M. Niamul and Alam,Munirul and Sack,R. Bradley and Huq,Anwar and Rita R Colwell} } @article {15178, title = {Round-optimal password-based authenticated key exchange}, journal = {Theory of Cryptography}, year = {2011}, month = {2011///}, pages = {293 - 310}, abstract = {We show a general framework for constructing password-based authenticated key exchange protocols with optimal round complexity {\textemdash} one message per party, sent simultaneously {\textemdash} in the standard model, assuming a common reference string. When our framework is instantiated using bilinear-map cryptosystems, the resulting protocol is also (reasonably) efficient. Somewhat surprisingly, our framework can be adapted to give protocols in the standard model that are universally composable while still using only one (simultaneous) round.}, doi = {10.1007/978-3-642-19571-6_18}, author = {Katz, Jonathan and Vaikuntanathan,V.} } @article {13950, title = {Random sampling for estimating the performance of fast summations}, journal = {Technical Reports of the Computer Science Department}, year = {2010}, month = {2010/10/18/}, abstract = {Summation of functions of N source points evaluated at M target points occurs commonly in many applications. To scale these approaches for large datasets, many fast algorithms have been proposed. In this technical report, we propose a Chernoff bound based efficient approach to test the performance of fast summation algorithms, providing a probabilistic accuracy. We further validate and use our approach in separate comparisons.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/10976}, author = {Srinivasan,Balaji Vasan and Duraiswami, Ramani} } @article {13398, title = {Ranking continuous probabilistic datasets}, journal = {Proc. VLDB Endow.}, volume = {3}, year = {2010}, month = {2010/09//}, pages = {638 - 649}, abstract = {Ranking is a fundamental operation in data analysis and decision support, and plays an even more crucial role if the dataset being explored exhibits uncertainty. This has led to much work in understanding how to rank uncertain datasets in recent years. In this paper, we address the problem of ranking when the tuple scores are uncertain, and the uncertainty is captured using continuous probability distributions (e.g. Gaussian distributions).
We present a comprehensive solution to compute the values of a parameterized ranking function (PRF) [18] for arbitrary continuous probability distributions (and thus rank the uncertain dataset); PRF can be used to simulate or approximate many other ranking functions proposed in prior work. We develop exact polynomial time algorithms for some continuous probability distribution classes, and efficient approximation schemes with provable guarantees for arbitrary probability distributions. Our algorithms can also be used for exact or approximate evaluation of k-nearest neighbor queries over uncertain objects, whose positions are modeled using continuous probability distributions. Our experimental evaluation over several datasets illustrates the effectiveness of our approach at efficiently ranking uncertain datasets with continuous attribute uncertainty.}, isbn = {2150-8097}, url = {http://dl.acm.org/citation.cfm?id=1920841.1920923}, author = {Li,Jian and Deshpande, Amol} } @conference {15302, title = {Ranking under temporal constraints}, booktitle = {Proceedings of the 19th ACM international conference on Information and knowledge management}, series = {CIKM {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {79 - 88}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper introduces the notion of temporally constrained ranked retrieval, which, given a query and a time constraint, produces the best possible ranked list within the specified time limit. Naturally, more time should translate into better results, but the ranking algorithm should always produce some results. This property is desirable from a number of perspectives: to cope with diverse users and information needs, as well as to better manage system load and variance in query execution times. We propose two temporally constrained ranking algorithms based on a class of probabilistic prediction models that can naturally incorporate efficiency constraints: one that makes independent feature selection decisions, and the other that makes joint feature selection decisions. Experiments on three different test collections show that both ranking algorithms are able to satisfy imposed time constraints, although the joint model outperforms the independent model in being able to deliver more effective results, especially under tight time constraints, due to its ability to capture feature dependencies.}, keywords = {efficiency, Learning to rank, Linear Models}, isbn = {978-1-4503-0099-5}, doi = {10.1145/1871437.1871452}, url = {http://doi.acm.org/10.1145/1871437.1871452}, author = {Wang,Lidan and Metzler,Donald and Jimmy Lin} } @conference {19291, title = {Rapid prototyping for digital signal processing systems using Parameterized Synchronous Dataflow graphs}, booktitle = {2010 21st IEEE International Symposium on Rapid System Prototyping (RSP)}, year = {2010}, month = {2010}, pages = {1 - 7}, abstract = {Parameterized Synchronous Dataflow (PSDF) has been used previously for abstract scheduling and as a model for architecting embedded software and FPGA implementations. PSDF has been shown to be attractive for these purposes due to its support for flexible dynamic reconfiguration, and efficient quasi-static scheduling. To apply PSDF techniques more deeply into the design flow, support for comprehensive functional simulation and efficient hardware mapping is important. 
By building on the DIF (Dataflow Interchange Format), which is a design language and associated software package for developing and experimenting with dataflow-based design techniques for signal processing systems, we have developed a tool for functional simulation of PSDF specifications. This simulation tool allows designers to model applications in PSDF and simulate their functionality, including use of the dynamic parameter reconfiguration capabilities offered by PSDF. Based on this simulation tool, we also present a systematic design methodology for applying PSDF to the design and implementation of digital signal processing systems, with emphasis on FPGA-based systems for signal processing. We demonstrate capabilities for rapid and accurate prototyping offered by our proposed design methodology, along with its novel support for PSDF-based FPGA system implementation.}, keywords = {abstract scheduling, Computational modeling, Computer architecture, data flow graphs, dataflow based design, dataflow interchange format, design flow, design language, Digital signal processing, digital signal processing systems, dynamic parameter reconfiguration, Dynamic scheduling, efficient hardware mapping, efficient quasistatic scheduling, Embedded software, embedded systems, Field programmable gate arrays, flexible dynamic reconfiguration, FPGA based systems, FPGA implementations, functional simulation, Hardware, parameterized synchronous dataflow graphs, rapid prototyping, Schedules, scheduling, semantics, simulation tool, software package, systematic design methodology}, author = {Wu, Hsiang-Huang and Kee, Hojin and Sane, N. and Plishker,W. and Bhattacharyya, Shuvra S.} } @article {13399, title = {Read-once functions and query evaluation in probabilistic databases}, journal = {Proc. VLDB Endow.}, volume = {3}, year = {2010}, month = {2010/09//}, pages = {1068 - 1079}, abstract = {Probabilistic databases hold promise of being a viable means for large-scale uncertainty management, increasingly needed in a number of real world applications domains. However, query evaluation in probabilistic databases remains a computational challenge. Prior work on efficient exact query evaluation in probabilistic databases has largely concentrated on query-centric formulations (e.g., safe plans, hierarchical queries), in that, they only consider characteristics of the query and not the data in the database. It is easy to construct examples where a supposedly hard query run on an appropriate database gives rise to a tractable query evaluation problem. In this paper, we develop efficient query evaluation techniques that leverage characteristics of both the query and the data in the database. We focus on tuple-independent databases where the query evaluation problem is equivalent to computing marginal probabilities of Boolean formulas associated with the result tuples. This latter task is easy if the Boolean formulas can be factorized into a form that has every variable appearing at most once (called read-once). However, a naive approach that directly uses previously developed Boolean formula factorization algorithms is inefficient, because those algorithms require the input formulas to be in the disjunctive normal form (DNF). We instead develop novel, more efficient factorization algorithms that directly construct the read-once expression for a result tuple Boolean formula (if one exists), for a large subclass of queries (specifically, conjunctive queries without self-joins). 
We empirically demonstrate that (1) our proposed techniques are orders of magnitude faster than generic inference algorithms for queries where the result Boolean formulas can be factorized into read-once expressions, and (2) for the special case of hierarchical queries, they rival the efficiency of prior techniques specifically designed to handle such queries.}, isbn = {2150-8097}, url = {http://dl.acm.org/citation.cfm?id=1920841.1920975}, author = {Sen,Prithviraj and Deshpande, Amol and Getoor, Lise} } @article {15667, title = {Realistic Compression of Kinetic Sensor Data}, volume = {CS-TR-4959}, year = {2010}, month = {2010/06/06/}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {We introduce a realistic analysis for a framework for storing and processing kinetic data observed by sensor networks. The massive data sets generated by these networks motivate a significant need for compression. We are interested in the kinetic data generated by a finite set of objects moving through space. Our previously introduced framework and accompanying compression algorithm assumed a given set of sensors, each of which continuously observes these moving objects in its surrounding region. The model relies purely on sensor observations; it allows points to move freely and requires no advance notification of motion plans. Here, we extend the initial theoretical analysis of this framework and compression scheme to a more realistic setting. We extend the current understanding of empirical entropy to introduce definitions for joint empirical entropy, conditional empirical entropy, and empirical independence. We also introduce a notion of limited independence between the outputs of the sensors in the system. We show that, even with this notion of limited independence and in both the statistical and empirical settings, the previously introduced compression algorithm achieves an encoding size on the order of the optimal.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/10114}, author = {Friedler,Sorelle A. and Mount, Dave} } @conference {12473, title = {Recognizing offensive strategies from football videos}, booktitle = {Image Processing (ICIP), 2010 17th IEEE International Conference on}, year = {2010}, month = {2010/09//}, pages = {4585 - 4588}, abstract = {We address the problem of recognizing offensive play strategies from American football play videos. Specifically, we propose a probabilistic model which describes the generative process of an observed football play and takes into account practical issues in real football videos, such as difficulty in identifying offensive players, view changes, and tracking errors. In particular, we exploit the geometric properties of nonlinear spaces of involved variables and design statistical models on these manifolds. Then recognition is performed via an {\textquoteleft}analysis-by-synthesis{\textquoteright} technique.
Experiments on a newly established dataset of American football videos demonstrate the effectiveness of the approach.}, keywords = {American football play videos, analysis-by-synthesis technique, design statistical models, geometric properties, image recognition, nonlinear spaces, offensive players identification, offensive strategies recognition, statistical analysis, tracking errors, video signal processing, view changes}, doi = {10.1109/ICIP.2010.5652192}, author = {Ruonan Li and Chellappa, Rama} } @inbook {14638, title = {Regulating the Regulators: Modulators of Transcription Factor Activity}, booktitle = {Computational Biology of Transcription Factor Binding}, series = {Methods in Molecular Biology}, volume = {674}, year = {2010}, month = {2010///}, pages = {297 - 312}, publisher = {Humana Press}, organization = {Humana Press}, abstract = {Gene transcription is largely regulated by DNA-binding transcription factors (TFs). However, the TF activity itself is modulated via, among other things, post-translational modifications (PTMs) by specific modification enzymes in response to cellular stimuli. TF-PTMs thus serve as {\textquotedblleft}molecular switchboards{\textquotedblright} that map upstream signaling events to the downstream transcriptional events. An important long-term goal is to obtain a genome-wide map of {\textquotedblleft}regulatory triplets{\textquotedblright} consisting of a TF, target gene, and a modulator gene that specifically modulates the regulation of the target gene by the TF. A variety of genome-wide data sets can be exploited by computational methods to obtain a rough map of regulatory triplets, which can guide directed experiments. However, a prerequisite to developing such computational tools is a systematic catalog of known instances of regulatory triplets. We first describe PTM-Switchboard, a recent database that stores triplets of genes such that the ability of one gene (the TF) to regulate a target gene is dependent on one or more PTMs catalyzed by a third gene, the modifying enzyme. We also review current computational approaches to infer regulatory triplets from genome-wide data sets and conclude with a discussion of potential future research. PTM-Switchboard is accessible at http://cagr.pcbi.upenn.edu/PTMswitchboard/}, isbn = {978-1-60761-854-6}, url = {http://dx.doi.org/10.1007/978-1-60761-854-6_19}, author = {Everett,Logan and Hansen,Matthew and Hannenhalli, Sridhar}, editor = {Ladunga,Istvan} } @conference {15493, title = {Repairing GUI Test Suites Using a Genetic Algorithm}, booktitle = {Software Testing, Verification and Validation (ICST), 2010 Third International Conference on}, year = {2010}, month = {2010/04//}, pages = {245 - 254}, abstract = {Recent advances in automated functional testing of Graphical User Interfaces (GUIs) rely on deriving graph models that approximate all possible sequences of events that may be executed on the GUI, and then use the graphs to generate test cases (event sequences) that achieve a specified coverage goal. However, because these models are only approximations of the actual event flows, the generated test cases may suffer from problems of infeasibility, i.e., some events may not be available for execution, causing the test case to terminate prematurely. In this paper, we develop a method to automatically repair GUI test suites, generating new test cases that are feasible.
We use a genetic algorithm to evolve new test cases that increase our test suite{\textquoteright}s coverage while avoiding infeasible sequences. We experiment with this algorithm on a set of synthetic programs containing different types of constraints and for test sequences of varying lengths. Our results suggest that we can generate new test cases to cover most of the feasible coverage and that the genetic algorithm outperforms a random algorithm trying to achieve the same goal in almost all cases.}, keywords = {automated functional testing, genetic algorithm, Genetic algorithms, graph model, graphical user interface, Graphical user interfaces, GUI test suite, program testing, random algorithm, synthetic program, test case}, doi = {10.1109/ICST.2010.39}, author = {Huang,Si and Cohen,M. B and Memon, Atif M.} } @article {14373, title = {AI Theory and Practice: A Discussion on Hard Challenges and Opportunities Ahead}, journal = {AI Magazine}, volume = {31}, year = {2010}, month = {2010///}, pages = {87 - 87}, author = {Horvitz,E. and Getoor, Lise and Guestrin,C. and Hendler,J. and Kautz,H. and Konstan,J. and Subramanian,D. and Wellman,M.} } @conference {19593, title = {Resolving and Exploiting the k-CFA Paradox: Illuminating Functional vs. Object-oriented Program Analysis}, booktitle = {PLDI {\textquoteright}10 Proceedings of the 2010 ACM SIGPLAN Conference on Programming Language Design and Implementation}, series = {PLDI {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {305 - 315}, publisher = {ACM}, organization = {ACM}, abstract = {Low-level program analysis is a fundamental problem, taking the shape of "flow analysis" in functional languages and "points-to" analysis in imperative and object-oriented languages. Despite the similarities, the vocabulary and results in the two communities remain largely distinct, with limited cross-understanding. One of the few links is Shivers{\textquoteright}s k-CFA work, which has advanced the concept of "context-sensitive analysis" and is widely known in both communities. Recent results indicate that the relationship between the functional and object-oriented incarnations of k-CFA is not as well understood as thought. Van Horn and Mairson proved k-CFA for k >= 1 to be EXPTIME-complete; hence, no polynomial-time algorithm can exist. Yet, there are several polynomial-time formulations of context-sensitive points-to analyses in object-oriented languages. Thus, it seems that functional k-CFA may actually be a profoundly different analysis from object-oriented k-CFA. We resolve this paradox by showing that the exact same specification of k-CFA is polynomial-time for object-oriented languages yet exponential-time for functional ones: objects and closures are subtly different, in a way that interacts crucially with context-sensitivity and complexity.
This illumination leads to an immediate payoff: by projecting the object-oriented treatment of objects onto closures, we derive a polynomial-time hierarchy of context-sensitive CFAs for functional programs.}, keywords = {control-flow analysis, functional, k-cfa, m-cfa, object-oriented, pointer analysis, static analysis}, isbn = {978-1-4503-0019-3}, url = {http://doi.acm.org/10.1145/1806596.1806631}, author = {Might, Matthew and Smaragdakis, Yannis and David Van Horn} } @conference {17984, title = {Resource-Aware Compiler Prefetching for Many-Cores}, booktitle = {Parallel and Distributed Computing (ISPDC), 2010 Ninth International Symposium on}, year = {2010}, month = {2010/07//}, pages = {133 - 140}, abstract = {Super-scalar, out-of-order processors that can have tens of read and write requests in the execution window place significant demands on Memory Level Parallelism (MLP). Multi- and many-cores with shared parallel caches further increase MLP demand. Current cache hierarchies, however, have been unable to keep up with this trend, with modern designs allowing only 4-16 concurrent cache misses. This disconnect is exacerbated by recent highly parallel architectures (e.g. GPUs) where power and area per-core budget favor lighter cores with fewer resources. Support for hardware and software prefetch increases MLP pressure since these techniques overlap multiple memory requests with existing computation. In this paper, we propose and evaluate a novel Resource-Aware Prefetching (RAP) compiler algorithm that is aware of the number of simultaneous prefetches supported, and optimized for the same. We show that in situations where not enough resources are available to issue prefetch instructions for all references in a loop, it is more beneficial to decrease the prefetch distance and prefetch for as many references as possible, rather than use a fixed prefetch distance and skip prefetching for some references, as in current approaches. We implemented our algorithm in a GCC-derived compiler and evaluated its performance using an emerging fine-grained many-core architecture. Our results show that the RAP algorithm outperforms a well-known loop prefetching algorithm by up to 40.15\% and the state-of-the-art GCC implementation by up to 34.79\%. Moreover, we compare the RAP algorithm with a simple hardware prefetching mechanism, and show improvements of up to 24.61\%.}, keywords = {fine-grained many-core architecture, GCC-derived compiler, hardware-software prefetch, loop prefetching algorithm, memory level parallelism, multicore architectures, multiprocessing systems, optimising compilers, parallel architectures, parallel memories, parallel processors, resource aware prefetching, shared parallel caches, storage management, super-scalar out-of-order processor}, doi = {10.1109/ISPDC.2010.16}, author = {Caragea,G.C. and Tzannes,A. and Keceli,F. and Barua,R. and Vishkin, Uzi} } @article {18711, title = {Reversible Post-Translational Carboxylation Modulates the Enzymatic Activity of N-Acetyl-l-ornithine Transcarbamylase}, journal = {Biochemistry}, volume = {49}, year = {2010}, month = {2010///}, pages = {6887 - 6895}, abstract = {N-Acetyl-l-ornithine transcarbamylase (AOTCase), rather than ornithine transcarbamylase (OTCase), is the essential carbamylase enzyme in the arginine biosynthesis of several plant and human pathogens. The specificity of this unique enzyme provides a potential target for controlling the spread of these pathogens.
Recently, several crystal structures of AOTCase from Xanthomonas campestris (xc) have been determined. In these structures, an unexplained electron density at the tip of the Lys302 side chain was observed. Using 13C NMR spectroscopy, we show herein that Lys302 is post-translationally carboxylated. The structure of wild-type AOTCase in a complex with the bisubstrate analogue Nδ-(phosphonoacetyl)-Nα-acetyl-l-ornithine (PALAO) indicates that the carboxyl group on Lys302 forms a strong hydrogen bonding network with surrounding active site residues, Lys252, Ser253, His293, and Glu92 from the adjacent subunit either directly or via a water molecule. Furthermore, the carboxyl group is involved in binding N-acetyl-l-ornithine via a water molecule. Activity assays with the wild-type enzyme and several mutants demonstrate that the post-translational modification of lysine 302 has an important role in catalysis.}, isbn = {0006-2960}, doi = {10.1021/bi1007386}, url = {http://dx.doi.org/10.1021/bi1007386}, author = {Li,Yongdong and Yu,Xiaolin and Ho,Jeremy and Fushman, David and Allewell,Norma M. and Tuchman,Mendel and Shi,Dashuang} } @conference {18803, title = {A Review of Bird-Inspired Flapping Wing Miniature Air Vehicle Designs}, volume = {2}, year = {2010}, month = {2010///}, pages = {57 - 67}, publisher = {ASME}, organization = {ASME}, address = {Montreal, Quebec, Canada}, abstract = {Physical and aerodynamic characteristics of the bird in flight may offer benefits over typical propeller or rotor driven miniature air vehicle (MAV) locomotion designs in certain types of scenarios. A number of research groups and companies have developed flapping wing vehicles that attempt to harness these benefits. The purpose of this paper is to report different types of flapping wing designs and compare their salient characteristics. For each category, advantages and disadvantages will be discussed. The discussion presented will be limited to miniature-sized flapping wing air vehicles, defined as 10{\textendash}100 grams total weight. The discussion will be focused primarily on ornithopters which have performed at least one successful test flight.
Additionally, this paper is intended to provide a representation of the field of current technology, rather than providing a comprehensive listing of all possible designs. This paper will familiarize a newcomer to the field with existing designs and their distinguishing features. By studying existing designs, future designers will be able to adopt features from other successful designs. This paper also summarizes the design challenges associated with the further advancement of the field and deploying flapping wing vehicles in practice.}, isbn = {978-0-7918-4410-6}, doi = {10.1115/DETC2010-28513}, url = {http://link.aip.org/link/ASMECP/v2010/i44106/p57/s1\&Agg=doi}, author = {Gerdes,John W. and Gupta, Satyandra K. and Wilkerson,Stephen A.} } @article {16821, title = {Roads Belong in Databases}, journal = {Data Engineering}, year = {2010}, month = {2010///}, pages = {4 - 4}, author = {Samet, Hanan} } @article {13088, title = {A robust and scalable approach to face identification}, journal = {Computer Vision{\textendash}ECCV 2010}, year = {2010}, month = {2010///}, pages = {476 - 489}, abstract = {The problem of face identification has received significant attention over the years. For a given probe face, the goal of face identification is to match this unknown face against a gallery of known people. Due to the availability of large amounts of data acquired in a variety of conditions, techniques that are both robust to uncontrolled acquisition conditions and scalable to large gallery sizes, which may need to be incrementally built, are challenges. In this work we tackle both problems. Initially, we propose a novel approach to robust face identification based on Partial Least Squares (PLS) to perform multi-channel feature weighting. Then, we extend the method to a tree-based discriminative structure aiming at reducing the time required to evaluate novel probe samples. The method is evaluated through experiments on FERET and FRGC datasets. In most of the comparisons our method outperforms state-of-the-art face identification techniques. Furthermore, our method presents scalability to large datasets.}, author = {Schwartz,W. and Guo,H. and Davis, Larry S.} } @article {12475, title = {Robust Height Estimation of Moving Objects From Uncalibrated Videos}, journal = {IEEE Transactions on Image Processing}, volume = {19}, year = {2010}, month = {2010/08//}, pages = {2221 - 2232}, abstract = {This paper presents an approach for video metrology. From videos acquired by an uncalibrated stationary camera, we first recover the vanishing line and the vertical point of the scene based upon tracking moving objects that primarily lie on a ground plane. Using geometric properties of moving objects, a probabilistic model is constructed for simultaneously grouping trajectories and estimating vanishing points. Then we apply a single view mensuration algorithm to each of the frames to obtain height measurements. We finally fuse the multiframe measurements using the least median of squares (LMedS) as a robust cost function and the Robbins-Monro stochastic approximation (RMSA) technique. This method enables less human supervision, more flexibility and improved robustness. From the uncertainty analysis, we conclude that the method with auto-calibration is robust in practice.
Results are shown based upon realistic tracking data from a variety of scenes.}, keywords = {algorithms, Biometry, Calibration, EM algorithm, geometric properties, Geometry, Image Enhancement, Image Interpretation, Computer-Assisted, Imaging, Three-Dimensional, least median of squares, least squares approximations, MOTION, motion information, multiframe measurements, Pattern Recognition, Automated, Reproducibility of results, Robbins-Monro stochastic approximation, robust height estimation, Sensitivity and Specificity, Signal Processing, Computer-Assisted, stochastic approximation, Subtraction Technique, tracking data, uncalibrated stationary camera, uncalibrated videos, uncertainty analysis, vanishing point, video metrology, Video Recording, video signal processing}, isbn = {1057-7149}, doi = {10.1109/TIP.2010.2046368}, author = {Jie Shao and Zhou,S. K and Chellapa, Rama} } @conference {12492, title = {Robust regression using sparse learning for high dimensional parameter estimation problems}, booktitle = {Acoustics Speech and Signal Processing (ICASSP), 2010 IEEE International Conference on}, year = {2010}, month = {2010/03//}, pages = {3846 - 3849}, abstract = {Algorithms such as Least Median of Squares (LMedS) and Random Sample Consensus (RANSAC) have been very successful for low-dimensional robust regression problems. However, the combinatorial nature of these algorithms makes them practically unusable for high-dimensional applications. In this paper, we introduce algorithms that have cubic time complexity in the dimension of the problem, which make them computationally efficient for high-dimensional problems. We formulate the robust regression problem by projecting the dependent variable onto the null space of the independent variables which receives significant contributions only from the outliers. We then identify the outliers using sparse representation/learning based algorithms. Under certain conditions, that follow from the theory of sparse representation, these polynomial algorithms can accurately solve the robust regression problem which is, in general, a combinatorial problem. We present experimental results that demonstrate the efficacy of the proposed algorithms. We also analyze the intrinsic parameter space of robust regression and identify an efficient and accurate class of algorithms for different operating conditions. An application to facial age estimation is presented.}, keywords = {random sample consensus; robust regression problem; cubic time complexity; least median of squares; parameter estimation problem; polynomial algorithm; combinatorial problem; sparse learning; sparse representation; computational complexity; regression analysis; Estimation}, doi = {10.1109/ICASSP.2010.5495830}, author = {Mitra, K. and Veeraraghavan,A. and Chellapa, Rama} } @conference {12482, title = {Robust RVM regression using sparse outlier model}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2010 IEEE Conference on}, year = {2010}, month = {2010/06//}, pages = {1887 - 1894}, abstract = {Kernel regression techniques such as Relevance Vector Machine (RVM) regression, Support Vector Regression and Gaussian processes are widely used for solving many computer vision problems such as age, head pose, 3D human pose and lighting estimation. However, the presence of outliers in the training dataset makes the estimates from these regression techniques unreliable.
In this paper, we propose robust versions of the RVM regression that can handle outliers in the training dataset. We decompose the noise term in the RVM formulation into a (sparse) outlier noise term and a Gaussian noise term. We then estimate the outlier noise along with the model parameters. We present two approaches for solving this estimation problem: (1) a Bayesian approach, which essentially follows the RVM framework and (2) an optimization approach based on Basis Pursuit Denoising. In the Bayesian approach, the robust RVM problem essentially becomes a bigger RVM problem with the advantage that it can be solved efficiently by a fast algorithm. Empirical evaluations and real experiments on image de-noising and age estimation demonstrate the better performance of the robust RVM algorithms over that of the RVM regression.}, keywords = {robust RVM regression; sparse outlier model; Bayesian approach; basis pursuit denoising; Gaussian noise; relevance vector machine; image denoising; lighting estimation; 3D human pose; computer vision; regression analysis}, doi = {10.1109/CVPR.2010.5539861}, author = {Mitra, K. and Veeraraghavan,A. and Chellapa, Rama} } @conference {12490, title = {The role of geometry in age estimation}, booktitle = {2010 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)}, year = {2010}, month = {2010/03/14/19}, pages = {946 - 949}, publisher = {IEEE}, organization = {IEEE}, abstract = {Understanding and modeling of aging in human faces is an important problem in many real-world applications such as biometrics, authentication, and synthesis. In this paper, we consider the role of geometric attributes of faces, as described by a set of landmark points on the face, in age perception. Towards this end, we show that the space of landmarks can be interpreted as a Grassmann manifold. Then the problem of age estimation is posed as a problem of function estimation on the manifold. The warping of an average face to a given face is quantified as a velocity vector that transforms the average to a given face along a smooth geodesic in unit-time. This deformation is then shown to contain important information about the age of the face. We show in experiments that exploiting geometric cues in a principled manner provides comparable performance to several systems that utilize both geometric and textural cues. We show results on age estimation using the standard FG-Net dataset and a passport dataset which illustrate the effectiveness of the approach.}, keywords = {age estimation, Aging, Biometrics, computational geometry, Face, Face Geometry, Facial animation, Feature extraction, function estimation problem, geometric face attributes, Geometry, Grassmann manifold, human face modeling, human face understanding, HUMANS, Mouth, regression, Regression analysis, SHAPE, Solid modeling, solid modelling, velocity vector}, isbn = {978-1-4244-4295-9}, doi = {10.1109/ICASSP.2010.5495292}, author = {Turaga,P. and Biswas,S.
and Chellapa, Rama} } @conference {17636, title = {On random sampling auctions for digital goods}, booktitle = {Proceedings of the 10th ACM conference on Electronic commerce}, series = {EC {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {187 - 196}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In the context of auctions for digital goods, an interesting Random Sampling Optimal Price auction (RSOP) has been proposed by Goldberg, Hartline and Wright; this leads to a truthful mechanism. Since random sampling is a popular approach for auctions that aims to maximize the seller{\textquoteright}s revenue, this method has been analyzed further by Feige, Flaxman, Hartline and Kleinberg, who have shown that it is 15-competitive in the worst case -- which is substantially better than the previously proved bounds but still far from the conjectured competitive ratio of 4. In this paper, we prove that RSOP is indeed 4-competitive for a large class of instances in which the number λ of bidders receiving the item at the optimal uniform price is at least 6. We also show that it is 4.68-competitive for the small class of remaining instances, thus leaving a negligible gap between the lower and upper bound. Furthermore, we develop a robust version of RSOP -- one in which the seller{\textquoteright}s revenue is, with high probability, not much below its mean -- when the above parameter λ grows large. We employ a mix of probabilistic techniques and dynamic programming to compute these bounds.}, keywords = {auction, mechanism design, random sampling}, isbn = {978-1-60558-458-4}, doi = {10.1145/1566374.1566402}, url = {http://doi.acm.org/10.1145/1566374.1566402}, author = {Alaei,Saeed and Malekian,Azarakhsh and Srinivasan, Aravind} } @article {12515, title = {Rate-Invariant Recognition of Humans and Their Activities}, journal = {Image Processing, IEEE Transactions on}, volume = {18}, year = {2009}, month = {2009/06//}, pages = {1326 - 1339}, abstract = {Pattern recognition in video is a challenging task because of the multitude of spatio-temporal variations that occur in different videos capturing the exact same event. While traditional pattern-theoretic approaches account for the spatial changes that occur due to lighting and pose, very little has been done to address the effect of temporal rate changes in the executions of an event. In this paper, we provide a systematic model-based approach to learn the nature of such temporal variations (time warps) while simultaneously allowing for the spatial variations in the descriptors. We illustrate our approach for the problem of action recognition and provide experimental justification for the importance of accounting for rate variations in action recognition. The model is composed of a nominal activity trajectory and a function space capturing the probability distribution of activity-specific time warping transformations. We use the square-root parameterization of time warps to derive geodesics, distance measures, and probability distributions on the space of time warping functions. We then design a Bayesian algorithm which treats the execution rate function as a nuisance variable and integrates it out using Monte Carlo sampling, to generate estimates of class posteriors. This approach allows us to learn the space of time warps for each activity while simultaneously capturing other intra- and interclass variations.
Next, we discuss a special case of this approach which assumes a uniform distribution on the space of time warping functions and show how computationally efficient inference algorithms may be derived for this special case. We discuss the relative advantages and disadvantages of both approaches and show their efficacy using experiments on gait-based person identification and activity recognition.}, keywords = {Bayesian algorithm; Monte Carlo sampling; action recognition; activity-specific time warping transformations; computationally efficient inference algorithms; distance measures; execution rate function; function space; gait-based person identification; geodesics; Models, Statistical; Monte Carlo Method; Movement; Pattern Recognition, Automated; Video Recording}, isbn = {1057-7149}, doi = {10.1109/TIP.2009.2017143}, author = {Veeraraghavan,A. and Srivastava, A. and Roy-Chowdhury, A.K. and Chellapa, Rama} } @article {17421, title = {The reader-to-leader framework: Motivating technology-mediated social participation}, journal = {AIS Transactions on Human-Computer Interaction}, volume = {1}, year = {2009}, month = {2009///}, pages = {13 - 32}, abstract = {Billions of people participate in online social activities. Most users participate as readers of discussion boards, searchers of blogposts, or viewers of photos. A fraction of users become contributors of user-generated content by writing consumer product reviews, uploading travel photos, or expressing political opinions. Some users move beyond such individual efforts to become collaborators, forming tightly connected groups with lively discussions whose outcome might be a Wikipedia article or a carefully edited YouTube video. A small fraction of users becomes leaders, who participate in governance by setting and upholding policies, repairing vandalized materials, or mentoring novices. We analyze these activities and offer the Reader-to-Leader Framework with the goal of helping researchers, designers, and managers understand what motivates technology-mediated social participation. This will enable them to improve interface design and social support for their companies, government agencies, and non-governmental organizations. These improvements could reduce the number of failed projects, while accelerating the application of social media for national priorities such as healthcare, energy sustainability, emergency response, economic development, education, and more.}, author = {Preece,J. and Shneiderman, Ben} } @conference {12412, title = {Reading tea leaves: How humans interpret topic models}, booktitle = {Proceedings of the 23rd Annual Conference on Neural Information Processing Systems}, volume = {31}, year = {2009}, month = {2009///}, abstract = {Probabilistic topic models are a popular tool for the unsupervised analysis of text, providing both a predictive model of future text and a latent topic representation of the corpus. Practitioners typically assume that the latent space is semantically meaningful. It is used to check models, summarize the corpus, and guide exploration of its contents. However, whether the latent space is interpretable is in need of quantitative evaluation. In this paper, we present new quantitative methods for measuring semantic meaning in inferred topics. We back these measures with large-scale user studies, showing that they capture aspects of the model that are undetected by previous measures of model quality based on held-out likelihood.
Surprisingly, topic models which perform better on held-out likelihood may infer less semantically meaningful topics.}, author = {Chang,J. and Jordan Boyd-Graber and Gerrish,S. and Wang,C. and Blei,D.M.} } @conference {14251, title = {Real-time shape retrieval for robotics using skip Tri-Grams}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems, 2009. IROS 2009}, year = {2009}, month = {2009/10/10/15}, pages = {4731 - 4738}, publisher = {IEEE}, organization = {IEEE}, abstract = {The real time requirement is an additional constraint on many intelligent applications in robotics, such as shape recognition and retrieval using a mobile robot platform. In this paper, we present a scalable approach for efficiently retrieving closed contour shapes. The contour of an object is represented by piecewise linear segments. A skip Tri-Gram is obtained by selecting three segments in the clockwise order while allowing a constant number of segments to be {\textquotedblleft}skipped{\textquotedblright} in between. The main idea is to use skip Tri-Grams of the segments to implicitly encode the distant dependency of the shape. All skip Tri-Grams are used for efficiently retrieving closed contour shapes without pairwise matching feature points from two shapes. The retrieval is at least an order of magnitude faster than other state-of-the-art algorithms. We score 80\% in the Bullseye retrieval test on the whole MPEG 7 shape dataset. We further test the algorithm using a mobile robot platform in an indoor environment. 8 objects are used for testing from different viewing directions, and we achieve 82\% accuracy.}, keywords = {Bullseye retrieval test, Clocks, closed contour shape retrieval, Image retrieval, Image segmentation, Indexing, Information retrieval, Intelligent robots, Jacobian matrices, mobile robot, Mobile robots, MPEG 7 shape dataset, piecewise linear segments, Piecewise linear techniques, Real time systems, real-time shape retrieval, robot vision, SHAPE, shape recognition, shape representation, skip Tri-Grams, Testing}, isbn = {978-1-4244-3803-7}, doi = {10.1109/IROS.2009.5354738}, author = {Li,Yi and Bitsakos,K. and Ferm{\"u}ller, Cornelia and Aloimonos, J.} } @conference {12512, title = {Recognition of quantized still face images}, booktitle = {Biometrics: Theory, Applications, and Systems, 2009. BTAS {\textquoteright}09. IEEE 3rd International Conference on}, year = {2009}, month = {2009/09//}, pages = {1 - 6}, abstract = {In applications such as document understanding, only binary face images may be available as inputs to a face recognition (FR) algorithm. In this paper, we investigate the effects of the number of grey levels on PCA, multiple exemplar discriminant analysis (MEDA) and the elastic bunch graph matching (EBGM) FR algorithms. The inputs to these FR algorithms are quantized images (binary images or images with a small number of grey levels) modified by distance and Box-Cox transforms. The performances of PCA and MEDA algorithms are at 87.66\% for images in FRGC version 1 experiment 1 after they are thresholded and transformed while the EBGM algorithm achieves only 37.5\%. In many document understanding applications, it is also required to verify a degraded low-quality image against a high-quality image, both of which are from the same source.
For this problem, the performances of PCA and MEDA are stable when the images were degraded by noise, downsampling or different thresholding parameters.}, keywords = {face recognition; FR algorithms; quantized images; binary images; grey levels; multiple exemplar discriminant analysis; principal component analysis; PCA; elastic bunch graph matching; distance transforms; Box-Cox transforms; document understanding; quantisation (signal)}, doi = {10.1109/BTAS.2009.5339030}, author = {Tao Wu and Chellapa, Rama} } @conference {13100, title = {Recognizing actions by shape-motion prototype trees}, booktitle = {Computer Vision, 2009 IEEE 12th International Conference on}, year = {2009}, month = {2009/10/29/2}, pages = {444 - 451}, abstract = {A prototype-based approach is introduced for action recognition. The approach represents an action as a sequence of prototypes for efficient and flexible action matching in long video sequences. During training, first, an action prototype tree is learned in a joint shape and motion space via hierarchical k-means clustering; then a lookup table of prototype-to-prototype distances is generated. During testing, based on a joint likelihood model of the actor location and action prototype, the actor is tracked while a frame-to-prototype correspondence is established by maximizing the joint likelihood, which is efficiently performed by searching the learned prototype tree; then actions are recognized using dynamic prototype sequence matching. Distance matrices used for sequence matching are rapidly obtained by look-up table indexing, which is an order of magnitude faster than brute-force computation of frame-to-frame distances. Our approach enables robust action matching in very challenging situations (such as moving cameras, dynamic backgrounds) and allows automatic alignment of action sequences. Experimental results demonstrate that our approach achieves recognition rates of 91.07\% on a large gesture dataset (with dynamic backgrounds), 100\% on the Weizmann action dataset and 95.77\% on the KTH action dataset.}, doi = {10.1109/ICCV.2009.5459184}, author = {Zhe Lin and Zhuolin Jiang and Davis, Larry S.} } @conference {12521, title = {Recognizing coordinated multi-object activities using a dynamic event ensemble model}, booktitle = {Acoustics, Speech and Signal Processing, 2009. ICASSP 2009. IEEE International Conference on}, year = {2009}, month = {2009/04//}, pages = {3541 - 3544}, abstract = {While video-based activity analysis and recognition has received broad attention, the existing body of work mostly deals with the single object/person case. Modeling involving multiple objects and recognition of coordinated group activities, present in a variety of applications such as surveillance, sports, biological records, and so on, is the main focus of this paper. Unlike earlier attempts which model the complex spatial temporal constraints among different activities of multiple objects with a parametric Bayesian network, we propose a dynamic {\textquoteleft}event ensemble{\textquoteright} framework as a data-driven strategy to characterize the group motion pattern without employing any specific domain knowledge. In particular, we exploit the Riemannian geometric property of the set of ensemble description functions and develop a compact representation for group activities on the ensemble manifold. An appropriate classifier on the manifold is then designed for recognizing new activities.
Experiments on football play recognition demonstrate the effectiveness of the framework.}, keywords = {coordinated multiobject activities; dynamic event ensemble model; ensemble description functions; ensemble manifold; Riemannian geometric property; classifier; data-driven strategy; football play recognition; parametric Bayesian network; video-based activity analysis; Bayes methods; geometry; image classification; video signal processing}, doi = {10.1109/ICASSP.2009.4960390}, author = {Ruonan Li and Chellapa, Rama} } @conference {12773, title = {Recovering Views of Inter-System Interaction Behaviors}, booktitle = {Reverse Engineering, Working Conference on}, year = {2009}, month = {2009///}, pages = {53 - 61}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {This paper presents an approach for recovering application-level views of the interaction behaviors between systems that communicate via networks. Rather than illustrating a single behavior, a sequence diagram is constructed that describes the characteristics of multiple combined behaviors. The approach has several properties that make it particularly suitable for analyzing heterogeneous systems. First, since the interactions are retrieved from observing the network communication, our technique can be applied to systems that are implemented in different languages and run on different platforms. Second, it does not require the availability or modification of source code. After the behaviors are extracted, we employ methods to merge multiple observed behaviors to a single sequence diagram that illustrates the overall behavior. The contributions of this paper are a technique for observing and processing the network communication to derive a model of the behavior. Furthermore, it describes a series of model transformations to construct a sequence diagram view of all observed behaviors.}, keywords = {distributed systems, program understanding, reliability}, doi = {http://doi.ieeecomputersociety.org/10.1109/WCRE.2009.34}, author = {Ackermann,Christopher and Lindvall,Mikael and Cleaveland, Rance} } @article {15116, title = {Reducing complexity assumptions for statistically-hiding commitment}, journal = {Journal of cryptology}, volume = {22}, year = {2009}, month = {2009///}, pages = {283 - 310}, abstract = {We revisit the following question: what are the minimal assumptions needed to construct statistically-hiding commitment schemes? Naor et al. show how to construct such schemes based on any one-way permutation. We improve upon this by showing a construction based on any approximable preimage-size one-way function. These are one-way functions for which it is possible to efficiently approximate the number of pre-images of a given output. A special case is the class of regular one-way functions where all points in the image of the function have the same (known) number of pre-images. We also prove two additional results related to statistically-hiding commitment. First, we prove a (folklore) parallel composition theorem showing, roughly speaking, that the statistical hiding property of any such commitment scheme is amplified exponentially when multiple independent parallel executions of the scheme are carried out. Second, we show a compiler which transforms any commitment scheme which is statistically hiding against an honest-but-curious receiver into one which is statistically hiding even against a malicious receiver.
}, doi = {10.1007/s00145-007-9012-8}, author = {Haitner,I. and Horvitz,O. and Katz, Jonathan and Koo,C. Y and Morselli,R. and Shaltiel,R.} } @article {14368, title = {Reflect and correct: A misclassification prediction approach to active inference}, journal = {ACM Transactions on Knowledge Discovery from Data (TKDD)}, volume = {3}, year = {2009}, month = {2009/12//}, pages = {20:1{\textendash}20:32}, abstract = {Information diffusion, viral marketing, graph-based semi-supervised learning, and collective classification all attempt to model and exploit the relationships among nodes in a network to improve the performance of node labeling algorithms. However, sometimes the advantage of exploiting the relationships can become a disadvantage. Simple models like label propagation and iterative classification can aggravate a misclassification by propagating mistakes in the network, while more complex models that define and optimize a global objective function, such as Markov random fields and graph mincuts, can misclassify a set of nodes jointly. This problem can be mitigated if the classification system is allowed to ask for the correct labels for a few of the nodes during inference. However, determining the optimal set of labels to acquire is intractable under relatively general assumptions, which forces us to resort to approximate and heuristic techniques. We describe three such techniques in this article. The first one is based on directly approximating the value of the objective function of label acquisition and greedily acquiring the label that provides the most improvement. The second technique is a simple technique based on the analogy we draw between viral marketing and label acquisition. Finally, we propose a method, which we refer to as reflect and correct, that can learn and predict when the classification system is likely to make mistakes and suggests acquisitions to correct those mistakes. We empirically show on a variety of synthetic and real-world datasets that the reflect and correct method significantly outperforms the other two techniques, as well as other approaches based on network structural measures such as node degree and network clustering.}, keywords = {active inference, collective classification, information diffusion, label acquisition, viral marketing}, isbn = {1556-4681}, doi = {10.1145/1631162.1631168}, url = {http://doi.acm.org/10.1145/1631162.1631168}, author = {Bilgic,Mustafa and Getoor, Lise} } @conference {18467, title = {Regularized HRTF fitting using spherical harmonics}, booktitle = {IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, 2009. WASPAA {\textquoteright}09}, year = {2009}, month = {2009/10//}, pages = {257 - 260}, publisher = {IEEE}, organization = {IEEE}, abstract = {By the Helmholtz reciprocity principle, the head-related transfer function (HRTF) is equivalent to an acoustic field created by a transmitter placed at the ear location. Therefore, it can be represented as a spherical harmonics spectrum - a weighted sum of spherical harmonics. Such representations are useful in theoretical and computational analysis. Many different (often severely undersampled) grids are used for HRTF measurement, making the spectral reconstruction difficult.
In this paper, two methods of obtaining the spectrum are presented and analyzed both on synthetic (ground-truth data available) and real HRTF measurements.}, keywords = {Acoustic applications, acoustic field, Acoustic fields, acoustic intensity measurement, Acoustic measurements, acoustic signal processing, Acoustic testing, acoustic waves, array signal processing, audio acoustics, circular arrays, computational analysis, Ear, ear location, head-related transfer function, Helmholtz reciprocity principle, HRTF, HRTF fitting, Loudspeakers, Microphones, Position measurement, signal reconstruction, spatial audio, spectral reconstruction, spherical harmonics, Transfer functions}, isbn = {978-1-4244-3678-1}, doi = {10.1109/ASPAA.2009.5346521}, author = {Zotkin,Dmitry N and Duraiswami, Ramani and Gumerov, Nail A.} } @conference {15856, title = {Replication and automation of expert judgments: Information engineering in legal e-discovery}, booktitle = {IEEE International Conference on Systems, Man and Cybernetics, 2009. SMC 2009}, year = {2009}, month = {2009/10/11/14}, pages = {102 - 107}, publisher = {IEEE}, organization = {IEEE}, abstract = {The retrieval of digital evidence responsive to discovery requests in civil litigation, known in the United States as {\textquotedblleft}e-discovery,{\textquotedblright} presents several important and understudied conditions and challenges. Among the most important of these are (i) that the definition of responsiveness that governs the search effort can be learned and made explicit through effective interaction with the responding party, (ii) that the governing definition of responsiveness is generally complex, deriving both from considerations of subject-matter relevance and from considerations of litigation strategy, and (iii) that the result of the search effort is a set (rather than a ranked list) of documents, and sometimes a quite large set, that is turned over to the requesting party and that the responding party certifies to be an accurate and complete response to the request. This paper describes the design of an {\textquotedblleft}interactive task{\textquotedblright} for the Text Retrieval Conference{\textquoteright}s legal track that had the evaluation of the effectiveness of e-discovery applications at the {\textquotedblleft}responsive review{\textquotedblright} task as its goal. Notable features of the 2008 interactive task were high-fidelity human-system task modeling, authority control for the definition of {\textquotedblleft}responsiveness,{\textquotedblright} and relatively deep sampling for estimation of type 1 and type 2 errors (expressed as {\textquotedblleft}precision{\textquotedblright} and {\textquotedblleft}recall{\textquotedblright}).
The paper presents a critical assessment of the strengths and weaknesses of the evaluation design from the perspectives of reliability, reusability, and cost-benefit tradeoffs.}, keywords = {authorisation, authority control, Automation, civil litigation, CYBERNETICS, Delay, digital evidence retrieval, discovery request, Educational institutions, expert judgment automation, Human computer interaction, Human-machine cooperation and systems, human-system task modeling, information engineering, Information retrieval, interactive task, Law, law administration, legal e-discovery, Legal factors, PROBES, Production, Protocols, search effort, Search methods, text analysis, text retrieval conference legal track, United States, USA Councils, User modeling}, isbn = {978-1-4244-2793-2}, doi = {10.1109/ICSMC.2009.5346118}, author = {Hedin,B. and Oard, Douglas} } @article {14640, title = {Resistin gene variation is associated with systemic inflammation but not plasma adipokine levels, metabolic syndrome or coronary atherosclerosis in nondiabetic Caucasians}, journal = {Clinical Endocrinology}, volume = {70}, year = {2009}, month = {2009/05/01/}, pages = {698 - 705}, abstract = {Objective Resistin causes insulin resistance and diabetes in mice whereas in humans it is linked to inflammation and atherosclerosis. Few human genetic studies of resistin in inflammation and atherosclerosis have been performed. We hypothesized that the {\textendash}420C>G putative gain-of-function resistin variant would be associated with inflammatory markers and atherosclerosis but not with metabolic syndrome or adipokines in humans.Design and methods We examined the association of three resistin polymorphisms, {\textendash}852A>G, {\textendash}420C>G and +157C>T, and related haplotypes with plasma resistin, cytokines, C-reactive protein (CRP), adipokines, plasma lipoproteins, metabolic syndrome and coronary artery calcification (CAC) in nondiabetic Caucasians (n~=~851). Results Resistin levels were higher, dose-dependently, with the {\textendash}420G allele (CC 5{\textperiodcentered}9~{\textpm}~2{\textperiodcentered}7~ng/ml, GC 6{\textperiodcentered}5~{\textpm}~4{\textperiodcentered}0~ng/ml and GG 7{\textperiodcentered}2~{\textpm}~4{\textperiodcentered}8~ng/ml, trend P~=~0{\textperiodcentered}04) after age and gender adjustment [fold higher for GC~+~GG vs. CC; 1{\textperiodcentered}07~(1{\textperiodcentered}00{\textendash}1{\textperiodcentered}15), P~<~0{\textperiodcentered}05)]. The {\textendash}852A>G single nucleotide polymorphism (SNP) was associated with higher soluble tumour necrosis factor-receptor~2 (sol-TNFR2) levels in fully adjusted models [1{\textperiodcentered}06~(95\%~CI 1{\textperiodcentered}01{\textendash}1{\textperiodcentered}11), P~=~0{\textperiodcentered}01)]. The estimated resistin haplotype (GGT) was associated with sol-TNFR2 (P~=~0{\textperiodcentered}04) and the AGT haplotype was related to CRP (P~=~0{\textperiodcentered}04) in the fully adjusted models. Resistin SNPs and haplotypes were not associated with body mass index (BMI), fasting glucose, insulin resistance, metabolic syndrome, adipokines or CAC scores. Conclusions Despite modest associations with plasma resistin and inflammatory biomarkers, resistin 5' variants were not associated with metabolic parameters or coronary calcification. This suggests that resistin is an inflammatory cytokine in humans but has little influence on adiposity, metabolic syndrome or atherosclerosis. 
}, isbn = {1365-2265}, doi = {10.1111/j.1365-2265.2008.03375.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1365-2265.2008.03375.x/abstract;jsessionid=3EAA174C4993EDEE1267334CDA36E086.d01t01}, author = {Qasim,Atif N and Metkus,Thomas S and Tadesse,Mahlet and Lehrke,Michael and Restine,Stephanie and Wolfe,Megan L and Hannenhalli, Sridhar and Cappola,Thomas and Rader,Daniel J and Reilly,Muredach P} } @conference {18598, title = {Resonance: dynamic access control for enterprise networks}, booktitle = {Proceedings of the 1st ACM workshop on Research on enterprise networking}, series = {WREN {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {11 - 18}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Enterprise network security is typically reactive, and it relies heavily on host security and middleboxes. This approach creates complicated interactions between protocols and systems that can cause incorrect behavior and slow response to attacks. We argue that imbuing the network layer with mechanisms for dynamic access control can remedy these ills. We propose Resonance, a system for securing enterprise networks, where the network elements themselves enforce dynamic access control policies based on both flow-level information and real-time alerts. Resonance uses programmable switches to manipulate traffic at lower layers; these switches take actions (e.g., dropping or redirecting traffic) to enforce high-level security policies based on input from both higher-level security policies and distributed monitoring and inference systems. We describe the design of Resonance, apply it to Georgia Tech{\textquoteright}s network access control system, show how it can both overcome the current shortcomings and provide new security functions, describe our proposed deployment, and discuss open research questions.}, keywords = {Access control, enterprise networks, programmable networks}, isbn = {978-1-60558-443-0}, doi = {10.1145/1592681.1592684}, url = {http://doi.acm.org/10.1145/1592681.1592684}, author = {Nayak,Ankur Kumar and Reimers,Alex and Feamster, Nick and Clark,Russ} } @article {15274, title = {Revealing biological modules via graph summarization}, journal = {Journal of Computational Biology}, volume = {16}, year = {2009}, month = {2009///}, pages = {253 - 264}, author = {Navlakha,S. and Schatz,M. C and Kingsford, Carl} } @article {16828, title = {Review of Spatial Databases and Geographic Information Systems}, journal = {Proceedings of the 17th Italian Symposium on Advanced Database Systems (SEBD{\textquoteright}09)}, volume = {23}, year = {2009}, month = {2009/06//}, pages = {26 - 26}, author = {Samet, Hanan} } @conference {17653, title = {Rigorous Probabilistic Trust-Inference with Applications to Clustering}, booktitle = {IEEE/WIC/ACM International Joint Conferences on Web Intelligence and Intelligent Agent Technologies, 2009. WI-IAT {\textquoteright}09}, volume = {1}, year = {2009}, month = {2009/09/15/18}, pages = {655 - 658}, publisher = {IEEE}, organization = {IEEE}, abstract = {The World Wide Web has transformed into an environment where users both produce and consume information. In order to judge the validity of information, it is important to know how trustworthy its creator is. Since no individual can have direct knowledge of more than a small fraction of information authors, methods for inferring trust are needed.
We propose a new trust inference scheme based on the idea that a trust network can be viewed as a random graph, and a chain of trust as a path in that graph. In addition to having an intuitive interpretation, our algorithm has several advantages, noteworthy among which is the creation of an inferred trust-metric space where the shorter the distance between two people, the higher their trust. Metric spaces have rigorous algorithms for clustering, visualization, and related problems, any of which is directly applicable to our results.}, keywords = {Clustering algorithms, Conferences, Educational institutions, Extraterrestrial measurements, Inference algorithms, Intelligent agent, random graphs, Social network services, trust inference, Visualization, Voting, Web sites}, isbn = {978-0-7695-3801-3}, doi = {10.1109/WI-IAT.2009.109}, author = {DuBois,Thomas and Golbeck,Jennifer and Srinivasan, Aravind} } @article {15119, title = {Ring signatures: Stronger definitions, and constructions without random oracles}, journal = {Journal of Cryptology}, volume = {22}, year = {2009}, month = {2009///}, pages = {114 - 138}, abstract = {Ring signatures, first introduced by Rivest, Shamir, and Tauman, enable a user to sign a message so that a ring of possible signers (of which the user is a member) is identified, without revealing exactly which member of that ring actually generated the signature. In contrast to group signatures, ring signatures are completely {\textquotedblleft}ad-hoc{\textquotedblright} and do not require any central authority or coordination among the various users (indeed, users do not even need to be aware of each other); furthermore, ring signature schemes grant users fine-grained control over the level of anonymity associated with any particular signature. This paper has two main areas of focus. First, we examine previous definitions of security for ring signature schemes and suggest that most of these prior definitions are too weak, in the sense that they do not take into account certain realistic attacks. We propose new definitions of anonymity and unforgeability which address these threats, and give separation results proving that our new notions are strictly stronger than previous ones. Second, we show the first constructions of ring signature schemes in the standard model. One scheme is based on generic assumptions and satisfies our strongest definitions of security. Two additional schemes are more efficient, but achieve weaker security guarantees and more limited functionality.}, doi = {10.1007/s00145-007-9011-9}, author = {Bender,A. and Katz, Jonathan and Morselli,R.} } @article {12884, title = {RNA Colony Blot Hybridization Method for Enumeration of Culturable Vibrio Cholerae and Vibrio Mimicus Bacteria}, journal = {Applied and Environmental Microbiology}, volume = {75}, year = {2009}, month = {2009/09/01/}, pages = {5439 - 5444}, abstract = {A species-specific RNA colony blot hybridization protocol was developed for enumeration of culturable Vibrio cholerae and Vibrio mimicus bacteria in environmental water samples. Bacterial colonies on selective or nonselective plates were lysed by sodium dodecyl sulfate, and the lysates were immobilized on nylon membranes. A fluorescently labeled oligonucleotide probe targeting a phylogenetic signature sequence of 16S rRNA of V. cholerae and V. mimicus was hybridized to rRNA molecules immobilized on the nylon colony lift blots. The protocol produced strong positive signals for all colonies of the 15 diverse V.
cholerae-V. mimicus strains tested, indicating 100\% sensitivity of the probe for the targeted species. For visible colonies of 10 nontarget species, the specificity of the probe was calculated to be 90\% because of a weak positive signal produced by Grimontia (Vibrio) hollisae, a marine bacterium. When both the sensitivity and specificity of the assay were evaluated using lake water samples amended with a bioluminescent V. cholerae strain, no false-negative or false-positive results were found, indicating 100\% sensitivity and specificity for culturable bacterial populations in freshwater samples when G. hollisae was not present. When the protocol was applied to laboratory microcosms containing V. cholerae attached to live copepods, copepods were found to carry approximately 10,000 to 50,000 CFU of V. cholerae per copepod. The protocol was also used to analyze pond water samples collected in an area of cholera endemicity in Bangladesh over a 9-month period. Water samples collected from six ponds demonstrated a peak in abundance of total culturable V. cholerae bacteria 1 to 2 months prior to observed increases in pathogenic V. cholerae and in clinical cases recorded by the area health clinic. The method provides a highly specific and sensitive tool for monitoring the dynamics of V. cholerae in the environment. The RNA blot hybridization protocol can also be applied to detection of other gram-negative bacteria for taxon-specific enumeration.}, isbn = {0099-2240, 1098-5336}, doi = {10.1128/AEM.02007-08}, url = {http://aem.asm.org/content/75/17/5439}, author = {Grim,Christopher J. and Zo,Young-Gun and Hasan,Nur A. and Ali,Afsar and Chowdhury,Wasimul B. and Islam,Atiqul and Rashid,Mohammed H. and Alam,Munirul and Morris,J. Glenn and Huq,Anwar and Rita R Colwell} } @article {12520, title = {Robust Estimation of Albedo for Illumination-Invariant Matching and Shape Recovery}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {31}, year = {2009}, month = {2009/05//}, pages = {884 - 899}, abstract = {We present a nonstationary stochastic filtering framework for the task of albedo estimation from a single image. There are several approaches in the literature for albedo estimation, but few include the errors in estimates of surface normals and light source direction to improve the albedo estimate. The proposed approach effectively utilizes the error statistics of surface normals and illumination direction for robust estimation of albedo, for images illuminated by single and multiple light sources. The albedo estimate obtained is subsequently used to generate albedo-free normalized images for recovering the shape of an object. Traditional shape-from-shading (SFS) approaches often assume constant/piecewise constant albedo and known light source direction to recover the underlying shape. Using the estimated albedo, the general problem of estimating the shape of an object with varying albedo map and unknown illumination source is reduced to one that can be handled by traditional SFS approaches. Experimental results are provided to show the effectiveness of the approach and its application to illumination-invariant matching and shape recovery. The estimated albedo maps are compared with the ground truth. The maps are used as illumination-invariant signatures for the task of face recognition across illumination variations. The recognition results obtained compare well with the current state-of-the-art approaches. 
Impressive shape recovery results are obtained using images downloaded from the Web with little control over imaging conditions. The recovered shapes are also used to synthesize novel views under novel illumination conditions.}, keywords = {albedo estimation; error statistics; face recognition; illumination-invariant matching; nonstationary stochastic filtering; robust estimation; shape recovery; shape-from-shading approach; filtering theory; image matching; Algorithms; Image Interpretation, Computer-Assisted; Imaging, Three-Dimensional; Lighting; Pattern Recognition, Automated; Photometry; Reproducibility of Results; Sensitivity and Specificity}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2008.135}, author = {Biswas,S. and Aggarwal,G. and Chellapa, Rama} } @article {12529, title = {Robust human detection under occlusion by integrating face and person detectors}, journal = {Proceedings of the Third International Conference on Advances in Biometrics}, year = {2009}, month = {2009///}, pages = {970 - 979}, abstract = {Human detection under occlusion is a challenging problem in computer vision. We address this problem through a framework which integrates face detection and person detection. We first investigate how the response of a face detector is correlated with the response of a person detector. From these observations, we formulate hypotheses that capture the intuitive feedback between the responses of face and person detectors and use it to verify if the individual detectors{\textquoteright} outputs are true or false. We illustrate the performance of our integration framework on challenging images that have a considerable amount of occlusion, and demonstrate its advantages over individual face and person detectors.}, author = {Schwartz, W.R. and Gopalan,R. and Chellapa, Rama and Davis, Larry S.} } @article {14254, title = {Robust Wavelet-Based Super-Resolution Reconstruction: Theory and Algorithm}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {31}, year = {2009}, month = {2009/04//}, pages = {649 - 660}, abstract = {We present an analysis and algorithm for the problem of super-resolution imaging, that is, the reconstruction of HR (high-resolution) images from a sequence of LR (low-resolution) images. Super-resolution reconstruction entails solutions to two problems. One is the alignment of image frames. The other is the reconstruction of a HR image from multiple aligned LR images. Both are important for the performance of super-resolution imaging. Image alignment is addressed with a new batch algorithm, which simultaneously estimates the homographies between multiple image frames by enforcing the surface normal vectors to be the same. This approach can handle longer video sequences quite well. Reconstruction is addressed with a wavelet-based iterative reconstruction algorithm with an efficient de-noising scheme. The technique is based on a new analysis of video formation. At a high level, our method could be described as a better-conditioned iterative back projection scheme with an efficient regularization criteria in each iteration step. Experiments with both simulated and real data demonstrate that our approach has better performance than existing super-resolution methods.
It can remove even large amounts of mixed noise without creating artifacts.}, keywords = {batch algorithm, better-conditioned iterative back projection scheme, Enhancement, homography estimation, image denoising, image denoising scheme, image frame alignment, Image processing software, Image reconstruction, image resolution, image sequence, Image sequences, iterative methods, regularization criteria, robust wavelet-based iterative super-resolution reconstruction, surface normal vector, video formation analysis, video sequence, video signal processing, Wavelet transforms}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2008.103}, author = {Hui Ji and Ferm{\"u}ller, Cornelia} } @article {14743, title = {The ruby intermediate language}, journal = {SIGPLAN Not.}, volume = {44}, year = {2009}, month = {2009/10//}, pages = {89 - 98}, abstract = {Ruby is a popular, dynamic scripting language that aims to "feel natural to programmers" and give users the "freedom to choose" among many different ways of doing the same thing. While this arguably makes programming in Ruby easier, it makes it hard to build analysis and transformation tools that operate on Ruby source code. In this paper, we present the Ruby Intermediate Language (RIL), a Ruby front-end and intermediate representation that addresses these challenges. RIL includes an extensible GLR parser for Ruby, and an automatic translation into an easy-to-analyze intermediate form. This translation eliminates redundant language constructs, unravels the often subtle ordering among side effecting operations, and makes implicit interpreter operations explicit. We also describe several additional useful features of RIL, such as a dynamic instrumentation library for profiling source code and a dataflow analysis engine. We demonstrate the usefulness of RIL by presenting a static and dynamic analysis to eliminate null pointer errors in Ruby programs. We hope that RIL{\textquoteright}s features will enable others to more easily build analysis tools for Ruby, and that our design will inspire the development of similar frameworks for other dynamic languages.}, keywords = {intermediate language, profile guided analysis, ril, ruby}, isbn = {0362-1340}, doi = {10.1145/1837513.1640148}, url = {http://doi.acm.org/10.1145/1837513.1640148}, author = {Furr,Michael and An,Jong-hoon (David) and Foster, Jeffrey S. and Hicks, Michael W.} } @article {16525, title = {Running memory span: A comparison of behavioral capacity limits with those of an attractor neural network}, journal = {Cognitive Systems Research}, volume = {10}, year = {2009}, month = {2009/06//}, pages = {161 - 171}, abstract = {We studied a computational model of short term memory capacity that performs a simulated running memory span task using Hebbian learning and rapid decay of connection strengths to keep recent items active for later recall. This model demonstrates recall performance similar to humans performing the same task, with a capacity limit of approximately three items and a prominent recency effect. The model also shows that this capacity depends on decay to release the model from accumulating interference. Model findings are compared with data from two behavioral experiments that used varying task demands to tax memory capacity limits. Following additional theoretical predictions from the computational model, behavioral data support that when task demands require attention to be spread too thin to keep items available for later recall, capacity limits suffer.
These findings are important both for understanding the mechanisms underlying short term memory capacity and for memory researchers interested in the role of attention in capacity limitations.}, keywords = {Attractor neural network, Computational model, Running span, Short term memory, Working memory capacity}, isbn = {1389-0417}, doi = {10.1016/j.cogsys.2008.09.001}, url = {http://www.sciencedirect.com/science/article/pii/S1389041708000569}, author = {Weems,Scott A. and Winder,Ransom K. and Bunting,Michael and Reggia, James A.} } @inbook {17668, title = {The Randomized Coloring Procedure with Symmetry-Breaking}, booktitle = {Automata, Languages and Programming}, series = {Lecture Notes in Computer Science}, volume = {5125}, year = {2008}, month = {2008///}, pages = {306 - 319}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {A basic randomized coloring procedure has been used in probabilistic proofs to obtain remarkably strong results on graph coloring. These results include the asymptotic version of the List Coloring Conjecture due to Kahn, the extensions of Brooks{\textquoteright} Theorem to sparse graphs due to Kim and Johansson, and Luby{\textquoteright}s fast parallel and distributed algorithms for graph coloring. The most challenging aspect of a typical probabilistic proof is showing adequate concentration bounds for key random variables. In this paper, we present a simple symmetry-breaking augmentation to the randomized coloring procedure that works well in conjunction with Azuma{\textquoteright}s Martingale Inequality to easily yield the requisite concentration bounds. We use this approach to obtain a number of results in two areas: frugal coloring and weighted equitable coloring. A β-frugal coloring of a graph G is a proper vertex-coloring of G in which no color appears more than β times in any neighborhood. Let G = (V, E) be a vertex-weighted graph with weight function w : V {\textrightarrow} [0, 1] and let W = ∑_{v ∈ V} w(v). A weighted equitable coloring of G is a proper k-coloring such that the total weight of every color class is {\textquotedblleft}large{\textquotedblright}, i.e., {\textquotedblleft}not much smaller{\textquotedblright} than W/k; this notion is useful in obtaining tail bounds for sums of dependent random variables.}, isbn = {978-3-540-70574-1}, url = {http://dx.doi.org/10.1007/978-3-540-70575-8_26}, author = {Pemmaraju,Sriram and Srinivasan, Aravind}, editor = {Aceto,Luca and Damg{\r a}rd,Ivan and Goldberg,Leslie and Halld{\'o}rsson,Magn{\'u}s and Ing{\'o}lfsd{\'o}ttir,Anna and Walukiewicz,Igor} } @article {16071, title = {A range of indicators for the evaluation of state health department Web-based data query systems}, journal = {Issues in Evaluating Health Department Web-based Data Query Systems: Working Papers}, year = {2008}, month = {2008///}, pages = {39 - 39}, abstract = {This chapter explores the wide range of possible indicators (also called metrics) that can be used to evaluate state health department Web-based data query systems (WDQS). While the list of indicators we propose is not exhaustive, it strives to cast a wide net on the range of issues that should be considered when evaluating WDQS, from the richness of functionalities provided by the WDQS, to the usability of the WDQS, the level of human resources required to develop and maintain the WDQS, and the frequency and intensity of WDQS use.
}, author = {Plaisant, Catherine} } @conference {12173, title = {Readability of scanned books in digital libraries}, booktitle = {Proceeding of the twenty-sixth annual SIGCHI conference on Human factors in computing systems}, year = {2008}, month = {2008///}, pages = {705 - 714}, author = {Quinn,A.J. and Hu,C. and Arisaka,T. and Rose,A. and Bederson, Benjamin B.} } @article {16390, title = {Reciprocity between the cerebellum and the cerebral cortex: Nonlinear dynamics in microscopic modules for generating voluntary motor commands}, journal = {Complexity}, volume = {14}, year = {2008}, month = {2008/11/01/}, pages = {29 - 45}, abstract = {The cerebellum and basal ganglia are reciprocally connected with the cerebral cortex, forming many loops that function as distributed processing modules. Here we present a detailed model of one microscopic loop between the motor cortex and the cerebellum, and we show how small arrays of these microscopic loops (CB modules) can be used to generate biologically plausible motor commands for controlling movement. A fundamental feature of CB modules is the presence of positive feedback loops between the cerebellar nucleus and the motor cortex. We use nonlinear dynamics to model one microscopic loop and to investigate its bistable properties. Simulations demonstrate an ability to program a motor command well in advance of command generation and an ability to vary command duration. However, control of command intensity is minimal, which could interfere with the control of movement velocity. To assess these hypotheses, we use a minimal nonlinear model of the neuromuscular (NM) system that translates motor commands into actual movements. Simulations of the combined CB-NM modular model indicate that movement duration is readily controlled, whereas velocity is poorly controlled. We then explore how an array of eight CB-NM modules can be used to control the direction and endpoint of a planar movement. In actuality, thousands of such microscopic loops function together as an array of adjustable pattern generators for programming and regulating the composite motor commands that control limb movements. We discuss the biological plausibility and limitations of the model. We also discuss ways in which an agent-based representation can take advantage of the modularity in order to model this complex system. {\textcopyright} 2008 Wiley Periodicals, Inc. Complexity, 2008}, keywords = {agent-based modeling, cerebellum, equilibrium point control, motor command, motor cortex, Movement, neural network, nonlinear dynamics}, isbn = {1099-0526}, doi = {10.1002/cplx.20241}, url = {http://onlinelibrary.wiley.com/doi/10.1002/cplx.20241/abstract}, author = {Wang,Jun and Dam,Gregory and Yildirim,Sule and Rand, William and Wilensky,Uri and Houk,James C.} } @article {12564, title = {Recognition of Humans and their Activities using Statistical analysis on Stiefel and Grassmann Manifolds}, journal = {Red}, volume = {7}, year = {2008}, month = {2008///}, pages = {643 - 643}, abstract = {Many applications in computer vision involve learning and recognition of patterns from exemplars which lie on certain manifolds. Given a database of examples and a query, the following two questions are usually addressed {\textendash} a) what is the {\textquoteleft}closest{\textquoteright} example to the query in the database? b) what is the {\textquoteleft}most probable{\textquoteright} class to which the query belongs?
The answer to the first question involves study of the geometric properties of the manifold, which then leads to appropriate definitions of distance metrics on the manifold (geodesics, etc.). The answer to the second question involves statistical modeling of inter- and intra-class variations on the manifold. In this paper, we concern ourselves with two related manifolds that often appear in several vision applications {\textendash} the Stiefel Manifold and the Grassmann Manifold. We describe statistical modeling and inference tools on these manifolds which result in significant improvements in performance over traditional distance-based classifiers. We illustrate applications to video-based face recognition and activity recognition.}, author = {Turaga,P. and Veeraraghavan,A. and Chellapa, Rama} } @conference {15497, title = {Relationships between Test Suites, Faults, and Fault Detection in GUI Testing}, booktitle = {Software Testing, Verification, and Validation, 2008 1st International Conference on}, year = {2008}, month = {2008/04//}, pages = {12 - 21}, abstract = {Software-testing researchers have long sought recipes for test suites that detect faults well. In the literature, empirical studies of testing techniques abound, yet the ideal technique for detecting the desired kinds of faults in a given situation often remains unclear. This work shows how understanding the context in which testing occurs, in terms of factors likely to influence fault detection, can make evaluations of testing techniques more readily applicable to new situations. We present a methodology for discovering which factors do statistically affect fault detection, and we perform an experiment with a set of test-suite- and fault-related factors in the GUI testing of two fielded, open-source applications. Statement coverage and GUI-event coverage are found to be statistically related to the likelihood of detecting certain kinds of faults.}, keywords = {Fault detection, fault-related factors, Graphical user interfaces, GUI testing, program testing, software-testing, test suites, test-suite-related factors}, doi = {10.1109/ICST.2008.26}, author = {Strecker,J. and Memon, Atif M.} } @article {17346, title = {Research Agenda: Visual Overviews for Exploratory Search}, journal = {Information Seeking Support Systems}, volume = {11}, year = {2008}, month = {2008///}, pages = {4 - 4}, abstract = {Exploratory search is necessary when users{\textquoteright} knowledge of the domain is incomplete or when initial user goals do not match available data or metadata that is the basis for search indexing attributes. Such mismatches mean that users need to learn more in order to develop a better understanding of the domain or to revise their search goals. Exploratory search processes may take weeks or months, so interfaces that support prolonged exploration are necessary. The attraction of exploratory search is that users can take on more ambitious goals that require substantial learning and creative leaps to bridge the gaps between what they know and what they seek.}, author = {Shneiderman, Ben} } @article {15799, title = {Residual periodograms for choosing regularization parameters for ill-posed problems}, journal = {Inverse Problems}, volume = {24}, year = {2008}, month = {2008/06/01/}, pages = {034005 - 034005}, abstract = {Consider an ill-posed problem transformed if necessary so that the errors in the data are independent identically normally distributed with mean zero and variance 1.
We survey regularization and parameter selection from a linear algebra and statistics viewpoint and compare the statistical distributions of regularized estimates of the solution and the residual. We discuss methods for choosing a regularization parameter in order to assure that the residual for the model is statistically plausible. Ideally, as proposed by Rust (1998 Tech. Rep. NISTIR 6131, 2000 Comput. Sci. Stat. 32 333{\textendash}47 ), the results of candidate parameter choices should be evaluated by plotting the resulting residual along with its periodogram and its cumulative periodogram, but sometimes an automated choice is needed. We evaluate a method for choosing the regularization parameter that makes the residuals as close as possible to white noise, using a diagnostic test based on the periodogram. We compare this method with standard techniques such as the discrepancy principle, the L-curve and generalized cross validation, showing that it performs better on two new test problems as well as a variety of standard problems.}, isbn = {0266-5611, 1361-6420}, doi = {10.1088/0266-5611/24/3/034005}, url = {http://iopscience.iop.org/0266-5611/24/3/034005}, author = {Rust,Bert W. and O{\textquoteright}Leary, Dianne P.} } @article {13016, title = {Resolving arthropod phylogeny: exploring phylogenetic signal within 41 kb of protein-coding nuclear gene sequence}, journal = {Syst Biol}, volume = {57}, year = {2008}, month = {2008/12//}, pages = {920 - 938}, abstract = {This study attempts to resolve relationships among and within the four basal arthropod lineages (Pancrustacea, Myriapoda, Euchelicerata, Pycnogonida) and to assess the widespread expectation that remaining phylogenetic problems will yield to increasing amounts of sequence data. Sixty-eight regions of 62 protein-coding nuclear genes (approximately 41 kilobases (kb)/taxon) were sequenced for 12 taxonomically diverse arthropod taxa and a tardigrade outgroup. Parsimony, likelihood, and Bayesian analyses of total nucleotide data generally strongly supported the monophyly of each of the basal lineages represented by more than one species. Other relationships within the Arthropoda were also supported, with support levels depending on method of analysis and inclusion/exclusion of synonymous changes. Removing third codon positions, where the assumption of base compositional homogeneity was rejected, altered the results. Removing the final class of synonymous mutations{\textendash}first codon positions encoding leucine and arginine, which were also compositionally heterogeneous{\textendash}yielded a data set that was consistent with a hypothesis of base compositional homogeneity. Furthermore, under such a data-exclusion regime, all 68 gene regions individually were consistent with base compositional homogeneity. Restricting likelihood analyses to nonsynonymous change recovered trees with strong support for the basal lineages but not for other groups that were variably supported with more inclusive data sets. In a further effort to increase phylogenetic signal, three types of data exploration were undertaken. (1) Individual genes were ranked by their average rate of nonsynonymous change, and three rate categories were assigned{\textendash}fast, intermediate, and slow. Then, bootstrap analysis of each gene was performed separately to see which taxonomic groups received strong support. 
Five taxonomic groups were strongly supported independently by two or more genes, and these genes mostly belonged to the slow or intermediate categories, whereas groups supported only by a single gene region tended to be from genes of the fast category, arguing that fast genes provide a less consistent signal. (2) A sensitivity analysis was performed in which increasing numbers of genes were excluded, beginning with the fastest. The number of strongly supported nodes increased up to a point and then decreased slightly. Recovery of Hexapoda required removal of fast genes. Support for Mandibulata (Pancrustacea + Myriapoda) also increased, at times to "strong" levels, with removal of the fastest genes. (3) Concordance selection was evaluated by clustering genes according to their ability to recover Pancrustacea, Euchelicerata, or Myriapoda and analyzing the three clusters separately. All clusters of genes recovered the three concordance clades but were at times inconsistent in the relationships recovered among and within these clades, a result that indicates that the a priori concordance criteria may bias phylogenetic signal in unexpected ways. In a further attempt to increase support of taxonomic relationships, sequence data from 49 additional taxa for three slow genes (i.e., EF-1 alpha, EF-2, and Pol II) were combined with the various 13-taxon data sets. The 62-taxon analyses supported the results of the 13-taxon analyses and provided increased support for additional pancrustacean clades found in an earlier analysis including only EF-1 alpha, EF-2, and Pol II.}, doi = {10.1080/10635150802570791}, author = {Regier,J. C and Shultz,J. W and Ganley,A. R.D and Hussey,A. and Shi,D. and Ball,B. and Zwick,A. and Stajich,J. E and Cummings, Michael P. and Martin,J. W and Cunningham,CW} } @conference {15851, title = {Resolving personal names in email using context expansion}, booktitle = {Association for Computational Linguistics (ACL)}, year = {2008}, month = {2008///}, author = {Elsayed,T. and Oard, Douglas and Namata,G.} } @article {13133, title = {Resource allocation for tracking multiple targets using particle filters}, journal = {The Eighth International Workshop on Visual Surveillance}, year = {2008}, month = {2008///}, abstract = {Particle filters have been very widely used to track targets in video sequences. However, they suffer from an exponential rise in the number of particles needed to jointly track multiple targets. On the other hand, using multiple independent filters to track in crowded scenes often leads to erroneous results. We present a new particle filtering framework which uses an intelligent resource allocation scheme allowing us to track a large number of targets using a small set of particles. First, targets with overlapping posterior distributions and similar appearance models are clustered into interaction groups and tracked jointly, but independently of other targets in the scene. Second, different numbers of particles are allocated to different groups based on the following observations. Groups with higher associations (quantifying spatial proximity and pairwise appearance similarity) are given more particles. Groups with larger numbers of targets are given a larger number of particles. Finally, groups with ineffective proposal distributions are assigned more particles. Our experiments demonstrate the effectiveness of this framework over the commonly used joint particle filter with Markov Chain Monte Carlo (MCMC) sampling.}, author = {Kembhavi,A. and Schwartz, W.R.
and Davis, Larry S. and others} } @conference {13628, title = {Re-Targetable OCR with Intelligent Character Segmentation}, booktitle = {DAS {\textquoteright}08: Proceedings of the 2008 The Eighth IAPR International Workshop on Document Analysis Systems}, year = {2008}, month = {2008/09//}, pages = {183 - 190}, abstract = {We have developed a font-model based intelligent character segmentation and recognition system. Using characteristics of structurally similar TrueType fonts, our system automatically builds a model to be used for the segmentation and recognition of the new script, independent of glyph composition. The key is a reliance on known font attributes. In our system, three feature extraction methods are used to demonstrate the importance of appropriate features for classification. The methods are tested on both Latin (English) and non-Latin (Khmer) scripts. Results show that the character-level recognition accuracy exceeds 92\% for Khmer and 96\% for English on degraded documents. This work is a step toward the recognition of scripts of low-density languages which typically do not warrant the development of commercial OCR, yet often have complete TrueType font descriptions.}, author = {Agrawal,Mudit and David Doermann} } @inbook {13634, title = {Review of Classifier Combination Methods}, booktitle = {Studies in Computational Intelligence: Machine Learning in Document Analysis and Recognition}, year = {2008}, month = {2008///}, pages = {361 - 386}, publisher = {Springer}, organization = {Springer}, abstract = {Classifier combination methods have proved to be an effective tool to increase the performance of pattern recognition applications. In this chapter we review and categorize major advancements in this field. Despite a significant number of publications describing successful classifier combination implementations, the theoretical basis is still missing and achieved improvements are inconsistent. By introducing different categories of classifier combinations in this review we attempt to put forward more specific directions for future theoretical research. We also introduce a retraining effect and effects of locality based training as important properties of classifier combinations. Such effects have significant influence on the performance of combinations, and their study is necessary for complete theoretical understanding of combination algorithms.}, author = {Tulyakov,Sergey and Jaeger,Stefan and Govindaraju,Venu and David Doermann}, editor = {Marinai,Simone and Fujisawa,Hiromichi} } @article {12180, title = {A review of overview+detail, zooming, and focus+context interfaces}, journal = {ACM Computing Surveys (CSUR)}, volume = {41}, year = {2008}, month = {2008///}, pages = {1 - 31}, author = {Cockburn,A. and Karlson,A. and Bederson, Benjamin B.} } @conference {16010, title = {The role of metacognition in robust AI systems}, booktitle = {AAAI-08 Workshop on Metareasoning (Chicago, IL)}, year = {2008}, month = {2008///}, author = {Schmill,M. D and Oates,T. and Anderson,M. and Fults,S. and Josyula,D. and Perlis, Don and Wilson,S.} } @article {14077, title = {Role of transposable elements in trypanosomatids}, journal = {Microbes and Infection}, volume = {10}, year = {2008}, month = {2008/05//}, pages = {575 - 581}, abstract = {Transposable elements constitute 2-5\% of the genome content in trypanosomatid parasites.
Some of them are involved in critical cellular functions, such as the regulation of gene expression in Leishmania spp. In this review, we highlight the remarkable role extinct transposable elements can play as the source of potential new functions.}, keywords = {Cellular function, Domestication, Evolution, Gene expression, Leishmania, Regulation of mRNA stability, Retroposon, Transposable element, Trypanosoma}, isbn = {1286-4579}, doi = {10.1016/j.micinf.2008.02.009}, url = {http://www.sciencedirect.com/science/article/pii/S1286457908000464}, author = {Bringaud,Fr{\'e}d{\'e}ric and Ghedin,Elodie and El-Sayed, Najib M. and Papadopoulou,Barbara} } @article {19202, title = {Route prediction from trip observations}, journal = {SAE SP}, volume = {2193}, year = {2008}, month = {2008}, author = {Jon Froehlich and Krumm,J.} } @article {14291, title = {Rule-based static analysis of network protocol implementations}, journal = {Information and Computation}, volume = {206}, year = {2008}, month = {2008///}, pages = {130 - 157}, author = {Udrea,O. and Lumezanu,C. and Foster, Jeffrey S.} } @inbook {17646, title = {Randomized Algorithms and Probabilistic Analysis in Wireless Networking}, booktitle = {Stochastic Algorithms: Foundations and Applications}, series = {Lecture Notes in Computer Science}, volume = {4665}, year = {2007}, month = {2007///}, pages = {54 - 57}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Devices connected wirelessly, in various forms including computers, hand-held devices, ad hoc networks, and embedded systems, are expected to become ubiquitous all around us. Wireless networks pose interesting new challenges, some of which do not arise in standard (wired) networks. This survey discusses some key probabilistic notions {\textendash} both randomized algorithms and probabilistic analysis {\textendash} in wireless networking.}, isbn = {978-3-540-74870-0}, url = {http://dx.doi.org/10.1007/978-3-540-74871-7_5}, author = {Srinivasan, Aravind}, editor = {Hromkovic,Juraj and Kr{\'a}lovic,Richard and Nunkesser,Marc and Widmayer,Peter} } @conference {18919, title = {Reactive Query Policies: A Formalism for Planning with Volatile External Information}, year = {2007}, month = {2007/04/01/5}, pages = {243 - 250}, abstract = {To generate plans for collecting data for data mining, an important problem is information volatility during planning: the information needed by the planning system may change or expire during the planning process, as changes occur in the data being collected. In such situations, the planning system faces two challenges: how to generate plans despite these changes, and how to guarantee that a plan returned by the planner will remain valid for some period of time after the planning ends. The focus of our work is to address both of the above challenges. In particular, we provide: 1) A formalism for reactive query policies, a class of strategies for deciding when to reissue queries for information that has changed during the planning process. This class includes all query management strategies that have been developed to date. 2) A new reactive query policy called the presumptive strategy. In our experiments, the presumptive strategy ran exponentially faster than the lazy strategy, the best previously known query management strategy.
In the hardest set of problems we tested, the presumptive strategy took 4.7\% as much time and generated 6.9\% as many queries as the lazy strategy.}, keywords = {data collection, data mining, information volatility, planning (artificial intelligence), planning system, presumptive strategy, query management, Query processing, reactive query policies, volatile external information}, doi = {10.1109/CIDM.2007.368880}, author = {Au,Tsz-Chiu and Nau, Dana S.} } @conference {13988, title = {Real Time Capture of Audio Images and their Use with Video}, booktitle = {Applications of Signal Processing to Audio and Acoustics, 2007 IEEE Workshop on}, year = {2007}, month = {2007/10//}, pages = {10 - 13}, abstract = {Spherical microphone arrays provide an ability to compute the acoustical intensity corresponding to different spatial directions in a given frame of audio-data. These intensities may be exhibited as an image, and these images may be updated at a high frame rate to achieve a video stream if the data capture and intensity computations can be performed sufficiently quickly, thereby creating a frame-rate audio camera. We describe how such a camera can be built and the processing done sufficiently quickly using graphics processors. The joint processing of captured frame-rate audio and video images enables applications such as visual identification of noise sources, beamforming and noise-suppression in video conferencing, and others, provided it is possible to account for the spatial differences in the location of the audio and the video cameras. Based on the recognition that the spherical array can be viewed as a central projection camera, it is possible to perform such joint analysis. We provide several examples of real-time applications.}, doi = {10.1109/ASPAA.2007.4393037}, author = {O{\textquoteright}donovan,Adam and Duraiswami, Ramani and Gumerov, Nail A.} } @inbook {12601, title = {Recognizing Faces Across Age Progression}, booktitle = {Face Biometrics for Personal Identification}, series = {Signals and Communication Technology}, year = {2007}, month = {2007///}, pages = {27 - 42}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, isbn = {978-3-540-49346-4}, url = {http://dx.doi.org/10.1007/978-3-540-49346-4_3}, author = {Ramanathan,Narayanan and Chellapa, Rama}, editor = {Hammoud,Riad and Abidi,Besma and Abidi,Mongi} } @article {12919, title = {Recovery in culture of viable but nonculturable Vibrio parahaemolyticus: regrowth or resuscitation?}, journal = {The ISME Journal}, volume = {1}, year = {2007}, month = {2007/05/10/}, pages = {111 - 120}, abstract = {The objective of this study was to explore the recovery of culturability of viable but nonculturable (VBNC) Vibrio parahaemolyticus after temperature upshift and to determine whether regrowth or resuscitation occurred. A clinical strain of V. parahaemolyticus Vp5 was rendered VBNC by exposure to artificial seawater (ASW) at 4{\textdegree}C. Aliquots of the ASW suspension of cells (0.1, 1 and 10 ml) were subjected to increased temperatures of 20{\textdegree}C and 37{\textdegree}C. Culturability of the cells in the aliquots was monitored for colony formation on a rich medium and changes in morphology were measured by scanning (SEM) and transmission (TEM) electron microscopy. Samples of VBNC cells were fixed and examined by SEM, revealing a heterogeneous population comprising small cells and larger, flattened cells.
Forty-eight hours after temperature upshift to 20{\textdegree}C or 37{\textdegree}C, both elongation and division by binary fission of the cells were observed, employing SEM and TEM, but only in the 10-ml aliquots. The results suggest that a portion of VBNC cells is able to undergo cell division. It is concluded that a portion of VBNC cells of V. parahaemolyticus subjected to cold temperatures remain viable. After temperature upshift, regrowth of those cells, rather than resuscitation of all bacteria of the initial inoculum, appears to be responsible for recovery of culturability of VBNC cells of V. parahaemolyticus. Nutrient in filtrates of VBNC cells is hypothesized to allow growth of the temperature-responsive cells, with cell division occurring via binary fission, but also including an atypical, asymmetric cell division.}, keywords = {ecophysiology, ecosystems, environmental biotechnology, geomicrobiology, ISME J, microbe interactions, microbial communities, microbial ecology, microbial engineering, microbial epidemiology, microbial genomics, microorganisms}, isbn = {1751-7362}, doi = {10.1038/ismej.2007.1}, url = {http://www.nature.com/ismej/journal/v1/n2/full/ismej20071a.html}, author = {Coutard,Fran{\c c}ois and Crassous,Philippe and Droguet,Micka{\"e}l and Gobin,Eric and Rita R Colwell and Pommepuy,Monique and Hervio-Heath,Dominique} } @conference {19592, title = {Relating Complexity and Precision in Control Flow Analysis}, booktitle = {ICFP {\textquoteright}07 Proceedings of the 12th ACM SIGPLAN International Conference on Functional Programming}, series = {ICFP {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {85 - 96}, publisher = {ACM}, organization = {ACM}, abstract = {We analyze the computational complexity of kCFA, a hierarchy of control flow analyses that determine which functions may be applied at a given call-site. This hierarchy specifies related decision problems, quite apart from any algorithms that may implement their solutions. We identify a simple decision problem answered by this analysis and prove that in the 0CFA case, the problem is complete for polynomial time. The proof is based on a nonstandard, symmetric implementation of Boolean logic within multiplicative linear logic (MLL). We also identify a simpler version of 0CFA related to η-expansion, and prove that it is complete for logarithmic space, using arguments based on computing paths and permutations. For any fixed k>0, it is known that kCFA (and the analogous decision problem) can be computed in time exponential in the program size. For k=1, we show that the decision problem is NP-hard, and sketch why this remains true for larger fixed values of k. The proof technique depends on using the approximation of CFA as an essentially nondeterministic computing mechanism, as distinct from the exactness of normalization. When k=n, so that the "depth" of the control flow analysis grows linearly in the program length, we show that the decision problem is complete for exponential time. In addition, we sketch how the analysis presented here may be extended naturally to languages with control operators.
All of the insights presented give clear examples of how straightforward observations about linearity, and linear logic, may in turn be used to give a greater understanding of functional programming and program analysis.}, keywords = {complexity, continuation, control flow analysis, eta expansion, geometry of interaction, linear logic, normalization, proofnet, static analysis}, isbn = {978-1-59593-815-2}, url = {http://doi.acm.org/10.1145/1291151.1291166}, author = {David Van Horn and Mairson, Harry G.} } @article {14476, title = {Relationship identification for social network discovery}, journal = {PROCEEDINGS OF THE NATIONAL CONFERENCE ON ARTIFICIAL INTELLIGENCE}, volume = {22}, year = {2007}, month = {2007///}, pages = {546 - 546}, abstract = {In recent years, informal, online communication has transformed the ways in which we connect and collaborate with friends and colleagues. With millions of individuals communicating online each day, we have a unique opportunity to observe the formation and evolution of roles and relationships in networked groups and organizations. Yet a number of challenges arise when attempting to infer the underlying social network from data that is often ambiguous, incomplete and context-dependent. In this paper, we consider the problem of collaborative network discovery from domains such as intelligence analysis and litigation support where the analyst is attempting to construct a validated representation of the social network. We specifically address the challenge of relationship identification where the objective is to identify relevant communications that substantiate a given social relationship type. We propose a supervised ranking approach to the problem and assess its performance on a manager-subordinate relationship identification task using the Enron email corpus. By exploiting message content, the ranker routinely cues the analyst to relevant communications relationships and message traffic that are indicative of the social relationship.}, author = {Diehl,C. P and Namata,G. and Getoor, Lise} } @article {15476, title = {Reliable Effects Screening: A Distributed Continuous Quality Assurance Process for Monitoring Performance Degradation in Evolving Software Systems}, journal = {Software Engineering, IEEE Transactions on}, volume = {33}, year = {2007}, month = {2007/02//}, pages = {124 - 141}, abstract = {Developers of highly configurable performance-intensive software systems often use in-house performance-oriented "regression testing" to ensure that their modifications do not adversely affect their software{\textquoteright}s performance across its large configuration space. Unfortunately, time and resource constraints can limit in-house testing to a relatively small number of possible configurations, followed by unreliable extrapolation from these results to the entire configuration space. As a result, many performance bottlenecks escape detection until systems are fielded. In our earlier work, we improved the situation outlined above by developing an initial quality assurance process called "main effects screening". This process 1) executes formally designed experiments to identify an appropriate subset of configurations on which to base the performance-oriented regression testing, 2) executes benchmarks on this subset whenever the software changes, and 3) provides tool support for executing these actions on in-the-field and in-house computing resources.
Our initial process had several limitations, however, since it was manually configured (which was tedious and error-prone) and relied on strong and untested assumptions for its accuracy (which made its use unacceptably risky in practice). This paper presents a new quality assurance process called "reliable effects screening" that provides three significant improvements to our earlier work. First, it allows developers to economically verify key assumptions during process execution. Second, it integrates several model-driven engineering tools to make process configuration and execution much easier and less error prone. Third, we evaluate this process via several feasibility studies of three large, widely used performance-intensive software frameworks. Our results indicate that reliable effects screening can detect performance degradation in large-scale systems more reliably and with significantly fewer resources than conventional techniques.}, keywords = {configuration subset, distributed continuous quality assurance process, evolving software systems, in house testing, main effects screening, performance bottlenecks, performance degradation monitoring, performance intensive software systems, process configuration, process execution, program testing, regression testing, reliable effects screening, software benchmarks, Software performance, software performance evaluation, Software quality, software reliability, tool support}, isbn = {0098-5589}, doi = {10.1109/TSE.2007.20}, author = {Yilmaz,C. and Porter, Adam and Krishna,A. S and Memon, Atif M. and Schmidt,D. C and Gokhale,A.S. and Natarajan,B.} } @article {13400, title = {Report on the Fourth International Workshop on Data Management for Sensor Networks (DMSN 2007)}, journal = {SIGMOD Rec.}, volume = {36}, year = {2007}, month = {2007/12//}, pages = {53 - 55}, abstract = {Sensor networks enable an unprecedented level of access to the physical world, and hold tremendous potential to revolutionize many application domains. Research on sensor networks spans many areas of computer science, and there are now major conferences, e.g., IPSN and SenSys, devoted to sensor networks. However, there is no focused forum for discussion of early and innovative work on data management in sensor networks. The International Workshop on Data Management for Sensor Networks (DMSN), inaugurated in 2004, aims to fill this significant gap in the database and sensor network communities.}, isbn = {0163-5808}, doi = {10.1145/1361348.1361362}, url = {http://doi.acm.org/10.1145/1361348.1361362}, author = {Balazinska,Magdalena and Deshpande, Amol and Labrinidis,Alexandros and Luo,Qiong and Madden,Samuel and Yang,Jun} } @conference {13401, title = {Representing and Querying Correlated Tuples in Probabilistic Databases}, booktitle = {Data Engineering, 2007. ICDE 2007. IEEE 23rd International Conference on}, year = {2007}, month = {2007///}, pages = {596 - 605}, publisher = {IEEE}, organization = {IEEE}, abstract = {Probabilistic databases have received considerable attention recently due to the need for storing uncertain data produced by many real world applications.
The widespread use of probabilistic databases is hampered by two limitations: (1) current probabilistic databases make simplistic assumptions about the data (e.g., complete independence among tuples) that make it difficult to use them in applications that naturally produce correlated data, and (2) most probabilistic databases can only answer a restricted subset of the queries that can be expressed using traditional query languages. We address both these limitations by proposing a framework that can represent not only probabilistic tuples, but also correlations that may be present among them. Our proposed framework naturally lends itself to the possible world semantics, thus preserving the precise query semantics extant in current probabilistic databases. We develop an efficient strategy for query evaluation over such probabilistic databases by casting the query processing problem as an inference problem in an appropriately constructed probabilistic graphical model. We present several optimizations specific to probabilistic databases that enable efficient query evaluation. We validate our approach by presenting an experimental evaluation that illustrates the effectiveness of our techniques at answering various queries using real and synthetic datasets.}, isbn = {1-4244-0802-4}, doi = {10.1109/ICDE.2007.367905}, url = {zotero://attachment/12299/}, author = {Sen,Prithviraj and Deshpande, Amol} } @conference {13402, title = {Representing Tuple and Attribute Uncertainty in Probabilistic Databases}, booktitle = {Seventh IEEE International Conference on Data Mining Workshops, 2007. ICDM Workshops 2007}, year = {2007}, month = {2007/10/28/31}, pages = {507 - 512}, publisher = {IEEE}, organization = {IEEE}, abstract = {There has been a recent surge in work in probabilistic databases, propelled in large part by the huge increase in noisy data sources {\textendash} sensor data, experimental data, data from uncurated sources, and many others. There is a growing need to be able to flexibly represent the uncertainties in the data, and to efficiently query the data. Building on existing probabilistic database work, we present a unifying framework which allows a flexible representation of correlated tuple and attribute level uncertainties. An important capability of our representation is the ability to represent shared correlation structures in the data. We provide motivating examples to illustrate when such shared correlation structures are likely to exist. Representing shared correlation structures allows the use of sophisticated inference techniques based on lifted probabilistic inference that, in turn, allows us to achieve significant speedups while computing probabilities for results of user-submitted queries.}, keywords = {attribute uncertainty, Computer science, Conferences, correlation structures, data mining, Data models, database management systems, Educational institutions, inference mechanisms, noisy data sources, probabilistic database, probabilistic inference, Probability distribution, Query processing, Relational databases, Sensor phenomena and characterization, tuple representation, Uncertainty, uncertainty handling}, isbn = {978-0-7695-3019-2}, doi = {10.1109/ICDMW.2007.11}, author = {Sen,P.
and Deshpande, Amol and Getoor, Lise} } @article {17678, title = {A Residual Inverse Power Method}, volume = {UMIACS-TR-2007-09}, year = {2007}, month = {2007/02//}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {The inverse power method involves solving shifted equations of the form $(A -\sigma I)v = u$. This paper describes a variant method in which shifted equations may be solved to a fixed reduced accuracy without affecting convergence. The idea is to alter the right-hand side to produce a correction step to be added to the current approximations. The digits of this step divide into two parts: leading digits that correct the solution and trailing garbage. Hence the step can be evaluated to a reduced accuracy corresponding to the correcting digits. The cost is an additional multiplication by $A$ at each step to generate the right-hand side. Analysis and experiments show that the method is suitable for normal and mildly nonnormal problems.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/4260}, author = {Stewart, G.W.} } @article {18748, title = {Retrieving matching CAD models by using partial 3D point clouds}, journal = {Computer-Aided Design \& Applications}, volume = {4}, year = {2007}, month = {2007///}, pages = {629 - 638}, abstract = {The ability to search for a CAD model that represents a specific physical part is a useful capability that can be used in many different applications. This paper presents an approach to use a partial 3D point cloud of an artifact for retrieving the CAD model of the artifact. We assume that the information about the physical parts will be captured by a single 3D scan that produces dense point clouds. CAD models in our approach are represented as polygonal meshes. Our approach involves segmenting the point cloud and CAD mesh models into surface patches. The next step is to identify corresponding surface patches in point clouds and CAD models that could potentially match. Finally, we compute transformations to align the point cloud to the CAD model and compute distance between them. We also present experimental results to show that our approach can be used to retrieve CAD models of mechanical parts.}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.92.8339\&rep=rep1\&type=pdf}, author = {Ip,C. Y and Gupta,S.K.} } @article {15162, title = {Review of "Probability and Computing: Randomized Algorithms and Probabilistic Analysis by Michael Mitzenmacher and Eli Upfal", Cambridge University Press, 2005}, journal = {SIGACT News}, volume = {38}, year = {2007}, month = {2007/09//}, pages = {20 - 22}, isbn = {0163-5700}, doi = {10.1145/1324215.1324220}, url = {http://doi.acm.org/10.1145/1324215.1324220}, author = {Katz, Jonathan} } @article {12008, title = {A roadmap to the integration of early visual modules}, journal = {International Journal of Computer Vision}, volume = {72}, year = {2007}, month = {2007///}, pages = {9 - 25}, author = {Ogale, A. S and Aloimonos, J.} } @article {13158, title = {Robust appearance modeling for pedestrian and vehicle tracking}, journal = {Multimodal Technologies for Perception of Humans}, year = {2007}, month = {2007///}, pages = {209 - 215}, abstract = {This paper describes a system for tracking people and vehicles for stationary-camera visual surveillance. The appearance of objects being tracked is modeled using mixtures of mixtures of Gaussians. Particle filters are used to track the states of objects.
Results show the robustness of the system to various lighting and object conditions.}, author = {Abd-Almageed, Wael and Davis, Larry S.} } @conference {13137, title = {Robust Object Tracking with Regional Affine Invariant Features}, booktitle = {Computer Vision, 2007. ICCV 2007. IEEE 11th International Conference on}, year = {2007}, month = {2007/10//}, pages = {1 - 8}, abstract = {We present a tracking algorithm based on motion analysis of regional affine invariant image features. The tracked object is represented with a probabilistic occupancy map. Using this map as support, regional features are detected and probabilistically matched across frames. The motion of pixels is then established based on the feature motion. The object occupancy map is in turn updated according to the pixel motion consistency. We describe experiments to measure the sensitivities of our approach to inaccuracy in initialization, and compare it with other approaches.}, keywords = {affine invariant features, feature extraction, image matching, image resolution, motion analysis, object detection, object tracking, pixel motion consistency, regional features, tracking algorithm}, doi = {10.1109/ICCV.2007.4408948}, author = {Tran,Son and Davis, Larry S.} } @conference {12250, title = {Robust Routing with Unknown Traffic Matrices}, booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer Communications. IEEE}, year = {2007}, month = {2007/05//}, pages = {2436 - 2440}, abstract = {In this paper, we present an algorithm for intra-domain traffic engineering. We assume that the traffic matrix, which specifies traffic load between every source-destination pair in the network, is unknown and varies with time, but always lies inside an explicitly defined region. Our goal is to compute a fixed robust routing with best worst case performance for all traffic matrices inside the bounding region. We formulate this problem as a semi-infinite programming problem. Then, we focus on a special case with practical merits, where (1) the traffic matrix region is assumed to be a polytope specified by a finite set of linear inequalities, and (2) our objective is to find the routing that minimizes the maximum link utilization. Under these assumptions, the problem can be formulated as a polynomial size linear programming (LP) problem with a finite number of constraints. We further consider two specific sets of constraints for the traffic matrix region. The first set is based on the hose model and limits the total traffic rate of network point of presence (PoP) nodes. The second set is based on the pipe model and limits the traffic between source-destination pairs. We study the effectiveness of each set of constraints using extensive simulations.}, keywords = {intradomain traffic engineering, linear inequalities, linear programming, maximum link utilization, network point of presence nodes, polynomial size linear programming problem, semiinfinite programming, telecommunication network routing, telecommunication traffic, traffic matrices, unknown traffic matrix region}, doi = {10.1109/INFCOM.2007.296}, author = {Tabatabaee,V. and Kashyap,A. and Bhattacharjee, Bobby and La,R.J. and Shayman,M.A.} } @article {18269, title = {Robust scanner identification based on noise features}, journal = {Proc.
SPIE, Electronic Imaging, Security, Steganography, and Watermarking of Multimedia Contents IX}, volume = {6505}, year = {2007}, month = {2007///}, pages = {0S - 0T}, abstract = {A large portion of digital image data available today is acquired using digital cameras or scanners. While cameras allow digital reproduction of natural scenes, scanners are often used to capture hardcopy art in more controlled scenarios. This paper proposes a new technique for non-intrusive scanner model identification, which can be further extended to perform tampering detection on scanned images. Using only scanned image samples that contain arbitrary content, we construct a robust scanner identifier to determine the brand/model of the scanner used to capture each scanned image. The proposed scanner identifier is based on statistical features of scanning noise. We first analyze scanning noise from several angles, including through image de-noising, wavelet analysis, and neighborhood prediction, and then obtain statistical features from each characterization. Experimental results demonstrate that the proposed method can effectively identify the correct scanner brands/models with high accuracy.}, author = {Gou,H. and Swaminathan,A. and Wu,M.} } @conference {12574, title = {Robust Visual Tracking Using the Time-Reversibility Constraint}, booktitle = {Computer Vision, 2007. ICCV 2007. IEEE 11th International Conference on}, year = {2007}, month = {2007/10//}, pages = {1 - 8}, abstract = {Visual tracking is a very important front-end to many vision applications. We present a new framework for robust visual tracking in this paper. Instead of just looking forward in the time domain, we incorporate both forward and backward processing of video frames using a novel time-reversibility constraint. This leads to a new minimization criterion that combines the forward and backward similarity functions and the distances of the state vectors between the forward and backward states of the tracker. The new framework reduces the possibility of the tracker getting stuck in local minima and significantly improves the tracking robustness and accuracy. Our approach is general enough to be incorporated into most of the current tracking algorithms. We illustrate the improvements due to the proposed approach for the popular KLT tracker and a search based tracker. The experimental results show that the improved KLT tracker significantly outperforms the original KLT tracker. The time-reversibility constraint used for tracking can be incorporated to improve the performance of optical flow, mean shift tracking and other algorithms.}, keywords = {backward processing, forward processing, KLT tracker, minimization criterion, state vectors, time-reversibility constraint, video frame processing, video signal processing, visual tracking}, doi = {10.1109/ICCV.2007.4408956}, author = {Wu,Hao and Chellapa, Rama and Sankaranarayanan,A. C and Zhou,S. K} } @conference {15105, title = {Round Complexity of Authenticated Broadcast with a Dishonest Majority}, booktitle = {Foundations of Computer Science, 2007. FOCS {\textquoteright}07. 48th Annual IEEE Symposium on}, year = {2007}, month = {2007/10//}, pages = {658 - 668}, abstract = {Broadcast among n parties in the presence of t >= n/3 malicious parties is possible only with some additional setup. The most common setup considered is the existence of a PKI and secure, digital signatures, where so-called authenticated broadcast is achievable for any t < n.
It is known that t + 1 rounds are necessary and sufficient for deterministic protocols achieving authenticated broadcast. Recently, however, randomized protocols running in expected constant rounds have been shown for the case of t < n/2. It has remained open whether randomization can improve the round complexity when an honest majority is not present. We address this question and show upper/lower bounds on how much randomization can help: (1) For t <= n/2 + k, we show a randomized broadcast protocol that runs in expected O(k^2) rounds. In particular, we obtain expected constant-round protocols for t = n/2 + O(1). (2) On the negative side, we show that even randomized protocols require Ω(2n/(n-t)) rounds. This in particular rules out expected constant-round protocols when the fraction of honest parties is sub-constant.}, keywords = {authenticated broadcast, broadcasting, computational complexity, cryptographic protocols, deterministic protocols, digital signatures, dishonest majority, PKI, public key cryptography, randomized protocols, round complexity}, doi = {10.1109/FOCS.2007.44}, author = {Garay,J. A and Katz, Jonathan and Koo,Chiu-Yuen and Ostrovsky,R.} } @article {15080, title = {Round-efficient secure computation in point-to-point networks}, journal = {Advances in Cryptology-EUROCRYPT 2007}, year = {2007}, month = {2007///}, pages = {311 - 328}, abstract = {Essentially all work studying the round complexity of secure computation assumes broadcast as an atomic primitive. Protocols constructed under this assumption tend to have very poor round complexity when compiled for a point-to-point network due to the high overhead of emulating each invocation of broadcast. This problem is compounded when broadcast is used in more than one round of the original protocol due to the complexity of handling sequential composition (when using round-efficient emulation of broadcast). We argue that if the goal is to optimize round complexity in point-to-point networks, then it is preferable to design protocols {\textemdash} assuming a broadcast channel {\textemdash} minimizing the number of rounds in which broadcast is used rather than minimizing the total number of rounds. With this in mind, we present protocols for secure computation in a number of settings that use only a single round of broadcast. In all cases, we achieve optimal security threshold for adaptive adversaries, and obtain protocols whose round complexity (in a point-to-point network) improves on prior work.}, doi = {10.1007/978-3-540-72540-4_18}, author = {Katz, Jonathan and Koo,C. Y} } @article {12276, title = {Ranking search results in P2P systems}, journal = {Technical Report, CR-TR-4779, Department of Computer Science, University of Maryland}, year = {2006}, month = {2006///}, abstract = {P2P deployments are a natural infrastructure for building distributed search networks. Proposed systems support locating and retrieving all results, but lack the information necessary to rank them. Users, however, are primarily interested in the most relevant, and not all possible results. Using random sampling, we extend a class of well-known information retrieval ranking algorithms such that they can be applied in this distributed setting. We analyze the overhead of our approach, and quantify exactly how our system scales with increasing number of documents, system size, document to node mapping (uniform versus non-uniform), and types of queries (rare versus popular terms).
Our analysis and simulations show that a) these extensions are efficient, and can scale with little overhead to large systems, and b) the accuracy of the results obtained using distributed ranking is comparable to a centralized implementation.}, author = {Gopalakrishnan,V. and Morselli,R. and Bhattacharjee, Bobby and Keleher,P. and Srinivasan, Aravind} } @article {17649, title = {Ranking search results in peer-to-peer systems}, journal = {Technical Reports from UMIACS}, year = {2006}, month = {2006///}, abstract = {P2P deployments are a natural infrastructure for building distributed search networks. Proposed systems support locating and retrieving all results, but lack the information necessary to rank them. Users, however, are primarily interested in the most relevant, and not all possible results. Using random sampling, we extend a class of well-known information retrieval ranking algorithms such that they can be applied in this distributed setting. We analyze the overhead of our approach, and quantify exactly how our system scales with increasing number of documents, system size, document to node mapping (uniform versus non-uniform), and types of queries (rare versus popular terms). Our analysis and simulations show that a) these extensions are efficient, and can scale with little overhead to large systems, and b) the accuracy of the results obtained using distributed ranking is comparable to a centralized implementation.}, author = {Gopalakrishnan,V. and Morselli,R. and Bhattacharjee, Bobby and Keleher,P. and Srinivasan, Aravind} } @article {15154, title = {Rational secret sharing, revisited}, journal = {Security and Cryptography for Networks}, year = {2006}, month = {2006///}, pages = {229 - 241}, abstract = {We consider the problem of secret sharing among n rational players. This problem was introduced by Halpern and Teague (STOC 2004), who claim that a solution is impossible for n=2 but show a solution for the case n>=3. Contrary to their claim, we show a protocol for rational secret sharing among n=2 players; our protocol extends to the case n>=3, where it is simpler than the Halpern-Teague solution and also offers a number of other advantages. We also show how to avoid the continual involvement of the dealer, in either our own protocol or that of Halpern and Teague. Our techniques extend to the case of rational players trying to securely compute an arbitrary function, under certain assumptions on the utilities of the players.}, doi = {10.1007/11832072_16}, author = {Gordon,S. and Katz, Jonathan} } @book {13182, title = {Real-Time Distributed Algorithms for Visual and Battlefield Reasoning}, year = {2006}, month = {2006///}, publisher = {MARYLAND UNIV COLLEGE PARK OFFICE OF RESEARCH ADMINISTRATION AND ADVANCEMENT}, organization = {MARYLAND UNIV COLLEGE PARK OFFICE OF RESEARCH ADMINISTRATION AND ADVANCEMENT}, abstract = {Information is key to the success of the next generation battlefield. There is a critical need to determine, in real-time, what the enemy is doing, and to interpret that information in the context of past related events. In this project we examined two aspects of this issue: development of a high-level task definition language for tasking a network of sensors to carry out given objectives, and interpreting recounted events so that past related scenarios could be automatically identified from a case database.}, author = {Davis, Larry S. and Basili, Victor R. and V.S. Subrahmanian and Reggia, James A.
and Aloimonos, J.} } @conference {13169, title = {Real-Time Human Detection, Tracking, and Verification in Uncontrolled Camera Motion Environments}, booktitle = {Computer Vision Systems, 2006 ICVS {\textquoteright}06. IEEE International Conference on}, year = {2006}, month = {2006/01//}, pages = {41 - 41}, abstract = {In environments where a camera is installed on a freely moving platform, e.g. a vehicle or a robot, object detection and tracking become much more difficult. In this paper, we present a real-time system for human detection, tracking, and verification in such challenging environments. To deliver a robust performance, the system integrates several computer vision algorithms to perform its function: a human detection algorithm, an object tracking algorithm, and a motion analysis algorithm. To utilize the available computing resources to the maximum possible extent, each of the system components is designed to work in a separate thread that communicates with the other threads through shared data structures. The focus of this paper is more on the implementation issues than on the algorithmic issues of the system. Object oriented design was adopted to abstract algorithmic details away from the system structure.}, doi = {10.1109/ICVS.2006.52}, author = {Hussein,M. and Abd-Almageed, Wael and Yang Ran and Davis, Larry S.} } @conference {12610, title = {Recognition of Multi-Object Events Using Attribute Grammars}, booktitle = {Image Processing, 2006 IEEE International Conference on}, year = {2006}, month = {2006/10//}, pages = {2897 - 2900}, abstract = {We present a method for representing and recognizing visual events using attribute grammars. In contrast to conventional grammars, attribute grammars are capable of describing features that are not easily represented by finite symbols. Our approach handles multiple concurrent events involving multiple entities by associating unique object identification labels with multiple event threads. Probabilistic parsing and probabilistic conditions on the attributes are used to achieve a robust recognition system. We demonstrate the effectiveness of our method for the task of recognizing vehicle casing in parking lots and events occurring in an airport tarmac.}, keywords = {attribute grammars, multiobject event recognition, object identification labels, object representation, probabilistic parsing, probability, image recognition}, doi = {10.1109/ICIP.2006.313035}, author = {Joo,Seong-Wook and Chellapa, Rama} } @conference {12374, title = {Reconfigurable image registration on FPGA platforms}, booktitle = {Biomedical Circuits and Systems Conference, 2006. BioCAS 2006. IEEE}, year = {2006}, month = {2006///}, pages = {154 - 157}, author = {Sen,M. and Hemaraj,Y. and Bhattacharyya, Shuvra S. and Shekhar,R.} } @article {14637, title = {Recurring genomic breaks in independent lineages support genomic fragility}, journal = {BMC Evolutionary Biology}, volume = {6}, year = {2006}, month = {2006/11/07/}, pages = {90 - 90}, abstract = {Recent findings indicate that evolutionary breaks in the genome are not randomly distributed, and that certain regions, so-called fragile regions, are predisposed to breakages. Previous approaches to the study of genomic fragility have examined the distribution of breaks, as well as the coincidence of breaks with segmental duplications and repeats, within a single species.
In contrast, we investigate whether this regional fragility is an inherent genomic characteristic and is thus conserved over multiple independent lineages.}, isbn = {1471-2148}, doi = {10.1186/1471-2148-6-90}, url = {http://www.biomedcentral.com/1471-2148/6/90}, author = {Hinsch,Hanno and Hannenhalli, Sridhar} } @conference {19459, title = {Reflecting on Health: A System for Students to Monitor Diet and Exercise}, booktitle = {SIGCHI EA {\textquoteright}06}, series = {CHI EA {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {1807 - 1812}, publisher = {ACM}, organization = {ACM}, abstract = {Using an iterative design process, we designed and evaluated a system for college students to encourage the development and maintenance of healthy diet and exercise habits. The system has three components: a camera phone application to support photographic diet and exercise journaling, an automatic workout tracking application for exercise machines in the gym, and a visualization application to support users as they reflect on their diet and exercise activities.}, keywords = {diet, exercise, journaling, mobile phone, Visualization}, isbn = {1-59593-298-4}, url = {http://doi.acm.org/10.1145/1125451.1125794}, author = {Brown, Brandon and Marshini Chetty and Grimes, Andrea and Harmon, Ellie} } @conference {12379, title = {Register File Partitioning with Constraint Programming}, booktitle = {System-on-Chip, 2006. International Symposium on}, year = {2006}, month = {2006///}, pages = {1 - 4}, author = {Salmela,P. and Shen,C. C and Bhattacharyya, Shuvra S. and Takala,J.} } @article {14394, title = {Relational clustering for entity resolution queries}, journal = {ICML 2006 Workshop on Statistical Relational Learning (SRL)}, year = {2006}, month = {2006///}, abstract = {The goal of entity resolution is to reconcile database references corresponding to the same real-world entities. Given the abundance of publicly available databases where entities are not resolved, we motivate the problem of quickly processing queries that require resolved entities from such {\textquoteleft}unclean{\textquoteright} databases. We first propose a cut-based relational clustering formulation for collective entity resolution. We then show how it can be performed on-the-fly by adaptively extracting and resolving those database references that are the most helpful for resolving the query. We validate our approach on two large real-world publication databases, where we show the usefulness of collective resolution and at the same time demonstrate the need for adaptive strategies for query processing. We then show how the same queries can be answered in real time using our adaptive approach while preserving the gains of collective resolution. }, author = {Bhattacharya,I. and Licamele,L. and Getoor, Lise} } @conference {15251, title = {Relay Placement for Higher Order Connectivity in Wireless Sensor Networks}, booktitle = {INFOCOM 2006. 25th IEEE International Conference on Computer Communications. Proceedings}, year = {2006}, month = {2006/04//}, pages = {1 - 12}, abstract = {Not available}, doi = {10.1109/INFOCOM.2006.273}, author = {Kashyap,A.
and Khuller, Samir and Shayman,M.} } @conference {15150, title = {Reliable broadcast in radio networks: the bounded collision case}, booktitle = {Proceedings of the twenty-fifth annual ACM symposium on Principles of distributed computing}, series = {PODC {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {258 - 264}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We study the problem of achieving global broadcast in a radio network where a node can multicast messages to all of its neighbors (that is, nodes within some given distance r), and up to t nodes in any single neighborhood may be corrupted. Previous work assumes that corrupted nodes can neither cause collisions nor spoof addresses of honest nodes. In this work, we eliminate these assumptions and allow each faulty node to cause a (known) bounded number of collisions and spoof the addresses of arbitrary other nodes. We show that the maximum tolerable t in this case is identical to the maximum tolerable t when collisions and address spoofing are not allowed. Thus, by causing collisions and spoofing addresses an adversary may be able to degrade the efficiency of achieving broadcast, but it cannot affect the feasibility of this task.}, keywords = {broadcast, byzantine failure, Fault tolerance, radio networks}, isbn = {1-59593-384-0}, doi = {10.1145/1146381.1146420}, url = {http://doi.acm.org/10.1145/1146381.1146420}, author = {Koo,Chiu-Yuen and Bhandari,Vartika and Katz, Jonathan and Vaidya,Nitin H.} } @conference {13849, title = {Reranking for Sentence Boundary Detection in Conversational Speech}, booktitle = {2006 IEEE International Conference on Acoustics, Speech and Signal Processing, 2006. ICASSP 2006 Proceedings}, volume = {1}, year = {2006}, month = {2006/05/14/19}, pages = {I-I - I-I}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present a reranking approach to sentence-like unit (SU) boundary detection, one of the EARS metadata extraction tasks. Techniques for generating relatively small n-best lists with high oracle accuracy are presented. For each candidate, features are derived from a range of information sources, including the output of a number of parsers. Our approach yields significant improvements over the best performing system from the NIST RT-04F community evaluation}, keywords = {Automatic speech recognition, conversational speech, data mining, Ear, EARS metadata extraction tasks, Feature extraction, hidden Markov models, meta data, Model driven engineering, NIST, NIST RT-04F community evaluation, oracle accuracy, performance evaluation, reranking, sentence-like unit boundary detection, Speech processing, Speech recognition, Telephony}, isbn = {1-4244-0469-X}, doi = {10.1109/ICASSP.2006.1660078}, author = {Roark,B. and Liu,Yang and Harper,M. and Stewart,R. and Lease,M. and Snover,M. and Shafran,I. and Dorr, Bonnie J and Hale,J. and Krasnyanskaya,A. and Yung,L.} } @article {17348, title = {Research-based web design \& usability guidelines}, journal = {Background and Methodology. US Department of Health and Human Services. Washington}, year = {2006}, month = {2006///}, author = {Leavitt,M.O. 
and Shneiderman, Ben} } @article {17650, title = {Resilient multicast using overlays}, journal = {IEEE/ACM Transactions on Networking}, volume = {14}, year = {2006}, month = {2006/04//}, pages = {237 - 248}, abstract = {We introduce Probabilistic Resilient Multicast (PRM): a multicast data recovery scheme that improves data delivery ratios while maintaining low end-to-end latencies. PRM has both proactive and reactive components; in this paper we describe how PRM can be used to improve the performance of application-layer multicast protocols especially when there are high packet losses and host failures. Through detailed analysis in this paper, we show that this loss recovery technique has efficient scaling properties: the overheads at each overlay node asymptotically decrease to zero with increasing group sizes. As a detailed case study, we show how PRM can be applied to the NICE application-layer multicast protocol. We present detailed simulations of the PRM-enhanced NICE protocol for 10,000-node Internet-like topologies. Simulations show that PRM achieves a high delivery ratio (>97\%) with a low latency bound (600 ms) for environments with high end-to-end network losses (1\%-5\%) and high topology change rates (5 changes per second) while incurring very low overheads (<5\%).}, keywords = {application-layer multicast protocols, Computer science, Data communication, Delay, Internet, Internet-like topologies, IP networks, loss recovery technique, Multicast, multicast data recovery scheme, Multicast protocols, Network topology, NETWORKS, overlays, Performance loss, probabilistic forwarding, probabilistic resilient multicast, Protocols, Resilience, Streaming media, telecommunication network topology, Terminology}, isbn = {1063-6692}, doi = {10.1109/TNET.2006.872579}, author = {Banerjee,S. and Lee,Seungjoon and Bhattacharjee, Bobby and Srinivasan, Aravind} } @conference {12275, title = {Resource Discovery Techniques in Distributed Desktop Grid Environments}, booktitle = {Proceedings of the 7th IEEE/ACM International Conference on Grid Computing}, series = {GRID {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {9 - 16}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, abstract = {Desktop grids use opportunistic sharing to exploit large collections of personal computers and workstations across the Internet, achieving tremendous computing power at low cost. Traditional desktop grid systems are typically based on a client-server architecture, which has inherent shortcomings with respect to robustness, reliability and scalability. In this paper, we propose a decentralized, robust, highly available, and scalable infrastructure to match incoming jobs to available resources.
Through a comparative analysis on the experimental results obtained via simulation of three different types of matchmaking algorithms under different workload scenarios, we show the trade-offs between efficient matchmaking and good load balancing in a fully decentralized, heterogeneous computational environment.}, isbn = {1-4244-0343-X}, doi = {10.1109/ICGRID.2006.310992}, url = {http://dx.doi.org/10.1109/ICGRID.2006.310992}, author = {Kim,Jik-Soo and Nam,Beomseok and Keleher,Peter and Marsh,Michael and Bhattacharjee, Bobby and Sussman, Alan} } @article {14641, title = {Retroviral DNA Integration: Viral and Cellular Determinants of Target-Site Selection}, journal = {PLoS Pathog}, volume = {2}, year = {2006}, month = {2006/06/23/}, pages = {e60 - e60}, abstract = {Retroviruses differ in their preferences for sites for viral DNA integration in the chromosomes of infected cells. Human immunodeficiency virus (HIV) integrates preferentially within active transcription units, whereas murine leukemia virus (MLV) integrates preferentially near transcription start sites and CpG islands. We investigated the viral determinants of integration-site selection using HIV chimeras with MLV genes substituted for their HIV counterparts. We found that transferring the MLV integrase (IN) coding region into HIV (to make HIVmIN) caused the hybrid to integrate with a specificity close to that of MLV. Addition of MLV gag (to make HIVmGagmIN) further increased the similarity of target-site selection to that of MLV. A chimeric virus with MLV Gag only (HIVmGag) displayed targeting preferences different from that of both HIV and MLV, further implicating Gag proteins in targeting as well as IN. We also report a genome-wide analysis indicating that MLV, but not HIV, favors integration near DNase I{\textendash}hypersensitive sites (i.e., +/- 1 kb), and that HIVmIN and HIVmGagmIN also favored integration near these features. These findings reveal that IN is the principal viral determinant of integration specificity; they also reveal a new role for Gag-derived proteins, and strengthen models for integration targeting based on tethering of viral IN proteins to host proteins.}, doi = {10.1371/journal.ppat.0020060}, url = {http://dx.plos.org/10.1371/journal.ppat.0020060}, author = {Lewinski,Mary K and Yamashita,Masahiro and Emerman,Michael and Ciuffi,Angela and Marshall,Heather and Crawford,Gregory and Collins,Francis and Shinn,Paul and Leipzig,Jeremy and Hannenhalli, Sridhar and Berry,Charles C and Ecker,Joseph R and Bushman,Frederic D.} } @conference {18487, title = {Revealing botnet membership using DNSBL counter-intelligence}, booktitle = {Proc. 2nd USENIX Steps to Reducing Unwanted Traffic on the Internet}, year = {2006}, month = {2006///}, pages = {49 - 54}, abstract = {Botnets{\textemdash}networks of (typically compromised) machines{\textemdash}are often used for nefarious activities (e.g., spam, click fraud, denial-of-service attacks, etc.). Identifying members of botnets could help stem these attacks, but passively detecting botnet membership (i.e., without disrupting the operation of the botnet) proves to be difficult. This paper studies the effectiveness of monitoring lookups to a DNS-based blackhole list (DNSBL) to expose botnet membership. We perform counter-intelligence based on the insight that botmasters themselves perform DNSBL lookups to determine whether their spamming bots are blacklisted.
Using heuristics to identify which DNSBL lookups are perpetrated by a botmaster performing such reconnaissance, we are able to compile a list of likely bots. This paper studies the prevalence of DNSBL reconnaissance observed at a mirror of a well-known blacklist for a 45-day period, identifies the means by which botmasters are performing reconnaissance, and suggests the possibility of using counter-intelligence to discover likely bots. We find that bots are performing reconnaissance on behalf of other bots. Based on this finding, we suggest counter-intelligence techniques that may be useful for early bot detection. }, author = {Ramachandran,A. and Feamster, Nick and Dagon,D.} } @article {15160, title = {Review of "A Computational Introduction to Number Theory and Algebra by Victor Shoup", Cambridge University Press, 2005}, journal = {SIGACT News}, volume = {37}, year = {2006}, month = {2006/03//}, pages = {12 - 13}, isbn = {0163-5700}, doi = {10.1145/1122480.1122483}, url = {http://doi.acm.org/10.1145/1122480.1122483}, author = {Katz, Jonathan} } @article {15161, title = {Review of "Primality Testing in Polynomial Time by Martin Dietzfelbinger", Springer-Verlag, 2004}, journal = {SIGACT News}, volume = {37}, year = {2006}, month = {2006/03//}, pages = {14 - 15}, isbn = {0163-5700}, doi = {10.1145/1122480.1122484}, url = {http://doi.acm.org/10.1145/1122480.1122484}, author = {Katz, Jonathan} } @article {17652, title = {Review of "The Random Projection Method by Santosh Vempala"}, journal = {SIGACT News}, volume = {37}, year = {2006}, month = {2006/12//}, pages = {41 - 43}, isbn = {0163-5700}, doi = {10.1145/1189056.1189066}, url = {http://doi.acm.org/10.1145/1189056.1189066}, author = {Srinivasan, Aravind} } @article {18506, title = {Revisiting Internet addressing: Back to the future}, volume = {MIT-CSAIL-TR-2006-025}, year = {2006}, month = {2006///}, institution = {Massachusetts Institute of Technology Computer Science and Artificial Intelligence Laboratory}, abstract = {IP prefixes undermine three goals of Internet routing: accurate reflection of network-layer reachability, secure routing messages, and effective traffic control. This paper presents Atomic IP (AIP), a simple change to Internet addressing (which in fact reverts to how addressing once worked), that allows Internet routing to achieve these goals. }, author = {Vutukuru,M. and Feamster, Nick and Walfish,M. and Balakrishnan,H. and Shenker,S.} } @article {15166, title = {Ring signatures: Stronger definitions, and constructions without random oracles}, journal = {Theory of Cryptography}, year = {2006}, month = {2006///}, pages = {60 - 79}, abstract = {Ring signatures, first introduced by Rivest, Shamir, and Tauman, enable a user to sign a message so that a ring of possible signers (of which the user is a member) is identified, without revealing exactly which member of that ring actually generated the signature. In contrast to group signatures, ring signatures are completely {\textquotedblleft}ad-hoc{\textquotedblright} and do not require any central authority or coordination among the various users (indeed, users do not even need to be aware of each other); furthermore, ring signature schemes grant users fine-grained control over the level of anonymity associated with any particular signature. This paper has two main areas of focus.
First, we examine previous definitions of security for ring signature schemes and suggest that most of these prior definitions are too weak, in the sense that they do not take into account certain realistic attacks. We propose new definitions of anonymity and unforgeability which address these threats, and then give separation results proving that our new notions are strictly stronger than previous ones. Next, we show two constructions of ring signature schemes in the standard model: one based on generic assumptions which satisfies our strongest definitions of security, and a second, more efficient scheme achieving weaker security guarantees and more limited functionality. These are the first constructions of ring signature schemes that do not rely on random oracles or ideal ciphers. }, doi = {10.1007/11681878_4}, author = {Bender,A. and Katz, Jonathan and Morselli,R.} } @article {18267, title = {Robust and secure image hashing}, journal = {Information Forensics and Security, IEEE Transactions on}, volume = {1}, year = {2006}, month = {2006/06//}, pages = {215 - 230}, abstract = {Image hash functions find extensive applications in content authentication, database search, and watermarking. This paper develops a novel algorithm for generating an image hash based on Fourier transform features and controlled randomization. We formulate the robustness of image hashing as a hypothesis testing problem and evaluate the performance under various image processing operations. We show that the proposed hash function is resilient to content-preserving modifications, such as moderate geometric and filtering distortions. We introduce a general framework to study and evaluate the security of image hashing systems. Under this new framework, we model the hash values as random variables and quantify their uncertainty in terms of differential entropy. Using this security framework, we analyze the security of the proposed schemes and several existing representative methods for image hashing. We then examine the security versus robustness tradeoff and show that the proposed hashing methods can provide excellent security and robustness.}, keywords = {content-preserving modifications, cryptography, differential entropy, filtering distortions, Fourier transforms, hash functions, image hashing, image processing, information theory, secure hashing}, isbn = {1556-6013}, doi = {10.1109/TIFS.2006.873601}, author = {Swaminathan,A. and Mao,Yinian and M. Wu} } @article {12618, title = {Robust ego-motion estimation and 3-D model refinement using surface parallax}, journal = {Image Processing, IEEE Transactions on}, volume = {15}, year = {2006}, month = {2006/05//}, pages = {1215 - 1225}, abstract = {We present an iterative algorithm for robustly estimating the ego-motion and refining and updating a coarse depth map using parametric surface parallax models and brightness derivatives extracted from an image pair. Given a coarse depth map acquired by a range-finder or extracted from a digital elevation map (DEM), ego-motion is estimated by combining a global ego-motion constraint and a local brightness constancy constraint. Using the estimated camera motion and the available depth estimate, motion of the three-dimensional (3-D) points is compensated. We utilize the fact that the resulting surface parallax field is an epipolar field, and knowing its direction from the previous motion estimates, estimate its magnitude and use it to refine the depth map estimate.
The parallax magnitude is estimated using a constant parallax model (CPM) which assumes a smooth parallax field and a depth based parallax model (DBPM), which models the parallax magnitude using the given depth map. We obtain confidence measures for determining the accuracy of the estimated depth values which are used to remove regions with potentially incorrect depth estimates for robustly estimating ego-motion in subsequent iterations. Experimental results using both synthetic and real data (both indoor and outdoor sequences) illustrate the effectiveness of the proposed algorithm.}, keywords = {3D model refinement, algorithms, Artificial intelligence, constant parallax model, depth based parallax model, digital elevation map, epipolar field, Image Enhancement, Image Interpretation (Computer-Assisted), Imaging (Three-Dimensional), Information Storage and Retrieval, iterative algorithm, iterative methods, Motion estimation, Pattern Recognition (Automated), robust ego-motion estimation, smooth parallax field, Subtraction Technique, surface parallax}, isbn = {1057-7149}, doi = {10.1109/TIP.2005.864167}, author = {Agrawal,A. and Chellapa, Rama} } @article {15075, title = {Robust fuzzy extractors and authenticated key agreement from close secrets}, journal = {Advances in Cryptology-CRYPTO 2006}, year = {2006}, month = {2006///}, pages = {232 - 250}, abstract = {Consider two parties holding correlated random variables W and W', respectively, that are within distance t of each other in some metric space. These parties wish to agree on a uniformly distributed secret key R by sending a single message over an insecure channel controlled by an all-powerful adversary. We consider both the keyless case, where the parties share no additional secret information, and the keyed case, where the parties share a long-term secret SK that they can use to generate a sequence of session keys {R_j} using multiple pairs {(W_j, W'_j)}. The former has applications to, e.g., biometric authentication, while the latter arises in, e.g., the bounded storage model with errors. Our results improve upon previous work in several respects: {\textendash} The best previous solution for the keyless case with no errors (i.e., t=0) requires the min-entropy of W to exceed 2|W|/3. We show a solution when the min-entropy of W exceeds the minimal threshold |W|/2. {\textendash} Previous solutions for the keyless case in the presence of errors (i.e., t>0) required random oracles. We give the first constructions (for certain metrics) in the standard model. {\textendash} Previous solutions for the keyed case were stateful. We give the first stateless solution. }, doi = {10.1007/11818175_14}, author = {Dodis,Y. and Katz, Jonathan and Reyzin,L. and Smith,A.} } @conference {15246, title = {A robust maximum completion time measure for scheduling}, booktitle = {Proceedings of the seventeenth annual ACM-SIAM symposium on Discrete algorithm}, series = {SODA {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {324 - 333}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {One popular measure for evaluating the performance of scheduling algorithms is the maximum response time of any job (makespan). Typically the objective is to find a schedule that minimizes the maximum response time over all jobs. One drawback of this measure is that a relatively small number of jobs in the request set could cause the maximum response time to be very high.
Thus, this measure reflects local rather than global properties of the request set. In this paper we consider a robust generalization of this measure. Our goal is to minimize T, such that a given fraction of jobs can be scheduled with a response time of at most T. We demonstrate the applicability of this measure in the context of broadcast scheduling. We show that in the online setting no constant factor online approximation is possible for the problem of minimizing the maximum response time for a given fraction of jobs in the context of broadcast scheduling. We give a factor 5, polynomial time offline approximation algorithm for the problem of minimizing the maximum response time for a given fraction of jobs in the context of broadcast scheduling.}, isbn = {0-89871-605-5}, doi = {10.1145/1109557.1109594}, url = {http://doi.acm.org/10.1145/1109557.1109594}, author = {Charikar,Moses and Khuller, Samir} } @article {13636, title = {Robust Point Matching for Nonrigid Shapes By Preserving Local Neighborhood Structures}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {28}, year = {2006}, month = {2006/04//}, pages = {643 - 649}, abstract = {In previous work on point matching, a set of points is often treated as an instance of a joint distribution to exploit global relationships in the point set. For nonrigid shapes, however, the local relationship among neighboring points is stronger and more stable than the global one. In this paper, we introduce the notion of a neighborhood structure for the general point matching problem. We formulate point matching as an optimization problem to preserve local neighborhood structures during matching. Our approach has a simple graph matching interpretation, where each point is a node in the graph, and two nodes are connected by an edge if they are neighbors. The optimal match between two graphs is the one that maximizes the number of matched edges. Existing techniques are leveraged to search for an optimal solution with the shape context distance used to initialize the graph matching, followed by relaxation labeling updates for refinement. Extensive experiments show the robustness of our approach under deformation, noise in point locations, outliers, occlusion, and rotation. It outperforms the shape context and TPS-RPM algorithms on most scenarios.}, author = {Yefeng Zheng and David Doermann} } @conference {15020, title = {Robust technologies for automated ingestion and long-term preservation of digital information}, booktitle = {Proceedings of the 2006 international conference on Digital government research}, series = {dg.o {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {285 - 286}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {In this summary, we present an overview of our DIGARCH project and report on a number of significant advances achieved thus far. In particular, we highlight our contributions to the development of a novel architecture for the Global Digital Format Registry, the design of a highly reliable and scalable deep archive, and the development of the underpinnings of a policy-driven management of preservation processes.
Challenges and future plans are briefly outlined.}, keywords = {automated ingestion, digital archiving, digital preservation, format registry, management of preservation processes}, doi = {10.1145/1146598.1146674}, url = {http://dx.doi.org/10.1145/1146598.1146674}, author = {JaJa, Joseph F.} } @conference {15305, title = {The role of knowledge in conceptual retrieval: a study in the domain of clinical medicine}, booktitle = {Proceedings of the 29th annual international ACM SIGIR conference on Research and development in information retrieval}, series = {SIGIR {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {99 - 106}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Despite its intuitive appeal, the hypothesis that retrieval at the level of "concepts" should outperform purely term-based approaches remains unverified empirically. In addition, the use of "knowledge" has not consistently resulted in performance gains. After identifying possible reasons for previous negative results, we present a novel framework for "conceptual retrieval" that articulates the types of knowledge that are important for information seeking. We instantiate this general framework in the domain of clinical medicine based on the principles of evidence-based medicine (EBM). Experiments show that an EBM-based scoring algorithm dramatically outperforms a state-of-the-art baseline that employs only term statistics. Ablation studies further yield a better understanding of the performance contributions of different components. Finally, we discuss how other domains can benefit from knowledge-based approaches.}, keywords = {Question answering, reranking, semantic models}, isbn = {1-59593-369-7}, doi = {10.1145/1148170.1148191}, url = {http://doi.acm.org/10.1145/1148170.1148191}, author = {Jimmy Lin and Demner-Fushman,Dina} } @conference {15677, title = {Routing for data delivery in dynamic networks}, booktitle = {Proceedings of the IEEE MILCOM}, year = {2006}, month = {2006///}, pages = {1 - 7}, author = {Mundur, Padma and Lee,S. and Seligman,M.} } @article {15695, title = {Routing in Delay Tolerant Networks Using Storage Domains}, volume = {UMIACS-TR-2007-01}, year = {2006}, month = {2006/11/20/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {In this paper, we present a routing algorithm for a class of dynamic networks called Delay Tolerant Networks (DTNs). The proposed algorithm takes into account the quintessential DTN characteristic, namely intermittent link connectivity. We modify the breadth first search (BFS) algorithm to take into account link state changes and find the quickest route between source and destination nodes. We adopt a message drop policy at intermediate nodes to incorporate storage constraints. We also introduce the idea of time-varying storage domains where all nodes connected for a length of time act as a single storage unit by sharing the aggregated storage capacity of the nodes. We evaluate the routing algorithm with and without storage domain in an extensive simulation. We analyze the performance using metrics such as delivery ratio, incomplete transfers with no routes and dropped messages. The DTN topology dynamics are analyzed by varying: number of nodes generating traffic, link probability, link availability through combinations of downtime/uptime values, storage per node, message size, and traffic.
The delay performance of the proposed algorithms is conceptually the same as flooding-based algorithms but without the penalty of multiple copies. More significantly, we show that the Quickest Storage Domain (Quickest SD) algorithm distributes the storage demand across many nodes in the network topology, enabling balanced load and higher network utilization. In fact, we show that for the same level of performance, we can actually cut the storage requirement in half using the Quickest SD algorithm. }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/4024}, author = {Mundur, Padma and Lee,Sookyoung and Seligman,Matthew} } @conference {15686, title = {Routing in intermittent network topologies}, booktitle = {Proceedings of the 9th ACM international symposium on Modeling analysis and simulation of wireless and mobile systems}, series = {MSWiM {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {385 - 389}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {delay tolerant network (DTN), performance evaluation, routing algorithms}, isbn = {1-59593-477-4}, doi = {10.1145/1164717.1164782}, url = {http://doi.acm.org/10.1145/1164717.1164782}, author = {Mundur, Padma and Lee,Sookyoung and Seligman,Matthew} } @article {16943, title = {A Rank-by-Feature Framework for Interactive Exploration of Multidimensional Data (2004)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {Interactive exploration of multidimensional data sets is challenging because: (1) it is difficult to comprehend patterns in more than three dimensions, and (2) current systems often are a patchwork of graphical and statistical methods leaving many researchers uncertain about how to explore their data in an orderly manner. We offer a set of principles and a novel rank-by-feature framework that could enable users to better understand distributions in one (1D) or two dimensions (2D), and then discover relationships, clusters, gaps, outliers, and other features. Users of our framework can view graphical presentations (histograms, boxplots, and scatterplots), and then choose a feature detection criterion to rank 1D or 2D axis-parallel projections. By combining information visualization techniques (overview, coordination, and dynamic query) with summaries and statistical methods users can systematically examine the most important 1D and 2D axis-parallel projections. We summarize our Graphics, Ranking, and Interaction for Discovery (GRID) principles as: (1) 1D, 2D, then features (2) graphics, ranking, summaries, then statistics. 
We implemented the rank-by-feature framework in the Hierarchical Clustering Explorer, but the same data exploration principles could enable users to organize their discovery process so as to produce more thorough analyses and extract deeper insights in any multidimensional data application, such as spreadsheets, statistical packages, or information visualization tools.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6524}, author = {Seo,Jinwook and Shneiderman, Ben} } @article {16942, title = {A Rank-by-Feature Framework for Interactive Exploration of Multidimensional Data}, journal = {Information VisualizationInformation Visualization}, volume = {4}, year = {2005}, month = {2005/06/20/}, pages = {96 - 113}, abstract = {Interactive exploration of multidimensional data sets is challenging because: (1) it is difficult to comprehend patterns in more than three dimensions, and (2) current systems often are a patchwork of graphical and statistical methods leaving many researchers uncertain about how to explore their data in an orderly manner. We offer a set of principles and a novel rank-by-feature framework that could enable users to better understand distributions in one (1D) or two dimensions (2D), and then discover relationships, clusters, gaps, outliers, and other features. Users of our framework can view graphical presentations (histograms, boxplots, and scatterplots), and then choose a feature detection criterion to rank 1D or 2D axis-parallel projections. By combining information visualization techniques (overview, coordination, and dynamic query) with summaries and statistical methods users can systematically examine the most important 1D and 2D axis-parallel projections. We summarize our Graphics, Ranking, and Interaction for Discovery (GRID) principles as: (1) study 1D, study 2D, then find features (2) ranking guides insight, statistics confirm. We implemented the rank-by-feature framework in the Hierarchical Clustering Explorer, but the same data exploration principles could enable users to organize their discovery process so as to produce more thorough analyses and extract deeper insights in any multidimensional data application, such as spreadsheets, statistical packages, or information visualization tools.}, keywords = {dynamic query, exploratory data analysis, feature detection/selection, graphical displays, Information Visualization, Rank-by-feature framework}, isbn = {1473-8716, 1473-8724}, doi = {10.1057/palgrave.ivs.9500091}, url = {http://ivi.sagepub.com/content/4/2/96}, author = {Seo,Jinwook and Shneiderman, Ben} } @conference {15482, title = {Rapid "crash testing" for continuously evolving GUI-based software applications}, booktitle = {Software Maintenance, 2005. ICSM{\textquoteright}05. Proceedings of the 21st IEEE International Conference on}, year = {2005}, month = {2005/09//}, pages = {473 - 482}, abstract = {Several rapid-feedback-based quality assurance mechanisms are used to manage the quality of continuously evolving software. Even though graphical user interfaces (GUIs) are one of the most important parts of software, there are currently no mechanisms to quickly retest evolving GUI software. We leverage our previous work on GUI testing to define a new automatic GUI re-testing process called "crash testing" that is integrated with GUI evolution. 
We describe two levels of crash testing: (1) immediate feedback-based in which a developer indicates that a GUI bug was fixed in response to a previously reported crash; only select crash test cases are rerun and the developer is notified of the results in a matter of seconds, and (2) between code changes in which new crash test cases are generated on-the-fly and executed on the GUI. Since the code may be changed by another developer before all the crash tests have been executed, hence requiring restarting of the process, we use a simple rotation-based scheme to ensure that all crash tests are executed over a series of code changes. We show, via empirical studies, that our crash tests are effective at revealing serious problems in the GUI.}, keywords = {crash testing, graphical user interface software retesting, Graphical user interfaces, GUI-based software application, immediate feedback, program testing, rapid-feedback-based quality assurance, software evolution, Software maintenance, software prototyping, Software quality}, doi = {10.1109/ICSM.2005.72}, author = {Xie,Q. and Memon, Atif M.} } @conference {17790, title = {RDF aggregate queries and views}, booktitle = {Data Engineering, 2005. ICDE 2005. Proceedings. 21st International Conference on}, year = {2005}, month = {2005/04//}, pages = {717 - 728}, abstract = {Resource description framework (RDF) is a rapidly expanding Web standard. RDF databases attempt to track the massive amounts of Web data and services available. In this paper, we study the problem of aggregate queries. We develop an algorithm to compute answers to aggregate queries over RDF databases and algorithms to maintain views involving those aggregates. Though RDF data can be stored in a standard relational DBMS (and hence we can execute standard relational aggregate queries and view maintenance methods on them), we show experimentally that our algorithms that operate directly on the RDF representation exhibit significantly superior performance.}, keywords = {aggregate queries, query languages, query processing, RDF databases, relational DBMS, resource description framework, view maintenance methods, Web standards}, doi = {10.1109/ICDE.2005.121}, author = {Hung,E. and Deng,Yu and V.S. Subrahmanian} } @article {13206, title = {Real-time foreground-background segmentation using codebook model}, journal = {Real-time imaging}, volume = {11}, year = {2005}, month = {2005///}, pages = {172 - 185}, author = {Kim,K. and Chalidabhongse,T.H. and Harwood,D. and Davis, Larry S.} } @conference {15994, title = {On the reasoning of real-world agents: Toward a semantics for active logic}, booktitle = {7th Annual Symposium on the Logical Formalization of Commonsense Reasoning}, year = {2005}, month = {2005///}, author = {Anderson,M. L and Gomaa,W. and Grant,J. and Perlis, Don} } @article {12663, title = {Recognition of Humans and Their Activities Using Video}, journal = {Synthesis Lectures on Image, Video, and Multimedia Processing}, volume = {1}, year = {2005}, month = {2005/01//}, pages = {1 - 173}, abstract = {The recognition of humans and their activities from video sequences is currently a very active area of research because of its applications in video surveillance, design of realistic entertainment systems, multimedia communications, and medical diagnosis. In this lecture, we discuss the use of face and gait signatures for human identification and recognition of human activities from video sequences.
We survey existing work and describe some of the more well-known methods in these areas. We also describe our own research and outline future possibilities. In the area of face recognition, we start with the traditional methods for image-based analysis and then describe some of the more recent developments related to the use of video sequences, 3D models, and techniques for representing variations of illumination. We note that the main challenge facing researchers in this area is the development of recognition strategies that are robust to changes due to pose, illumination, disguise, and aging. Gait recognition is a more recent area of research in video understanding, although it has been studied for a long time in psychophysics and kinesiology. The goal for video scientists working in this area is to automatically extract the parameters for representation of human gait. We describe some of the techniques that have been developed for this purpose, most of which are appearance based. We also highlight the challenges involved in dealing with changes in viewpoint and propose methods based on image synthesis, visual hull, and 3D models. In the domain of human activity recognition, we present an extensive survey of various methods that have been developed in different disciplines like artificial intelligence, image processing, pattern recognition, and computer vision. We then outline our method for modeling complex activities using 2D and 3D deformable shape theory. The wide application of automatic human identification and activity recognition methods will require the fusion of different modalities like face and gait, dealing with the problems of pose and illumination variations, and accurate computation of 3D models. The last chapter of this lecture deals with these areas of future research. }, isbn = {1559-8136, 1559-8144}, doi = {10.2200/S00002ED1V01Y200508IVM001}, url = {http://www.morganclaypool.com/doi/abs/10.2200/S00002ED1V01Y200508IVM001}, author = {Chellapa, Rama and Roy-Chowdhury,Amit K. and Zhou,S. Kevin} } @article {19025, title = {Recovering from Intrusions in the OSPF Data-plane}, year = {2005}, month = {2005}, institution = {School of Computer Science, Carnegie Mellon University}, abstract = {In this paper, we propose CONS-ROUTE, a data-plane intrusion recovery mechanism for securing the OSPF routing protocol. CONS-ROUTE allows routers to perform intrusion detection in a distributed manner. The intrusion detection outcome can be used globally to reevaluate routing decisions in a way that is resilient to the slandering attack, where a malicious router claims that a legitimate router is misbehaving. We evaluate CONS-ROUTE through simulation and compare it with several simple OSPF data plane resilience techniques. }, doi = {Technical Report}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.60.5274\&rep=rep1\&type=pdf$\#$page=51}, author = {Elaine Shi and Lu, Yong and Reid, Matt} } @article {14758, title = {Recovering system specific rules from software repositories}, journal = {SIGSOFT Softw. Eng. Notes}, volume = {30}, year = {2005}, month = {2005/05//}, pages = {1 - 5}, abstract = {One of the most successful applications of static analysis based bug finding tools is to search the source code for violations of system-specific rules. These rules may describe how functions interact in the code, how data is to be validated or how an API is to be used. To apply these tools, the developer must encode a rule that must be followed in the source code.
The difficulty is that many of these system-specific rules are undocumented and "grow" over time as the source code changes. Most research in this area relies on expert programmers to document these little-known rules. In this paper we discuss a method to automatically recover a subset of these rules, function usage patterns, by mining the software repository. We present a preliminary study that applies our work to a large open source software project.}, keywords = {data warehouse and repository, debugging aids, design, experimentation, measurement, performance}, isbn = {0163-5948}, doi = {10.1145/1082983.1083144}, author = {Williams, Chadd C and Hollingsworth, Jeffrey K} } @article {15089, title = {Reducing complexity assumptions for statistically-hiding commitment}, journal = {Advances in Cryptology{\textendash}EUROCRYPT 2005}, year = {2005}, month = {2005///}, pages = {614 - 614}, abstract = {Determining the minimal assumptions needed to construct various cryptographic building blocks has been a focal point of research in theoretical cryptography. Here, we revisit the following question: what are the minimal assumptions needed to construct statistically-hiding commitment schemes? Previously, it was known how to construct such schemes based on one-way permutations. We improve upon this by constructing statistically-hiding commitment schemes based on approximable-preimage-size one-way functions. These are one-way functions for which there is an efficient way to approximate the number of preimages of a given output. A special case (for which we show a somewhat simpler construction) is that of regular one-way functions where all outputs have the same number of preimages.We utilize two different approaches in constructing statistically-hiding commitment schemes. Our first approach proceeds by showing that the scheme of Naor et al. can be implemented using any one-way function having an output distribution which is {\textquotedblleft}sufficiently similar{\textquotedblright} to uniform. We then construct one-way functions with this property from approximable-preimage-size one-way functions. Our second approach begins by constructing a commitment scheme which is statistically hiding against an honest-but-curious receiver. We then demonstrate a compiler which transforms any such commitment scheme into one which is statistically hiding even against a malicious receiver. This compiler and its analysis may be of independent interest. }, doi = {10.1007/11426639_4}, author = {Haitner,I. and Horvitz,O. and Katz, Jonathan and Koo,C. Y and Morselli,R. and Shaltiel,R.} } @conference {14462, title = {Relational clustering for multi-type entity resolution}, booktitle = {Proceedings of the 4th international workshop on Multi-relational mining}, series = {MRDM {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {3 - 12}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In many applications, there are a variety of ways of referring to the same underlying entity. Given a collection of references to entities, we would like to determine the set of true underlying entities and map the references to these entities. The references may be to entities of different types and more than one type of entity may need to be resolved at the same time. We propose similarity measures for clustering references taking into account the different relations that are observed among the typed references. 
We pose typed entity resolution in relational data as a clustering problem and present experimental results on real data showing improvements over attribute-based models when relations are leveraged.}, isbn = {1-59593-212-7}, doi = {10.1145/1090193.1090195}, url = {http://doi.acm.org/10.1145/1090193.1090195}, author = {Bhattacharya,Indrajit and Getoor, Lise} } @article {13203, title = {Reliable Segmentation of Pedestrians in Moving Scenes}, journal = {The 2005 International Conference on Acoustics, Speech, and Signal Processing (ICASSP2005)}, year = {2005}, month = {2005///}, abstract = {This paper describes a periodic motion based pedestrian segmentation algorithm for videos acquired from moving platforms. Given a sequence of bounding boxes containing the detected and tracked walking human, the goal is to analyze the low-dimensional structure by considering every object sample as a point in the high-dimensional manifold space and use the learned structure for segmentation. In this work, unlike the traditional top-down dimension reduction (manifold learning) methods such as Isomap and locally linear embedding (LLE) [9], we introduce a novel bottom-up learning approach. We represent the human stride as a cascade of models with increasing parameter numbers. These parameters describe the dynamics of pedestrians from coarse to fine. By applying the learned manifold structure, we can predict the location of body parts, especially legs, with high accuracy at every frame. The segmentation in consecutive images is done by EM clustering. With the accuracy of prediction provided by the twin-pendulum model, EM is more likely to converge to global maxima. Experimental results for real videos are presented. The algorithm has demonstrated a reliable performance for videos acquired from moving platforms. }, author = {Ran, Y. and Zheng,Q. and Weiss, I. and Davis, Larry S.} } @conference {15286, title = {Representation of information needs and the elements of context: A case study in the domain of clinical medicine}, booktitle = {Proceedings of the ACM SIGIR 2005 Workshop on Information Retrieval in Context (IRiX 2005)}, year = {2005}, month = {2005///}, author = {Jimmy Lin and Fushman,D. D.} } @inbook {16114, title = {Representing Unevenly-Spaced Time Series Data for Visualization and Interactive Exploration}, booktitle = {Human-Computer Interaction - INTERACT 2005}, series = {Lecture Notes in Computer Science}, volume = {3585}, year = {2005}, month = {2005///}, pages = {835 - 846}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Visualizing time series is useful to support discovery of relations and patterns in financial, genomic, medical and other applications. Often, measurements are equally spaced over time. We discuss the challenges of unevenly-spaced time series and present four representation methods: sampled events, aggregated sampled events, event index and interleaved event index. We developed these methods while studying eBay auction data with TimeSearcher. We describe the advantages, disadvantages, choices for algorithms and parameters, and compare the different methods for different tasks.
Interaction issues such as screen resolution, response time for dynamic queries, and learnability are governed by these decisions.}, isbn = {978-3-540-28943-2}, url = {http://dx.doi.org/10.1007/11555261_66}, author = {Aris,Aleks and Shneiderman, Ben and Plaisant, Catherine and Shmueli,Galit and Jank,Wolfgang}, editor = {Costabile,Maria and Patern{\`o},Fabio} } @article {16113, title = {Representing Unevenly-Spaced Time Series Data for Visualization and Interactive Exploration (2005)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {Visualizing time series data is useful to support discovery of relations and patterns in financial, genomic, medical and other applications. In most time series, measurements are equally spaced over time. This paper discusses the challenges for unevenly-spaced time series data and presents four methods to represent them: sampled events, aggregated sampled events, event index and interleaved event index. We developed these methods while studying eBay auction data with TimeSearcher. We describe the advantages, disadvantages, choices for algorithms and parameters, and compare the different methods. Since each method has its advantages, this paper provides guidance for choosing the right combination of methods, algorithms, and parameters to solve a given problem for unevenly-spaced time series. Interaction issues such as screen resolution, response time for dynamic queries, and meaning of the visual display are governed by these decisions.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6537}, author = {Aris,Aleks and Shneiderman, Ben and Plaisant, Catherine and Shmueli,Galit and Jank,Wolfgang} } @article {13346, title = {Resource-aware wireless sensor-actuator networks}, journal = {Data Engineering}, volume = {1001}, year = {2005}, month = {2005///}, pages = {40 - 40}, author = {Deshpande, Amol and Guestrin,C. and Madden,S. R} } @article {16094, title = {Robotic apparatus and wireless communication system}, volume = {10/085,821}, year = {2005}, month = {2005/05/17/}, abstract = {A robotic apparatus and system adapted to communicate with a wireless sensor. The apparatus may be either physical or virtual in nature and is adapted to communicate physical movements with a wireless sensor. Data received from the sensor and/or robotic apparatus may be reviewed in a real-time mode, or may be saved for review at a later time. In addition, the apparatus may be controlled through an operator that is in local or remote communication with the apparatus. The robotic system may include pre-programmed interactive platforms for enabling communication between a user and the apparatus in a dynamic mode. In addition, the system may allow an operator to program a game/story for use as an interactive platform. Accordingly, the apparatus and system provides a platform for rehabilitative exercise of a patient as well as an entertainment device.}, url = {http://www.google.com/patents?id=5-0VAAAAEBAJ}, author = {Lathan,Corinna E. and Tracey,Michael R. and Vice,Jack M. and Druin, Allison and Plaisant, Catherine} } @conference {14025, title = {A robust and self-reconfigurable design of spherical microphone array for multi-resolution beamforming}, booktitle = {Acoustics, Speech, and Signal Processing, 2005. Proceedings. (ICASSP {\textquoteright}05). IEEE International Conference on}, volume = {4}, year = {2005}, month = {2005/03//}, pages = {iv/1137 - iv/1140 Vol. 4},
abstract = {We describe a robust and self-reconfigurable design of a spherical microphone array for beamforming. Our approach achieves a multi-resolution spherical beamformer with performance that is either optimal in the approximation of desired beampattern or is optimal in the directivity achieved, both robustly. Our implementation converges to the optimal performance quickly while exactly satisfying the specified frequency response and robustness constraint in each iteration step without accumulated round-off errors. The advantage of this design lies in its robustness and self-reconfiguration in microphone array reorganization, such as microphone failure, which is highly desirable in online maintenance and anti-terrorism. Design examples and simulation results are presented.}, keywords = {3D audio, anti-terrorism, beam steering, beamforming, beampattern, directivity optimization, frequency response, microphone arrays, multiresolution, omnidirectional microphones, robustness, self-reconfigurable arrays, signal processing, soundfield sampling, spherical arrays}, doi = {10.1109/ICASSP.2005.1416214}, author = {Zhiyun Li and Duraiswami, Ramani} } @conference {11975, title = {Robust Contrast Invariant Stereo Correspondence}, booktitle = {Proceedings of the 2005 IEEE International Conference on Robotics and Automation, 2005. ICRA 2005}, year = {2005}, month = {2005/04/18/22}, pages = {819 - 824}, publisher = {IEEE}, organization = {IEEE}, abstract = {A stereo pair of cameras attached to a robot will inevitably yield images with different contrast. Even if we assume that the camera hardware is identical, due to slightly different points of view, the amount of light entering the two cameras is also different, causing dynamically adjusted internal parameters such as aperture, exposure and gain to be different. Due to the difficulty of obtaining and maintaining precise intensity or color calibration between the two cameras, contrast invariance becomes an extremely desirable property of stereo correspondence algorithms. The problem of achieving point correspondence between a stereo pair of images is often addressed by using the intensity or color differences as a local matching metric, which is sensitive to contrast changes. We present an algorithm for contrast invariant stereo matching which relies on multiple spatial frequency channels for local matching. A fast global framework uses the local matching to compute the correspondences and find the occlusions. We demonstrate that the use of multiple frequency channels allows the algorithm to yield good results even in the presence of significant amounts of noise.}, keywords = {Apertures, Calibration, CAMERAS, Computer science, contrast invariance, diffusion, Educational institutions, Frequency, gabor, Hardware, occlusions, Robot vision systems, Robotics and automation, Robustness, stereo}, isbn = {0-7803-8914-X}, doi = {10.1109/ROBOT.2005.1570218}, author = {Ogale, A. S and Aloimonos, J.} } @article {18268, title = {Robust digital fingerprinting for curves}, journal = {Proc. of IEEE Int. Conf. on Acoustics, Speech, and Signal Processing (ICASSP{\textquoteright}05)}, volume = {2}, year = {2005}, month = {2005///}, pages = {529 - 532}, abstract = {Hiding data in curves can be achieved by parameterizing a curve using the B-spline model and adding spread spectrum sequences in B-spline control points.
In this paper, we propose an iterative alignment-minimization algorithm to perform curve registration and deal with the non-uniqueness of B-spline control points. We demonstrate through experiments the robustness of our method against various attacks such as collusion, geometric transformation, and printing-and-scanning. We also show the feasibility of our method for fingerprinting topographic maps and detecting fingerprints from printed copies.}, author = {Gou,H. and Wu,M.} } @conference {13193, title = {Robust observations for object tracking}, booktitle = {Image Processing, 2005. ICIP 2005. IEEE International Conference on}, volume = {2}, year = {2005}, month = {2005/09//}, pages = {II - 442-5}, abstract = {It is a difficult task to find an observation model that will perform well for long-term visual tracking. In this paper, we propose an adaptive observation enhancement technique based on likelihood images, which are derived from multiple visual features. The most discriminative likelihood image is extracted by principal component analysis (PCA) and incrementally updated frame by frame to reduce temporal tracking error. In the particle filter framework, the feasibility of each sample is computed using this most discriminative likelihood image before the observation process. Integral image is employed for efficient computation of the feasibility of each sample. We illustrate how our enhancement technique contributes to more robust observations through demonstrations.}, keywords = {(numerical, adaptive, analysis;, component, enhancement;, filter, Filtering, framework;, image, images;, likelihood, methods);, object, observation, particle, PCA;, principal, tracking;}, doi = {10.1109/ICIP.2005.1530087}, author = {Han,Bohyung and Davis, Larry S.} } @conference {13637, title = {Robust Point Matching for Two-Dimensional Nonrigid Shapes}, booktitle = {Proceedings of the ICASSP{\textquoteright}04 IEEE International Conference on Computer Vision}, year = {2005}, month = {2005/10//}, pages = {1561 - 1566}, address = {Beijing, China}, abstract = {Recently, nonrigid shape matching has received more and more attention. For nonrigid shapes, most neighboring points cannot move independently under deformation due to physical constraints. Furthermore, the rough structure of a shape should be preserved under deformation, otherwise even people cannot match shapes reliably. Therefore, though the absolute distance between two points may change significantly, the neighborhood of a point is well preserved in general. Based on this observation, we formulate point matching as a graph matching problem. Each point is a node in the graph, and two nodes are connected by an edge if their Euclidean distance is less than a threshold. The optimal match between two graphs is the one that maximizes the number of matched edges. The shape context distance is used to initialize the graph matching, followed by relaxation labeling for refinement. Nonrigid deformation is overcome by bringing one shape closer to the other in each iteration using deformation parameters estimated from the current point correspondence.
Experiments demonstrate the effectiveness of our approach: it outperforms the shape context and TPS-RPM algorithms under nonrigid deformation and noise on a public data set.}, author = {Yefeng Zheng and David Doermann} } @article {12082, title = {Robust routing in malicious environment for ad hoc networks}, journal = {Information Security Practice and Experience}, year = {2005}, month = {2005///}, pages = {36 - 47}, author = {Yu,Z. and Seng,C. Y and Jiang,T. and Wu,X. and Arbaugh, William A.} } @article {12104, title = {ROMER: resilient opportunistic mesh routing for wireless mesh networks}, journal = {Proc. of IEEE WiMesh}, volume = {166}, year = {2005}, month = {2005///}, author = {Yuan,Y. and Yang,H. and Wong,S. H.Y and Lu,S. and Arbaugh, William A.} } @article {15939, title = {The roots of self-awareness}, journal = {Phenomenology and the Cognitive Sciences}, volume = {4}, year = {2005}, month = {2005///}, pages = {297 - 333}, author = {Anderson,M. L and Perlis, Don} } @conference {16944, title = {A Rank-by-Feature Framework for Unsupervised Multidimensional Data Exploration Using Low Dimensional Projections}, booktitle = {IEEE Symposium on Information Visualization, 2004. INFOVIS 2004}, year = {2004}, month = {2004///}, pages = {65 - 72}, publisher = {IEEE}, organization = {IEEE}, abstract = {Exploratory analysis of multidimensional data sets is challenging because of the difficulty in comprehending more than three dimensions. Two fundamental statistical principles for the exploratory analysis are (1) to examine each dimension first and then find relationships among dimensions, and (2) to try graphical displays first and then find numerical summaries (D. S. Moore, 1999). We implement these principles in a novel conceptual framework called the rank-by-feature framework. In the framework, users can choose a ranking criterion interesting to them and sort 1D or 2D axis-parallel projections according to the criterion. We introduce the rank-by-feature prism that is a color-coded lower-triangular matrix that guides users to desired features. Statistical graphs (histogram, boxplot, and scatterplot) and information visualization techniques (overview, coordination, and dynamic query) are combined to help users effectively traverse 1D and 2D axis-parallel projections, and finally to help them interactively find interesting features.}, keywords = {axis-parallel projections, boxplot, color-coded lower-triangular matrix, computational complexity, computational geometry, Computer displays, Computer science, Computer vision, Data analysis, data mining, data visualisation, Data visualization, Displays, dynamic query, Educational institutions, exploratory data analysis, feature detection, feature detection/selection, Feature extraction, feature selection, graph theory, graphical displays, histogram, Information Visualization, interactive systems, Laboratories, Multidimensional systems, Principal component analysis, rank-by-feature prism, scatterplot, statistical analysis, statistical graphics, statistical graphs, unsupervised multidimensional data exploration, very large databases}, isbn = {0-7803-8779-3}, doi = {10.1109/INFVIS.2004.3}, author = {Seo,J. and Shneiderman, Ben} } @conference {19649, title = {A Rao-Blackwellized particle filter for EigenTracking}, booktitle = {Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004.
CVPR 2004}, volume = {2}, year = {2004}, month = {2004/06//}, pages = {II - 980-II-986 Vol.2}, abstract = {Subspace representations have been a popular way to model appearance in computer vision. In Jepson and Black{\textquoteright}s influential paper on EigenTracking, they were successfully applied in tracking. For noisy targets, optimization-based algorithms (including EigenTracking) often fail catastrophically after losing track. Particle filters have recently emerged as a robust method for tracking in the presence of multi-modal distributions. To use subspace representations in a particle filter, the number of samples increases exponentially as the state vector includes the subspace coefficients. We introduce an efficient method for using subspace representations in a particle filter by applying Rao-Blackwellization to integrate out the subspace coefficients in the state vector. Fewer samples are needed since part of the posterior over the state vector is analytically calculated. We use probabilistic principal component analysis to obtain analytically tractable integrals. We show experimental results in a scenario in which we track a target in clutter.}, keywords = {analytically tractable integrals, Computer vision, EigenTracking, Filters, Gaussian processes, modal analysis, multi-modal distributions, NOISE, noisy targets, optimisation, optimization-based algorithms, Particle filters, Particle measurements, Particle tracking, Principal component analysis, probabilistic principal component analysis, Rao-Blackwellized particle filter, Robustness, SHAPE, State estimation, state vector, subspace coefficients, Subspace representations, target tracking, vectors}, author = {Zia Khan and Balch, T. and Dellaert, F.} } @article {13221, title = {Real-Time Kernel-Based Tracking in Joint Feature-Spatial Spaces}, year = {2004}, month = {2004/04/19/}, abstract = {An object tracking algorithm that uses a novel simple symmetric similarity function between spatially-smoothed kernel-density estimates of the model and target distributions is proposed and tested. The similarity measure is based on the expectation of the density estimates over the model or target images. The density is estimated using radial-basis kernel functions which measure the affinity between points and provide a better outlier rejection property. The mean-shift algorithm is used to track objects by iteratively maximizing this similarity function. To alleviate the quadratic complexity of the density estimation, we employ Gaussian kernels and the fast Gauss transform to reduce the computations to linear order. This leads to a very efficient and robust nonparametric tracking algorithm. The proposed algorithm is tested with several image sequences and shown to achieve robust and reliable real-time tracking. (UMIACS-TR-2004-12)}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1341}, author = {Yang,Changjiang and Duraiswami, Ramani and Elgammal,Ahmed and Davis, Larry S.} } @article {17899, title = {Real-time rendering of translucent meshes}, journal = {ACM Trans. Graph.}, volume = {23}, year = {2004}, month = {2004/04//}, pages = {120 - 142}, abstract = {Subsurface scattering is important for photo-realistic rendering of translucent materials. We make approximations to the BSSRDF model and propose a simple lighting model to simulate the effects on translucent meshes.
Our approximations are based on the observation that subsurface scattering is relatively local due to its exponential falloff. In the preprocessing stage we build subsurface scattering neighborhood information, which includes all the vertices within effective scattering range from each vertex. We then modify the traditional local illumination model into a run-time two-stage process. The first stage involves computation of reflection and transmission of light on surface vertices. The second stage bleeds in scattering effects from a vertex{\textquoteright}s neighborhood to generate the final result. We then merge the run-time two-stage process into a run-time single-stage process using precomputed integrals, and reduce the complexity of our run-time algorithm to O(N), where N is the number of vertices. The selection of the optimum set size for precomputed integrals is guided by a standard image-space error-metric. Furthermore, we show how to compress the precomputed integrals using spherical harmonics. We compensate for the inadequacy of spherical harmonics for storing high frequency components by a reference points scheme to store high frequency components of the precomputed integrals explicitly. With this approach, we greatly reduce memory usage without loss of visual quality under a high-frequency lighting environment and achieve interactive frame rates for medium-sized scenes. Our model is able to capture the most important features of subsurface scattering: reflection and transmission due to multiple scattering.}, keywords = {BSSRDF, local illumination, reflection models, subsurface scattering}, isbn = {0730-0301}, doi = {10.1145/990002.990004}, url = {http://doi.acm.org/10.1145/990002.990004}, author = {Hao,Xuejun and Varshney, Amitabh} } @article {18018, title = {Reconfigurable optical wireless sensor networks}, journal = {Proceedings of SPIE}, volume = {5237}, year = {2004}, month = {2004/02/06/}, pages = {136 - 146}, abstract = {Optical wireless networks are emerging as a viable, cost effective technology for rapidly deployable broadband sensor communication infrastructures. The use of directional, narrow beam, optical wireless links provides great promise for secure, extremely high data rate communication between fixed or mobile nodes, very suitable for sensor networks in civil and military contexts. The main challenge is to maintain the quality of such networks, as changing atmospheric and platform conditions critically affect their performance. Topology control is used as the means to achieve survivable optical wireless networking under adverse conditions, based on dynamic and autonomous topology reconfiguration. The topology control process involves tracking and acquisition of nodes, assessment of link-state information, collection and distribution of topology data, and the algorithmic solution of an optimal topology. This paper focuses on the analysis, implementation and evaluation of algorithms and heuristics for selecting the best possible topology in order to optimize a given performance objective while satisfying connectivity constraints. The work done at the physical layer is based on link cost information. A cost measure is defined in terms of bit-error-rate and the heuristics developed seek to form a bi-connected topology which minimizes total network cost.
At the network layer a key factor is the traffic matrix, and heuristics were developed in order to minimize congestion, flow-rate or end-to-end delay.}, isbn = {0277786X}, doi = {10.1117/12.511368}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/5237/1/136_1?isAuthorized=no}, author = {Llorca,Jaime and Desai,Aniket and Vishkin, Uzi and Davis,Christopher C. and Milner,Stuart D} } @article {15794, title = {Reconstructing Images of Bar Codes for Construction Site Object Recognition}, journal = {Automation in Construction (Elsevier)}, volume = {13}, year = {2004}, month = {2004///}, pages = {21 - 35}, author = {Gilsinn,David E. and Cheok,Geraldine S. and O{\textquoteright}Leary, Dianne P.} } @conference {13208, title = {Recording and reproducing high order surround auditory scenes for mixed and augmented reality}, booktitle = {Mixed and Augmented Reality, 2004. ISMAR 2004. Third IEEE and ACM International Symposium on}, year = {2004}, month = {2004/11//}, pages = {240 - 249}, abstract = {Virtual reality systems are largely based on computer graphics and vision technologies. However, sound also plays an important role in human{\textquoteright}s interaction with the surrounding environment, especially for the visually impaired people. In this paper, we develop the theory of recording and reproducing real-world surround auditory scenes in high orders using specially designed microphone and loudspeaker arrays. It is complementary to vision-based technologies in creating mixed and augmented realities. Design examples and simulations are presented.}, keywords = {array;, audio, auditory, augmented, Computer, graphics;, high, loudspeaker, microphone, mixed, order, processing;, reality, reality;, scene;, signal, surround, system;, technology;, virtual, VISION, vision;}, doi = {10.1109/ISMAR.2004.51}, author = {Zhiyun Li and Duraiswami, Ramani and Davis, Larry S.} } @article {14046, title = {Recursions for the computation of multipole translation and rotation coefficients for the 3-D Helmholtz equation}, journal = {SIAM Journal on Scientific Computing}, volume = {25}, year = {2004}, month = {2004///}, pages = {1344 - 1381}, abstract = {We develop exact expressions for the coefficients of series representations of translations and rotations of local and multipole fundamental solutions of the Helmholtz equation in spherical coordinates. These expressions are based on the derivation of recurrence relations, some of which, to our knowledge, are presented here for the first time. The symmetry and other properties of the coefficients are also examined and, based on these, efficient procedures for calculating them are presented. Our expressions are direct and do not use the Clebsch{\textendash}Gordan coefficients or the Wigner 3-j symbols, although we compare our results with methods that use these to prove their accuracy. For evaluating an $N_t$ term truncation of the translated series (involving $O(N_t^2)$ multipoles), our expressions require $O(N_t^3)$ evaluations, compared to previous exact expressions that require $O(N_t^5)$ operations.}, author = {Gumerov, Nail A. and Duraiswami, Ramani} } @conference {15109, title = {Reducing complexity assumptions for statistically-hiding commitment}, booktitle = {EUROCRYPT}, year = {2004}, month = {2004///}, abstract = {Determining the minimal assumptions needed to construct various cryptographic building blocks has been a focal point of research in theoretical cryptography. For most {\textemdash} but not all!
{\textemdash} cryptographic primitives, complexity assumptions both necessary and sufficient for their existence are known. Here, we revisit the following, decade-old question: what are the minimal assumptions needed to construct a statistically-hiding bit commitment scheme? Previously, it was known how to construct such schemes based on any one-way permutation. In this work, we show that regular one-way functions suffice. We show two constructions of statistically-hiding commitment schemes from regular one-way functions. Our first construction is more direct, and serves as a {\textquotedblleft}stepping-stone{\textquotedblright} for our second construction which has improved round complexity. Of independent interest, as part of our work we show a compiler transforming any commitment scheme which is statistically-hiding against an honest-but-curious receiver to one which is statistically-hiding against a malicious receiver. This demonstrates the equivalence of these two formulations of the problem.}, author = {Horvitz,O. and Katz, Jonathan and Koo,C. Y and Morselli,R.} } @article {18989, title = {Reducing storage requirements for biological sequence comparison}, journal = {Bioinformatics}, volume = {20}, year = {2004}, month = {2004/12/12/}, pages = {3363 - 3369}, abstract = {Motivation: Comparison of nucleic acid and protein sequences is a fundamental tool of modern bioinformatics. A dominant method of such string matching is the {\textquoteleft}seed-and-extend{\textquoteright} approach, in which occurrences of short subsequences called {\textquoteleft}seeds{\textquoteright} are used to search for potentially longer matches in a large database of sequences. Each such potential match is then checked to see if it extends beyond the seed. To be effective, the seed-and-extend approach needs to catalogue seeds from virtually every substring in the database of search strings. Projects such as mammalian genome assemblies and large-scale protein matching, however, have such large sequence databases that the resulting list of seeds cannot be stored in RAM on a single computer. This significantly slows the matching process. Results: We present a simple and elegant method in which only a small fraction of seeds, called {\textquoteleft}minimizers{\textquoteright}, needs to be stored. Using minimizers can speed up string-matching computations by a large factor while missing only a small fraction of the matches found using all seeds.}, isbn = {1367-4803, 1460-2059}, doi = {10.1093/bioinformatics/bth408}, url = {http://bioinformatics.oxfordjournals.org/content/20/18/3363}, author = {Roberts,Michael and Hayes,Wayne and Hunt,Brian R. and Mount, Stephen M. and Yorke,James A.} } @article {13215, title = {Rendering localized spatial audio in a virtual auditory space}, journal = {IEEE Transactions on Multimedia}, volume = {6}, year = {2004}, month = {2004/08//}, pages = {553 - 564}, abstract = {High-quality virtual audio scene rendering is required for emerging virtual and augmented reality applications, perceptual user interfaces, and sonification of data. We describe algorithms for creation of virtual auditory spaces by rendering cues that arise from anatomical scattering, environmental scattering, and dynamical effects. We use a novel way of personalizing the head related transfer functions (HRTFs) from a database, based on anatomical measurements.
Details of algorithms for HRTF interpolation, room impulse response creation, HRTF selection from a database, and audio scene presentation are presented. Our system runs in real time on an office PC without specialized DSP hardware.}, keywords = {3-D audio processing, Audio databases, audio signal processing, audio user interfaces, augmented reality, data sonification, Digital signal processing, head related transfer functions, head-related transfer function, Interpolation, Layout, perceptual user interfaces, Real time systems, Rendering (computer graphics), Scattering, spatial audio, Transfer functions, User interfaces, virtual audio scene rendering, virtual auditory spaces, virtual environments, Virtual reality, virtual reality environments}, isbn = {1520-9210}, doi = {10.1109/TMM.2004.827516}, author = {Zotkin,Dmitry N and Duraiswami, Ramani and Davis, Larry S.} } @conference {13632, title = {Representation and Recognition of Events in Surveillance Video Using Petri Nets}, booktitle = {Second IEEE Workshop on Event Mining 2004, CVPR 2004}, year = {2004}, month = {2004///}, pages = {112 - 112}, abstract = {Detection of events is an essential task in surveillance applications. This task requires finding a general event representation method and developing efficient recognition algorithms dealing with this representation. In this paper, we describe an interactive system for querying surveillance video about events. The queries may not be known in advance and have to be composed from primitive events and previously defined queries. We propose using Petri nets as both representation and recognition methods. The Petri net representation for users{\textquoteright} queries is derived automatically from simpler event nets. Recognition is then performed by tokens moving through the Petri nets.}, author = {Ghanem,N. and DeMenthon,D.
and David Doermann and Davis, Larry S.} } @article {14768, title = {Resource policing to support fine-grain cycle stealing in networks of workstations}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {15}, year = {2004}, month = {2004/10//}, pages = {878 - 892}, abstract = {We present the design, implementation, and performance evaluation of a suite of resource policing mechanisms that allow guest processes to efficiently and unobtrusively exploit otherwise idle workstation resources. Unlike traditional policies that harvest cycles only from unused machines, we employ fine-grained cycle stealing to exploit resources even from machines that have active users. We developed a suite of kernel extensions that enable these policies to operate without significantly impacting host processes: 1) a new starvation-level CPU priority for guest jobs, 2) a new page replacement policy that imposes hard bounds on physical memory usage by guest processes, and 3) a new I/O scheduling mechanism called rate windows that throttle guest processes{\textquoteright} usage of I/O and network bandwidth. We evaluate both the individual impacts of each mechanism, and their utility for our fine-grain cycle stealing.}, keywords = {Application software, Bandwidth, cluster computing, Computer networks, Computer Society, Concurrent computing, cycle stealing, grid computing, I/O scheduling, Intelligent networks, Kernel, network bandwidth, networks of workstations, page replacement policy, parallel computing, performance evaluation, Processor scheduling, resource allocation, resource scheduling, starvation-level CPU priority, workstation clusters, workstation resources, Workstations}, isbn = {1045-9219}, doi = {10.1109/TPDS.2004.58}, author = {Ryu, K. D and Hollingsworth, Jeffrey K} } @article {14253, title = {Robotics \& Automation Magazine Vol. 11}, journal = {IEEE Robotics \& Automation Magazine}, year = {2004}, month = {2004///}, pages = {132 - 132}, author = {Aliaga, D.G. and Allen,PK and Archibald,JK and Argyros,AA and Arkin,RC and Baker,C. and Baker, P. and Beard,RW and Bicchi,A. and Birgmajer,B. and others} } @conference {12686, title = {Robust Bayesian cameras motion estimation using random sampling}, booktitle = {Image Processing, 2004. ICIP {\textquoteright}04. 2004 International Conference on}, volume = {2}, year = {2004}, month = {2004/10//}, pages = {1361 - 1364 Vol.2}, abstract = {In this paper, we propose an algorithm for robust 3D motion estimation of wide baseline cameras from noisy feature correspondences. The posterior probability density function of the camera motion parameters is represented by weighted samples. The algorithm employs a hierarchy coarse-to-fine strategy. First, a coarse prior distribution of camera motion parameters is estimated using the random sample consensus scheme (RANSAC). Based on this estimate, a refined posterior distribution of camera motion parameters can then be obtained through importance sampling. Experimental results using both synthetic and real image sequences indicate the efficacy of the proposed algorithm.}, keywords = {3D, baseline, Bayesian, CAMERAS, cameras;, coarse-to-fine, consensus, density, estimation;, feature, function;, hierarchy, image, images;, importance, matching;, MOTION, posterior, probability, probability;, processing;, random, RANSAC;, real, realistic, sample, sampling;, scheme;, sequences;, stereo, strategy;, synthetic, wide}, doi = {10.1109/ICIP.2004.1419754}, author = {Qian, G.
and Chellappa, Rama and Qinfen Zheng} } @conference {12687, title = {Robust ego-motion estimation and 3D model refinement using depth based parallax model}, booktitle = {Image Processing, 2004. ICIP {\textquoteright}04. 2004 International Conference on}, volume = {4}, year = {2004}, month = {2004/10//}, pages = {2483 - 2486 Vol. 4}, abstract = {We present an iterative algorithm for robustly estimating the ego-motion and refining and updating a coarse, noisy and partial depth map using a depth based parallax model and brightness derivatives extracted from an image pair. Given a coarse, noisy and partial depth map acquired by a range-finder or obtained from a Digital Elevation Map (DEM), we first estimate the ego-motion by combining a global ego-motion constraint and a local brightness constancy constraint. Using the estimated camera motion and the available depth map estimate, motion of the 3D points is compensated. We utilize the fact that the resulting surface parallax field is an epipolar field and knowing its direction from the previous motion estimates, estimate its magnitude and use it to refine the depth map estimate. Instead of assuming a smooth parallax field or locally smooth depth models, we locally model the parallax magnitude using the depth map, formulate the problem as a generalized eigen-value analysis and obtain better results. In addition, confidence measures for depth estimates are provided which can be used to remove regions with potentially incorrect (and outliers in) depth estimates for robustly estimating ego-motion in the next iteration. Results on both synthetic and real examples are presented.}, keywords = {3D, algorithm;, analysis;, and, based, camera;, coarse, compensation;, DEM;, depth, digital, ego-motion, eigen-value, eigenfunctions;, eigenvalues, ELEVATION, epipolar, estimation;, extraction;, feature, field;, iteration, iterative, map;, method;, methods;, model, model;, MOTION, parallax, partial, range-finding;, refinement;, refining;, surface}, doi = {10.1109/ICIP.2004.1421606}, author = {Agrawala, Ashok K. and Chellappa, Rama} } @article {13635, title = {Robust Point Matching for Non-Rigid Shapes: A Relaxation Labeling Based Approach}, volume = {LAMP-TR-117,CAR-TR-1005,CS-TR-4633,UMIACS-TR-2004-75}, year = {2004}, month = {2004///}, institution = {University of Maryland, College Park}, abstract = {Shape matching or image registration, which is often formulated as a point matching problem, is frequently encountered in image analysis, computer vision, and pattern recognition. Although the problem of registering rigid shapes was widely studied, non-rigid shape matching has recently received more and more attention. For non-rigid shapes, most neighboring points cannot move independently under deformation due to physical constraints. Therefore, though the absolute distance between two points may change significantly, the neighborhood of a point is well preserved in general. Based on this observation, we formulate point matching as a graph matching problem. Each point is a node in the graph, and two nodes are connected by an edge if their Euclidean distance is less than a threshold. The optimal match between two graphs is the one that maximizes the number of matched edges. The shape context distance is used to initialize the graph matching, and relaxation labeling (after enforcing one-to-one matching) is used to refine the matching results.
Non-rigid deformation is overcome by bringing one shape closer to the other in each iteration using deformation parameters estimated from the current point correspondence. Experiments on real and synthesized data demonstrate the effectiveness of our approach: it outperforms shape context and TPS-RPM algorithms under non-rigid deformation and noise on a public data set.}, author = {Yefeng Zheng and David Doermann} } @conference {12702, title = {Robust two-camera tracking using homography}, booktitle = {Acoustics, Speech, and Signal Processing, 2004. Proceedings. (ICASSP {\textquoteright}04). IEEE International Conference on}, volume = {3}, year = {2004}, month = {2004/05//}, pages = {iii - 1-4 vol.3}, abstract = {The paper introduces a two view tracking method which uses the homography relation between the two views to handle occlusions. An adaptive appearance-based model is incorporated in a particle filter to realize robust visual tracking. Occlusion is detected using robust statistics. When there is occlusion in one view, the homography from this view to other views is estimated from previous tracking results and used to infer the correct transformation for the occluded view. Experimental results show the robustness of the two view tracker.}, keywords = {Carlo, filter;, filters;, frame, framework;, homography;, image, method;, methods;, Monte, nonlinear, occlusions;, optical, particle, processing;, robust, sequences;, sequential, signal, statistics;, tracking, tracking;, two, two-camera, video, view, visual}, doi = {10.1109/ICASSP.2004.1326466}, author = {Yue,Zhanfeng and Zhou,S. K and Chellappa, Rama} } @conference {12696, title = {Role of shape and kinematics in human movement analysis}, booktitle = {Computer Vision and Pattern Recognition, 2004. CVPR 2004. Proceedings of the 2004 IEEE Computer Society Conference on}, volume = {1}, year = {2004}, month = {2004/07/02/june}, pages = {I-730 - I-737 Vol.1}, abstract = {Human gait and activity analysis from video is presently attracting a lot of attention in the computer vision community. In this paper we analyze the role of two of the most important cues in human motion: shape and kinematics. We present an experimental framework whereby it is possible to evaluate the relative importance of these two cues in computer vision based recognition algorithms. In the process, we propose a new gait recognition algorithm by computing the distance between two sequences of shapes that lie on a spherical manifold. In our experiments, shape is represented using Kendall{\textquoteright}s definition of shape. Kinematics is represented using a Linear Dynamical system. We place particular emphasis on human gait. Our conclusions show that shape plays a role which is more significant than kinematics in current automated gait based human identification algorithms. As a natural extension we study the role of shape and kinematics in activity recognition. Our experiments indicate that we require models that contain both shape and kinematics in order to perform accurate activity classification.
These conclusions also allow us to explain the relative performance of many existing methods in computer-based human activity modeling.}, keywords = {activity, algorithm;, algorithms;, analysis;, autoregressive, average, based, classification;, community;, Computer, definition;, dynamical, extraction;, feature, Gait, hidden, human, identification, image, Kendall, linear, manifold;, Markov, modeling;, models;, MOTION, Movement, moving, processes;, recognition, sequences;, SHAPE, spherical, system;, VISION, vision;}, doi = {10.1109/CVPR.2004.1315104}, author = {Veeraraghavan,A. and Chowdhury, A.R. and Chellappa, Rama} } @article {15086, title = {Round-optimal secure two-party computation}, journal = {Advances in Cryptology{\textendash}CRYPTO 2004}, year = {2004}, month = {2004///}, pages = {3 - 34}, abstract = {We consider the central cryptographic task of secure two-party computation: two parties wish to compute some function of their private inputs (each receiving possibly different outputs) where security should hold with respect to arbitrarily-malicious behavior of either of the participants. Despite extensive research in this area, the exact round-complexity of this fundamental problem (i.e., the number of rounds required to compute an arbitrary poly-time functionality) was not previously known. Here, we establish the exact round complexity of secure two-party computation with respect to black-box proofs of security. We first show a lower bound establishing (unconditionally) that four rounds are not sufficient to securely compute the coin-tossing functionality for any super-logarithmic number of coins; this rules out 4-round protocols for other natural functionalities as well. Next, we construct protocols for securely computing any (randomized) functionality using only five rounds. Our protocols may be based either on certified trapdoor permutations or homomorphic encryption schemes satisfying certain additional properties. The former assumption is implied by, e.g., the RSA assumption for large public exponents, while the latter is implied by, e.g., the DDH assumption. Finally, we show how our protocols may be modified {\textendash} without increasing their round complexity and without requiring erasures {\textendash} to tolerate an adaptive malicious adversary.}, doi = {10.1007/978-3-540-28628-8_21}, author = {Katz, Jonathan and Ostrovsky,R.} } @article {13852, title = {RT-S: Surface rich transcription scoring, methodology, and initial results}, journal = {Proc. DARPA Rich Transcription Workshop}, year = {2004}, month = {2004///}, abstract = {In this paper we present a methodology for the scoring of punctuation annotated texts, as well as a preliminary system to perform the task. We modify SCLITE{\textquoteright}s scoring methodology to support scoring of punctuation. Using this methodology, we show that the error rate of an initial automatic system is comparable to annotator inconsistency. However, the use of multiple references allows us to differentiate between human inconsistencies and system errors.}, author = {Snover,M. and Schwartz,R.
and Dorr, Bonnie J and Makhoul,J.} } @conference {12295, title = {Running on the bare metal with GeekOS}, booktitle = {Proceedings of the 35th SIGCSE technical symposium on Computer science education}, series = {SIGCSE {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {315 - 319}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Undergraduate operating systems courses are generally taught using one of two approaches: abstract or concrete. In the abstract approach, students learn the concepts underlying operating systems theory, and perhaps apply them using user-level threads in a host operating system. In the concrete approach, students apply concepts by working on a real operating system kernel. In the purest manifestation of the concrete approach, students implement operating system projects that run on real hardware. GeekOS is an instructional operating system kernel which runs on real hardware. It provides the minimum functionality needed to schedule threads and control essential devices on an x86 PC. On this foundation, we have developed projects in which students build processes, semaphores, a multilevel feedback scheduler, paged virtual memory, a filesystem, and inter-process communication. We use the Bochs emulator for ease of development and debugging. While this approach (tiny kernel run on an emulator) is not new, we believe GeekOS goes further towards the goal of combining realism and simplicity than previous systems have.}, keywords = {education, emulation, Hardware, Operating systems}, isbn = {1-58113-798-2}, doi = {10.1145/971300.971411}, url = {http://doi.acm.org/10.1145/971300.971411}, author = {Hovemeyer, David and Hollingsworth, Jeffrey K and Bhattacharjee, Bobby} } @article {15400, title = {Random doping-induced fluctuations of subthreshold characteristics in MOSFET devices}, journal = {Solid-State Electronics}, volume = {47}, year = {2003}, month = {2003/11//}, pages = {2055 - 2061}, abstract = {The random doping-induced fluctuations of subthreshold characteristics in MOSFET devices are analyzed. A technique for the computations of sensitivity coefficients and variances of subthreshold parameters is presented and applied to the computation of fluctuations of subthreshold current and gate-voltage swing. This technique is based on the linearization of transport equations with respect to the fluctuating quantities. It is computationally much more efficient than purely {\textquotedblleft}statistical{\textquotedblright} methods (Monte-Carlo methods) that are based on the simulations of a large number of devices with different doping realizations. The numerical implementation of this technique is discussed and numerous computational results are presented.}, keywords = {Fluctuations, Mismatch, MOSFET, Sensitivity analysis, Statistics, Submicron devices}, isbn = {0038-1101}, doi = {10.1016/S0038-1101(03)00236-3}, url = {http://www.sciencedirect.com/science/article/pii/S0038110103002363}, author = {Andrei,Petru and Mayergoyz, Issak D} } @conference {14543, title = {Random MAX SAT, random MAX CUT, and their phase transitions}, booktitle = {Proceedings of the fourteenth annual ACM-SIAM symposium on Discrete algorithms}, year = {2003}, month = {2003///}, pages = {364 - 373}, author = {Coppersmith,D. and Gamarnik,D. and Hajiaghayi, Mohammad T. and Sorkin,G. B} } @conference {12718, title = {Rank constrained recognition under unknown illuminations}, booktitle = {Analysis and Modeling of Faces and Gestures, 2003. AMFG 2003.
IEEE International Workshop on}, year = {2003}, month = {2003/10//}, pages = {11 - 18}, abstract = {Recognition under illumination variations is a challenging problem. The key is to successfully separate the illumination source from the observed appearance. Once separated, what remains is invariant to illuminant and appropriate for recognition. Most current efforts employ a Lambertian reflectance model with varying albedo field ignoring both attached and cast shadows, but restrict themselves by using object-specific samples, which undesirably deprives them of recognizing new objects not in the training samples. Using rank constraints on the albedo and the surface normal, we accomplish illumination separation in a more general setting, e.g., with class-specific samples via a factorization approach. In addition, we handle shadows (both attached and cast ones) by treating them as missing values, and resolve the ambiguities in the factorization method by enforcing integrability. As far as recognition is concerned, a bootstrap set which is just a collection of two-dimensional image observations can be utilized to avoid the explicit requirement that three-dimensional information be available. Our approaches produce good recognition results as shown in our experiments using the PIE database.}, keywords = {albedo, approach;, constrained, database;, databases;, decomposition;, factorization, field;, illumination, image;, information;, Lambertian, lighting;, model;, object, object-specific, PIE, rank, recognition;, reflectance, samples;, singular, three-dimensional, two-dimensional, value, variations;, visual}, doi = {10.1109/AMFG.2003.1240818}, author = {Zhou, S. and Chellappa, Rama} } @article {13846, title = {Rapid porting of DUSTer to Hindi}, journal = {ACM Transactions on Asian Language Information Processing (TALIP)}, volume = {2}, year = {2003}, month = {2003/06//}, pages = {118 - 123}, abstract = {The frequent occurrence of divergences{\textemdash}structural differences between languages{\textemdash}presents a great challenge for statistical word-level alignment and machine translation. This paper describes the adaptation of DUSTer, a divergence unraveling package, to Hindi during the DARPA TIDES-2003 Surprise Language Exercise. We show that it is possible to port DUSTer to Hindi in under 3 days.}, keywords = {Divergences, Machine translation}, isbn = {1530-0226}, doi = {10.1145/974740.974744}, url = {http://doi.acm.org/10.1145/974740.974744}, author = {Dorr, Bonnie J and Ayan,Necip Fazil and Habash,Nizar and Madnani,Nitin and Hwa,Rebecca} } @conference {15868, title = {Rapid-response machine translation for unexpected languages}, booktitle = {Proceedings of the MT Summit IX}, year = {2003}, month = {2003///}, author = {Oard, Douglas and Och,F.J.} } @book {12096, title = {Real 802.11 Security: Wi-Fi Protected Access and 802.11i}, pages = {480}, year = {2003}, month = {2003///}, publisher = {Addison Wesley}, organization = {Addison Wesley}, author = {Edney,J.
and Arbaugh, William A.} } @article {15019, title = {Recovery of a Digital Image Collection Through the SDSC/UMD/NARA Prototype Persistent Archive}, volume = {UMIACS-TR-2003-105}, year = {2003}, month = {2003/11/25/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {The San Diego Supercomputer Center (SDSC), the University of Maryland, and the National Archives and Records Administration (NARA) are collaborating on building a pilot persistent archive using and extending data grid and digital library technologies. The current prototype consists of node servers at SDSC, University of Maryland, and NARA, connected through the Storage Request Broker (SRB) data grid middleware, and currently holds several terabytes of NARA selected collections. In particular, a historically important image collection that was on the verge of becoming inaccessible was fully restored and ingested into our pilot system. In this report, we describe the methodology behind our approach to fully restore this image collection and the process used to ingest it into the prototype persistent archive. (UMIACS-TR-2003-105)}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1321}, author = {Smorul,Mike and JaJa, Joseph F. and McCall,Fritz and Brown,Susan Fitch and Moore,Reagan and Marciano,Richard and Chen,Sheau-Yen and Lopez,Rick and Chadduck,Robert} } @article {12953, title = {Reduction of Cholera in Bangladeshi Villages by Simple Filtration}, journal = {Proceedings of the National Academy of Sciences}, volume = {100}, year = {2003}, month = {2003/02/04/}, pages = {1051 - 1055}, abstract = {Based on results of ecological studies demonstrating that Vibrio cholerae, the etiological agent of epidemic cholera, is commensal to zooplankton, notably copepods, a simple filtration procedure was developed whereby zooplankton, most phytoplankton, and particulates >20 μm were removed from water before use. Effective deployment of this filtration procedure, from September 1999 through July 2002 in 65 villages of rural Bangladesh, of which the total population for the entire study comprised ≈133,000 individuals, yielded a 48\% reduction in cholera (P < 0.005) compared with the control.}, isbn = {0027-8424, 1091-6490}, doi = {10.1073/pnas.0237386100}, url = {http://www.pnas.org/content/100/3/1051}, author = {Rita R Colwell and Huq,Anwar and M. Sirajul Islam and K. M. A. Aziz and Yunus,M. and N. Huda Khan and A. Mahmud and Sack,R. Bradley and Nair,G. B. and J. Chakraborty and Sack,David A. and E. Russek-Cohen} } @article {15434, title = {Refactoring using event-based profiling}, journal = {First International Workshop on REFactoring: Achievements, Challenges, Effects (REFACE)}, year = {2003}, month = {2003///}, abstract = {Refactoring is a disciplined process of restructuring software code in order to improve it, e.g., to make it more reusable, reliable and maintainable. The source of information that guides the refactoring process may be the software{\textquoteright}s user profiles. An increasingly important class of software is event-based software. Event-based software takes an event as an input, changes its state, and perhaps outputs an event. It provides new opportunities for refactoring. For example, reorganizing the objects related to an event and restructuring the event handlers based on the behavior of the software.
These opportunities require that we collect user profiles at the level of events rather than the code and model the software in such a way that allows refactoring of event handlers. We present new techniques to collect event-level profiles and organize event handlers. We describe our techniques on one class of event-based software {\textendash} Graphical User Interfaces (GUIs). We demonstrate the practicality and usefulness of our techniques on a large software system.}, author = {Nagarajan,A. and Memon, Atif M.} } @article {15410, title = {Regression testing of GUIs}, journal = {ACM SIGSOFT Software Engineering Notes}, volume = {28}, year = {2003}, month = {2003/09//}, pages = {118 - 127}, abstract = {Although graphical user interfaces (GUIs) constitute a large part of the software being developed today and are typically created using rapid prototyping, there are no effective regression testing techniques for GUIs. The needs of GUI regression testing differ from those of traditional software. When the structure of a GUI is modified, test cases from the original GUI are either reusable or unusable on the modified GUI. Since GUI test case generation is expensive, our goal is to make the unusable test cases usable. The idea of reusing these unusable (a.k.a. obsolete) test cases has not been explored before. In this paper, we show that for GUIs, the unusability of a large number of test cases is a serious problem. We present a novel GUI regression testing technique that first automatically determines the usable and unusable test cases from a test suite after a GUI modification. It then determines which of the unusable test cases can be repaired so they can execute on the modified GUI. The last step is to repair the test cases. Our technique is integrated into a GUI testing framework that, given a test case, automatically executes it on the GUI. We implemented our regression testing technique and demonstrate for two case studies that our approach is effective in that many of the test cases can be repaired, and is practical in terms of its time performance.}, keywords = {call-tree, classification of events, GUI call-graph, GUI control-flow graph, GUI testing, regression testing, repairing test cases}, isbn = {0163-5948}, doi = {10.1145/949952.940088}, url = {http://doi.acm.org/10.1145/949952.940088}, author = {Memon, Atif M. and Soffa,Mary Lou} } @article {12153, title = {Replicated studies: building a body of knowledge about software reading techniques}, journal = {SERIES ON SOFTWARE ENGINEERING AND KNOWLEDGE ENGINEERING}, volume = {12}, year = {2003}, month = {2003///}, pages = {39 - 84}, author = {Shull, F. and Carver, J. and Travassos,G.H. and Maldonado,J.C. and Conradi,R. and Basili, Victor R.} } @conference {13268, title = {A representation for abstract simplicial complexes: an analysis and a comparison}, booktitle = {Discrete Geometry for Computer Imagery}, year = {2003}, month = {2003///}, pages = {454 - 464}, abstract = {Abstract simplicial complexes are used in many application contexts to represent multi-dimensional, possibly non-manifold and non-uniformly dimensional, geometric objects. In this paper we introduce a new general yet compact data structure for representing simplicial complexes, which is based on a decomposition approach that we have presented in our previous work [3]. We compare our data structure with the existing ones and we discuss in which respect it performs better than others.}, doi = {10.1007/978-3-540-39966-7_43}, author = {De Floriani, Leila and Morando,F.
and Puppo,E.} } @conference {13322, title = {Representation of non-manifold objects}, booktitle = {Proceedings of the eighth ACM symposium on Solid modeling and applications}, series = {SM {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {304 - 309}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In our previous work [2], we have shown that a non-manifold, mixed-dimensional object described by simplicial complexes can be decomposed in a unique way into regular components, all belonging to a well-understood class. Based on such decomposition, we define here a two-level topological data structure for representing non-manifold objects in any dimension: the first level represents components; while the second level represents the connectivity relation among them. The resulting data structure is compact and scalable, allowing for the efficient treatment of singularities without burdening well-behaved parts of a complex with excessive space overheads.}, keywords = {Data structures, Non-manifold modeling, simplicial complexes}, isbn = {1-58113-706-0}, doi = {10.1145/781606.781656}, url = {http://doi.acm.org/10.1145/781606.781656}, author = {De Floriani, Leila and Morando,Franco and Puppo,Enrico} } @article {12302, title = {Resilient multicast using overlays}, journal = {SIGMETRICS Perform. Eval. Rev.}, volume = {31}, year = {2003}, month = {2003/06//}, pages = {102 - 113}, abstract = {We introduce PRM (Probabilistic Resilient Multicast): a multicast data recovery scheme that improves data delivery ratios while maintaining low end-to-end latencies. PRM has both a proactive and a reactive component; in this paper we describe how PRM can be used to improve the performance of application-layer multicast protocols, especially when there are high packet losses and host failures. Further, using analytic techniques, we show that PRM can guarantee arbitrarily high data delivery ratios and low latency bounds. As a detailed case study, we show how PRM can be applied to the NICE application-layer multicast protocol. We present detailed simulations of the PRM-enhanced NICE protocol for 10,000 node Internet-like topologies. Simulations show that PRM achieves a high delivery ratio (> 97\%) with a low latency bound (600 ms) for environments with high end-to-end network losses (1-5\%) and high topology change rates (5 changes per second) while incurring very low overheads (< 5\%).}, keywords = {overlay multicast, randomized forwarding, Resilience}, isbn = {0163-5999}, doi = {10.1145/885651.781041}, url = {http://doi.acm.org/10.1145/885651.781041}, author = {Banerjee,Suman and Lee,Seungjoon and Bhattacharjee, Bobby and Srinivasan, Aravind} } @conference {18266, title = {Resistance of orthogonal Gaussian fingerprints to collusion attacks}, booktitle = {Multimedia and Expo, 2003. ICME {\textquoteright}03. Proceedings. 2003 International Conference on}, volume = {1}, year = {2003}, month = {2003/07//}, pages = {I - 617-20 vol.1 - I - 617-20 vol.1}, abstract = {Digital fingerprinting is a means to offer protection to digital data by which fingerprints embedded in the multimedia are capable of identifying unauthorized use of digital content. A powerful attack that can be employed to reduce this tracing capability is collusion. In this paper, we study the collusion resistance of a fingerprinting system employing Gaussian distributed fingerprints and orthogonal modulation. 
We propose a likelihood-based approach to estimate the number of colluders, and introduce the thresholding detector for colluder identification. We first analyze the collusion resistance of a system to the average attack by considering the probability of a false negative and the probability of a false positive when identifying colluders. Lower and upper bounds for the maximum number of colluders $K_{max}$ are derived. We then show that the detectors are robust to different attacks. We further study different sets of performance criteria.}, keywords = {approach;, attacks;, capability;, collusion, data, data;, digital, distributed, embedded, fingerprinting;, fingerprints;, Gaussian, likelihood-based, modulation;, multimedia, of, orthogonal, probability;, processes;, protection;, Security, systems;, tracing}, doi = {10.1109/ICME.2003.1220993}, author = {Wang,Z.J. and M. Wu and Zhao,Hong and Liu,K. J.R and Trappe,W.} } @conference {15963, title = {RGL study in a hybrid real-time system}, booktitle = {Proceedings of the IASTED NCI}, year = {2003}, month = {2003///}, author = {Hennacy,K. and Swamy,N. and Perlis, Don} } @inbook {12739, title = {Robust Face Recognition in the Presence of Clutter}, booktitle = {Audio- and Video-Based Biometric Person Authentication}, series = {Lecture Notes in Computer Science}, volume = {2688}, year = {2003}, month = {2003///}, pages = {1062 - 1062}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We propose a new method within the framework of principal component analysis to robustly recognize faces in the presence of clutter. The traditional eigenface recognition method performs poorly when confronted with the more general task of recognizing faces appearing against a background. It misses faces completely or throws up many false alarms. We argue in favor of learning the distribution of background patterns and show how this can be done for a given test image. An eigenbackground space is constructed and this space in conjunction with the eigenface space is used to impart robustness in the presence of background. A suitable classifier is derived to distinguish non-face patterns from faces. When tested on real images, the performance of the proposed method is found to be quite good.}, isbn = {978-3-540-40302-9}, url = {http://dx.doi.org/10.1007/3-540-44887-X_1}, author = {Rajagopalan, AN and Chellappa, Rama and Koterba,Nathan}, editor = {Kittler,Josef and Nixon,Mark} } @conference {15140, title = {Round efficiency of multi-party computation with a dishonest majority}, booktitle = {Proceedings of the 22nd international conference on Theory and applications of cryptographic techniques}, series = {EUROCRYPT{\textquoteright}03}, year = {2003}, month = {2003///}, pages = {578 - 595}, publisher = {Springer-Verlag}, organization = {Springer-Verlag}, address = {Berlin, Heidelberg}, abstract = {We consider the round complexity of multi-party computation in the presence of a static adversary who controls a majority of the parties. Here, n players wish to securely compute some functionality and up to n - 1 of these players may be arbitrarily malicious. Previous protocols for this setting (when a broadcast channel is available) require O(n) rounds.
We present two protocols with improved round complexity: The first assumes only the existence of trapdoor permutations and dense cryptosystems, and achieves round complexity O(log n) based on a proof scheduling technique of Chor and Rabin [13]; the second requires a stronger hardness assumption (along with the non-black-box techniques of Barak [2]) and achieves O(1) round complexity.}, isbn = {3-540-14039-5}, url = {http://dl.acm.org/citation.cfm?id=1766171.1766222}, author = {Katz, Jonathan and Ostrovsky,Rafail and Smith,Adam} } @article {15612, title = {Ray interpolants for fast raytracing reflections and refractions}, journal = {Journal of WSCG (Proc. International Conf. in Central Europe on Comp. Graph., Visualization and Comp. Vision)}, volume = {10}, year = {2002}, month = {2002///}, pages = {1 - 8}, abstract = {To render an object by ray tracing, one or more rays are shot from the viewpoint through every pixel of the image plane. For reflective and refractive objects, especially for multiple levels of reflections and/or refractions, this requires many expensive intersection calculations. This paper presents a new method for accelerating ray-tracing of reflective and refractive objects by substituting accurate-but-slow intersection calculations with approximate-but-fast interpolation computations. Our approach is based on modeling the reflective/refractive object as a function that maps input rays entering the object to output rays exiting the object. We are interested in computing the output ray without actually tracing the input ray through the object. This is achieved by adaptively sampling rays from multiple viewpoints in various directions, as a preprocessing phase, and then interpolating the collection of nearby samples to compute an approximate output ray for any input ray. In most cases, object boundaries and other discontinuities are handled by applying various heuristics. In cases where we cannot find sufficient evidence to interpolate, we perform ray tracing as a last resort. We provide performance studies to demonstrate the efficiency of this method.}, author = {Atalay,F. B and Mount, Dave} } @article {17927, title = {A real-time seamless tiled display system for 3D graphics}, journal = {Immersive Projection Technology Symposium of the IEEE Virtual Reality 2002 Conference (VR2002 IPT)}, year = {2002}, month = {2002///}, abstract = {We outline our seamless tiled display system for interactive 3D graphics applications that is low-cost, easy to calibrate, scalable, and portable. Our system achieves geometric alignment in software by pre-warping the 3D space in contrast with the current systems that usually achieve this by 2D image pre-warping. Our system accomplishes this through real-time image capture from a digital camcorder, image segmentation, and derivation of the 3D warping matrices for each 3D graphics pipeline that feeds a projector. Our prototype system demonstrates our results on a 2 {\texttimes} 2 tiled array of projectors.}, author = {Li,Z. and Varshney, Amitabh} } @conference {18156, title = {A recognition algorithm for Chinese characters in diverse fonts}, booktitle = {Image Processing. 2002. Proceedings. 2002 International Conference on}, volume = {3}, year = {2002}, month = {2002/06//}, pages = {981 - 984 vol.3}, abstract = {The paper proposes an algorithm for recognizing Chinese characters in many diverse fonts including Song, Fang, Kai, Hei, Yuan, Lishu, Weibei and Xingkai.
The algorithm is based on features derived from peripheral direction contributions and utilizes a set of dictionaries. A 3-level matching is first performed with respect to each dictionary. The distance measures associated with these matches are then fed into a central discriminator to output the final recognition result. We propose a new multi-dictionary matching algorithm for use in the central discriminator that utilizes estimated information of neighborhood fonts. Experiments have been performed on a practical OCR software system whose recognition kernel is based on the proposed algorithm. Fast and accurate recognition has been accomplished both in title recognition, involving all of the 8 fonts, and in main-body recognition, that usually involves only the first 4 most commonly used fonts.}, keywords = {Chinese character recognition; diverse fonts; peripheral direction contributions; central discriminator; multi-dictionary matching; optical character recognition; OCR software; image matching; character sets}, doi = {10.1109/ICIP.2002.1039139}, author = {Wu,Xianli and M. Wu} } @book {14636, title = {RECOMB {\textquoteright}02: Proceedings of the sixth annual international conference on Computational biology}, year = {2002}, month = {2002///}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The papers in this volume were presented at the Sixth Annual International Conference on Computational Biology RECOMB 2002, held in Washington, D.C. on April 18-21, 2002. The RECOMB series was started in 1997 by Sorin Istrail, Pavel Pevzner and Michael Waterman. RECOMB {\textquoteright}99 took place in Lyon, France, RECOMB 2000 was held in Tokyo, Japan, and RECOMB 2001 was held in Montreal, Quebec, Canada. This year{\textquoteright}s call for papers gave rise to 118 submissions, out of which the program committee selected 35 papers to be presented at the conference and included in the proceedings. Each submission was refereed by at least three members of the program committee. After the completion of the referees{\textquoteright} reports, an extensive web-based discussion took place. RECOMB 2002 had 8 invited speakers: Ruben Abagyan (The Scripps Research Institute), Ali H. Brivanlou (Rockefeller University), Evan Eichler (Case Western Reserve University), Harold "Skip" Garner (University of Texas Southwestern Medical Center at Dallas), David Ho (Rockefeller University), Gerry Rubin (Howard Hughes Medical Institute), J. Craig Venter (Celera) and Marc Vidal (Dana-Farber Cancer Institute). The Stanislaw Ulam Memorial Lecture was given by J. Craig Venter. The Distinguished Biology Lecture was given by David Ho.
The Distinguished New Technologies Lecture was given by Harold Garner. Complete final versions of many of the papers presented in the conference will appear in a special issue of the Journal of Computational Biology, which is closely affiliated with the conference.}, isbn = {1-58113-498-3}, editor = {Myers,Gene and Hannenhalli, Sridhar and Sankoff,David and Istrail,Sorin and Pevzner,Pavel and Waterman,Michael} } @conference {14772, title = {Recompilation for debugging support in a JIT-compiler}, booktitle = {Proceedings of the 2002 ACM SIGPLAN-SIGSOFT workshop on Program analysis for software tools and engineering}, series = {PASTE {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {10 - 17}, publisher = {ACM}, organization = {ACM}, address = {Charleston, South Carolina, USA}, abstract = {A static Java compiler converts Java source code into a verifiably secure and compact architecture-neutral intermediate format, called Java byte codes. The Java byte codes can be either interpreted by a Java Virtual Machine or translated into native code by Java Just-In-Time compilers. Static Java compilers embed debug information in the Java class files to be used by the source level debuggers. However, the debug information is generated for architecture independent byte codes and most of the debug information is valid only when the byte codes are interpreted. Translating byte codes into native instructions puts a limitation on the amount of usable debug information that can be used by source level debuggers. In this paper, we present a new technique to generate valid debug information when Just-In-Time compilers are used. Our approach is based on the dynamic recompilation of Java methods by a fast code generator and lazily generates debug information when it is required. We also present three implementations for field watch support in the Java Virtual Machine Debugger Interface to investigate the runtime overhead and code size growth by our approach.}, keywords = {algorithms, debug information, debugging aids, dynamic recompilation, field access watch, java, java virtual machine debugger interface, just-in-time compilation, measurement, performance}, isbn = {1-58113-479-7}, doi = {10.1145/586094.586100}, author = {Tikir, Mustafa M. and Hollingsworth, Jeffrey K and Lueh,Guei-Yuan} } @proceedings {15793, title = {Reconstructing Images of Bar Codes for Construction Site Object Recognition}, year = {2002}, month = {2002/09//}, author = {Gilsinn,David E. and Cheok,Geraldine S. and O{\textquoteright}Leary, Dianne P.} } @article {14726, title = {Region-based memory management in cyclone}, journal = {SIGPLAN Not.}, volume = {37}, year = {2002}, month = {2002/05//}, pages = {282 - 293}, abstract = {Cyclone is a type-safe programming language derived from C. The primary design goal of Cyclone is to let programmers control data representation and memory management without sacrificing type-safety. In this paper, we focus on the region-based memory management of Cyclone and its static typing discipline. The design incorporates several advancements, including support for region subtyping and a coherent integration with stack allocation and a garbage collector. To support separate compilation, Cyclone requires programmers to write some explicit region annotations, but a combination of default annotations, local type inference, and a novel treatment of region effects reduces this burden. As a result, we integrate C idioms in a region-based framework.
In our experience, porting legacy C to Cyclone has required altering about 8\% of the code; of the changes, only 6\% (of the 8\%) were region annotations.}, isbn = {0362-1340}, doi = {10.1145/543552.512563}, url = {http://doi.acm.org/10.1145/543552.512563}, author = {Grossman,Dan and Morrisett,Greg and Jim,Trevor and Hicks, Michael W. and Wang,Yanling and Cheney,James} } @conference {13321, title = {Regular and irregular multi-resolution terrain models: a comparison}, booktitle = {Proceedings of the 10th ACM international symposium on Advances in geographic information systems}, series = {GIS {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {143 - 148}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The paper deals with the problem of modeling large-size terrain data sets. To this aim, we consider multi-resolution models based on triangle meshes. We analyze and compare two multi-resolution terrain models based on regular and irregular meshes. The two models are viewed as instances of a common multi-resolution model, that we call a multi-resolution triangle mesh. Our comparison takes into account the space requirements of the data structures implementing the two models as well as their effectiveness in supporting the extraction of variable-resolution terrain representations.}, keywords = {Multi-resolution, regular and irregular structures, terrain models}, isbn = {1-58113-591-2}, doi = {10.1145/585147.585178}, url = {http://doi.acm.org/10.1145/585147.585178}, author = {De Floriani, Leila and Magillo,Paola} } @conference {16910, title = {Remote access to large spatial databases}, booktitle = {Proceedings of the 10th ACM international symposium on Advances in geographic information systems}, series = {GIS {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {5 - 10}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Enterprises in the public and private sectors have been making their large spatial data archives available over the Internet. However, interactive work with such large volumes of online spatial data is a challenging task. We propose two efficient approaches to remote access to large spatial data. First, we introduce a client-server architecture where the work is distributed between the server and the individual clients for spatial query evaluation, data visualization, and data management. We enable the minimization of the requirements for system resources on the client side while maximizing system responsiveness as well as the number of connections one server can handle concurrently. Second, for prolonged periods of access to large online data, we introduce APPOINT (an Approach for Peer-to-Peer Offloading the INTernet). This is a centralized peer-to-peer approach that helps Internet users transfer large volumes of online data efficiently.
In APPOINT, active clients of the client-server architecture act on the server{\textquoteright}s behalf and communicate with each other to decrease network latency, improve service bandwidth, and resolve server congestions.}, keywords = {client/server, GIS, Internet, peer-to-peer}, isbn = {1-58113-591-2}, doi = {10.1145/585147.585150}, url = {http://doi.acm.org/10.1145/585147.585150}, author = {Tanin,Egemen and Brabec,Franti{\v s}ek and Samet, Hanan} } @conference {16912, title = {Remote thin-client access to spatial database systems}, booktitle = {Proceedings of the 2002 annual national conference on Digital government research}, series = {dg.o {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {1 - 8}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {Numerous federal agencies produce official statistics that are made accessible to ordinary citizens for searching and data retrieval. This is often done via the Internet through a web browser interface. If this data is presented in textual format, it can often be searched and retrieved by such attributes as topic, responsible agency, keywords, or press release. However, if the data is of spatial nature, e.g., in the form of a map, then using text-based queries is often too cumbersome for the intended audience. We propose to use the capabilities of the SAND Spatial Browser to provide more power to users of these databases. Using the SAND Spatial Browser allows users to define the spatial region of interest with greater specificity, instead of forcing them to retrieve data just for a particular location or a region with a predefined boundary. They can also make use of ranking which is the ability to retrieve data in the order of distance from other instances of the data or aggregates of data that are user-defined. Work is distributed between the SAND server and the individual clients for query evaluation, data visualization and data management. This enables the minimization of the necessary requirements for system resources on the client side while maximizing the number of connections one server can handle concurrently. Concrete experience with interfacing the SAND system with FedStats data is also discussed.}, url = {http://dl.acm.org/citation.cfm?id=1123098.1123170}, author = {Samet, Hanan and Brabec,Franti{\v s}ek} } @article {17955, title = {Representing thermal vibrations and uncertainty in molecular surfaces}, journal = {SPIE Conference on Visualization and Data Analysis}, volume = {4665}, year = {2002}, month = {2002///}, pages = {80 - 90}, abstract = {The previous methods to compute smooth molecular surface assumed that each atom in a molecule has a fixed position without thermal motion or uncertainty. In the real world, the position of an atom in a molecule is fuzzy because of its uncertainty in protein structure determination and the thermal energy of the atom. In this paper, we propose a method to compute smooth molecular surface for fuzzy atoms. The Gaussian distribution is used for modeling the fuzziness of each atom, and a p-probability sphere is computed for each atom with a certain confidence level. The smooth molecular surface with fuzzy atoms is computed efficiently from extended-radius p-probability spheres.
We have implemented a program for visualizing three-dimensional molecular structures, including the smooth molecular surface with fuzzy atoms, using multi-layered transparent surfaces, where the surface of each layer has a different confidence level and a transparency associated with that confidence level.}, author = {Lee,C. H and Varshney, Amitabh} } @conference {12756, title = {A robust algorithm for probabilistic human recognition from video}, booktitle = {Pattern Recognition, 2002. Proceedings. 16th International Conference on}, volume = {1}, year = {2002}, month = {2002///}, pages = {226 - 229 vol.1}, abstract = {Human recognition from video requires solving the two tasks, recognition and tracking, simultaneously. This leads to a parameterized time series state space model, representing both motion and identity of the human. Sequential Monte Carlo (SMC) algorithms, like Condensation, can be developed to offer numerical solutions to this model. However in outdoor environments, the solution is more likely to diverge from the foreground, causing failures in both recognition and tracking. In this paper we propose an approach for tackling this problem by incorporating the constraint of temporal continuity in the observations. Experimental results demonstrate improvements over its Condensation counterpart.}, keywords = {probabilistic human recognition; sequential Monte Carlo methods; Condensation algorithm; parameterized time series state-space model; temporal continuity; image recognition; robust algorithms}, doi = {10.1109/ICPR.2002.1044661}, author = {Zhou,Shaohua and Chellapa, Rama} } @article {18157, title = {A robust error resilient approach for MPEG video transmission over internet}, journal = {Visual Communication and Image Processing, SPIE}, volume = {4671}, year = {2002}, month = {2002///}, pages = {103 - 111}, abstract = {In network delivery of compressed video, packets may be lost if the channel is unreliable. Such losses tend to occur in bursts. In this paper, we develop an error resilient video encoding approach to help error concealment at the decoder. We introduce a new block shuffling scheme to isolate erroneous blocks caused by packet losses. And we apply data hiding to add additional protection for motion vectors. The incorporation of these schemes adds little complexity to the standard encoder. Experimental results suggest that our approach can achieve a reasonable quality for packet loss up to 30\% over a wide range of video materials.}, author = {Yin,P. and Wu,M. and Liu,B.} } @article {11913, title = {Rover: scalable location-aware computing}, journal = {Computer}, volume = {35}, year = {2002}, month = {2002/10//}, pages = {46 - 53}, abstract = {All the components necessary for realizing location-aware computing are available in the marketplace today. What has hindered the widespread deployment of location-based systems is the lack of an integration architecture that scales with user populations. The authors have completed the initial implementation of Rover, a system designed to achieve this sort of integration and to automatically tailor information and services to a mobile user{\textquoteright}s location. Their studies have validated Rover{\textquoteright}s underlying software architecture, which achieves system scalability through high-resolution, application-specific resource scheduling at the servers and network.
The authors believe that this technology will greatly enhance the user experience in many places, including museums, amusement and theme parks, shopping malls, game fields, offices, and business centers. They designed the system specifically to scale to large user populations and expect its benefits to increase with them.}, keywords = {location-aware computing; Rover; scalability; integration architecture; application-specific resource scheduling; mobile users; handheld units; wireless LAN; software architecture; museums; theme parks; shopping malls; office automation}, isbn = {0018-9162}, doi = {10.1109/MC.2002.1039517}, author = {Banerjee,S. and Agarwal,S. and Kamel,K. and Kochut, A. and Kommareddy,C. and Nadeem,T. and Thakkar,P. and Trinh,Bao and Youssef,A. and Youssef, M. and Larsen,R.L. and Udaya Shankar,A. and Agrawala, Ashok K.} } @conference {15898, title = {Rapidly retargetable interactive translingual retrieval}, booktitle = {Proceedings of the first international conference on Human language technology research}, series = {HLT {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {1 - 5}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {This paper describes a system for rapidly retargetable interactive translingual retrieval. Basic functionality can be achieved for a new document language in a single day, and further improvements require only a relatively modest additional investment. We applied the techniques first to search Chinese collections using English queries, and have successfully added French, German, and Italian document collections. We achieve this capability through separation of language-dependent and language-independent components and through the application of asymmetric techniques that leverage an extensive English retrieval infrastructure.}, keywords = {cross-language information retrieval}, doi = {10.3115/1072133.1072212}, url = {http://dx.doi.org/10.3115/1072133.1072212}, author = {Levow,Gina-Anne and Oard, Douglas and Resnik, Philip} } @article {18265, title = {Reading between the lines: Lessons from the SDMI challenge}, journal = {Proceedings of the 10th USENIX Security Symposium}, year = {2001}, month = {2001///}, pages = {13 - 17}, abstract = {The Secure Digital Music Initiative is a consortium of parties interested in preventing piracy of digital music, and to this end they are developing architectures for content protection on untrusted platforms. SDMI recently held a challenge to test the strength of four watermarking technologies, and two other security technologies. No documentation explained the implementations of the technologies, and neither watermark embedding nor detecting software was directly accessible to challenge participants. We nevertheless accepted the challenge, and explored the inner workings of the technologies. We report on our results here.}, author = {Craver,S.A. and Wu,M. and Liu,B. and Stubblefield,A. and Swartzlander,B. and Wallach,D.S. and Dean,D.
and Felten,E.W.} } @article {13015, title = {Relating amino acid sequence to phenotype: analysis of peptide-binding data}, journal = {Biometrics}, volume = {57}, year = {2001}, month = {2001/06//}, pages = {632 - 642}, abstract = {We illustrate data analytic concerns that arise in the context of relating genotype, as represented by amino acid sequence, to phenotypes (outcomes). The present application examines whether peptides that bind to a particular major histocompatibility complex (MHC) class I molecule have characteristic amino acid sequences. However, the concerns identified and addressed are considerably more general. It is recognized that simple rules for predicting binding based solely on preferences for specific amino acids in certain (anchor) positions of the peptide{\textquoteright}s amino acid sequence are generally inadequate and that binding is potentially influenced by all sequence positions as well as between-position interactions. The desire to elucidate these more complex prediction rules has spawned various modeling attempts, the shortcomings of which provide motivation for the methods adopted here. Because of (i) this need to model between-position interactions, (ii) amino acids constituting a highly (20) multilevel unordered categorical covariate, and (iii) there frequently being numerous such covariates (i.e., positions) comprising the sequence, standard regression/classification techniques are problematic due to the proliferation of indicator variables required for encoding the sequence position covariates and attendant interactions. These difficulties have led to analyses based on (continuous) properties (e.g., molecular weights) of the amino acids. However, there is potential information loss in such an approach if the properties used are incomplete and/or do not capture the mechanism underlying association with the phenotype. Here we demonstrate that handling unordered categorical covariates with numerous levels and accompanying interactions can be done effectively using classification trees and recently devised bump-hunting methods. We further tackle the question of whether observed associations are attributable to amino acid properties as well as addressing the assessment and implications of between-position covariation.}, author = {Segal,M. R and Cummings, Michael P. and Hubbard,A. E} } @article {18710, title = {Rescuing a destabilized protein fold through backbone cyclization}, journal = {Journal of Molecular Biology}, volume = {308}, year = {2001}, month = {2001/05/18/}, pages = {1045 - 1062}, abstract = {We describe the physicochemical characterization of various circular and linear forms of the \~{}60 residue N-terminal Src homology 3 (SH3) domain from the murine c-Crk adapter protein. Structural, dynamic, thermodynamic, kinetic and biochemical studies reveal that backbone circularization does not prevent the adoption of the natural folded structure in any of the circular proteins. Both the folding and unfolding rate of the protein increased slightly upon circularization. Circularization did not lead to a significant thermodynamic stabilization of the full-length protein, suggesting that destabilizing enthalpic effects (e.g. strain) negate the expected favorable entropic contribution to overall stability. In contrast, we find circularization results in a dramatic stabilization of a truncated version of the SH3 domain lacking a key glutamate residue. 
The ability to rescue the destabilized mutant indicates that circularization may be a useful tool in protein engineering programs geared towards generating minimized proteins.}, keywords = {circular protein, ligation, SH3 domain}, isbn = {0022-2836}, doi = {10.1006/jmbi.2001.4631}, url = {http://www.sciencedirect.com/science/article/pii/S0022283601946315}, author = {Camarero,Julio A and Fushman, David and Sato,Satoshi and Giriat,Izabela and Cowburn,David and Raleigh,Daniel P and Muir,Tom W} } @article {13850, title = {Review of Natural Language Processing in R.A. Wilson and F.C. Keil (Eds.), The MIT Encyclopedia of the Cognitive Sciences}, journal = {Artificial Intelligence}, volume = {130}, year = {2001}, month = {2001/08//}, pages = {185 - 189}, abstract = {The MIT Encyclopedia of the Cognitive Sciences (MITECS) is the first encyclopedia in cognitive sciences{\textemdash}a web-navigable resource with invaluable information and several hundred links to related resources. The material provided therein is thorough and very clearly presented by the leading scientists in each area. This is one of the most comprehensive resources in cognitive science to date. It will serve as a teaching and research guide that users may frequently refer to for important definitions, background information, and citations to relevant literature. This review covers areas relevant to Natural Language Processing (NLP), in particular, the entries entitled {\textquotedblleft}Natural Language Processing{\textquotedblright} (James Allen), {\textquotedblleft}Computational Linguistics{\textquotedblright} (Aravind Joshi), {\textquotedblleft}Generation{\textquotedblright} and {\textquotedblleft}Machine Translation{\textquotedblright} (both by Eduard Hovy), {\textquotedblleft}Computational Lexicons{\textquotedblright} (James Pustejovsky), and {\textquotedblleft}Statistical Techniques{\textquotedblright} (Eugene Charniak). I will also address issues concerning the use of MITECS as an online, web-navigable document.}, isbn = {0004-3702}, doi = {10.1016/S0004-3702(01)00096-0}, url = {http://www.sciencedirect.com/science/article/pii/S0004370201000960}, author = {Dorr, Bonnie J} } @article {11917, title = {Revolutionary Advances in Ubiquitous, Real-Time Multicomputers and Runtime Environments}, volume = {A340293}, year = {2001}, month = {2001///}, pages = {225 - 225}, institution = {MISSISSIPPI STATE UNIVERSITY}, abstract = {This work was a grant to enhance the Maruti operating system in several ways, in order to provide Mississippi State with a platform upon which their work on the Real-Time Message Passing Interface could be developed. Key technical achievements: (1) Developed predictable Myrinet communications for use in a real-time NOW; (2) Developed the MSU-Kernel to provide a POSIX OS for real-time NOWs; (3) Developed and implemented an algorithm for deploying a globally synchronized clock in a real-time NOW; (4) Developed an improved real-time scheduler for the Maruti hard real-time operating system at University of Maryland (UMD); and (5) Introduced a new parametric approach in Maruti for dynamic scheduling at UMD. Details of the results of the work are presented in papers, thesis and project reports.}, author = {Agrawala, Ashok K.} } @conference {15592, title = {Robust matching of wavelet features for sub-pixel registration of Landsat data}, booktitle = {Geoscience and Remote Sensing Symposium, 2001. IGARSS {\textquoteright}01.
IEEE 2001 International}, volume = {2}, year = {2001}, month = {2001///}, pages = {706 - 708 vol.2}, abstract = {For many Earth and space science applications, automatic geo-registration at sub-pixel accuracy has become a necessity. In this work, we are focusing on building an operational system, which will provide a sub-pixel accuracy registration of Landsat-5 and Landsat-7 data. The input to our registration method consists of scenes that have been geometrically and radiometrically corrected. Such preprocessed scenes are then geo-registered relative to a database of Landsat chips. The method assumes a transformation composed of a rotation and a translation, and utilizes rotation- and translation-invariant wavelets to extract image features that are matched using statistically robust feature matching and a partial Hausdorff distance metric. The registration process is described and results on four Landsat input scenes of the Washington, D.C., area are presented.}, keywords = {Feature extraction, geo-registration, geophysical measurement technique, geophysical signal processing, geophysical techniques, Hausdorff distance metric, image registration, infrared, IR, land surface, Landsat, Landsat-5, Landsat-7, multispectral remote sensing, robust feature matching, robust matching, sub pixel registration, subpixel accuracy, terrain mapping, visible, wavelet feature, wavelet method, Wavelet transforms}, doi = {10.1109/IGARSS.2001.976609}, author = {Le Moigne,J. and Netanyahu,N. S and Masek,J. G and Mount, Dave and Goward, S.N.} } @article {15296, title = {The role of a natural language conversational interface in online sales: A case study}, journal = {International Journal of Speech Technology}, volume = {4}, year = {2001}, month = {2001///}, pages = {285 - 295}, abstract = {This paper describes the evaluation of a natural language dialog-based navigation system (HappyAssistant) that helps users access e-commerce sites to find relevant information about products and services. The prototype system leverages technologies in natural language processing and human-computer interaction to create a faster and more intuitive way of interacting with websites, especially for less experienced users. The result of a comparative study shows that users prefer the natural language-enabled navigation two to one over the menu driven navigation. In addition, the study confirmed the efficiency of using natural language dialog in terms of the number of clicks and the amount of time required to obtain the relevant information. In the case study, as compared to the menu driven system, the average number of clicks used in the natural language system was reduced by 63.2\% and the average time was reduced by 33.3\%.}, doi = {10.1023/A:1011316909641}, author = {Chai,J. and Jimmy Lin and Zadrozny,W. and Ye,Y. and Stys-Budzikowska,M. and Horvath,V. and Kambhatla,N. and Wolf,C.} } @conference {18442, title = {The role of independent verification and validation in maintaining a safety critical evolutionary software in a complex environment: the NASA Space Shuttle program}, booktitle = {Software Maintenance, 2001. Proceedings. IEEE International Conference on}, year = {2001}, month = {2001///}, pages = {118 - 126}, abstract = {The National Aeronautics and Space Administration (NASA) Space Shuttle program is a multi-billion dollar activity scheduled to span over 40 years. Maintaining such software with requirements for high reliability and mission safety taxes current development methods.
The authors present how independent verification and validation (IV\&V) activities are used to support these requirements. They also show how the IV\&V activities for this program differ from those of more traditional software developments.}, keywords = {NASA Space Shuttle program; independent verification and validation; IV\&V activities; safety-critical software; evolutionary software; complex environment; software maintenance; high reliability; mission safety; aerospace computing; software prototyping; space vehicles}, doi = {10.1109/ICSM.2001.972722}, author = {Zelkowitz, Marvin V and Rus,L.} } @article {18271, title = {Rotation, scale, and translation resilient public watermarking for images using a log-polar Fourier transform}, year = {2001}, month = {2001/08/28/}, abstract = {A method for detecting a watermark signal in digital image data. The detecting method includes the steps of: computing a log-polar Fourier transform of the image data to obtain a log-polar Fourier spectrum; projecting the log-polar Fourier spectrum down to a lower dimensional space to obtain an extracted signal; comparing the extracted signal to a target watermark signal; and declaring the presence or absence of the target watermark signal in the image data based on the comparison. Also provided is a method for inserting a watermark signal in digital image data to obtain a watermarked image. The inserting method includes the steps of: computing a log-polar Fourier transform of the image data to obtain a log-polar Fourier spectrum; projecting the log-polar Fourier spectrum down to a lower dimensional space to obtain an extracted signal; modifying the extracted signal such that it is similar to a target watermark; performing a one-to-many mapping of the modified signal back to...}, url = {http://www.google.com/patents?id=v4AIAAAAEBAJ}, author = {Bloom,Jeffrey A. and Cox,Ingemar J. and Miller,Matthew L. and M. Wu and Lin,Ching-Yung and Lui,Yui Man}, editor = {Signafy, Inc.} } @article {18272, title = {Rotation, scale, and translation resilient watermarking for images}, journal = {Image Processing, IEEE Transactions on}, volume = {10}, year = {2001}, month = {2001/05//}, pages = {767 - 782}, abstract = {Many electronic watermarks for still images and video content are sensitive to geometric distortions. For example, simple rotation, scaling, and/or translation (RST) of an image can prevent blind detection of a public watermark. In this paper, we propose a watermarking algorithm that is robust to RST distortions. The watermark is embedded into a one-dimensional (1-D) signal obtained by taking the Fourier transform of the image, resampling the Fourier magnitudes into log-polar coordinates, and then summing a function of those magnitudes along the log-radius axis. Rotation of the image results in a cyclical shift of the extracted signal. Scaling of the image results in amplification of the extracted signal, and translation of the image has no effect on the extracted signal. We can therefore compensate for rotation with a simple search, and compensate for scaling by using the correlation coefficient as the detection measure. False positive results on a database of 10 000 images are reported. Robustness results on a database of 2000 images are described. It is shown that the watermark is robust to rotation, scale, and translation.
In addition, we describe tests examining the watermark{\textquoteright}s resistance to cropping and JPEG compression.}, keywords = {watermarking; rotation, scale, and translation (RST) distortions; blind detection; public watermark; Fourier transforms; log-polar coordinates; log-radius axis; correlation coefficient; detection measure; cropping; JPEG compression; image coding; security of data; copy protection; data encapsulation}, isbn = {1057-7149}, doi = {10.1109/83.918569}, author = {Lin,C.-Y. and Wu,M. and Bloom,J.A. and Cox,I.J. and Miller,M.L. and Lui,Y.M.} } @conference {17532, title = {Receiver based management of low bandwidth access links}, booktitle = {IEEE INFOCOM 2000. Nineteenth Annual Joint Conference of the IEEE Computer and Communications Societies. Proceedings}, volume = {1}, year = {2000}, month = {2000///}, pages = {245 - 254 vol.1}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we describe a receiver-based congestion control policy that leverages TCP flow control mechanisms to prioritize mixed traffic loads across access links. We manage queueing at the access link to: (1) improve the response time of interactive network applications; (2) reduce congestion-related packet losses; while (3) maintaining high throughput for bulk-transfer applications. Our policy controls queue length by manipulating receive socket buffer sizes. We have implemented this solution in a dynamically loadable Linux kernel module, and tested it over low-bandwidth links. Our approach yields a 7-fold improvement in packet latency over an unmodified system while maintaining 94\% link utilization. In the common case, congestion-related packet losses at the access link can be eliminated. Finally, by prioritizing short flows, we show that our system reduces the time to download a complex Web page during a large background transfer by a factor of two.}, keywords = {Bandwidth, buffer storage, bulk-transfer applications, complex Web page, congestion control policy, Delay, dynamically loadable Linux kernel module, information resources, interactive network, Internet, Kernel, link utilization, Linux, low-bandwidth access links, mixed traffic load, packet latency, queue length, queueing theory, receive socket buffer sizes, receiver-based management, response time, short flow prioritizing, Size control, Sockets, subscriber loops, TCP flow control, telecommunication congestion control, telecommunication network management, Telecommunication traffic, Testing, Throughput, Transport protocols, Unix, Web pages}, isbn = {0-7803-5880-5}, doi = {10.1109/INFCOM.2000.832194}, author = {Spring, Neil and Chesire,M. and Berryman,M. and Sahasranaman,V. and Anderson,T.
and Bershad,B.} } @conference {14528, title = {Rectified Heat Transfer to Vapor Bubbles in Standing Acoustic Waves}, booktitle = {Microgravity fluid physics and heat transfer: proceedings of the International Conference on Microgravity Fluid Physics and Heat Transfer held at the Turtle Bay Hilton, Oahu, Hawaii, September 19-24, 1999}, year = {2000}, month = {2000///}, pages = {96 - 96}, author = {Gumerov, Nail A.} } @article {17345, title = {Report on closing the digital divide: meeting of the Department of Commerce, Washington, DC December 9, 1999}, journal = {ACM SIGCHI Bulletin}, volume = {32}, year = {2000}, month = {2000/04//}, pages = {43 - 44}, isbn = {0736-6906}, doi = {10.1145/360405.360445}, url = {http://doi.acm.org/10.1145/360405.360445}, author = {Shneiderman, Ben} } @article {17347, title = {Research Alerts}, journal = {Interactions}, volume = {7}, year = {2000}, month = {2000/11//}, pages = {9 - 17}, isbn = {1072-5520}, doi = {10.1145/352580.352583}, url = {http://doi.acm.org/10.1145/352580.352583}, editor = {Shneiderman, Ben} } @article {13633, title = {Residual coding in document image compression}, journal = {Image Processing, IEEE Transactions on}, volume = {9}, year = {2000}, month = {2000/06//}, pages = {961 - 969}, abstract = {Symbolic document image compression relies on the detection of similar patterns in a document image and construction of a prototype library. Compression is achieved by referencing multiple pattern instances ({\textquotedblleft}components{\textquotedblright}) through a single representative prototype. To provide a lossless compression, however, the residual difference between each component and its assigned prototype must be coded. Since the size of the residual can significantly affect the compression ratio, efficient coding is essential. We describe a set of residual coding models for use with symbolic document image compression that exhibit desirable characteristics for compression and rate-distortion and facilitate compressed-domain processing. The first model orders the residual pixels by their distance to the prototype edge. Grouping pixels based on this distance value allows for a more compact coding and lower entropy. This distance model is then extended to a model that defines the structure of the residue and uses it as a basis for continuous and packet reconstruction which provides desired functionality for use in lossy compression and progressive transmission.}, keywords = {document image compression; symbolic compression; residual coding; prototype library; lossless compression; rate-distortion; compressed-domain processing; entropy; compact coding; packet reconstruction; progressive transmission; image coding; data compression; visual communication}, isbn = {1057-7149}, doi = {10.1109/83.846239}, author = {Kia,O. E and David Doermann} } @article {15401, title = {Resolution enhancement by applying MFM under UHV conditions}, journal = {Magnetics, IEEE Transactions on}, volume = {36}, year = {2000}, month = {2000/09//}, pages = {2975 - 2977}, abstract = {The enhancement in signal-to-noise ratio and lateral resolution in MFM in going from ambient pressure to UHV is demonstrated.
The performance of several cantilevers is evaluated using a patterned 50 nm thick permalloy film, with cross-tie as well as 90{\textdegree} domain walls, and a 200 nm thick permalloy film with perpendicular magnetization. The increase in the quality factor of the cantilever oscillation in UHV improves the sensitivity, consequently allowing less magnetic material on the tip to achieve the same signal-to-noise ratio. This reduction in magnetic volume sharpens the lateral resolution. We also demonstrate that the magnetic interaction can be so weak that a magnetic contrast is visible only under UHV conditions.}, keywords = {magnetic force microscopy; MFM; UHV conditions; cantilevers; permalloy thin films; NiFe; domain walls; cross-tie walls; perpendicular magnetization; lateral resolution; magnetic contrast; magnetic volume; resolution enhancement; signal-to-noise ratio}, isbn = {0018-9464}, doi = {10.1109/20.908645}, author = {Dreyer,M. and Gomez,R.D. and Mayergoyz, Issak D} } @inbook {14801, title = {Resource-aware meta-computing}, booktitle = {Emphasizing Distributed Systems}, volume = {Volume 53}, year = {2000}, month = {2000///}, pages = {109 - 169}, publisher = {Elsevier}, organization = {Elsevier}, abstract = {Meta-computing is an increasingly popular and useful method of obtaining resources to solve large computational problems. However, meta-computer environments pose a number of unique challenges, many of which have yet to be addressed effectively. Among these are dynamicism in both applications and environments, and heterogeneity at several different levels. This chapter discusses current approaches to these problems, and uses them in the Active Harmony system as a running example. Harmony supports an interface that allows applications to export tuning alternatives to the higher-level system. By exposing different parameters that can be changed at runtime, applications can be automatically adapted to changes in their execution environment caused by other programs, the addition or deletion of nodes, or changes in the availability of resources like communication links. Applications expose not only options, but also expected resource utilization with each option and the effect that the option will have on the application{\textquoteright}s performance. We discuss how this flexibility can be used to tune the overall performance of a collection of applications in a system.}, isbn = {0065-2458}, url = {http://www.sciencedirect.com/science/article/pii/S0065245800800054}, author = {Hollingsworth, Jeffrey K and Keleher, Peter J. and Ryu, Kyung D.}, editor = {Marvin V. Zelkowitz} } @article {12363, title = {Resynchronization for multiprocessor DSP systems}, journal = {Circuits and Systems I: Fundamental Theory and Applications, IEEE Transactions on}, volume = {47}, year = {2000}, month = {2000///}, pages = {1597 - 1609}, author = {Bhattacharyya, Shuvra S. and Sriram,S. and Lee,E. A} } @article {17651, title = {Retrieval scheduling for collaborative multimedia presentations}, journal = {Multimedia Systems}, volume = {8}, year = {2000}, month = {2000///}, pages = {146 - 155}, abstract = {The single-system approach is no longer sufficient to handle the load on popular Internet servers, especially for those offering extensive multimedia content. Such services have to be replicated to enhance their availability, performance, and reliability.
In a highly replicated and available environment, server selection is an important issue. In this paper, we propose an application-layer broker (ALB) for this purpose. ALB employs a content-based, client-centric approach to negotiate with the servers and to identify the best server for the requested objects. ALB aims to maximize client buffer utilization in order to efficiently handle dynamic user interactions such as skip, reverse presentation, go back in time. We also present details of a collaborative multimedia presentation platform that we have developed based on ALB.}, isbn = {0942-4962}, url = {http://dx.doi.org/10.1007/s005300050157}, author = {Bai,Ping and Prabhakaran,B. and Srinivasan, Aravind} } @article {16437, title = {Reverse Engineering and UML: A Case Study of AuctionBot}, volume = {EECS 581}, year = {2000}, month = {2000///}, institution = {University of Michigan}, author = {Bangera,R. and Rand, William} } @article {18945, title = {Review of "Architectures and mechanisms for language processing" by Matthew W. Crocker, Martin Pickering, and Charles Clifton. Cambridge University Press 2000.}, journal = {Computational Linguistics}, volume = {26}, year = {2000}, month = {2000/12//}, pages = {648 - 651}, isbn = {0891-2017}, url = {http://dl.acm.org/citation.cfm?id=971882.971892}, author = {Weinberg, Amy} } @article {14652, title = {A Review of Current Routing Protocols for Ad Hoc Mobile Wireless Networks}, journal = {IEEE Personal Communications}, volume = {29}, year = {2000}, month = {2000///}, pages = {156 - 71}, author = {Royer,E.M. and Toh,C.K. and Hicks, Michael W. and Kakkar,P. and Moore,J. T and Alexander,D. S and Gunter,C. A and Nettles,S. and others} } @conference {15311, title = {REXTOR: a system for generating relations from natural language}, booktitle = {Proceedings of the ACL-2000 workshop on Recent advances in natural language processing and information retrieval: held in conjunction with the 38th Annual Meeting of the Association for Computational Linguistics - Volume 11}, series = {RANLPIR {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {67 - 77}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {This paper argues that a finite-state language model with a ternary expression representation is currently the most practical and suitable bridge between natural language processing and information retrieval. Despite the theoretical computational inadequacies of finite-state grammars, they are very cost effective (in time and space requirements) and adequate for practical purposes. The ternary expressions that we use are not only linguistically-motivated, but also amenable to rapid large-scale indexing. REXTOR (Relations EXtracTOR) is an implementation of this model; in one uniform framework, the system provides two separate grammars for extracting arbitrary patterns of text and building ternary expressions from them. These content representational structures serve as the input to our ternary expressions indexer.
This approach to natural language information retrieval promises to significantly raise the performance of current systems.}, doi = {10.3115/1117755.1117764}, url = {http://dx.doi.org/10.3115/1117755.1117764}, author = {Katz,Boris and Jimmy Lin} } @article {18314, title = {Robustly estimating changes in image appearance}, journal = {Computer Vision and Image Understanding}, volume = {78}, year = {2000}, month = {2000///}, pages = {8 - 31}, author = {Black,M. J and Fleet,D. J and Yacoob,Yaser} } @article {18270, title = {Rotation, scale, and translation resilient public watermarking for images}, journal = {PROC SPIE INT SOC OPT ENG}, volume = {3971}, year = {2000}, month = {2000///}, pages = {90 - 98}, abstract = {Many electronic watermarks for still images and video content are sensitive to geometric distortions. For example,simple rotation, scaling, and/or translation (RST) of an image can prevent detection of a public watermark. In this paper, we propose a watermarking algorithm that is robust to RST distortions. The watermark is embedded into a 1-dimensional signal obtained by first taking the Fourier transform of the image, resampling the Fourier magnitudes into log-polar coordinates, and then summing a function of those magnitudes along the log-radius axis. If the image is rotated, the resulting signal is cyclically shifted. If it is scaled, the signal is multiplied by some value. And if the image is translated, the signal is unaffected. We can therefore compensate for rotation with a simple search, and for scaling by using the correlation coefficient for the detection metric. False positive results on a database of 10,000 images are reported. Robustness results on a database of 2,000 images are described. It is shown that the watermark is robust to rotation, scale and translation. In addition, the algorithm shows resistance to cropping. }, author = {Lin,C.Y. and Wu,M. and Bloom,J.A. and Cox,I.J. and Miller,M.L. and Lui,Y.M.} } @book {17339, title = {Readings in Information Visualization: Using Vision to Think}, year = {1999}, month = {1999/01/25/}, publisher = {Morgan Kaufmann}, organization = {Morgan Kaufmann}, abstract = {This groundbreaking book defines the emerging field of information visualization and offers the first-ever collection of the classic papers of the discipline, with introductions and analytical discussions of each topic and paper. The authors{\textquoteright} intention is to present papers that focus on the use of visualization to discover relationships, using interactive graphics to amplify thought. 
This book is intended for research professionals in academia and industry; new graduate students and professors who want to begin work in this burgeoning field; professionals involved in financial data analysis, statistics, and information design; scientific data managers; and professionals involved in medical, bioinformatics, and other areas. * Full-color reproduction throughout * Author power team - an exciting and timely collaboration between the field{\textquoteright}s pioneering, most-respected names * The only book on Information Visualization with the depth necessary for use as a text or as a reference for the information professional * Text includes the classic source papers as well as a collection of cutting edge work}, keywords = {Computer Graphics, Computers / Computer Engineering, Computers / Computer Graphics, Computers / Computer Graphics / General, Computers / Computer Science, Computers / General, Computers / Information Technology, Computers / Information Theory, Computers / Intelligence (AI) \& Semantics, Computers / Social Aspects / Human-Computer Interaction, IMAGE PROCESSING, Information display systems, Information Visualization, Psychology / General, Visualization}, isbn = {9781558605336}, author = {Card,Stuart K. and Mackinlay,Jock D. and Shneiderman, Ben} } @article {18455, title = {A real-time audio{\textendash}video front-end for multimedia applications}, journal = {The Journal of the Acoustical Society of America}, volume = {106}, year = {1999}, month = {1999///}, pages = {2271 - 2271}, author = {Zotkin,Dmitry N and Duraiswami, Ramani and Hariatoglu,I. and Davis, Larry S. and Otsuka,T.} } @conference {16167, title = {Refining query previews techniques for data with multivalued attributes: the case of NASA EOSDIS}, booktitle = {Research and Technology Advances in Digital Libraries, 1999. ADL {\textquoteright}99. Proceedings. IEEE Forum on}, year = {1999}, month = {1999///}, pages = {50 - 59}, abstract = {Query Previews allow users to rapidly gain an understanding of the content and scope of a digital data collection. These previews present overviews of abstracted metadata, enabling users to rapidly and dynamically avoid undesired data. We present our recent work on developing query previews for a variety of NASA EOSDIS situations. We focus on approaches that successfully address the challenge of multi-valued attribute data. Memory requirements and processing time associated with running these new solutions remain independent of the number of records in the dataset. We describe two techniques and their respective prototypes used to preview NASA Earth science data.}, keywords = {query previews; NASA EOSDIS; multi-valued attribute data; digital data collection; abstracted metadata; Earth science data; memory requirements; processing time; digital libraries; geophysics computing}, doi = {10.1109/ADL.1999.777690}, author = {Plaisant, Catherine and Venkatraman,M. and Ngamkajorwiwat,K. and Barth,R. and Harberts,B. and Feng,Wenlan} } @article {13465, title = {Reports on the AAAI Fall Symposia}, journal = {AI Magazine}, volume = {20}, year = {1999}, month = {1999/09/15/}, pages = {87 - 87}, abstract = {The Association for the Advancement of Artificial Intelligence (AAAI) held its 1998 Fall Symposium Series on 23 to 25 October at the Omni Rosen Hotel in Orlando, Florida.
This article contains summaries of seven of the symposia that were conducted: (1) Cognitive Robotics; (2) Distributed, Continual Planning; (3) Emotional and Intelligent: The Tangled Knot of Cognition; (4) Integrated Planning for Autonomous Agent Architectures; (5) Planning with Partially Observable Markov Decision Processes; (6) Reasoning with Visual and Diagrammatic Representations; and (7) Robotics and Biology: Developing Connections.}, isbn = {0738-4602}, doi = {10.1609/aimag.v20i3.1470}, url = {http://www.aaai.org/ojs/index.php/aimagazine/article/viewArticle/1470}, author = {De Giacomo,Giuseppe and desJardins, Marie and Canamero,Dolores and Wasson,Glenn and Littman,Michael and Allwein,Gerard and Marriott,Kim and Meyer,Bernd and Webb,Barbara and Consi,Tom} } @book {19005, title = {The RNA World}, year = {1999}, month = {1999///}, publisher = {Cold Spring Harbor Laboratory Press}, organization = {Cold Spring Harbor Laboratory Press}, address = {Cold Spring Harbor, New York}, author = {Woodson,Sarah A. and Mount, Stephen M.}, editor = {Gesteland,Raymond F. and Cech,Thomas R. and Atkins,John F.} } @article {13908, title = {The Role of Children in the Design of New Technology}, journal = {Available online: ftp://ftp.cs.umd.edu/pub/hcil/Reports-Abstracts-Bibliography/99-23html/99-23.pdf (accessed 17.1.2007)}, year = {1999}, month = {1999///}, author = {Allison,D.} } @article {14911, title = {The role of convexity in perceptual completion: beyond good continuation}, journal = {Vision Research}, volume = {39}, year = {1999}, month = {1999/12//}, pages = {4244 - 4257}, abstract = {Since the seminal work of the Gestalt psychologists, there has been great interest in understanding what factors determine the perceptual organization of images. While the Gestaltists demonstrated the significance of grouping cues such as similarity, proximity and good continuation, it has not been well understood whether their catalog of grouping cues is complete {\textemdash} in part due to the paucity of effective methodologies for examining the significance of various grouping cues. We describe a novel, objective method to study perceptual grouping of planar regions separated by an occluder. We demonstrate that the stronger the grouping between two such regions, the harder it will be to resolve their relative stereoscopic depth. We use this new method to call into question many existing theories of perceptual completion (Ullman, S. (1976). Biological Cybernetics, 25, 1{\textendash}6; Shashua, A., \& Ullman, S. (1988). 2nd International Conference on Computer Vision (pp. 321{\textendash}327); Parent, P., \& Zucker, S. (1989). IEEE Transactions on Pattern Analysis and Machine Intelligence, 11, 823{\textendash}839; Kellman, P. J., \& Shipley, T. F. (1991). Cognitive psychology, Liveright, New York; Heitger, R., \& von der Heydt, R. (1993). A computational model of neural contour processing, figure-ground segregation and illusory contours. In Internal Conference Computer Vision (pp. 32{\textendash}40); Mumford, D. (1994). Algebraic geometry and its applications, Springer, New York; Williams, L. R., \& Jacobs, D. W. (1997). Neural Computation, 9, 837{\textendash}858) that are based on Gestalt grouping cues by demonstrating that convexity plays a strong role in perceptual completion. In some cases convexity dominates the effects of the well known Gestalt cue of good continuation.
While convexity has been known to play a role in figure/ground segmentation (Rubin, 1927; Kanizsa \& Gerbino, 1976), this is the first demonstration of its importance in perceptual completion.}, keywords = {Amodal completion, Convexity, Good continuation, Grouping, Stereoscopic depth}, isbn = {0042-6989}, doi = {10.1016/S0042-6989(99)00141-8}, url = {http://www.sciencedirect.com/science/article/pii/S0042698999001418}, author = {Liu,Zili and Jacobs, David W. and Basri,Ronen} } @conference {18158, title = {A rotation, scale and translation resilient public watermark}, booktitle = {Acoustics, Speech, and Signal Processing, 1999. Proceedings., 1999 IEEE International Conference on}, volume = {4}, year = {1999}, month = {1999/03//}, pages = {2065 vol.4 - 2065 vol.4}, abstract = {Summary form only given. Watermarking algorithms that are robust to the common geometric transformations of rotation, scale and translation (RST) have been reported for cases in which the original unwatermarked content is available at the detector so as to allow the transformations to be inverted. However, for public watermarks the problem is significantly more difficult since there is no original content to register with. Two classes of solution have been proposed. The first embeds a registration pattern into the content while the second seeks to apply detection methods that are invariant to these geometric transformations. This paper describes a public watermarking method which is invariant (or bears a simple relation) to the common geometric transforms of rotation, scale, and translation. It is based on the Fourier-Mellin transform which has previously been suggested. We extend this work, using a variation based on the Radon transform. The watermark is inserted into a projection of the image. The properties of this projection are such that RST transforms produce simple or no effects on the projection waveform. When a watermark is inserted into a projection, the signal must eventually be back projected to the original image dimensions. This is a one-to-many mapping that allows for considerable flexibility in the watermark insertion process. We highlight some theoretical and practical issues that affect the implementation of an RST invariant watermark. Finally, we describe preliminary experimental results}, keywords = {public watermark; watermarking algorithms; Fourier-Mellin transform; Radon transform; RST transforms; RST invariant watermark; rotation; scale; translation resilient watermark; registration pattern; image registration; security of data; transform coding; image coding; detection methods; detector; experimental results; geometric transformations; image projection; mapping; original image dimensions; projection waveform}, doi = {10.1109/ICASSP.1999.758337}, author = {Wu,M. and Miller,M.L. and Bloom,J.A. and Cox,I.J.} } @conference {17529, title = {Running EveryWare on the computational grid}, booktitle = {Proceedings of the 1999 ACM/IEEE conference on Supercomputing (CDROM)}, year = {1999}, month = {1999///}, pages = {6{\textendash}es - 6{\textendash}es}, author = {Wolski,R. and Brevik,J. and Krintz,C. and Obertelli,G.
and Spring, Neil and Su,A.} } @article {14936, title = {A Randomized Parallel Sorting Algorithm with an Experimental Study}, journal = {Journal of Parallel and Distributed Computing}, volume = {52}, year = {1998}, month = {1998/07/10/}, pages = {1 - 23}, abstract = {Previous schemes for sorting on general-purpose parallel machines have had to choose between poor load balancing and irregular communication or multiple rounds of all-to-all personalized communication. In this paper, we introduce a novel variation on sample sort which uses only two rounds of regular all-to-all personalized communication in a scheme that yields very good load balancing with virtually no overhead. Moreover, unlike previous variations, our algorithm efficiently handles the presence of duplicate values without the overhead of tagging each element with a unique identifier. This algorithm was implemented in Split-C and run on a variety of platforms, including the Thinking Machines CM-5, the IBM SP-2, and the Cray Research T3D. We ran our code using widely different benchmarks to examine the dependence of our algorithm on the input distribution. Our experimental results illustrate the efficiency and scalability of our algorithm across different platforms. In fact, it seems to outperform all similar algorithms known to the authors on these platforms, and its performance is invariant over the set of input distributions unlike previous efficient algorithms. Our results also compare favorably with those reported for the simpler ranking problem posed by the NAS Integer Sorting (IS) Benchmark.}, keywords = {generalized sorting, integer sorting, sample sort, parallel performance, Parallel algorithms}, isbn = {0743-7315}, doi = {10.1006/jpdc.1998.1462}, url = {http://www.sciencedirect.com/science/article/pii/S0743731598914629}, author = {Helman,David R. and Bader,David A. and JaJa, Joseph F.} } @article {17342, title = {Reflections on authoring, editing and managing hypertext}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {This chapter offers recommendations for potential authors of hypertext documents based on the experience of designing a hypertext system and of creating a series of substantial hypertext databases on personal computers and larger workstations. Advice on choosing projects, identifying useful author tool features, and structuring knowledge is presented. Additional issues such as the design of the root document, article size, and conversion from existing databases are covered. While hypertext has exciting potentials, the dangers of poor design must be overcome to create attractive and effective products.
(Also cross-referenced as CAR-TR-410) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/357}, author = {Shneiderman, Ben} } @article {17343, title = {Relate{\textendash}Create{\textendash}Donate: a teaching/learning philosophy for the cyber-generation}, journal = {Computers \& Education}, volume = {31}, year = {1998}, month = {1998/08//}, pages = {25 - 39}, isbn = {0360-1315}, doi = {10.1016/S0360-1315(98)00014-1}, url = {http://www.sciencedirect.com/science/article/pii/S0360131598000141}, author = {Shneiderman, Ben} } @article {15800, title = {Restoring Images Degraded by Spatially Variant Blur}, journal = {SIAM Journal on Scientific Computing}, volume = {19}, year = {1998}, month = {1998///}, pages = {1063 - 1082}, abstract = {Restoration of images that have been blurred by the effects of a Gaussian blurring function is an ill-posed but well-studied problem. Any blur that is spatially invariant can be expressed as a convolution kernel in an integral equation. Fast and effective algorithms then exist for determining the original image by preconditioned iterative methods. If the blurring function is spatially variant, however, then the problem is more difficult. In this work we develop fast algorithms for forming the convolution and for recovering the original image when the convolution functions are spatially variant but have a small domain of support. This assumption leads to a discrete problem involving a banded matrix. We devise an effective preconditioner and prove that the preconditioned matrix differs from the identity by a matrix of small rank plus a matrix of small norm. Numerical examples are given, related to the Hubble Space Telescope (HST) Wide-Field/Planetary Camera. The algorithms that we develop are applicable to other ill-posed integral equations as well.}, keywords = {convolution, discrete ill-posed problems, first-kind integral equations, image restoration, regularization, spatially variant point spread function}, doi = {10.1137/S106482759528507X}, url = {http://link.aip.org/link/?SCE/19/1063/1}, author = {Nagy,James G. and O{\textquoteright}Leary, Dianne P.} } @article {17733, title = {Rounding Errors in Solving Block Hessenberg Systems}, volume = {UMIACS-TR-94-105}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ. of Maryland, College Park}, abstract = {A rounding error analysis is presented for a divide-and-conquer algorithm to solve linear systems with block Hessenberg matrices. Conditions are derived under which the algorithm computes a backward stable solution. The algorithm is shown to be stable for diagonally dominant matrices and for M-matrices. (Also cross-referenced as UMIACS-TR-94-105) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/661}, author = {Von Matt,Urs and Stewart, G.W.} } @article {17647, title = {Randomized Distributed Edge Coloring via an Extension of the Chernoff--Hoeffding Bounds}, journal = {SIAM Journal on Computing}, volume = {26}, year = {1997}, month = {1997///}, pages = {350 - 350}, abstract = {Certain types of routing, scheduling, and resource-allocation problems in a distributed setting can be modeled as edge-coloring problems. We present fast and simple randomized algorithms for edge coloring a graph in the synchronous distributed point-to-point model of computation.
Our algorithms compute an edge coloring of a graph $G$ with $n$ nodes and maximum degree $\Delta$ with at most $1.6 \Delta + O(\log^{1+ \delta} n)$ colors with high probability (arbitrarily close to 1) for any fixed $\delta > 0$; they run in polylogarithmic time. The upper bound on the number of colors improves upon the $(2 \Delta - 1)$-coloring achievable by a simple reduction to vertex coloring. To analyze the performance of our algorithms, we introduce new techniques for proving upper bounds on the tail probabilities of certain random variables. The Chernoff--Hoeffding bounds are fundamental tools that are used very frequently in estimating tail probabilities. However, they assume stochastic independence among certain random variables, which may not always hold. Our results extend the Chernoff--Hoeffding bounds to certain types of random variables which are not stochastically independent. We believe that these results are of independent interest and merit further study. }, isbn = {00975397}, doi = {10.1137/S0097539793250767}, url = {http://link.aip.org/link/SMJCAT/v26/i2/p350/s1\&Agg=doi}, author = {Panconesi,Alessandro and Srinivasan, Aravind} } @article {14872, title = {Recognition using region correspondences}, journal = {International Journal of Computer Vision}, volume = {25}, year = {1997}, month = {1997///}, pages = {145 - 166}, abstract = {Recognition systems attempt to recover information about the identity of observed objects and their location in the environment. A fundamental problem in recognition is pose estimation. This is the problem of using a correspondence between some portions of an object model and some portions of an image to determine whether the image contains an instance of the object, and, in case it does, to determine the transformation that relates the model to the image. The current approaches to this problem are divided into methods that use {\textquotedblleft}global{\textquotedblright} properties of the object (e.g., centroid and moments of inertia) and methods that use {\textquotedblleft}local{\textquotedblright} properties of the object (e.g., corners and line segments). Global properties are sensitive to occlusion and, specifically, to self occlusion. Local properties are difficult to locate reliably, and their matching involves intensive computation. We present a novel method for recognition that uses region information. In our approach the model and the image are divided into regions. Given a match between subsets of regions (without any explicit correspondence between different pieces of the regions) the alignment transformation is computed. The method applies to planar objects under similarity, affine, and projective transformations and to projections of 3-D objects undergoing affine and projective transformations. The new approach combines many of the advantages of the previous two approaches, while avoiding some of their pitfalls. Like the global methods, our approach makes use of region information that reflects the true shape of the object. But like local methods, our approach can handle occlusion. }, doi = {10.1023/A:1007919917506}, author = {Basri,R.
and Jacobs, David W.} } @inbook {15795, title = {Regularization algorithms based on total least squares}, booktitle = {Recent advances in total least squares techniques and errors-in-variables modeling (Leuven, 1996)}, year = {1997}, month = {1997///}, pages = {127 - 137}, publisher = {SIAM}, organization = {SIAM}, address = {Philadelphia, PA}, author = {Hansen,Per Christian and O{\textquoteright}Leary, Dianne P.} } @article {15796, title = {Regularization by Truncated Total Least Squares}, journal = {SIAM Journal on Scientific Computing}, volume = {18}, year = {1997}, month = {1997///}, pages = {1223 - 1241}, abstract = {The total least squares (TLS) method is a successful method for noise reduction in linear least squares problems in a number of applications. The TLS method is suited to problems in which both the coefficient matrix and the right-hand side are not precisely known. This paper focuses on the use of TLS for solving problems with very ill-conditioned coefficient matrices whose singular values decay gradually (so-called discrete ill-posed problems), where some regularization is necessary to stabilize the computed solution. We filter the solution by truncating the small singular values of the TLS matrix. We express our results in terms of the singular value decomposition (SVD) of the coefficient matrix rather than the augmented matrix. This leads to insight into the filtering properties of the truncated TLS method as compared to regularized least squares solutions. In addition, we propose and test an iterative algorithm based on Lanczos bidiagonalization for computing truncated TLS solutions.}, keywords = {bidiagonalization, discrete ill-posed problems, regularization, total least squares}, doi = {10.1137/S1064827594263837}, url = {http://link.aip.org/link/?SCE/18/1223/1}, author = {Fierro,R. D. and Golub, G. H and Hansen,P. C. and O{\textquoteright}Leary, Dianne P.} } @inbook {11921, title = {Regulation of Cable Television}, booktitle = {The Froehlich/Kent Encyclopedia of Telecommunications: Volume 15-Radio Astronomy to Submarine Cable Systems}, volume = {15}, year = {1997}, month = {1997///}, pages = {84 - 84}, author = {Agrawala, Ashok K. and CILINGIROGLU,A. and Lee,S.} } @conference {13680, title = {The Retrieval of Document Images: A Brief Survey}, booktitle = {ICDAR}, year = {1997}, month = {1997///}, pages = {945 - 949}, author = {David Doermann} } @conference {13681, title = {The role of compressed document images in transmission and retrieval}, booktitle = {Multimedia Signal Processing, 1997., IEEE First Workshop on}, year = {1997}, month = {1997/06//}, pages = {331 - 336}, abstract = {Document images belong to a unique class of images where the information content is contained in the language represented by a series of symbols on the page, rather than in the visual objects themselves. For this reason, it is essential to preserve the fidelity of individual components when considering methods of compression. Likewise the component level structure should be a prime consideration when ordering information for lossy or progressive transmission. We refine our work on document image compression as it applies to transmission and retrieval. We first overview the basic compression scheme, then describe a structural hierarchy which provides desirable properties for transmission.
We present the results of a rate distortion experiment and discuss the implications for network applications}, keywords = {component level structure; document image compression; image coding; multimedia computing; query processing; image retrieval; image transmission; information content; lossy transmission; multimedia; network applications; progressive transmission; rate distortion experiment; structural hierarchy; symbols; visual objects; data compression; document processing; visual databases}, doi = {10.1109/MMSP.1997.602657}, author = {Kia,O. and David Doermann} } @article {17879, title = {Run-time and compiler support for programming in adaptive parallel environments}, journal = {Scientific Programming}, volume = {6}, year = {1997}, month = {1997///}, pages = {215 - 227}, author = {Edjlali,G. and Agrawal,G. and Sussman, Alan and Humphries,J. and Saltz, J.} } @conference {18306, title = {Recognition of head gestures using hidden Markov models}, booktitle = {Pattern Recognition, 1996., Proceedings of the 13th International Conference on}, volume = {3}, year = {1996}, month = {1996///}, pages = {461 - 465}, author = {Morimoto,C. and Yacoob,Yaser and Davis, Larry S.} } @article {18317, title = {Recognizing human facial expressions from long image sequences using optical flow}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {18}, year = {1996}, month = {1996/06//}, pages = {636 - 642}, abstract = {An approach to the analysis and representation of facial dynamics for recognition of facial expressions from image sequences is presented. The algorithms utilize optical flow computation to identify the direction of rigid and nonrigid motions that are caused by human facial expressions. A mid-level symbolic representation motivated by psychological considerations is developed. Recognition of six facial expressions, as well as eye blinking, is demonstrated on a large set of image sequences}, keywords = {Computer vision, Eyebrows, face recognition, facial dynamics, Facial features, human facial expression recognition, HUMANS, Image motion analysis, image recognition, image representation, Image sequences, Motion analysis, Motion estimation, Optical computing, optical flow, symbolic representation, tracking}, isbn = {0162-8828}, doi = {10.1109/34.506414}, author = {Yacoob,Yaser and Davis, Larry S.} } @article {18990, title = {Ribosomal RNA: Small nucleolar RNAs make their mark}, journal = {Current Biology}, volume = {6}, year = {1996}, month = {1996/11//}, pages = {1413 - 1415}, abstract = {Small nucleolar RNAs direct the location of certain methylations in ribosomal RNA by direct base pairing; although evolutionarily conserved, the physiological significance of these modifications remains unclear.}, isbn = {0960-9822}, doi = {10.1016/S0960-9822(96)00745-2}, url = {http://www.sciencedirect.com/science/article/pii/S0960982296007452}, author = {Peculis,Brenda A. and Mount, Stephen M.} } @article {14889, title = {Robust and efficient detection of salient convex groups}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {18}, year = {1996}, month = {1996/01//}, pages = {23 - 37}, abstract = {This paper describes an algorithm that robustly locates salient convex collections of line segments in an image. The algorithm is guaranteed to find all convex sets of line segments in which the length of the gaps between segments is smaller than some fixed proportion of the total length of the lines.
This enables the algorithm to find convex groups whose contours are partially occluded or missing due to noise. We give an expected case analysis of the algorithm performance. This demonstrates that salient convexity is unlikely to occur at random, and hence is a strong clue that grouped line segments reflect underlying structure in the scene. We also show that our algorithm{\textquoteright}s run time is $O(n^2 \log(n) + nm)$, when we wish to find the $m$ most salient groups in an image with $n$ line segments. We support this analysis with experiments on real data, and demonstrate the grouping system as part of a complete recognition system}, keywords = {computational complexity; computer vision; contours; edge detection; feature extraction; image recognition; line segment detection; object recognition; perceptual organisation; proximity; salient convex groups}, isbn = {0162-8828}, doi = {10.1109/34.476008}, author = {Jacobs, David W.} } @conference {13851, title = {Role of word sense disambiguation in lexical acquisition: predicting semantics from syntactic cues}, booktitle = {Proceedings of the 16th conference on Computational linguistics - Volume 1}, series = {COLING {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {322 - 327}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {This paper addresses the issue of word-sense ambiguity in extraction from machine-readable resources for the construction of large-scale knowledge sources. We describe two experiments: one which ignored word-sense distinctions, resulting in 6.3\% accuracy for semantic classification of verbs based on (Levin, 1993); and one which exploited word-sense distinctions, resulting in 97.9\% accuracy. These experiments were dual purpose: (1) to validate the central thesis of the work of (Levin, 1993), i.e., that verb semantics and syntactic behavior are predictably related; (2) to demonstrate that a 15-fold improvement can be achieved in deriving semantic information from syntactic cues if we first divide the syntactic cues into distinct groupings that correlate with different word senses. Finally, we show that we can provide effective acquisition techniques for novel word senses using a combination of online sources.}, doi = {10.3115/992628.992685}, url = {http://dx.doi.org/10.3115/992628.992685}, author = {Dorr, Bonnie J and Jones,Doug} } @article {17732, title = {Rounding errors in solving block Hessenberg systems}, journal = {Mathematics of Computation}, volume = {65}, year = {1996}, month = {1996/01//}, pages = {115 - 135}, abstract = {A rounding error analysis is presented for a divide-and-conquer algorithm to solve linear systems with block Hessenberg matrices. Conditions are derived under which the algorithm computes a stable solution. The algorithm is shown to be stable for block diagonally dominant matrices and for M-matrices.}, keywords = {block diagonally dominant matrices, block Hessenberg matrices, Linear systems, M-matrices, rounding error analysis}, isbn = {0025-5718}, doi = {10.1090/S0025-5718-96-00667-9}, url = {http://dx.doi.org/10.1090/S0025-5718-96-00667-9}, author = {Von Matt,Urs and Stewart, G.W.} } @conference {17872, title = {Runtime coupling of data-parallel programs}, booktitle = {Proceedings of the 10th international conference on Supercomputing}, year = {1996}, month = {1996///}, pages = {229 - 236}, author = {Ranganathan,M. and Acharya, A.
and Edjlali,G. and Sussman, Alan and Saltz, J.} } @article {17648, title = {Randomness-Optimal Unique Element Isolation with Applications to Perfect Matching and Related Problems}, journal = {SIAM Journal on Computing}, volume = {24}, year = {1995}, month = {1995///}, pages = {1036 - 1036}, abstract = {In this paper, we precisely characterize the randomness complexity of the unique element isolation problem, a crucial step in the $RNC$ algorithm for perfect matching by Mulmuley, Vazirani, and Vazirani [Combinatorica, 7 (1987), pp. 105{\textendash}113] and in several other applications. Given a set $S$ and an unknown family $\mathcal{F} \subseteq 2^{S}$ with $|\mathcal{F}| \leq Z$, we present a scheme for assigning polynomially bounded weights to the elements of $S$ using only $O(\log Z + \log |S|)$ random bits, such that the minimum weight set in $\mathcal{F}$ is unique with high probability. This generalizes the solution of Mulmuley, Vazirani, and Vazirani, who use $O(S \log S)$ bits, independent of $Z$. We also provide a matching lower bound for the randomness complexity of this problem. The new weight assignment scheme yields a randomness-efficient $RNC^{2}$ algorithm for perfect matching which uses $O(\log Z + \log n)$ random bits, where $Z$ is any given upper bound on the number of perfect matchings in the input graph. This generalizes the result of Grigoriev and Karpinski [Proc. IEEE Symposium on Foundations of Computer Science, 1987, pp. 166{\textendash}172], who present an $NC^{3}$ algorithm when $Z$ is polynomial and improves the running time in this case. The worst-case randomness complexity of our algorithm is $O(n \log (m/n))$ random bits, improving on the previous bound of $O(m \log n)$. Our scheme also gives randomness-efficient solutions for several problems where unique element isolation is used, such as $RNC$ algorithms for variants of matching and basic problems on linear matroids. We obtain a randomness-efficient random reduction from SAT to USAT, the language of uniquely satisfiable formulas, which can be derandomized in the case of languages in Few $P$ to yield new proofs of the results Few $P \subseteq \oplus P$ and Few $P \subseteq C_{=} P$.}, isbn = {00975397}, doi = {10.1137/S0097539793250330}, url = {http://link.aip.org/link/SMJCAT/v24/i5/p1036/s1\&Agg=doi}, author = {Chari,Suresh and Rohatgi,Pankaj and Srinivasan, Aravind} } @article {13679, title = {The representation of document structure: A generic object-process approach}, volume = {CAR-TR-785}, year = {1995}, month = {1995///}, institution = {University of Maryland, College Park}, author = {Dori,D. and David Doermann and Shin,C. and Haralick,R. and Phillips,I. and Buchman,M.
and Ross,D.} } @conference {14180, title = {Representations for active vision}, booktitle = {Proceedings of the 14th international joint conference on Artificial intelligence - Volume 1}, year = {1995}, month = {1995///}, pages = {20 - 26}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, address = {San Francisco, CA, USA}, isbn = {1-55860-363-8, 978-1-558-60363-9}, url = {http://dl.acm.org/citation.cfm?id=1625855.1625858}, author = {Ferm{\"u}ller, Cornelia and Aloimonos, J.} } @article {13466, title = {Representing a student{\textquoteright}s learning states and transitions}, journal = {American Association of Artificial Intelligence Spring Symposium on Representing Mental States and Mechanisms}, year = {1995}, month = {1995///}, abstract = {We describe an ongoing project to develop an adaptive training system (ATS) that dynamically models a student{\textquoteright}s learning processes and can provide specialized tutoring adapted to a student{\textquoteright}s knowledge state and learning style. The student modeling component of the ATS, ML-Modeler, uses machine learning (ML) techniques to emulate the student{\textquoteright}s novice-to-expert transition. ML-Modeler infers which learning methods the student has used to reach the current knowledge state by comparing the student{\textquoteright}s solution trace to an expert solution and generating plausible hypotheses about what misconceptions and errors the student has made. A case-based approach is used to generate hypotheses through incorrectly applying analogy, overgeneralization, and overspecialization. The student and expert models use a network-based representation that includes abstract concepts and relationships as well as strategies for problem solving. Fuzzy methods are used to represent the uncertainty in the student model. This paper describes the design of the ATS and ML-Modeler, and gives a detailed example of how the system would model and tutor the student in a typical session. The domain we use for this example is high-school level chemistry.}, author = {Gurer,D. and desJardins, Marie and Schlager,M.} } @conference {13638, title = {Robust table-form structure analysis based on box-driven reasoning}, booktitle = {Document Analysis and Recognition, 1995., Proceedings of the Third International Conference on}, volume = {1}, year = {1995}, month = {1995/08//}, pages = {218 - 221 vol.1}, abstract = {Table form document structure analysis is an important problem in the document processing domain. The paper presents a method called Box Driven Reasoning (BDR) to robustly analyze the structure of table form documents which include touching characters and broken lines. Most previous methods employ a line oriented approach. Real documents are copied repeatedly and overlaid with printed data, resulting in characters which touch cells and lines which are broken. BDR deals with regions directly, in contrast with other previous methods. Experimental tests show that BDR reliably recognizes cells and strings in document images with touching characters and broken lines}, keywords = {BDR; box driven reasoning; broken lines; touching characters; table form document structure analysis; robust analysis; character recognition; data structures; document images; document image processing; document processing domain; inference mechanisms}, doi = {10.1109/ICDAR.1995.598980}, author = {Hori,O.
and David Doermann} } @conference {15588, title = {Randomized and deterministic algorithms for geometric spanners of small diameter}, booktitle = {Foundations of Computer Science, 1994 Proceedings., 35th Annual Symposium on}, year = {1994}, month = {1994/11//}, pages = {703 - 712}, abstract = {Let S be a set of n points in $\mathbb{R}^d$ and let $t > 1$ be a real number. A t-spanner for S is a directed graph having the points of S as its vertices, such that for any pair p and q of points there is a path from p to q of length at most t times the Euclidean distance between p and q. Such a path is called a t-spanner path. The spanner diameter of such a spanner is defined as the smallest integer D such that for any pair p and q of points there is a t-spanner path from p to q containing at most D edges. Randomized and deterministic algorithms are given for constructing t-spanners consisting of O(n) edges and having O(log n) diameter. Also, it is shown how to maintain the randomized t-spanner under random insertions and deletions. Previously, no results were known for spanners with low spanner diameter and for maintaining spanners under insertions and deletions}, keywords = {computational geometry, deletions, deterministic algorithms, directed graph, directed graphs, geometric spanners, insertions, randomised algorithms, randomized algorithms}, doi = {10.1109/SFCS.1994.365722}, author = {Arya,S. and Mount, Dave and Smid,M.} } @article {13631, title = {Recovery of Temporal Information from Static Images of Handwriting}, journal = {International Journal of Computer Vision}, volume = {52}, year = {1994}, month = {1994///}, pages = {143 - 164}, abstract = {The problem of off-line handwritten character recognition has eluded a satisfactory solution for several decades. Researchers working in the area of on-line recognition have had greater success, but the possibility of extracting on-line information from static images has not been fully explored. The experience of forensic document examiners assures us that in many cases, such information can be successfully recovered. We outline the design of a system for the recovery of temporal information from static handwritten images. We provide a taxonomy of local, regional and global temporal clues which are often found in hand-written samples, and describe methods for recovering these clues from the image. We show how this system can benefit from obtaining a comprehensive understanding of the handwriting signal and a detailed analysis of stroke and sub-stroke properties. We suggest that the recovery task requires that we break away from traditional thresholding and thinning techniques, and we provide a framework for such analysis. We demonstrate how isolated temporal clues can reliably be extracted from this framework and propose a control structure for integrating the partial information. We show how many seemingly ambiguous situations can be resolved by the derived clues and our knowledge of the writing process, and provide several examples to illustrate our approach. The support of this research by the Ricoh Corporation is gratefully acknowledged. }, author = {David Doermann and Rosenfeld, A.} } @proceedings {15797, title = {Regularization by Truncated Total Least Squares}, year = {1994}, month = {1994///}, pages = {250 - 254}, publisher = {SIAM Press}, address = {Philadelphia}, author = {Fierro,Richardo and Golub, Gene H.
and Hansen,Per Christian and O{\textquoteright}Leary, Dianne P.}, editor = {Lewis,J.G.} } @inbook {15798, title = {Regularization of Ill-Posed Problems in Image Restoration}, booktitle = {Proceedings of the Fifth SIAM Conference on Applied Linear Algebra}, year = {1994}, month = {1994///}, pages = {102 - 105}, publisher = {SIAM Press}, organization = {SIAM Press}, address = {Philadelphia}, author = {O{\textquoteright}Leary, Dianne P.}, editor = {Lewis,J.G.} } @article {17422, title = {The river beyond the rapids: responsive services for responsible users}, journal = {Connecting the DOE community: partnerships in information}, year = {1994}, month = {1994///}, pages = {25 - 26}, abstract = {Providing responsive services to all members of our society will require new corporate alliances that put the users and their needs ahead of the pursuit of new technology. Responsive systems are the boats on which responsible users will travel to their work, social communities, and entertainment. With responsive systems users can take responsibility for what they promise to do and derive satisfaction from accomplishing their tasks. The keys to user responsibility are comprehensible, predictable, and controllable interfaces.}, author = {Shneiderman, Ben} } @conference {16688, title = {A rule-based approach to prepositional phrase attachment disambiguation}, booktitle = {Proceedings of the 15th conference on Computational linguistics-Volume 2}, year = {1994}, month = {1994///}, pages = {1198 - 1204}, author = {Brill,E. and Resnik, Philip} } @article {14252, title = {Recognizing 3-D Motion}, journal = {INTERNATIONAL JOINT CONFERENCE ON ARTIFICIAL INTELLIGENCE}, volume = {13}, year = {1993}, month = {1993///}, pages = {1624 - 1624}, author = {Ferm{\"u}ller, Cornelia and Aloimonos, J.} } @article {14915, title = {Recognizing 3-D objects using 2-D images}, year = {1993}, month = {1993///}, institution = {MASSACHUSETTS INSTITUTE OF TECHNOLOGY ARTIFICIAL INTELLIGENCE LAB}, abstract = {To visually recognize objects, we adopt the strategy of forming groups of image features with a bottom-up process, and then using these groups to index into a data base to find all of the matching groups of model features. This approach reduces the computation needed for recognition, since we only consider groups of model features that can account for these relatively large chunks of the image. To perform indexing, we represent a group of 3-D model features in terms of the 2-D images it can produce. Specifically, we show that the simplest and most space-efficient way of doing this for models consisting of general groups of 3-D point features is to represent the set of images each model group produces with two lines (1D subspaces), one in each of two orthogonal, high-dimensional spaces. These spaces represent all possible image groups so that a single image group corresponds to one point in each space. We determine the effects of bounded sensing error on a set of image points, so that we may build a robust and efficient indexing system. We also present an optimal indexing method for more complicated features, and we present bounds on the space required for indexing in a variety of situations. We use the representations of a model{\textquoteright}s images that we develop to analyze other approaches to matching.
We show that there are no invariants of general 3-D models, and demonstrate limitations in the use of non-accidental properties, and in other approaches to reconstructing a 3-D scene from a single 2-D image. Keywords: Grouping, Non-accidental properties, Indexing, Invariants, Recognition, Sensing error.}, author = {Jacobs, David W.} } @inbook {16223, title = {Remote direct manipulation: A case study of a telemedicine workstation}, booktitle = {Sparks of innovation in human-computer interaction}, year = {1993}, month = {1993///}, pages = {51 - 51}, publisher = {Intellect Books}, organization = {Intellect Books}, isbn = {9781567500783}, author = {Keil-Slawik,R. and Plaisant, Catherine and Shneiderman, Ben} } @article {17351, title = {Restructuring knowledge for an electronic encyclopedia}, journal = {Sparks of innovation in human-computer interaction}, volume = {615}, year = {1993}, month = {1993///}, pages = {123 - 123}, author = {Kreitzberg,C.B. and Shneiderman, Ben} } @article {15402, title = {RF scattering and radiation by using a decoupled Helmholtz equation approach}, journal = {Magnetics, IEEE Transactions on}, volume = {29}, year = {1993}, month = {1993/03//}, pages = {2040 - 2042}, abstract = {A novel finite-element formulation for the solution of 3-D RF scattering and radiation problems is presented. This formulation is based on the solution of a set of decoupled Helmholtz equations for the Cartesian components of the field vectors. This results in a robust, computer-efficient method that eliminates previous difficulties associated with {\textquoteleft}curl-curl{\textquoteright} type partial differential equations. Although it is presented in the frequency domain, the method is easily extendible to the time domain}, keywords = {3D RF scattering; radiation problems; decoupled Helmholtz equation approach; finite-element formulation; finite element analysis; computer-efficient method; frequency domain; frequency-domain analysis; electrical engineering computing; electromagnetic wave propagation; radiowave propagation; Physics}, isbn = {0018-9464}, doi = {10.1109/20.250811}, author = {D{\textquoteright}Angelo,J. and Mayergoyz, Issak D} } @article {14266, title = {The role of fixation in visual motion analysis}, journal = {International Journal of Computer Vision}, volume = {11}, year = {1993}, month = {1993///}, pages = {165 - 186}, abstract = {How does the ability of humans and primates to fixate at environmental points in the presence of relative motion help their visual systems in solving various tasks? To state the question in a more formal setting, we investigate in this article the following problem: Suppose that we have an active vision system, that is, a camera resting on a platform and being controlled through motors by a computer that has access to the images sensed by the camera in real time. The platform can move freely in the environment. If this machine can fixate on targets being in relative motion with it, can it solve visual tasks in an efficient and robust manner? By restricting our attention to a set of navigational tasks, we find that such an active observer can solve the problems of 3-D motion estimation, egomotion recovery, and estimation of time-to-contact in a very efficient manner, using as input the spatiotemporal derivatives of the image-intensity function (or normal flow).
Fixation over time changes the input (motion field) in a controlled way and from this change additional information is derived making the previously mentioned tasks easier to solve.}, isbn = {0920-5691}, url = {http://dx.doi.org/10.1007/BF01469227}, author = {Ferm{\"u}ller, Cornelia and Aloimonos, J.} } @article {18424, title = {Role of verification in the software specification process}, journal = {Advances in computers}, volume = {36}, year = {1993}, month = {1993///}, pages = {43 - 109}, author = {Zelkowitz, Marvin V} } @conference {12796, title = {RTSL: a language for real-time schedulability analysis}, booktitle = {Real-Time Systems Symposium, 1993., Proceedings.}, year = {1993}, month = {1993/12/01/3}, pages = {274 - 283}, publisher = {IEEE}, organization = {IEEE}, abstract = {The paper develops a generalized approach to schedulability analysis that is mathematically founded in a process algebra called RTSL. Within RTSL one may describe the functional behavior, timing behavior, timing constraints (or deadlines), and scheduling discipline for real-time systems. The formal semantics of RTSL then allows the reachable state space of finite state systems to be automatically generated and searched for timing exceptions. We provide a generalized schedulability analysis technique to perform this state-based analysis}, keywords = {Algebra, Algorithm design and analysis, Dynamic scheduling, Failure analysis, finite state machines, finite state systems, formal logic, formal semantics, functional behavior, generalized approach, generalized schedulability analysis technique, Process algebra, Processor scheduling, reachable state space, Real time systems, real-time schedulability analysis, Real-Time Specification Language, real-time systems, RTSL, scheduling, Scheduling algorithm, scheduling discipline, Specification languages, state-based analysis, State-space methods, Time factors, Timing, timing behavior, timing constraints, timing exceptions}, isbn = {0-8186-4480-X}, doi = {10.1109/REAL.1993.393489}, author = {Fredette,A. N and Cleaveland, Rance} } @article {15604, title = {A randomized algorithm for slope selection}, journal = {International Journal of Computational Geometry and Applications}, volume = {2}, year = {1992}, month = {1992///}, pages = {1 - 27}, author = {Dillencourt,M. B and Mount, Dave and Netanyahu,N. S} } @article {18074, title = {Randomized range-maxima in nearly-constant parallel time}, journal = {Computational Complexity}, volume = {2}, year = {1992}, month = {1992///}, pages = {350 - 373}, author = {Berkman,O. and Matias,Y. and Vishkin, Uzi} } @conference {17935, title = {Real-time procedural textures}, booktitle = {Proceedings of the 1992 symposium on Interactive 3D graphics}, series = {I3D {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {95 - 100}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-467-8}, doi = {10.1145/147156.147171}, url = {http://doi.acm.org/10.1145/147156.147171}, author = {Rhoades,John and Turk,Greg and Bell,Andrew and State,Andrei and Neumann,Ulrich and Varshney, Amitabh} } @conference {13629, title = {Recovery of temporal information from static images of handwriting}, booktitle = {Proceedings in the IEEE CVPR}, year = {1992}, month = {1992/06//}, pages = {162 - 168}, abstract = {The problem of off-line handwritten character recognition has eluded a satisfactory solution for several decades. 
Researchers working in the area of on-line recognition have had greater success, but the possibility of extracting on-line information from static images has not been fully explored. The experience of forensic document examiners assures us that in many cases, such information can be successfully recovered. We outline the design of a system for the recovery of temporal information from static handwritten images. We provide a taxonomy of local, regional and global temporal clues which are often found in hand-written samples, and describe methods for recovering these clues from the image. We show how this system can benefit from obtaining a comprehensive understanding of the handwriting signal and a detailed analysis of stroke and sub-stroke properties. We suggest that the recovery task requires that we break away from traditional thresholding and thinning techniques, and we provide a framework for such analysis. We demonstrate how isolated temporal clues can reliably be extracted from this framework and propose a control structure for integrating the partial information. We show how many seemingly ambiguous situations can be resolved by the derived clues and our knowledge of the writing process, and provide several examples to illustrate our approach. The support of this research by the Ricoh Corporation is gratefully acknowledged. }, author = {David Doermann and Rosenfeld, A.} } @article {13848, title = {Reflecting time in generated text: tense, aspect and temporal connecting words}, year = {1992}, month = {1992///}, institution = {University of Maryland at College Park}, address = {College Park, MD, USA}, author = {Dorr, Bonnie J and Gaasterland,Terry} } @conference {16228, title = {Remote manipulation interfaces: the case of a telepathology workstation}, booktitle = {Posters and short talks of the 1992 SIGCHI conference on Human factors in computing systems}, series = {CHI {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {65 - 65}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Telemedicine is the practice of medicine over communication links. The physician being consulted and the patient are in two different locations. A first telepathology system has been developed by Corabi Telemetrics. It allows a pathologist to render a diagnosis by examining tissue samples or body fluids under a remotely located microscope.}, doi = {10.1145/1125021.1125082}, url = {http://doi.acm.org/10.1145/1125021.1125082}, author = {Plaisant, Catherine and Carr,David A} } @article {15944, title = {Reasoning about ignorance: A note on the Bush-Gorbachev problem}, journal = {Fundam. Inform.}, volume = {15}, year = {1991}, month = {1991///}, pages = {325 - 332}, author = {Kraus,S. and Perlis, Don and Horty,J. F} } @article {16514, title = {Recent applications of competitive activation mechanisms}, journal = {Neural Networks: Advances and Applications}, year = {1991}, month = {1991///}, pages = {33 - 62}, author = {Reggia, James A. and Peng,Y. and Bourret,P.} } @article {13630, title = {Recovery of Temporal Information from Static Images of Handwriting}, volume = {CAR-TR-595}, year = {1991}, month = {1991///}, institution = {Center for Automation Research, University of Maryland}, address = {College Park, Maryland}, abstract = {The problem of off-line handwritten character recognition has eluded a satisfactory solution for several decades.
Researchers working in the area of on-line recognition have had greater success, but the possibility of extracting on-line information from static images has not been fully explored. The experience of forensic document examiners assures us that in many cases, such information can be successfully recovered. We outline the design of a system for the recovery of temporal information from static handwritten images. We provide a taxonomy of local, regional and global temporal clues which are often found in hand-written samples, and describe methods for recovering these clues from the image. We show how this system can benefit from obtaining a comprehensive understanding of the handwriting signal and a detailed analysis of stroke and sub-stroke properties. We suggest that the recovery task requires that we break away from traditional thresholding and thinning techniques, and we provide a framework for such analysis. We demonstrate how isolated temporal clues can reliably be extracted from this framework and propose a control structure for integrating the partial information. We show how many seemingly ambiguous situations can be resolved by the derived clues and our knowledge of the writing process, and provide several examples to illustrate our approach. The support of this research by the Ricoh Corporation is gratefully acknowledged. }, author = {David Doermann and Rosenfeld, A.} } @conference {11990, title = {Relative depth from motion using normal flow: an active and purposive solution}, booktitle = {Proceedings of the IEEE Workshop on Visual Motion, 1991}, year = {1991}, month = {1991/10/07/9}, pages = {196 - 204}, publisher = {IEEE}, organization = {IEEE}, abstract = {The authors show how an active observer can compute the relative depth of (stationary or moving) objects in the field of view using only the spatiotemporal derivatives of the time varying image intensity function. The solution they propose is purposive in the sense that it solves only the relative depth from motion problem and cannot be used for other problems related to motion; active in the sense that the activity of the observer is essential for the solution of the problem. Results indicate that exact computation of retinal motion does not appear to be a necessary first step for some problems related to visual motion. In addition, optic flow, whose computation is an ill-posed problem, is related to the motion of the scene only under very restrictive assumptions. As a result, the use of optic flow in some quantitative motion analysis studies is questionable}, keywords = {3D information, Automation, Computer vision, Educational institutions, Image motion analysis, image recognition, Image sequences, Laboratories, Layout, Motion analysis, Motion estimation, Nonlinear optics, normal flow, optic flow, Optical computing, Optical sensors, relative depth, retinal motion, spatiotemporal derivatives, time varying image intensity function, visual motion}, isbn = {0-8186-2153-2}, doi = {10.1109/WVM.1991.212807}, author = {Huang, Liuqing and Aloimonos, J.} } @article {13017, title = {Review of Fundamentals of Molecular Evolution, by Li, W.-H. and D.
Graur}, journal = {Cladistics}, volume = {7}, year = {1991}, month = {1991///}, pages = {310 - 312}, author = {Cummings, Michael P.} } @article {18441, title = {The role for executable specifications in system maintenance}, journal = {Information Sciences}, volume = {57{\textendash}58}, year = {1991}, month = {1991/09//}, pages = {347 - 359}, abstract = {As software becomes increasingly complex, two attributes of the system life cycle are taking on more important roles. We need the ability to formally specify the functionality of the systems we build in order to minimize costly development problems and, with long life times, we need the ability to enhance existing systems with new features in order to prolong their usefulness. This enhancement process also needs a mechanism for formally defining any new functionality on data objects placed upon the system. This paper describes the AS* research project which is addressing this issue. AS* is a language-independent specification language embedded within an existing programming language for the purpose of providing extensions to existing systems. This paper discusses the designs of AS*, the structure of the prototype implementation and describes some early experiences using the system.}, isbn = {0020-0255}, doi = {10.1016/0020-0255(91)90086-A}, url = {http://www.sciencedirect.com/science/article/pii/002002559190086A}, author = {Zelkowitz, Marvin V and Cardenas,Sergio} } @article {15991, title = {Reasoning situated in time I: Basic concepts}, journal = {Journal of Experimental and Theoretical Artificial Intelligence}, volume = {2}, year = {1990}, month = {1990///}, pages = {75 - 98}, author = {Elgot-Drapkin,J. J and Perlis, Don} } @article {15801, title = {Robust Regression Computation Using Iteratively Reweighted Least Squares}, journal = {SIAM Journal of Matrix Analysis and Applications}, volume = {11}, year = {1990}, month = {1990///}, pages = {466 - 480}, author = {O{\textquoteright}Leary, Dianne P.} } @article {15132, title = {Rational Common Ground in the Sociology of Knowledge}, journal = {Philosophy of the social sciences}, volume = {19}, year = {1989}, month = {1989///}, pages = {257 - 271}, author = {Katz, Jonathan} } @article {14114, title = {Relaxed and stabilized incomplete factorizations for non-self-adjoint linear systems}, journal = {BIT Numerical Mathematics}, volume = {29}, year = {1989}, month = {1989///}, pages = {890 - 915}, author = {Elman, Howard} } @book {18419, title = {Requirements For a Software Engineering Environment: Proceedings of the University of Maryland Workshop, May 5-8, 1986}, year = {1989}, month = {1989///}, publisher = {Intellect Books}, organization = {Intellect Books}, keywords = {Computers / General, software engineering, Software engineering/ Congresses}, isbn = {9780893914479}, author = {Zelkowitz, Marvin V} } @article {14996, title = {On routing two-terminal nets in the presence of obstacles}, journal = {Computer-Aided Design of Integrated Circuits and Systems, IEEE Transactions on}, volume = {8}, year = {1989}, month = {1989/05//}, pages = {563 - 570}, abstract = {Consideration is given to the problem of routing k two-terminal nets in the presence of obstacles in two models: the standard two-layer model and the knock-knee model. Determining routability is known to be NP-complete for arbitrary k. The authors introduce a technique that reduces the general problem into finding edge-disjoint paths in a graph whose size depends only on the size of the obstacles.
Two optimization criteria are considered: the total length of the wires and the number of vias used}, keywords = {finding edge-disjoint paths; knock-knee model; number of vias; obstacles; optimization criteria; routability; routing two-terminal nets; standard two-layer model; total length of wires; circuit CAD; Layout}, isbn = {0278-0070}, doi = {10.1109/43.24884}, author = {JaJa, Joseph F. and Wu,S.A.} } @article {18422, title = {Resource utilization during software development}, journal = {Journal of Systems and Software}, volume = {8}, year = {1988}, month = {1988/09//}, pages = {331 - 336}, abstract = {This paper discusses resource utilization over the life cycle of software development and discusses the role that the current {\textquotedblleft}waterfall{\textquotedblright} model plays in the actual software life cycle. Software production in the NASA environment was analyzed to measure these differences. The data from 13 different projects were collected by the Software Engineering Laboratory at NASA Goddard Space Flight Center and analyzed for similarities and differences. The results indicate that the waterfall model is not very realistic in practice, and that as technology introduces further perturbations to this model with concepts like executable specifications, rapid prototyping, and wide-spectrum languages, we need to modify our model of this process.}, isbn = {0164-1212}, doi = {10.1016/0164-1212(88)90016-7}, url = {http://www.sciencedirect.com/science/article/pii/0164121288900167}, author = {Zelkowitz, Marvin V} } @article {12011, title = {Robust computation of intrinsic images from multiple cues}, journal = {Advances in Computer Vision}, volume = {1}, year = {1988}, month = {1988///}, pages = {115 - 163}, author = {Aloimonos, J. and Brown, C. M.} } @article {18100, title = {Randomized parallel speedups for list ranking}, journal = {Journal of Parallel and Distributed Computing}, volume = {4}, year = {1987}, month = {1987///}, pages = {319 - 333}, author = {Vishkin, Uzi} } @article {13323, title = {Representations Based on Triangular Grids}, journal = {The Visual Computer}, volume = {3}, year = {1987}, month = {1987///}, pages = {27 - 50}, author = {De Floriani, Leila} } @book {11944, title = {A robust algorithm for determining the translation of a rigidly moving surface without correspondence, for robotics applications}, year = {1987}, month = {1987///}, publisher = {University of Maryland}, organization = {University of Maryland}, author = {Basu, A. and Aloimonos, J.} } @article {16726, title = {The R+-tree: A dynamic index for multi-dimensional data}, journal = {Proceedings of VLDB 1987}, year = {1987}, month = {1987///}, pages = {507 - 518}, author = {Sellis,T. and Roussopoulos, Nick and Faloutsos,C.} } @conference {16598, title = {Relationships between deductive and abductive inference in knowledge-based diagnostic problem solving}, booktitle = {Expert Database Systems: Proceedings of the First International Workshop. New York: Benjamin Cummings}, year = {1986}, month = {1986///}, author = {Nau, Dana S. and Reggia, James A.} } @article {18420, title = {Research in Programming Languages and Software Engineering.}, year = {1985}, month = {1985/12/24/}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {During the past year three research papers were written and two published conference presentations were given.
Titles of the published research articles are: A Stochastic Analysis of a Modified Gain Extended Kalman Filter with Applications to Estimation with Bearings only Measurements; The Modified Gain Extended Kalman Filter and Parameter Identification in Linear Systems and Maximum Information Guidance for Homing Missiles.}, keywords = {*BEARINGS, *COMPUTER PROGRAMS, *ESTIMATES, *GUIDANCE, *KALMAN FILTERING, *LINEAR SYSTEMS, *STOCHASTIC PROCESSES, COMPUTER PROGRAMMING AND SOFTWARE, GAIN, identification, measurement, programming languages, STATISTICS AND PROBABILITY, SYSTEMS ENGINEERING., TARGET DIRECTION, RANGE AND POSITION FINDING}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA186269}, author = {Gannon,John and Basili, Victor R. and Zelkowitz, Marvin V and Yeh,Raymond} } @article {18129, title = {Randomized and deterministic simulations of PRAMs by parallel machines with restricted granularity of parallel memories}, journal = {Acta Informatica}, volume = {21}, year = {1984}, month = {1984///}, pages = {339 - 374}, author = {Mehlhorn,K. and Vishkin, Uzi} } @conference {18126, title = {Randomized speed-ups in parallel computation}, booktitle = {Proceedings of the sixteenth annual ACM symposium on Theory of computing}, year = {1984}, month = {1984///}, pages = {230 - 239}, author = {Vishkin, Uzi} } @article {12001, title = {The relationship between optical flow and surface orientation}, journal = {Proc. of the 7-th ICPR, Montreal-Canada}, year = {1984}, month = {1984///}, author = {Aloimonos, J. and Brown, C. M.} } @article {17349, title = {Response time and display rate in human performance with computers}, journal = {ACM Comput. Surv.}, volume = {16}, year = {1984}, month = {1984/09//}, pages = {265 - 285}, isbn = {0360-0300}, doi = {10.1145/2514.2517}, url = {http://doi.acm.org/10.1145/2514.2517}, author = {Shneiderman, Ben} } @article {18991, title = {RNA splicing and the involvement of small ribonucleoproteins}, journal = {Modern cell biology}, volume = {3}, year = {1984}, month = {1984///}, pages = {249 - 297}, keywords = {review, Ribonucleoprotein, RNA, Sn-RNA, splicing}, isbn = {0745-3000}, url = {http://cat.inist.fr/?aModele=afficheN\&cpsidt=9051421}, author = {Mount, Stephen M. and Steitz,J. A.} } @conference {16771, title = {A Relational Database to Support Graphical Design and Documentation}, booktitle = {Databases for Business and Office Applications, Database Week}, year = {1983}, month = {1983///}, pages = {135 - 149}, author = {Roussopoulos, Nick and Kelley,S.} } @article {18981, title = {RNA processing: Lessons from mutant globins}, journal = {Nature}, volume = {303}, year = {1983}, month = {1983/06/02/}, pages = {380 - 381}, isbn = {0028-0836}, doi = {10.1038/303380a0}, url = {http://ukpmc.ac.uk/abstract/MED/6855891}, author = {Mount, Stephen M. and Steitz,Joan} } @article {18962, title = {RNA processing: Sequences that signal where to splice}, journal = {Nature}, volume = {304}, year = {1983}, month = {1983///}, pages = {309 - 310}, author = {Mount, Stephen M.} } @article {16641, title = {The Role of Spatial Knowledge in Expert Systems}, journal = {Representation and Processing of Spatial Knowledge, TR-1275, Dept. of Computer Science, Univ.
of Maryland, May}, year = {1983}, month = {1983///}, pages = {3 - 8}, author = {Reggia, James A.} } @article {14998, title = {On the relationship between the biconnectivity augmentation and traveling salesman problem}, journal = {Theoretical Computer Science}, volume = {19}, year = {1982}, month = {1982///}, pages = {189 - 201}, author = {Fredrickson,G. N. and JaJa, Joseph F.} } @article {17350, title = {Response to N. C. Rowe{\textquoteright}s review}, journal = {ACM SIGMOD Record}, volume = {13}, year = {1982}, month = {1982/09//}, pages = {98 - 98}, isbn = {0163-5808}, doi = {10.1145/984514.984521}, url = {http://doi.acm.org/10.1145/984514.984521}, author = {Shneiderman, Ben} } @article {15975, title = {A re-evaluation of story grammars}, journal = {Cognitive Science}, volume = {5}, year = {1981}, month = {1981///}, pages = {79 - 86}, author = {Frisch,A. M and Perlis, Don} } @conference {18421, title = {Resource estimation for medium-scale software projects}, booktitle = {Twelfth Conference on the Interface of Statistics and Computer Science, by IEEE Computer Society}, year = {1979}, month = {1979///}, pages = {267 - 272}, author = {Zelkowitz, Marvin V} } @article {17341, title = {Reduced combined indexes for efficient multiple attribute retrieval}, journal = {Information Systems}, volume = {2}, year = {1977}, month = {1977///}, pages = {149 - 154}, abstract = {Combined indexes were proposed by Lum [4] as an alternative to the traditional approach of single attribute indexes. The combined index approach is appealing for queries requiring conjunctions of attribute values since it eliminates the time consuming intersections. The penalty of wasted auxiliary storage space in the combined index approach can be minimized by adopting the Reduced Combined Index technique proposed in this paper.}, isbn = {0306-4379}, doi = {10.1016/0306-4379(77)90003-5}, url = {http://www.sciencedirect.com/science/article/pii/0306437977900035}, author = {Shneiderman, Ben} } @conference {11938, title = {The relationship between the pattern recognition problem and the workload characterization problem}, booktitle = {1977 CMG Conference}, year = {1977}, month = {1977///}, author = {Agrawala, Ashok K. and Mohr,J. M} } @article {18423, title = {Reversible execution}, journal = {Commun. ACM}, volume = {16}, year = {1973}, month = {1973/09//}, pages = {566}, abstract = {The ability to backtrack, or retrace, the execution of a computer program has gained wider acceptance recently as a desired feature within a programming language. This is particularly useful in two different applications: (1) In debugging systems where the trace output is saved and can be interrogated under programmer control [1, 3]; (2) In artificial intelligence applications where one is trying to prove a certain result. It is frequently necessary to backup the proof and try some alternative path [2].}, keywords = {backtracking, Debugging, PL/I, reversible execution}, isbn = {0001-0782}, doi = {10.1145/362342.362360}, url = {http://doi.acm.org/10.1145/362342.362360}, author = {Zelkowitz, Marvin V} }