@conference {20251, title = {Interpolation-Based Gray-Level Co-Occurrence Matrix Computation for Texture Directionality Estimation}, booktitle = {IEEE 22nd Signal Processing Algorithms, Architectures, Arrangements, and Applications Conference}, year = {2018}, month = {12/06/2018}, publisher = {IEEE}, organization = {IEEE}, address = {Poznan, Poland}, abstract = {A novel interpolation-based model for the computation of the Gray-Level Co-occurrence Matrix (GLCM) is presented. The model enables GLCM computation for any real-valued angles and offsets, as opposed to the traditional, lattice-based model. A texture directionality estimation algorithm is defined using the GLCM-derived correlation feature. The robustness of the algorithm with respect to image blur and additive Gaussian noise is evaluated. It is concluded that directionality estimation is robust to image blur and to low noise levels. For high noise levels, the mean error increases but remains bounded. The performance of the directionality estimation algorithm is illustrated on fluorescence microscopy images of fibroblast cells. The algorithm was implemented in C++ and the source code is available in an openly accessible repository.}, isbn = {978-8-3620-6533-2}, doi = {10.23919/SPA.2018.8563413}, url = {https://ieeexplore.ieee.org/document/8563413}, author = {Marcin Kociolek and Mary Brady and Peter Bajcsy and Cardone, Antonio} } @conference {20155, title = {Interactive exploration of microstructural features in gigapixel microscopy images}, booktitle = {IEEE International Conference on Image Processing}, year = {2017}, month = {09/2017}, publisher = {IEEE}, organization = {IEEE}, address = {Beijing, China}, abstract = {Modern imaging technologies enable the study of microstructural features, which require capturing the finest details in high-resolution gigapixel images. Nevertheless, the resolution disparity between gigapixel images and megapixel displays presents a challenge to effective visual analysis because subtle texture differences are hardly perceivable at coarser resolutions. In this paper we present a hierarchical segmentation technique based on the joint distribution of intensity and noise-resistant local binary patterns to differentiate subtle microstructural textures across various scales. The coarse-to-fine segmentation procedure subdivides each parent segment into texturally distinct child segments at progressively higher resolutions. The hierarchical structure of segments allows creating intermediate segmentation results interactively. Based on the intermediate results, we highlight regions with texture differences using distinct colors, which provide salient visual hints to users regardless of the current viewing resolution. Our new technique has been validated on large microscopy images and shows promising results.}, keywords = {gigapixel images, Image segmentation}, author = {Hsueh-Chien Cheng and Cardone, Antonio and Varshney, Amitabh} } @article {20317, title = {Vibrio cholerae O1 with Reduced Susceptibility to Ciprofloxacin and Azithromycin Isolated from a Rural Coastal Area of Bangladesh}, journal = {Frontiers in Microbiology}, year = {2017}, month = {Sep-02-2018}, abstract = {Cholera outbreaks occur each year in the remote coastal areas of Bangladesh, and epidemiological surveillance and routine monitoring of cholera in these areas are challenging.
In this study, a total of 97 Vibrio cholerae O1 isolates from Mathbaria, Bangladesh, collected between 2010 and 2014, were analyzed for phenotypic and genotypic traits, including antimicrobial susceptibility. Of the 97 isolates, 95 possessed the CTX-phage-mediated genes ctxA, ace, and zot, and two lacked the cholera toxin gene, ctxA. Also, both CTX+ and CTX- V. cholerae O1 isolated in this study carried rtxC, tcpAET, and hlyA. The classical cholera toxin gene, ctxB1, was detected in 87 isolates, while eight had ctxB7. Of the 95 CTX+ V. cholerae O1, 90 contained rstRET and 5 had rstRCL. All isolates, except two, contained the SXT-related integrase intSXT. Resistance to penicillin, streptomycin, nalidixic acid, sulfamethoxazole-trimethoprim, erythromycin, and tetracycline varied over the study period. Most importantly, 93\% of the V. cholerae O1 isolates were multidrug resistant. Six different resistance profiles were observed, with resistance to streptomycin, nalidixic acid, tetracycline, and sulfamethoxazole-trimethoprim predominant every year. Ciprofloxacin and azithromycin MICs were 0.003{\textendash}0.75 and 0.19{\textendash}2.00 μg/ml, respectively, indicating reduced susceptibility to these antibiotics. Sixteen of the V. cholerae O1 isolates showed a higher MIC for azithromycin (>=0.5 μg/ml) and were further examined for 10 macrolide resistance genes: erm(A), erm(B), erm(C), ere(A), ere(B), mph(A), mph(B), mph(D), mef(A), and msr(A); none tested positive for any of these genes.}, doi = {10.3389/fmicb.2017.00252}, url = {http://journal.frontiersin.org/article/10.3389/fmicb.2017.00252}, author = {Rashed, Shah M. and Hasan, Nur A. and Alam, Munirul and Sadique, Abdus and Sultana, Marzia and Hoq, Md. Mozammel and Sack, R. Bradley and Rita R Colwell and Huq, Anwar} } @article {20331, title = {In Plain View: A Transparent Systems Approach for Enhancing Health Policy Decisions}, year = {2016}, month = {07/2016}, url = {https://www.nae.edu/19582/Bridge/155266/155393.aspx}, author = {Guru Madhavan and Charles E. Phelps and Rita R Colwell and Rino Rappuoli and Harvey V. Fineberg} } @article {20330, title = {IncA/C Conjugative Plasmids Mobilize a New Family of Multidrug Resistance Islands in Clinical Vibrio cholerae Non-O1/Non-O139 Isolates from Haiti}, journal = {mBio}, year = {2016}, month = {Jul-09-2016}, abstract = {Mobile genetic elements play a pivotal role in the adaptation of bacterial populations, allowing them to rapidly cope with hostile conditions, including the presence of antimicrobial compounds. IncA/C conjugative plasmids (ACPs) are efficient vehicles for the dissemination of multidrug resistance genes in a broad range of pathogenic species of Enterobacteriaceae. ACPs have sporadically been reported in Vibrio cholerae, the infectious agent of the diarrheal disease cholera. The regulatory network that controls ACP mobility ultimately depends on the transcriptional activation of multiple ACP-borne operons by the master activator AcaCD. Beyond ACP conjugation, AcaCD has also recently been shown to activate the expression of genes located in the Salmonella genomic island 1 (SGI1). Here, we describe MGIVchHai6, a novel and unrelated mobilizable genomic island (MGI) integrated into the 3' end of trmE in chromosome I of V. cholerae HC-36A1, a non-O1/non-O139 multidrug-resistant clinical isolate recovered from Haiti in 2010. MGIVchHai6 contains a mercury resistance transposon and an integron In104-like multidrug resistance element similar to that of SGI1.
We show that MGIVchHai6 excises from the chromosome in an AcaCD-dependent manner and is mobilized by ACPs. Acquisition of MGIVchHai6 confers resistance to β-lactams, sulfamethoxazole, tetracycline, chloramphenicol, trimethoprim, and streptomycin/spectinomycin. In silico analyses revealed that MGIVchHai6-like elements are carried by several environmental and clinical V. cholerae strains recovered from the Indian subcontinent, as well as from North and South America, including all non-O1/non-O139 clinical isolates from Haiti.}, doi = {10.1128/mBio.00509-16}, url = {http://mbio.asm.org/lookup/doi/10.1128/mBio.00509-16}, author = {Carraro, Nicolas and Rivard, Nicolas and Ceccarelli, Daniela and Rita R Colwell and Burrus, Vincent} } @article {20359, title = {Vibrio metoecus sp. nov., a close relative of Vibrio cholerae isolated from coastal brackish ponds and clinical specimens}, journal = {International Journal of Systematic and Evolutionary Microbiology}, year = {2014}, month = {Jan-09-2014}, pages = {3208 - 3214}, issn = {1466-5026}, doi = {10.1099/ijs.0.060145-0}, url = {http://ijs.microbiologyresearch.org/content/journal/ijsem/10.1099/ijs.0.060145-0}, author = {Kirchberger, P. C. and Turnsek, M. and Hunt, D. E. and Haley, B. J. and Rita R Colwell and Polz, M. F. and Tarr, C. L. and Boucher, Y.} } @article {20358, title = {Vibrio ecology, pathogenesis, and evolution}, journal = {Frontiers in Microbiology}, year = {2014}, month = {Apr-05-2016}, doi = {10.3389/fmicb.2014.00256}, url = {http://journal.frontiersin.org/article/10.3389/fmicb.2014.00256}, author = {Ceccarelli, Daniela and Rita R Colwell} } @conference {19454, title = {"I Want to Imagine How That Place Looks": Designing Technologies to Support Connectivity Between Africans Living Abroad and Home}, booktitle = {SIGCHI {\textquoteright}13}, series = {CHI {\textquoteright}13}, year = {2013}, month = {2013///}, pages = {2755 - 2764}, publisher = {ACM}, organization = {ACM}, abstract = {Uneven access to Information and Communication Technologies (ICTs) in parts of the African continent makes it challenging for some Africans who migrate to the U.S. to communicate with family members in their countries of origin. However, Internet access is becoming more widespread throughout the continent, and this development presents an opportunity to explore how future interactive systems can support exchanges between families with members living in developed and less developed countries. To investigate these design possibilities, we interviewed 27 African-born students, currently living in Virginia, U.S., and asked them how they used ICTs to connect with family members in their home countries. Our findings informed the development of a low-fidelity prototype that eight students lived with for four to five months. Findings from this deployment study motivate a discussion regarding features to include in interfaces designed to support transnational family communication. Features include personally meaningful imagery, country-specific content, and the ability to monitor the weather and changing currency rates in migrants{\textquoteright} countries of origin.}, keywords = {diaspora communities, family communication, hci4d/ictd, Interaction design, research through design, transnational}, isbn = {978-1-4503-1899-0}, url = {http://doi.acm.org/10.1145/2470654.2481381}, author = {Wyche, Susan P.
and Marshini Chetty} } @article {20364, title = {Identification of bacteria in enrichment cultures of sulfate reducers in the Cariaco Basin water column employing Denaturing Gradient Gel Electrophoresis of 16S ribosomal RNA gene fragments}, journal = {Aquatic Biosystems}, volume = {9}, year = {2013}, month = {Jan-01-2013}, pages = {17}, abstract = {Background: The Cariaco Basin is characterized by pronounced and predictable vertical layering of microbial communities dominated by reduced sulfur species at and below the redox transition zone. Marine water samples were collected in May 2005 and 2006, at sampling stations A (10{\textdegree}30' N, 64{\textdegree}40' W), B (10{\textdegree}40' N, 64{\textdegree}45' W) and D (10{\textdegree}43' N, 64{\textdegree}32' W) from different depths, including surface, redox interface, and anoxic zones. In order to enrich for sulfate-reducing bacteria (SRB), water samples were inoculated into anaerobic media amended with lactate or acetate as the carbon source. To analyze the composition of enrichment cultures, we performed DNA extraction, PCR-DGGE, and sequencing of selected bands. Results: DGGE results indicate that many bacterial genera were present that are associated with the sulfur cycle, including Desulfovibrio spp., as well as heterotrophs belonging to Vibrio, Enterobacter, Shewanella, Fusobacterium, Marinifilum, Marinilabilia, and Spirochaeta. These bacterial populations are related to sulfur coupling and carbon cycles in an environment of variable redox conditions and oxygen availability. Conclusions: In our studies, we found an association of SRB-like Desulfovibrio with Vibrio species and other genera that have a previously defined, relevant role in sulfur transformation and coupling of carbon and sulfur cycles in an environment where there are variable redox conditions and oxygen availability. This study provides new information about microbial species that were culturable on media for SRB under anaerobic conditions at several locations and water depths in the Cariaco Basin.}, issn = {2046-9063}, doi = {10.1186/2046-9063-9-17}, url = {http://aquaticbiosystems.biomedcentral.com/articles/10.1186/2046-9063-9-17}, author = {Bozo-Hurtado, Lorelei and Garc{\'\i}a-Amado, M and Chistoserdov, Andrei and Varela, Ramon and Narvaez, J and Rita R Colwell and Su{\'a}rez, Paula} } @article {19700, title = {Improving public transit accessibility for blind riders by crowdsourcing bus stop landmark locations with Google street view}, journal = {The 15th International ACM SIGACCESS Conference}, year = {2013}, month = {2013/00/21}, pages = {16 - 8}, publisher = {SIGACCESS, ACM Special Interest Group on Accessible Computing, ACM}, address = {New York, New York, USA}, abstract = {Low-vision and blind bus riders often rely on known physical landmarks to help locate and verify bus stop locations (e.g., by searching for a shelter, bench, or newspaper bin).
However, there are currently few, if any, methods to determine this information a priori via ...}, isbn = {9781450324052}, doi = {10.1145/2513383.2513448}, url = {http://dl.acm.org/citation.cfm?doid=2513383.2513448}, author = {Hara, Kotaro and Azenkot, Shiri and Campbell, Megan and Bennett, Cynthia L and Le, Vicki and Pannella, Sean and Moore, Robert and Minckler, Kelly and Ng, Rochelle H and Jon Froehlich} } @article {19703, title = {An Initial Study of Automatic Curb Ramp Detection with Crowdsourced Verification Using Google Street View Images}, journal = {First AAAI Conference on Human Computation and Crowdsourcing}, year = {2013}, month = {2013/00/11}, abstract = {In our previous research, we examined whether minimally trained crowd workers could find, categorize, and assess sidewalk accessibility problems using Google Street View (GSV) images. This poster paper presents a first step towards combining automated methods (e.g., machine vision-based curb ramp detectors) in concert with human computation to improve the overall scalability of our approach.}, url = {http://www.aaai.org/ocs/index.php/HCOMP/HCOMP13/paper/view/7507}, author = {Hara, Kotaro and Sun, Jin and Chazan, Jonah and Jacobs, David and Jon Froehlich} } @article {19129, title = {Identification of Coli Surface Antigen 23, a Novel Adhesin of Enterotoxigenic Escherichia coli}, journal = {Infection and Immunity}, volume = {80}, year = {2012}, month = {2012}, pages = {2791 - 2801}, abstract = {Enterotoxigenic Escherichia coli (ETEC) is an important cause of diarrhea, mainly in developing countries. Although there are 25 different ETEC adhesins described in strains affecting humans, between 15\% and 50\% of the clinical isolates from different geographical regions are negative for these adhesins, suggesting that additional unidentified adhesion determinants might be present. Here, we report the discovery of Coli Surface Antigen 23 (CS23), a novel adhesin expressed by an ETEC serogroup O4 strain (ETEC 1766a), which was negative for the previously known ETEC adhesins although it has the ability to adhere to Caco-2 cells. CS23 is encoded by an 8.8-kb locus which contains 9 open reading frames (ORFs), 7 of them sharing significant identity with genes required for assembly of K88-related fimbriae. This gene locus, named aal (adhesion-associated locus), is required for the adhesion ability of ETEC 1766a and was able to confer this adhesive phenotype to a nonadherent E. coli HB101 strain. The CS23 major structural subunit, AalE, shares limited identity with known pilin proteins, and it is more closely related to the CS13 pilin protein CshE, carried by human ETEC strains. Our data indicate that CS23 is a new member of the diverse adhesin repertoire used by ETEC strains.}, author = {Del Canto, F. and Botkin, D.J. and Valenzuela, P. and Popov, V. and Ruiz-Perez, F. and Nataro, J.P. and Levine, M.M. and Stine, O.C. and Pop, Mihai and Torres, A.G. and others} } @article {19216, title = {Individuals among commuters: Building personalised transport information services from fare collection systems}, journal = {Pervasive and Mobile Computing}, year = {2012}, month = {2012}, author = {Lathia,N. and Smith,C.
and Jon Froehlich and Capra,L.} } @article {17249, title = {Interactive Dynamics for Visual Analysis}, journal = {Queue - Microprocessors}, volume = {10}, year = {2012}, month = {2012/02//}, pages = {30:30{\textendash}30:55}, abstract = {A taxonomy of tools that support the fluent and flexible use of visualizations}, isbn = {1542-7730}, doi = {10.1145/2133416.2146416}, url = {http://doi.acm.org/10.1145/2133416.2146416}, author = {Heer,Jeffrey and Shneiderman, Ben} } @article {19721, title = {InterPro in 2011: new developments in the family and domain prediction database}, journal = {Nucleic Acids Res}, volume = {40}, year = {2012}, month = {2012 Jan}, pages = {D306-12}, abstract = {InterPro (http://www.ebi.ac.uk/interpro/) is a database that integrates diverse information about protein families, domains and functional sites, and makes it freely available to the public via Web-based interfaces and services. Central to the database are diagnostic models, known as signatures, against which protein sequences can be searched to determine their potential function. InterPro has utility in the large-scale analysis of whole genomes and meta-genomes, as well as in characterizing individual protein sequences. Herein we give an overview of new developments in the database and its associated software since 2009, including updates to database content, curation processes and Web and programmatic interfaces.
}, keywords = {Databases, Protein, Protein Structure, Tertiary, Proteins, Sequence Analysis, Protein, software, Terminology as Topic, User-Computer Interface}, issn = {1362-4962}, doi = {10.1093/nar/gkr948}, author = {Hunter, Sarah and Jones, Philip and Mitchell, Alex and Apweiler, Rolf and Attwood, Teresa K and Bateman, Alex and Bernard, Thomas and Binns, David and Bork, Peer and Burge, Sarah and de Castro, Edouard and Coggill, Penny and Corbett, Matthew and Das, Ujjwal and Daugherty, Louise and Duquenne, Lauranne and Finn, Robert D and Fraser, Matthew and Gough, Julian and Haft, Daniel and Hulo, Nicolas and Kahn, Daniel and Kelly, Elizabeth and Letunic, Ivica and Lonsdale, David and Lopez, Rodrigo and Madera, Martin and Maslen, John and McAnulla, Craig and McDowall, Jennifer and McMenamin, Conor and Mi, Huaiyu and Mutowo-Muellenet, Prudence and Mulder, Nicola and Natale, Darren and Orengo, Christine and Pesseat, Sebastien and Punta, Marco and Quinn, Antony F and Rivoire, Catherine and Sangrador-Vegas, Amaia and Jeremy D Selengut and Sigrist, Christian J A and Scheremetjew, Maxim and Tate, John and Thimmajanarthanan, Manjulapramila and Thomas, Paul D and Wu, Cathy H and Yeats, Corin and Yong, Siew-Yit} } @article {19589, title = {Introspective Pushdown Analysis of Higher-Order Programs}, journal = {arXiv:1207.1813 [cs]}, year = {2012}, note = {Comment: Proceedings of the 17th ACM SIGPLAN International Conference on Functional Programming, 2012, ACM}, month = {2012/07/07/}, abstract = {In the static analysis of functional programs, pushdown flow analysis and abstract garbage collection skirt just inside the boundaries of soundness and decidability. Alone, each method reduces analysis times and boosts precision by orders of magnitude. This work illuminates and conquers the theoretical challenges that stand in the way of combining the power of these techniques. The challenge in marrying these techniques is not subtle: computing the reachable control states of a pushdown system relies on limiting access during transition to the top of the stack; abstract garbage collection, on the other hand, needs full access to the entire stack to compute a root set, just as concrete collection does. \emph{Introspective} pushdown systems resolve this conflict. Introspective pushdown systems provide enough access to the stack to allow abstract garbage collection, but they remain restricted enough to compute control-state reachability, thereby enabling the sound and precise product of pushdown analysis and abstract garbage collection. Experiments reveal synergistic interplay between the techniques, and the fusion demonstrates "better-than-both-worlds" precision.}, keywords = {Computer Science - Programming Languages, D.3.4, F.3.2}, url = {http://arxiv.org/abs/1207.1813}, author = {Earl, Christopher and Sergey, Ilya and Might, Matthew and David Van Horn} } @article {12854, title = {Vibrio Cholerae Classical Biotype Strains Reveal Distinct Signatures in Mexico}, journal = {Journal of Clinical Microbiology}, year = {2012}, month = {04/2012}, abstract = {Vibrio cholerae O1 Classical (CL) biotype caused the 5th and 6th, and probably the earlier cholera pandemics, before the El Tor (ET) biotype initiated the 7th pandemic in Asia in the 1970{\textquoteright}s by completely displacing the CL biotype. Although the CL biotype was thought to be extinct in Asia, and it had never been reported from Latin America, V. 
cholerae CL and ET biotypes, including hybrid ET, were found associated with endemic cholera in Mexico between 1991 and 1997. In this study, CL biotype strains isolated from endemic cholera in Mexico between 1983 and 1997 were characterized in terms of major phenotypic and genetic traits, and compared with CL biotype strains isolated in Bangladesh between 1962 and 1989. According to sero- and biotyping data, all V. cholerae strains tested had the major phenotypic and genotypic characteristics specific for the CL biotype. Antibiograms revealed the majority of the Bangladeshi strains to be resistant to trimethoprim/sulfamethoxazole, furazolidone, ampicillin, and gentamicin, while the Mexican strains were sensitive to all of these drugs, as well as to ciprofloxacin, erythromycin, and tetracycline. Pulsed-field gel electrophoresis (PFGE) of NotI-digested genomic DNA revealed characteristic banding patterns for all the CL biotype strains, although the Mexican strains differed from the Bangladeshi strains in 1-2 DNA bands. The difference may be subtle but consistent, as confirmed by the sub-clustering patterns in the PFGE-based dendrogram, and can serve as a regional signature, suggesting pre-1991 existence and evolution of the CL biotype strains in the Americas, independent of that in Asia.}, isbn = {0095-1137, 1098-660X}, doi = {10.1128/JCM.00189-12}, url = {http://jcm.asm.org/content/early/2012/04/12/JCM.00189-12}, author = {Alam,Munirul and Islam,M. Tarequl and Rashed,Shah Manzur and Johura,Fatema-Tuz and Bhuiyan,Nurul A. and Delgado,Gabriela and Morales,Rosario and Mendez,Jose Luis and Navarro,Armando and Watanabe,Haruo and Hasan,Nur-A. and Rita R Colwell and Cravioto,Alejandro} } @article {20382, title = {Vibrio cholerae in a historically cholera-free country}, journal = {Environmental Microbiology Reports}, year = {2012}, month = {Jan-08-2012}, pages = {381 - 389}, abstract = {We report the autochthonous existence of Vibrio cholerae in coastal waters of Iceland, a geothermally active country where cholera is absent and has never been reported. Seawater, mussel and macroalgae samples were collected close to, and distant from, sites where geothermal activity causes a significant increase in water temperature during low tides. Vibrio cholerae was detected only at geothermally influenced sites during low tides. None of the V. cholerae isolates encoded cholera toxin (ctxAB) and all were non-O1/non-O139 serogroups. However, all isolates encoded other virulence factors that are associated with cholera as well as extra-intestinal V. cholerae infections. The virulence factors were functional at temperatures of coastal waters of Iceland, suggesting an ecological role. It is noteworthy that V. cholerae was isolated from samples collected at sites distant from anthropogenic influence, supporting the conclusion that V. cholerae is autochthonous to the aquatic environment of Iceland.}, doi = {10.1111/j.1758-2229.2012.00332.x}, url = {http://doi.wiley.com/10.1111/j.1758-2229.2012.00332.x}, author = {Haley, Bradd J. and Chen, Arlene and Grim, Christopher J. and Clark, Philip and Diaz, Celia M and Taviani, Elisa and Hasan, Nur A. and Sancomb, Elizabeth and Elnemr, Wessam M and Islam, Muhammad A.
and Huq, Anwar and Rita R Colwell and Benediktsd{\'o}ttir, Eva} } @article {13296, title = {IA*: An adjacency-based representation for non-manifold simplicial shapes in arbitrary dimensions}, journal = {Computers \& Graphics}, volume = {35}, year = {2011}, month = {2011/06//}, pages = {747 - 753}, abstract = {We propose a compact, dimension-independent data structure for manifold, non-manifold and non-regular simplicial complexes, that we call the Generalized Indexed Data Structure with Adjacencies (IA⁎ data structure). It encodes only top simplices, i.e., the ones that are not on the boundary of any other simplex, plus a suitable subset of the adjacency relations. We describe the IA⁎ data structure in arbitrary dimensions, and compare the storage requirements of its 2D and 3D instances with both dimension-specific and dimension-independent representations. We show that the IA⁎ data structure is more cost-effective than other dimension-independent representations and is even slightly more compact than the existing dimension-specific ones. We present efficient algorithms for navigating a simplicial complex described as an IA⁎ data structure. This shows that the IA⁎ data structure allows retrieving all topological relations of a given simplex by considering only its local neighborhood, and thus it is a more efficient alternative to incidence-based representations when information does not need to be encoded for boundary simplices.}, keywords = {Non-manifold data structures, simplicial complexes, Topological data structures}, isbn = {0097-8493}, doi = {10.1016/j.cag.2011.03.009}, url = {http://www.sciencedirect.com/science/article/pii/S0097849311000483}, author = {Canino,David and De Floriani, Leila and Weiss,Kenneth} } @article {14865, title = {Illumination modeling for face recognition}, journal = {Handbook of face recognition}, year = {2011}, month = {2011///}, pages = {169 - 195}, abstract = {In this chapter, we show that effective systems can account for the effects of lighting using fewer than 10 degrees of freedom. This can have considerable impact on the speed and accuracy of recognition systems. We will describe theoretical results that, with some simplifying assumptions, prove the validity of low-dimensional, linear approximations to the set of images produced by a face.}, doi = {10.1007/978-0-85729-932-1_7}, author = {Basri,R. and Jacobs, David W.} } @article {14868, title = {Illumination Recovery From Image With Cast Shadows Via Sparse Representation}, journal = {Image Processing, IEEE Transactions on}, volume = {20}, year = {2011}, month = {2011/08//}, pages = {2366 - 2377}, abstract = {In this paper, we propose using sparse representation for recovering the illumination of a scene from a single image with cast shadows, given the geometry of the scene. The images with cast shadows can be quite complex and, therefore, cannot be well approximated by low-dimensional linear subspaces. However, it can be shown that the set of images produced by a Lambertian scene with cast shadows can be efficiently represented by a sparse set of images generated by directional light sources. We first model an image with cast shadows as composed of a diffusive part (without cast shadows) and a residual part that captures cast shadows. Then, we express the problem in an $\ell_1$-regularized least-squares formulation, with nonnegativity constraints (as light has to be non-negative at any point in space).
This sparse representation enjoys an effective and fast solution thanks to recent advances in compressive sensing. In experiments on synthetic and real data, our approach performs favorably in comparison with several previously proposed methods.}, keywords = {$\ell_1$-regularized least-squares formulation, Lambertian scene, cast shadows, compressive sensing, directional light sources, illumination recovery, low-dimensional linear subspaces, nonnegativity constraints, sparse representation, data compression, image coding, image reconstruction, image representation, least squares approximations}, isbn = {1057-7149}, doi = {10.1109/TIP.2011.2118222}, author = {Mei,Xue and Ling,Haibin and Jacobs, David W.} } @conference {12446, title = {Illumination robust dictionary-based face recognition}, booktitle = {2011 18th IEEE International Conference on Image Processing (ICIP)}, year = {2011}, month = {2011/09/11/14}, pages = {777 - 780}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we present a face recognition method based on simultaneous sparse approximations under varying illumination. Our method consists of two main stages. In the first stage, a dictionary is learned for each face class based on given training examples, which minimizes the representation error with a sparseness constraint. In the second stage, a test image is projected onto the span of the atoms in each learned dictionary. The resulting residual vectors are then used for classification. Furthermore, to handle changes in lighting conditions, we use a relighting approach based on a non-stationary stochastic filter to generate multiple images of the same person with different lighting. As a result, our algorithm has the ability to recognize human faces with good accuracy even when only a single image or a very few images are provided for training. The effectiveness of the proposed method is demonstrated on publicly available databases and it is shown that this method is efficient and can perform significantly better than many competitive face recognition algorithms.}, keywords = {albedo, approximation theory, classification, competitive face recognition algorithms, Databases, Dictionaries, Face, face recognition, face recognition method, filtering theory, human face recognition, illumination robust dictionary-based face recognition, illumination variation, image representation, learned dictionary, learning (artificial intelligence), lighting, lighting conditions, multiple images, nonstationary stochastic filter, publicly available databases, relighting, relighting approach, representation error, residual vectors, Robustness, simultaneous sparse approximations, simultaneous sparse signal representation, sparseness constraint, Training, varying illumination, vectors}, isbn = {978-1-4577-1304-0}, doi = {10.1109/ICIP.2011.6116670}, author = {Patel, Vishal M. and Tao Wu and Biswas,S. and Phillips,P.J. and Chellappa, Rama} } @inbook {12456, title = {Image and Video-Based Biometrics}, booktitle = {Visual Analysis of Humans}, year = {2011}, month = {2011///}, pages = {437 - 454}, publisher = {Springer London}, organization = {Springer London}, abstract = {Biometrics deals with the problem of identifying individuals based on physiological or behavioral characteristics.
Since many physical characteristics, such as face and iris, and behavioral characteristics, such as voice and expression, are unique to an individual, biometric analysis offers a reliable and natural solution to the problem of identity verification. In this chapter, we discuss image and video-based biometrics involving face, iris and gait. In particular, we discuss several recent approaches to physiological biometrics based on Sparse Representations and Compressed Sensing. Some of the most compelling challenges and issues that confront research in biometrics are also addressed.}, isbn = {978-0-85729-997-0}, url = {http://dx.doi.org/10.1007/978-0-85729-997-0_22}, author = {Patel, Vishal M. and Pillai,Jaishanker K. and Chellappa, Rama}, editor = {Moeslund,Thomas B. and Hilton,Adrian and Kr{\"u}ger,Volker and Sigal,Leonid} } @conference {13073, title = {Image ranking and retrieval based on multi-attribute queries}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2011 IEEE Conference on}, year = {2011}, month = {2011/06//}, pages = {801 - 808}, abstract = {We propose a novel approach for ranking and retrieval of images based on multi-attribute queries. Existing image retrieval methods train separate classifiers for each word and heuristically combine their outputs for retrieving multiword queries. Moreover, these approaches also ignore the interdependencies among the query terms. In contrast, we propose a principled approach for multi-attribute retrieval which explicitly models the correlations that are present between the attributes. Given a multi-attribute query, we also utilize other attributes in the vocabulary which are not present in the query, for ranking/retrieval. Furthermore, we integrate ranking and retrieval within the same formulation, by posing them as structured prediction problems. Extensive experimental evaluation on the Labeled Faces in the Wild (LFW), FaceTracer and PASCAL VOC datasets shows that our approach significantly outperforms several state-of-the-art ranking and retrieval methods.}, keywords = {multiattribute queries, image ranking, image retrieval, image retrieval methods, labeled faces in the wild, FaceTracer datasets, PASCAL VOC datasets}, doi = {10.1109/CVPR.2011.5995329}, author = {Siddiquie,B. and Feris,R.S. and Davis, Larry S.} } @book {19360, title = {Image Registration for Remote Sensing}, year = {2011}, month = {2011}, pages = {515}, publisher = {Cambridge University Press}, organization = {Cambridge University Press}, abstract = {Image registration is a digital image processing discipline that studies how to bring two or more digital images into precise alignment for analysis and comparison. Accurate registration algorithms are essential for creating mosaics of satellite images and tracking changes on the planet{\textquoteright}s surface over time. Bringing together invited contributions from 36 distinguished researchers, the book presents a detailed overview of current research and practice in the application of image registration to remote sensing imagery. Chapters cover the problem definition, theoretical issues in accuracy and efficiency, fundamental algorithms, and real-world case studies of image registration software applied to imagery from operational satellite systems.
This book provides a comprehensive and practical overview for Earth and space scientists, presents image processing researchers with a summary of current research, and can be used for specialized graduate courses.}, keywords = {Science / Astronomy, Science / Earth Sciences / General, Technology \& Engineering / Environmental / General, Technology \& Engineering / Remote Sensing \& Geographic Information Systems}, isbn = {9781139494373}, author = {LeMoigne,Jacqueline and Netanyahu,Nathan S. and Eastman,Roger D.} } @inbook {19034, title = {Implicit Authentication through Learning User Behavior}, booktitle = {Information Security}, series = {Lecture Notes in Computer Science}, volume = {6531}, year = {2011}, month = {2011}, pages = {99 - 113}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Users are increasingly dependent on mobile devices. However, current authentication methods like password entry are significantly more frustrating and difficult to perform on these devices, leading users to create and reuse shorter passwords and PINs, or to forgo authentication altogether. We present implicit authentication - authenticating users based on behavior patterns. We describe our model for performing implicit authentication and assess our techniques using more than two weeks of collected data from over 50 subjects.}, keywords = {Computer science}, isbn = {978-3-642-18177-1}, url = {http://www.springerlink.com/content/m57u551u3133475m/abstract/}, author = {Elaine Shi and Niu, Yuan and Jakobsson, Markus and Chow, Richard}, editor = {Burmester, Mike and Tsudik, Gene and Magliveras, Spyros and Ilic, Ivana} } @article {15762, title = {Implicitly-weighted total least squares}, journal = {Linear Algebra and its Applications}, volume = {435}, year = {2011}, month = {2011/08/01/}, pages = {560 - 577}, abstract = {In a total least squares (TLS) problem, we estimate an optimal set of model parameters X, so that (A - ΔA)X = B - ΔB, where A is the model matrix, B is the observed data, and ΔA and ΔB are corresponding corrections. When B is a single vector, Rao (1997) and Paige and Strako{\v s} (2002) suggested formulating standard least squares problems, for which ΔA = 0, and data least squares problems, for which ΔB = 0, as weighted and scaled TLS problems. In this work we define an implicitly-weighted TLS formulation (ITLS) that reparameterizes these formulations to make computation easier. We derive asymptotic properties of the estimates as the number of rows in the problem approaches infinity, handling the rank-deficient case as well. We discuss the role of the ratio between the variances of errors in A and B in choosing an appropriate parameter in ITLS. We also propose methods for computing the family of solutions efficiently and for choosing the appropriate solution if the ratio of variances is unknown.
We provide experimental results on the usefulness of the ITLS family of solutions.}, keywords = {Data least squares, Errors in variables, least squares, Linear regression, total least squares, Variance estimation}, isbn = {0024-3795}, doi = {10.1016/j.laa.2010.06.020}, url = {http://www.sciencedirect.com/science/article/pii/S0024379510003162}, author = {Park,Sungwoo and O{\textquoteright}Leary,Dianne P.} } @article {12866, title = {The Importance of Chitin in the Marine Environment}, journal = {Marine Biotechnology}, year = {2011}, month = {2011///}, pages = {1 - 8}, abstract = {Chitin is the most abundant renewable polymer in the oceans and is an important source of carbon and nitrogen for marine organisms. The process of chitin degradation is a key step in the cycling of nutrients in the oceans, and chitinolytic bacteria play a significant role in this process. These bacteria are autochthonous to both marine and freshwater ecosystems and produce chitinases that degrade chitin, an insoluble polysaccharide, to a biologically useful form. In this brief review, a description of the structure of chitin and diversity of chitinolytic bacteria in the oceans is provided, in the context of the significance of chitin degradation for marine life.}, doi = {10.1007/s10126-011-9388-1}, author = {Souza,C. P. and Almeida,B. C. and Rita R Colwell and Rivera,I. N. G.} } @article {15177, title = {Impossibility of blind signatures from one-way permutations}, journal = {Theory of Cryptography}, year = {2011}, month = {2011///}, pages = {615 - 629}, abstract = {A seminal result in cryptography is that signature schemes can be constructed (in a black-box fashion) from any one-way function. The minimal assumptions needed to construct blind signature schemes, however, have remained unclear. Here, we rule out black-box constructions of blind signature schemes from one-way functions. In fact, we rule out constructions even from a random permutation oracle, and our results hold even for blind signature schemes for 1-bit messages that achieve security only against honest-but-curious behavior.}, doi = {10.1007/978-3-642-19571-6_37}, author = {Katz, Jonathan and Schr{\"o}der,D. and Yerukhimovich,A.} } @article {16043, title = {Improved Identification and Visualization of Emergency Department Patient Visits}, journal = {Annals of Emergency Medicine}, volume = {58}, year = {2011}, month = {2011///}, pages = {S309 - S309}, author = {Hettinger,AZ and Rackoff,A. and Wongsuphasawat,K. and Cheng,H. and Fairbanks,RJ and Plaisant, Catherine and Smith,M. S} } @article {14353, title = {Improving Classifier Performance by Autonomously Collecting Background Knowledge from the Web}, journal = {2011 10th International Conference on Machine Learning and Applications Workshops}, year = {2011}, month = {2011///}, pages = {1 - 6}, abstract = {Many websites allow users to tag data items to make them easier to find. In this paper we consider the problem of classifying tagged data according to user-specified interests. We present an approach for aggregating background knowledge from the Web to improve the performance of a classifier. In previous work, researchers have developed technology for extracting knowledge, in the form of relational tables, from semi-structured websites. In this paper we integrate this extraction technology with generic machine learning algorithms, showing that knowledge extracted from the Web can significantly benefit the learning process.
Specifically, the knowledge can lead to better generalizations, reduce the number of samples required for supervised learning, and eliminate the need to retrain the system when the environment changes. We validate the approach with an application that classifies tagged Flickr data.}, author = {Minton,S.N. and Michelson,M. and See,K. and Macskassy,S. and Gazen,B.C. and Getoor, Lise} } @article {13003, title = {Increased gene sampling provides stronger support for higher-level groups within gracillariid leaf mining moths and relatives (Lepidoptera: Gracillariidae)}, journal = {BMC Evol Biol}, volume = {11:182}, year = {2011}, month = {2011///}, author = {Kawahara,A. Y and Ohshima,I and Kawakita,A and Regier,J. C and Mitter,C and Cummings, Michael P. and Davis,DR and Wagner,DL and De Prins,J and Lopez-Vaamonde,C} } @article {13004, title = {Increased gene sampling yields robust support for higher-level clades within Bombycoidea (Lepidoptera)}, journal = {Systematic Entomology}, volume = {36}, year = {2011}, month = {2011/01/01/}, pages = {31 - 43}, abstract = {This study has as its primary aim the robust resolution of higher-level relationships within the lepidopteran superfamily Bombycoidea. Our study builds on an earlier analysis of five genes (\~{}6.6 kbp) sequenced for 50 taxa from Bombycoidea and its sister group Lasiocampidae, plus representatives of other macrolepidopteran superfamilies. The earlier study failed to yield strong support for the monophyly of and basal splits within Bombycoidea, among others. Therefore, in an effort to increase support specifically for higher-level nodes, we generated 11.7 kbp of additional data from 20 genes for 24 of 50 bombycoid and lasiocampid taxa. The data from the genes are all derived from protein-coding nuclear genes previously used to resolve other lepidopteran relationships. With these additional data, all but a few higher-level nodes are strongly supported. Given our decision to minimize project costs by augmenting genes for only 24 of the 50 taxa, we explored whether the resulting pattern of missing data in the combined-gene matrix introduced a nonphylogenetic bias, a possibility reported by others. This was achieved by comparing node support values (i.e., nonparametric bootstrap values) based on likelihood and parsimony analyses of three datasets that differ in their number of taxa and level of missing data: 50 taxa/5 genes (dataset A), 50 taxa/25 genes (dataset B) and 24 taxa/25 genes (dataset C). Whereas datasets B and C provided similar results for common nodes, both frequently yielded higher node support relative to dataset A, arguing that: (i) more data yield increased node support and (ii) partial gene augmentation does not introduce an obvious nonphylogenetic bias. A comparison of single-gene bootstrap analyses identified four nodes for which one or two of the 25 genes provided modest to strong support for a grouping not recovered by the combined-gene result. As a summary proposal, two of these four groupings (one each within Bombycoidea and Lasiocampidae) were deemed sufficiently problematic to regard them as unresolved trichotomies. Since the alternative groupings were always highly localized on the tree, we did not judge a combined-gene analysis to present a problem outside those regions.
Based on our robustly resolved results, we have revised the classification of Bombycoidea: the family Bombycidae is restricted to its nominate subfamily, and its tribe Epiini is elevated to subfamily rank (Epiinae stat.rev.), whereas the bombycid subfamily Phiditiinae is reinstated as a separate family (Phiditiidae stat.rev.). The bombycid subfamilies Oberthueriinae Kuznetzov \& Stekolnikov, 1985, syn.nov. and Prismostictinae Forbes, 1955, syn.nov., and the family Mirinidae Kozlov, 1985, syn.nov. are established as subjective junior synonyms of Endromidae Boisduval, 1828. The family Anthelidae (Lasiocampoidea) is reincluded in the superfamily Bombycoidea.}, isbn = {1365-3113}, doi = {10.1111/j.1365-3113.2010.00543.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1365-3113.2010.00543.x/abstract;jsessionid=B5B1F8BF73034A4C41427FB1FBD7383F.d02t04}, author = {Zwick,Andreas and Regier,Jerome C and Mitter,Charles and Cummings, Michael P.} } @article {16814, title = {An incremental Hausdorff distance calculation algorithm}, journal = {Proceedings of the VLDB Endowment}, volume = {4}, year = {2011}, month = {2011/05//}, pages = {506 - 517}, abstract = {The Hausdorff distance is commonly used as a similarity measure between two point sets. Using this measure, a set X is considered similar to Y iff every point in X is close to at least one point in Y. Formally, the Hausdorff distance HausDist(X, Y) can be computed as the Max-Min distance from X to Y, i.e., find the maximum of the distance from an element in X to its nearest neighbor (NN) in Y. Although this is similar to the closest pair and farthest pair problems, computing the Hausdorff distance is a more challenging problem since its Max-Min nature involves both maximization and minimization rather than just one or the other. A traditional approach to computing HausDist(X, Y) performs a linear scan over X and utilizes an index to help compute the NN in Y for each x in X. We present a pair of basic solutions that avoid scanning X by applying the concept of aggregate NN search to searching for the element in X that yields the Hausdorff distance. In addition, we propose a novel method which incrementally explores the indexes of the two sets X and Y simultaneously. As an example application of our techniques, we use the Hausdorff distance as a measure of similarity between two trajectories (represented as point sets). We also use this example application to compare the performance of our proposed method with the traditional approach and the basic solutions. Experimental results show that our proposed method outperforms all competitors by one order of magnitude in terms of the tree traversal cost and total response time.}, isbn = {2150-8097}, url = {http://dl.acm.org/citation.cfm?id=2002974.2002978}, author = {Nutanong,Sarana and Jacox,Edwin H. and Samet, Hanan} } @article {16032, title = {Information Visualization: State of the Field and New Research Directions}, journal = {Information Visualization}, volume = {10}, year = {2011}, month = {2011/10/01/}, pages = {269 - 270}, isbn = {1473-8716, 1473-8724}, doi = {10.1177/1473871611418138}, url = {http://ivi.sagepub.com/content/10/4/269}, author = {Kerren,Andreas and Plaisant, Catherine and Stasko,John T} } @article {18579, title = {Instrumenting home networks}, journal = {SIGCOMM Comput. Commun. Rev.}, volume = {41}, year = {2011}, month = {2011///}, pages = {84 - 89}, abstract = {In managing and troubleshooting home networks, one of the challenges is in knowing what is actually happening. Availability of a record of events that occurred on the home network before trouble appeared would go a long way toward addressing that challenge. In this position/work-in-progress paper, we consider requirements for a general-purpose logging facility for home networks. Such a facility, if properly designed, would potentially have other uses. We describe several such uses and discuss requirements to be considered in the design of a logging platform that would be widely supported and accepted. We also report on our initial deployment of such a facility.}, keywords = {home network management, home network troubleshooting}, isbn = {0146-4833}, doi = {10.1145/1925861.1925875}, url = {http://doi.acm.org/10.1145/1925861.1925875}, author = {Calvert,Kenneth L. and Edwards,W. Keith and Feamster, Nick and Grinter,Rebecca E. and Deng,Ye and Zhou,Xuzi} } @article {12867, title = {Interaction of Vibrio cholerae non-O1/non-O139 with Copepods, Cladocerans and Competing Bacteria in the Large Alkaline Lake Neusiedler See, Austria}, journal = {Microbial Ecology}, volume = {61}, year = {2011}, month = {2011///}, pages = {496 - 506}, abstract = {Vibrio cholerae is a human pathogen and natural inhabitant of aquatic environments. Serogroups O1/O139 have been associated with epidemic cholera, while non-O1/non-O139 serogroups usually cause human disease other than classical cholera. V. cholerae non-O1/non-O139 from the Neusiedler See, a large Central European lake, have caused ear and wound infections, including one case of fatal septicaemia. Recent investigations demonstrated rapid planktonic growth of V. cholerae non-O1/non-O139 and correlation with zooplankton biomass. The aim of this study was to elucidate the interaction of autochthonous V. cholerae with two dominant crustacean zooplankton species in the lake and to investigate the influence of the natural bacterial community on this interaction. An existing data set was evaluated for statistical relationships between zooplankton species and V. cholerae, and co-culture experiments were performed in the laboratory. A new fluorescence in situ hybridisation protocol was applied for quantification of V. cholerae non-O1/non-O139 cells, which significantly reduced analysis time. The experiments clearly demonstrated a significant relationship of autochthonous V. cholerae non-O1/non-O139 with cladocerans by promoting growth of V. cholerae non-O1/non-O139 in the water and on the surfaces of the cladocerans. In contrast, copepods had a negative effect on the growth of V. cholerae non-O1/non-O139 via competing bacteria from their surfaces. Thus, besides other known factors, biofilm formation by V. cholerae on crustacean zooplankton appears to be zooplankton taxon-specific and may be controlled by the natural bacterial community.}, author = {Kirschner,A. K. T. and Schauer,S. and Steinberger,B. and Wilhartitz,I. and Grim,C. J. and Huq,A. and Rita R Colwell and Herzig,A.
and Sommer,R.} } @article {19689, title = {Interactive topic modeling}, year = {2011}, month = {2011/00/19}, pages = {248 - 257}, publisher = {Association for Computational Linguistics}, abstract = {Topic models have been used extensively as a tool for corpus exploration, and a cottage industry has developed to tweak topic models to better encode human intuitions or to better model data. However, creating such extensions requires expertise in machine ...}, url = {http://dl.acm.org/citation.cfm?id=2002472.2002505}, author = {Hu, Yuening and Jordan Boyd-Graber and Satinoff, Brianna} } @conference {12218, title = {IP geolocation in metropolitan areas}, booktitle = {Proceedings of the ACM SIGMETRICS joint international conference on Measurement and modeling of computer systems}, series = {SIGMETRICS {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {155 - 156}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Current IP geolocation techniques can geolocate an IP address to a region of approximately 700 square miles, roughly the size of a metropolitan area. We model geolocation as a pattern-recognition problem, and introduce techniques that geolocate addresses to within 5 miles inside a metropolitan area. We propose two complementary algorithms: the first algorithm, Pattern Based Geolocation (PBG), models the distribution of latencies to the target and compares it to those of the reference landmarks to resolve an address to within 5 miles in a metropolitan area. The second approach, Perturbation Augmented PBG (PAPBG), provides higher resolution by sending extra traffic in the network. While sending an aggregate of 600 Kbps of extra traffic to 20 nodes for approximately 2 minutes, PAPBG geolocates addresses to within 3 miles.}, keywords = {geolocation, pattern recognition, perturbation}, isbn = {978-1-4503-0814-4}, doi = {10.1145/1993744.1993803}, url = {http://doi.acm.org/10.1145/1993744.1993803}, author = {Singh,Satinder Pal and Baden,Randolph and Lee,Choon and Bhattacharjee, Bobby and La,Richard and Shayman,Mark} } @article {13274, title = {An iterative algorithm for homology computation on simplicial shapes}, journal = {Computer-Aided Design}, volume = {43}, year = {2011}, month = {2011/11//}, pages = {1457 - 1467}, abstract = {We propose a new iterative algorithm for computing the homology of arbitrary shapes discretized through simplicial complexes. We demonstrate how the simplicial homology of a shape can be effectively expressed in terms of the homology of its sub-components. The proposed algorithm retrieves the complete homological information of an input shape, including the Betti numbers, the torsion coefficients and the representative homology generators. To the best of our knowledge, this is the first algorithm based on the constructive Mayer{\textendash}Vietoris sequence, which relates the homology of a topological space to the homologies of its sub-spaces, i.e., the sub-components of the input shape and their intersections. We demonstrate the validity of our approach through a specific shape decomposition, based only on topological properties, which minimizes the size of the intersections between the sub-components and increases the efficiency of the algorithm.
}, keywords = {Computational topology, Generators, Mayer{\textendash}Vietoris sequence, shape decomposition, simplicial complexes, Z-homology}, isbn = {0010-4485}, doi = {10.1016/j.cad.2011.08.015}, url = {http://www.sciencedirect.com/science/article/pii/S0010448511002144}, author = {Boltcheva,Dobrina and Canino,David and Merino Aceituno,Sara and L{\'e}on,Jean-Claude and De Floriani, Leila and H{\'e}troy,Franck} } @article {12865, title = {Vibrio Cholerae O1 Detection in Estuarine and Coastal Zooplankton}, journal = {Journal of Plankton Research}, volume = {33}, year = {2011}, month = {01/2011}, pages = {51 - 62}, abstract = {Vibrio cholerae is an autochthonous marine bacterium, and its association with diverse planktonic crustaceans has been extensively investigated; however, the presence of V. cholerae on individuals of most phyla of planktonic animals is still incompletely understood. The objective of this study was to analyze the distribution of V. cholerae serogroup O1 associated with specific zooplankton taxa in an estuary and the adjacent continental shelf of the southeastern Brazilian coast. The occurrence of the bacterium was assessed in zooplankton samples, specifically on the most abundant taxa, using direct fluorescence assay (DFA) and direct viable count{\textendash}direct fluorescence assay (DVC{\textendash}DFA) methods. Vibrio cholerae O1 was detected in 88\% of samples collected from the Santos-Bertioga estuary and in 67\% of samples from the shelf. The salinity of the estuarine water ranged from 21.8 to 34.6, significantly lower than that of the shelf water, which was 32.1{\textendash}36.1. Salinity was the only environmental variable measured that displayed a significant correlation with the presence of V. cholerae (P < 0.05). Vibrio cholerae O1 was detected in chaetognaths, pluteus larvae of echinoderms and planktonic fish eggs (Engraulidae), all new sites for this bacterium.}, keywords = {DFA, estuary, plankton, Southwest Atlantic}, isbn = {0142-7873, 1464-3774}, doi = {10.1093/plankt/fbq093}, url = {http://plankt.oxfordjournals.org/content/33/1/51}, author = {Martinelli Filho,Jos{\'e} E. and Lopes,Rubens M. and Rivera,Irma N. G. and Rita R Colwell} } @article {12875, title = {Identification of Pathogenic Vibrio Species by Multilocus PCR-Electrospray Ionization Mass Spectrometry and Its Application to Aquatic Environments of the Former Soviet Republic of Georgia}, journal = {Applied and Environmental Microbiology}, volume = {76}, year = {2010}, month = {2010/03/15/}, pages = {1996 - 2001}, abstract = {The Ibis T5000 is a novel diagnostic platform that couples PCR and mass spectrometry. In this study, we developed an assay that can identify all known pathogenic Vibrio species and field-tested it using natural water samples from both freshwater lakes and the Georgian coastal zone of the Black Sea. Of the 278 total water samples screened, 9 different Vibrio species were detected, 114 (41\%) samples were positive for V. cholerae, and 5 (0.8\%) samples were positive for the cholera toxin A gene (ctxA). All ctxA-positive samples were from two freshwater lakes, and no ctxA-positive samples from any of the Black Sea sites were detected.}, isbn = {0099-2240, 1098-5336}, doi = {10.1128/AEM.01919-09}, url = {http://aem.asm.org/content/76/6/1996}, author = {Whitehouse,Chris A. and Baldwin,Carson and Sampath,Rangarajan and Blyn,Lawrence B. and Melton,Rachael and Li,Feng and Hall,Thomas A.
and Harpin,Vanessa and Matthews,Heather and Tediashvili,Marina and Jaiani,Ekaterina and Kokashvili,Tamar and Janelidze,Nino and Grim,Christopher and Rita R Colwell and Huq,Anwar} } @inbook {16274, title = {Identifying Differentially Abundant Metabolic Pathways in Metagenomic Datasets}, booktitle = {Bioinformatics Research and Applications}, series = {Lecture Notes in Computer Science}, volume = {6053}, year = {2010}, month = {2010///}, pages = {101 - 112}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Enabled by rapid advances in sequencing technology, metagenomic studies aim to characterize entire communities of microbes bypassing the need for culturing individual bacterial members. One major goal of such studies is to identify specific functional adaptations of microbial communities to their habitats. Here we describe a powerful analytical method (MetaPath) that can identify differentially abundant pathways in metagenomic datasets, relying on a combination of metagenomic sequence data and prior metabolic pathway knowledge. We show that MetaPath outperforms other common approaches when evaluated on simulated datasets. We also demonstrate the power of our methods in analyzing two publicly available metagenomic datasets: a comparison of the gut microbiome of obese and lean twins; and a comparison of the gut microbiome of infant and adult subjects. We demonstrate that the subpathways identified by our method provide valuable insights into the biological activities of the microbiome.}, isbn = {978-3-642-13077-9}, url = {http://dx.doi.org/10.1007/978-3-642-13078-6_12}, author = {Liu,Bo and Pop, Mihai}, editor = {Borodovsky,Mark and Gogarten,Johann and Przytycka,Teresa and Rajasekaran,Sanguthevar} } @article {14510, title = {Identifying Modifiers in Web Queries Over Structured Data}, volume = {12/473,286}, year = {2010}, month = {2010/12/02/}, abstract = {Described is using modifiers in online search queries for queries that map to a database table. A modifier (e.g., an adjective or a preposition) specifies the intended meaning of a target, in which the target maps to a column in that table. The modifier thus corresponds to one or more functions that determine which rows of data in the column match the query, e.g., {\textquotedblleft}cameras under $400{\textquotedblright} maps to a camera (or product) table, and {\textquotedblleft}under{\textquotedblright} is the modifier that represents a function (less than) that is used to evaluate a {\textquotedblleft}price{\textquotedblright} target/data column. Also described are different classes of modifiers, and generating the dictionaries for a domain (corresponding to a table) via query log mining.}, url = {http://www.google.com/patents?id=gQTkAAAAEBAJ}, author = {Paparizos,Stelios and Joshi,Amrula Sadanand and Getoor, Lise and Ntoulas,Alexandros}, editor = {Microsoft Corporation} } @article {14222, title = {Illusory Lightness Perception Due to Signal Compression and Reconstruction}, journal = {Journal of Vision}, volume = {10}, year = {2010}, month = {2010/08/02/}, pages = {426 - 426}, abstract = {We propose a computational model that can account for a large number of lightness illusions, including the seemingly opposing effects of brightness contrast and assimilation. The underlying mathematics is based on the new theory of compressive sensing, which provides an efficient method for sampling and reconstructing a signal that is sparse or compressible.
The model states that at the retina the intensity signal is compressed. This process amounts to a random sampling of locally averaged values. In the cortex the intensity values are reconstructed using as input the compressed signal, and combined with the edges. Reconstruction amounts to solving an underdetermined linear equation system using L1 norm minimization. Assuming that the intensity signal is sparse in the Fourier domain, the reconstructed signal, which is a linear combination of a small number of Fourier components, deviates from the original signal. The reconstruction error is consistent with the perception of many well known lightness illusions, including the contrast and the assimilation effect, the articulated enhanced brightness contrast, the checker shadow illusion, and the grating induction. Considering, in addition, the space-variant resolution of the human eye, the model also explains illusory patterns with changes in perceived lightness over large ranges, such as the Cornsweet and related illusions. We conducted experiments with new variations of the White and the Dungeon illusion, whose perception changes with the resolution at which the different parts of the patterns appear on the eye, and found that the model predicted well the perception in these stimuli.}, isbn = {1534-7362}, doi = {10.1167/10.7.426}, url = {http://www.journalofvision.org/content/10/7/426}, author = {Ferm{\"u}ller, Cornelia and Li,Yi} } @article {14173, title = {Illusory motion due to causal time filtering}, journal = {Vision Research}, volume = {50}, year = {2010}, month = {2010///}, pages = {315 - 329}, author = {Ferm{\"u}ller, Cornelia and Ji,H. and Kitaoka,A.} } @conference {12429, title = {Image classification of vascular smooth muscle cells}, booktitle = {Proceedings of the 1st ACM International Health Informatics Symposium}, series = {IHI {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {484 - 486}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {cell biology, digital image processing, machine learning}, isbn = {978-1-4503-0030-8}, doi = {10.1145/1882992.1883068}, url = {http://doi.acm.org/10.1145/1882992.1883068}, author = {Grasso,Michael A. and Mokashi,Ronil and Dalvi,Darshana and Cardone, Antonio and Dima,Alden A. and Bhadriraju,Kiran and Plant,Anne L. and Brady,Mary and Yesha,Yaacov and Yesha,Yelena} } @conference {16818, title = {Images in News}, booktitle = {Pattern Recognition (ICPR), 2010 20th International Conference on}, year = {2010}, month = {2010/08//}, pages = {3240 - 3243}, abstract = {A system, called NewsStand, is introduced that automatically extracts images from news articles. The system takes RSS feeds of news articles and applies an online clustering algorithm so that articles belonging to the same news topic can be associated with the same cluster. Using the feature vector associated with the cluster, the images from news articles that form the cluster are extracted. First, the caption text associated with each of the images embedded in the news article is determined. This is done by analyzing the structure of the news article{\textquoteright}s HTML page. If the caption and feature vector of the cluster are found to contain keywords in common, then the image is added to an image repository. Additional meta-information is now associated with each image such as caption, cluster features, names of people in the news article, etc.
A very large repository containing more than 983k images from 12 million news articles was built using this approach. This repository also contained more than 86.8 million keywords associated with the images. The key contribution of this work is that it combines clustering and natural language processing tasks to automatically create a large corpus of news images with good quality tags or meta-information so that interesting vision tasks can be performed on it.}, keywords = {caption text, feature extraction, feature vector, HTML page, hypermedia markup languages, image extraction, image repository, image retrieval, meta-information, natural language processing, news articles, news images, news stand, online clustering algorithm, pattern clustering, publishing, RSS feeds}, doi = {10.1109/ICPR.2010.792}, author = {Sankaranarayanan,J. and Samet, Hanan} } @article {14487, title = {On the importance of sharing negative results}, journal = {SIGKDD explorations}, volume = {12}, year = {2010}, month = {2010///}, pages = {3 - 4}, author = {Giraud-Carrier,C. and Dunham,M.H. and Atreya,A. and Elkan,C. and Perlich,C. and Swirszcz,G. and Shi,X. and Philip,S.Y. and F{\"u}rnkranz,J. and Sima,J.F.} } @mastersthesis {19498, title = {Improving the Dependability of Distributed Systems Through AIR Software Upgrades}, year = {2010}, month = {2010///}, school = {Carnegie Mellon University}, address = {Pittsburgh, PA, USA}, abstract = {Traditional fault-tolerance mechanisms concentrate almost entirely on responding to, avoiding, or tolerating unexpected faults or security violations. However, scheduled events, such as software upgrades, account for most of the system unavailability and often introduce data corruption or latent errors. Through two empirical studies, this dissertation identifies the leading causes of upgrade failure{\textemdash}breaking hidden dependencies{\textemdash}and of planned downtime{\textemdash}complex data conversions{\textemdash}in distributed enterprise systems. These findings represent the foundation of a new benchmark for software-upgrade dependability. This dissertation further introduces the AIR properties{\textemdash}ATOMICITY, ISOLATION and RUNTIME-TESTING{\textemdash}required for improving the dependability of distributed systems that undergo major software upgrades. The AIR properties are realized in Imago, a system designed to reduce both planned and unplanned downtime by upgrading distributed systems end-to-end. Imago builds upon the idea of isolating the production system from the upgrade operations, in order to avoid breaking hidden dependencies and to decouple the data conversions from the normal system operation. Imago includes novel mechanisms, such as providing a parallel universe for the new version, performing data conversions opportunistically, intercepting the live workload at the ingress and egress points, or executing an atomic switchover to the new version, which allow it to deliver the AIR properties. Imago harnesses opportunities provided by the emerging cloud-computing technologies, by trading resource overhead (needed by the parallel universe) for an improved dependability of the software upgrades. This approach separates the functional aspects of the upgrade from the mechanisms for online upgrade, enabling an upgrade-as-a-service model.
This dissertation also describes techniques for assessing the impact of software upgrades, in order to reason about the implications of relaxing the AIR guarantees.}, author = {Tudor Dumitras} } @conference {16692, title = {Improving translation via targeted paraphrasing}, booktitle = {Proceedings of the 2010 Conference on Empirical Methods in Natural Language Processing}, year = {2010}, month = {2010///}, pages = {127 - 137}, author = {Resnik, Philip and Buzek,O. and Hu,C. and Kronrod,Y. and Quinn,A. and Bederson, Benjamin B.} } @conference {13373, title = {Increasing representational power and scaling reasoning in probabilistic databases}, booktitle = {Proceedings of the 13th International Conference on Database Theory}, series = {ICDT {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {1 - 1}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Increasing numbers of real-world application domains are generating data that is inherently noisy, incomplete, and probabilistic in nature. Statistical analysis and probabilistic inference, widely used in those domains, often introduce additional layers of uncertainty. Examples include sensor data analysis, data integration and information extraction on the Web, social network analysis, and scientific and biomedical data management. Managing and querying such data requires us to combine the tools and the techniques from a variety of disciplines including databases, first-order logic, and probabilistic reasoning. There has been much work at the intersection of these research areas in recent years. The work on probabilistic databases has made great advances in efficiently executing SQL and inference queries over large-scale uncertain datasets [2, 1]. The research in first-order probabilistic models like probabilistic relational models [5], Markov logic networks [10] etc. (see Getoor and Taskar [6] for a comprehensive overview), and the work on lifted inference [9, 3, 8, 11] has resulted in several techniques for efficiently integrating first-order logic and probabilistic reasoning. In this talk, I will present some of the foundations of large-scale probabilistic data management, and the challenges in scaling the representational power and the reasoning capabilities of probabilistic databases. I will use the PrDB probabilistic data management system being developed at the University of Maryland as a case study for this purpose [4, 7, 12]. Unlike the other recent work on probabilistic databases, PrDB is designed to represent uncertain data with rich correlation structures, and it uses probabilistic graphical models as the basic representation model. I will discuss how PrDB supports compact specification of uncertainties at different abstraction levels, from "schema-level" uncertainties that apply to entire relations to "tuple-specific" uncertainties that apply to a specific tuple or a specific set of tuples; I will also discuss how this relates to the work on first-order probabilistic models. Query evaluation in PrDB can be formulated as inference in appropriately constructed graphical models, and I will briefly present some of the key novel techniques that we have developed for efficient query evaluation, and their relationship to recent work on efficient lifted inference.
I will conclude with a discussion of some of the open research challenges moving forward.}, isbn = {978-1-60558-947-3}, doi = {10.1145/1804669.1804671}, url = {http://doi.acm.org/10.1145/1804669.1804671}, author = {Deshpande, Amol} } @article {14377, title = {Indirect two-sided relative ranking: a robust similarity measure for gene expression data}, journal = {BMC Bioinformatics}, volume = {11}, year = {2010}, month = {2010///}, pages = {137 - 137}, abstract = {There is a large amount of gene expression data that exists in the public domain. This data has been generated under a variety of experimental conditions. Unfortunately, these experimental variations have generally prevented researchers from accurately comparing and combining this wealth of data, which still hides many novel insights. Results: In this paper we present a new method, which we refer to as indirect two-sided relative ranking, for comparing gene expression profiles that is robust to variations in experimental conditions. This method extends the current best approach, which is based on comparing the correlations of the up and down regulated genes, by introducing a comparison based on the correlations in rankings across the entire database. Because our method is robust to experimental variations, it allows a greater variety of gene expression data to be combined, which, as we show, leads to richer scientific discoveries. Conclusions: We demonstrate the benefit of our proposed indirect method on several datasets. We first evaluate the ability of the indirect method to retrieve compounds with similar therapeutic effects across known experimental barriers, namely vehicle and batch effects, on two independent datasets (one private and one public). We show that our indirect method is able to significantly improve upon the previous state-of-the-art method with a substantial improvement in recall at rank 10 of 97.03\% and 49.44\%, on each dataset, respectively. Next, we demonstrate that our indirect method results in improved accuracy for classification in several additional datasets. These datasets demonstrate the use of our indirect method for classifying cancer subtypes, predicting drug sensitivity/resistance, and classifying (related) cell types. Even in the absence of a known (i.e., labeled) experimental barrier, the improvement of the indirect method in each of these datasets is statistically significant.}, author = {Licamele,L. and Getoor, Lise} } @article {19614, title = {On the initialization methods of an exterior point algorithm for the assignment problem}, journal = {International Journal of Computer Mathematics}, volume = {87}, year = {2010}, month = {2010///}, pages = {1831 - 1846}, abstract = {In this paper, we present a theoretical investigation and an extensive computational study of the exterior point simplex algorithm (EPSA) initialization methods for the assignment problem (AP). We describe the exterior point algorithm using three different initialization methods. Effective implementations are explored for each initialization method. Then we perform an experimental evaluation on a large set of benchmark problems from the TSPLib 95 and OR Library collections. The results obtained demonstrate the advantages of the three initialization methods. Finally, we give a theoretical justification of the initialization methods{\textquoteright} efficiency.
We explain theoretically the computational ranking for these methods.}, isbn = {0020-7160}, url = {http://www.tandfonline.com/doi/abs/10.1080/00207160802524739}, author = {Charalampos Papamanthou and Paparrizos, K. and Samaras, N. and Sifaleras, A.} } @article {13957, title = {Insights into head-related transfer function: Spatial dimensionality and continuous representation}, journal = {The Journal of the Acoustical Society of America}, volume = {127}, year = {2010}, month = {2010///}, pages = {2347 - 2357}, abstract = {This paper studies head-related transfer function (HRTF) sampling and synthesis in a three-dimensional auditory scene based on a general modal decomposition of the HRTF in all frequency-range-angle domains. The main finding is that the HRTF decomposition with the derived spatial basis function modes can be well approximated by a finite number, which is defined as the spatial dimensionality of the HRTF. The dimensionality determines the minimum number of parameters to represent the HRTF corresponding to all directions and also the required spatial resolution in HRTF measurement. The general model is further developed to a continuous HRTF representation, in which the normalized spatial modes can achieve HRTF near-field and far-field representations in one formulation. The remaining HRTF spectral components are compactly represented using a Fourier spherical Bessel series, where the aim is to generate the HRTF with much higher spectral resolution in fewer parameters from typical measurements, which usually have limited spectral resolution constrained by sampling conditions. A low-computation algorithm is developed to obtain the model coefficients from the existing measurements. The HRTF synthesis using the proposed model is validated by three sets of data: (i) synthetic HRTFs from the spherical head model, (ii) the MIT KEMAR (Knowles Electronics Mannequin for Acoustics Research) data, and (iii) 45-subject CIPIC HRTF measurements.}, keywords = {acoustic signal processing, Bessel functions, Fourier series, hearing, Transfer functions}, doi = {10.1121/1.3336399}, url = {http://link.aip.org/link/?JAS/127/2347/1}, author = {Zhang,Wen and Abhayapala,Thushara D. and Kennedy,Rodney A. and Duraiswami, Ramani} } @article {14401, title = {Integrating structured metadata with relational affinity propagation}, journal = {Proceedings of the AAAI Workshop on Statistical Relational AI}, year = {2010}, month = {2010///}, abstract = {Structured and semi-structured data describing entities, taxonomies and ontologies appears in many domains. There is a huge interest in integrating structured information from multiple sources; however integrating structured data to infer complex common structures is a difficult task because the integration must aggregate similar structures while avoiding structural inconsistencies that may appear when the data is combined. In this work, we study the integration of structured social metadata: shallow personal hierarchies specified by many individual users on the Social Web, and focus on inferring a collection of integrated, consistent taxonomies. We frame this task as an optimization problem with structural constraints. We propose a new inference algorithm, which we refer to as Relational Affinity Propagation (RAP) that extends affinity propagation (Frey and Dueck 2007) by introducing structural constraints. We validate the approach on a real-world social media dataset, collected from the photosharing website Flickr.
Our empirical results show that our proposed approach is able to construct deeper and denser structures compared to an approach using only the standard affinity propagation algorithm.}, author = {Plangprasopchok,A. and Lerman,K. and Getoor, Lise} } @article {12967, title = {Intensity normalization improves color calling in SOLiD sequencing}, journal = {Nat Meth}, volume = {7}, year = {2010}, month = {2010/05//}, pages = {336 - 337}, isbn = {1548-7091}, doi = {10.1038/nmeth0510-336}, url = {http://dx.doi.org/10.1038/nmeth0510-336}, author = {Wu,Hao and Irizarry,Rafael A and Corrada Bravo, Hector} } @article {16052, title = {Interactive information visualization for exploring and querying electronic health records: A systematic review}, year = {2010}, month = {2010///}, institution = {Human-Computer Interaction Lab, University of Maryland}, abstract = {To overcome the complexity and scale of making medical decisions based on electronic health records (EHRs), a variety of visual methods have been proposed. This paper surveys twelve state-of-the-art information visualization systems described in the scientific literature and compares them based on a set of carefully selected criteria. It aims to systematically examine the systems{\textquoteright} strengths and weaknesses to inform future information visualization designs. We select twelve state-of-the-art information visualization systems from information visualization, medical information systems and human-computer interaction conferences and journals. We compare the systems using the following criteria: (1) data types covered, (2) multivariate analysis support, (3) number of patient records used (one or many), and (4) user intents addressed. The review describes the twelve systems in detail and evaluates the systems using the aforementioned criteria. We discuss how the systems differ in their features and highlight how these differences are related to their design and affect the user intent model. Examples of findings include: (1) most systems handle numerical or categorical data but not both, (2) most systems are specifically designed for looking at a single patient or multiple patients but not both, (3) most systems utilize horizontal time lines to represent time, (4) only systems that handle multiple patient records have good support for Filter, and (5) some specific user intents (e.g. the Encode and Connect intents) are rarely addressed. Based on our review results, we believe that effective information visualization can facilitate analysis of patient records, and we encourage the information visualization community to study the application of their systems and conduct more in depth evaluations. We identify potential future research topics in interactive support for data abstraction and medical tasks that involve looking at a single or multiple records. Finally, we propose to create a repository for data and tasks so benchmarks can be established for both academic and commercial patient record visualization systems.}, author = {Rind,A. and Wang,T. D and Aigner,W. and Miksch,S. and Wongsuphasawat,K.
and Plaisant, Catherine and Shneiderman, Ben} } @article {13798, title = {Interlingual Annotation of Parallel Text Corpora: A New Framework for Annotation and Evaluation}, journal = {Natural Language Engineering}, volume = {16}, year = {2010}, month = {2010///}, pages = {197 - 243}, abstract = {This paper focuses on an important step in the creation of a system of meaning representation and the development of semantically annotated parallel corpora, for use in applications such as machine translation, question answering, text summarization, and information retrieval. The work described below constitutes the first effort of any kind to annotate multiple translations of foreign-language texts with interlingual content. Three levels of representation are introduced: deep syntactic dependencies (IL0), intermediate semantic representations (IL1), and a normalized representation that unifies conversives, nonliteral language, and paraphrase (IL2). The resulting annotated, multilingually induced, parallel corpora will be useful as an empirical basis for a wide range of research, including the development and evaluation of interlingual NLP systems and paraphrase-extraction systems as well as a host of other research and development efforts in theoretical and applied linguistics, foreign language pedagogy, translation studies, and other related disciplines.}, doi = {10.1017/S1351324910000070}, author = {Dorr, Bonnie J and Passonneau,Rebecca J. and Farwell,David and Green,Rebecca and Habash,Nizar and Helmreich,Stephen and Hovy,Eduard and Levin,Lori and Miller,Keith J. and Mitamura,Teruko and Rambow,Owen and Siddharthan,Advaith} } @article {18522, title = {An Internet Wide View into DNS Lookup Patterns}, year = {2010}, month = {2010///}, institution = {VeriSign Labs, School of Computer Science, Georgia Tech}, abstract = {This paper analyzes the DNS lookup patterns from a large authoritative top-level domain server and characterizes how the lookup patterns for unscrupulous domains may differ from those for legitimate domains. We examine domains for phishing attacks and spam and malware related domains, and see how these lookup patterns vary in terms of both their temporal and spatial characteristics. We find that malicious domains tend to exhibit more variance in the networks that look up these domains, and we also find that these domains become popular considerably more quickly after their initial registration time. We also note that miscreant domains exhibit distinct clusters, in terms of the networks that look up these domains. The distinct spatial and temporal characteristics of these domains, and their tendency to exhibit similar lookup behavior, suggest that it may be possible to ultimately develop more effective blacklisting techniques based on these differing lookup patterns.}, author = {Hao,S. and Feamster, Nick and Pandrangi,R.} } @article {15883, title = {Investigating multi-label classification for human values}, journal = {Proceedings of the American Society for Information Science and Technology}, volume = {47}, year = {2010}, month = {2010///}, pages = {1 - 4}, author = {Ishita,E. and Oard, Douglas and Fleischmann,K.R. and Cheng,A.S. and Templeton,T.C.} } @conference {13932, title = {Investigating the impact of design processes on children}, booktitle = {Proceedings of the 9th International Conference on Interaction Design and Children}, year = {2010}, month = {2010///}, pages = {198 - 201}, author = {Guha,M.L. and Druin, Allison and Fails,J.
A} } @article {13804, title = {iOpener Workbench: Tools for rapid understanding of scientific literature}, journal = {Human-Computer Interaction Lab 27th Annual Symposium, University of Maryland, College Park, MD}, year = {2010}, month = {2010///}, author = {Dunne,C. and Shneiderman, Ben and Dorr, Bonnie J and Klavans,J.} } @article {13300, title = {Isodiamond Hierarchies: An Efficient Multiresolution Representation for Isosurfaces and Interval Volumes}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {16}, year = {2010}, month = {2010///}, pages = {583 - 598}, abstract = {Efficient multiresolution representations for isosurfaces and interval volumes are becoming increasingly important as the gap between volume data sizes and processing speed continues to widen. Our multiresolution scalar field model is a hierarchy of tetrahedral clusters generated by longest edge bisection that we call a hierarchy of diamonds. We propose two multiresolution models for representing isosurfaces, or interval volumes, extracted from a hierarchy of diamonds which exploit its regular structure. These models are defined by subsets of diamonds in the hierarchy that we call isodiamonds, which are enhanced with geometric and topological information for encoding the relation between the isosurface, or interval volume, and the diamond itself. The first multiresolution model, called a relevant isodiamond hierarchy, encodes the isodiamonds intersected by the isosurface, or interval volume, as well as their nonintersected ancestors, while the second model, called a minimal isodiamond hierarchy, encodes only the intersected isodiamonds. Since both models operate directly on the extracted isosurface or interval volume, they require significantly less memory and support faster selective refinement queries than the original multiresolution scalar field, but do not support dynamic isovalue modifications. Moreover, since a minimal isodiamond hierarchy only encodes intersected isodiamonds, its extracted meshes require significantly less memory than those extracted from a relevant isodiamond hierarchy. We demonstrate the compactness of isodiamond hierarchies by comparing them to an indexed representation of the mesh at full resolution.}, keywords = {data processing speed; edge bisection; encoding; interval volumes; isodiamond hierarchies; isosurfaces; mesh representation; minimal isodiamond hierarchy; multiresolution representation; multiresolution scalar field model; relevant isodiamond hierarchy; volume data; Image Processing, Computer-Assisted; Imaging, Three-Dimensional; Models, Theoretical; User-Computer Interface}, isbn = {1077-2626}, doi = {10.1109/TVCG.2010.29}, author = {Weiss,K. and De Floriani, Leila} } @article {15440, title = {Iterative execution-feedback model-directed GUI testing}, journal = {Information and Software Technology}, volume = {52}, year = {2010}, month = {2010/05//}, pages = {559 - 575}, abstract = {Current fully automatic model-based test-case generation techniques for GUIs employ a static model. Therefore they are unable to leverage certain state-based relationships between GUI events (e.g., one enables the other, one alters the other{\textquoteright}s execution) that are revealed at run-time and non-trivial to infer statically. We present ALT {\textendash} a new technique to generate GUI test cases in batches. Because of its {\textquotedblleft}alternating{\textquotedblright} nature, ALT enhances the next batch by using GUI run-time information from the current batch.
An empirical study on four fielded GUI-based applications demonstrated that ALT was able to detect new 4- and 5-way GUI interaction faults; in contrast, previous techniques, due to their requirement of too many test cases, were unable to even test 4- and 5-way GUI interactions.}, keywords = {event-driven software, event-flow graphs, GUI testing, model-based testing, test-case generation}, isbn = {0950-5849}, doi = {10.1016/j.infsof.2009.11.009}, url = {http://www.sciencedirect.com/science/article/pii/S0950584909002092}, author = {Xun Yuan and Memon, Atif M.} } @conference {12238, title = {Identifying close friends on the internet}, booktitle = {Proc. of workshop on Hot Topics in Networks (HotNets-VIII)}, year = {2009}, month = {2009///}, abstract = {Online Social Networks (OSNs) encourage users to create an online presence that reflects their offline identity. OSNs create the illusion that these online accounts correspond to the correct offline person, but in reality the OSN lacks the resources to detect impersonation. We propose that OSN users identify each other based on interaction and experience. We believe that impersonation can be thwarted by users who possess exclusive shared knowledge, secret information shared only between a pair of OSN friends. We describe existing protocols that use shared secrets to exchange public keys without revealing those secrets to attackers. We present results from a user study on Facebook to show that users do share exclusive knowledge with their Facebook friends and attackers are rarely able to guess that knowledge. Finally, we show that friend identification can be extended using a web of trust built on the OSN friend graph.}, author = {Baden,R. and Spring, Neil and Bhattacharjee, Bobby} } @conference {14451, title = {Identifying graphs from noisy and incomplete data}, booktitle = {Proceedings of the 1st ACM SIGKDD Workshop on Knowledge Discovery from Uncertain Data}, series = {U {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {23 - 29}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {There is a growing wealth of data describing networks of various types, including social networks, physical networks such as transportation or communication networks, and biological networks. At the same time, there is a growing interest in analyzing these networks, in order to uncover general laws that govern their structure and evolution, and patterns and predictive models to develop better policies and practices. However, a fundamental challenge in dealing with this newly available observational data describing networks is that the data is often of dubious quality -- it is noisy and incomplete -- and before any analysis method can be applied, the data must be cleaned, and missing information inferred. In this paper, we introduce the notion of graph identification, which explicitly models the inference of a "cleaned" output network from a noisy input graph. It is this output network that is appropriate for further analysis. We present an illustrative example and use the example to explore the types of inferences involved in graph identification, as well as the challenges and issues involved in combining those inferences.
We then present a simple, general approach to combining the inferences in graph identification and experimentally show the utility of our combined approach and how the performance of graph identification is sensitive to the inter-dependencies among these inferences.}, keywords = {classification, data mining, entity resolution, link prediction, social networks, statistical relational learning}, isbn = {978-1-60558-675-5}, doi = {10.1145/1610555.1610559}, url = {http://doi.acm.org/10.1145/1610555.1610559}, author = {Namata,Jr.,Galileo Mark S. and Getoor, Lise} } @article {12029, title = {Image Transformations and Blurring}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, volume = {31}, year = {2009}, month = {2009///}, pages = {811 - 823}, abstract = {Since cameras blur the incoming light during measurement, different images of the same surface do not contain the same information about that surface. Thus, in general, corresponding points in multiple views of a scene have different image intensities. While multiple-view geometry constrains the locations of corresponding points, it does not give relationships between the signals at corresponding locations. This paper offers an elementary treatment of these relationships. We first develop the notion of {\textquotedblleft}ideal{\textquotedblright} and {\textquotedblleft}real{\textquotedblright} images, corresponding to, respectively, the raw incoming light and the measured signal. This framework separates the filtering and geometric aspects of imaging. We then consider how to synthesize one view of a surface from another; if the transformation between the two views is affine, it emerges that this is possible if and only if the singular values of the affine matrix are positive. Next, we consider how to combine the information in several views of a surface into a single output image. By developing a new tool called {\textquotedblleft}frequency segmentation,{\textquotedblright} we show how this can be done despite not knowing the blurring kernel.}, keywords = {reconstruction, restoration, sharpening and deblurring, smoothing}, isbn = {0162-8828}, doi = {http://doi.ieeecomputersociety.org/10.1109/TPAMI.2008.133}, author = {Domke, Justin and Aloimonos, J.} } @article {13973, title = {Imaging room acoustics with the audio camera}, journal = {The Journal of the Acoustical Society of America}, volume = {125}, year = {2009}, month = {2009///}, pages = {2544 - 2544}, abstract = {Using a spherical microphone array and real time signal processing using a graphical processing unit (GPU), an audio camera has been developed. This device provides images of the intensity of the sound field arriving at a point from a specified direction to the spherical array. Real-time performance is achieved via use of GPUs. The intensity can be displayed integrated over the whole frequency band of the array, or in false color, with different frequency bands mapped to different color bands. The resulting audio camera may be combined with video cameras to achieve multimodal scene capture and analysis. A theory of registration of audio camera images with video camera images is developed, and joint analysis of audio and video images performed. An interesting application of the audio camera is the imaging of concert hall acoustics. The individual reflections that constitute the impulse response measured at a particular seat may be imaged, and their spatial origin determined. Other applications of the audio camera to people tracking, noise suppression, and camera pointing are also presented.
[Work partially supported by NVIDIA and the VA.]}, url = {http://link.aip.org/link/?JAS/125/2544/2}, author = {O{\textquoteright}Donovan,Adam and Duraiswami, Ramani and Gumerov, Nail A. and Zotkin,Dmitry N} } @article {14941, title = {An Implementation of the Audit Control Environment (ACE) to Support the Long Term Integrity of Digital Archives}, journal = {Proceedings of DigCCurr2009 Digital Curation: Practice, Promise and Prospects}, year = {2009}, month = {2009///}, pages = {164 - 164}, abstract = {In this paper, we describe the implementation of the Audit Control Environment (ACE) [1] system that provides a scalable, auditable platform for ensuring the integrity of digital archival holdings. The core of ACE is a small integrity token issued for each monitored item, which is part of a larger, externally auditable cryptographic system. Two components that describe this system, an Audit Manager and Integrity Management Service, have been developed and released. The Audit Manager component is designed to be installed locally at the archive, while the Integrity Management Service is a centralized, publicly available service. ACE allows for the monitoring of collections on a variety of disk and grid based storage systems. Each collection in ACE is subject to monitoring based on a customizable policy. The released ACE Version 1.0 has been tested extensively on a wide variety of collections in both centralized and distributed environments.}, author = {Smorul,M. and Song,S. and JaJa, Joseph F.} } @conference {19033, title = {Implicit authentication for mobile devices}, series = {HotSec{\textquoteright}09}, year = {2009}, month = {2009}, pages = {9 - 9}, publisher = {USENIX Association}, organization = {USENIX Association}, abstract = {We introduce the notion of implicit authentication - the ability to authenticate mobile users based on actions they would carry out anyway. We develop a model for how to perform implicit authentication, and describe experiments aimed at assessing the benefits of our techniques. Our preliminary findings support that this is a meaningful approach, whether used to increase usability or increase security.}, url = {http://dl.acm.org/citation.cfm?id=1855628.1855637}, author = {Jakobsson, Markus and Elaine Shi and Golle, Philippe and Chow, Richard} } @conference {14560, title = {Improved approximation algorithms for prize-collecting Steiner tree and TSP}, booktitle = {2009 50th Annual IEEE Symposium on Foundations of Computer Science}, year = {2009}, month = {2009///}, pages = {427 - 436}, author = {Archer,A. and Bateni,M. H and Hajiaghayi, Mohammad T. and Karloff,H.} } @inbook {19638, title = {Improved Non-committing Encryption with Applications to Adaptively Secure Protocols}, booktitle = {Advances in Cryptology {\textendash} ASIACRYPT 2009}, series = {Lecture Notes in Computer Science}, year = {2009}, month = {2009/01/01/}, pages = {287 - 302}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present a new construction of non-committing encryption schemes. Unlike the previous constructions of Canetti et al. (STOC {\textquoteright}96) and of Damg{\r a}rd and Nielsen (Crypto {\textquoteright}00), our construction achieves all of the following properties: Optimal round complexity. Our encryption scheme is a 2-round protocol, matching the round complexity of Canetti et al. and improving upon that in Damg{\r a}rd and Nielsen. Weaker assumptions.
Our construction is based on trapdoor simulatable cryptosystems, a new primitive that we introduce as a relaxation of those used in previous works. We also show how to realize this primitive based on hardness of factoring. Improved efficiency. The amortized complexity of encrypting a single bit is O(1) public key operations on a constant-sized plaintext in the underlying cryptosystem. As a result, we obtain the first non-committing public-key encryption schemes under hardness of factoring and worst-case lattice assumptions; previously, such schemes were only known under the CDH and RSA assumptions. Combined with existing work on secure multi-party computation, we obtain protocols for multi-party computation secure against a malicious adversary that may adaptively corrupt an arbitrary number of parties under weaker assumptions than were previously known. Specifically, we obtain the first adaptively secure multi-party protocols based on hardness of factoring in both the stand-alone setting and the UC setting with a common reference string.}, keywords = {adaptive corruption, Algorithm Analysis and Problem Complexity, Applications of Mathematics, Data Encryption, Data Structures, Cryptology and Information Theory, Discrete Mathematics in Computer Science, non-committing encryption, public-key encryption, secure multi-party computation, Systems and Data Security}, isbn = {978-3-642-10365-0, 978-3-642-10366-7}, url = {http://link.springer.com/chapter/10.1007/978-3-642-10366-7_17}, author = {Choi, Seung Geol and Dana Dachman-Soled and Malkin, Tal and Wee, Hoeteck}, editor = {Matsui, Mitsuru} } @conference {16679, title = {Improved statistical machine translation using monolingually-derived paraphrases}, booktitle = {Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing: Volume 1-Volume 1}, year = {2009}, month = {2009///}, pages = {381 - 390}, author = {Marton,Y. and Callison-Burch,C. and Resnik, Philip} } @article {15763, title = {Improvement and analysis of computational methods for prediction of residual dipolar couplings}, journal = {Journal of Magnetic Resonance}, volume = {201}, year = {2009}, month = {2009/11//}, pages = {25 - 33}, abstract = {We describe a new, computationally efficient method for computing the molecular alignment tensor based on the molecular shape. The increase in speed is achieved by re-expressing the problem as one of numerical integration, rather than a simple uniform sampling (as in the PALES method), and by using a convex hull rather than a detailed representation of the surface of a molecule. This method is applicable to bicelles, PEG/hexanol, and other alignment media that can be modeled by steric restrictions introduced by a planar barrier. This method is used to further explore and compare various representations of protein shape by an equivalent ellipsoid. We also examine the accuracy of the alignment tensor and residual dipolar couplings (RDC) prediction using various ab initio methods. We separately quantify the inaccuracy in RDC prediction caused by the inaccuracy in the orientation and in the magnitude of the alignment tensor, concluding that orientation accuracy is much more important in accurate prediction of RDCs.}, keywords = {Ab initio prediction, Alignment tensor, PALES, Residual dipolar coupling}, isbn = {1090-7807}, doi = {10.1016/j.jmr.2009.07.028}, url = {http://www.sciencedirect.com/science/article/pii/S1090780709002304}, author = {Berlin,Konstantin and O{\textquoteright}Leary,Dianne P. 
and Fushman, David} } @article {17224, title = {Improving graph drawing readability by incorporating readability metrics: A software tool for network analysts}, journal = {University of Maryland, HCIL Tech Report HCIL-2009-13}, year = {2009}, month = {2009///}, abstract = {Designing graph drawings that effectively communicate the underlying network is challenging, as for every network there are many potential unintelligible or even misleading drawings. Automated graph layout algorithms have helped, but frequently generate ineffective drawings. In order to build awareness of effective graph drawing strategies, we detail readability metrics on a [0,1] continuous scale for node occlusion, edge crossing, edge crossing angle, and edge tunneling and summarize many more. Additionally, we define new node \& edge readability metrics to provide more localized identification of where improvement is needed. These are implemented in SocialAction, a tool for social network analysis, in order to direct users towards poor areas of the drawing and provide real-time readability metric feedback as users manipulate it. These contributions are aimed at heightening the awareness of network analysts that the images they share or publish could be of higher quality, so that readers could extract relevant information.}, author = {Dunne,C. and Shneiderman, Ben} } @article {17613, title = {Improving recommendation accuracy by clustering social networks with trust}, journal = {Recommender Systems \& the Social Web}, year = {2009}, month = {2009///}, pages = {1 - 8}, abstract = {Social trust relationships between users in social networks speak to the similarity in opinions between the users, both in general and in important nuanced ways. They have been used in the past to make recommendations on the web. New trust metrics allow us to easily cluster users based on trust. In this paper, we investigate the use of trust clusters as a new way of improving recommendations. Previous work on the use of clusters has shown the technique to be relatively unsuccessful, but those clusters were based on similarity rather than trust. Our results show that when trust clusters are integrated into memory-based collaborative filtering algorithms, they lead to statistically significant improvements in accuracy. In this paper we discuss our methods, experiments, results, and potential future applications of the technique.}, author = {DuBois,T. and Golbeck,J. and Kleint,J. and Srinivasan, Aravind} } @conference {16604, title = {Improving rule extraction from neural networks by modifying hidden layer representations}, booktitle = {Neural Networks, 2009. IJCNN 2009. International Joint Conference on}, year = {2009}, month = {2009///}, pages = {1316 - 1321}, author = {Huynh,T. Q and Reggia, James A.} } @conference {15863, title = {Improving search effectiveness in the legal e-discovery process using relevance feedback}, booktitle = {Proceedings of the global E-Discovery/E-Disclosure workshop on electronically stored information in discovery at the 12th international conference on artificial intelligence and law (ICAIL09 DESI Workshop). DESI Press, Barcelona}, year = {2009}, month = {2009///}, author = {Zhao,F.C.
and Oard, Douglas and Baron,J.R.} } @article {15110, title = {Improving the round complexity of VSS in point-to-point networks}, journal = {Information and Computation}, volume = {207}, year = {2009}, month = {2009/08//}, pages = {889 - 899}, abstract = {We revisit the following question: what is the optimal round complexity of verifiable secret sharing (VSS)? We focus here on the case of perfect VSS where the number of corrupted parties t satisfies t \< n/3, with n the total number of parties. Work of Gennaro et al. (STOC 2001) and Fitzi et al. (TCC 2006) shows that, assuming a broadcast channel, three rounds are necessary and sufficient for efficient VSS. Existing protocols, however, treat the broadcast channel as being available {\textquotedblleft}for free{\textquotedblright} and do not attempt to minimize its usage. This approach leads to relatively poor round complexity when such protocols are compiled to run over a point-to-point network. We show here a VSS protocol that is simultaneously optimal in terms of both the number of rounds and the number of invocations of broadcast. Our protocol also satisfies a certain {\textquotedblleft}2-level sharing{\textquotedblright} property that makes it useful for constructing protocols for general secure computation.}, isbn = {0890-5401}, doi = {10.1016/j.ic.2009.03.007}, url = {http://www.sciencedirect.com/science/article/pii/S0890540109000935}, author = {Katz, Jonathan and Koo,Chiu-Yuen and Kumaresan,Ranjit} } @inbook {18850, title = {In Situ Characterization and Modeling of Strains near Embedded Electronic Components During Processing and Break-in for Multifunctional Polymer Structures}, booktitle = {Advances in Mathematical Modeling and Experimental Methods for Materials and Structures}, series = {Solid Mechanics and Its Applications}, volume = {168}, year = {2009}, month = {2009///}, pages = {145 - 159}, publisher = {Springer Netherlands}, organization = {Springer Netherlands}, abstract = {Emerging molding concepts, such as in-mold assembly, are enabling electronic structures to be directly embedded in thermoplastic polymers to provide integrated packaging for better protection and a more multifunctional structure in {\textquotedblleft}in-mold assembly processes{\textquotedblright}. During the molding process, stress can develop at the interface of the polymer and embedded electronic component due to shrinkage of the polymer that precipitates fracture or fatigue during the life cycle of the product. Additionally, the interaction between a mold and the polymer melt is altered in a multi-stage molding process where a polymer for superior impact protection can be molded over another polymer that is more compatible with the embedded electronic component. Currently, we do not fully understand the impact of various parameters governing the in-mold assembly process on the residual strains that develop in polymers around embedded electronic components in order to develop process models. Therefore, in this chapter experiments are presented that are designed and executed to measure the strains involved and the manner in which they develop. An in situ open mold experiment is employed using the full-field deformation technique of Digital Image Correlation (DIC) to characterize the displacement and corresponding strain fields that evolve near embedded electronic elements as the polymer shrinks from the molten to the solid state during processing and during break-in of the electronic component.
It was determined that the use of multi-stage molding may reduce the residual stresses in addition to providing superior impact protection. However, there was a higher concentration of strain near the polymer-component interface during break-in due to lower thermal conductivity. Experimental data was consistent with a thermomechanical model up until the point of failure.}, keywords = {engineering}, isbn = {978-90-481-3467-0}, url = {http://www.springerlink.com/content/lh52x2475g7x00k7/abstract/}, author = {Gershon,Alan L. and Gyger,Lawrence S. and Bruck,Hugh A. and Gupta, Satyandra K.}, editor = {Gilat,Rivka and Banks-Sills,Leslie and Gladwell,G. M. L.} } @conference {16317, title = {Incremental covering array failure characterization in large configuration spaces}, booktitle = {Proceedings of the eighteenth international symposium on Software testing and analysis}, year = {2009}, month = {2009///}, pages = {177 - 188}, author = {Fouch{\'e},S. and Cohen,M. B and Porter, Adam} } @conference {13101, title = {Incremental Multiple Kernel Learning for object recognition}, booktitle = {Computer Vision, 2009 IEEE 12th International Conference on}, year = {2009}, month = {2009/10/29/2}, pages = {638 - 645}, abstract = {A good training dataset, representative of the test images expected in a given application, is critical for ensuring good performance of a visual categorization system. Obtaining task specific datasets of visual categories is, however, far more tedious than obtaining a generic dataset of the same classes. We propose an Incremental Multiple Kernel Learning (IMKL) approach to object recognition that initializes on a generic training database and then tunes itself to the classification task at hand. Our system simultaneously updates the training dataset as well as the weights used to combine multiple information sources. We demonstrate our system on a vehicle classification problem in a video stream overlooking a traffic intersection. Our system updates itself with images of vehicles in poses more commonly observed in the scene, as well as with image patches of the background, leading to an increase in performance. A considerable change in the kernel combination weights is observed as the system gathers scene specific training data over time. The system is also seen to adapt itself to the illumination change in the scene as day transitions to night.}, doi = {10.1109/ICCV.2009.5459179}, author = {Kembhavi,Aniruddha and Siddiquie,Behjat and Miezianko,Roland and McCloskey,Scott and Davis, Larry S.} } @article {14478, title = {Index interactions in physical design tuning: modeling, analysis, and applications}, journal = {Proceedings of the VLDB Endowment}, volume = {2}, year = {2009}, month = {2009/08//}, pages = {1234 - 1245}, abstract = {One of the key tasks of a database administrator is to optimize the set of materialized indices with respect to the current workload. To aid administrators in this challenging task, commercial DBMSs provide advisors that recommend a set of indices based on a sample workload. It is left for the administrator to decide which of the recommended indices to materialize and when. This decision requires some knowledge of how the indices benefit the workload, which may be difficult to understand if there are any dependencies or interactions among indices. Unfortunately, advisors do not provide this crucial information as part of the recommendation.
Motivated by this shortcoming, we propose a framework and associated tools that can help an administrator understand the interactions within the recommended set of indices. We formalize the notion of index interactions and develop a novel algorithm to identify the interaction relationships that exist within a set of indices. We present experimental results with a prototype implementation over IBM DB2 that demonstrate the efficiency of our approach. We also describe two new database tuning tools that utilize information about index interactions. The first tool visualizes interactions based on a partitioning of the index-set into non-interacting subsets, and the second tool computes a schedule that materializes the indices over several maintenance windows with maximal overall benefit. In both cases, we provide strong analytical results showing that index interactions can enable enhanced functionality.}, isbn = {2150-8097}, url = {http://dl.acm.org/citation.cfm?id=1687627.1687766}, author = {Schnaitter,Karl and Polyzotis,Neoklis and Getoor, Lise} } @conference {13375, title = {Indexing correlated probabilistic databases}, booktitle = {Proceedings of the 35th SIGMOD international conference on Management of data}, series = {SIGMOD {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {455 - 468}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {With large amounts of correlated probabilistic data being generated in a wide range of application domains including sensor networks, information extraction, event detection etc., effectively managing and querying them has become an important research direction. While there is an exhaustive body of literature on querying independent probabilistic data, supporting efficient queries over large-scale, correlated databases remains a challenge. In this paper, we develop efficient data structures and indexes for supporting inference and decision support queries over such databases. Our proposed hierarchical data structure is suitable both for in-memory and disk-resident databases. We represent the correlations in the probabilistic database using a junction tree over the tuple-existence or attribute-value random variables, and use tree partitioning techniques to build an index structure over it. We show how to efficiently answer inference and aggregation queries using such an index, resulting in orders of magnitude performance benefits in most cases. In addition, we develop novel algorithms for efficiently keeping the index structure up-to-date as changes (inserts, updates) are made to the probabilistic database. We present a comprehensive experimental study illustrating the benefits of our approach to query processing in probabilistic databases.}, keywords = {caching, Indexing, inference queries, junction trees, Probabilistic databases}, isbn = {978-1-60558-551-2}, doi = {10.1145/1559845.1559894}, url = {http://doi.acm.org/10.1145/1559845.1559894}, author = {Kanagal,Bhargav and Deshpande, Amol} } @conference {16275, title = {Inexact Local Alignment Search over Suffix Arrays}, booktitle = {IEEE International Conference on Bioinformatics and Biomedicine, 2009. BIBM {\textquoteright}09}, year = {2009}, month = {2009/11/01/4}, pages = {83 - 87}, publisher = {IEEE}, organization = {IEEE}, abstract = {We describe an algorithm for finding approximate seeds for DNA homology searches. In contrast to previous algorithms that use exact or spaced seeds, our approximate seeds may contain insertions and deletions. 
We present a generalized heuristic for finding such seeds efficiently and prove that the heuristic does not affect sensitivity. We show how to adapt this algorithm to work over the memory efficient suffix array with provably minimal overhead in running time. We demonstrate the effectiveness of our algorithm on two tasks: whole genome alignment of bacteria and alignment of the DNA sequences of 177 genes that are orthologous in human and mouse. We show our algorithm achieves better sensitivity and uses less memory than other commonly used local alignment tools.}, keywords = {bacteria, Bioinformatics, biology computing, Computational Biology, Costs, DNA, DNA homology searches, DNA sequences, Educational institutions, generalized heuristic, genes, Genetics, genome alignment, Genomics, human, inexact local alignment search, inexact seeds, local alignment, local alignment tools, memory efficient suffix array, microorganisms, molecular biophysics, mouse, Organisms, Sensitivity and Specificity, sequences, suffix array, USA Councils}, isbn = {978-0-7695-3885-3}, doi = {10.1109/BIBM.2009.25}, author = {Ghodsi,M. and Pop, Mihai} } @article {13040, title = {The infinite hierarchical factor regression model}, journal = {Arxiv preprint arXiv:0908.0570}, year = {2009}, month = {2009///}, author = {Rai,P. and Daum{\'e}, Hal} } @inbook {17233, title = {Information Visualization}, booktitle = {Human-Computer Interaction}, volume = {20093960}, year = {2009}, month = {2009/03/02/}, pages = {181 - 215}, publisher = {CRC Press}, organization = {CRC Press}, isbn = {978-1-4200-8885-4, 978-1-4200-8886-1}, url = {http://www.crcnetbase.com/doi/abs/10.1201/9781420088861.ch10}, author = {Card,Stuart}, editor = {Sears,Andrew and Jacko,Julie} } @conference {15491, title = {An Initial Characterization of Industrial Graphical User Interface Systems}, booktitle = {Software Testing Verification and Validation, 2009. ICST {\textquoteright}09. International Conference on}, year = {2009}, month = {2009/04//}, pages = {11 - 20}, abstract = {To date we have developed and applied numerous model-based GUI testing techniques; however, we are unable to provide definitive improvement schemes to real-world GUI test planners, as our data was derived from open source applications, small compared to industrial systems. This paper presents a study of three industrial GUI-based software systems developed at ABB, including data on classified defects detected during late-phase testing and customer usage, test suites, and source code change metrics. The results show that (1) 50\% of the defects found through the GUI are categorized as data access and handling, control flow and sequencing, correctness, and processing defects, (2) system crashes exposed defects 12-19\% of the time, and (3) GUI and non-GUI components are constructed differently, in terms of source code metrics.}, keywords = {Graphical user interfaces, GUI-based software systems, industrial graphical user interface systems, model-based GUI testing techniques, program testing, software metrics, source code change metrics}, doi = {10.1109/ICST.2009.11}, author = {Brooks,P.A. and Robinson,B.P. and Memon, Atif M.} } @article {19460, title = {The Ins and Outs of Home Networking: The Case for Useful and Usable Domestic Networking}, journal = {ACM Trans. Comput.-Hum.
Interact.}, volume = {16}, year = {2009}, month = {2009/06//}, pages = {8:1 - 8:28}, abstract = {Householders are increasingly adopting home networking as a solution to the demands created by the presence of multiple computers, devices, and the desire to access the Internet. However, current network solutions are derived from the world of work (and initially the military) and provide poor support for the needs of the home. We present the key findings to emerge from empirical studies of home networks in the UK and US. The studies reveal two key kinds of work that effective home networking relies upon: one, the technical work of setting up and maintaining the home network, and the other, the collaborative and socially organized work of the home which the network is embedded in and supports. The two are thoroughly intertwined and rely upon one another for their realization, yet neither is adequately supported by current networking technologies and applications. Explication of the {\textquotedblleft}work to make the home network work{\textquotedblright} opens up the design space for the continued integration of the home network in domestic life and elaboration of future support. Key issues for development include the development of networking facilities that do not require advanced networking knowledge, that are flexible and support the local social order of the home and the evolution of its routines, and which ultimately make the home network visible and accountable to household members.}, keywords = {home networking, Human computer interaction}, isbn = {1073-0516}, url = {http://doi.acm.org/10.1145/1534903.1534905}, author = {Grinter, Rebecca E. and Edwards, W. Keith and Marshini Chetty and Poole, Erika S. and Sung, Ja-Young and Yang, Jeonghwa and Crabtree, Andy and Tolmie, Peter and Rodden, Tom and Greenhalgh, Chris and Benford, Steve} } @article {18751, title = {Integrated product and process design for a flapping wing drive mechanism}, journal = {Journal of Mechanical Design}, volume = {131}, year = {2009}, month = {2009///}, pages = {061006 - 061006}, author = {Bejgerowski,W. and Ananthanarayanan,A. and Mueller,D. and Gupta,S.K.} } @article {17238, title = {Integrating Statistics and Visualization for Exploratory Power: From Long-Term Case Studies to Design Guidelines}, journal = {IEEE Computer Graphics and Applications}, volume = {29}, year = {2009}, month = {2009/06//May}, pages = {39 - 51}, abstract = {Evaluating visual-analytics systems is challenging because laboratory-based controlled experiments might not effectively represent analytical tasks. One such system, Social Action, integrates statistics and visualization in an interactive exploratory tool for social network analysis. This article describes results from long-term case studies with domain experts and extends established design goals for information visualization.}, keywords = {case studies, Control systems, Data analysis, data mining, data visualisation, Data visualization, data-mining, design guidelines, Employment, exploration, Filters, Guidelines, Information Visualization, insights, laboratory-based controlled experiments, Performance analysis, social network analysis, Social network services, social networking (online), social networks, SocialAction, statistical analysis, Statistics, visual analytics, visual-analytics systems, Visualization}, isbn = {0272-1716}, doi = {10.1109/MCG.2009.44}, author = {Perer,A. 
and Shneiderman, Ben} } @mastersthesis {18346, title = {Interacting with computers using images for search and automation}, year = {2009}, month = {2009///}, school = {Massachusetts Institute of Technology}, address = {Cambridge, MA, USA}, abstract = {A picture is worth a thousand words. Images have been used extensively by us to interact with other human beings to solve certain problems, for example, showing an image of a bird to a bird expert to identify its species or giving an image of a cosmetic product to a husband to help purchase the right product. However, images have been rarely used to support similar interactions with computers. In this thesis, I present a series of useful applications for users to interact with computers using images and develop several computer vision algorithms necessary to support such interaction. On the application side, I examine two functional roles of images in human-computer interactions: search and automation. For search, I develop systems for users to obtain useful information about a location or a consumer product by taking its picture using a camera phone, to search online documentation about a GUI by taking its screenshot, and to ask general questions using pictures in a community-based QA service. For automation, I design a visual scripting system to allow end-users to insert screenshots of GUI elements directly into program statements. On the computer vision side, I describe the Adaptive Vocabulary Tree algorithm for indexing and searching a large and dynamic collection of images, the Dynamic Visual Category Learning algorithm for training and updating a set of dynamically changing object categories, the Vocabulary Tree SVM algorithm for fast object recognition by approximating the margins of a set of SVM classifiers efficiently, and the Multiclass Branch-and-Bound Window Search algorithm for simultaneously estimating the optimal location and label of an object in a large input image. Finally, I demonstrate the usability of each proposed application with user studies and the technical performance of each algorithm with a series of experiments with large datasets. }, author = {Tom Yeh} } @conference {17242, title = {Interacting with eHealth: towards grand challenges for HCI}, booktitle = {Proceedings of the 27th international conference extended abstracts on Human factors in computing systems}, year = {2009}, month = {2009///}, pages = {3309 - 3312}, abstract = {While health records are increasingly stored electronically, we, as citizens, have little access to this data about ourselves. We are not used to thinking of these official records either as ours or as useful to us. We increasingly turn to the Web, however, to query any ache, pain or health goal we may have before consulting with health care professionals. Likewise, for proactive health care such as nutrition or fitness, or to find fellow-sufferers for post diagnosis support, we turn to online resources. There is a potential disconnect between points at which professional and lay eHealth data and resources intersect for preventative or proactive health care. Such gaps in information sharing may have direct impact on practices we decide to take up, the care we seek, or the support professionals offer. In this panel, we consider several places within proactive, preventative health care in which HCI has a role in enhancing health knowledge discovery and health support interaction.
Our goal is to demonstrate how now is the time for eHealth to come to the forefront of the HCI research agenda.}, author = {Andr{\'e},P. and White,R. and Tan,D. and Berners-Lee,T. and Consolvo,S. and Jacobs,R. and Kohane,I. and Le Dantec,C.A. and Mamykina,L. and Marsden,G.} } @article {14976, title = {Interactive direct volume rendering on desktop multicore processors}, journal = {Concurrency and Computation: Practice and Experience}, volume = {21}, year = {2009}, month = {2009/09/10/}, pages = {2199 - 2212}, abstract = {We present a new multithreaded implementation for the computationally demanding direct volume rendering (DVR) of volumetric data sets on desktop multicore processors using ray casting. The new implementation achieves interactive rendering of very large volumes, even on high resolution screens. Our implementation is based on a new algorithm that combines an object-order traversal of the volumetric data followed by a focused ray casting. Using a very compact data structure, our method starts with a quick association of data subcubes with fine-grain screen tiles appearing along the viewing direction in front-to-back order. The next stage uses very limited ray casting on the generated sets of subcubes while skipping empty or transparent space and applying early ray termination in an effective way. Our multithreaded implementation makes use of new dynamic techniques to ensure effective memory management and load balancing. Our software enables a user to interactively explore large data sets through DVR while arbitrarily specifying a 2D transfer function. We test our system on a wide variety of well-known volumetric data sets on a two-processor Clovertown platform, each consisting of a Quad-Core 1.86 GHz Intel Xeon Processor. Our experimental tests demonstrate DVR at interactive rates for the largest data sets that can fit in the main memory on our platform. These tests also indicate a high degree of scalability, excellent load balancing, and efficient memory management across the data sets used. 
Copyright {\textcopyright} 2009 John Wiley \& Sons, Ltd.}, keywords = {direct volume rendering, multicore processors, multithreaded algorithms, Parallel algorithms, volume visualization}, isbn = {1532-0634}, doi = {10.1002/cpe.1485}, url = {http://onlinelibrary.wiley.com/doi/10.1002/cpe.1485/abstract?userIsAuthenticated=false\&deniedAccessCustomisedMessage=}, author = {Wang,Qin and JaJa, Joseph F.} } @inbook {13797, title = {Interlingual annotation of multilingual text corpora and FrameNet}, booktitle = {Multilingual FrameNets in Computational Lexicography}, volume = {200}, year = {2009}, month = {2009/07/14/}, pages = {287 - 318}, publisher = {Mouton de Gruyter}, organization = {Mouton de Gruyter}, address = {Berlin, New York}, isbn = {978-3-11-021296-9, 978-3-11-021297-6}, url = {http://www.degruyter.com/view/books/9783110212976/9783110212976.4.287/9783110212976.4.287.xml}, author = {Farwell,David and Dorr, Bonnie J and Habash,Nizar and Helmreich,Stephen and Hovy,Eduard and Green,Rebecca and Levin,Lori and Miller,Keith and Mitamura,Teruko and Rambow,Owen and Reeder,Flo and Siddharthan,Advaith}, editor = {Bisang,Walter and Hock,Hans Henrich and Winter,Werner and Boas,Hans C.} } @article {18230, title = {Intrinsic Sensor Noise Features for Forensic Analysis on Scanners and Scanned Images}, journal = {Information Forensics and Security, IEEE Transactions on}, volume = {4}, year = {2009}, month = {2009/09//}, pages = {476 - 491}, abstract = {A large portion of digital images available today are acquired using digital cameras or scanners. While cameras provide digital reproduction of natural scenes, scanners are often used to capture hard-copy art in a more controlled environment. In this paper, new techniques for nonintrusive scanner forensics that utilize intrinsic sensor noise features are proposed to verify the source and integrity of digital scanned images. Scanning noise is analyzed from several aspects using only scanned image samples, including through image denoising, wavelet analysis, and neighborhood prediction, and statistical features are then obtained from each characterization. Based on the proposed statistical features of scanning noise, a robust scanner identifier is constructed to determine the model/brand of the scanner used to capture a scanned image. Utilizing these noise features, we extend the scope of acquisition forensics to differentiating scanned images from camera-taken photographs and computer-generated graphics. The proposed noise features also enable tampering forensics to detect postprocessing operations on scanned images. Experimental results are presented to demonstrate the effectiveness of employing the proposed noise features for performing various forensic analyses on scanners and scanned images.}, keywords = {analysis;image, camera-taken, cameras;digital, denoising;intrinsic, denoising;wavelet, features;natural, forensics;scanned, graphics;digital, identifier;wavelet, images;digital, images;scanner, NOISE, photographs;computer-generated, prediction;nonintrusive, reproduction;forensic, scanner, scenes;neighborhood, sensor, transforms;}, isbn = {1556-6013}, doi = {10.1109/TIFS.2009.2026458}, author = {Gou,Hongmei and Swaminathan,A. and M. Wu} } @conference {15485, title = {Introducing a test suite similarity metric for event sequence-based test cases}, booktitle = {Software Maintenance, 2009. ICSM 2009.
IEEE International Conference on}, year = {2009}, month = {2009/09//}, pages = {243 - 252}, abstract = {Most of today{\textquoteright}s event driven software (EDS) systems are tested using test cases that are carefully constructed as sequences of events; they test the execution of an event in the context of its preceding events. Because sizes of these test suites can be extremely large, researchers have developed techniques, such as reduction and minimization, to obtain test suites that are {\textquotedblleft}similar{\textquotedblright} to the original test suite, but smaller. Existing similarity metrics mostly use code coverage; they do not consider the contextual relationships between events. Consequently, reduction based on such metrics may eliminate desirable test cases. In this paper, we present a new parameterized metric, CONTeSSi(n), which uses the context of n preceding events in test cases to develop a new context-aware notion of test suite similarity for EDS. This metric is defined and evaluated by comparing four test suites for each of four open source applications. Our results show that CONTeSSi(n) is a better indicator of the similarity of EDS test suites than existing metrics.}, keywords = {event driven software systems, event sequence-based test cases, open source systems, program testing, public domain software, software metrics, Software testing, test suite similarity metric}, doi = {10.1109/ICSM.2009.5306305}, author = {Brooks,P.A. and Memon, Atif M.} } @conference {15913, title = {Inverse halftoning using a shearlet representation}, booktitle = {Proceedings of SPIE}, volume = {7446}, year = {2009}, month = {2009///}, pages = {74460C - 74460C}, author = {Easley,G. R and Patel, Vishal M. and Healy Jr,D.M.} } @conference {19455, title = {It{\textquoteright}s Not Easy Being Green: Understanding Home Computer Power Management}, booktitle = {SIGCHI {\textquoteright}09}, series = {CHI {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {1033 - 1042}, publisher = {ACM}, organization = {ACM}, abstract = {Although domestic computer use is increasing, most efforts to reduce energy use through improved power management have focused on computers in the workplace. We studied 20 households to understand how people use power management strategies on their home computers. We saw that computers in the home, particularly desktop computers, are left on much more than they are actively used, suggesting opportunities for economic and energy savings. However, for most of our participants, the economic incentives were too minor to motivate them to turn off devices when not in use, especially given other frustrations such as long boot up times. We suggest research directions for home computer power management that could help users be more green without having to dramatically change their home computing habits.}, keywords = {home computer use, power management, Sustainability}, isbn = {978-1-60558-246-7}, url = {http://doi.acm.org/10.1145/1518701.1518860}, author = {Marshini Chetty and Brush, A.J. Bernheim and Meyers, Brian R. and Johns, Paul} } @article {13297, title = {Identification of Form Features in Non-Manifold Shapes Through a Decomposition Approach}, journal = {ASME Conference Proceedings}, volume = {2008}, year = {2008}, month = {2008///}, pages = {293 - 300}, abstract = {In Computer-Aided Design (CAD), the idealization process reduces the complexity of the model of a solid object, thus resulting in a simplified representation which captures only the essential elements of its shape.
Form feature extraction is a relevant issue for recovering semantic information from an idealized object model, since such information is typically lost during the idealization process. An idealized model is usually composed of non-manifold parts, whose topology carries significant structural information about the object shape. To this aim, we define form features for non-manifold objects by extending the taxonomy of form features provided by STEP [19]. We describe an approach for the identification of features, which interact with non-manifold singularities in the object, based on a decomposition of a non-manifold object into nearly manifold components and on the properties of the graph representing such decomposition.}, doi = {10.1115/ESDA2008-59566}, url = {http://link.aip.org/link/abstract/ASMECP/v2008/i48371/p293/s1}, author = {De Floriani, Leila and Hui,Annie and Giannini,Franca} } @article {19195, title = {ilearn on the iphone: Real-time human activity classification on commodity mobile phones}, journal = {University of Washington CSE Tech Report UW-CSE-08-04-02}, year = {2008}, month = {2008}, author = {Saponas,T. and Lester,J. and Jon Froehlich and Fogarty,J. and Landay,J.} } @conference {18224, title = {Image acquisition forensics: Forensic analysis to identify imaging source}, booktitle = {Acoustics, Speech and Signal Processing, 2008. ICASSP 2008. IEEE International Conference on}, year = {2008}, month = {2008/04/31/4}, pages = {1657 - 1660}, abstract = {With widespread availability of digital images and easy-to-use image editing software, the origin and integrity of digital images have become a serious concern. This paper introduces the problem of image acquisition forensics and proposes a fusion of a set of signal processing features to identify the source of digital images. Our results show that the devices{\textquoteright} color interpolation coefficients and noise statistics can jointly serve as good forensic features to help accurately trace the origin of the input image to its production process and to differentiate between images produced by cameras, cell phone cameras, scanners, and computer graphics. Further, the proposed features can also be extended to determining the brand and model of the device. Thus, the techniques introduced in this work provide a unified framework for image acquisition forensics.}, keywords = {ACQUISITION, acquisition;image, analysis;, analysis;image, analysis;interpolation;statistical, cameras;color, cell, coefficients;computer, colour, editing, forensics;image, graphics;digital, identification;noise, images;forensic, Interpolation, phone, processing;data, softwares;imaging, source, statistics;scanners;signal}, doi = {10.1109/ICASSP.2008.4517945}, author = {McKay,C. and Swaminathan,A. and Gou,Hongmei and M. Wu} } @conference {18477, title = {Imaging concert hall acoustics using visual and audio cameras}, booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing, 2008. ICASSP 2008}, year = {2008}, month = {2008///}, pages = {5284 - 5287}, publisher = {IEEE}, organization = {IEEE}, abstract = {Using a real-time audio camera that uses the output of a spherical microphone array beamformer steered in all directions to create central-projection acoustic intensity images, we present a technique to measure the acoustics of rooms and halls. A panoramic mosaiced visual image of the space is also created.
Since both the visual and the audio camera images are central projection, registration of the acquired audio and video images can be performed using standard computer vision techniques. We describe the technique, and apply it to examine the relation between acoustical features and architectural details of the Dekelbaum concert hall at the Clarice Smith Performing Arts Center in College Park, MD.}, keywords = {Acoustic imaging, acoustic intensity images, acoustic measurement, Acoustic measurements, Acoustic scattering, acoustic signal processing, acoustical camera, acoustical scene analysis, acquired audio registration, audio cameras, audio signal processing, CAMERAS, central projection, Computer vision, Educational institutions, HUMANS, image registration, Image segmentation, imaging concert hall acoustics, Layout, microphone arrays, panoramic mosaiced visual image, Raman scattering, reverberation, room acoustics, spherical microphone array beamformer, spherical microphone arrays, video image registration, visual cameras}, isbn = {978-1-4244-1483-3}, doi = {10.1109/ICASSP.2008.4518852}, author = {O{\textquoteright}Donovan,A. and Duraiswami, Ramani and Zotkin,Dmitry N} } @article {17995, title = {An Immediate Concurrent Execution (ICE) Abstraction Proposal for Many-Cores}, journal = {Computer Science Research Works}, year = {2008}, month = {2008/12//}, abstract = {Settling on a simple abstraction that programmers aim at, and hardware and software systems people enable and support, is an important step towards convergence to a robust many-core platform. The current paper: (i) advocates incorporating a quest for the simplest possible abstraction in the debate on the future of many-core computers, (ii) suggests {\textquotedblleft}immediate concurrent execution (ICE){\textquotedblright} as a new abstraction, and (iii) argues that an XMT architecture is one possible demonstration of ICE providing an easy-to-program general-purpose many-core platform. }, keywords = {abstraction, many-cores, parallelism, XMT architecture}, url = {http://drum.lib.umd.edu/handle/1903/8694}, author = {Vishkin, Uzi} } @conference {15694, title = {Immunity-Based Epidemic Routing in Intermittent Networks}, booktitle = {5th Annual IEEE Communications Society Conference on Sensor, Mesh and Ad Hoc Communications and Networks, 2008. SECON {\textquoteright}08}, year = {2008}, month = {2008/06/16/20}, pages = {609 - 611}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this research, we propose to modify and extend epidemic routing used in intermittent networks. In particular, we propose to include immunity-based information disseminated in the reverse direction once messages get delivered to their destination. The goal is to design a more efficient routing protocol in terms of resource utilization. The idea is to analyze and evaluate the network performance using an immunity scheme in the context of epidemic routing and its variants. The reverse dissemination of such information requires minimal resources and the tradeoff in timely purging of delivered messages can be significant.
We are using ns2 to implement a detailed simulation of the proposed immunity-based epidemic routing.}, keywords = {Analytical models, Delay, delivered messages, Disruption tolerant networking, Educational institutions, immunity-based epidemic routing, information dissemination, intermittent networks, Mobile ad hoc networks, Network topology, Performance analysis, Resource management, resource utilization, routing protocol, routing protocols, telecommunication network topology}, isbn = {978-1-4244-1777-3}, doi = {10.1109/SAHCN.2008.86}, author = {Mundur, Padma and Seligman,M. and Jin Na Lee} } @article {15761, title = {Implementing an Interior Point Method for Linear Programs on a CPU-GPU System}, journal = {Electronic Transactions on Numerical Analysis}, volume = {28}, year = {2008}, month = {2008///}, pages = {174 - 189}, url = {http://etna.mcs.kent.edu/vol.28.2007-2008/pp174-189.dir/pp174-189.pdf}, author = {Jung,Jin Hyuk and O{\textquoteright}Leary, Dianne P.} } @inbook {14700, title = {Implicit Flows: Can{\textquoteright}t Live with {\textquoteleft}Em, Can{\textquoteright}t Live without {\textquoteleft}Em}, booktitle = {Information Systems Security}, series = {Lecture Notes in Computer Science}, volume = {5352}, year = {2008}, month = {2008///}, pages = {56 - 70}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Verifying that programs trusted to enforce security actually do so is a practical concern for programmers and administrators. However, there is a disconnect between the kinds of tools that have been successfully applied to real software systems (such as taint mode in Perl and Ruby), and information-flow compilers that enforce a variant of the stronger security property of noninterference. Tools that have been successfully used to find security violations have focused on explicit flows of information, where high-security information is directly leaked to output. Analysis tools that enforce noninterference also prevent implicit flows of information, where high-security information can be inferred from a program{\textquoteright}s flow of control. However, these tools have seen little use in practice, despite the stronger guarantees that they provide. To better understand why, this paper experimentally investigates the explicit and implicit flows identified by the standard algorithm for establishing noninterference. When applied to implementations of authentication and cryptographic functions, the standard algorithm discovers many real implicit flows of information, but also reports an extremely high number of false alarms, most of which are due to conservative handling of unchecked exceptions (e.g., null pointer exceptions). After a careful analysis of all sources of true and false alarms, due to both implicit and explicit flows, the paper concludes with some ideas to improve the false alarm rate, toward making stronger security analysis more practical.}, isbn = {978-3-540-89861-0}, url = {http://dx.doi.org/10.1007/978-3-540-89862-7_4}, author = {King,Dave and Hicks,Boniface and Hicks, Michael W. and Jaeger,Trent}, editor = {Sekar,R.
and Pujari,Arun} } @conference {17601, title = {Improved algorithmic versions of the Lov{\'a}sz Local Lemma}, booktitle = {Proceedings of the nineteenth annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {611 - 620}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, abstract = {The Lov{\'a}sz Local Lemma is a powerful tool in combinatorics and computer science. The original version of the lemma was nonconstructive, and efficient algorithmic versions have been developed by Beck, Alon, Molloy \& Reed, et al. In particular, the work of Molloy \& Reed lets us automatically extract efficient versions of essentially any application of the symmetric version of the Local Lemma. However, with some exceptions, there is a significant gap between what one can prove using the original Lemma nonconstructively, and what is possible through these efficient versions; also, some of these algorithmic versions run in super-polynomial time. Here, we lessen this gap, and improve the running time of all these applications (which cover all applications in the Molloy \& Reed framework) to polynomial. We also improve upon the parallel algorithmic version of the Local Lemma for hypergraph coloring due to Alon, by allowing noticeably more overlap among the edges.}, url = {http://dl.acm.org/citation.cfm?id=1347082.1347150}, author = {Srinivasan, Aravind} } @article {15550, title = {Improved approximation bounds for planar point pattern matching}, journal = {Algorithmica}, volume = {50}, year = {2008}, month = {2008///}, pages = {175 - 207}, abstract = {We analyze the performance of simple algorithms for matching two planar point sets under rigid transformations so as to minimize the directed Hausdorff distance between the sets. This is a well studied problem in computational geometry. Goodrich, Mitchell, and Orletsky presented a very simple approximation algorithm for this problem, which computes transformations based on aligning pairs of points. They showed that their algorithm achieves an approximation ratio of 4. We introduce a modification to their algorithm, which is based on aligning midpoints rather than endpoints. This modification has the same simplicity and running time as theirs, and we show that it achieves a better approximation ratio of roughly 3.14. We also analyze the approximation ratio in terms of an instance-specific parameter that is based on the ratio of the diameter of the pattern set to the optimum Hausdorff distance. We show that as this ratio increases (as is common in practical applications) the approximation ratio approaches 3 in the limit. We also investigate the performance of the algorithm by Goodrich et al. as a function of this ratio, and present nearly matching lower bounds on the approximation ratios of both algorithms.}, doi = {10.1007/s00453-007-9059-9}, author = {Cho,M. and Mount, Dave} } @conference {15064, title = {An improved mean shift tracking method based on nonparametric clustering and adaptive bandwidth}, booktitle = {Machine Learning and Cybernetics, 2008 International Conference on}, volume = {5}, year = {2008}, month = {2008/07//}, pages = {2779 - 2784}, abstract = {An improved mean shift method for object tracking based on nonparametric clustering and adaptive bandwidth is presented in this paper.
Based on partitioning the color space of a tracked object by using a modified nonparametric clustering, an appearance model of the tracked object is built. It captures both the color information and spatial layout of the tracked object. The similarity measure between the target model and the target candidate is derived from the Bhattacharyya coefficient. The kernel bandwidth parameters are automatically selected by maximizing the lower bound of a log-likelihood function, which is derived from a kernel density estimate using the bandwidth matrix and the modified weight function. The experimental results show that the method can converge in an average of 2.6 iterations per frame.}, keywords = {adaptive bandwidth, appearance model, bandwidth matrix, Bhattacharyya coefficient, color information, color space partitioning, image colour analysis, iterative procedure, kernel bandwidth parameter, kernel density estimate, log-likelihood function, mean shift tracking method, modified weight function, nonparametric clustering, Object detection, object representation, object tracking, pattern clustering, similarity measure, spatial layout, target candidate, target model, tracking}, doi = {10.1109/ICMLC.2008.4620880}, author = {Zhuolin Jiang and Li,Shao-Fa and Jia,Xi-Ping and Zhu,Hong-Li} } @article {15100, title = {Improving the round complexity of VSS in point-to-point networks}, journal = {Automata, Languages and Programming}, year = {2008}, month = {2008///}, pages = {499 - 510}, abstract = {We revisit the following question: what is the optimal round complexity of verifiable secret sharing (VSS)? We focus here on the case of perfectly-secure VSS where the number of corrupted parties t satisfies t < n/3, with n being the total number of parties. Work of Gennaro et al. (STOC 2001) and Fitzi et al. (TCC 2006) shows that, assuming a broadcast channel, 3 rounds are necessary and sufficient for efficient VSS. The efficient 3-round protocol of Fitzi et al., however, treats the broadcast channel as being available {\textquotedblleft}for free{\textquotedblright} and does not attempt to minimize its usage. This approach leads to relatively poor round complexity when protocols are compiled for a point-to-point network. We show here a VSS protocol that is simultaneously optimal in terms of both the number of rounds and the number of invocations of broadcast. Our protocol also has a certain {\textquotedblleft}2-level sharing{\textquotedblright} property that makes it useful for constructing protocols for general secure computation. }, doi = {10.1007/978-3-540-70583-3_41}, author = {Katz, Jonathan and Koo,C. Y and Kumaresan,R.} } @conference {17766, title = {Inconsistency management policies}, booktitle = {Proc. 2008 Intl. Conference on Knowledge Representation and Reasoning (KR 2008)}, year = {2008}, month = {2008///}, abstract = {Though there is much work on how inconsistency in databases should be managed, there is good reason to believe that end users will want to bring their domain expertise and needs to bear in how to deal with inconsistencies. In this paper, we propose the concept of inconsistency management policies (IMPs). We show that IMPs are rich enough to specify many types of inconsistency management methods proposed previously, but provide end users with tools that allow them to use the policies that they want. Our policies are also capable of allowing inconsistency to persist in the database or of eliminating more than a minimal subset of tuples involved in the inconsistency.
We present a formal axiomatic definition of IMPs and present appropriate complexity results, together with results linking different IMPs together. We extend the relational algebra (RA) to incorporate IMPs and present theoretical results showing how IMPs and classical RA operators interact. }, author = {Martinez,M. V and Parisi,F. and Pugliese, A. and Simari,G. I and V.S. Subrahmanian} } @conference {16843, title = {Indexing planar point quartets via geometric attributes}, booktitle = {Proceedings of the 16th ACM SIGSPATIAL international conference on Advances in geographic information systems}, series = {GIS {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {71:1 - 71:4}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {An index is devised to support position-independent search for images containing quartets of icons. Point quartets exist that do not unambiguously map to quadrilaterals; however, four points do unambiguously determine a set of six interpoint line segments. Values for the "size", "shape", and "orientation" attributes of an icon quartet can be derived as functions of this interpoint line set, and can be used to construct a point-based index, in which each point quartet maps to a single point in the resulting hyperdimensional index space. Orientation can be represented by a single, spatially closed dimension. However, assignment of a reference direction for quartets possessing a k-fold rotational symmetry presents a significant challenge. Methods are described for determining shape and orientation attributes for point quartets, and for mapping these attributes onto a set of attribute axes to form a combined index. The orientation computation supplies, as a byproduct, one component of the shape attribute. All attributes are continuous with respect to small variations in the indexed point quartets.}, isbn = {978-1-60558-323-5}, doi = {10.1145/1463434.1463516}, url = {http://doi.acm.org/10.1145/1463434.1463516}, author = {Cranston,Charles B. and Samet, Hanan} } @article {12829, title = {An instrumentation-based approach to controller model validation}, journal = {Model-Driven Development of Reliable Automotive Services}, year = {2008}, month = {2008///}, pages = {84 - 97}, author = {Cleaveland, Rance and Smolka,S. and Sims,S.} } @conference {12240, title = {Integrating categorical resource types into a P2P desktop grid system}, booktitle = {Grid Computing, 2008 9th IEEE/ACM International Conference on}, year = {2008}, month = {2008/10/29/1}, pages = {284 - 291}, abstract = {We describe and evaluate a set of protocols that implement a distributed, decentralized desktop grid. Incoming jobs are matched with system nodes through proximity in an N-dimensional resource space. This work improves on prior work by (1) efficiently accommodating node and job characterizations that include both continuous and categorical resource types, and (2) scaling gracefully to large system sizes even with highly non-uniform distributions of job and node types.
We use extensive simulation results to show that the resulting system handles both continuous and categorical constraints efficiently, and that the new scalability techniques are effective.}, keywords = {computing;peer-to-peer, computing;protocols;, computing;protocols;scalability, desktop, Grid, grid;distributed, grid;peer-to-peer, N-dimensional, resource, space;P2P, system;categorical, technique;grid, types;decentralized}, doi = {10.1109/GRID.2008.4662810}, author = {Kim,Jik-Soo and Nam,Beomseok and Marsh,M. and Keleher,P. and Bhattacharjee, Bobby and Sussman, Alan} } @conference {17239, title = {Integrating statistics and visualization: case studies of gaining clarity during exploratory data analysis}, booktitle = {Proceedings of the twenty-sixth annual SIGCHI conference on Human factors in computing systems}, series = {CHI {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {265 - 274}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Although both statistical methods and visualizations have been used by network analysts, exploratory data analysis remains a challenge. We propose that a tight integration of these technologies in an interactive exploratory tool could dramatically speed insight development. To test the power of this integrated approach, we created a novel social network analysis tool, SocialAction, and conducted four long-term case studies with domain experts, each working on unique data sets with unique problems. The structured replicated case studies show that the integrated approach in SocialAction led to significant discoveries by a political analyst, a bibliometrician, a healthcare consultant, and a counter-terrorism researcher. Our contributions demonstrate that the tight integration of statistics and visualizations improves exploratory data analysis, and that our evaluation methodology for long-term case studies captures the research strategies of data analysts.}, keywords = {case studies, Evaluation, exploratory data analysis, Information Visualization, social networks, Statistics}, isbn = {978-1-60558-011-1}, doi = {10.1145/1357054.1357101}, url = {http://doi.acm.org/10.1145/1357054.1357101}, author = {Perer,Adam and Shneiderman, Ben} } @article {16068, title = {Interactive auditory data exploration: A framework and evaluation with geo-referenced data sonification}, journal = {ACM Transactions on Computer-Human Interaction}, year = {2008}, month = {2008///}, abstract = {We describe an Action-by-Design-Component (ADC) framework to guide auditory interface designers for exploratory data analysis. The framework characterizes data interaction in the auditory mode as a set of Auditory Information Seeking Actions (AISAs). Contrasting AISAs with actions in visualizations, the framework also discusses design considerations for a set of Design Components to support AISAs. Applying the framework to geo-referenced data, we systematically explored and evaluated its design space. A data exploration tool, iSonic, was built for blind users. In depth case studies with 7 blind users, with over 42 hours of data collection, showed that iSonic enabled them to find facts and discover trends of geo-referenced data, even in unfamiliar geographical contexts, without special devices. The results also showed that blind users dramatically benefited from the rich set of task-oriented actions (AISAs) and the use of multiple highly coordinated data views provided by the ADC framework. 
Some widely used techniques in visualization, with appropriate adaptation, also work in the auditory mode. The application of the framework to scatterplots shows that the framework can be generalized and lead to the design of a unified auditory workspace for general exploratory data analysis. Readers can view a supplementary video demonstration of iSonic by visiting www.cs.umd.edu/hcil/iSonic/}, author = {Zhao,H. and Plaisant, Catherine and Shneiderman, Ben and Lazar,J.} } @article {17250, title = {Interactive Entity Resolution in Relational Data: A Visual Analytic Tool and Its Evaluation}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {14}, year = {2008}, month = {2008/10//Sept}, pages = {999 - 1014}, abstract = {Databases often contain uncertain and imprecise references to real-world entities. Entity resolution, the process of reconciling multiple references to underlying real-world entities, is an important data cleaning process required before accurate visualization or analysis of the data is possible. In many cases, in addition to noisy data describing entities, there is data describing the relationships among the entities. This relational data is important during the entity resolution process; it is useful both for the algorithms which determine likely database references to be resolved and for visual analytic tools which support the entity resolution process. In this paper, we introduce a novel user interface, D-Dupe, for interactive entity resolution in relational data. D-Dupe effectively combines relational entity resolution algorithms with a novel network visualization that enables users to make use of an entity{\textquoteright}s relational context for making resolution decisions. Since resolution decisions often are interdependent, D-Dupe facilitates understanding this complex process through animations which highlight combined inferences and a history mechanism which allows users to inspect chains of resolution decisions. An empirical study with 12 users confirmed the benefits of the relational context visualization on the performance of entity resolution tasks in relational data in terms of time as well as users{\textquoteright} confidence and satisfaction.}, keywords = {algorithms, Computer Graphics, D-Dupe, data visualisation, database management systems, Databases, Factual, graphical user interface, Graphical user interfaces, human-centered computing, Image Interpretation, Computer-Assisted, Information Storage and Retrieval, Information Visualization, interactive entity resolution, relational context visualization, Relational databases, relational entity resolution algorithm, User interfaces, user-centered design, User-Computer Interface, visual analytic tool}, isbn = {1077-2626}, doi = {10.1109/TVCG.2008.55}, author = {Kang,Hyunmo and Getoor, Lise and Shneiderman, Ben and Bilgic,M. and Licamele,L.} } @article {14977, title = {Interactive High-Resolution Isosurface Ray Casting on Multicore Processors}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {14}, year = {2008}, month = {2008/06//may}, pages = {603 - 614}, abstract = {We present a new method for the interactive rendering of isosurfaces using ray casting on multicore processors. 
This method consists of a combination of an object-order traversal that coarsely identifies possible candidate three-dimensional (3D) data blocks for each small set of contiguous pixels and an isosurface ray casting strategy tailored for the resulting limited-size lists of candidate 3D data blocks. Our implementation scheme results in a compact indexing structure and makes careful use of multithreading and memory management environments commonly present in multicore processors. Although static screen partitioning is widely used in the literature, our scheme starts with an image partitioning for the initial stage and then performs dynamic allocation of groups of ray casting tasks among the different threads to ensure almost equal loads among the different cores while maintaining spatial locality. We also pay particular attention to the overhead incurred by moving the data across the different levels of the memory hierarchy. We test our system on a two-processor Clovertown platform, each consisting of a Quad-Core 1.86-GHz Intel Xeon Processor and present detailed experimental results for a number of widely different benchmarks. We show that our system is efficient and scalable and achieves high cache performance and excellent load balancing, resulting in an overall performance that is superior to any of the previous algorithms. In fact, we achieve interactive isosurface rendering on a screen with $1024^2$ resolution for all the data sets tested up to the maximum size that can fit in the main memory of our platform.}, keywords = {compact indexing structure;interactive high-resolution isosurface ray casting;interactive isosurface rendering;memory management;multicore processor;multithreading;object-order traversal;static screen partitioning;multi-threading;multiprocessing systems;r, Computer-Assisted;Imaging, Computer-Assisted;User-Computer Interface;, Three-Dimensional;Information Storage and Retrieval;Reproducibility of Results;Sensitivity and Specificity;Signal Processing}, isbn = {1077-2626}, doi = {10.1109/TVCG.2007.70630}, author = {Wang,Qin and JaJa, Joseph F.} } @book {15195, title = {Introduction to Modern Cryptography}, year = {2008}, month = {2008///}, publisher = {CRC Press}, organization = {CRC Press}, abstract = {Cryptography plays a key role in ensuring the privacy and integrity of data and the security of computer networks. Introduction to Modern Cryptography provides a rigorous yet accessible treatment of modern cryptography, with a focus on formal definitions, precise assumptions, and rigorous proofs. The authors introduce the core principles of modern cryptography, including the modern, computational approach to security that overcomes the limitations of perfect secrecy. An extensive treatment of private-key encryption and message authentication follows. The authors also illustrate design principles for block ciphers, such as the Data Encryption Standard (DES) and the Advanced Encryption Standard (AES), and present provably secure constructions of block ciphers from lower-level primitives. The second half of the book focuses on public-key cryptography, beginning with a self-contained introduction to the number theory needed to understand the RSA, Diffie-Hellman, El Gamal, and other cryptosystems. After exploring public-key encryption and digital signatures, the book concludes with a discussion of the random oracle model and its applications.
Serving as a textbook, a reference, or for self-study, Introduction to Modern Cryptography presents the necessary tools to fully understand this fascinating subject.}, keywords = {Computer networks, computer security, Computers / Operating Systems / General, Computers / Security / General, cryptography, Language Arts \& Disciplines / Communication Studies, Mathematics / Combinatorics}, isbn = {9781584885511}, author = {Katz, Jonathan and Lindell,Yehuda} } @article {14150, title = {IFISS: A Matlab toolbox for modelling incompressible flow}, journal = {ACM Transactions on Mathematical Software}, volume = {33}, year = {2007}, month = {2007///}, author = {Elman, Howard and Ramage, A. and Silvester, D. J} } @article {14223, title = {Illusory Motion Due to Causal Time Filtering}, journal = {Journal of Vision}, volume = {7}, year = {2007}, month = {2007/06/30/}, pages = {977 - 977}, abstract = {Static patterns by Kitaoka (2006), the most well known of which is the {\textquotedblleft}Rotating Snake{\textquotedblright}, elicit forceful illusory motion. The patterns are composed of repeating patches of asymmetric intensity profile, in most cases organized circularly. Motion perception depends on the size of the patches and is found to occur in the periphery for larger patches and closer to the center of the eye for small patches. We propose erroneous estimation of image motion due to eye movements as the main cause of these illusions. The reason is that image motion is estimated from the spatial and temporal energy of the image signal with filters which are symmetric in space, but asymmetric (causal) in time. In other words, only the past, but not the future, is used to estimate the temporal energy. It is shown that such filters mis-estimate the motion of locally asymmetric intensity signals for a range of spatial frequencies. This mis-estimation predicts the perceived motion in the different patterns of Kitaoka as well as the peripheral drift illusion, and accounts for the effect at varying patch size. This study builds upon our prior work on the distortion of image features and movement (Ferm{\"u}ller and Malm 2004). Kitaoka (2006): http://www.ritsumei.ac.jp/~akitaoka/index-e.html. C. Ferm{\"u}ller and H. Malm (2004). {\textquotedblleft}Uncertainty in visual processes predicts geometrical optical illusions{\textquotedblright}, Vision Research, 4, 727{\textendash}749.}, isbn = {1534-7362}, doi = {10.1167/7.9.977}, url = {http://www.journalofvision.org/content/7/9/977}, author = {Ferm{\"u}ller, Cornelia and Hui Ji} } @article {13156, title = {On implementing graph cuts on CUDA}, journal = {First Workshop on General Purpose Processing on Graphics Processing Units}, year = {2007}, month = {2007///}, abstract = {The Compute Unified Device Architecture (CUDA) has enabled graphics processors to be explicitly programmed as general-purpose shared-memory multi-core processors with a high level of parallelism. In this paper, we present our preliminary results of implementing the Graph Cuts algorithm on CUDA. Our primary focus is on implementing Graph Cuts on grid graphs, which are extensively used in imaging applications. We first explain our implementation of breadth first search (BFS) graph traversal on CUDA, which is extensively used in our Graph Cuts implementation. We then present a basic implementation of Graph Cuts that achieves absolute and relative speedups when used for foreground-background segmentation on synthesized images.
Finally, we introduce two optimizations that utilize the special structure of grid graphs. The first one is lockstep BFS, which is used to reduce the overhead of BFS traversals. The second is cache emulation, which is a general technique to regularize memory access patterns and hence enhance memory access throughput. We experimentally show how each of the two optimizations can enhance the performance of the basic implementation on the image segmentation application. }, author = {Hussein,M. and Varshney, Amitabh and Davis, Larry S.} } @article {18576, title = {Implications of Autonomy for the Expressiveness of Policy Routing}, journal = {Networking, IEEE/ACM Transactions on}, volume = {15}, year = {2007}, month = {2007/12//}, pages = {1266 - 1279}, abstract = {Thousands of competing autonomous systems must cooperate with each other to provide global Internet connectivity. Each autonomous system (AS) encodes various economic, business, and performance decisions in its routing policy. The current interdomain routing system enables each AS to express policy using rankings that determine how each router in the AS chooses among different routes to a destination, and filters that determine which routes are hidden from each neighboring AS. Because the Internet is composed of many independent, competing networks, the interdomain routing system should provide autonomy, allowing network operators to set their rankings independently, and to have no constraints on allowed filters. This paper studies routing protocol stability under these conditions. We first demonstrate that {\textquotedblleft}next-hop rankings,{\textquotedblright} commonly used in practice, may not ensure routing stability. We then prove that, when providers can set rankings and filters autonomously, guaranteeing that the routing system will converge to a stable path assignment imposes strong restrictions on the rankings ASes are allowed to choose. We discuss the implications of these results for the future of interdomain routing.}, keywords = {autonomous systems, global Internet connectivity, interdomain routing system, Internet, next-hop rankings, routing protocol, routing protocols, routing stability, stable path assignment}, isbn = {1063-6692}, doi = {10.1109/TNET.2007.896531}, author = {Feamster, Nick and Johari,R. and Balakrishnan,H.} } @article {16848, title = {An improved asymmetry measure to detect breast cancer}, journal = {Proceedings of SPIE}, volume = {6514}, year = {2007}, month = {2007/03/08/}, pages = {65141Q-65141Q-9 - 65141Q-65141Q-9}, abstract = {Radiologists can use the differences between the left and right breasts, or asymmetry, in mammograms to help detect certain malignant breast cancers. An image similarity method has been improved to make use of this knowledge base to recognize breast cancer. Image similarity is determined using computer-aided detection (CAD) prompts as the features, and then a cluster comparison is done to determine whether there is asymmetry. We develop the analysis through a combination of clustering and supervised learning of model parameters. This process correctly classifies cancerous mammograms 95\% of the time, and all mammograms 84\% of the time, and thus asymmetry is a measure that can play an important role in significantly improving computer-aided breast cancer detection systems. This technique represents an improvement in accuracy of 121\% over commercial techniques on non-cancerous cases.
Most computer-aided detection (CAD) systems are tested on images which contain cancer on the assumption that images without cancer would produce the same number of false positives. However, a pre-screening system is designed to remove the normal cases from consideration, and so the inclusion of a pre-screening system into CAD dramatically reduces the number of false positives reported by the CAD system. We define three methods for the inclusion of pre-screening into CAD, and improve the performance of the CAD system by over 70\% at low levels of false positives.}, isbn = {0277786X}, doi = {10.1117/12.708327}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/6514/1/65141Q_1?isAuthorized=no}, author = {Tahmoush,Dave and Samet, Hanan} } @article {14000, title = {The improved fast Gauss transform with applications to machine learning}, journal = {Large Scale Kernel Machines}, year = {2007}, month = {2007///}, pages = {175 - 201}, author = {Raykar,V.C. and Duraiswami, Ramani} } @conference {18228, title = {Improving Embedding Payload in Binary Images with "Super-Pixels"}, booktitle = {Image Processing, 2007. ICIP 2007. IEEE International Conference on}, volume = {3}, year = {2007}, month = {2007/10/16/19}, pages = {III-277 - III-280}, abstract = {Hiding data in binary images can facilitate authentication of important documents in the digital domain, which generally requires a high embedding payload. Recently, a steganography framework known as wet paper coding has been employed in binary image watermarking to achieve high embedding payload. In this paper, we introduce a new concept of super-pixels, and study how to incorporate them in the framework of wet paper coding to further improve the embedding payload in binary images. Using binary text documents as an example, we demonstrate the effectiveness of the proposed super-pixel technique.}, keywords = {analysis;watermarking;, authentication;embedding, authentication;text, binary, coding;cryptography;data, coding;message, encapsulation;document, hiding;document, image, paper, payload;steganography;text, processing;image, watermarking;data}, doi = {10.1109/ICIP.2007.4379300}, author = {Gou,Hongmei and M. Wu} } @conference {14701, title = {Improving software quality with static analysis}, booktitle = {Proceedings of the 7th ACM SIGPLAN-SIGSOFT workshop on Program analysis for software tools and engineering}, series = {PASTE {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {83 - 84}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {At the University of Maryland, we have been working to improve the reliability and security of software by developing new, effective static analysis tools. These tools scan software for bug patterns or show that the software is free from a particular class of defects. There are two themes common to our different projects: 1. Our ultimate focus is on utility: can a programmer actually improve the quality of his or her software using an analysis tool? The important first step toward answering this question is to engineer tools so that they can analyze existing, nontrivial programs, and to carefully report the results of such analyses experimentally. The desire to better understand a more human-centered notion of utility underlies much of our future work. 2. We release all of our tools open source.
This allows other researchers to verify our results, and to reuse some or all of our implementations, which often required significant effort to engineer. We believe that releasing source code is important for accelerating the pace of research results in software quality, and just as importantly allows feedback from the wider community. In this research group presentation, we summarize some recent work and sketch future directions.}, keywords = {bug patterns, bugs, C, Data races, FFIs, java, modularity, network protocols, Software quality}, isbn = {978-1-59593-595-3}, doi = {10.1145/1251535.1251549}, url = {http://doi.acm.org/10.1145/1251535.1251549}, author = {Foster, Jeffrey S. and Hicks, Michael W. and Pugh, William} } @conference {15902, title = {Improving text classification for oral history archives with temporal domain knowledge}, booktitle = {Proceedings of the 30th annual international ACM SIGIR conference on Research and development in information retrieval}, series = {SIGIR {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {623 - 630}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper describes two new techniques for increasing the accuracy of topic label assignment to conversational speech from oral history interviews using supervised machine learning in conjunction with automatic speech recognition. The first, time-shifted classification, leverages local sequence information from the order in which the story is told. The second, temporal label weighting, takes the complementary perspective by using the position within an interview to bias label assignment probabilities. These methods, when used in combination, yield between 6\% and 15\% relative improvements in classification accuracy using a clipped R-precision measure that models the utility of label sets as segment summaries in interactive speech retrieval applications.}, keywords = {automatic topic classification, classifying with domain knowledge, spoken document classification}, isbn = {978-1-59593-597-7}, doi = {10.1145/1277741.1277848}, url = {http://doi.acm.org/10.1145/1277741.1277848}, author = {Olsson,J. Scott and Oard, Douglas} } @conference {12582, title = {In Situ Evaluation of Tracking Algorithms Using Time Reversed Chains}, booktitle = {Computer Vision and Pattern Recognition, 2007. CVPR {\textquoteright}07. IEEE Conference on}, year = {2007}, month = {2007/06//}, pages = {1 - 8}, abstract = {Automatic evaluation of visual tracking algorithms in the absence of ground truth is a very challenging and important problem. In the context of online appearance modeling, there is an additional ambiguity involving the correctness of the appearance model. In this paper, we propose a novel performance evaluation strategy for tracking systems based on a particle filter using a time reversed Markov chain. Starting from the latest observation, the time reversed chain is propagated back till the starting time t = 0 of the tracking algorithm. The posterior density of the time reversed chain is also computed. The distance between the posterior density of the time reversed chain (at t = 0) and the prior density used to initialize the tracking algorithm forms the decision statistic for evaluation. It is postulated that when the data is generated true to the underlying models, the decision statistic takes a low value. We empirically demonstrate the performance of the algorithm against various common failure modes in the generic visual tracking problem. 
Finally, we derive a small frame approximation that allows for very efficient computation of the decision statistic.}, keywords = {Markov processes, decision theory, object detection, object tracking, particle filtering (numerical methods), performance evaluation, posterior density, time reversed Markov chain, tracking systems, visual servoing, visual tracking, in situ evaluation}, doi = {10.1109/CVPR.2007.382992}, author = {Wu,Hao and Sankaranarayanan,A. C and Chellapa, Rama} } @article {18851, title = {Incorporating manufacturability considerations during design of injection molded multi-material objects}, journal = {Research in Engineering Design}, volume = {17}, year = {2007}, month = {2007///}, pages = {207 - 231}, abstract = {The presence of an already molded component during the second and subsequent molding stages makes multi-material injection molding different from the traditional injection molding process. Therefore, designing multi-material molded objects requires addressing many additional manufacturability considerations. In this paper, we first present an approach to systematically identifying potential manufacturability problems that are unique to the multi-material molding processes and design rules to avoid these problems. Then we present a comprehensive manufacturability analysis approach that incorporates both the traditional single material molding rules as well as the specific rules that have been identified for multi-material molding. Our analysis shows that sometimes the traditional rules need to be suppressed or modified. Lastly, for each of the new manufacturability problems, this paper describes algorithms for automatically detecting potential occurrences and generating redesign suggestions. These algorithms have been implemented in a computer-aided manufacturability analysis system. The approach presented in this paper is applicable to multi-shot and over molding processes. We expect that the manufacturability analysis techniques presented in this paper will help in decreasing the product development time for the injection molded multi-material objects.}, keywords = {engineering}, isbn = {0934-9839}, doi = {10.1007/s00163-007-0027-9}, url = {http://www.springerlink.com/content/c35337h3r5129525/abstract/}, author = {Banerjee,Ashis and Li,Xuejun and Fowler,Greg and Gupta, Satyandra K.} } @conference {14334, title = {Increasing the breadth: applying sensors, inference and self-report in field studies with the MyExperience tool}, booktitle = {Proceedings of the 1st international workshop on System evaluation for mobile platforms}, series = {MobiEval {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {27 - 27}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {In situ evaluation, context-aware systems, experience sampling method (ESM), field studies, Mobile computing, toolkits}, isbn = {978-1-59593-762-9}, doi = {10.1145/1247721.1247727}, url = {http://doi.acm.org/10.1145/1247721.1247727}, author = {Jon Froehlich and Chen,Mike Y. and Consolvo,Sunny and Harrison,Beverly and Landay,James A.} } @conference {16845, title = {Indexing Methods for Similarity Searching}, booktitle = {Current Trends in Computer Science, 2007. ENC 2007. 
Eighth Mexican International Conference on}, year = {2007}, month = {2007/09//}, pages = {xv - xv}, abstract = {An overview is given of the various techniques and issues involved in providing indexing support for similarity searching. Similarity searching is a crucial part of retrieval in multimedia databases used for applications such as pattern recognition, image databases, and content-based retrieval. It involves finding objects in a data set S that are similar to a query object q based on some distance measure d which is usually a distance metric. The search process is usually achieved by means of nearest neighbor finding. Existing methods for handling similarity search in this setting fall into one of two classes. The first is based on mapping to a vector space. The vector space is usually of high dimension which requires special handling due to the fact that indexing methods do not discriminate well in such spaces. In particular, the query regions often overlap all of the blocks that result from the decomposition of the underlying space. This has led to some special solutions that make use of a sequential scan. An alternative is to use dimension reduction to find a mapping from a high-dimensional space into a low-dimensional space by finding the most discriminating dimensions and then index the data using one of a number of different data structures such as k-d trees, R-trees, quadtrees, etc. The second directly indexes the objects based on distances making use of data structures such as the vp-tree, M-tree, etc.}, keywords = {content-based retrieval, data structures, image database, indexing methods, multimedia databases, pattern recognition, similarity searching}, doi = {10.1109/ENC.2007.9}, author = {Samet, Hanan} } @conference {16847, title = {Indexing Point Triples Via Triangle Geometry}, booktitle = {Data Engineering, 2007. ICDE 2007. IEEE 23rd International Conference on}, year = {2007}, month = {2007/04//}, pages = {936 - 945}, abstract = {Database search for images containing icons with specific mutual spatial relationships can be facilitated by an appropriately structured index. For the case of images containing subsets each of which consist of three icons, the one-to-one correspondence between (distinct) point triples and triangles allows the use of such triangle attributes as position, size, orientation, and "shape" in constructing a point-based index, in which each triangle maps to a single point in a resulting hyperdimensional index space. Size (based on the triangle perimeter) can be represented by a single linear dimension. The abstract "shape" of a triangle induces a space that is inherently two-dimensional, and a number of alternative definitions of a basis for this space are examined. Within a plane, orientation reduces to rotation, and (after assignment of a reference direction for the triangle) can be represented by a single, spatially closed dimension. However, assignment of a reference direction for triangles possessing a k-fold rotational symmetry presents a significant challenge. Methods are described for characterizing shape and orientation of triangles, and for mapping these attributes onto a set of linear axes to form a combined index. 
The shape attribute is independent of size, orientation, and position, and the characterization of shape and orientation is stable with respect to small variations in the indexed triangles.}, keywords = {database indexing, query processing, visual databases, triangle geometry, hyperdimensional index space, image search, k-fold rotational symmetry, point-based index, single linear dimension, spatial relationships, structured index}, doi = {10.1109/ICDE.2007.367939}, author = {Cranston,C.B. and Samet, Hanan} } @article {14285, title = {Inferring aliasing and encapsulation properties for Java}, journal = {ACM SIGPLAN Notices}, volume = {42}, year = {2007}, month = {2007///}, pages = {423 - 440}, author = {Ma,K.K. and Foster, Jeffrey S.} } @article {14491, title = {Inferring organizational titles in online communication}, journal = {Statistical Network Analysis: Models, Issues, and New Directions}, year = {2007}, month = {2007///}, pages = {179 - 181}, abstract = {There is increasing interest in the storage, retrieval, and analysis of email communications. One active area of research focuses on the inference of properties of the underlying social network giving rise to the email communications [1,2]. Email communication between individuals implies some type of relationship, whether it is formal, such as a manager-employee relationship, or informal, such as friendship relationships. Understanding the nature of these observed relationships can be problematic given there is a shared context among the individuals that isn{\textquoteright}t necessarily communicated. This provides a challenge for analysts that wish to explore and understand email archives for legal or historical research.}, doi = {10.1007/978-3-540-73133-7_15}, author = {Namata,G. and Getoor, Lise and Diehl,C.} } @conference {14973, title = {Information-Aware 2^n-Tree for Efficient Out-of-Core Indexing of Very Large Multidimensional Volumetric Data}, booktitle = {Scientific and Statistical Database Management, 2007. SSBDM{\textquoteright}07. 19th International Conference on}, year = {2007}, month = {2007///}, pages = {9 - 9}, author = {Kim,J. and JaJa, Joseph F.} } @conference {13138, title = {An Interactive Approach to Pose-Assisted and Appearance-based Segmentation of Humans}, booktitle = {Computer Vision, 2007. ICCV 2007. IEEE 11th International Conference on}, year = {2007}, month = {2007/10//}, pages = {1 - 8}, abstract = {An interactive human segmentation approach is described. Given regions of interest provided by users, the approach iteratively estimates segmentation via a generalized EM algorithm. Specifically, it encodes both spatial and color information in a nonparametric kernel density estimator, and incorporates local MRF constraints and global pose inferences to propagate beliefs over image space iteratively to determine a coherent segmentation. This ensures the segmented humans resemble the shapes of human poses. Additionally, a layered occlusion model and a probabilistic occlusion reasoning method are proposed to handle segmentation of multiple humans in occlusion. 
The approach is tested on a wide variety of images containing single or multiple occluded humans, and the segmentation performance is evaluated quantitatively.}, keywords = {expectation-maximisation algorithm, appearance-based segmentation, pose-assisted segmentation, interactive segmentation, image segmentation, human segmentation, hidden feature removal, inference mechanisms, pose estimation, nonparametric kernel density estimator, layered occlusion model, probabilistic occlusion reasoning}, doi = {10.1109/ICCV.2007.4409123}, author = {Zhe Lin and Davis, Larry S. and David Doermann and DeMenthon,D.} } @conference {13443, title = {Interactive visual clustering}, booktitle = {Proceedings of the 12th international conference on Intelligent user interfaces}, series = {IUI {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {361 - 364}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Interactive Visual Clustering (IVC) is a novel method that allows a user to explore relational data sets interactively, in order to produce a clustering that satisfies their objectives. IVC combines spring-embedded graph layout with user interaction and constrained clustering. Experimental results on several synthetic and real-world data sets show that IVC yields better clustering performance than alternative methods.}, keywords = {clustering, constraints, interaction}, isbn = {1-59593-481-2}, doi = {10.1145/1216295.1216367}, url = {http://doi.acm.org/10.1145/1216295.1216367}, author = {desJardins, Marie and MacGlashan,James and Ferraioli,Julia} } @article {18229, title = {Intrinsic fingerprints for image authentication and steganalysis}, journal = {Proc. SPIE Conf. Security, Steganography, Watermarking of Multimedia Contents}, year = {2007}, month = {2007///}, abstract = {With the growing popularity of digital imaging devices and low-cost image editing software, the integrity of image content can no longer be taken for granted. This paper introduces a methodology for forensic analysis of digital camera images, based on the observation that many in-camera and post-camera processing operations leave distinct traces on digital images. We present methods to identify these intrinsic fingerprint traces of the various processing operations and employ them to verify the authenticity of digital data. We develop an explicit imaging model to characterize the properties that should be satisfied by a direct camera output, and model any further processing applied to the camera captured image by a manipulation filter. Utilizing the manipulation filter coefficients and reference patterns estimated from direct camera outputs using blind deconvolution techniques, the proposed methods are capable of detecting manipulations made by previously unseen operations and steganographic embedding. }, author = {Swaminathan,A. and Wu,M. and Liu,K. J.R} } @book {14505, title = {Introduction to Statistical Relational Learning}, year = {2007}, month = {2007/11/30/}, publisher = {MIT Press}, organization = {MIT Press}, abstract = {Handling inherent uncertainty and exploiting compositional structure are fundamental to understanding and designing large-scale systems. Statistical relational learning builds on ideas from probability theory and statistics to address uncertainty while incorporating tools from logic, databases and programming languages to represent structure. 
In Introduction to Statistical Relational Learning, leading researchers in this emerging area of machine learning describe current formalisms, models, and algorithms that enable effective and robust reasoning about richly structured systems and data. The early chapters provide tutorials for material used in later chapters, offering introductions to representation, inference and learning in graphical models, and logic. The book then describes object-oriented approaches, including probabilistic relational models, relational Markov networks, and probabilistic entity-relationship models as well as logic-based formalisms including Bayesian logic programs, Markov logic, and stochastic logic programs. Later chapters discuss such topics as probabilistic models with unknown objects, relational dependency networks, reinforcement learning in relational domains, and information extraction. By presenting a variety of approaches, the book highlights commonalities and clarifies important differences among proposed approaches and, along the way, identifies important representational and algorithmic issues. Numerous applications are provided throughout. Lise Getoor is Assistant Professor in the Department of Computer Science at the University of Maryland. Ben Taskar is Assistant Professor in the Computer and Information Science Department at the University of Pennsylvania.}, pages = {602}, keywords = {Business \& Economics / Statistics, Computer algorithms, Computers / Database Management / General, Computers / Logic Design, Computers / Machine Theory, Computers / Programming / Algorithms, Education / Statistics, machine learning, Machine learning / Statistical methods, Relational databases}, isbn = {9780262072885}, author = {Getoor, Lise and Taskar, Ben} } 
@conference {18855, title = {Investigation of Revolute Joint Clearances Created by an In-Mold Assembly Process}, year = {2007}, month = {2007/07//}, pages = {112 - 117}, abstract = {Revolute joints are frequently used in articulated structures. Traditionally, such a joint is formed by assembling two components. As an alternative, revolute joints can be created inside the mold using an in-mold assembly process. This process eliminates the need for post-molding assembly, thus significantly reducing the cycle time and part count. The functional performance of a revolute joint depends on the clearance in the joint. The clearance in turn depends on the part shrinkage and the mold deformation during the molding process. The presence of a polymer part during the second molding stage makes an in-mold assembly process significantly different from the traditional molding process due to the difference in heat transfer and deformation characteristics. This paper presents experimental data and a preliminary model to explain the differences in clearance produced by an aluminum mold and an aluminum mold with an acrylonitrile butadiene styrene (ABS) insert. Our data indicates that there is a significant difference between the clearances observed from these two different types of molds. We believe that clearances produced depend strongly on the thermal history of the parts.}, keywords = {acrylonitrile butadiene styrene insert, aluminum mold, assembling, couplings, cycle time reduction, heat transfer, injection moulding, inmold assembly process, mold deformation, molding process, moulding equipment, part count reduction, plastics industry, revolute joint clearances}, doi = {10.1109/ISAM.2007.4288458}, author = {Ananthanarayanan,A. and Thamire,C. and Gupta,S.K.} } @article {14623, title = {Identification and cross-species comparison of canine osteoarthritic gene regulatory cis-elements}, journal = {Osteoarthritis and Cartilage}, volume = {14}, year = {2006}, month = {2006/08//}, pages = {830 - 838}, abstract = {Objective: To better understand transcription regulation of osteoarthritis (OA) by examining common promoter motifs in canine osteoarthritic genes, to identify other genes containing these motifs and to assess the conservation of these motifs between canine, human, mouse and rat. Design: Differentially expressed transcripts in canine OA were mapped to the human genome. We thus identified 20 orthologous human transcripts representing 19 up-regulated genes and 62 orthologous transcripts representing 60 down-regulated genes. The 5 kbp upstream regions of these transcripts were used to identify binding sites and build promoter models based on those sites. The human genome was subsequently searched for other transcripts likely to be regulated by the same promoter models. Orthologous transcripts were then identified in canine, rat and mouse for determination of potential cross-species conservation of binding sites comprising the promoter model. 
Results: Four promoter models containing 5{\textendash}6 transcripts and 5{\textendash}8 common transcription factor binding sites were developed. They include binding sites for AP-4, AP-2α and γ, and E2F. Several hundred other human genes were found to contain these promoter motifs. Furthermore, these motifs were significantly overrepresented in the orthologous genes in canine, rat and mouse genomes. Conclusions: We have developed and applied a computational methodology to identify common promoter elements implicated in OA and shared amongst four higher vertebrates. The transcription factors associated with these binding sites and other genes driven by these promoter motifs have been implicated in OA, chondrocyte development and with other biological factors involved in the disease. }, keywords = {Chondrocyte, Gene expression, Osteoarthritis, Promoter}, isbn = {1063-4584}, doi = {10.1016/j.joca.2006.02.007}, url = {http://www.sciencedirect.com/science/article/pii/S1063458406000379}, author = {Hannenhalli, Sridhar and Middleton,R.P. and Levy,S. and Perroud,B. and Holzwarth,J.A. and McDonald,K. and Hannah,S.S.} } @conference {12137, title = {Identifying domain-specific defect classes using inspections and change history}, booktitle = {Proceedings of the 2006 ACM/IEEE international symposium on Empirical software engineering}, series = {ISESE {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {346 - 355}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We present an iterative, reading-based methodology for analyzing defects in source code when change history is available. Our bottom-up approach can be applied to build knowledge of recurring defects in a specific domain, even if other sources of defect data such as defect reports and change requests are unavailable, incomplete or at the wrong level of abstraction for the purposes of the defect analysis. After defining the methodology, we present the results of an empirical study where our method was applied to analyze defects in parallel programs which use the MPI (Message Passing Interface) library to express parallelism. This library is often used in the domain of high performance computing, where there is much discussion but little empirical data about the frequency and severity of defect types. Preliminary results indicate the methodology is feasible and can provide insights into the nature of real defects. We present the results, derived hypothesis, and lessons learned.}, keywords = {change history, code reading, domain specific defects, Inspection}, isbn = {1-59593-218-6}, doi = {10.1145/1159733.1159785}, url = {http://doi.acm.org/10.1145/1159733.1159785}, author = {Nakamura,Taiga and Hochstein, Lorin and Basili, Victor R.} } @article {18345, title = {IDiexis: Mobile image-based search on world wide web-a picture is worth a thousand keywords}, journal = {Proc. of Mobisys}, year = {2006}, month = {2006///}, abstract = {Using images of objects as queries is a new approach to search for information on the web. Image-based information retrieval goes beyond only matching images, as information in other modalities also can be extracted from data collections using image search. We demonstrate a new system that uses images to search for web-based information. We introduce a point-by-photograph paradigm, where users can specify an object simply by taking pictures. 
Our technique uses content-based image retrieval methods to search the web or other databases for matching images and their source pages to find relevant location-based information. We have developed a prototype on a camera phone and conducted user studies to demonstrate the efficacy of our approach compared to other alternatives. }, author = {Tollmar,K. and Tom Yeh and Darrell,T.} } @book {16872, title = {Image Database Systems and Techniques: A Symbolic Approach}, year = {2006}, month = {2006///}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, address = {San Francisco, CA, USA}, isbn = {1558605495}, author = {Samet, Hanan and Soffer,Aya} } @article {15590, title = {Image fusion using cokriging}, journal = {Geosci. and Remote Sens. Symp., 2006. IGARSS 2006. IEEE Int. Conf. on}, year = {2006}, month = {2006///}, pages = {2518 - 2521}, author = {Memarsadeghi,N. and Le Moigne,J. and Mount, Dave} } @conference {15543, title = {Image Registration and Fusion Studies for the Integration of Multiple Remote Sensing Data}, booktitle = {Acoustics, Speech and Signal Processing, 2006. ICASSP 2006 Proceedings. 2006 IEEE International Conference on}, volume = {5}, year = {2006}, month = {2006/05//}, pages = {V - V}, abstract = {The future of remote sensing will see the development of spacecraft formations, and with this development will come a number of complex challenges such as maintaining precise relative position and specified attitudes. At the same time, there will be increasing needs to understand planetary system processes and build accurate prediction models. One essential technology to accomplish these goals is the integration of multiple source data. For this integration, image registration and fusion represent the first steps and need to be performed with very high accuracy. In this paper, we describe studies performed in both image registration and fusion, including a modular framework that was built to describe registration algorithms, a Web-based image registration toolbox, and the comparison of several image fusion techniques using data from the EO-1/ALI and Hyperion sensors}, keywords = {ALI, EO-1, fusion studies, geophysical signal processing, Hyperion sensors, image registration, Internet, multiple remote sensing data, multiple source data, Remote sensing, Web-based image registration toolbox}, doi = {10.1109/ICASSP.2006.1661494}, author = {Le Moigne,J. and Cole-Rhodes,A. and Eastman,R. and Jain,P. and Joshua,A. and Memarsadeghi,N. and Mount, Dave and Netanyahu,N. and Morisette,J. and Uko-Ozoro,E.} } @article {16867, title = {Image Similarity and Asymmetry to Improve Computer-Aided Detection of Breast Cancer}, journal = {Digital Mammography}, year = {2006}, month = {2006///}, pages = {221 - 228}, abstract = {An improved image similarity method is introduced to recognize breast cancer, and it is incorporated into a computer-aided breast cancer detection system through Bayes Theorem. Radiologists can use the differences between the left and right breasts, or asymmetry, in mammograms to help detect certain malignant breast cancers. Image similarity is used to determine asymmetry using a contextual and then a spatial comparison. The mammograms are filtered to find the most contextually significant points, and then the resulting point set is analyzed for spatial similarity. We develop the analysis through a combination of modeling and supervised learning of model parameters. 
This process correctly classifies mammograms 84\% of the time, and significantly improves the accuracy of a computer-aided breast cancer detection system by 71\%.}, doi = {10.1007/11783237_31}, author = {Tahmoush,D. and Samet, Hanan} } @conference {18226, title = {Image Tampering Identification using Blind Deconvolution}, booktitle = {Image Processing, 2006 IEEE International Conference on}, year = {2006}, month = {2006/10//}, pages = {2309 - 2312}, abstract = {Digital images have been used in a growing number of applications from law enforcement and surveillance, to medical diagnosis and consumer photography. With such widespread popularity and the presence of low-cost image editing software, the integrity of image content can no longer be taken for granted. In this paper, we propose a novel technique based on blind deconvolution to verify image authenticity. We consider the direct output images of a camera as authentic, and introduce algorithms to detect further processing such as tampering applied to the image. Our proposed method is based on the observation that many tampering operations can be approximated as a combination of linear and non-linear components. We model the linear part of the tampering process as a filter, and obtain its coefficients using blind deconvolution. These estimated coefficients are then used to identify possible manipulations. We demonstrate the effectiveness of the proposed image authentication technique and compare our results with existing works}, keywords = {approximation theory, biometrics (access control), blind deconvolution, camera, consumer photography, data compression, digital images, filtering theory, image authentication, image coding, image editing software, medical diagnosis, surveillance, tampering identification, tampering process}, doi = {10.1109/ICIP.2006.312848}, author = {Swaminathan,A. and M. Wu and Liu,K. J.R} } @conference {13574, title = {Imaging as an Alternative Data Channel for Camera Phones}, booktitle = {ACM International Conference Proceeding Series; Proceedings of the 5th International Conference on Mobile and Ubiquitous Multimedia}, year = {2006}, month = {2006/12//}, pages = {No. 5}, abstract = {In this paper, we demonstrate a solution to use cameras to download data to cell phones as an alternative to existing wireless (CDMA/GPRS, Bluetooth), infrared or cable connections. In our method the data is encoded as a sequence of images, which can be displayed on any flat display, captured by users with their camera phones, and decoded by pre-embedded software. To solve this problem we need to be able to (1) encode arbitrary data as a sequence of images, (2) process captured images under various lighting variations and perspective distortions while maintaining real-time performance, and (3) decode the processed images robustly even when partial data is lost. In the paper we address these challenges in detail and present our solution. We have implemented a prototype which allows users to successfully download various types of files, including pictures, ring tones and Java programs to the camera phones. We discuss the limitations of our solution, and future works to overcome these limitations.}, author = {Liu,Xu and Huiping Li and David Doermann} } @conference {16451, title = {Implementing a bioinformatics pipeline (bip) on a mediator platform: Comparing cost and quality of alternate choices}, booktitle = {Data Engineering Workshops, 2006. Proceedings. 
22nd International Conference on}, year = {2006}, month = {2006///}, pages = {67 - 67}, author = {Eckman,B. A and Gaasterland,T. and Lacroix,Z. and Raschid, Louiqa and Snyder,B. and Vidal,M. E} } @conference {15642, title = {On the importance of idempotence}, booktitle = {Proceedings of the thirty-eighth annual ACM symposium on Theory of computing}, series = {STOC {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {564 - 573}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Range searching is among the most fundamental problems in computational geometry. An n-element point set in R^d is given along with an assignment of weights to these points from some commutative semigroup. Subject to a fixed space of possible range shapes, the problem is to preprocess the points so that the total semigroup sum of the points lying within a given query range η can be determined quickly. In the approximate version of the problem we assume that η is bounded, and we are given an approximation parameter ε > 0. We are to determine the semigroup sum of all the points contained within η and may additionally include any of the points lying within distance ε {\textbullet} diam(η) of η{\textquoteright}s boundary. In this paper we contrast the complexity of range searching based on semigroup properties. A semigroup (S,+) is idempotent if x + x = x for all x ∈ S, and it is integral if for all k >= 2, the k-fold sum x + ... + x is not equal to x. For example, (R, min) and ({0,1}, ∨) are both idempotent, and (N, +) is integral. To date, all upper and lower bounds hold irrespective of the semigroup. We show that semigroup properties do indeed make a difference for both exact and approximate range searching, and in the case of approximate range searching the differences are dramatic. First, we consider exact halfspace range searching. The assumption that the semigroup is integral allows us to improve the best lower bounds in the semigroup arithmetic model. For example, assuming O(n) storage in the plane and ignoring polylog factors, we provide an Ω*(n^{2/5}) lower bound for integral semigroups, improving upon the best lower bound of Ω*(n^{1/3}), thus closing the gap with the O(n^{1/2}) upper bound. We also consider approximate range searching for Euclidean ball ranges. We present lower bounds and nearly matching upper bounds for idempotent semigroups. We also present lower bounds for range searching for integral semigroups, which nearly match existing upper bounds. These bounds show that the advantages afforded by idempotency can result in major improvements. In particular, assuming roughly linear space, the exponent in the ε-dependencies is smaller by a factor of nearly 1/2. All our results are presented in terms of space-time tradeoffs, and our lower and upper bounds match closely throughout the entire spectrum. To our knowledge, our results provide the first proof that semigroup properties affect the computational complexity of range searching in the semigroup arithmetic model. These are the first lower bound results for any approximate geometric retrieval problems. 
The existence of nearly matching upper bounds, throughout the range of space-time tradeoffs, suggests that we are close to resolving the computational complexity of both idempotent and integral approximate spherical range searching in the semigroup arithmetic model.}, keywords = {Approximation algorithms, Idempotence, Range searching}, isbn = {1-59593-134-1}, doi = {10.1145/1132516.1132598}, url = {http://doi.acm.org/10.1145/1132516.1132598}, author = {Arya,Sunil and Malamatos,Theocharis and Mount, Dave} } @article {15247, title = {Improved algorithms for data migration}, journal = {Approximation, Randomization, and Combinatorial Optimization. Algorithms and Techniques}, year = {2006}, month = {2006///}, pages = {164 - 175}, abstract = {Our work is motivated by the need to manage data on a collection of storage devices to handle dynamically changing demand. As demand for data changes, the system needs to automatically respond to changes in demand for different data items. The problem of computing a migration plan among the storage devices is called the data migration problem. This problem was shown to be NP-hard, and an approximation algorithm achieving an approximation factor of 9.5 was presented for the half-duplex communication model in [Khuller, Kim and Wan: Algorithms for Data Migration with Cloning, SIAM J. on Computing, Vol. 33(2):448{\textendash}461 (2004)]. In this paper we develop an improved approximation algorithm that gives a bound of 6.5+o(1) using various new ideas. In addition, we develop better algorithms using external disks and get an approximation factor of 4.5. We also consider the full duplex communication model and develop an improved bound of 4+o(1) for this model, with no external disks.}, doi = {10.1007/11830924_17}, author = {Khuller, Samir and Kim,Y. A and Malekian,A.} } @article {15249, title = {An improved approximation algorithm for vertex cover with hard capacities}, journal = {Journal of Computer and System Sciences}, volume = {72}, year = {2006}, month = {2006/02//}, pages = {16 - 33}, abstract = {We study the capacitated vertex cover problem, a generalization of the well-known vertex-cover problem. Given a graph G = (V, E), the goal is to cover all the edges by picking a minimum cover using the vertices. When we pick a vertex, we can cover up to a pre-specified number of edges incident on this vertex (its capacity). The problem is clearly NP-hard as it generalizes the well-known vertex-cover problem. Previously, approximation algorithms with an approximation factor of 2 were developed with the assumption that an arbitrary number of copies of a vertex may be chosen in the cover. If we are allowed to pick at most a fixed number of copies of each vertex, the approximation algorithm becomes much more complex. Chuzhoy and Naor (FOCS, 2002) have shown that the weighted version of this problem is at least as hard as set cover; in addition, they developed a 3-approximation algorithm for the unweighted version. 
We give a 2-approximation algorithm for the unweighted version, improving the Chuzhoy{\textendash}Naor bound of three and matching (up to lower-order terms) the best approximation ratio known for the vertex-cover problem.}, keywords = {Approximation algorithms, Capacitated covering, Linear programming, Randomized rounding, Set cover, Vertex cover}, isbn = {0022-0000}, doi = {10.1016/j.jcss.2005.06.004}, url = {http://www.sciencedirect.com/science/article/pii/S0022000005000747}, author = {Gandhi,Rajiv and Halperin,Eran and Khuller, Samir and Kortsarz,Guy and Srinivasan, Aravind} } @conference {14547, title = {Improved lower and upper bounds for universal TSP in planar metrics}, booktitle = {Proceedings of the seventeenth annual ACM-SIAM symposium on Discrete algorithms}, year = {2006}, month = {2006///}, pages = {649 - 658}, author = {Hajiaghayi, Mohammad T. and Kleinberg,R. and Leighton,T.} } @article {18577, title = {In VINI veritas: realistic and controlled network experimentation}, journal = {SIGCOMM Comput. Commun. Rev.}, volume = {36}, year = {2006}, month = {2006/08//}, pages = {3 - 14}, abstract = {This paper describes VINI, a virtual network infrastructure that allows network researchers to evaluate their protocols and services in a realistic environment that also provides a high degree of control over network conditions. VINI allows researchers to deploy and evaluate their ideas with real routing software, traffic loads, and network events. To provide researchers flexibility in designing their experiments, VINI supports simultaneous experiments with arbitrary network topologies on a shared physical infrastructure. This paper tackles the following important design question: What set of concepts and techniques facilitate flexible, realistic, and controlled experimentation (e.g., multiple topologies and the ability to tweak routing algorithms) on a fixed physical infrastructure? We first present VINI{\textquoteright}s high-level design and the challenges of virtualizing a single network. We then present PL-VINI, an implementation of VINI on PlanetLab, running the "Internet In a Slice". Our evaluation of PL-VINI shows that it provides a realistic and controlled environment for evaluating new protocols and services.}, keywords = {architecture, experimentation, Internet, Routing, virtualization}, isbn = {0146-4833}, doi = {10.1145/1151659.1159916}, url = {http://doi.acm.org/10.1145/1151659.1159916}, author = {Bavier,Andy and Feamster, Nick and Huang,Mark and Peterson,Larry and Rexford,Jennifer} } @article {18885, title = {The incompleteness of planning with volatile external information}, journal = {FRONTIERS IN ARTIFICIAL INTELLIGENCE AND APPLICATIONS}, volume = {141}, year = {2006}, month = {2006///}, pages = {839 - 839}, abstract = {In many real-world planning environments, some of the information about the world is both external (the planner must request it from external information sources) and volatile (it changes before the planning process completes). In such environments, a planner faces two challenges: how to generate plans despite changes in the external information during planning, and how to guarantee that a plan returned by the planner will remain valid for some period of time after the planning ends. Previous works on planning with volatile information have addressed the first challenge, but not the second one. 
This paper provides a general model for planning with volatile external information in which the planner offers a guarantee of how long the solution will remain valid after it is returned, and an incompleteness theorem showing that there is no planner that can succeed in solving all solvable planning problems in which there is volatile external information. }, url = {http://www.cs.umd.edu/~nau/papers/au2006incompleteness.pdf}, author = {Au,T. and Nau, Dana S.} } @article {14440, title = {Inferring formal titles in organizational email archives}, journal = {Proc. of the ICML Workshop on Statistical Network Analysis}, year = {2006}, month = {2006///}, abstract = {In the social network of large groups of people, such as companies and organizations, formal hierarchies with titles and lines of authority are established to define the responsibilities and order of power within that group. Although this information may be readily available for individuals within that group, the context this hierarchy provides in communications is not available to those outside the group. In this paper, we define the problem of inferring this formal hierarchy in the context of organizational email archives. We present a new dataset for the widely used Enron email dataset, for use with analysis of the Enron social network hierarchy. We then provide some preliminary results on the problem of classifying individuals to their formal titles using standard classification algorithms. }, author = {Namata,G.M. and Getoor, Lise and Diehl,C.} } @article {14974, title = {Information-aware HyperOctree for effective isosurface rendering of large scale time-varying data}, volume = {UMIACS-TR-2006-00}, year = {2006}, month = {2006///}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {We develop a new indexing structure and a new out-of-core scheme to extract and render isosurfaces for large scale time-varying 3-D volume data. The new algorithm enables the fast visualization of arbitrary isosurfaces cut by a user-specified hyperplane along any of the four dimensions. Our data structure makes use of the entropy measure to establish the relative resolutions of the spatial and temporal dimensions rather than treating the temporal dimension just as any other dimension. The preprocessing is very efficient and the resulting indexing structure is very compact. We have tested our scheme on a 40GB subset of the Richtmyer-Meshkov instability data set and obtained very good performance for a wide range of isosurface extraction queries. }, author = {Kim,J. and JaJa, Joseph F.} } @article {13929, title = {Initial findings from a three-year international case study exploring children{\textquoteright}s responses to literature in a digital library}, journal = {Library trends}, volume = {54}, year = {2006}, month = {2006///}, pages = {245 - 265}, author = {Druin, Allison and Massey,S. and Weeks,A. C} } @conference {12620, title = {Integrated Motion Detection and Tracking for Visual Surveillance}, booktitle = {Computer Vision Systems, 2006 ICVS {\textquoteright}06. IEEE International Conference on}, year = {2006}, month = {2006/01//}, pages = {28 - 28}, abstract = {Visual surveillance systems have gained a lot of interest in the last few years. In this paper, we present a visual surveillance system that is based on the integration of motion detection and visual tracking to achieve better performance. 
Motion detection is achieved using an algorithm that combines temporal variance with background modeling methods. The tracking algorithm combines motion and appearance information into an appearance model and uses a particle filter framework for tracking the object in subsequent frames. The system was tested on a large ground-truthed data set containing hundreds of color and FLIR image sequences. A performance evaluation for the system was performed and the average evaluation results are reported in this paper.}, doi = {10.1109/ICVS.2006.35}, author = {Abdelkader, M.F. and Chellapa, Rama and Qinfen Zheng and Chan, A.L.} } @article {16088, title = {Integrating data and interfaces to enhance understanding of government statistics: toward the national statistical knowledge network project briefing}, journal = {Proceedings of 7th Annual International Conference on Digital Libraries (DG06)}, year = {2006}, month = {2006///}, pages = {21 - 24}, abstract = {This paper reports the results of work in the final no-cost extension year of a digital government project that developed user interface models and prototypes to help people find and understand government statistics; proposed a Statistical Knowledge Network architecture that supports cross agency information access; and demonstrated models for government-academic collaboration. }, author = {Marchionini,G. and Haas,S. and Plaisant, Catherine and Shneiderman, Ben} } @inbook {12637, title = {Integrating Video Information over Time. Example: Face Recognition from Video}, booktitle = {Cognitive Vision Systems}, series = {Lecture Notes in Computer Science}, volume = {3948}, year = {2006}, month = {2006///}, pages = {127 - 144}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The ability to integrate information over time in order to come to a conclusion is a strength of cognitive systems. It allows the system, e.g., to (1) verify insecure observations: this is the case when data is noisy or of low quality, or if conditions in general are non-optimal; (2) exploit general knowledge about spatio-temporal relations: this allows the system to exploit the specific dynamics of an object as an additional feature for, e.g., recognition, interpretation and prediction of actions of other agents; (3) in general, use dynamics to recursively generate and verify hypotheses for object and scene interpretation and to generate warnings when {\textquoteleft}implausible{\textquoteright} hypotheses occur or to circumvent them altogether. We have studied the effectiveness of temporal integration for recognition purposes by using face recognition as an example study case. Face recognition is a prominent problem and has been studied more extensively than almost any other recognition problem. An observation is that face recognition works well in ideal conditions. If those conditions, however, are not met, then all present algorithms break down disgracefully. This problem appears to be general to all vision techniques that intend to extract visual information out of low-SNR image data. It is exactly a strength of cognitive systems that they are able to cope with non-ideal situations. 
In this chapter we will deal with this problem.}, isbn = {978-3-540-33971-7}, url = {http://dx.doi.org/10.1007/11414353_9}, author = {Kr{\"u}ger,Volker and Zhou,Shaohua and Chellapa, Rama}, editor = {Christensen,Henrik and Nagel,Hans-Hellmut} } @conference {12014, title = {Integration of visual and inertial information for egomotion: a stochastic approach}, booktitle = {Proceedings 2006 IEEE International Conference on Robotics and Automation, 2006. ICRA 2006}, year = {2006}, month = {2006/05/15/19}, pages = {2053 - 2059}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present a probabilistic framework for visual correspondence, inertial measurements and egomotion. First, we describe a simple method based on Gabor filters to produce correspondence probability distributions. Next, we generate a noise model for inertial measurements. Probability distributions over the motions are then computed directly from the correspondence distributions and the inertial measurements. We investigate combining the inertial and visual information for a single distribution over the motions. We find that with smaller amounts of correspondence information, fusion of the visual data with the inertial sensor results in much better egomotion estimation. This is essentially because inertial measurements decrease the "translation-rotation" ambiguity. However, when more correspondence information is used, this ambiguity is reduced to such a degree that the inertial measurements provide negligible improvement in accuracy. This suggests that inertial and visual information are more closely integrated in a compositional sense}, keywords = {Computer vision, data mining, Distributed computing, egomotion estimation, Gabor filters, Gravity, inertial information, inertial sensor, Laboratories, Motion estimation, Noise measurement, Probability distribution, probability distributions, Rotation measurement, stochastic approach, Stochastic processes, visual information}, isbn = {0-7803-9505-0}, doi = {10.1109/ROBOT.2006.1642007}, author = {Domke, J. and Aloimonos, J.} } @inbook {14593, title = {An Interaction-Dependent Model for Transcription Factor Binding}, booktitle = {Systems Biology and Regulatory Genomics}, series = {Lecture Notes in Computer Science}, volume = {4023}, year = {2006}, month = {2006///}, pages = {225 - 234}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Transcriptional regulation is accomplished by several transcription factor proteins that bind to specific DNA elements in the relative vicinity of the gene, and interact with each other and with Polymerase enzyme. Thus the determination of transcription factor-DNA binding is an important step toward understanding transcriptional regulation. An effective way to experimentally determine the genomic regions bound by a transcription factor is by a ChIP-on-chip assay. Then, given the putative genomic regions, computational motif finding algorithms are applied to estimate the DNA binding motif or positional weight matrix for the TF. The a priori expectation is that the presence or absence of the estimated motif in a promoter should be a good indicator of the binding of the TF to that promoter. This association between the presence of the transcription factor motif and its binding is however weak in a majority of cases where the whole genome ChIP experiments have been performed. 
One possible reason for this is that the DNA binding of a particular transcription factor depends not only on its own motif, but also on synergistic or antagonistic action of neighboring motifs for other transcription factors. We believe that modeling this interaction-dependent binding with linear regression can better explain the observed binding data. We assess this hypothesis based on the whole genome ChIP-on-chip data for Yeast. The derived interactions are largely consistent with previous results that combine ChIP-on-chip data with expression data. We additionally apply our method to determine interacting partners for CREB and validate our findings based on published experimental results.}, isbn = {978-3-540-48293-2}, url = {http://dx.doi.org/10.1007/978-3-540-48540-7_19}, author = {Wang,Li-San and Jensen,Shane and Hannenhalli, Sridhar}, editor = {Eskin,Eleazar and Ideker,Trey and Raphael,Ben and Workman,Christopher} } @article {16091, title = {Interactive sonification for blind people exploration of geo-referenced data: comparison between a keyboard-exploration and a haptic-exploration interfaces}, journal = {Cognitive Processing}, volume = {7}, year = {2006}, month = {2006///}, pages = {178 - 179}, isbn = {1612-4782}, url = {http://dx.doi.org/10.1007/s10339-006-0137-8}, author = {Delogu,Franco and Belardinelli,Marta and Palmiero,Massimiliano and Pasqualotto,Emanuele and Zhao,Haixia and Plaisant, Catherine and Federici,Stefano} } @article {18705, title = {Interdomain mobility in di-ubiquitin revealed by NMR}, journal = {Proteins: Structure, Function, and Bioinformatics}, volume = {63}, year = {2006}, month = {2006///}, pages = {787 - 796}, abstract = {Domain orientation and dynamics can play an essential role in the function of multidomain proteins. Lys48-linked polyubiquitin chains, the principal signal for proteasomal protein degradation, adopt a closed conformation at physiological conditions, in which the functionally important residues Leu8, Ile44, and Val70 are sequestered at the interdomain interface. This interface must open in order for these groups to become available for interactions with various chain-recognition factors. Knowledge of the mechanism of domain motion leading to the opening of the interdomain interface in polyubiquitin is, therefore, essential for the understanding of the processes controlling molecular recognition events in polyubiquitin signaling. Here we use NMR to characterize the interdomain dynamics that open the interface in a di-ubiquitin chain. This process occurs via domain reorientations on a 10-ns time scale and with the amplitudes that are sufficient for making functionally important hydrophobic residues in polyubiquitin available for direct interactions with various ubiquitin-binding factors. The analysis revealed the structures of the interconverting conformational states of di-ubiquitin and the rates and amplitudes of this process at near-physiological and acidic pH. The proposed mechanism of domain reorientation is quite general and could serve as a paradigm of interdomain mobility in other multidomain systems. Proteins 2006. 
{\textcopyright} 2006 Wiley-Liss, Inc.}, keywords = {anisotropic diffusion, domain motion, interdomain orientation, polyubiquitin, spin relaxation}, isbn = {1097-0134}, doi = {10.1002/prot.20917}, url = {http://onlinelibrary.wiley.com/doi/10.1002/prot.20917/abstract}, author = {Ryabov,Yaroslav and Fushman, David} } @article {14385, title = {An introduction to probabilistic graphical models for relational data}, journal = {Data Engineering Bulletin}, volume = {29}, year = {2006}, month = {2006///}, abstract = {We survey some of the recent work on probabilistic graphical models for relational data. The models that we describe are all based upon {\textquoteright}graphical models{\textquoteright} [12]. The models can capture statistical correlations among attributes within a single relational table, between attributes in different tables, and can capture certain structural properties, such as the expected size of a join between tables. These models can then be used for a variety of tasks including filling in missing values, data summarization and anomaly detection. Here we describe two complementary semantics for the models: one approach suited to making probabilistic statements about individuals and the second approach suited to making statements about frequencies in relational data. After describing the semantics, we briefly describe algorithms for automatically constructing the models from an existing (non-probabilistic) database. }, author = {Getoor, Lise} } @conference {12611, title = {Invariant Geometric Representation of 3D Point Clouds for Registration and Matching}, booktitle = {Image Processing, 2006 IEEE International Conference on}, year = {2006}, month = {2006/10//}, pages = {1209 - 1212}, abstract = {Though implicit representations of surfaces have often been used for various computer graphics tasks like modeling and morphing of objects, they have rarely been used for registration and matching of 3D point clouds. Unlike in graphics, where the goal is precise reconstruction, we use isosurfaces to derive a smooth and approximate representation of the underlying point cloud which helps in generalization. Implicit surfaces are generated using a variational interpolation technique. Implicit function values on a set of concentric spheres around the 3D point cloud of the object are used as features for matching. Geometric invariance is achieved by decomposing the implicit values based feature set into various spherical harmonics. The decomposition provides a compact representation of 3D point clouds while achieving rotation invariance}, keywords = {3D point cloud, computer graphics, geophysical signal processing, image matching, image reconstruction, image registration, image representation, implicit function value, interpolation, invariant geometric representation, variational interpolation technique}, doi = {10.1109/ICIP.2006.312542}, author = {Biswas,S. and Aggarwal,G. and Chellapa, Rama} } @article {14625, title = {Invited Talk: Deciphering Gene Regulatory Networks by in silico approaches}, journal = {6th International Workshop on Data Mining in Bioinformatics (BIOKDD06)}, year = {2006}, month = {2006///}, pages = {31 - 31}, abstract = {Biological processes are controlled at various levels in the cell and while these mechanisms are poorly understood, transcriptional control is widely recognized as an important component, a better understanding of which will provide an efficient means for the therapeutic intervention in disease processes. 
We have been focusing on various computational problems pertaining to transcriptional regulation, namely: (1) representation and identification of transcription factor binding sites, (2) PolII promoter prediction, (3) predicting interactions among transcription factors, and (4) transcriptional modeling, i.e., identifying arrangements of TFs that co-regulate a set of transcripts. I will present a brief overview of the computational approaches and challenges as well as a number of applications including transcriptional regulation in memory storage, heart failure, and osteoarthritis. }, author = {Hannenhalli, Sridhar} } @article {14979, title = {Isosurface Extraction and Spatial Filtering using Persistent Octree (POT)}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {12}, year = {2006}, month = {2006/10//sept}, pages = {1283 - 1290}, abstract = {We propose a novel persistent octree (POT) indexing structure for accelerating isosurface extraction and spatial filtering from volumetric data. This data structure efficiently handles a wide range of visualization problems such as the generation of view-dependent isosurfaces, ray tracing, and isocontour slicing for high dimensional data. POT can be viewed as a hybrid data structure between the interval tree and the branch-on-need octree (BONO) in the sense that it achieves the asymptotic bound of the interval tree for identifying the active cells corresponding to an isosurface and is more efficient than BONO for handling spatial queries. We encode a compact octree for each isovalue. Each such octree contains only the corresponding active cells, in such a way that the combined structure has linear space. The inherent hierarchical structure associated with the active cells enables very fast filtering of the active cells based on spatial constraints. We demonstrate the effectiveness of our approach by performing view-dependent isosurfacing on a wide variety of volumetric data sets and 4D isocontour slicing on the time-varying Richtmyer-Meshkov instability dataset.}, keywords = {4D isocontour slicing;Richtmyer-Meshkov instability dataset;branch-on-need octree;hybrid data structure;isosurface extraction;persistent octree;spatial filtering;data visualisation;database indexing;feature extraction;octrees;spatial data structures;Algorithms;Imaging, Three-Dimensional;Information Storage and Retrieval;User-Computer Interface;}, isbn = {1077-2626}, doi = {10.1109/TVCG.2006.157}, author = {Shi,Q. and JaJa, Joseph F.} } @article {18013, title = {Issues in writing a parallel compiler starting from a serial compiler}, year = {2006}, month = {2006///}, institution = {Draft technical report, University of Maryland Institute for Advanced Computer Studies}, author = {Tzannes,A. and Barua,R. and Caragea,G. and Vishkin, Uzi} } @conference {16099, title = {"I hear the pattern": interactive sonification of geographical data patterns}, booktitle = {CHI {\textquoteright}05 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {1905 - 1908}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Interactive sonification (non-speech sound) is a novel strategy to present the geographical distribution patterns of statistical data to vision impaired users. We discuss the design space with dimensions of interaction actions, data representation forms, input devices, navigation structures, and sound feedback encoding.
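(A pointer back to the POT entry above: the "active cell" test that such structures accelerate is stated below in toy form; this version scans linearly, whereas POT meets the interval-tree bound. Names and data are illustrative.)

# A cell intersects the isosurface iff its [min, max] scalar range contains
# the isovalue; POT/interval trees answer this without a full scan.
import numpy as np

def active_cells(cell_min, cell_max, isovalue):
    """Return indices of cells whose scalar range contains isovalue."""
    return np.nonzero((cell_min <= isovalue) & (isovalue <= cell_max))[0]

cell_min = np.array([0.0, 0.2, 0.5, 0.9])
cell_max = np.array([0.3, 0.6, 0.8, 1.0])
print(active_cells(cell_min, cell_max, 0.55))  # -> [1 2]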
Two interfaces were designed, one using a keyboard and another using a smooth surface touch tablet. A study with three blind users shows that they are able to perceive patterns of 5-category values on both familiar and unknown maps, and learn new map geography, in both interfaces.}, keywords = {auditory user interfaces, information seeking, sonification, universal usability, vision impairment}, isbn = {1-59593-002-7}, doi = {10.1145/1056808.1057052}, url = {http://doi.acm.org/10.1145/1056808.1057052}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben} } @conference {11897, title = {Identifying and segmenting human-motion for mobile robot navigation using alignment errors}, booktitle = {12th International Conference on Advanced Robotics, 2005. ICAR {\textquoteright}05. Proceedings}, year = {2005}, month = {2005/07//}, pages = {398 - 403}, publisher = {IEEE}, organization = {IEEE}, abstract = {This paper presents a new human-motion identification and segmentation algorithm for mobile robot platforms. The algorithm is based on computing the alignment error between pairs of object images acquired from a moving platform. Pairs of images generating relatively small alignment errors are used to estimate the fundamental frequency of the object{\textquoteright}s motion. A decision criterion is then used to test the significance of the estimated frequency and to classify the object{\textquoteright}s motion. To verify the validity of the proposed approach, experimental results are shown on different classes of objects.}, keywords = {Computer errors, Educational institutions, Frequency estimation, human-motion identification, human-motion segmentation, HUMANS, Image motion analysis, Image segmentation, mobile robot navigation, Mobile robots, Motion estimation, Navigation, Object detection, robot vision, SHAPE}, isbn = {0-7803-9178-0}, doi = {10.1109/ICAR.2005.1507441}, author = {Abd-Almageed, Wael and Burns,B. J and Davis, Larry S.} } @conference {13569, title = {Identifying Script on Word-Level with Informational Confidence}, booktitle = {8th Int. Conf. on Document Analysis and Recognition}, year = {2005}, month = {2005/08//}, pages = {416 - 420}, abstract = {In this paper, we present a multiple classifier system for script identification. Applying a Gabor filter analysis of textures at the word level, our system identifies Latin and non-Latin words in bilingual printed documents. The classifier system comprises four different architectures based on nearest neighbors, weighted Euclidean distances, Gaussian mixture models, and support vector machines. We report results for Arabic, Chinese, Hindi, and Korean script. Moreover, we show that combining informational confidence values using sum-rule can consistently outperform the best single recognition rate.}, author = {Jaeger,Stefan and Ma,Huanfeng and David Doermann} } @article {15155, title = {Identity-based zero-knowledge}, journal = {Security in Communication Networks}, year = {2005}, month = {2005///}, pages = {180 - 192}, abstract = {We introduce and define the notion of identity-based zero-knowledge, concentrating on the non-interactive setting. In this setting, our notion allows any prover to widely disseminate a proof of a statement while protecting the prover from plagiarism in the following sense: although proofs are transferable (i.e., publicly verifiable), they are also bound to the identity of the prover in a way which is recognizable to any verifier.
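(For the script-identification entry above: the sum-rule combination it reports is simple to state. This sketch fuses made-up per-classifier confidence values and is illustrative only.)

# Sum-rule fusion: add per-class confidences across classifiers, take argmax.
import numpy as np

# rows: classifiers (e.g. k-NN, weighted Euclidean, GMM, SVM); cols: classes
conf = np.array([[0.70, 0.30],
                 [0.60, 0.40],
                 [0.55, 0.45],
                 [0.80, 0.20]])
fused = conf.sum(axis=0)              # sum rule
predicted_class = int(np.argmax(fused))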
Furthermore, an adversary is unable to change this identity (i.e., to claim the proof as his own, or to otherwise change the authorship), unless he could have proved the statement on his own. While we view the primary contribution of this work as a formal definition of the above notion, we also explore the relation of this notion to that of non-malleable (non-interactive) zero-knowledge. On the one hand, we show that these two notions are incomparable: that is, there are proof systems which are non-malleable but not identity-based, and vice versa. On the other hand, we show that a proof system of either type essentially implies a proof system of the other type. }, doi = {10.1007/978-3-540-30598-9_13}, author = {Katz, Jonathan and Ostrovsky,R. and Rabin,M.} } @inbook {12677, title = {Image Sequence Stabilization, Mosaicking, and Superresolution}, booktitle = {Handbook of Image and Video Processing (Second Edition)}, year = {2005}, month = {2005///}, pages = {309-VII}, publisher = {Academic Press}, organization = {Academic Press}, address = {Burlington}, abstract = {A sequence of temporal images gathered from a single sensor adds a whole new dimension to two-dimensional (2D) image data. Availability of an image sequence permits the measurement of quantities such as subpixel intensities, camera motion and depth, and detection and tracking of moving objects. In turn, the processing of image sequences necessitates the development of sophisticated techniques to extract this information. With the recent availability of powerful yet inexpensive computers, data storage systems, and image acquisition devices, image sequence analysis has transitioned from an esoteric research domain to a practical area with significant commercial interest.}, isbn = {978-0-12-119792-6}, url = {http://www.sciencedirect.com/science/article/pii/B9780121197926500826}, author = {Chellapa, Rama and Srinivasan, S. and Aggarwal,G. and Veeraraghavan,A.}, editor = {Al Bovik} } @article {12660, title = {Image-based face recognition under illumination and pose variations}, journal = {Journal of the Optical Society of America A}, volume = {22}, year = {2005}, month = {2005/02/01/}, pages = {217 - 229}, abstract = {We present an image-based method for face recognition across different illuminations and poses, where the term image-based means that no explicit prior three-dimensional models are needed. As face recognition under illumination and pose variations involves three factors, namely, identity, illumination, and pose, generalizations in all these three factors are desired. We present a recognition approach that is able to generalize in the identity and illumination dimensions and handle a given set of poses. Specifically, the proposed approach derives an identity signature that is illumination- and pose-invariant, where the identity is tackled by means of subspace encoding, the illumination is characterized with a Lambertian reflectance model, and the given set of poses is treated as a whole.
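(A minimal sketch of the Lambertian reflectance model assumed in the face-recognition entry above: intensity is albedo times the half-rectified cosine between surface normal and light direction. All data here are toy values.)

import numpy as np

def lambertian(albedo, normals, light):
    """albedo: (H,W); normals: (H,W,3) unit normals; light: (3,) unit vector."""
    shading = np.clip(normals @ light, 0.0, None)   # half-rectified cosine
    return albedo * shading

albedo = np.ones((4, 4))
normals = np.zeros((4, 4, 3)); normals[..., 2] = 1.0   # flat patch facing +z
light = np.array([0.0, 0.0, 1.0])
image = lambertian(albedo, normals, light)             # fully lit: all ones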
Experimental results using the Pose, Illumination, and Expression (PIE) database demonstrate the effectiveness of the proposed approach.}, keywords = {illumination, pattern recognition}, doi = {10.1364/JOSAA.22.000217}, url = {http://josaa.osa.org/abstract.cfm?URI=josaa-22-2-217}, author = {Zhou,Shaohua Kevin and Chellapa, Rama} } @article {17221, title = {Image-Based Highly Interactive Web Mapping for Geo-Referenced Data Publishing (2002)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {This paper describes an image-based technique that enables highly interactive Web choropleth maps for geo-referenced data publishing and visual exploration. Geographic knowledge is encoded into raster images and delivered to the client, instead of in vector formats. Differing from traditional raster-image-based approaches that are static and allow very little user interaction, it allows a variety of sub-second fine-grained interface controls such as dynamic query, dynamic classification, geographic object data identification, user setting adjusting, as well as turning on/off layers, panning and zooming, with no or minimal server support. Compared to Web GIS approaches that are based on vector geographic data, this technique has the features of short initial download time, near-constant performance scalability for larger numbers of geographic objects, and download-map-segment-only-when-necessary which potentially reduces the overall data transfer over the network. As a result, it accommodates general public users with slow modem network connections and low-end machines, as well as users with fast T-1 connections and fast machines. The client-side (browser) is implemented as light-weight Java applets. YMap, an easy-to-use, user-task-oriented highly interactive mapping tool prototype for visual georeferenced data exploration is implemented using this technique.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6498}, author = {Zhao,Haixia and Shneiderman, Ben} } @article {16107, title = {Immediate Usability: A Case Study of Public Access Design for a Community Photo Library (2003)}, year = {2005}, month = {2005///}, abstract = {This paper describes a novel instantiation of a digital photo library in a public access system. It demonstrates how designers can utilize characteristics of a target user community (social constraints, trust, and a lack of anonymity) to provide capabilities that would be impractical in other types of public access systems. It also presents a compact set of design principles and guidelines for ensuring the immediate usability of public access information systems. These principles and guidelines were derived from our experience developing PhotoFinder Kiosk, a community photo library. Attendees of a major HCI conference (CHI 2001 Conference on Human Factors in Computing Systems) successfully used the tool to browse and annotate collections of photographs spanning 20 years of HCI-related conferences, producing a richly annotated photo history of the field of human-computer interaction. Observations and log data were used to evaluate the tool and develop the guidelines.
They provide specific guidance for practitioners, as well as a useful framework for additional research in public access interfaces.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6504}, author = {Kules,Bill and Kang,Hyunmo and Plaisant, Catherine and Rose,Anne and Shneiderman, Ben} } @article {16106, title = {Immediate Usability: Kiosk design principles from the CHI 2001 Photo Library (2001)}, year = {2005}, month = {2005///}, abstract = {This paper describes a novel set of design principles and guidelines for ensuring the immediate usability of public access systems. These principles and guidelines were formulated while developing PhotoFinder Kiosk, a community photo library. Attendees of CHI 2001 successfully used the tool to browse and annotate collections of photographs spanning 20 years of CHI and related conferences, producing a richly annotated photo history of the field of human-computer interaction. We used observations and log data to evaluate the tool and refine the guidelines. They provide specific guidance for practitioners, as well as a useful framework for additional research in public access interfaces.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6488}, author = {Kules,Bill and Kang,Hyunmo and Plaisant, Catherine and Rose,Anne and Shneiderman, Ben} } @conference {18575, title = {Implications of autonomy for the expressiveness of policy routing}, booktitle = {Proceedings of the 2005 conference on Applications, technologies, architectures, and protocols for computer communications}, series = {SIGCOMM {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {25 - 36}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Thousands of competing autonomous systems must cooperate with each other to provide global Internet connectivity. Each autonomous system (AS) encodes various economic, business, and performance decisions in its routing policy. The current interdomain routing system enables each AS to express policy using rankings that determine how each router in the AS chooses among different routes to a destination, and filters that determine which routes are hidden from each neighboring AS. Because the Internet is composed of many independent, competing networks, the interdomain routing system should provide autonomy, allowing network operators to set their rankings independently, and to have no constraints on allowed filters. This paper studies routing protocol stability under these conditions. We first demonstrate that certain rankings that are commonly used in practice may not ensure routing stability. We then prove that, when providers can set rankings and filters autonomously, guaranteeing that the routing system will converge to a stable path assignment essentially requires ASes to rank routes based on AS-path lengths.
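(A toy rendering of the stability condition in the SIGCOMM entry above: when every AS ranks routes by AS-path length, selection among permitted routes is a simple minimum. The route representation and filter model here are illustrative, not the paper's formalism.)

def best_route(routes, filters=frozenset()):
    """routes: AS paths as tuples of AS numbers; filters: hidden next hops."""
    permitted = [r for r in routes if r[0] not in filters]
    return min(permitted, key=len, default=None)   # shortest-AS-path ranking

routes = [(3, 7, 9), (5, 9), (2, 4, 6, 9)]
print(best_route(routes))                 # (5, 9)
print(best_route(routes, filters={5}))    # (3, 7, 9)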
We discuss the implications of these results for the future of interdomain routing.}, keywords = {autonomy, BGP, Internet, policy, protocol, Routing, Safety, stability}, isbn = {1-59593-009-4}, doi = {10.1145/1080091.1080096}, url = {http://doi.acm.org/10.1145/1080091.1080096}, author = {Feamster, Nick and Johari,Ramesh and Balakrishnan,Hari} } @article {15555, title = {Improved approximation bounds for planar point pattern matching}, journal = {Algorithms and Data Structures}, year = {2005}, month = {2005///}, pages = {432 - 443}, abstract = {We consider the well known problem of matching two planar point sets under rigid transformations so as to minimize the directed Hausdorff distance. This is a well studied problem in computational geometry. Goodrich, Mitchell, and Orletsky [GMO94] presented a very simple approximation algorithm for this problem, which computes transformations based on aligning pairs of points. They showed that their algorithm achieves an approximation ratio of 4. We consider a minor modification to their algorithm, which is based on aligning midpoints rather than endpoints. We show that this algorithm achieves an approximation ratio not greater than 3.13. Our analysis is sensitive to the ratio between the diameter of the pattern set and the Hausdorff distance, and we show that the approximation ratio approaches 3 in the limit. Finally, we provide lower bounds that show that our approximation bounds are nearly tight.}, doi = {10.1007/11534273_38}, author = {Cho,M. and Mount, Dave} } @article {15180, title = {Improved efficiency for CCA-secure cryptosystems built using identity-based encryption}, journal = {Topics in Cryptology{\textendash}CT-RSA 2005}, year = {2005}, month = {2005///}, pages = {87 - 103}, abstract = {Recently, Canetti, Halevi, and Katz showed a general method for constructing CCA-secure encryption schemes from identity-based encryption schemes in the standard model. We improve the efficiency of their construction, and show two specific instantiations of our resulting scheme which offer the most efficient encryption (and, in one case, key generation) of any CCA-secure encryption scheme to date.}, doi = {10.1007/978-3-540-30574-3_8}, author = {Boneh,D. and Katz, Jonathan} } @conference {16676, title = {Improved HMM alignment models for languages with scarce resources}, booktitle = {Proceedings of the ACL Workshop on Building and Using Parallel Texts}, year = {2005}, month = {2005///}, pages = {83 - 86}, author = {Lopez,A. and Resnik, Philip} } @article {17223, title = {Improving Accessibility and Usability of Geo-referenced Statistical Data (2003)}, year = {2005}, month = {2005///}, abstract = {Several technology breakthroughs are needed to achieve the goals of universal accessibility and usability. These goals are especially challenging in the case of geo-referenced statistical data that many U.S. government agencies supply. We present technical and user-interface design challenges in accommodating users with low-end technology (slow network connection and low-end machine) and users who are blind or vision-impaired. Our solutions are presented and future work is discussed.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6502}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben} } @conference {18227, title = {Improving collusion resistance of error correcting code based multimedia fingerprinting}, booktitle = {Acoustics, Speech, and Signal Processing, 2005. Proceedings. (ICASSP {\textquoteright}05). 
IEEE International Conference on}, volume = {2}, year = {2005}, month = {2005/03//}, pages = {ii/1029 - ii/1032 Vol. 2}, abstract = {Digital fingerprinting protects multimedia content from illegal redistribution by uniquely marking copies of the content distributed to each user. Collusion is a powerful attack whereby several differently fingerprinted copies of the same content are combined together to attenuate or remove the fingerprints. Focusing on the error correction code (ECC) based fingerprinting, we explore in this paper new avenues that can substantially improve its collusion resistance, and in the meantime retain its advantages in detection complexity and fast distribution. Our analysis suggests a great need of jointly considering the coding, embedding, and detection issues, and inspires the proposed technique of permuted subsegment embedding that is able to substantially improve the collusion resistance of ECC based fingerprinting.}, keywords = {collusion resistance; data embedding; digital fingerprinting; digital signatures; ECC based fingerprinting; error correcting codes; error correction coding; multimedia systems; permuted subsegment embedding; security of data}, doi = {10.1109/ICASSP.2005.1415583}, author = {He,Shan and M. Wu} } @article {17228, title = {Improving Web-based Civic Information Access: A Case Study of the 50 US States (2002)}, year = {2005}, month = {2005///}, abstract = {An analysis of the home pages of all fifty U. S. states reveals great variety in key design features that influence efficacy. Some states had excessively large byte counts that would slow users connected by commonly-used 56K modems. Many web sites had low numbers of or poorly organized links that would make it hard for citizens to find what they were interested in. Features such as search boxes, privacy policies, online help, or contact information need to be added by several states. Our analysis concludes with ten recommendations and finds many further opportunities for individual states to improve their websites. However, still greater benefits will come through collaboration among the states that would lead to consistency, appropriate tagging, and common tools.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6494}, author = {Ceaparu,Irina and Shneiderman, Ben} } @article {16881, title = {Indexing issues in supporting similarity searching}, journal = {Advances in Multimedia Information Processing-PCM 2004}, year = {2005}, month = {2005///}, pages = {463 - 470}, abstract = {Indexing issues that arise in the support of similarity searching are presented.
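(A toy sketch of the permuted subsegment embedding idea from the ECC fingerprinting entry above: each codeword segment is split into subsegments that are shuffled under a secret key before embedding. The segment size, key handling, and data are illustrative stand-ins, not the authors' construction.)

import numpy as np

def permute_subsegments(segment, n_sub, key):
    """Split one fingerprint segment into n_sub pieces; shuffle with a keyed PRNG."""
    pieces = np.array_split(segment, n_sub)
    order = np.random.default_rng(key).permutation(n_sub)
    return np.concatenate([pieces[i] for i in order])

fp = np.arange(12)                      # stand-in for one ECC segment's symbols
scrambled = permute_subsegments(fp, n_sub=4, key=2005)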
This includes a discussion of the curse of dimensionality, as well as multidimensional indexing, distance-based indexing, dimension reduction, and embedding methods.}, doi = {10.1007/978-3-540-30542-2_57}, author = {Samet, Hanan} } @article {13051, title = {Induction of Word and Phrase Alignments for Automatic Document Summarization}, journal = {Computational Linguistics}, volume = {31}, year = {2005}, month = {2005///}, pages = {505 - 530}, isbn = {0891-2017, 1530-9312}, doi = {10.1162/089120105775299140}, url = {http://dl.acm.org/citation.cfm?id=1110825.1110829}, author = {Daum{\'e}, Hal and Marcu,Daniel} } @article {18908, title = {Inferencing in Support of Active Templates}, year = {2005}, month = {2005/02//}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {The primary accomplishments of this project include the following: (1) HICAP is a general purpose planning architecture to assist military commanders with planning NEOs (Noncombatant Evacuation Operations). HICAP integrates a hierarchical task editor with a planning tool. Military planners select a task to decompose in the task editor and then use the planning tool to interactively refine it into an operational plan. (2) SHOP and SHOP2 are some simple, practical planning tools based on HTN planning. Their practical utility is shown by the emergence of an active set of users, which include government laboratories, industrial R\&D projects, and academic settings. SHOP2 received one of the top four awards in the 2002 International Planning Competition. (3) The Air Force Research Laboratory{\textquoteright}s Causal Analysis Tool (CAT) is a system for creating and analyzing causal models similar to Bayesian networks. We have enhanced CAT by developing an approach to quickly generate plans that have high probabilities of success.}, keywords = {*ARTILLERY AMMUNITION, *HIGH CAPACITY PROJECTILES, AMMUNITION AND EXPLOSIVES, BAYES THEOREM, COALITION, Collaboration, Computer architecture, COMPUTER HARDWARE, Computer networks, HICAP(HIGH CAPACITY ARTILLERY PROJECTILES), MILITARY COMMANDERS, NEO(NONCOMBATANT EVACUATION OPERATIONS), NONCOMBATANT., PE63760E, planning, probability, PROJECTILE TRAJECTORIES, TASK SUPPORT, WUAFRLATEMPO02}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA431023}, author = {Nau, Dana S.} } @conference {18774, title = {Information and Knowledge Modeling for Computer Supported Micro Electro-Mechanical Systems Design and Development}, year = {2005}, month = {2005///}, abstract = {In this paper, we present a preliminary research effort towards an effective computer support environment for the design and development of micro-electro-mechanical systems (MEMS). We first identify the characteristics of MEMS product design and development processes and examine the state-of-the-art of MEMS Computer-aided Design (CAD) and simulation systems. We then propose a function-(environment-effect)-behavior-(principle-state)-form (FEEBPSF) framework based on the NIST core product model and its extensions for modeling MEMS products, and apply the OESM (open embedded system model) developed to model information and knowledge for embedded MEMS design and development.
Moreover, in order to tackle the knowledge-intensive tasks of design and development for MEMS products, we develop a general and flexible knowledge repository, called KR-MEMS, based on the FEEBPSF framework and the Unified Modeling Language (UML)/Extensible Markup Language (XML) model, and integrate KR-MEMS into a web-based MEMS design support system. Throughout the paper, a microfluidic dispensing device (a biomedical device for drug-delivery) is used as an example to illustrate the concepts, models, and knowledge bases necessary to support the MEMS product design and development. }, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.160.7822\&rep=rep1\&type=pdf}, author = {Zha,X. F. and Sriram,R.D. and Gupta,S.K.} } @article {18910, title = {Information gathering during planning for Web Service composition}, journal = {Web Semantics: Science, Services and Agents on the World Wide Web}, volume = {3}, year = {2005}, month = {2005/10//}, pages = {183 - 205}, abstract = {Hierarchical task network (HTN) based planning techniques have been applied to the problem of composing Web Services, especially when described using the OWL-S service ontologies. Many of the existing Web Services are either exclusively information providing or crucially depend on information-providing services. Thus, many interesting service compositions involve collecting information either during execution or during the composition process itself. In this paper, we focus on the latter issue. In particular, we present ENQUIRER, an HTN-planning algorithm designed for planning domains in which the information about the initial state of the world may not be complete, but it is discoverable through plan-time information-gathering queries. We have shown that ENQUIRER is sound and complete, and derived several mathematical relationships among the amount of available information, the likelihood of the planner finding a plan, and the quality of the plan found. We have performed experimental tests that confirmed our theoretical results and that demonstrated how ENQUIRER can be used for Web Service composition.}, keywords = {HTN planning, Information gathering, Web Service composition}, isbn = {1570-8268}, doi = {10.1016/j.websem.2005.07.001}, url = {http://www.sciencedirect.com/science/article/pii/S1570826805000168}, author = {Kuter,Ugur and Sirin,Evren and Parsia,Bijan and Nau, Dana S. and Hendler,James} } @inbook {16103, title = {Information Visualization and the Challenge of Universal Usability}, booktitle = {Exploring Geovisualization}, year = {2005}, month = {2005///}, pages = {53 - 82}, publisher = {Elsevier}, organization = {Elsevier}, address = {Oxford}, abstract = {Information Visualization aims to provide compact graphical presentations and user interfaces for interactively manipulating large numbers of items. We present a simple {\textquotedblleft}data by tasks taxonomy{\textquotedblright} and then discuss the challenges of providing universal usability, with example applications using geo-referenced data. Information Visualization has been shown to be a powerful visual thinking or decision tool but it is becoming important for services to reach and empower every citizen. Technological advances are needed to deal with user diversity (age, language, disabilities, etc.) but also with the variety of technology used (screen size, network speed, etc.)
and the gaps in user{\textquoteright}s knowledge (general knowledge, knowledge of the application domain, of the interface syntax or semantics). We present examples that illustrate how those challenges can be addressed.}, isbn = {978-0-08-044531-1}, url = {http://www.sciencedirect.com/science/article/pii/B9780080445311504218}, author = {Plaisant, Catherine}, editor = {Jason Dykes and Alan M. MacEachren and Menno-Jan Kraak} } @conference {12803, title = {An integrated framework for scenarios and state machines}, booktitle = {Integrated Formal Methods}, year = {2005}, month = {2005///}, pages = {366 - 385}, author = {Sengupta,B. and Cleaveland, Rance} } @conference {14224, title = {Integration of motion fields through shape}, booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2005. CVPR 2005}, volume = {2}, year = {2005}, month = {2005/06/20/25}, pages = {663 - 669 vol. 2}, publisher = {IEEE}, organization = {IEEE}, abstract = {Structure from motion from single flow fields has been studied intensively, but the integration of information from multiple flow fields has not received much attention. Here we address this problem by enforcing constraints on the shape (surface normals) of the scene in view, as opposed to constraints on the structure (depth). The advantage of integrating shape is two-fold. First, we do not need to estimate feature correspondences over multiple frames, but we only need to match patches. Second, the shape vectors in the different views are related only by rotation. This constraint on shape can be combined easily with motion estimation, thus formulating motion and structure estimation from multiple views as a practical constrained minimization problem using a rank-3 constraint. Based on this constraint, we develop a 3D motion technique, which locates, through color and motion segmentation, planar patches in the scene, matches patches over multiple frames, and estimates the motion between multiple frames and the shape of the selected scene patches using the image gradients. Experiments evaluate the accuracy of the 3D motion estimation and demonstrate the motion and shape estimation of the technique by super-resolving an image sequence.}, keywords = {3D motion estimation, Automation, CAMERAS, computational geometry, Computer vision, constrained minimization problem, decoupling translation from rotation, Educational institutions, image colour analysis, image gradients, image resolution, Image segmentation, image sequence, Image sequences, integration of motion fields, Layout, minimisation, Motion estimation, motion field integration, motion segmentation, parameter estimation, planar patches, rank-3 constraint, scene patches, SHAPE, shape and rotation, shape estimation, structure estimation}, isbn = {0-7695-2372-2}, doi = {10.1109/CVPR.2005.190}, author = {Ji,H. and Ferm{\"u}ller, Cornelia} } @article {18163, title = {An interactive and team approach to multimedia design curriculum}, journal = {Signal Processing Magazine, IEEE}, volume = {22}, year = {2005}, month = {2005/11//}, pages = {14 - 19}, abstract = {Over the past decade, increasingly powerful technologies have made it easier to compress, distribute, and store multimedia content.
The merger of computing and communications has created a ubiquitous infrastructure that brings digital multimedia closer to the users and opens up tremendous educational and commercial opportunities in multimedia content creation, delivery, rendering, and archiving for millions of users worldwide. Multimedia has become a basic skill demanded by an increasing number of potential jobs for electrical engineering/computer science graduates. In this article, the authors intend to share their experiences and new ways of thinking about curriculum development. It is beneficial for colleagues in the multimedia signal processing areas for use in developing or revising the curriculum to fit the needs and resources of their own programs.}, keywords = {curriculum design; curriculum development; digital communication; educational courses; interactive learning; multimedia education; multimedia signal processing; team approach}, isbn = {1053-5888}, doi = {10.1109/MSP.2005.1550186}, author = {M. Wu and Liu,K. J.R} } @article {17247, title = {Interactive Color Mosaic and Dendrogram Displays for Signal/Noise Optimization in Microarray Data Analysis (2003)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {Data analysis and visualization is strongly influenced by noise and noise filters. There are multiple sources of noise in microarray data analysis, but signal/noise ratios are rarely optimized, or even considered. Here, we report a noise analysis of a novel 13 million oligonucleotide dataset - 25 human U133A (~500,000 features) profiles of patient muscle biopsies. We use our recently described interactive visualization tool, the Hierarchical Clustering Explorer (HCE) to systematically address the effect of different noise filters on resolution of arrays into correct biological groups (unsupervised clustering into three patient groups of known diagnosis). We varied probe set interpretation methods (MAS 5.0, RMA), present call filters, and clustering linkage methods, and investigated the results in HCE. HCE interactive features enabled us to quickly see the impact of these three variables. Dendrogram displays showed the clustering results systematically, and color mosaic displays provided a visual support for the results. We show that each of these three variables has a strong effect on unsupervised clustering. For this dataset, the strength of the biological variable was maximized, and noise minimized, using MAS 5.0, 10\% present call filter, and Average Group Linkage. We propose a general method of using interactive tools to identify the optimal signal/noise balance or the optimal combination of these three variables to maximize the effect of the desired biological variable on data interpretation.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6506}, author = {Seo,Jinwook and Bakay,Marina and Zhao,Po and Chen,Yi-Wen and Clarkson,Priscilla and Shneiderman, Ben and Hoffman,Eric P} } @article {17251, title = {Interactive Exploration of Multidimensional Microarray Data: Scatterplot Ordering, Gene Ontology Browser, and Profile Search (2003)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {Multidimensional data sets are common in many research areas, including microarray experiment data sets. Genome researchers are using cluster analysis to find meaningful groups in microarray data.
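(One of the three variables studied in the HCE noise-analysis entry above, the linkage method, is easy to demonstrate. This sketch clusters synthetic expression profiles with average linkage via SciPy; the data and the three-group cut are illustrative, not the paper's pipeline.)

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(1)
profiles = np.vstack([rng.normal(0, 1, (10, 25)),    # three synthetic groups
                      rng.normal(3, 1, (8, 25)),
                      rng.normal(-3, 1, (7, 25))])
Z = linkage(profiles, method="average")              # Average Group Linkage
labels = fcluster(Z, t=3, criterion="maxclust")      # cut into 3 groups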
However, the high dimensionality of the data sets hinders users from finding interesting patterns, clusters, and outliers. Determining the biological significance of such features remains problematic due to the difficulties of integrating biological knowledge. In addition, it is not efficient to perform a cluster analysis over the whole data set in cases where researchers know the approximate temporal pattern of the gene expression that they are seeking.

To address these problems, we add three new features to the Hierarchical Clustering Explorer (HCE): (1) scatterplot ordering methods so that all 2D projections of a high dimensional data set can be ordered according to relevant criteria, (2) a gene ontology browser, coupled with clustering results so that known gene functions within a cluster can be easily studied, (3) a profile search so that genes with a certain temporal pattern can be easily identified.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6528}, author = {Seo,Jinwook and Shneiderman, Ben} } @article {17255, title = {Interactive pattern search in time series}, journal = {Proceedings of SPIE}, volume = {5669}, year = {2005}, month = {2005/03/11/}, pages = {175 - 186}, abstract = {The need for pattern discovery in long time series data led researchers to develop algorithms for similarity search. Most of the literature about time series focuses on algorithms that index time series and bring the data into the main storage, thus providing fast information retrieval on large time series. This paper reviews the state of the art in visualizing time series, and focuses on techniques that enable users to visually and interactively query time series. Then, it presents TimeSearcher 2, a tool that enables users to explore multidimensional data using synchronized tables and graphs with overview+detail, filter the time series data to reduce the scope of the search, select an existing pattern to find similar occurrences, and interactively adjust similarity parameters to narrow the result set. This tool is an extension of previous work, TimeSearcher 1, which uses graphical timeboxes to interactively query time series data.}, isbn = {0277786X}, doi = {doi:10.1117/12.587537}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/5669/1/175_1?isAuthorized=no}, author = {Buono,Paolo and Aris,Aleks and Plaisant, Catherine and Khella,Amir and Shneiderman, Ben} } @article {17256, title = {Interactive Pattern Search in Time Series (2004)}, year = {2005}, month = {2005///}, abstract = {The need for pattern discovery in long time series data led researchers to develop algorithms for similarity search. Most of the literature about time series focuses on algorithms that index time series and bring the data into the main storage, thus providing fast information retrieval on large time series. This paper reviews the state of the art in visualizing time series, and focuses on techniques that enable users to interactively query time series. Then it presents TimeSearcher 2, a tool that enables users to explore multidimensional data using coordinated tables and graphs with overview+detail, filter the time series data to reduce the scope of the search, select an existing pattern to find similar occurrences, and interactively adjust similarity parameters to narrow the result set. This tool is an extension of previous work, TimeSearcher 1, which uses graphical timeboxes to interactively query time series data.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6519}, author = {Buono,Paolo and Aris,Aleks and Plaisant, Catherine and Khella,Amir and Shneiderman, Ben} } @article {17258, title = {Interactive sonification of choropleth maps}, journal = {IEEE Multimedia}, volume = {12}, year = {2005}, month = {2005/06//April}, pages = {26 - 35}, abstract = {Auditory information is an important channel for the visually impaired. 
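(The query-by-example search behind the TimeSearcher 2 entries above reduces, in toy form, to a sliding-window distance scan with a user-tunable tolerance. The window metric and data here are illustrative.)

import numpy as np

def find_similar(series, pattern, tol):
    """Return start indices of windows within Euclidean distance tol of pattern."""
    w = len(pattern)
    dists = np.array([np.linalg.norm(series[i:i + w] - pattern)
                      for i in range(len(series) - w + 1)])
    return np.nonzero(dists <= tol)[0]

t = np.linspace(0, 8 * np.pi, 400)
series = np.sin(t)
pattern = series[50:80]                       # select an existing pattern
print(find_similar(series, pattern, tol=0.5)[:5])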
Effective sonification (the use of non-speech audio to convey information) promotes equal working opportunities for people with vision impairments by helping them explore data collections for problem solving and decision making. Interactive sonification systems can make georeferenced data accessible to people with vision impairments. The authors compare methods for using sound to encode georeferenced data patterns and for navigating maps.}, keywords = {audio signal processing, audio user interfaces, Auditory (non-speech) feedback, auditory information, cartography, choropleth maps, data collections, decision making, Evaluation, Feedback, georeferenced data, Guidelines, handicapped aids, Hardware, HUMANS, information resources, interaction style, Interactive sonification, interactive systems, Navigation, nonspeech audio, problem solving, Problem-solving, sound, universal usability, US Government, User interfaces, vision impairments, World Wide Web}, isbn = {1070-986X}, doi = {10.1109/MMUL.2005.28}, author = {Zhao,Haixia and Smith,B. K and Norman,K. and Plaisant, Catherine and Shneiderman, Ben} } @conference {13920, title = {The International Children{\textquoteright}s Digital Library: A Case Study in Designing for a Multilingual, Multi-Cultural, Multi-Generational Audience}, booktitle = {Information Technology and Libraries}, year = {2005}, month = {2005///}, author = {Hutchinson,H.B. and Rose,A. and Bederson, Benjamin B. and Weeks,A. C and Druin, Allison} } @article {12675, title = {Interpretation of state sequences in HMM for activity representation}, journal = {Proc. IEEE Conf. Acoustic Speech and Signal Processing}, volume = {2}, year = {2005}, month = {2005///}, pages = {709 - 712}, abstract = {We propose a method for activity representation based on semantic events, using the HMM framework. For every time instant, the probability of event occurrence is computed by exploring a subset of state sequences. The idea is that while activity trajectories may have large variations at the data or the state levels, they may exhibit similarities at the event level. Our experiments show the application of these events to activity recognition in an office environment and to anomalous trajectory detection using surveillance video data. }, author = {Cuntoor, N.P. and Yegnanarayana,B. and Chellapa, Rama} } @article {16095, title = {InterSon: Interactive Sonification for Geo-referenced Data Exploration for the Vision Impaired}, journal = {Tech Report HCIL-2005-13}, year = {2005}, month = {2005/05//}, abstract = {InterSon is an interactive sonification tool that allows vision impaired users to explore complex geo-referenced statistical data for fact finding, problem solving and decision making. Examples include maps of population density, crime rates or housing prices. The integrated use of sounds and speech allows users to hear the overall distribution of values on maps and to explore the map to get more details. Users can use the standard computer keyboard, or take advantage of special devices such as a touchpad when they are available. Synchronized auditory and visual displays allow the use of residual vision and facilitate collaboration with sighted colleagues.}, author = {Zhao,H.
and Plaisant, Catherine} } @article {14359, title = {Introduction to the special issue on link mining}, journal = {ACM SIGKDD Explorations Newsletter}, volume = {7}, year = {2005}, month = {2005/12//}, pages = {1 - 2}, abstract = {An emerging challenge for data mining is the problem of mining richly structured datasets, where the objects are linked in some way. Many real-world datasets describe a variety of entity types linked via multiple types of relations. These links provide additional context that can be helpful for many data mining tasks. Yet multi-relational data violates the traditional assumption of independent, identically distributed data instances that provides the basis for many statistical machine learning algorithms. Therefore, new approaches are needed that can exploit the dependencies across the attribute and link structure.}, isbn = {1931-0145}, doi = {10.1145/1117454.1117455}, url = {http://doi.acm.org/10.1145/1117454.1117455}, author = {Getoor, Lise and Diehl,Christopher P.} } @article {16017, title = {Introduction to the special review issue}, journal = {Artificial Intelligence}, volume = {169}, year = {2005}, month = {2005///}, pages = {103 - 103}, author = {Perlis, Don and Norvig,P.} } @article {17266, title = {Inventing Discovery Tools: Combining Information Visualization with Data Mining (2001)}, year = {2005}, month = {2005///}, abstract = {The growing use of information visualization tools and data mining algorithms stems from two separate lines of research. Information visualization researchers believe in the importance of giving users an overview and insight into the data distributions, while data mining researchers believe that statistical algorithms and machine learning can be relied on to find the interesting patterns. This paper discusses two issues that influence design of discovery tools: statistical algorithms vs. visual data presentation, and hypothesis testing vs. exploratory data analysis. I claim that a combined approach could lead to novel discovery tools that preserve user control, enable more effective exploration, and promote responsibility.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6484}, author = {Shneiderman, Ben} } @conference {16098, title = {iSonic: interactive sonification for non-visual data exploration}, booktitle = {Proceedings of the 7th international ACM SIGACCESS conference on Computers and accessibility}, series = {Assets {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {194 - 195}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {iSonic is an interactive sonification tool for vision impaired users to explore geo-referenced statistical data, such as population or crime rates by geographical regions. Users use a keyboard or a smooth surface touchpad to interact with coordinated map and table views of the data. The integrated use of musical sounds and speech allows users to grasp the overall data trends and to explore the data to get more details. Scenarios of use are described.}, keywords = {auditory user interfaces, information seeking, sonification, universal usability, vision impairment}, isbn = {1-59593-159-7}, doi = {10.1145/1090785.1090826}, url = {http://doi.acm.org/10.1145/1090785.1090826}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben} } @inbook {12141, title = {Iterative enhancement: a practical technique for software development}, booktitle = {Foundations of Empirical Software Engineering: The Legacy of Victor R. 
Basili}, volume = {1}, year = {2005}, month = {2005///}, pages = {28 - 28}, isbn = {9783540245476}, author = {Basili, Victor R. and Turner,A. J} } @conference {13805, title = {Iterative translation disambiguation for cross-language information retrieval}, booktitle = {Proceedings of the 28th annual international ACM SIGIR conference on Research and development in information retrieval}, series = {SIGIR {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {520 - 527}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Finding a proper distribution of translation probabilities is one of the most important factors impacting the effectiveness of a cross-language information retrieval system. In this paper we present a new approach that computes translation probabilities for a given query by using only a bilingual dictionary and a monolingual corpus in the target language. The algorithm combines term association measures with an iterative machine learning approach based on expectation maximization. Our approach considers only pairs of translation candidates and is therefore less sensitive to data-sparseness issues than approaches using higher n-grams. The learned translation probabilities are used as query term weights and integrated into a vector-space retrieval system. Results for English-German cross-lingual retrieval show substantial improvements over a baseline using dictionary lookup without term weighting.}, keywords = {cross-language retrieval, query formulation, term co-occurrence measures, term weighting, translation disambiguation}, isbn = {1-59593-034-5}, doi = {10.1145/1076034.1076123}, url = {http://doi.acm.org/10.1145/1076034.1076123}, author = {Monz,Christof and Dorr, Bonnie J} } @inbook {13787, title = {iCLEF 2003 at Maryland: Translation Selection and Document Selection}, booktitle = {Comparative Evaluation of Multilingual Information Access Systems}, series = {Lecture Notes in Computer Science}, volume = {3237}, year = {2004}, month = {2004///}, pages = {231 - 265}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Maryland performed two sets of experiments for the 2003 Cross-Language Evaluation Forum{\textquoteright}s interactive track, one focused on interactive selection of appropriate translations for query terms, the second focused on interactive selection of relevant documents. Translation selection was supported using possible synonyms discovered through back translation and two techniques for generating KeyWord In Context (KWIC) examples of usage. The results indicate that searchers typically achieved a similar search effectiveness using fewer query iterations when interactive translation selection was available. For document selection, a complete extract of the first 40 words of each news story was compared to a compressed extract generated using an automated parse-and-trim approach that approximates one way in which people can produce headlines.
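(A compressed, illustrative sketch of the iterative disambiguation idea in the SIGIR entry above: candidate translation weights are re-estimated from pairwise association with the weighted candidates of the other query terms until they settle. The assoc table and vocabulary below are invented toy data, not the paper's corpus statistics.)

candidates = {"bank": ["Bank", "Ufer"], "account": ["Konto", "Bericht"]}
assoc = {("Bank", "Konto"): 0.9, ("Bank", "Bericht"): 0.1,
         ("Ufer", "Konto"): 0.05, ("Ufer", "Bericht"): 0.2}

w = {c: 1.0 / len(cs) for cs in candidates.values() for c in cs}  # uniform start
for _ in range(10):                     # EM-style fixed-point updates
    new_w = {}
    for term, cs in candidates.items():
        others = [o for t, ocs in candidates.items() if t != term for o in ocs]
        for c in cs:
            new_w[c] = sum(assoc.get((c, o), assoc.get((o, c), 0.0)) * w[o]
                           for o in others)
        z = sum(new_w[c] for c in cs) or 1.0
        for c in cs:
            new_w[c] /= z               # normalize per source term
    w = new_w
# w now concentrates on the mutually coherent pair ("Bank", "Konto")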
The results indicate that compressed {\textquotedblleft}headlines{\textquotedblright} result in faster assessment, but with a 20\% relative reduction in the F (α = 0.8) search effectiveness measure.}, isbn = {978-3-540-24017-4}, url = {http://dx.doi.org/10.1007/978-3-540-30222-3_42}, author = {Dorr, Bonnie J and He,Daqing and Luo,Jun and Oard, Douglas and Schwartz,Richard and Wang,Jianqiang and Zajic, David}, editor = {Peters,Carol and Gonzalo,Julio and Braschler,Martin and Kluck,Michael} } @article {15875, title = {iCLEF 2003 at Maryland: Translation selection and document selection}, journal = {Comparative Evaluation of Multilingual Information Access Systems}, year = {2004}, month = {2004///}, pages = {231 - 265}, author = {Dorr, Bonnie J and He,D. and Luo,J. and Oard, Douglas and Schwartz,R. and Wang,J. and Zajic, David} } @conference {15842, title = {iCLEF 2004 at Maryland: Summarization design for interactive cross-language question answering}, booktitle = {Proceeding of Cross Language Evaluation Forum (CLEF2004)}, year = {2004}, month = {2004///}, author = {He,D. and Wang,J. and Luo,J. and Oard, Douglas} } @conference {18344, title = {IDeixis: image-based Deixis for finding location-based information}, booktitle = {CHI {\textquoteright}04 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {781 - 782}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We demonstrate an image-based approach to specifying location and finding location-based information from camera-equipped mobile devices. We introduce a point-by-photograph paradigm, where users can specify a location simply by taking pictures. Our technique uses content-based image retrieval methods to search the web or other databases for matching images and their source pages to find relevant location-based information. In contrast to conventional approaches to location detection, our method can refer to distant locations and does not require any physical infrastructure beyond mobile internet service. We have developed a prototype on a camera phone and conducted user studies to demonstrate the efficacy of our approach compared to other alternatives.}, keywords = {content-based image retrieval, context-aware computing, location-awareness, Mobile computing}, isbn = {1-58113-703-6}, doi = {10.1145/985921.985933}, url = {http://doi.acm.org/10.1145/985921.985933}, author = {Tom Yeh and Tollmar,Konrad and Darrell,Trevor} } @article {18342, title = {IDeixis: image-based deixis for finding location-based information}, journal = {Mobile HCI, Vienna, Austria}, year = {2004}, month = {2004///}, pages = {781 - 782}, author = {Tollmar,K. and Tom Yeh and Darrell,T.} } @article {18343, title = {IDeixis{\textendash}Searching the Web with Mobile Images for Location-Based Information}, journal = {Mobile Human-Computer Interaction{\textendash}MobileHCI 2004}, year = {2004}, month = {2004///}, pages = {61 - 125}, author = {Tollmar,K.
and Tom Yeh and Darrell,T.} } @conference {13788, title = {Identification of confusable drug names: a new approach and evaluation methodology}, booktitle = {Proceedings of the 20th international conference on Computational Linguistics}, series = {COLING {\textquoteright}04}, year = {2004}, month = {2004///}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {This paper addresses the mitigation of medical errors due to the confusion of sound-alike and look-alike drug names. Our approach involves application of two new methods---one based on orthographic similarity ("look-alike") and the other based on phonetic similarity ("sound-alike"). We present a new recall-based evaluation methodology for determining the effectiveness of different similarity measures on drug names. We show that the new orthographic measure (BI-SIM) outperforms other commonly used measures of similarity on a set containing both look-alike and sound-alike pairs, and that the feature-based phonetic approach (ALINE) outperforms orthographic approaches on a test set containing solely sound-alike confusion pairs. However, an approach that combines several different measures achieves the best results on both test sets.}, doi = {10.3115/1220355.1220492}, url = {http://dx.doi.org/10.3115/1220355.1220492}, author = {Kondrak,Grzegorz and Dorr, Bonnie J} } @article {12690, title = {Identification of humans using gait}, journal = {Image Processing, IEEE Transactions on}, volume = {13}, year = {2004}, month = {2004/09//}, pages = {1163 - 1173}, abstract = {We propose a view-based approach to recognize humans from their gait. Two different image features have been considered: the width of the outer contour of the binarized silhouette of the walking person and the entire binary silhouette itself. To obtain the observation vector from the image features, we employ two different methods. In the first method, referred to as the indirect approach, the high-dimensional image feature is transformed to a lower dimensional space by generating what we call the frame to exemplar (FED) distance. The FED vector captures both structural and dynamic traits of each individual. For compact and effective gait representation and recognition, the gait information in the FED vector sequences is captured in a hidden Markov model (HMM). In the second method, referred to as the direct approach, we work with the feature vector directly (as opposed to computing the FED) and train an HMM. We estimate the HMM parameters (specifically the observation probability B) based on the distance between the exemplars and the image features. In this way, we avoid learning high-dimensional probability density functions. The statistical nature of the HMM lends overall robustness to representation and recognition. 
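(A minimal sketch of the frame-to-exemplar distance (FED) representation in the gait entry above: each frame's feature is mapped to its distances from a few exemplar stances, and the resulting sequence feeds the HMM. The features and exemplar selection below are illustrative stand-ins.)

import numpy as np

def fed_sequence(frames, exemplars):
    """frames: (T, d) features; exemplars: (k, d). Returns (T, k) FED vectors."""
    return np.linalg.norm(frames[:, None, :] - exemplars[None, :, :], axis=2)

T, d, k = 100, 32, 5
frames = np.random.default_rng(2).random((T, d))     # e.g. silhouette widths
exemplars = frames[np.linspace(0, T - 1, k).astype(int)]   # naive exemplar pick
fed = fed_sequence(frames, exemplars)    # observation sequence for the HMM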
The performance of the methods is illustrated using several databases.}, keywords = {binary silhouette; frame to exemplar distance; gait databases; gait recognition; hidden Markov models; human identification; image features; image recognition; image representation; image sequences; observation probability; observation vector; probability; Models, Biological; Models, Statistical; Pattern Recognition, Automated; Reproducibility of Results; Sensitivity and Specificity; Signal Processing, Computer-Assisted; Subtraction Technique; Task Performance and Analysis; Video Recording}, isbn = {1057-7149}, doi = {10.1109/TIP.2004.832865}, author = {Kale, A. and Sundaresan, A. and Rajagopalan, AN and Cuntoor, N.P. and Roy-Chowdhury, A.K. and Kruger, V. and Chellapa, Rama} } @conference {12428, title = {Identifying similar parts for assisting cost estimation of prismatic machined parts}, booktitle = {ASME Design for Manufacturing Conference, Salt Lake City, Utah}, year = {2004}, month = {2004///}, author = {Cardone, Antonio and Gupta,S.K. and Karnik,M.} } @conference {12701, title = {Illuminating light field: image-based face recognition across illuminations and poses}, booktitle = {Automatic Face and Gesture Recognition, 2004. Proceedings. Sixth IEEE International Conference on}, year = {2004}, month = {2004/05//}, pages = {229 - 234}, abstract = {We present an image-based method for face recognition across different illuminations and different poses, where the term {\textquoteright}image-based{\textquoteright} means that only 2D images are used and no explicit 3D models are needed. As face recognition across illuminations and poses involves three factors, namely identity, illumination, and pose, generalizations from known identities to novel identities, from known illuminations to novel illuminations, and from known poses to unknown poses are desired. Our approach, called the illuminating light field, derives an identity signature that is invariant to illuminations and poses, where a subspace encoding is assumed for the identity, a Lambertian reflectance model for the illumination, and a light field model for the poses. Experimental results using the PIE database demonstrate the effectiveness of the proposed approach.}, keywords = {face recognition; illuminating light field; image-based; Lambertian reflectance model; lighting; multidimensional signal processing; poses; reflectivity}, doi = {10.1109/AFGR.2004.1301536}, author = {Zhou,Shaohua and Chellapa, Rama} } @article {14880, title = {Illumination, Reflectance, and Reflection-Characterization of Human Faces under Illumination Variations Using Rank, Integrability, and Symmetry Constraints}, journal = {Lecture Notes in Computer Science}, volume = {3021}, year = {2004}, month = {2004///}, pages = {588 - 601}, author = {Zhou,S. K and Chellapa, Rama and Jacobs, David W.} } @conference {18225, title = {Image hashing resilient to geometric and filtering operations}, booktitle = {Multimedia Signal Processing, 2004 IEEE 6th Workshop on}, year = {2004}, month = {2004/10/01/sept}, pages = {355 - 358}, abstract = {Image hash functions provide compact representations of images, which is useful for search and authentication applications. In this work, we have identified a general three step framework and proposed a new image hashing scheme that achieves a better overall performance than the existing approaches under various kinds of image processing distortions.
By exploiting the properties of discrete polar Fourier transform and incorporating cryptographic keys, the proposed image hash is resilient to geometric and filtering operations, and is secure against guessing and forgery attacks.}, keywords = {compact representation; cryptographic key; cryptography; discrete polar Fourier transform; filtering operations; geometric distortion; image hash function; image processing; public key; transforms}, doi = {10.1109/MMSP.2004.1436566}, author = {Swaminathan,A. and Mao,Yinian and M. Wu} } @article {16115, title = {Immediate usability: a case study of public access design for a community photo library}, journal = {Interacting with Computers}, volume = {16}, year = {2004}, month = {2004/12//}, pages = {1171 - 1193}, abstract = {This paper describes a novel instantiation of a digital photo library in a public access system. It demonstrates how designers can utilize characteristics of a target user community (social constraints, trust, and a lack of anonymity) to provide capabilities, such as unrestricted annotation and uploading of photos, which would be impractical in other types of public access systems. It also presents a compact set of design principles and guidelines for ensuring the immediate usability of public access information systems. These principles and guidelines were derived from our experience developing PhotoFinder Kiosk, a community photo library. Attendees of a major HCI conference (CHI 2001 Conference on Human Factors in Computing Systems) successfully used the tool to browse and annotate collections of photographs spanning 20 years of HCI-related conferences, producing a richly annotated photo history of the field of human{\textendash}computer interaction. Observations and usage log data were used to evaluate the tool and develop the guidelines. They provide specific guidance for practitioners, as well as a useful framework for additional research in public access interfaces.}, keywords = {Casual use, Community photo library, direct annotation, direct manipulation, Drag-and-drop, Group annotation, Immediate usability, Photo collection, Public access system, Walk-up-and-use, Zero-trial learning}, isbn = {0953-5438}, doi = {10.1016/j.intcom.2004.07.005}, url = {http://www.sciencedirect.com/science/article/pii/S0953543804000840}, author = {Kules,Bill and Kang,Hyunmo and Plaisant, Catherine and Rose,Anne and Shneiderman, Ben} } @article {15760, title = {Implementation of the regularized structured total least squares algorithms for blind image deblurring}, journal = {Linear Algebra and its Applications}, volume = {391}, year = {2004}, month = {2004/11/01/}, pages = {203 - 221}, abstract = {The structured total least squares (STLS) problem has been introduced to handle problems involving structured matrices corrupted by noise. Often the problem is ill-posed. Recently, regularization has been proposed in the STLS framework to solve ill-posed blind deconvolution problems encountered in image deblurring when both the image and the blurring function have uncertainty. The kernel of the regularized STLS (RSTLS) problem is a least squares problem involving Block{\textendash}Toeplitz{\textendash}Toeplitz{\textendash}Block matrices. In this paper an algorithm is described to solve this problem, based on a particular implementation of the generalized Schur Algorithm (GSA).
It is shown that this new implementation improves the computational efficiency of the straightforward implementation of GSA from $O(N^{2.5})$ to $O(N^2)$, where $N$ is the number of pixels in the image. }, keywords = {Block Toeplitz matrix, Displacement rank, Generalized Schur algorithm, Image deblurring, Structured total least squares, Tikhonov regularization}, isbn = {0024-3795}, doi = {10.1016/j.laa.2004.07.006}, url = {http://www.sciencedirect.com/science/article/pii/S0024379504003362}, author = {Mastronardi,N. and Lemmerling,P. and Kalsi,A. and O{\textquoteright}Leary,D. P and Huffel,S. Van} } @conference {17806, title = {Implementing stable semantics by linear programming}, booktitle = {Logic Programming and Non-Monotonic Reasoning, Proceedings of the Second International Workshop}, volume = {7}, year = {2004}, month = {2004///}, pages = {23 - 42}, author = {Bell,C. and Nerode,A. and Ng,R. T and V.S. Subrahmanian} } @conference {16887, title = {Importing abstract spatial data into the SAND database system}, booktitle = {Proceedings of the 2004 annual national conference on Digital government research}, series = {dg.o {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {62:1{\textendash}62:2}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {The World Wide Web has opened ways to operate large applications by users with simple client platforms located half way around the world by connecting servers around the world together. We present a system that makes a centrally stored spatial database available to off-site users. Regardless of the specific platform available to a user, all they need to do is simply establish a link between their client and the server. Unlike well-known web-based services such as MapQuest [2] that rely on computational power of the server, our system distributes the workload among the client and the server in such a manner that the user will observe the system as being interactive, with minimal delay between the user action and appropriate response for most types of operations.}, url = {http://dl.acm.org/citation.cfm?id=1124191.1124253}, author = {Samet, Hanan and Brabec,Frantisek and Sankaranarayanan,Jagan} } @article {13227, title = {Improved Multiuser Detectors Employing Genetic Algorithms in a Space-Time Block Coded System}, journal = {EURASIP Journal on Applied Signal Processing}, volume = {2004}, year = {2004}, month = {2004///}, pages = {640 - 648}, author = {Du,Yinggang and Chan,Kam Tai} } @article {15458, title = {Improving browsing environment compliance evaluations for websites}, journal = {Proceedings of the International Workshop on Web Quality (WQ{\textquoteright}04)}, year = {2004}, month = {2004///}, abstract = {Identifying accessibility issues that can threaten universal website usability is critical for web service and content providers who wish to accommodate the diverse web audience. Detecting page-to-environment incompliance and modifying pages to promote universal accessibility is one important step in improving the process of exploration and navigation in the web user experience.
To address this issue, we have designed a system that evaluates the accessibility of a web page in a given browsing environment based on knowledge of the HyperText Markup Language (HTML) tags that comprise the page and knowledge of the tag support provided in respective browsing environments. Given this approach, one of the most important aspects of the system is the comprehensive nature of tag support knowledge. The more support rules known, the more environment-specific bugs the system can accurately identify. In order to optimize knowledge of tag support criteria, we have also incorporated a learning mechanism that can inductively determine HTML tags that are unsupported in a given environment by observing both positive and negative examples of web page appearance and behavior. }, author = {Eaton,C. and Memon, Atif M.} } @article {17808, title = {Improving performance of heterogeneous agents}, journal = {Annals of Mathematics and Artificial Intelligence}, volume = {41}, year = {2004}, month = {2004///}, pages = {339 - 395}, author = {{\"O}zcan,F. and V.S. Subrahmanian and Dix,J.} } @conference {16308, title = {Improving the Quality of Performance-intensive Software via Model-integrated Distributed Continuous Quality Assurance}, booktitle = {Proceedings of the 8th International Conference on Software Reuse}, year = {2004}, month = {2004///}, author = {Krishna,A. S and Schmidt,D. C and Porter, Adam and Memon, Atif M. and Sevilla-Ruiz,D.} } @article {13233, title = {In Memory of Azriel Rosenfeld}, journal = {International Journal of Computer Vision}, volume = {60}, year = {2004}, month = {2004///}, pages = {3 - 4}, author = {Davis, Larry S.} } @article {13229, title = {Incremental density approximation and kernel-based Bayesian filtering for object tracking}, journal = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, volume = {1}, year = {2004}, month = {2004///}, abstract = {Statistical density estimation techniques are used in many computer vision applications such as object tracking, background subtraction, motion estimation and segmentation. The particle filter (Condensation) algorithm provides a general framework for estimating the probability density functions (pdf) of general non-linear and non-Gaussian systems. However, since this algorithm is based on a Monte Carlo approach, where the density is represented by a set of random samples, the number of samples is problematic, especially for high dimensional problems. In this paper, we propose an alternative to the classical particle filter in which the underlying pdf is represented with a semi-parametric method based on a mode finding algorithm using mean-shift. A mode propagation technique is designed for this new representation for tracking applications. A quasi-random sampling method [14] in the measurement stage is used to improve performance, and sequential density approximation for the measurements distribution is performed for efficient computation. We apply our algorithm to a high dimensional color-based tracking problem, and demonstrate its performance by showing competitive results with other trackers. }, author = {Han,B. and Comaniciu, D. and Zhu,Y.
and Davis, Larry S.} } @conference {16886, title = {Indexing distributed complex data for complex queries}, booktitle = {Proceedings of the 2004 annual national conference on Digital government research}, series = {dg.o {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {63:1{\textendash}63:10}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {Peer-to-peer networks are becoming a common form of online data exchange. Querying data, mostly files, using keywords on peer-to-peer networks is well-known. But users cannot perform many types of queries on complex data and on many of the attributes of the data on such networks other than mostly exact-match queries. We introduce a distributed hashing-based index for enabling more powerful accesses on complex data over peer-to-peer networks that we expect to be commonly deployed for digital government applications. Preliminary experiments show that our index scales well and we believe that it can be extended to obtain similar indices for many other data types for performing various complex queries, such as range queries.}, url = {http://dl.acm.org/citation.cfm?id=1124191.1124254}, author = {Tanin,Egemen and Harwood,Aaron and Samet, Hanan} } @conference {13791, title = {Inducing a semantic frame lexicon from WordNet data}, booktitle = {Proceedings of the 2nd Workshop on Text Meaning and Interpretation}, series = {TextMean {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {65 - 72}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {This paper presents SemFrame, a system that automatically induces the names and internal structures of semantic frames. After SemFrame identifies sets of frame-evoking verb synsets, the conceptual density of nodes in the WordNet network for corresponding nouns and noun synsets is computed and analyzed. Conceptually dense nodes are candidates for frame names and frame slots. Ca. 76\% of the frame names and 87\% of the frame slots generated by SemFrame are rated adequate by human judges.}, url = {http://dl.acm.org/citation.cfm?id=1628275.1628284}, author = {Green,Rebecca and Dorr, Bonnie J} } @conference {13792, title = {Inducing frame semantic verb classes from WordNet and LDOCE}, booktitle = {Proceedings of the 42nd Annual Meeting on Association for Computational Linguistics}, year = {2004}, month = {2004///}, pages = {375 - 375}, abstract = {This paper presents SemFrame, a system that induces frame semantic verb classes from WordNet and LDOCE. Semantic frames are thought to have significant potential in resolving the paraphrase problem challenging many language-based applications. When compared to the handcrafted FrameNet, SemFrame achieves its best recall-precision balance with 83.2\% recall (based on SemFrame{\textquoteright}s coverage of FrameNet frames) and 73.8\% precision (based on SemFrame verbs{\textquoteright} semantic relatedness to frame-evoking verbs). The next best performing semantic verb classes achieve 56.9\% recall and 55.0\% precision. }, author = {Green,R.
and Dorr, Bonnie J and Resnik, Philip} } @article {12937, title = {Infectious disease and environment: cholera as a paradigm for waterborne disease}, journal = {International Microbiology}, volume = {7}, year = {2004}, month = {2004/12//}, pages = {285 - 289}, isbn = {1139-6709}, url = {http://scielo.isciii.es/scielo.php?pid=S1139-67092004000400008\&script=sci_arttext}, author = {Rita R Colwell} } @conference {12034, title = {The influence of shape on image correspondence}, booktitle = {2nd International Symposium on 3D Data Processing, Visualization and Transmission, 2004. 3DPVT 2004. Proceedings}, year = {2004}, month = {2004/09/06/9}, pages = {945 - 952}, publisher = {IEEE}, organization = {IEEE}, abstract = {We examine the implications of shape on the process of finding dense correspondence and half-occlusions for a stereo pair of images. The desired property of the depth map is that it should be a piecewise continuous function which is consistent with the images and which has the minimum number of discontinuities. To zeroth order, piecewise continuity becomes piecewise constancy. Using this approximation, we first discuss an approach for dealing with such a fronto-parallel shapeless world, and the problems involved therein. We then introduce horizontal and vertical slant to create a first order approximation to piecewise continuity. We highlight the fact that a horizontally slanted surface (i.e., having depth variation in the direction of the separation of the two cameras) appears horizontally stretched in one image as compared to the other image. Thus, while corresponding two images, N pixels on a scanline in one image may correspond to a different number of pixels M in the other image, which has consequences with regard to sampling and occlusion detection. We also discuss the asymmetry between vertical and horizontal slant, and the central role of nonhorizontal edges in the context of vertical slant. Using experiments, we discuss cases where existing algorithms fail, and how the incorporation of new constraints provides correct results.}, keywords = {Automation, CAMERAS, Computational modeling, first order approximation, Geometrical optics, hidden feature removal, image sampling, Image segmentation, Layout, occlusion detection, piecewise continuous function, Pixel, SHAPE, Simulated annealing, stereo image processing, surface fitting}, isbn = {0-7695-2223-8}, doi = {10.1109/TDPVT.2004.1335418}, author = {Ogale, A. S and Aloimonos, J.} } @inbook {18909, title = {Information Gathering During Planning for Web Service Composition}, booktitle = {The Semantic Web {\textendash} ISWC 2004}, series = {Lecture Notes in Computer Science}, volume = {3298}, year = {2004}, month = {2004///}, pages = {335 - 349}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Hierarchical Task-Network (HTN) based planning techniques have been applied to the problem of composing Web Services, especially when described using the OWL-S service ontologies. Many of the existing Web Services are either exclusively information providing or crucially depend on information-providing services. Thus, many interesting service compositions involve collecting information either during execution or during the composition process itself. In this paper, we focus on the latter issue.
In particular, we present ENQUIRER, an HTN-planning algorithm designed for planning domains in which the information about the initial state of the world may not be complete, but it is discoverable through plan-time information-gathering queries. We have shown that ENQUIRER is sound and complete, and derived several mathematical relationships among the amount of available information, the likelihood of the planner finding a plan, and the quality of the plan found. We have performed experimental tests that confirmed our theoretical results and that demonstrated how ENQUIRER can be used in Web Service composition.}, keywords = {Computer science}, isbn = {978-3-540-23798-3}, url = {http://www.springerlink.com/content/v829m5080fc0bpng/abstract/}, author = {Kuter,Ugur and Sirin,Evren and Nau, Dana S. and Parsia,Bijan and Hendler,James}, editor = {McIlraith,Sheila and Plexousakis,Dimitris and van Harmelen,Frank} } @article {12698, title = {An information theoretic criterion for evaluating the quality of 3-D reconstructions from video}, journal = {Image Processing, IEEE Transactions on}, volume = {13}, year = {2004}, month = {2004/07//}, pages = {960 - 973}, abstract = {Even though numerous algorithms exist for estimating the three-dimensional (3-D) structure of a scene from its video, the solutions obtained are often of unacceptable quality. To overcome some of the deficiencies, many application systems rely on processing more data than necessary, thus raising the question: how is the accuracy of the solution related to the amount of data processed by the algorithm? Can we automatically recognize situations where the quality of the data is so bad that even a large number of additional observations will not yield the desired solution? Previous efforts to answer this question have used statistical measures like second order moments. They are useful if the estimate of the structure is unbiased and the higher order statistical effects are negligible, which is often not the case. This paper introduces an alternative information-theoretic criterion for evaluating the quality of a 3-D reconstruction. The accuracy of the reconstruction is judged by considering the change in mutual information (MI) (termed as the incremental MI) between a scene and its reconstructions. An example of 3-D reconstruction from a video sequence using optical flow equations and known noise distribution is considered and it is shown how the MI can be computed from first principles. We present simulations on both synthetic and real data to demonstrate the effectiveness of the proposed criterion.}, keywords = {3D reconstruction; algorithms; Artificial intelligence; Computer Graphics; Image Enhancement; Image Interpretation; Image reconstruction; Image sequences; information theoretic criterion; Mutual information; noise; noise distribution; optical flow equations; second order moments; statistical analysis; Video sequences; video signal processing; Image Interpretation, Computer-Assisted; Imaging, Three-Dimensional; Information Storage and Retrieval; Information Theory; Movement; Pattern Recognition, Automated; Reproducibility of Results; Sensitivity and Specificity; Signal Processing, Computer-Assisted; Software Validation; Subtraction Technique; Video Recording}, isbn = {1057-7149}, doi = {10.1109/TIP.2004.827240}, author = {Roy-Chowdhury, A.K.
and Chellapa, Rama} } @article {14087, title = {The ingi and RIME non-LTR retrotransposons are not randomly distributed in the genome of Trypanosoma brucei}, journal = {Molecular biology and evolution}, volume = {21}, year = {2004}, month = {2004///}, pages = {520 - 520}, author = {Bringaud,F. and Biteau,N. and Zuiderwijk,E. and Berriman,M. and El-Sayed, Najib M. and Ghedin,E. and Melville,S. E. and Hall,N. and Baltz,T.} } @article {13356, title = {An initial study of overheads of eddies}, journal = {SIGMOD Rec.}, volume = {33}, year = {2004}, month = {2004/03//}, pages = {44 - 49}, abstract = {An eddy [2] is a highly adaptive query processing operator that continuously reoptimizes a query in response to changing runtime conditions. It does this by treating query processing as routing of tuples through operators and making per-tuple routing decisions. The benefits of such adaptivity can be significant, especially in highly dynamic environments such as data streams, sensor query processing, web querying, etc. Various parties have asserted that the cost of making per-tuple routing decisions is prohibitive. We have implemented eddies in the PostgreSQL open source database system [1] in the context of the TelegraphCQ project. In this paper, we present an "apples-to-apples" comparison of PostgreSQL query processing overhead with and without eddies. Our results show that with some minor tuning, the overhead of the eddy mechanism is negligible.}, isbn = {0163-5808}, doi = {10.1145/974121.974129}, url = {http://doi.acm.org/10.1145/974121.974129}, author = {Deshpande, Amol} } @conference {14771, title = {Interactive binary instrumentation}, booktitle = {Second International Workshop on Remote Analysis and Measurement of Software Systems (RAMSS)}, year = {2004}, month = {2004///}, author = {Williams, C. C and Hollingsworth, Jeffrey K} } @article {15850, title = {Interactive cross-language document selection}, journal = {information retrieval}, volume = {7}, year = {2004}, month = {2004///}, pages = {205 - 228}, author = {Oard, Douglas and Gonzalo,J. and Sanderson,M. and L{\'o}pez-Ostenero,F. and Wang,J.} } @article {17260, title = {Interactively Optimizing Signal-to-Noise Ratios in Expression Profiling: Project-Specific Algorithm Selection and Detection P-Value Weighting in Affymetrix Microarrays}, journal = {BioinformaticsBioinformatics}, volume = {20}, year = {2004}, month = {2004/11/01/}, pages = {2534 - 2544}, abstract = {Motivation: The most commonly utilized microarrays for mRNA profiling (Affymetrix) include {\textquoteleft}probe sets{\textquoteright} of a series of perfect match and mismatch probes (typically 22 oligonucleotides per probe set). There are an increasing number of reported {\textquoteleft}probe set algorithms{\textquoteright} that differ in their interpretation of a probe set to derive a single normalized {\textquoteleft}signal{\textquoteright} representative of expression of each mRNA. These algorithms are known to differ in accuracy and sensitivity, and optimization has been done using a small set of standardized control microarray data. We hypothesized that different mRNA profiling projects have varying sources and degrees of confounding noise, and that these should alter the choice of a specific probe set algorithm. 
Also, we hypothesized that use of the Microarray Suite (MAS) 5.0 probe set detection p-value as a weighting function would improve the performance of all probe set algorithms. Results: We built an interactive visual analysis software tool (HCE2W) to test and define parameters in Affymetrix analyses that optimize the ratio of signal (desired biological variable) versus noise (confounding uncontrolled variables). Five probe set algorithms were studied with and without statistical weighting of probe sets using the MAS 5.0 probe set detection p-values. The signal-to-noise ratio optimization method was tested in two large novel microarray datasets with different levels of confounding noise, a 105 sample U133A human muscle biopsy dataset (11 groups: mutation-defined, extensive noise), and a 40 sample U74A inbred mouse lung dataset (8 groups: little noise). Performance was measured by the ability of the specific probe set algorithm, with and without detection p-value weighting, to cluster samples into the appropriate biological groups (unsupervised agglomerative clustering with F-measure values). Of the total random sampling analyses, 50\% showed a highly statistically significant difference between probe set algorithms by ANOVA [F(4,10) > 14, p < 0.0001], with weighting by MAS 5.0 detection p-value showing significance in the mouse data by ANOVA [F(1,10) > 9, p < 0.013] and paired t-test [t(9) = -3.675, p = 0.005]. Probe set detection p-value weighting had the greatest positive effect on performance of dChip difference model, ProbeProfiler and RMA algorithms. Importantly, probe set algorithms did indeed perform differently depending on the specific project, most probably due to the degree of confounding noise. Our data indicate that significantly improved data analysis of mRNA profile projects can be achieved by optimizing the choice of probe set algorithm with the noise levels intrinsic to a project, with dChip difference model with MAS 5.0 detection p-value continuous weighting showing the best overall performance in both projects. Furthermore, both existing and newly developed probe set algorithms should incorporate a detection p-value weighting to improve performance. Availability: The Hierarchical Clustering Explorer 2.0 is available at http://www.cs.umd.edu/hcil/hce/. Murine arrays (40 samples) are publicly available at the PEPR resource (http://microarray.cnmcresearch.org/pgadatatable.asp; http://pepr.cnmcresearch.org; Chen et al., 2004). }, isbn = {1367-4803, 1460-2059}, doi = {10.1093/bioinformatics/bth280}, url = {http://bioinformatics.oxfordjournals.org/content/20/16/2534}, author = {Seo,Jinwook and Bakay,Marina and Chen,Yi-Wen and Hilmer,Sara and Shneiderman, Ben and Hoffman,Eric P} } @book {12353, title = {Interconnect Synthesis for Systems on Chip}, year = {2004}, month = {2004///}, publisher = {MARYLAND UNIV COLLEGE PARK DEPT OF ELECTRICAL AND COMPUTER ENGINEERING}, organization = {MARYLAND UNIV COLLEGE PARK DEPT OF ELECTRICAL AND COMPUTER ENGINEERING}, author = {Bhattacharyya, Shuvra S. and Bambha,N.
K} } @inbook {13795, title = {Interlingual Annotation for MT Development}, booktitle = {Machine Translation: From Real Users to Research}, series = {Lecture Notes in Computer Science}, volume = {3265}, year = {2004}, month = {2004///}, pages = {236 - 245}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {MT systems that use only superficial representations, including the current generation of statistical MT systems, have been successful and useful. However, they will experience a plateau in quality, much like other {\textquotedblleft}silver bullet{\textquotedblright} approaches to MT. We pursue work on the development of interlingual representations for use in symbolic or hybrid MT systems. In this paper, we describe the creation of an interlingua and the development of a corpus of semantically annotated text, to be validated in six languages and evaluated in several ways. We have established a distributed, well-functioning research methodology, designed a preliminary interlingua notation, created annotation manuals and tools, developed a test collection in six languages with associated English translations, annotated some 150 translations, and designed and applied various annotation metrics. We describe the data sets being annotated and the interlingual (IL) representation language which uses two ontologies and a systematic theta-role list. We present the annotation tools built and outline the annotation process. Following this, we describe our evaluation methodology and conclude with a summary of issues that have arisen.}, isbn = {978-3-540-23300-8}, url = {http://dx.doi.org/10.1007/978-3-540-30194-3_26}, author = {Reeder,Florence and Dorr, Bonnie J and Farwell,David and Habash,Nizar and Helmreich,Stephen and Hovy,Eduard and Levin,Lori and Mitamura,Teruko and Miller,Keith and Rambow,Owen and Siddharthan,Advaith}, editor = {Frederking,Robert and Taylor,Kathryn} } @article {13796, title = {Interlingual annotation of multilingual text corpora}, journal = {Proceedings of the North American Chapter of the Association for Computational Linguistics Workshop on Frontiers in Corpus Annotation}, year = {2004}, month = {2004///}, pages = {55 - 62}, abstract = {This paper describes a multi-site project to annotate six sizable bilingual parallel corpora for interlingual content. After presenting the background and objectives of the effort, we describe the data set that is being annotated, the interlingua representation language used, an interface environment that supports the annotation task and the annotation process itself. We will then present a preliminary version of our evaluation methodology and conclude with a summary of the current status of the project along with a number of issues which have arisen. }, author = {Farwell,D. and Helmreich,S. and Dorr, Bonnie J and Habash,N. and Reeder,F. and Miller,K. and Levin,L. and Mitamura,T. and Hovy,E. and Rambow,O. and others} } @article {14047, title = {Interpolation and range extrapolation of head related transfer functions}, journal = {IEEE International Conference on Acoustics, Speech, and Signal Processing}, volume = {4}, year = {2004}, month = {2004///}, author = {Duraiswami, Ramani and Zotkin,Dmitry N and Gumerov, Nail A.} } @conference {12700, title = {Intra-personal kernel space for face recognition}, booktitle = {Automatic Face and Gesture Recognition, 2004. Proceedings.
Sixth IEEE International Conference on}, year = {2004}, month = {2004/05//}, pages = {235 - 240}, abstract = {Intra-personal space modeling proposed by Moghaddam et al. has been successfully applied in face recognition. In their work the regular principal subspaces are derived from the intra-personal space using a principal component analysis and embedded in a probabilistic formulation. In this paper, we derive the principal subspace from the intra-personal kernel space by developing a probabilistic analysis for kernel principal components for face recognition. We test this algorithm on a subset of the FERET database with illumination and facial expression variations. The recognition performance demonstrates its advantage over other traditional subspace approaches.}, keywords = {face recognition; facial expression variation; illumination; intra-personal kernel space; lighting; principal component analysis; probabilistic analysis; probability}, doi = {10.1109/AFGR.2004.1301537}, author = {Zhou,Shaohua and Chellapa, Rama and Moghaddam, B.} } @conference {13216, title = {Iterative figure-ground discrimination}, booktitle = {Pattern Recognition, 2004. ICPR 2004. Proceedings of the 17th International Conference on}, volume = {1}, year = {2004}, month = {2004/08//}, pages = {67 - 70 Vol.1}, abstract = {Figure-ground discrimination is an important problem in computer vision. Previous work usually assumes that the color distribution of the figure can be described by a low dimensional parametric model such as a mixture of Gaussians. However, such an approach has difficulty selecting the number of mixture components and is sensitive to the initialization of the model parameters. In this paper, we employ non-parametric kernel estimation for color distributions of both the figure and background. We derive an iterative sampling-expectation (SE) algorithm for estimating the color distribution and segmentation. There are several advantages of kernel-density estimation. First, it enables automatic selection of weights of different cues based on the bandwidth calculation from the image itself. Second, it does not require model parameter initialization and estimation. The experimental results on images of cluttered scenes demonstrate the effectiveness of the proposed algorithm.}, keywords = {bandwidth calculation; color distribution; computer vision; estimation theory; figure-ground discrimination; Gaussian mixture model; Gaussian processes; image colour analysis; image segmentation; iterative sampling-expectation algorithm; kernel density estimation; low dimensional parametric model; model parameter initialization; nonparametric statistics; sampling methods}, doi = {10.1109/ICPR.2004.1334006}, author = {Zhao, L. and Davis, Larry S.} } @conference {14464, title = {Iterative record linkage for cleaning and integration}, booktitle = {Proceedings of the 9th ACM SIGMOD workshop on Research issues in data mining and knowledge discovery}, series = {DMKD {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {11 - 18}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Record linkage, the problem of determining when two records refer to the same entity, has applications for both data cleaning (deduplication) and for integrating data from multiple sources.
Traditional approaches use a similarity measure that compares tuples{\textquoteright} attribute values; tuples with similarity scores above a certain threshold are declared to be matches. While this method can perform quite well in many domains, particularly domains where there is not a large amount of noise in the data, in some domains looking only at tuple values is not enough. By also examining the context of the tuple, i.e., the other tuples to which it is linked, we can come up with a more accurate linkage decision. But this additional accuracy comes at a price. In order to correctly find all duplicates, we may need to make multiple passes over the data; as linkages are discovered, they may in turn allow us to discover additional linkages. We present results that illustrate the power and feasibility of making use of join information when comparing records.}, keywords = {clustering, deduplication, distance measure, record linkage}, isbn = {1-58113-908-X}, doi = {10.1145/1008694.1008697}, url = {http://doi.acm.org/10.1145/1008694.1008697}, author = {Bhattacharya,Indrajit and Getoor, Lise} } @article {16301, title = {ICSE workshop on remote analysis and measurement of software systems (RAMSS)}, journal = {ACM SIGSOFT Software Engineering Notes}, volume = {28}, year = {2003}, month = {2003/11//}, pages = {10 - 10}, isbn = {01635948}, doi = {10.1145/966221.966232}, url = {http://dl.acm.org/citation.cfm?id=966232}, author = {Orso,Alessandro and Porter, Adam} } @book {12138, title = {Identifying Relevant Information for Testing Technique Selection: An Instantiated Characterization Schema}, year = {2003}, month = {2003/04/01/}, publisher = {Springer}, organization = {Springer}, abstract = {The importance of properly selecting testing techniques is widely accepted in the software engineering community today. However, there are chiefly two reasons why the selections now made by software developers are difficult to evaluate as correct. First, there are several techniques with which the average developer is unfamiliar, often leaving testers with limited knowledge of all the techniques currently available. Second, the available information regarding the different testing techniques is primarily procedural (focused on how to use the technique), rather than pragmatic (focused on the effect and appropriateness of using the technique). The problem addressed in this book is aimed at improving software testing technique selection. Identifying Relevant Information for Testing Technique Selection: An Instantiated Characterization Schema will train its readers how to use the conceptual tool presented here in various ways. Developers will improve their testing technique selection process by systematically and objectively selecting the testing techniques for a software project. Developers will also build a repository containing their own experience with the application of various software testing techniques. Researchers will focus their research on the relevant aspects of testing technique when creating it, and when comparing different techniques. Identifying Relevant Information for Testing Technique Selection: An Instantiated Characterization Schema is designed to meet the needs of a professional audience in software engineering.
This book is also suitable for graduate-level students in computer science and engineering.}, keywords = {Business \& Economics / Information Management, Computer software, Computer software - Testing, Computer software/ Testing, Computers / Information Technology, Computers / Internet / Application Development, Computers / Programming / General, Computers / Programming Languages / General, Computers / Software Development \& Engineering / General, Computers / Software Development \& Engineering / Quality Assurance \& Testing, Technology \& Engineering / Materials Science}, isbn = {9781402074356}, author = {Vegas,Sira and Juristo,Natalia and Basili, Victor R.} } @article {17220, title = {Image-based highly interactive Web mapping for geo-referenced data publishing}, journal = {Technical Reports from UMIACS}, year = {2003}, month = {2003/01/21/}, abstract = {This paper describes an image-based technique that enables highly interactive Web choropleth maps for geo-referenced data publishing and visual exploration. Geographic knowledge is encoded into raster images and delivered to the client, instead of in vector formats. Differing from traditional raster-image-based approaches that are static and allow very little user interaction, it allows varieties of sub-second fine-grained interface controls such as dynamic query, dynamic classification, geographic object data identification, user setting adjusting, as well as turning on/off layers, panning and zooming, with no or minimum server support. Compared to Web GIS approaches that are based on vector geographic data, this technique has the features of short initial download time, near-constant performance scalability for larger numbers of geographic objects, and download-map-segment-only-when-necessary which potentially reduces the overall data transfer over the network. As a result, it accommodates general public users with slow modem network connections and low-end machines, as well as users with fast T-1 connections and fast machines. The client-side (browser) is implemented as light-weight Java applets. YMap, an easy-to-use, user-task-oriented highly interactive mapping tool prototype for visual geo-referenced data exploration is implemented using this technique. (UMIACS-TR-2003-02) (HCIL-TR-2002-26) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1248}, author = {Zhao,Haixia and Shneiderman, Ben} } @conference {13244, title = {Image-based pan-tilt camera control in a multi-camera surveillance environment}, booktitle = {Multimedia and Expo, 2003. ICME {\textquoteright}03. Proceedings. 2003 International Conference on}, volume = {1}, year = {2003}, month = {2003/07//}, pages = {I - 645-8 vol.1}, abstract = {In automated surveillance systems with multiple cameras, the system must be able to position the cameras accurately. Each camera must be able to pan-tilt such that an object detected in the scene is in a vantage position in the camera{\textquoteright}s image plane and subsequently capture images of that object. Typically, camera calibration is required. We propose an approach that uses only image-based information. Each camera is assigned a pan-tilt zero-position. Position of an object detected in one camera is related to the other cameras by homographies between the zero-positions while different pan-tilt positions of the same camera are related in the form of projective rotations.
We then derive that the trajectories in the image plane corresponding to these projective rotations are approximately circular for pan and linear for tilt. The camera control technique is subsequently tested in a working prototype.}, keywords = {automated surveillance systems; camera control; image-based information; image processing; image sensors; multicamera surveillance environment; object detection; pan-tilt cameras; vantage position; zero-positions}, doi = {10.1109/ICME.2003.1221000}, author = {Lim,Ser-Nam and Elgammal,A. and Davis, Larry S.} } @article {16125, title = {Immediate Usability: Kiosk design principles from the CHI 2001 Photo Library}, journal = {Technical Reports from UMIACS}, year = {2003}, month = {2003/01/21/}, abstract = {This paper describes a novel set of design principles and guidelines for ensuring the immediate usability of public access systems. These principles and guidelines were formulated while developing PhotoFinder Kiosk, a community photo library. Attendees of CHI 2001 successfully used the tool to browse and annotate collections of photographs spanning 20 years of CHI and related conferences, producing a richly annotated photo history of the field of human-computer interaction. We used observations and log data to evaluate the tool and refine the guidelines. They provide specific guidance for practitioners, as well as a useful framework for additional research in public access interfaces. Keywords: photo collection, community photo library, group annotation, public access system, direct annotation, direct manipulation, drag-and-drop, immediate usability, zero-trial learning, walk-up-and-use, casual use. (UMIACS-TR-2001-71) (HCIL-TR-2001-23) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1155}, author = {Kules,Bill and Kang,Hyunmo and Plaisant, Catherine and Rose,Anne and Shneiderman, Ben} } @article {18907, title = {IMPACTing SHOP: Putting an AI Planner Into a Multi-Agent Environment}, journal = {Annals of Mathematics and Artificial Intelligence}, volume = {37}, year = {2003}, month = {2003///}, pages = {381 - 407}, abstract = {In this paper we describe a formalism for integrating the SHOP HTN planning system with the IMPACT multi-agent environment. We define the A-SHOP algorithm, an agentized adaptation of the SHOP planning algorithm that takes advantage of IMPACT{\textquoteright}s capabilities for interacting with external agents, performing mixed symbolic/numeric computations, and making queries to distributed, heterogeneous information sources (such as arbitrary legacy and/or specialized data structures or external databases). We show that A-SHOP is both sound and complete if certain conditions are met.}, keywords = {Computer science}, isbn = {1012-2443}, doi = {10.1023/A:1021560510377}, url = {http://www.springerlink.com/content/g4r0mn835846p65w/abstract/}, author = {Dix,J{\"u}rgen and Mu{\~n}oz-Avila,H{\'e}ctor and Nau, Dana S. and Zhang,Lingling} } @mastersthesis {14699, title = {Implementing On-line Software Upgrades in Java}, year = {2003}, month = {2003/05//}, school = {University of Maryland, College Park}, abstract = {There are many systems that have to be available without interruption and so cannot be brought down for modifications. Examples of such applications include financial transaction processors, telephone switches and air traffic control systems. On the other hand, hardly any software is deployed error-free or fully functional, thus requiring fixes and changes.
Therefore, there is a need for runtime software upgrading capability for such systems. This paper discusses existing approaches to {\textquotedblleft}hotswapping{\textquotedblright} {\textendash} runtime updating of classes {\textendash} for the Java language. The different implementations of hotswapping are compared with respect to functionality, efficiency, ease of use and limitations. In addition, an improvement for one hotswapping mechanism is suggested and implemented. }, author = {Gebremichael,M. and Hicks, Michael W.} } @article {17552, title = {An Improved Approximation Ratio for the Covering Steiner Problem}, journal = {Theory of Computing}, volume = {2}, year = {2003}, month = {2003///}, pages = {53 - 64}, abstract = {In the Covering Steiner problem, we are given an undirected graph with edge-costs, and some subsets of vertices called groups, with each group being equipped with a non-negative integer value (called its requirement); the problem is to find a minimum-cost tree which spans at least the required number of vertices from every group. The Covering Steiner problem is a common generalization of the k-MST and the Group Steiner problems; indeed, when all the vertices of the graph lie in one group with a requirement of k, we get the k-MST problem, and when there are multiple groups with unit requirements, we obtain the Group Steiner problem. While many covering problems (e.g., the covering integer programs such as set cover) become easier to approximate as the requirements increase, the Covering Steiner problem remains at least as hard to approximate as the Group Steiner problem; in fact, the best guarantees previously known for the Covering Steiner problem were worse than those for Group Steiner as the requirements became large. In this work, we present an improved approximation algorithm whose guarantee equals the best known guarantee for the Group Steiner problem.}, url = {http://repository.cmu.edu/compsci/850}, author = {Gupta,Anupam and Srinivasan, Aravind} } @conference {13237, title = {Improved fast Gauss transform and efficient kernel density estimation}, booktitle = {Computer Vision, 2003. Proceedings. Ninth IEEE International Conference on}, year = {2003}, month = {2003/10//}, pages = {664 - 671 vol.1}, abstract = {Evaluating sums of multivariate Gaussians is a common computational task in computer vision and pattern recognition, including in the general and powerful kernel density estimation technique. The quadratic computational complexity of the summation is a significant barrier to the scalability of this algorithm to practical applications. The fast Gauss transform (FGT) has successfully accelerated the kernel density estimation to linear running time for low-dimensional problems. Unfortunately, the cost of a direct extension of the FGT to higher-dimensional problems grows exponentially with dimension, making it impractical for dimensions above 3. We develop an improved fast Gauss transform to efficiently estimate sums of Gaussians in higher dimensions, where a new multivariate expansion scheme and an adaptive space subdivision technique dramatically improve the performance. The improved FGT has been applied to the mean shift algorithm achieving linear computational complexity.
Experimental results demonstrate the efficiency and effectiveness of our algorithm.}, keywords = {adaptive space subdivision technique; computational complexity; computer vision; estimation theory; fast Gauss transform; Gaussian processes; kernel density estimation; mean shift algorithm; multivariate expansion scheme; pattern recognition; quadratic complexity}, doi = {10.1109/ICCV.2003.1238383}, author = {Yang,C. and Duraiswami, Ramani and Gumerov, Nail A. and Davis, Larry S.} } @article {16892, title = {Improved search heuristics for the sa-tree}, journal = {Pattern Recognition Letters}, volume = {24}, year = {2003}, month = {2003/11//}, pages = {2785 - 2795}, abstract = {The sa-tree is an interesting metric space indexing structure that is inspired by the Voronoi diagram. In essence, the sa-tree records a portion of the Delaunay graph of the data set, a graph whose vertices are the Voronoi cells, with edges between adjacent cells. An improvement is presented on the original search strategy for the sa-tree. This consists of details on the intuition behind the improvement as well as the original search strategy and a proof of their correctness. Furthermore, it is shown how to adapt an incremental nearest neighbor algorithm to the sa-tree, which allows computing nearest neighbor in a progressive manner. Unlike other adaptations, the resulting algorithm does not take the unnecessary steps to ensure that keys of {\textquotedblleft}node{\textquotedblright} elements are monotonically non-decreasing.}, keywords = {distance-based indexing, Metric spaces, Nearest neighbor algorithms}, isbn = {0167-8655}, doi = {10.1016/S0167-8655(03)00122-3}, url = {http://www.sciencedirect.com/science/article/pii/S0167865503001223}, author = {Hjaltason,G{\'\i}sli R. and Samet, Hanan} } @conference {16900, title = {Improving access to large volumes of online data}, booktitle = {Proceedings of the 2003 annual national conference on Digital government research}, series = {dg.o {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {1 - 6}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {The Internet has recently become the medium of interaction with large volumes of data. Enterprises in the public and private sectors made their databases available over the Internet. Working with such large volumes of online data is a challenging task. For efficient access to large data from remote locations we introduced APPOINT (an Approach for Peer-to-Peer Offloading the INTernet). In APPOINT, active clients of a client-server architecture act on the server{\textquoteright}s behalf and communicate with each other to transfer large volumes of online data more efficiently. In essence, a server is enabled to appoint alternatives and create a scalable collaborative virtual mirror from the active clients. Multiple parameters such as availability of clients and bandwidth information on clients are considered to decide on how to best forward a download request. APPOINT is built as an add-on to existing client-server systems. A library of functions, with a simple application programming interface (API) that can be used within an existing client-server system to improve the service, is developed.
Our experimental findings show that APPOINT can dramatically improve the performance of existing client-server based database systems.}, url = {http://dl.acm.org/citation.cfm?id=1123196.1123260}, author = {Tanin,Egemen and Samet, Hanan} } @conference {17878, title = {Improving access to multi-dimensional self-describing scientific datasets}, booktitle = {3rd IEEE/ACM International Symposium on Cluster Computing and the Grid, 2003. Proceedings. CCGrid 2003}, year = {2003}, month = {2003/05/12/15}, pages = {172 - 179}, publisher = {IEEE}, organization = {IEEE}, abstract = {Applications that query into very large multidimensional datasets are becoming more common. Many self-describing scientific data file formats have also emerged, which have structural metadata to help navigate the multi-dimensional arrays that are stored in the files. The files may also contain application-specific semantic metadata. In this paper, we discuss efficient methods for performing searches for subsets of multi-dimensional data objects, using semantic information to build multidimensional indexes, and group data items into properly sized chunks to maximize disk I/O bandwidth. This work is the first step in the design and implementation of a generic indexing library that will work with various high-dimension scientific data file formats containing semantic information about the stored data. To validate the approach, we have implemented indexing structures for NASA remote sensing data stored in the HDF format with a specific schema (HDF-EOS), and show the performance improvements that are gained from indexing the datasets, compared to using the existing HDF library for accessing the data.}, keywords = {Application software, application-specific semantic metadata, Bandwidth, Computer science, database indexing, disk I/O bandwidth, distributed databases, Educational institutions, Indexing, indexing structures, Libraries, meta data, Middleware, multidimensional arrays, multidimensional datasets, Multidimensional systems, NASA, NASA remote sensing data, Navigation, query formulation, self-describing scientific data file formats, structural metadata, very large databases}, isbn = {0-7695-1919-9}, doi = {10.1109/CCGRID.2003.1199366}, author = {Nam,B. and Sussman, Alan} } @article {16124, title = {Improving Accessibility and Usability of Geo-referenced Statistical Data}, year = {2003}, month = {2003/06/04/}, abstract = {Several technology breakthroughs are needed to achieve the goals ofuniversal accessibility and usability. These goals are especially challenging in the case of geo-referenced statistical data that many U.S. government agencies supply. We present technical and user-interface design challenges in accommodating users with low-end technology (slow network connection and low-end machine) and users who are blind or vision-impaired. Our solutions are presented and future work is discussed. (UMIACS-TR-2003-37) (HCIL-2003-11) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1277}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben} } @article {18980, title = {Improving the Arabidopsis genome annotation using maximal transcript alignment assemblies}, journal = {Nucleic Acids ResearchNucl. Acids Res.}, volume = {31}, year = {2003}, month = {2003/10/01/}, pages = {5654 - 5666}, abstract = {The spliced alignment of expressed sequence data to genomic sequence has proven a key tool in the comprehensive annotation of genes in eukaryotic genomes. 
A novel algorithm was developed to assemble clusters of overlapping transcript alignments (ESTs and full-length cDNAs) into maximal alignment assemblies, thereby comprehensively incorporating all available transcript data and capturing subtle splicing variations. Complete and partial gene structures identified by this method were used to improve The Institute for Genomic Research Arabidopsis genome annotation (TIGR release v.4.0). The alignment assemblies permitted the automated modeling of several novel genes and >1000 alternative splicing variations as well as updates (including UTR annotations) to nearly half of the \~{}27 000 annotated protein coding genes. The algorithm of the Program to Assemble Spliced Alignments (PASA) tool is described, as well as the results of automated updates to Arabidopsis gene annotations.}, isbn = {0305-1048, 1362-4962}, doi = {10.1093/nar/gkg770}, url = {http://nar.oxfordjournals.org/content/31/19/5654}, author = {Haas,Brian J. and Delcher,Arthur L. and Mount, Stephen M. and Wortman,Jennifer R. and Smith, Jr., Roger K. and Hannick,Linda I. and Maiti,Rama and Ronning,Catherine M. and Rusch,Douglas B and Town,Christopher D. and Salzberg,Steven L. and White,Owen} } @article {15510, title = {In Memoriam: Raymond Reiter}, journal = {AI Magazine}, volume = {24}, year = {2003}, month = {2003///}, pages = {13 - 13}, author = {Minker, Jack} } @article {17229, title = {In vivo filtering of in vitro expression data reveals MyoD targets}, journal = {Comptes Rendus Biologies}, volume = {326}, year = {2003}, month = {2003/10//}, pages = {1049 - 1065}, abstract = {A published set of downstream targets of MyoD defined in a well-controlled in vitro experiment was filtered for relevance to muscle regeneration using a 27-time-point in vivo murine regeneration series. Using interactive hierarchical and Bayes soft clustering, only a minority of the targets defined in vitro can be confirmed in vivo (\~{}50\% of induced transcripts, and none of repressed transcripts). This approach provided strong support that 18 targets of MyoD are biologically relevant during myoblast differentiation. To cite this article: P.~Zhao et al., C.~R. Biologies 326 (2003).}, keywords = {clustering, expression profiling, Microarray, micror{\'e}seaux, muscle regeneration, MyoD, profil d{\textquoteright}expression, r{\'e}g{\'e}n{\'e}ration musculaire, regroupement}, isbn = {1631-0691}, doi = {10.1016/j.crvi.2003.09.035}, url = {http://www.sciencedirect.com/science/article/pii/S1631069103002324}, author = {Zhao,Po and Seo,Jinwook and Wang,Zuyi and Wang,Yue and Shneiderman, Ben and Hoffman,Eric P} } @article {16891, title = {Index-driven similarity search in metric spaces (Survey Article)}, journal = {ACM Trans. Database Syst.}, volume = {28}, year = {2003}, month = {2003/12//}, pages = {517 - 580}, abstract = {Similarity search is a very important operation in multimedia databases and other database applications involving complex objects, and involves finding objects in a data set S similar to a query object q, based on some similarity measure. In this article, we focus on methods for similarity search that make the general assumption that similarity is represented with a distance metric d. Existing methods for handling similarity search in this setting typically fall into one of two classes. The first directly indexes the objects based on distances (distance-based indexing), while the second is based on mapping to a vector space (mapping-based approach).
The main part of this article is dedicated to a survey of distance-based indexing methods, but we also briefly outline how search occurs in mapping-based methods. We also present a general framework for performing search based on distances, and present algorithms for common types of queries that operate on an arbitrary "search hierarchy." These algorithms can be applied on each of the methods presented, provided a suitable search hierarchy is defined.}, keywords = {distance-based indexing, Hierarchical metric data structures, nearest neighbor queries, range queries, Ranking, similarity searching}, isbn = {0362-5915}, doi = {10.1145/958942.958948}, url = {http://doi.acm.org/10.1145/958942.958948}, author = {Hjaltason,Gisli R. and Samet, Hanan} } @inbook {17237, title = {Innovating the Interaction}, booktitle = {The craft of information visualization: readings and reflections}, year = {2003}, month = {2003///}, pages = {295 - 295}, publisher = {Morgan Kaufmann}, organization = {Morgan Kaufmann}, address = {San Francisco}, isbn = {978-1-55860-915-0}, author = {Bederson, Benjamin B. and Plaisant, Catherine and Mushlin,R. and Snyder,A. and Li,J. and Heller,D. and Shneiderman, Ben and Hochheiser,H. and Fekete,J. D and Czerwinski,M.} } @conference {14053, title = {Integral equation solution of electromagnetic scattering from a multilayered cylindrical waveguide}, booktitle = {Antennas and Propagation Society International Symposium, 2003. IEEE}, volume = {3}, year = {2003}, month = {2003/06//}, pages = {524 - 527 vol.3}, abstract = {This paper is devoted to the electromagnetic scattering from an N multilayered cylinder. We consider waveguides in the z direction, that is: we look for the solution of Maxwell equations along the z direction. We assume a dielectric core and discuss the problem for the case of general domains. We use an integral equation approach to solve the problem and the Nystrom method for the numerical approximation.}, keywords = {circular waveguides, dielectric core, dielectric-loaded waveguides, electromagnetic wave scattering, EM theory, integral equations, Maxwell equations, multilayered cylindrical waveguides, numerical approximation, Nystrom method}, doi = {10.1109/APS.2003.1219901}, author = {Seydou,F. and Duraiswami, Ramani and Seppanen,T.} } @conference {17615, title = {Integrality ratio for group Steiner trees and directed Steiner trees}, booktitle = {Proceedings of the fourteenth annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {275 - 284}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, abstract = {We present an Ω(log^2 k) lower bound on the integrality ratio of the flow-based relaxation for the Group Steiner Tree problem, where k denotes the number of groups; this holds even for input graphs that are Hierarchically Well-Separated Trees, introduced by Bartal [Symp. Foundations of Computer Science, pp. 184--193, 1996], in which case this lower bound is tight. This relaxation appears to be the only one that has been studied for the problem, as well as for its generalization, the Directed Steiner Tree problem. For the latter problem, our results imply an Ω(log^2 n/(log log n)^2) integrality ratio, where n is the number of vertices in the graph.
For both problems, this is the first known lower bound on the integrality ratio that is superlogarithmic in the input size. We also show algorithmically that the integrality ratio for Group Steiner Tree is much better for certain families of instances, which helps pinpoint the types of instances that appear to be most difficult to approximate.}, isbn = {0-89871-538-5}, url = {http://dl.acm.org/citation.cfm?id=644108.644155}, author = {Halperin,Eran and Kortsarz,Guy and Krauthgamer,Robert and Srinivasan, Aravind and Wang,Nan} } @conference {12782, title = {The integrated CWB-NC/PIOA tool for functional verification and performance analysis of concurrent systems}, booktitle = {Proceedings of the 9th international conference on Tools and algorithms for the construction and analysis of systems}, year = {2003}, month = {2003///}, pages = {431 - 436}, author = {Zhang,D. and Cleaveland, Rance and Stark,E. W} } @article {16418, title = {The interaction between zoning regulations and residential preferences as a driver of urban form}, journal = {Proceedings of the 2003 UTEP Distinguished Faculty and Student Symposium}, year = {2003}, month = {2003///}, author = {Zellner,M.L. and Riolo,R and Rand, William and Page,S.E. and Brown,D.G. and Fernandez,L.E.} } @conference {17248, title = {Interactive color mosaic and dendrogram displays for signal/noise optimization in microarray data analysis}, booktitle = {Multimedia and Expo, 2003. ICME{\textquoteright}03. Proceedings. 2003 International Conference on}, volume = {3}, year = {2003}, month = {2003///}, pages = {III{\textendash}461}, abstract = {Data analysis and visualization are strongly influenced by noise and noise filters. There are multiple sources of {\textquotedblleft}noise{\textquotedblright} in microarray data analysis, but signal/noise ratios are rarely optimized, or even considered. Here, we report a noise analysis of a novel 13 million oligonucleotide dataset - 25 human U133A (~500,000 features) profiles of patient muscle biopsies. We use our recently described interactive visualization tool, the Hierarchical Clustering Explorer (HCE), to systematically address the effect of different noise filters on resolution of arrays into {\textquotedblleft}correct{\textquotedblright} biological groups (unsupervised clustering into three patient groups of known diagnosis). We varied probe set interpretation methods (MAS 5.0, RMA), {\textquotedblleft}present call{\textquotedblright} filters, and clustering linkage methods, and investigated the results in HCE. HCE interactive features enabled us to quickly see the impact of these three variables. Dendrogram displays showed the clustering results systematically, and color mosaic displays provided a visual support for the results. We show that each of these three variables has a strong effect on unsupervised clustering. For this dataset, the strength of the biological variable was maximized, and noise minimized, using MAS 5.0, 10\% present call filter, and Average Group Linkage. We propose a general method of using interactive tools to identify the optimal signal/noise balance or the optimal combination of these three variables to maximize the effect of the desired biological variable on data interpretation.}, author = {Seo,J. and Bakay,M. and Zhao,P. and Chen,Y.W. and Clarkson,P.
and Shneiderman, Ben and Hoffman,E.P.} } @conference {17939, title = {Interactive subsurface scattering for translucent meshes}, booktitle = {Proceedings of the 2003 symposium on Interactive 3D graphics}, series = {I3D {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {75 - 82}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We propose a simple lighting model to incorporate subsurface scattering effects within the local illumination framework. Subsurface scattering is relatively local due to its exponential falloff and has little effect on the appearance of neighboring objects. These observations have motivated us to approximate the BSSRDF model and to model subsurface scattering effects by using only local illumination. Our model is able to capture the most important features of subsurface scattering: reflection and transmission due to multiple scattering. In our approach we build the neighborhood information as a preprocess and modify the traditional local illumination model into a run-time two-stage process. In the first stage we compute the reflection and transmission of light on the surface. The second stage involves bleeding the scattering effects from a vertex{\textquoteright}s neighborhood to produce the final result. We then show how to merge the run-time two-stage process into a run-time single-stage process using a precomputed integral. The complexity of our run-time algorithm is O(N), where N is the number of vertices. Using this approach, we achieve interactive frame rates with about one to two orders of magnitude speedup compared with the state-of-the-art methods.}, keywords = {BSSRDF, local illumination, reflection models, subsurface scattering}, isbn = {1-58113-645-5}, doi = {10.1145/641480.641497}, url = {http://doi.acm.org/10.1145/641480.641497}, author = {Hao,Xuejun and Baby,Thomas and Varshney, Amitabh} } @article {13299, title = {Interactive Visualization of Large Tetrahedral Meshes through Selective Refinement}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2003}, month = {2003///}, abstract = {In this paper, we address the problem of the efficient visualization of very large irregular volume datasets. To this aim, we exploit a multiresolution representation based on a domain decomposition into tetrahedral cells. A new compact data structure is described which encodes the whole dataset at a virtually continuous range of different resolutions, with a storage cost six times lower than a standard data structure for tetrahedral meshes. Such a structure supports on-line selective refinement to focus resolution on areas that the user considers more critical, based on either field values, or domain location, or opacity of the transfer function. Selective refinement is used to trade off between resolution and speed in visualization, according to user needs and hardware constraints. These features have been implemented in a new system, called TAn2 (Tetrahedra Analyzer), for the interactive visualization of three-dimensional scalar fields defined on very large tetrahedral meshes. Multiresolution representation and selective refinement make the system fully scalable with respect to the size of the dataset and to hardware requirements. }, author = {Cignoni,P. and De Floriani, Leila and Magillo,P. and Puppo,E. and Scopigno,R.
} } @article {12190, title = {The International Children{\textquoteright}s Digital Library}, journal = {First Monday}, volume = {8}, year = {2003}, month = {2003///}, author = {Druin, Allison and Bederson, Benjamin B. and Weeks,A. and Farber,A. and Grosjean,J. and Guha,M.L. and Hourcade,J. P and Lee,J. and Liao,S. and Reuter,K. and others} } @article {13928, title = {The International Children{\textquoteright}s Digital Library: Description and analysis of first use}, journal = {First Monday}, volume = {8}, year = {2003}, month = {2003///}, pages = {315 - 315}, author = {Druin, Allison and Bederson, Benjamin B. and Weeks,A. and Farber,A. and Grosjean,J. and Guha,M.L. and Hourcade,J. P and Lee,J. and Liao,S. and Reuter,K. and others} } @article {13904, title = {The International Children{\textquoteright}s Digital Library: viewing digital books online}, journal = {Interacting with Computers}, volume = {15}, year = {2003}, month = {2003///}, pages = {151 - 167}, author = {Hourcade,J. P and Bederson, Benjamin B. and Druin, Allison and Rose,A. and Farber,A. and Takayama,Y.} } @conference {16903, title = {The internet spatial spreadsheet: enabling remote visualization of dynamic spatial data and ongoing query results over a network}, booktitle = {Proceedings of the 11th ACM international symposium on Advances in geographic information systems}, series = {GIS {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {154 - 160}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Moving object databases store and process data for objects that change location frequently. Materialized views maintained over time must be updated to reflect changes due to the motion of objects in their environment. To visualize view query results, displays must be updated to reflect the change. In this paper we present the Internet Spatial Spreadsheet (ISS) as a means to organize, query, and visualize changing spatial data in a network environment such as the Internet. The goal of the ISS is to keep client visualizations of query results up to date with the server state. This is accomplished by pushing the minimal set of spatial data needed for rendering query results on the client. Incremental changes to query results are subsequently transmitted to the client as the database is updated to keep the visualization current. Additional constraints in the network environment such as firewall limitations are also considered.}, keywords = {client server, GIS, Spatial databases, Visualization}, isbn = {1-58113-730-3}, doi = {10.1145/956676.956697}, url = {http://doi.acm.org/10.1145/956676.956697}, author = {Iwerks,Glenn S. and Samet, Hanan} } @conference {15632, title = {Interpolation over light fields with applications in computer graphics}, booktitle = {Proceedings of the Fifth Workshop on Algorithm Engineering and Experiments}, year = {2003}, month = {2003///}, pages = {56 - 68}, author = {Atalay,F. B and Mount, Dave} } @article {12300, title = {Intradomain Overlays: Architecture and Applications}, journal = {Technical Reports from UMIACS, UMIACS-TR-2003-70}, year = {2003}, month = {2003/08/01/}, abstract = {We introduce an architecture for {\textquoteleft}{\textquoteleft}Intradomain Overlays{\textquoteright}{\textquoteright}, where a subset of routers within a domain is augmented with a dedicated host. These strategically placed hosts form an overlay network, and we describe a number of applications for such overlays.
These applications include efficient network monitoring, policy- and load-based packet re-routing, and network resource accounting. In this paper, we elaborate on the network monitoring application and describe a distributed protocol for monitoring routers within an AS which has been augmented with a few overlay nodes. The routers and other infrastructure are unaware of the overlay nodes, and the monitoring of individual elements is conducted using plain SNMP. We describe techniques for efficiently synthesizing and transporting the monitored SNMP data, and present results using trace data collected from an AS with 400+ routers. Our results show that the overlay-based monitoring reduces overheads by 2--4 orders of magnitude, and thus enables much finer grained monitoring and traffic engineering than is otherwise possible. (UMIACS-TR-2003-70) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1295}, author = {Kommareddy,Christopher and Guven,Tuna and Bhattacharjee, Bobby and La,Richard and Shayman,Mark} } @article {17264, title = {INTRODUCTION TO VOLUME 1, ISSUE 3: WEB NAVIGATION}, journal = {IT\&SOCIETY}, volume = {1}, year = {2003}, month = {2003///}, author = {Shneiderman, Ben and Lazar,J. and Ivory,M.} } @article {15183, title = {Intrusion-resilient public-key encryption}, journal = {Topics in Cryptology{\textemdash}CT-RSA 2003}, year = {2003}, month = {2003///}, pages = {19 - 32}, abstract = {Exposure of secret keys seems to be inevitable, and may in practice represent the most likely point of failure in a cryptographic system. Recently, the notion of intrusion-resilience [17] (which extends both the notions of forward security [3], [5] and key insulation [11]) was proposed as a means of mitigating the harmful effects that key exposure can have. In this model, time is divided into distinct periods; the public key remains fixed throughout the lifetime of the protocol but the secret key is periodically updated. Secret information is stored by both a user and a base; the user performs all cryptographic operations during a given time period, while the base helps the user periodically update his key. Intrusion-resilient schemes remain secure in the face of multiple compromises of both the user and the base, as long as they are not both compromised simultaneously. Furthermore, in case the user and base are compromised simultaneously, prior time periods remain secure (as in forward-secure schemes). Intrusion-resilient signature schemes have been previously constructed [17], [15]. Here, we give the first construction of an intrusion-resilient public-key encryption scheme, based on the recently-constructed forward-secure encryption scheme of [8]. We also consider generic transformations for securing intrusion-resilient encryption schemes against chosen-ciphertext attacks.}, doi = {10.1007/3-540-36563-X_2}, author = {Dodis,Y. and Franklin,M. and Katz, Jonathan and Miyaji,A. and Yung,M.} } @article {13376, title = {IRIS: Internet-scale Resource-Intensive Sensor Services}, journal = {Intel Research, UC Berkeley, Carnegie Mellon University}, year = {2003}, month = {2003///}, abstract = {The proliferation and affordability of smart sensors such as webcams, microphones, etc., have created opportunities for exciting new classes of distributed services.
A key stumbling block to mining these rich information sources is the lack of a common, scalable networked infrastructure for collecting, filtering, and combining the video feeds, extracting the useful information, and enabling distributed queries. In this demo, we demonstrate the design and an early prototype of such an infrastructure, called IRIS (Internet-scale Resource-Intensive Sensor services). IRIS is a potentially global network of smart sensor nodes, with webcams or other sensors, and organizing nodes that provide the means to query recent and historical sensor-based data. IRIS exploits the fact that high-volume sensor feeds are typically attached to devices with significant computing power and storage, and running a standard operating system. Aggressive filtering, smart query routing, and semantic caching are used to dramatically reduce network bandwidth utilization and improve query response times, as we will demonstrate. The service that we demonstrate here is that of a parking space finder. This service utilizes webcams that monitor parking spaces to answer queries such as the availability of parking spaces near a user{\textquoteright}s destination. }, author = {Deshpande, Amol and Nath,S. and Gibbons,P.B. and Seshan,S.} } @conference {13378, title = {IrisNet: an architecture for internet-scale sensing services}, booktitle = {Proceedings of the 29th international conference on Very large data bases - Volume 29}, series = {VLDB {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {1137 - 1140}, publisher = {VLDB Endowment}, organization = {VLDB Endowment}, abstract = {We demonstrate the design and an early prototype of IrisNet (Internet-scale Resource-Intensive Sensor Network services), a common, scalable networked infrastructure for deploying wide area sensing services. IrisNet is a potentially global network of smart sensing nodes, with webcams or other monitoring devices, and organizing nodes that provide the means to query recent and historical sensor-based data. IrisNet exploits the fact that high-volume sensor feeds are typically attached to devices with significant computing power and storage, and running a standard operating system. It uses aggressive filtering, smart query routing, and semantic caching to dramatically reduce network bandwidth utilization and improve query response times, as we demonstrate. Our demo will present two services built on IrisNet, from two very different application domains. The first one, a parking space finder, utilizes webcams that monitor parking spaces to answer queries such as the availability of parking spaces near a user{\textquoteright}s destination. The second one, a distributed infrastructure monitor, uses measurement tools installed in individual nodes of a large distributed infrastructure to answer queries such as average network bandwidth usage of a set of nodes.}, isbn = {0-12-722442-4}, url = {http://dl.acm.org/citation.cfm?id=1315451.1315568}, author = {Nath,Suman and Deshpande, Amol and Ke,Yan and Gibbons,Phillip B. and Karp,Brad and Seshan,Srinivasan} } @article {12140, title = {Iterative and incremental developments: a brief history}, journal = {Computer}, volume = {36}, year = {2003}, month = {2003/06//}, pages = {47 - 56}, abstract = {Although many view iterative and incremental development as a modern practice, its application dates as far back as the mid-1950s.
Prominent software-engineering thought leaders from each succeeding decade supported IID practices, and many large projects used them successfully. These practices may have differed in their details, but all had a common theme{\textemdash}to avoid a single-pass sequential, document-driven, gated-step approach.}, keywords = {agile methods, iterative and incremental developments, iterative methods, software engineering, software enhancement, history}, isbn = {0018-9162}, doi = {10.1109/MC.2003.1204375}, author = {Larman,C. and Basili, Victor R.} } @article {16895, title = {Iterative spatial join}, journal = {ACM Trans. Database Syst.}, volume = {28}, year = {2003}, month = {2003/09//}, pages = {230 - 256}, abstract = {The key issue in performing spatial joins is finding the pairs of intersecting rectangles. For unindexed data sets, this is usually resolved by partitioning the data and then performing a plane sweep on the individual partitions. The resulting join can be viewed as a two-step process where the partition corresponds to a hash-based join while the plane-sweep corresponds to a sort-merge join. In this article, we look at extending the idea of the sort-merge join for one-dimensional data to multiple dimensions and introduce the Iterative Spatial Join. As with the sort-merge join, the Iterative Spatial Join is best suited to cases where the data is already sorted. However, as we show in the experiments, the Iterative Spatial Join performs well when internal memory is limited, compared to the partitioning methods. This suggests that the Iterative Spatial Join would be useful for very large data sets or in situations where internal memory is a shared resource and is therefore limited, such as with today{\textquoteright}s database engines which share internal memory amongst several queries. Furthermore, the performance of the Iterative Spatial Join is predictable and has no parameters which need to be tuned, unlike other algorithms. The Iterative Spatial Join is based on a plane sweep algorithm, which requires the entire data set to fit in internal memory. When internal memory overflows, the Iterative Spatial Join simply makes additional passes on the data, thereby exhibiting only a gradual performance degradation. To demonstrate the use and efficacy of the Iterative Spatial Join, we first examine and analyze current approaches to performing spatial joins, and then give a detailed analysis of the Iterative Spatial Join as well as present the results of extensive testing of the algorithm, including a comparison with partitioning-based spatial join methods. These tests show that the Iterative Spatial Join overcomes the performance limitations of the other algorithms for data sets of all sizes as well as differing amounts of internal memory.}, keywords = {external memory algorithms, plane-sweep, Spatial databases, Spatial join}, isbn = {0362-5915}, doi = {10.1145/937598.937600}, url = {http://doi.acm.org/10.1145/937598.937600}, author = {Jacox,Edwin H. and Samet, Hanan} } @conference {15881, title = {iCLEF 2001 at Maryland: comparing term-for-term gloss and MT}, booktitle = {Evaluation of Cross-Language Information Retrieval Systems}, year = {2002}, month = {2002///}, pages = {167 - 235}, author = {Wang,J. and Oard, Douglas} } @conference {12762, title = {Identification of humans using gait}, booktitle = {IEEE Transactions on Image Processing}, year = {2002}, month = {2002///}, abstract = {We propose a view-based approach to recognize humans from their gait.
Two different image features have been considered: the width of the outer contour of the binarized silhouette of the walking person and the entire binary silhouette itself. To obtain the observation vector from the image features we employ two different methods. In the first method referred to as the indirect approach, the high-dimensional image feature is transformed to a lower-dimensional space by generating what we call the Frame to Exemplar (FED) distance. The FED vector captures both structural and dynamic traits of each individual. For compact and effective gait representation and recognition, the gait information in the FED vector sequences is captured in a hidden Markov model (HMM). In the second method referred to as the direct approach, we work with the feature vector directly (as opposed to computing the FED) and train an HMM. We estimate the HMM parameters (specifically the observation probability B) based on the distance between the exemplars and the image features. In this way we avoid learning high-dimensional probability density functions. The statistical nature of the HMM lends overall robustness to representation and recognition. The performance of the methods is illustrated using several databases. }, author = {Kale, A. and Rajagopalan, AN and Cuntoor, N. and Krueger, V. and Chellapa, Rama} } @article {14091, title = {Identification of non-autonomous non-LTR retrotransposons in the genome of Trypanosoma cruzi}, journal = {Molecular and Biochemical Parasitology}, volume = {124}, year = {2002}, month = {2002/09/10/}, pages = {73 - 78}, abstract = {As observed for most eukaryotic cells, trypanosomatids contain non-LTR retrotransposons randomly inserted in the nuclear genome. Autonomous retroelements, which code for their own transposition, have been characterized in Trypanosoma brucei (ingi) and Trypanosoma cruzi (L1Tc), whereas non-autonomous retroelements have only been characterized in T. brucei (RIME). Here, we have characterized in the genome of Trypanosoma cruzi four complete copies of a non-autonomous non-LTR retrotransposon, called NARTc. This 0.26 kb NARTc element has the characteristics of non-LTR retrotransposons: the presence of a poly(dA) tail and of a short flanking duplicated motif. Analysis of the Genome Survey Sequence databases indicated that the Trypanosoma cruzi haploid genome contains about 140 NARTc copies and about twice as many L1Tc copies. Interestingly, the NARTc and L1Tc retroelements share, with the Trypanosoma brucei ingi and RIME retrotransposons, a common sequence (the first 45 bp with 91\% identity), whereas the remaining sequences are very divergent. This suggests that these four trypanosome non-LTR retrotransposons were derived from the same common ancestor and the sequence of their 5{\textquoteright}-extremity may have a functional role. In addition, the genome of Leishmania major contains the same conserved motif present in the trypanosome retroelements, while no transposable elements have been detected so far in Leishmania sp.}, keywords = {Ingi, L1Tc, Non-LTR retrotransposon, RIME, Trypanosoma brucei, Trypanosoma cruzi}, isbn = {0166-6851}, doi = {10.1016/S0166-6851(02)00167-6}, url = {http://www.sciencedirect.com/science/article/pii/S0166685102001676}, author = {Bringaud,Fr{\'e}d{\'e}ric and Garc{\'\i}a-P{\'e}rez,Jos{\'e} Luis and Heras,Sara R. and Ghedin,Elodie and El-Sayed, Najib M.
and Andersson,Bj{\"o}rn and Baltz,Th{\'e}o and Lopez,Manuel C.} } @article {14624, title = {Identification of transcription factor binding sites in the human genome sequence}, journal = {Mammalian Genome}, volume = {13}, year = {2002}, month = {2002///}, pages = {510 - 514}, abstract = {The identification of transcription factor binding sites (TFBS) is an important initial step in determining the DNA signals that regulate transcription of the genome. We tested the performance of three distinct computational methods for the identification of TFBS applied to the human genome sequence, as judged by their ability to recover the location of experimentally determined, and uniquely mapped, TFBS taken from the TRANSFAC database. These identification methods all attempt to filter the quantity of TFBS identified by aligning positional weight matrices that describe the binding site and employ either (i) a P-value threshold for accepting a site, (ii) an over-representation measure of neighboring sites, or (iii) conservation with the mouse genome and application of P-value thresholds. The results show that the best recognition of TFBS is achieved by combining the identification of TFBS in regions of human{\textendash}mouse conservation and also by applying a high stringency P-value to the TFBS identified in non-coding regions that are not conserved. Additionally, we find that only half of the 481 experimentally mapped sites can be found in sequence regions conserved with mouse, but the predictive power of the binding site identification method is up to threefold higher in the conserved regions.}, isbn = {0938-8990}, url = {http://dx.doi.org/10.1007/s00335-002-2175-6}, author = {Levy,Samuel and Hannenhalli, Sridhar} } @article {17219, title = {IEEE Multimedia: Visions and views: Meeting human needs with new digital imaging technologies}, journal = {IEEE Distributed Systems Online}, volume = {3}, year = {2002}, month = {2002///}, author = {Shneiderman, Ben} } @article {15758, title = {Image Restoration through Subimages and Confidence Images}, journal = {Electronic Transactions on Numerical Analysis}, volume = {13}, year = {2002}, month = {2002///}, pages = {22 - 37}, url = {http://etna.mcs.kent.edu/vol.13.2002/pp22-37.dir/pp22-37.pdf}, author = {Nagy,James G. and O{\textquoteright}Leary, Dianne P.} } @inbook {12757, title = {Image-based face recognition: Issues and methods}, booktitle = {Image recognition and classification: algorithms, systems, and applications}, year = {2002}, month = {2002///}, pages = {375 - 402}, publisher = {CRC Press}, organization = {CRC Press}, isbn = {9780824707835}, author = {Zhao, W.Y. and Chellapa, Rama} } @article {18395, title = {Impediments to software engineering technology transfer}, journal = {Journal of Systems and Software. Forthcoming}, year = {2002}, month = {2002///}, author = {Zelkowitz, Marvin V and Wallace,D. R and Binkley,D. W} } @article {15111, title = {Implementation of chosen-ciphertext attacks against PGP and GnuPG}, journal = {Information Security}, year = {2002}, month = {2002///}, pages = {90 - 101}, abstract = {We recently noted [6] that PGP and other e-mail encryption protocols are, in theory, highly vulnerable to chosen-ciphertext attacks in which the recipient of the e-mail acts as an unwitting {\textquotedblleft}decryption oracle{\textquotedblright}.
We argued further that such attacks are quite feasible and therefore represent a serious concern. Here, we investigate these claims in more detail by attempting to implement the suggested attacks. On one hand, we are able to successfully implement the described attacks against PGP and GnuPG (two widely-used software packages) in a number of different settings. On the other hand, we show that the attacks largely fail when data is compressed before encryption. Interestingly, the attacks are unsuccessful for largely fortuitous reasons; resistance to these attacks does not seem due to any conscious effort made to prevent them. Based on our work, we discuss those instances in which chosen-ciphertext attacks do indeed represent an important threat and hence must be taken into account in order to maintain confidentiality. We also recommend changes in the OpenPGP standard [3] to reduce the effectiveness of our attacks in these settings. }, doi = {10.1007/3-540-45811-5_7}, author = {Jallad,K. and Katz, Jonathan and Schneier,B.} } @conference {13053, title = {The importance of lexicalized syntax models for natural language generation tasks}, booktitle = {Proc. of INLG}, year = {2002}, month = {2002///}, pages = {9 - 16}, author = {Daum{\'e}, Hal and Knight,K. and Langkilde-Geary,I. and Marcu,D. and Yamada,K.} } @inbook {17604, title = {Improved Approximation Algorithms for the Partial Vertex Cover Problem}, booktitle = {Approximation Algorithms for Combinatorial Optimization}, series = {Lecture Notes in Computer Science}, volume = {2462}, year = {2002}, month = {2002///}, pages = {161 - 174}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The partial vertex cover problem is a generalization of the vertex cover problem: given an undirected graph G = (V,E) and an integer k, we wish to choose a minimum number of vertices such that at least k edges are covered. Just as for vertex cover, 2-approximation algorithms are known for this problem, and it is of interest to see if we can do better than this. The current-best approximation ratio for partial vertex cover, when parameterized by the maximum degree d of G, is (2 - Θ(1/d)). We improve on this by presenting a $\left( 2 - \Theta\left( \tfrac{\ln \ln d}{\ln d} \right) \right)$-approximation algorithm for partial vertex cover using semidefinite programming, matching the current-best bound for vertex cover. Our algorithm uses a new rounding technique, which involves a delicate probabilistic analysis.}, isbn = {978-3-540-44186-1}, url = {http://dx.doi.org/10.1007/3-540-45753-4_15}, author = {Halperin,Eran and Srinivasan, Aravind}, editor = {Jansen,Klaus and Leonardi,Stefano and Vazirani,Vijay} } @article {13790, title = {Improved Word-Level Alignment: Injecting Knowledge about MT Divergences}, year = {2002}, month = {2002/02/14/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Word-level alignments of bilingual text (bitexts) are not only an integral part of statistical machine translation models, but also useful for lexical acquisition, treebank construction, and part-of-speech tagging. The frequent occurrence of divergences, structural differences between languages, presents a great challenge to the alignment task.
We resolve some of the most prevalent divergence cases by using syntactic parse information to transform the sentence structure of one language to bear a closer resemblance to that of the other language. In this paper, we show that common divergence types can be found in multiple language pairs (in particular, we focus on English-Spanish and English-Arabic) and systematically identified. We describe our techniques for modifying English parse trees to form resulting sentences that share more similarity with the sentences in the other languages; finally, we present an empirical analysis comparing the complexities of performing word-level alignments with and without divergence handling. Our results suggest that divergence-handling can improve word-level alignment.}, keywords = {*LEXICOGRAPHY, *MACHINE TRANSLATION, *STATISTICAL ANALYSIS, *WORDS(LANGUAGE), ACQUISITION, ALIGNMENT, EXPERIMENTAL DATA, LANGUAGE, linguistics, MATHEMATICAL MODELS, STATISTICS AND PROBABILITY, TREES}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA458774}, author = {Dorr, Bonnie J and Pearl,Lisa and Hwa,Rebecca and Habash,Nizar} } @article {17816, title = {Improving Performance of Agents by Activity Partitioning}, journal = {Technical Reports from UMIACS, UMIACS-TR-2002-96}, year = {2002}, month = {2002///}, abstract = {There is growing interest in software agents that provide a variety of services to humans, other agents, and third party software applications. Some of these agents are engaged in hundreds of activities at any given time point. In such cases, agents may try to examine a set A of activities and leverage commonalities between them in order to reduce their load. We call this activity merging. Unfortunately, in most application domains, activity merging turns out to be NP-complete. Thus, for each application domain, there is an integer k (which varies from domain to domain) such that activity merging can merge up to k activities while satisfying the application{\textquoteright}s performance expectations. In this paper, we consider the problem of what to do when the set of activities exceeds k. Our approach partitions A into disjoint sets A1 union A2 union ... union An such that each Ai contains at most k activities in it (thus the activities in each Ai can be merged using a merging algorithm). When creating such partitions, we would like to ensure that the activities inside each Ai share a lot of commonality, so that merging yields a lot of savings. In this paper, we propose two optimal algorithms (based on the A* algorithm and the branch and bound paradigm), as well as numerous greedy algorithms to solve the problem. We have implemented these algorithms and conducted detailed experiments. The results point out which algorithms are most appropriate for scaling agent performance.}, author = {Ozcan,F. and V.S. Subrahmanian} } @conference {17227, title = {Improving Web-based civic information access: a case study of the 50 US states}, booktitle = {2002 International Symposium on Technology and Society, 2002. (ISTAS{\textquoteright}02)}, year = {2002}, month = {2002///}, pages = {275 - 282}, publisher = {IEEE}, organization = {IEEE}, abstract = {An analysis of the home pages of all fifty US states reveals great variety in key design features that influence efficacy. Some states had excessively large byte counts that would slow users connected by commonly-used 56K modems.
Many web sites had few or poorly organized links that would make it hard for citizens to find what they were interested in. Features such as search boxes, privacy policies, online help, or contact information need to be added by several states. Our analysis concludes with ten recommendations and finds many further opportunities for individual states to improve their Web sites. However, still greater benefits will come through collaboration among the states that would lead to consistency, appropriate tagging, and common tools.}, keywords = {Computer aided software engineering, Computer science, contact information, Educational institutions, government data processing, Guidelines, home page design features, information resources, Laboratories, Modems, Navigation, online help, privacy, privacy policies, search boxes, Tagging, Uniform resource locators, US states, USA, User interfaces, Web sites, Web-based civic information access}, isbn = {0-7803-7284-0}, doi = {10.1109/ISTAS.2002.1013826}, author = {Ceaparu,I. and Shneiderman, Ben} } @article {12963, title = {In vitro adhesion to human cells by viable but nonculturable Enterococcus faecalis}, journal = {Current microbiology}, volume = {45}, year = {2002}, month = {2002///}, pages = {105 - 110}, abstract = {The ability of viable but nonculturable (VBNC) Enterococcus faecalis to adhere to Caco-2 and Girardi heart cultured cells and to urinary tract epithelial cells (ECs) was studied. Enterococci were harvested during the vegetative growth phase (early exponential and stationary), in the VBNC state, and after recovery of the ability to divide. VBNC bacteria maintained their adherence capability but the efficiency of attachment was reduced by about 50 to 70\%, depending on the target cell employed. The decrease was transient, since enterococci that regained their culturability showed adherence values similar to those observed for actively growing cells. Analysis of the invasive properties of E. faecalis revealed that the VBNC state caused a decrease in the number of bacteria that entered the cultured HEK cells as a result of the reduction in the number of adhering bacteria. These results highlight the importance of studies of the VBNC phenomenon, with respect to both microbial survival in the environment and the impact on human health.}, doi = {10.1007/s00284-001-0089-2}, author = {Pruzzo,C. and Tarsi,R. and Lle{\`o},M. M. and Signoretto,C. and Zampini,M. and Rita R Colwell and Canepari,P.} } @article {17230, title = {In Web we trust: establishing strategic trust among online customers}, journal = {E-Service: new directions in theory and practice}, year = {2002}, month = {2002///}, pages = {90 - 107}, abstract = {Electronic commerce (e-commerce) provides an important opportunity for established large companies to gain more customers as well as for new small companies to have a good start and rapid growth. However, establishing trust between customers and companies through a web interface, a key component of successful e-service, is not as easy as through human-buyer-human-seller interaction. In the past few years, a number of experiments have been conducted in order to determine the factors that influence customers{\textquoteright} trust in online businesses. The goal of our experiment was to establish which features appearing on commercial websites are trust-inducing.
Our study focused on three independent variables: customer service (limited and extensive), testimonial (self and external) and security features representation (graphics and text). We designed the homepages of eight web sites with all combinations of treatments of the three independent variables. Each of the 52 subjects reviewed all eight homepages and gave them a relative rank of trustworthiness. After the experiment, subjects answered additional e-trust-related questions. The experiment results and the survey support the belief that specific customer service commitments, third party testimonials and graphic security representations are important in establishing strategic trust among on-line customers.}, author = {Ceaparu,I. and Demner,D. and Hung,E. and Zhao,H. and Shneiderman, Ben} } @article {15101, title = {Incremental unforgeable encryption}, journal = {Fast Software Encryption}, year = {2002}, month = {2002///}, pages = {317 - 325}, abstract = {The recent selection of the AES block cipher to replace DES has generated interest in developing new modes of operation to supplement the modes defined as part of the DES standard [1,16,23]. We initiate the study of modes of encryption which are both incremental and unforgeable, and point out a number of applications for modes meeting these requirements. We also propose three specific modes achieving these goals, and discuss the strengths and weaknesses of each.}, doi = {10.1007/3-540-45473-X_9}, author = {Buonanno,E. and Katz, Jonathan and Yung,M.} } @conference {17539, title = {Inferring link weights using end-to-end measurements}, booktitle = {Proceedings of the 2nd ACM SIGCOMM Workshop on Internet measurement}, series = {IMW {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {231 - 236}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We describe a novel constraint-based approach to approximate ISP link weights using only end-to-end measurements. Common routing protocols such as OSPF and IS-IS choose least-cost paths using link weights, so inferred weights provide a simple, concise, and useful model of intradomain routing. Our approach extends router-level ISP maps, which include only connectivity, with link weights that are consistent with routing. Our inferred weights agree well with observed routing: while our inferred weights fully characterize the set of shortest paths between 84--99\% of the router-pairs, alternative models based on hop count and latency do so for only 47--81\% of the pairs.}, isbn = {1-58113-603-X}, doi = {10.1145/637201.637237}, url = {http://doi.acm.org/10.1145/637201.637237}, author = {Mahajan,Ratul and Spring, Neil and Wetherall,David and Anderson,Tom} } @article {18489, title = {Infranet: Circumventing web censorship and surveillance}, journal = {Proceedings of the 11th USENIX Security Symposium}, year = {2002}, month = {2002///}, pages = {247 - 262}, abstract = {An increasing number of countries and companies routinely block or monitor access to parts of the Internet. To counteract these measures, we propose Infranet, a system that enables clients to surreptitiously retrieve sensitive content via cooperating Web servers distributed across the global Internet. These Infranet servers provide clients access to censored sites while continuing to host normal uncensored content.
Infranet uses a tunnel protocol that provides a covert communication channel between its clients and servers, modulated over standard HTTP transactions that resemble innocuous Web browsing. In the upstream direction, Infranet clients send covert messages to Infranet servers by associating meaning to the sequence of HTTP requests being made. In the downstream direction, Infranet servers return content by hiding censored data in uncensored images using steganographic techniques. We describe the design, a prototype implementation, security properties, and performance of Infranet. Our security analysis shows that Infranet can successfully circumvent several sophisticated censoring techniques.}, author = {Feamster, Nick and Balazinska,M. and Harfst,G. and Balakrishnan,H. and Karger,D.} } @article {16907, title = {Integration of local and global shape analysis for logo classification}, journal = {Pattern Recognition Letters}, volume = {23}, year = {2002}, month = {2002/10//}, pages = {1449 - 1457}, abstract = {A comparison is made of global and local methods for the shape analysis of logos in an image database. The qualities of the methods are judged by using the shape signatures to define a similarity metric on the logos. As representatives for the two classes of methods, we use the negative shape method which is based on local shape information and a wavelet-based method which makes use of global information. We apply both methods to images with different kinds of degradations and examine how a particular degradation highlights the strengths and shortcomings of each method. Finally, we use these results to develop a new adaptive weighting scheme which is based on the relative performances of the two methods. This scheme gives rise to a new method that is much more robust with respect to all degradations examined and works by automatically predicting if the negative shape or wavelet method is performing better.}, keywords = {Image databases, Logos, shape recognition, shape representation, Symbol recognition}, isbn = {0167-8655}, doi = {10.1016/S0167-8655(02)00105-8}, url = {http://www.sciencedirect.com/science/article/pii/S0167865502001058}, author = {Neumann, Jan and Samet, Hanan and Soffer,Aya} } @conference {17243, title = {Interacting with identification technology: can it make us more secure?}, booktitle = {CHI {\textquoteright}02 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {564 - 565}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {Biometrics, civil liberties, face recognition, national id card, privacy, Security}, isbn = {1-58113-454-1}, doi = {10.1145/506443.506484}, url = {http://doi.acm.org/10.1145/506443.506484}, author = {Scholtz,Jean and Johnson,Jeff and Shneiderman, Ben and Hope-Tindall,Peter and Gosling,Marcus and Phillips,Jonathon and Wexelblat,Alan} } @article {13444, title = {Interactive, Incremental Scheduling for Virtual Telescopes in Education}, journal = {THIRD INTERNATIONAL NASA WORKSHOP ON PLANNING AND SCHEDULING FOR SPACE}, year = {2002}, month = {2002///}, pages = {27 - 29}, abstract = {The Telescopes in Education (TIE) project, which began in 1992, provides remote access for students to control large observatory telescopes in real time. TIE began with a single telescope, and with manual evaluation and scheduling of student requests. With the success of TIE, 20 or more additional telescopes are expected to come on line.
With proportionally more student requests, we anticipate that managing the requests and telescope resources will rapidly become too complex and time-consuming to handle manually. To respond to this problem, the Virtual Telescopes in Education (VTIE) project was begun last year. VTIE will provide networked capabilities for automated proposal preparation and evaluation, scheduling, and data archival. This paper describes the interactive scheduling capability that is under development for VTIE. Accepted observation requests will be incrementally and interactively scheduled by a constraint-based scheduler, and rescheduled as needed in response to dynamically changing weather and telescope conditions. The scheduling system incorporates a novel method for cost-sensitive constraint satisfaction, which will enable modeling of the costs of data gathering and user interaction during schedule repair.}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.134.4105}, author = {Rathod,Priyang and desJardins, Marie and Sansare,Suryakant} } @conference {16153, title = {Interactive information visualization of a million items}, booktitle = {Information Visualization, 2002. INFOVIS 2002. IEEE Symposium on}, year = {2002}, month = {2002///}, pages = {117 - 124}, abstract = {Existing information visualization techniques are usually limited to the display of a few thousand items. This article describes new interactive techniques capable of handling a million items (effectively visible and manageable on screen). We evaluate the use of hardware-based techniques available with newer graphics cards, as well as new animation techniques and non-standard graphical features such as stereovision and overlap count. These techniques have been applied to two popular information visualizations: treemaps and scatter plot diagrams; but are generic enough to be applied to other 2D representations as well.}, keywords = {animation, computer graphics cards, data visualisation, hardware-based techniques, information visualization, interactive systems, interpolation, overlap count, scatter plot diagrams, stereovision, treemaps}, doi = {10.1109/INFVIS.2002.1173156}, author = {Fekete,J.-D. and Plaisant, Catherine} } @article {17259, title = {Interactively exploring hierarchical clustering results [gene identification]}, journal = {Computer}, volume = {35}, year = {2002}, month = {2002/07//}, pages = {80 - 86}, abstract = {To date, work in microarrays, sequenced genomes and bioinformatics has focused largely on algorithmic methods for processing and manipulating vast biological data sets. Future improvements will likely provide users with guidance in selecting the most appropriate algorithms and metrics for identifying meaningful clusters{\textemdash}interesting patterns in large data sets, such as groups of genes with similar profiles. Hierarchical clustering has been shown to be effective in microarray data analysis for identifying genes with similar profiles and thus possibly with similar functions. Users also need an efficient visualization tool, however, to facilitate pattern extraction from microarray data sets. The Hierarchical Clustering Explorer integrates four interactive features to provide information visualization techniques that allow users to control the processes and interact with the results.
Thus, hybrid approaches that combine powerful algorithms with interactive visualization tools will join the strengths of fast processors with the detailed understanding of domain experts.}, keywords = {algorithmic methods, arrays, Bioinformatics, biological data sets, biology computing, Data analysis, data mining, data visualisation, Data visualization, DNA, Fluorescence, gene functions, gene identification, gene profiles, Genetics, Genomics, Hierarchical Clustering Explorer, hierarchical systems, interactive exploration, interactive information visualization tool, interactive systems, Large screen displays, meaningful cluster identification, metrics, microarray data analysis, pattern clustering, pattern extraction, Process control, Sensor arrays, sequenced genomes, Tiles}, isbn = {0018-9162}, doi = {10.1109/MC.2002.1016905}, author = {Seo,Jinwook and Shneiderman, Ben} } @article {13794, title = {Interlingua approximation: A generation-heavy approach}, journal = {Proceedings of AMTA-2002}, year = {2002}, month = {2002///}, author = {Dorr, Bonnie J and Habash,N.} } @article {12320, title = {Intermediate representations for design automation of multiprocessor DSP systems}, journal = {Design Automation for Embedded Systems}, volume = {7}, year = {2002}, month = {2002///}, pages = {307 - 323}, author = {Bambha,N. and Kianzad,V. and Khandelia,M. and Bhattacharyya, Shuvra S.} } @article {13377, title = {Irisnet: An architecture for compute-intensive wide-area sensor network services}, journal = {Intel Corporation, Pittsburgh IRPTR-02}, volume = {10}, year = {2002}, month = {2002///}, abstract = {Previous work on sensor networks has targeted ad hoc wireless networks of closely colocated, resource-constrained scalar sensor motes. Such work has overlooked richer sensor types such as webcams and microphones, which are typically attached to Internet-connected machines with significant computing power and storage. In this paper, we describe IrisNet (Internet-scale Resource-Intensive Sensor Network services), an architecture for wide-area sensor networks based on these more capable sensing nodes. IrisNet provides a common, scalable software infrastructure that enables the flexible creation of sensor-based Internet services. It dramatically reduces network bandwidth utilization through the use of senselets, binary code fragments that perform intensive data filtering at the sensing nodes, leveraging the available processing power and memory. IrisNet employs a two-tier hierarchy of sensing nodes and query processing nodes. Key features of IrisNet include flexible data partitioning, efficient and protected sharing of sensor nodes, low-latency queries, partial match caching, query-specified freshness tolerances, and monitoring and logging support. This paper reports on experiments with a working IrisNet prototype running a parking space finder service, demonstrating the effectiveness of IrisNet{\textquoteright}s features in achieving scalability and reducing query response times. }, author = {Nath,S. and Deshpande, Amol and Ke,Y. and Gibbons,P. and Karp,B. and Seshan,S.} } @conference {12139, title = {Implementing the Experience Factory concepts as a set of Experience Bases}, booktitle = {Proc. 13th Int{\textquoteright}l Conf. Software Eng.
and Knowledge Eng}, year = {2001}, month = {2001///}, pages = {102 - 109}, abstract = {This talk takes the Experience Factory concept, which was originally developed as organizational support for software development, and generalizes it to organizational support for any aspect of a business, e.g., business practices. The Experience Factory supports the evolution of processes and other forms of knowledge, based upon experiences within the organization, and related knowledge gathered from outside the organization. It then discusses how you might design an appropriate experience base for the particular set of organizational needs determined to be of importance. Specific examples are given of developing experience bases for specific organizations, and it discusses the Experience Management System (EMS) currently being evolved and how it has been applied. }, author = {Basili, Victor R. and Lindvall,M. and Costa,P.} } @article {17610, title = {Improved Bounds on the Sample Complexity of Learning}, journal = {Journal of Computer and System Sciences}, volume = {62}, year = {2001}, month = {2001/05//}, pages = {516 - 527}, abstract = {We present a new general upper bound on the number of examples required to estimate all of the expectations of a set of random variables uniformly well. The quality of the estimates is measured using a variant of the relative error proposed by Haussler and Pollard. We also show that our bound is within a constant factor of the best possible. Our upper bound implies improved bounds on the sample complexity of learning according to Haussler{\textquoteright}s decision theoretic model.}, keywords = {agnostic learning, empirical process theory, machine learning, PAC learning, sample complexity}, isbn = {0022-0000}, doi = {10.1006/jcss.2000.1741}, url = {http://www.sciencedirect.com/science/article/pii/S0022000000917410}, author = {Li,Yi and Long,Philip M. and Srinivasan, Aravind} } @conference {16702, title = {Improved cross-language retrieval using backoff translation}, booktitle = {Proceedings of the first international conference on Human language technology research}, year = {2001}, month = {2001///}, pages = {1 - 3}, author = {Resnik, Philip and Oard, Douglas and Levow,G.} } @article {13374, title = {Independence is good: dependency-based histogram synopses for high-dimensional data}, journal = {SIGMOD Rec.}, volume = {30}, year = {2001}, month = {2001/05//}, pages = {199 - 210}, abstract = {Approximating the joint data distribution of a multi-dimensional data set through a compact and accurate histogram synopsis is a fundamental problem arising in numerous practical scenarios, including query optimization and approximate query answering. Existing solutions either rely on simplistic independence assumptions or try to directly approximate the full joint data distribution over the complete set of attributes. Unfortunately, both approaches are doomed to fail for high-dimensional data sets with complex correlation patterns between attributes. In this paper, we propose a novel approach to histogram-based synopses that employs the solid foundation of statistical interaction models to explicitly identify and exploit the statistical characteristics of the data.
Abstractly, our key idea is to break the synopsis into (1) a statistical interaction model that accurately captures significant correlation and independence patterns in data, and (2) a collection of histograms on low-dimensional marginals that, based on the model, can provide accurate approximations of the overall joint data distribution. Extensive experimental results with several real-life data sets verify the effectiveness of our approach. An important aspect of our general, model-based methodology is that it can be used to enhance the performance of other synopsis techniques that are based on data-space partitioning (e.g., wavelets) by providing an effective tool to deal with the {\textquotedblleft}dimensionality curse{\textquotedblright}.}, isbn = {0163-5808}, doi = {10.1145/376284.375685}, url = {http://doi.acm.org/10.1145/376284.375685}, author = {Deshpande, Amol and Garofalakis,Minos and Rastogi,Rajeev} } @mastersthesis {15323, title = {Indexing and Retrieving Natural Language Using Ternary Expressions}, year = {2001}, month = {2001///}, school = {MASSACHUSETTS INSTITUTE OF TECHNOLOGY}, abstract = {Traditional information retrieval systems based on the "bag-of-words" paradigm cannot completely capture the semantic content of documents. Yet it is impossible with current technology to build a practical information access system that fully analyzes and understands unrestricted natural language. However, if we avoid the most complex and processing-intensive natural language understanding techniques, we can construct a large-scale information access system which is capable of processing unrestricted text, largely understanding it, and answering natural language queries with high precision. We believe that ternary expressions are the most suitable representational structure for such a system; they are expressive enough for information retrieval purposes, yet amenable to rapid large-scale indexing. }, author = {Lin,J. J} } @conference {17234, title = {Information Visualization: The Path from Innovation to Adoption}, booktitle = {Information Visualisation, International Conference on}, year = {2001}, month = {2001///}, pages = {0003 - 0003}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, isbn = {0-7695-1195-3}, doi = {http://doi.ieeecomputersociety.org/10.1109/IV.2001.10004}, author = {Shneiderman, Ben} } @conference {13064, title = {Integrated information management: an interactive, extensible architecture for information retrieval}, booktitle = {Proceedings of the first international conference on Human language technology research}, series = {HLT {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {1 - 6}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {Most current IR research is focused on specific technologies, such as filtering, classification, entity extraction, question answering, etc.
There is relatively little research on merging multiple technologies into sophisticated applications, due in part to the high cost of integrating independently-developed text processing modules.}, doi = {10.3115/1072133.1072191}, url = {http://dx.doi.org/10.3115/1072133.1072191}, author = {Nyberg,Eric and Daum{\'e}, Hal} } @article {18852, title = {Integrated Microelectromechanical System (MEMS) Inertial Measurement Unit (IMIMU)}, year = {2001}, month = {2001/12//}, institution = {CARNEGIE-MELLON UNIVERSITY}, abstract = {Processes, designs and design tools are developed to enable the monolithic integration of arrays of inertial microsensors with electronics. Accelerometers and gyroscopes, fabricated in a single CMOS process, are functional and demonstrate a single chip IMU. Two integrated post CMOS micro-machining processes are demonstrated. Thin-film microstructures are defined from the metal-dielectric stack of a conventional process. In the second process, a back-side silicon etch, followed by front-side DRIE produces bulk silicon microstructures. Accelerometer and gyroscope designs are developed with accompanying low noise electronic circuitry. Noise performance was limited to 1/f circuit noise. The chip output sensitivity is set by the interface circuit design. A thermally stabilized accelerometer and circuit design is demonstrated using embedded polysilicon resistors as temperature sensors and heaters in a closed loop. Nested gyroscope topologies are demonstrated with a lateral MEMS accelerometer used as a Coriolis acceleration sensor. Modeling and simulation tools that simultaneously consider the electromechanical transducer and the electronic circuit to predict system performance are developed. Electrical, electromechanical and mechanical parasitics required to enable predictive lumped parameter simulation are identified and can be extracted, enabling a designer to confidently estimate design performance prior to fabrication. Generic physics-based fault models for surface-micromachined actuators and sensors are developed that enable effective testing, diagnosis and design for manufacturability.}, keywords = {*ELECTROMECHANICAL DEVICES, *GYROSCOPES, *INERTIAL MEASUREMENT UNITS, *INERTIAL NAVIGATION, *MICROELECTROMECHANICAL SYSTEMS, ACCELEROMETERS, BULK MATERIALS, CHIPS(ELECTRONICS), CIRCUITS, CLOSED LOOP SYSTEMS, CORIOLIS EFFECT, Detectors, DIELECTRICS, ELECTRICAL AND ELECTRONIC EQUIPMENT, EMBEDDING, METALS, MICROSENSORS, MICROSTRUCTURE, MONOLITHIC STRUCTURES(ELECTRONICS), NAVIGATION AND GUIDANCE, PARASITES, PE63739E, PERFORMANCE(ENGINEERING), POLYSILICONS, RESISTORS, SILICON, TEMPERATURE SENSITIVE ELEMENTS, THERMAL STABILITY, THIN FILMS, tools, Topology, WUAFRLE1170030}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA399565}, author = {Fedder,Gary K. and Blanton,Shawn and Carley,L. R. and Gupta, Satyandra K. and Koester,David} } @conference {16779, title = {Integrating distributed scientific data sources with MOCHA and XRoaster}, booktitle = {Thirteenth International Conference on Scientific and Statistical Database Management, 2001. SSDBM 2001. Proceedings}, year = {2001}, month = {2001///}, pages = {263 - 266}, publisher = {IEEE}, organization = {IEEE}, abstract = {MOCHA is a novel middleware system for integrating distributed data sources that we have developed at the University of Maryland.
MOCHA is based on the idea that the code that implements user-defined types and functions should be automatically deployed to remote sites by the middleware system itself. To this end, we have developed an XML-based framework to specify metadata about data sites, data sets, and user-defined types and functions. XRoaster is a graphical tool that we have developed to help the user create all the XML metadata elements to be used in MOCHA.}, keywords = {client-server systems, data sets, data sites, Databases, Distributed computing, distributed databases, distributed scientific data source integration, Educational institutions, graphical tool, hypermedia markup languages, IP networks, java, Large-scale systems, Maintenance engineering, meta data, metadata, Middleware, middleware system, MOCHA, Query processing, remote sites, scientific information systems, user-defined types, visual programming, XML, XML metadata elements, XML-based framework, XRoaster}, isbn = {0-7695-1218-6}, doi = {10.1109/SSDM.2001.938560}, author = {Rodriguez-Martinez,M. and Roussopoulos, Nick and McGann,J. M and Kelley,S. and Mokwa,J. and White,B. and Jala,J.} } @article {18731, title = {Intelligent assembly modeling and simulation}, journal = {Assembly Automation}, volume = {21}, year = {2001}, month = {2001///}, pages = {215 - 235}, abstract = {Because of the intense competition in the current global economy, a company must conceive, design, and manufacture new products quickly and inexpensively. The design cycle can be shortened through simulation. Rapid technical advances in many different areas of scientific computing provide the enabling technologies for creating a comprehensive simulation and visualization environment for assembly design and planning. An intelligent environment has been built in which simple simulation tools can be composed into complex simulations for detecting potential assembly problems. The goal in this research is to develop high fidelity assembly simulation and visualization tools that can detect assembly related problems without going through physical mock-ups. In addition, these tools can be used to create easy-to-visualize instructions for performing assembly and service operations.}, url = {http://www.ingentaconnect.com/content/mcb/033/2001/00000021/00000003/art00004}, author = {Gupta,S.K. and Paredis,C. J. J. and Sinha,R.} } @article {18488, title = {On the interactions between layered quality adaptation and congestion control for streaming video}, journal = {11th International Packet Video Workshop}, year = {2001}, month = {2001///}, abstract = {This paper uses analysis and experiments to study the impact of various congestion control algorithms and receiver buffering strategies on the performance of streaming media delivery. While traditional congestion avoidance schemes such as TCP{\textquoteright}s additive-increase/multiplicative-decrease (AIMD) achieve high utilization, they also cause large oscillations in transmission rates that degrade the smoothness and perceptual quality of the video stream. We focus on understanding the interactions of a family of congestion control algorithms that generalize AIMD, with buffer-based quality adaptation algorithms for hierarchically-encoded and simulcast video. Our work builds on and extends the results of Rejaie et al.
[19]; we find that the combination of a non-AIMD algorithm that has smaller oscillations than AIMD and a suitable receiver buffer allocation and management strategy provides a good combination of low playout delay and TCP-friendly congestion control. The paper describes these mechanisms and the results of experiments conducted using a prototype video server for MPEG-4 video, showing that our approach can improve the interactivity and adaptivity of Internet video. }, author = {Feamster, Nick and Bansal,D. and Balakrishnan,H.} } @conference {15865, title = {Interactive cross-language information retrieval}, booktitle = {ACM SIGIR Forum}, volume = {35}, year = {2001}, month = {2001///}, pages = {1 - 3}, author = {Oard, Douglas} } @inbook {17252, title = {Interactive Exploration of Time Series Data}, booktitle = {Discovery Science}, series = {Lecture Notes in Computer Science}, volume = {2226}, year = {2001}, month = {2001///}, pages = {441 - 446}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Widespread interest in discovering features and trends in time series has generated a need for tools that support interactive exploration. This paper introduces timeboxes: a powerful direct-manipulation metaphor for the specification of queries over time series datasets. Our TimeSearcher implementation of timeboxes supports interactive formulation and modification of queries, thus speeding the process of exploring time series data sets and guiding data mining.}, isbn = {978-3-540-42956-2}, url = {http://dx.doi.org/10.1007/3-540-45650-3_38}, author = {Hochheiser,Harry and Shneiderman, Ben}, editor = {Jantke,Klaus and Shinohara,Ayumi} } @inbook {13801, title = {Interpretation of Compound Nominals Using WordNet}, booktitle = {Computational Linguistics and Intelligent Text Processing}, series = {Lecture Notes in Computer Science}, volume = {2004}, year = {2001}, month = {2001///}, pages = {169 - 181}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We describe an approach to interpreting noun-noun compounds within a question answering system. The system{\textquoteright}s lexicon, based on WordNet, provides the basis for heuristics that group noun-noun compounds with semantically similar words. The semantic relationship between the nouns in a compound is determined by the choice of heuristic for the compound. We discuss procedures for selecting one heuristic in cases where several can apply to a compound, the effects of lexical ambiguity, and some initial results of our methods.}, isbn = {978-3-540-41687-6}, url = {http://dx.doi.org/10.1007/3-540-44686-9_17}, author = {Barrett,Leslie and Davis,Anthony and Dorr, Bonnie J}, editor = {Gelbukh,Alexander} } @conference {18634, title = {Intrusion tolerance approaches in ITUA}, volume = {64}, year = {2001}, month = {2001///}, url = {http://www.dist-systems.bbn.com/papers/2001/ICDSN/01CUK01.pdf}, author = {Michel Cukier and Lyons,J. and Pandey,P. and Ramasamy,H. V. and Sanders,W. H. and Pal,P. and Webber,F. and Schantz,R. and Loyall,J.
and Watro,R.} } @inbook {17265, title = {Inventing Discovery Tools: Combining Information Visualization with Data Mining}, booktitle = {Discovery Science}, series = {Lecture Notes in Computer Science}, volume = {2226}, year = {2001}, month = {2001///}, pages = {17 - 28}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The growing use of information visualization tools and data mining algorithms stems from two separate lines of research. Information visualization researchers believe in the importance of giving users an overview and insight into the data distributions, while data mining researchers believe that statistical algorithms and machine learning can be relied on to find the interesting patterns. This paper discusses two issues that influence design of discovery tools: statistical algorithms vs. visual data presentation, and hypothesis testing vs. exploratory data analysis. I claim that a combined approach could lead to novel discovery tools that preserve user control, enable more effective exploration, and promote responsibility.}, isbn = {978-3-540-42956-2}, url = {http://dx.doi.org/10.1007/3-540-45650-3_4}, author = {Shneiderman, Ben}, editor = {Jantke,Klaus and Shinohara,Ayumi} } @conference {13570, title = {Identifying Sports Videos using Replay, Text and Camera Motion Features}, booktitle = {SPIE Conference on Storage and Retrieval for Image and Video Databases}, year = {2000}, month = {2000/01//}, pages = {332 - 343}, abstract = {Automated classification of digital video is emerging as an important piece of the puzzle in the design of content management systems for digital libraries. The ability to classify videos into various classes such as sports, news, movies, or documentaries increases the efficiency of indexing, browsing, and retrieval of video in large databases. In this paper, we discuss the extraction of features that enable identification of sports videos directly from the compressed domain of MPEG video. These features include detecting the presence of action replays, determining the amount of scene text in video, and calculating various statistics on camera and/or object motion. The features are derived from the macroblock, motion, and bit-rate information that is readily accessible from MPEG video with very minimal decoding, leading to substantial gains in processing speeds. Full-decoding of selective frames is required only for text analysis. A decision tree classifier built using these features is able to identify sports clips with an accuracy of about 93\%. }, author = {Kobla,V. and DeMenthon,D. and David Doermann} } @article {17603, title = {Improved Algorithms via Approximations of Probability Distributions}, journal = {Journal of Computer and System Sciences}, volume = {61}, year = {2000}, month = {2000/08//}, pages = {81 - 107}, abstract = {We present two techniques for constructing sample spaces that approximate probability distributions. The first is a simple method for constructing the small-bias probability spaces introduced by Naor and Naor. We show how to efficiently combine this construction with the method of conditional probabilities to yield improved parallel algorithms for problems such as set discrepancy, finding large cuts in graphs, and finding large acyclic subgraphs.
The second is a construction of small probability spaces approximating general independent distributions which are of smaller size than the constructions of Even, Goldreich, Luby, Nisan, and Veli{\v c}kovi{\'c}.}, keywords = {derandomization, discrepancy, explicit constructions, graph coloring, Parallel algorithms, small sample spaces}, isbn = {0022-0000}, doi = {10.1006/jcss.1999.1695}, url = {http://www.sciencedirect.com/science/article/pii/S0022000099916951}, author = {Chari,Suresh and Rohatgi,Pankaj and Srinivasan, Aravind} } @conference {14844, title = {In search of illumination invariants}, booktitle = {Computer Vision and Pattern Recognition, 2000. Proceedings. IEEE Conference on}, volume = {1}, year = {2000}, month = {2000///}, pages = {254 - 261 vol.1}, abstract = {We consider the problem of determining functions of an image of an object that are insensitive to illumination changes. We first show that for an object with Lambertian reflectance there are no discriminative functions that are invariant to illumination. This result leads us to adopt a probabilistic approach in which we analytically determine a probability distribution for the image gradient as a function of the surface{\textquoteright}s geometry and reflectance. Our distribution reveals that the direction of the image gradient is insensitive to changes in illumination direction. We verify this empirically by constructing a distribution for the image gradient from more than 20 million samples of gradients in a database of 1,280 images of 20 inanimate objects taken under varying lighting conditions. Using this distribution we develop an illumination insensitive measure of image comparison and test it on the problem of face recognition.}, keywords = {face recognition, illumination invariants, image comparison, image gradient, Lambertian reflectance, object recognition}, doi = {10.1109/CVPR.2000.855827}, author = {Chen,H. F and Belhumeur,P. N. and Jacobs, David W.} } @book {15819, title = {The Interface of Three Areas of Computer Science with the Mathematical Sciences}, year = {2000}, month = {2000///}, publisher = {National Academy Press}, organization = {National Academy Press}, address = {Washington, DC}, url = {http://www.nap.edu/books/NI000351/html/}, author = {O{\textquoteright}Leary, Dianne P. and Weidman,Scott T.} } @inbook {15515, title = {Introduction to logic-based artificial intelligence}, booktitle = {Logic-based artificial intelligence}, year = {2000}, month = {2000///}, pages = {3 - 33}, author = {Minker, Jack} } @article {14523, title = {An inverse method for the acoustic detection, localization and determination of the shape evolution of a bubble}, journal = {Inverse Problems}, volume = {16}, year = {2000}, month = {2000///}, pages = {1741 - 1741}, author = {Gumerov, Nail A. and Chahine,G. L} } @article {14128, title = {Iterative methods for stabilized discrete convection-diffusion problems}, journal = {IMA journal of numerical analysis}, volume = {20}, year = {2000}, month = {2000///}, pages = {333 - 333}, author = {Shih, Y. T and Elman, Howard} } @article {17705, title = {Iterative regularization and MINRES}, journal = {SIAM Journal on Matrix Analysis and Applications}, volume = {21}, year = {2000}, month = {2000///}, pages = {613 - 628}, abstract = {In this paper we present three theorems which give insight into the regularizing properties of MINRES.
While our theory does not completely characterize the regularizing behavior of the algorithm, it provides a partial explanation of the observed behavior of the method. Unlike traditional attempts to explain the regularizing properties of Krylov subspace methods, our approach focuses on convergence properties of the residual rather than on convergence analysis of the harmonic Ritz values. The import of our analysis is illustrated by two examples. In particular, our theoretical and numerical results support the following important observation: in some circumstances the dimension of the optimal Krylov subspace can be much smaller than the number of the components of the truncated spectral solution that must be computed to attain comparable accuracy. }, author = {Kilmer,M. and Stewart, G.W.} } @article {18704, title = {Impact of Cl- and Na+ ions on simulated structure and dynamics of βARK1 PH domain}, journal = {Proteins: Structure, Function, and Bioinformatics}, volume = {35}, year = {1999}, month = {1999///}, pages = {206 - 217}, abstract = {A nonzero net charge of proteins at pH 7 is usually compensated by the addition of charge-balancing counter ions during molecular dynamics simulation, which reduces electrostatic interactions. For highly charged proteins, like the βARK1 PH domain used here, it seems reasonable to also add explicit salt ions. To assess the impact of explicit salt ions, two molecular dynamics simulations of solvated βARK1 PH domain have been carried out with different numbers of Cl- and Na+ ions, based on the Cornell et al. force field and the Ewald summation, which was used in the treatment of long-range electrostatic interactions. Initial positions of ions were obtained from the AMBER CION program. Increasing the number of ions alters the average structure in loop regions, as well as the fluctuation amplitudes of dihedral angles. We found unnaturally strong interactions between side chains in the absence of salt ions. The presence of salt ions reduces these electrostatic interactions. The time needed for the equilibration of the ionic environment around the protein, after initial placement of ions close to oppositely charged side chains, is in the nanosecond time range, which can be shortened by using a higher ionic strength. Our results also suggest selecting those methods that do not place the ions initially close to the protein surface. Proteins 1999;35:206{\textendash}217. {\textcopyright} 1999 Wiley-Liss, Inc.}, keywords = {counter ions, electrostatic interaction, equilibration, GRK2 PH domain, hydrogen bonds, ionic solvent, ionic strength, molecular dynamics simulation, Proteins}, isbn = {1097-0134}, doi = {10.1002/(SICI)1097-0134(19990501)35:2<206::AID-PROT7>3.0.CO;2-A}, url = {http://onlinelibrary.wiley.com/doi/10.1002/(SICI)1097-0134(19990501)35:2<206::AID-PROT7>3.0.CO;2-A/abstract}, author = {Pfeiffer,Stefania and Fushman, David and Cowburn,David} } @conference {14790, title = {Imprecise Calendars: an Approach to Scheduling Computational Grids}, booktitle = {Distributed Computing Systems, International Conference on}, year = {1999}, month = {1999///}, pages = {0352 - 0352}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {We describe imprecise calendars, a way to organize and schedule clusters of nodes in a computation grid. Imprecise calendars permit the easy and efficient sharing of resources between different clusters of computers that are part of a computational grid.
In addition, they can be used to provide specific time reservations for applications. We describe the algorithms and policies for manipulation of imprecise calendars. We also include a series of simulation studies that compare our approach to previous batch scheduling systems for both a single cluster and a collection of clusters up to over 3,000 nodes.}, keywords = {parallel computing distributed scheduling computational grid batch scheduler meta computing}, isbn = {0-7695-0222-9}, doi = {http://doi.ieeecomputersociety.org/10.1109/ICDCS.1999.776537}, author = {Hollingsworth, Jeffrey K and Maneewongvatana, Songrit} } @article {17606, title = {Improved Approximation Guarantees for Packing and Covering Integer Programs}, journal = {SIAM Journal on Computing}, volume = {29}, year = {1999}, month = {1999///}, pages = {648 - 648}, abstract = {Several important NP-hard combinatorial optimization problems can be posed as packing/covering integer programs; the randomized rounding technique of Raghavan and Thompson is a powerful tool with which to approximate them well. We present one elementary unifying property of all these integer linear programs and use the FKG correlation inequality to derive an improved analysis of randomized rounding on them. This yields a pessimistic estimator, thus presenting deterministic polynomial-time algorithms for them with approximation guarantees that are significantly better than those known.}, isbn = {00975397}, doi = {10.1137/S0097539796314240}, url = {http://link.aip.org/link/SMJCAT/v29/i2/p648/s1\&Agg=doi}, author = {Srinivasan, Aravind} } @article {15229, title = {Improved Methods for Approximating Node Weighted Steiner Trees and Connected Dominating Sets}, journal = {Information and Computation}, volume = {150}, year = {1999}, month = {1999/04/10/}, pages = {57 - 74}, abstract = {In this paper we study the Steiner tree problem in graphs for the case when vertices as well as edges have weights associated with them. A greedy approximation algorithm based on {\textquotedblleft}spider decompositions{\textquotedblright} was developed by Klein and Ravi for this problem. This algorithm provides a worst case approximation ratio of 2 ln k, where k is the number of terminals. However, the best known lower bound on the approximation ratio is (1 - o(1)) ln k, assuming that NP ⊈ DTIME[n^{O(log log n)}], by a reduction from set cover. We show that for the unweighted case we can obtain an approximation factor of ln k. For the weighted case we develop a new decomposition theorem and generalize the notion of {\textquotedblleft}spiders{\textquotedblright} to {\textquotedblleft}branch-spiders{\textquotedblright} that are used to design a new algorithm with a worst case approximation factor of 1.5 ln k. We then generalize the method to yield an approximation factor of (1.35+ε) ln k, for any constant ε > 0. These algorithms, although polynomial, are not very practical due to their high running time, since we need to repeatedly find many minimum weight matchings in each iteration. We also develop a simple greedy algorithm that is practical and has a worst case approximation factor of 1.6103 ln k. The techniques developed for this algorithm imply a method of approximating node weighted network design problems defined by 0{\textendash}1 proper functions as well.
These new ideas also lead to improved approximation guarantees for the problem of finding a minimum node weighted connected dominating set. The previous best approximation guarantee for this problem was 3 ln n by Guha and Khuller. By a direct application of the methods developed in this paper we are able to develop an algorithm with an approximation factor of (1.35+ε) ln n for any fixed ε > 0.}, isbn = {0890-5401}, doi = {10.1006/inco.1998.2754}, url = {http://www.sciencedirect.com/science/article/pii/S0890540198927547}, author = {Guha,Sudipto and Khuller, Samir} } @conference {11986, title = {Independent motion: the importance of history}, booktitle = {Computer Vision and Pattern Recognition, 1999. IEEE Computer Society Conference on.}, volume = {2}, year = {1999}, month = {1999///}, pages = {97 Vol. 2 - 97 Vol. 2}, publisher = {IEEE}, organization = {IEEE}, abstract = {We consider a problem central in aerial visual surveillance applications: detection and tracking of small, independently moving objects in long and noisy video sequences. We directly use spatiotemporal image intensity gradient measurements to compute an exact model of background motion. This allows the creation of accurate mosaics over many frames and the definition of a constraint violation function which acts as an indication of independent motion. A novel temporal integration method maintains confidence measures over long subsequences without computing the optic flow, requiring object models, or using a Kalman filter. The mosaic acts as a stable feature frame, allowing precise localization of the independently moving objects. We present a statistical analysis of the effects of image noise on the constraint violation measure and find a good match between the predicted probability distribution function and the measured sample frequencies in a test sequence.}, keywords = {aerial visual surveillance, background image, Fluid flow measurement, Frequency measurement, History, Motion detection, Motion estimation, Motion measurement, Noise measurement, Optical computing, Optical noise, spatiotemporal image intensity gradient measurements, Spatiotemporal phenomena, Surveillance, Video sequences}, isbn = {0-7695-0149-4}, doi = {10.1109/CVPR.1999.784614}, author = {Pless, R. and Brodsky, T. and Aloimonos, J.} } @article {17235, title = {Information workspaces}, journal = {Readings in information visualization}, year = {1999}, month = {1999///}, pages = {513 - 514}, author = {Card,S.K. and Mackinlay,J.D. and Shneiderman, Ben} } @article {17236, title = {Infosphere, workspace, tools, objects}, journal = {Readings in information visualization}, year = {1999}, month = {1999///}, pages = {463 - 464}, author = {Card,S.K. and Mackinlay,J.D. and Shneiderman, Ben} } @conference {17874, title = {Infrastructure for building parallel database systems for multi-dimensional data}, booktitle = {IPPS}, year = {1999}, month = {1999///}, pages = {582 - 582}, author = {Chang,C. and Ferreira,R. and Sussman, Alan and Saltz, J.} } @conference {15691, title = {Integrated admission control in hierarchical video-on-demand systems}, booktitle = {IEEE International Conference on Multimedia Computing and Systems, 1999}, volume = {1}, year = {1999}, month = {1999/07//}, pages = {220-225 vol.1 - 220-225 vol.1}, publisher = {IEEE}, organization = {IEEE}, abstract = {We develop a unified model of a hierarchical video-on-demand (VoD) system by integrating the storage and the network subsystems.
Rather than restricting the analysis to an isolated subsystem, the performance of the VoD system is analyzed as an end-to-end system. On a system-wide basis, request handling and admission control policies are designed to minimize global performance metrics. Through our simulation, we compare different request handling policies and show that a hierarchical VoD architecture with request handling that allows retrials at more than one resource will minimize overall blocking.}, keywords = {Admission control, Bandwidth, blocking, Computer science, Design methodology, end-to-end system, hierarchical video-on-demand systems, integrated admission control, Intelligent networks, Load management, Motion pictures, Network servers, network subsystem, performance, Performance analysis, performance evaluation, quality of service, request handling, resource allocation, Resource management, simulation, storage subsystem, video on demand, video servers}, isbn = {0-7695-0253-9}, doi = {10.1109/MMCS.1999.779196}, author = {Mundur, Padma and Simon,R. and Sood,A.} } @inbook {17245, title = {Interaction}, booktitle = {Readings in information visualization}, year = {1999}, month = {1999///}, pages = {231 - 234}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, address = {San Francisco}, isbn = {1-55860-533-9}, author = {Card,S.K. and Mackinlay,J.D. and Shneiderman, Ben} } @inbook {17246, title = {Interactive analysis}, booktitle = {Readings in information visualization}, year = {1999}, month = {1999///}, pages = {261 - 262}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, address = {San Francisco}, isbn = {1-55860-533-9}, author = {Card,S.K. and Mackinlay,J.D. and Shneiderman, Ben} } @article {16165, title = {Interface and data architecture for query preview in networked information systems}, journal = {ACM Trans. Inf. Syst.}, volume = {17}, year = {1999}, month = {1999/07//}, pages = {320 - 341}, abstract = {There are numerous problems associated with formulating queries on networked information systems. These include increased data volume and complexity, accompanied by slow network access. This article proposes a new approach to network query user interfaces that consists of two phases: query preview and query refinement. This new approach is based on the concepts of dynamic queries and query previews, which guide users in rapidly and dynamically eliminating undesired records, reducing the data volume to a manageable size, and refining queries locally before submission over a network. Examples of two applications are given: a Restaurant Finder and a prototype for NASA{\textquoteright}s Earth Observing Systems Data Information Systems (EOSDIS). Data architecture is discussed, and user feedback is presented. }, keywords = {direct manipulation, dynamic query, EOSDIS, graphical user interface, query preview, query refinement, science data}, isbn = {1046-8188}, doi = {10.1145/314516.314522}, url = {http://doi.acm.org/10.1145/314516.314522}, author = {Plaisant, Catherine and Shneiderman, Ben and Doan,Khoa and Bruns,Tom} } @article {16565, title = {Interhemispheric effects on map organization following simulated cortical lesions}, journal = {Artificial intelligence in medicine}, volume = {17}, year = {1999}, month = {1999///}, pages = {59 - 85}, author = {Levitan,S.
and Reggia, James A.} } @conference {16972, title = {An international SIGCHI research agenda}, booktitle = {CHI {\textquoteright}99 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}99}, year = {1999}, month = {1999///}, pages = {171 - 171}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The goal of this workshop is to articulate an HCI research agenda statement, along with plans for continued refinement with the greater CHI community and plans for disseminating the information beyond the CHI community. There have been several similar prior efforts, including:{\textbullet} 1998 Universal Access program [2]{\textbullet} European community report, "Building the European Information Society for Us All" [1];{\textbullet} CHI 97 workshop [5] and paper [6], "HCI Research and Practice Agenda based on Human Needs {\textellipsis}";{\textbullet} CHI 96 Workshop, "CHI Ten-Year View{\textellipsis}" [3];{\textbullet} 1995 US National Science Foundation report, "New Directions in Human-Computer Interaction {\textellipsis}" [9];{\textbullet} 1995 report from the US National Research Council [7];{\textbullet} 1991 report, "{\textellipsis}HCI{\textellipsis} Serving Human Needs" [4]}, keywords = {agenda, human-computer, interaction, research}, isbn = {1-58113-158-5}, doi = {10.1145/632716.632821}, url = {http://doi.acm.org/10.1145/632716.632821}, author = {Scholtz,Jean C. and Muller,Michael and Novick,David and Olsen,Jr.,Dan R. and Shneiderman, Ben and Wharton,Cathleen} } @inbook {17262, title = {Internet and Infosphere}, booktitle = {Readings in information visualization}, year = {1999}, month = {1999///}, pages = {465 - 468}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, address = {San Francisco}, isbn = {1-55860-533-9}, author = {Card,S.K. and Mackinlay,J.D. and Shneiderman, Ben} } @article {17263, title = {Introduction to Special Issue on Usability Engineering}, journal = {Empirical Software Engineering}, volume = {4}, year = {1999}, month = {1999///}, pages = {5 - 10}, isbn = {1382-3256}, url = {http://dx.doi.org/10.1023/A:1009852413783}, author = {Scholtz,Jean and Shneiderman, Ben} } @article {15560, title = {It{\textquoteright}s okay to be skinny, if your friends are fat}, journal = {Center for Geometric Computing 4th Annual Workshop on Computational Geometry}, year = {1999}, month = {1999///}, abstract = {The kd-tree is a popular and simple data structure for range searching and nearest neighbor searching. Such a tree subdivides space into rectangular cells through the recursive application of some splitting rule. The choice of splitting rule affects the shape of cells and the structure of the resulting tree. It has been shown that an important element in achieving efficient query times for approximate queries is that each cell should be fat, meaning that the ratio of its longest side to shortest side (its aspect ratio) should be bounded. Subdivisions with fat cells satisfy a property called the packing constraint, which bounds the number of disjoint cells of a given size that can overlap a ball of a given radius. We consider a splitting rule called the sliding-midpoint rule. It has been shown to provide efficient search times for approximate nearest neighbor and range searching, both in practice and in terms of expected case query time.
However, it has not been possible to prove results about this tree because it can produce cells of unbounded aspect ratio. We show that in spite of this, the sliding-midpoint rule generates subdivisions that satisfy the packing constraint, thus explaining their good performance. }, author = {Maneewongvatana,S. and Mount, Dave} } @article {18849, title = {IMACS: a case study in real-world planning}, journal = {Intelligent Systems and their Applications, IEEE}, volume = {13}, year = {1998}, month = {1998/06//may}, pages = {49 - 60}, abstract = {This article discusses the complexities of real-world planning and how to create planning systems to address them. IMACS (Interactive Manufacturability Analysis and Critiquing System), an automated designer{\textquoteright}s aid, evaluates machined parts and suggests design modifications to improve their manufacturability, offering advantages over the planning techniques used in classical planning systems.}, keywords = {automated designer{\textquoteright}s aid, CAD/CAM, case study, computer aided analysis, computer aided production planning, design modifications, IMACS, intelligent design assistants, Interactive Manufacturability Analysis and Critiquing System, interactive systems, machined parts evaluation, machining, manufacturability, planning (artificial intelligence), planning systems}, isbn = {1094-7167}, doi = {10.1109/5254.683210}, author = {Gupta,S.K. and Nau, Dana S. and Regli,W. C.} } @article {12174, title = {Implementing a zooming User Interface: experience building Pad++}, journal = {Software: Practice and Experience}, volume = {28}, year = {1998}, month = {1998/08/01/}, pages = {1101 - 1135}, abstract = {We are investigating a novel user interface paradigm based on zooming, in which users are presented with a zooming view of a huge planar information surface. We have developed a system called Pad++ to explore this approach. The implementation of Pad++ is related to real-time 3D graphics systems and to 2D windowing systems. However, the zooming nature of Pad++ requires new approaches to rendering, screen management, and spatial indexing. In this paper, we describe the design and implementation of the Pad++ engine, focusing in particular on rendering and data structure issues. Our goal is to present useful techniques that can be adopted in other real-time graphical systems, and also to discuss how 2D zooming systems differ from other graphical systems. {\textcopyright} 1998 John Wiley \& Sons, Ltd.}, keywords = {3D graphics, animation, Pad++, real-time computer graphics, User Interface Management Systems (UIMS), windowing systems, zooming User Interfaces (ZUIs)}, isbn = {1097-024X}, doi = {10.1002/(SICI)1097-024X(199808)28:10<1101::AID-SPE190>3.0.CO;2-V}, url = {http://onlinelibrary.wiley.com/doi/10.1002/(SICI)1097-024X(199808)28:10<1101::AID-SPE190>3.0.CO;2-V/abstract}, author = {Bederson, Benjamin B. and Meyer,Jon} } @article {17703, title = {Implementing an Algorithm for Solving Block Hessenberg Systems}, volume = {UMIACS-TR-94-70}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {This paper describes the implementation of a recursive descent method for solving block Hessenberg systems. Although the algorithm is conceptually simple, its implementation in C (a natural choice of language given the recursive nature of the algorithm and its data) is nontrivial.
Particularly important is the balance between ease of use, computational efficiency, and flexibility. (Also cross-referenced as UMIACS-TR-94-70) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/642}, author = {Stewart, G.W.} } @conference {15635, title = {Improved algorithms for robust point pattern matching and applications to image registration}, booktitle = {Proceedings of the fourteenth annual symposium on Computational geometry}, series = {SCG {\textquoteright}98}, year = {1998}, month = {1998///}, pages = {155 - 164}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-973-4}, doi = {10.1145/276884.276902}, url = {http://doi.acm.org/10.1145/276884.276902}, author = {Mount, Dave and Netanyahu,Nathan S. and LeMoigne,Jacqueline} } @conference {17609, title = {Improved bounds and algorithms for hypergraph two-coloring}, booktitle = {39th Annual Symposium on Foundations of Computer Science, 1998. Proceedings}, year = {1998}, month = {1998/11/08/11}, pages = {684 - 693}, publisher = {IEEE}, organization = {IEEE}, abstract = {We show that for all large n, every n-uniform hypergraph with at most 0.7√(n/ln n){\texttimes}2^n edges can be two-colored. We, in fact, present fast algorithms that output a proper two-coloring with high probability for such hypergraphs. We also derandomize and parallelize these algorithms, to derive NC^1 versions of these results. This makes progress on a problem of Erdos (1963), improving the previous-best bound of n^{1/3-o(1)}{\texttimes}2^n due to Beck (1978). We further generalize this to a {\textquotedblleft}local{\textquotedblright} version, improving on one of the first applications of the Lovasz Local Lemma.}, keywords = {algorithms, Application software, Approximation algorithms, bounds, computational geometry, Computer science, Contracts, Erbium, graph colouring, History, hypergraph two-coloring, Lab-on-a-chip, MATHEMATICS, n-uniform hypergraph, Parallel algorithms, Polynomials, probability}, isbn = {0-8186-9172-7}, doi = {10.1109/SFCS.1998.743519}, author = {Radhakrishnan,J. and Srinivasan, Aravind} } @article {16182, title = {Incorporating String Search in a Hypertext System: User Interface and Signature File Design Issues}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {Hypertext systems provide an appealing mechanism for informally browsing databases by traversing selectable links. However, in many fact finding situations string search is an effective complement to browsing. This paper describes the application of the signature file method to achieve rapid and convenient string search in small personal computer hypertext environments. The method has been implemented in a prototype, as well as in a commercial product. Performance data for search times and storage space are presented from a commercial hypertext database. User interface issues are then discussed. Experience with the string search interface indicates that it was used successfully by novice users.
(Also cross-referenced as CAR-TR-448) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/359}, author = {Faloutsos,Christos and Lee,Raymond and Plaisant, Catherine and Shneiderman, Ben} } @article {17704, title = {Incremental Condition Calculation and Column Selection}, volume = {UMIACS-TR-90-87}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {This paper describes a method for calculating the condition number of a matrix in the Frobenius norm that can be used to select columns in the course of computing a QR decomposition. When the number of rows of the matrix is much greater than the number of columns, the additional overhead is negligible. Limited numerical experiments suggest that the method is quite good at finding gaps in the singular values of the matrix. Additional files are available via anonymous ftp at: thales.cs.umd.edu in the directory pub/reports (Also cross-referenced as UMIACS-TR-90-87) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/550}, author = {Stewart, G.W.} } @article {13675, title = {The Indexing and Retrieval of Document Images: A Survey}, journal = {Computer Vision and Image Understanding}, volume = {70}, year = {1998}, month = {1998///}, pages = {287 - 298}, abstract = {The economic feasibility of maintaining large databases of document images has created a tremendous demand for robust ways to access and manipulate the information these images contain. In an attempt to move toward a paper-less office, large quantities of printed documents are often scanned and archived as images, without adequate index information. One way to provide traditional database indexing and retrieval capabilities is to fully convert the document to an electronic representation which can be indexed automatically. Unfortunately, there are many factors which prohibit complete conversion including high cost, low document quality, and the fact that many non-text components cannot be adequately represented in a converted form. In such cases, it can be advantageous to maintain a copy of and use the document in image form. In this paper, we provide a survey of methods developed by researchers to access and manipulate document images without the need for complete and accurate conversion. We briefly discuss traditional text indexing techniques on imperfect data and the retrieval of partially converted documents. This is followed by a more comprehensive review of techniques for the direct characterization, manipulation and retrieval of images of documents containing text, graphics and scene images.}, author = {Doermann, David} } @article {13575, title = {Indexing and Retrieval of MPEG-compressed video}, journal = {The Journal of Electronic Imaging}, year = {1998}, month = {1998///}, pages = {294 - 307}, abstract = {To keep pace with the increased popularity of digital video as an archival medium, the development of techniques for fast and efficient analysis of video streams is essential. In particular, solutions to the problems of storing, indexing, browsing, and retrieving video data from large multimedia databases are necessary to allow access to these collections. Given that video is often stored efficiently in a compressed format, the costly overhead of decompression can be reduced by analyzing the compressed representation directly. In earlier work, we presented compressed domain parsing techniques which identified shots, subshots, and scenes.
In this paper, we present efficient key frame selection, feature extraction, indexing, and retrieval techniques that are directly applicable to MPEG compressed video. We develop a frame type independent representation which normalizes spatial and temporal features including frame type, frame size, macroblock encoding, and motion compensation vectors. Features for indexing are derived directly from this representation and mapped to a low-dimensional space where they can be accessed using standard database techniques. Spatial information is used as primary index into the database and temporal information is used to rank retrieved clips and enhance the robustness of the system. The techniques presented enable efficient indexing, querying, and retrieval of compressed video as demonstrated by our system which typically takes a fraction of a second to retrieve similar video scenes from a database, with over 95\% recall. }, author = {Kobla,V. and David Doermann} } @article {17710, title = {On an Inexpensive Triangular Approximation to the Singular Value Decomposition}, volume = {UMIACS-TR-97-75}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {In this paper we introduce a new decomposition called the pivoted QLP~decomposition. It is computed by applying pivoted orthogonal triangularization to the columns of the matrix $X$ in question to get an upper triangular factor $R$ and then applying the same procedure to the rows of $R$ to get a lower triangular matrix $L$. The diagonal elements of $R$ are called the R-values of $X$; those of $L$ are called the L-values. Numerical examples show that the L-values track the singular values of $X$ with considerable fidelity\,---\,far better than the R-values. At a gap in the L-values the decomposition provides orthonormal bases of analogues of row, column, and null spaces of $X$. The decomposition requires no more than twice the work required for a pivoted QR~decomposition. The computation of $R$ and $L$ can be interleaved, so that the computation can be terminated at any suitable point, which makes the decomposition especially suitable for low-rank determination problems. The interleaved algorithm also suggests a new, efficient 2-norm estimator. (Also cross-referenced as UMIACS-TR-97-75) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/920}, author = {Stewart, G.W.} } @conference {12781, title = {Infinite Probabilistic and Nonprobabilistic Testing*}, booktitle = {Foundations of software technology and theoretical computer science: 18th Conference, Chennai, India, December 17-19, 1998: proceedings}, year = {1998}, month = {1998///}, pages = {209 - 209}, author = {Kumar,K. N and Cleaveland, Rance and Smolka,S.
A} } @article {17713, title = {On Infinitely Many Algorithms for Solving Equations}, volume = {UMIACS-TR-92-121}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Translated by G. W. Stewart. This report contains a translation of {\textquoteleft}{\textquoteleft}Ueber unendlich viele Algorithmen zur Aufl\"osung der Gleichungen,{\textquoteright}{\textquoteright} a paper by E. Schr\"oder which appeared in {\it Mathematische Annalen\/} in 1870. (Also cross-referenced as UMIACS-TR-92-121) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/577}, author = {Schroeder,E. and Stewart, G.W.} } @article {16184, title = {An information architecture to support the visualization of personal histories}, journal = {Information Processing \& Management}, volume = {34}, year = {1998}, month = {1998/09//}, pages = {581 - 597}, abstract = {This paper proposes an information architecture for personal history data and describes how the data model can be extended to a runtime model for a compact visualization using graphical timelines. Our information architecture was developed for juvenile justice and medical patient records, but is usable in other application domains such as personal resumes, financial histories, or customer support. Our model groups personal history events into aggregates that are contained in facets (e.g., doctor visits, hospitalizations, or lab tests). Crosslinks enable representation of arbitrary relationships across events and aggregates. Data attributes, such as severity, can be mapped by data administrators to visual attributes such as color and line thickness. End-users have powerful controls over the display contents, and they can modify the mapping to fit their tasks.}, keywords = {Graphical user interfaces, Information Visualization, LifeLines, medical patient record, personal histories, temporal data, timelines}, isbn = {0306-4573}, doi = {10.1016/S0306-4573(98)00024-7}, url = {http://www.sciencedirect.com/science/article/pii/S0306457398000247}, author = {Plaisant, Catherine and Shneiderman, Ben and Mushlin,Rich} } @conference {16186, title = {Information visualization advanced interface and Web design}, booktitle = {CHI 98 conference summary on Human factors in computing systems}, series = {CHI {\textquoteright}98}, year = {1998}, month = {1998///}, pages = {145 - 146}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {1-58113-028-7}, doi = {10.1145/286498.286625}, url = {http://doi.acm.org/10.1145/286498.286625}, author = {Shneiderman, Ben and Plaisant, Catherine} } @conference {14806, title = {Instrumentation and measurement}, booktitle = {The grid}, year = {1998}, month = {1998///}, pages = {339 - 365}, author = {Hollingsworth, Jeffrey K and Miller, B.} } @conference {18728, title = {Integrated design and rapid manufacturing over the Internet}, year = {1998}, month = {1998///}, abstract = {An Internet-based infrastructure is being developed in order to provide designers with access to multiple layered-manufacturing services. The design domain being addressed is that of small mechanisms or electro-mechanical assemblies that would be used in robots or other mechatronic devices. The approach presented relies on the formalization of the data exchange interface between designers and manufacturers. The primary operatives in this system are Design Clients, Manufacturing Services and Process Brokers.
The Design Client allows designers to submit completed designs for algorithmic decomposition, or alternately, to compose a design from primitives and library components that have been primed with some process-related information. During this early phase, the Manufacturing Service consists of a highly automated machine that can be used to build ceramic parts, and the associated software components for design decomposition, process planning and machine control. In later phases, multiple service providers will be made accessible. The Process Broker implements a number of supporting services including process selection and optimal part orientation. Future broker services will include manufacturability analysis, directory services and accreditation etc. Currently, this interface is being built and evaluated internally at Stanford and CMU. It will be made available for use by other selected universities in the near future. }, url = {http://cdr.stanford.edu/interface/publications/DETC98CIE-5519.pdf}, author = {Rajagopalan,S. and Pinilla,J. M. and Losleben,P. and Tian,Q. and Gupta,S.K.} } @article {15522, title = {Integrity constraints: Semantics and applications}, journal = {Logics for databases and information systems}, year = {1998}, month = {1998///}, pages = {265 - 306}, abstract = {Integrity constraints are introduced in a logical framework. Examples are given to illustrate the expressiveness of integrity constraints. Various definitions for the semantics of integrity constraints are defined and compared. Additional types of constraints are also mentioned. Techniques of reasoning with integrity constraints, including model elimination and the residue method, are explained. Applications of integrity constraints are considered in detail, including semantic query optimization, cooperative answering, combining databases, and view updates. Additional applications to order optimization, query folding, object-oriented databases, and database security are sketched. The conclusion lists areas of integrity constraints that need to be investigated.}, doi = {10.1007/978-1-4615-5643-5_9}, author = {Godfrey,P. and Grant,J. and Gryz,J. and Minker, Jack} } @conference {18739, title = {An intelligent environment for simulating mechanical assembly operations}, year = {1998}, month = {1998///}, pages = {13 - 16}, abstract = {Rapid technical advances in many different areas of scientific computing provide the enabling technologies for creating a comprehensive simulation and visualization environment for assembly design and planning. We have built an intelligent environment in which simple simulations can be composed together to create complex simulations for detecting potential assembly problems. Our goal in this project is to develop high fidelity assembly simulation and visualization tools that can detect assembly related problems without going through physical mock-ups. In addition, these tools can be used to create easy-to-visualize instructions for performing assembly and service operations. }, url = {http://www.cs.cmu.edu/afs/cs.cmu.edu/Web/People/paredis/pubs/DFM98.pdf}, author = {Gupta,S.K. and Paredis,C. J. J. and Sinha,R. and Wang,C. H. and Brown,P. F.} } @article {17257, title = {Interactive Smooth Zooming in a Starfield Information Visualization}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {This paper discusses the design and implementation of interactive smooth zooming of a starfield display.
A starfield display is a two-dimensional scatterplot of a multidimensional database where every item from the database is represented as a small colored glyph whose position is determined by its ranking along ordinal attributes of the items laid out on the axes. One way of navigating this visual information is by using a zooming tool to incrementally zoom in on the items by varying the attribute range on either axis independently - such zooming causes the glyphs to move continuously and to grow or shrink. To get a feeling of flying through the data, users should be able to track the motion of each glyph without getting distracted by flicker or large jumps - conditions that necessitate high display refresh rates and closely spaced glyphs on successive frames. Although the use of high-speed hardware can achieve the required visual effect for small databases, the twin software bottlenecks of rapidly accessing display items and constructing a new display image fundamentally retard the refresh rate. Our work explores several methods to overcome these bottlenecks, presents a taxonomy of various zooming methods and introduces a new widget, the zoom bar, that facilitates zooming. (Also cross-referenced as CAR-TR-714) (Also cross-referenced as ISR-TR-94-46) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/411}, author = {Jog,Ninad and Shneiderman, Ben} } @article {16185, title = {Interfaces and tools for the Library of Congress national digital library program}, journal = {Information Processing \& Management}, volume = {34}, year = {1998}, month = {1998/09//}, pages = {535 - 555}, abstract = {This paper describes a collaborative effort to explore user needs in a digital library, develop interface prototypes for a digital library and suggest and prototype tools for digital librarians and users at the Library of Congress (LC). Interfaces were guided by an assessment of user needs and aimed to maximize interaction with primary resources and support both browsing and analytical search strategies. Tools to aid users and librarians in overviewing collections, previewing objects and gathering results were created and serve as the beginnings of a digital librarian toolkit. The design process and results are described and suggestions for future work are offered.}, isbn = {0306-4573}, doi = {10.1016/S0306-4573(98)00020-X}, url = {http://www.sciencedirect.com/science/article/pii/S030645739800020X}, author = {Marchionini,Gary and Plaisant, Catherine and Komlodi,Anita} } @conference {16328, title = {If your version control system could talk}, booktitle = {ICSE Workshop on Process Modelling and Empirical Studies of Software Engineering}, year = {1997}, month = {1997///}, author = {Ball,T. and Kim,J. M and Porter, Adam and Siy,H. P} } @conference {13573, title = {Image Indexing with Minimum Adaptive Spatial Segmentation}, booktitle = {Proceedings of VISUAL 1997}, year = {1997}, month = {1997///}, author = {Zhong,S.
and David Doermann and Rosenfeld, A.} } @conference {17543, title = {Implementing a performance forecasting system for metacomputing: the Network Weather Service}, booktitle = {Proceedings of the 1997 ACM/IEEE conference on Supercomputing (CDROM)}, series = {Supercomputing {\textquoteright}97}, year = {1997}, month = {1997///}, pages = {1 - 19}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In this paper we describe the design and implementation of a system called the Network Weather Service (NWS) that takes periodic measurements of deliverable resource performance from distributed networked resources, and uses numerical models to dynamically generate forecasts of future performance levels. These performance forecasts, along with measures of performance fluctuation (e.g. the mean square prediction error) and forecast lifetime that the NWS generates, are made available to schedulers and other resource management mechanisms at runtime so that they may determine the quality-of-service that will be available from each resource. We describe the architecture of the NWS and implementations that we have developed and are currently deploying for the Legion [13] and Globus/Nexus [7] metacomputing infrastructures. We also detail NWS forecasts of resource performance using both the Legion and Globus/Nexus implementations. Our results show that simple forecasting techniques substantially outperform measurements of current conditions (commonly used to gauge resource availability and load) in terms of prediction accuracy. In addition, the techniques we have employed are almost as accurate as substantially more complex modeling methods. We compare our techniques to a sophisticated time-series analysis system in terms of forecasting accuracy and computational complexity.}, isbn = {0-89791-985-8}, doi = {10.1145/509593.509600}, url = {http://doi.acm.org/10.1145/509593.509600}, author = {Wolski,Rich and Spring, Neil and Peterson,Chris} } @article {16706, title = {Implicit object constructions and the (in)transitivity continuum}, journal = {Proceedings of the 33rd Chicago Linguistic Society}, year = {1997}, month = {1997///}, pages = {327 - 336}, author = {Olsen,M.B. and Resnik, Philip} } @conference {17607, title = {Improved approximations for edge-disjoint paths, unsplittable flow, and related routing problems}, booktitle = {38th Annual Symposium on Foundations of Computer Science, 1997. Proceedings}, year = {1997}, month = {1997/10/20/22}, pages = {416 - 425}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present improved approximation algorithms for a family of problems involving edge-disjoint paths and unsplittable flow, and for some related routing problems.
The central theme of all our algorithms is the underlying multi-commodity flow relaxation.}, keywords = {Approximation algorithms, Bandwidth, Channel allocation, computational complexity, Computer science, edge-disjoint paths, graph theory, High speed integrated circuits, Image motion analysis, Information systems, multi-commodity flow relaxation, Multiprocessor interconnection networks, network routing, Optical fiber networks, Routing, routing problems, unsplittable flow}, isbn = {0-8186-8197-7}, doi = {10.1109/SFCS.1997.646130}, author = {Srinivasan, Aravind} } @article {17612, title = {Improved parallel approximation of a class of integer programming problems}, journal = {Algorithmica}, volume = {17}, year = {1997}, month = {1997///}, pages = {449 - 462}, abstract = {We present a method to derandomize RNC algorithms, converting them to NC algorithms. Using it, we show how to approximate a class of NP-hard integer programming problems in NC, to within factors better than the current-best NC algorithms (of Berger and Rompel and Motwani et al.); in some cases, the approximation factors are as good as the best-known sequential algorithms, due to Raghavan. This class includes problems such as global wire-routing in VLSI gate arrays and a generalization of telephone network planning in SONET rings. Also for a subfamily of the {\textquotedblleft}packing{\textquotedblright} integer programs, we provide the first NC approximation algorithms; this includes problems such as maximum matchings in hypergraphs, and generalizations. The key to the utility of our method is that it involves sums of superpolynomially many terms, which can however be computed in NC; this superpolynomiality is the bottleneck for some earlier approaches, due to Berger and Rompel and Motwani et al.}, isbn = {0178-4617}, url = {http://dx.doi.org/10.1007/BF02523683}, author = {Alon,N. and Srinivasan, Aravind} } @conference {17614, title = {Improving the discrepancy bound for sparse matrices: better approximations for sparse lattice approximation problems}, booktitle = {Proceedings of the eighth annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}97}, year = {1997}, month = {1997///}, pages = {692 - 701}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-390-0}, url = {http://dl.acm.org/citation.cfm?id=314161.314418}, author = {Srinivasan, Aravind} } @conference {13578, title = {Integrated Segmentation and Clustering for Enhanced Compression of Document Images}, booktitle = {ICDAR}, year = {1997}, month = {1997///}, pages = {406 - 406}, author = {Kia,Omid E and David Doermann} } @conference {17240, title = {Intelligent software agents vs. user-controlled direct manipulation: a debate}, booktitle = {CHI {\textquoteright}97 extended abstracts on Human factors in computing systems: looking to the future}, series = {CHI EA {\textquoteright}97}, year = {1997}, month = {1997///}, pages = {105 - 106}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Critical issues in human-computer interaction - in particular, the advantages and disadvantages of intelligent agents and direct manipulation - will be discussed, debated, and hotly contested.
The intent of the participants is to strike an appropriate balance between a serious discussion of the issues and an entertaining debate.}, keywords = {agents, direct manipulation, graphical representation, intelligent interfaces}, isbn = {0-89791-926-2}, doi = {10.1145/1120212.1120281}, url = {http://doi.acm.org/10.1145/1120212.1120281}, author = {Maes,Pattie and Shneiderman, Ben and Miller,Jim} } @conference {17881, title = {Interoperability of data parallel runtime libraries}, booktitle = {IPPS}, year = {1997}, month = {1997///}, pages = {451 - 451}, author = {Edjlali,G. and Sussman, Alan and Saltz, J.} } @article {15987, title = {Interpreting presuppositions using active logic: From contexts to utterances}, journal = {Computational Intelligence}, volume = {13}, year = {1997}, month = {1997///}, pages = {391 - 413}, author = {Gurney,J. and Perlis, Don and Purang,K.} } @conference {16348, title = {Iteration space slicing and its application to communication optimization}, booktitle = {Proceedings of the 11th international conference on Supercomputing}, year = {1997}, month = {1997///}, pages = {221 - 228}, author = {Pugh, William and Rosser,E.} } @book {14158, title = {Iterative Methods for Linear and Nonlinear Equations}, volume = {66}, year = {1997}, month = {1997///}, publisher = {JSTOR}, organization = {JSTOR}, author = {Elman, Howard} } @article {14148, title = {Iterative methods for problems in computational fluid dynamics}, journal = {Iterative Methods in Scientific Computing}, year = {1997}, month = {1997///}, pages = {271 - 327}, author = {Elman, Howard and Silvester, D. J and Wathen, A. J} } @article {18703, title = {Identification of the Binding Site for Acidic Phospholipids on the PH Domain of Dynamin: Implications for Stimulation of GTPase Activity}, journal = {Journal of Molecular Biology}, volume = {255}, year = {1996}, month = {1996/01/12/}, pages = {14 - 21}, abstract = {It has recently been suggested that pleckstrin homology (PH) domains bind specifically to phospholipids, with phosphatidylinositol-4,5-bisphosphate (PtdIns(4,5)P2) being most strongly bound. This observation suggests that PH domains may be responsible for membrane association of proteins in which they occur. Further, this membrane association may be regulated by enzymes that modify lipid head groups to which PH domains may bind. We have studied the binding of phospholipids to the PH domain of human dynamin, a 100 kDa GTPase that is involved in the initial stages of endocytosis. We describe a rapid method for screening PH domain/ligand interactions that gives precise binding constants. We confirm that PtdIns(4,5)P2 can bind to dynamin PH domain, although not in an aggregated state. Using NMR spectroscopy, we have mapped a specific site on the surface of dynamin PH domain at which binding of Ins(1,4,5)P3 (the head-group skeleton of PtdIns(4,5)P2) occurs. The relative affinity of acidic phospholipids for dynamin PH domain correlates with their ability to activate the GTPase of dynamin. We propose, therefore, that the interaction of these phospholipids with dynamin is likely to occur via the PH domain.
Given the fact that PH domains are often found in proteins associated with GTPase activity, or in guanine nucleotide exchange factors, we suggest that one role of PH domains may be to couple phosphatidylinositol signalling to GTP hydrolysis.}, keywords = {Fluorescence, ligand binding, NMR, PH domain, phospholipid}, isbn = {0022-2836}, doi = {10.1006/jmbi.1996.0002}, url = {http://www.sciencedirect.com/science/article/pii/S0022283696900029}, author = {Zheng,Jie and Cahill,Sean M. and Lemmon,Mark A. and Fushman, David and Schlessinger,Joseph and Cowburn,David} } @article {17231, title = {Incremental data structures and algorithms for dynamic query interfaces}, journal = {ACM SIGMOD Record}, volume = {25}, year = {1996}, month = {1996/12//}, pages = {21 - 24}, abstract = {Dynamic query interfaces (DQIs) form a recently developed method of database access that provides continuous realtime feedback to the user during the query formulation process. Previous work shows that DQIs are elegant and powerful interfaces to small databases. Unfortunately, when applied to large databases, previous DQI algorithms slow to a crawl. We present a new approach to DQI algorithms that works well with large databases.}, keywords = {algorithm, data structure, database, direct manipulation, dynamic query, Information Visualization, user interface}, isbn = {0163-5808}, doi = {10.1145/245882.245891}, url = {http://doi.acm.org/10.1145/245882.245891}, author = {Tanin,Egemen and Beigel,Richard and Shneiderman, Ben} } @inbook {13005, title = {Inferring phylogenies from DNA sequence data: The effects of sampling}, booktitle = {New Uses for New Phylogenies}, year = {1996}, month = {1996///}, pages = {103 - 115}, publisher = {Oxford University Press}, organization = {Oxford University Press}, author = {Otto,S. P and Cummings, Michael P. and Wakeley,J.}, editor = {Harvey,PH and Leigh Brown,AJ and Maynard Smith,J and Nee,S} } @article {16202, title = {Integrated network management of hybrid networks}, journal = {AIP Conference Proceedings}, volume = {361}, year = {1996}, month = {1996/03/01/}, pages = {345 - 350}, abstract = {We describe our collaborative efforts towards the design and implementation of a next generation integrated network management system for hybrid networks (INMS/HN). We describe the overall software architecture of the system at its current stage of development. This network management system is specifically designed to address issues relevant for complex heterogeneous networks consisting of seamlessly interoperable terrestrial and satellite networks. Network management systems are a key element for interoperability in such networks. We describe the integration of configuration management and performance management. The next step in this integration is fault management. In particular we describe the object model, issues of the Graphical User Interface (GUI), browsing tools and performance data graphical widget displays, management information database (MIB) organization issues. Several components of the system are being commercialized by Hughes Network Systems.
{\textcopyright} 1996 American Institute of Physics.}, isbn = {0094243X}, doi = {10.1063/1.50028}, url = {http://proceedings.aip.org/resource/2/apcpcs/361/1/345_1?isAuthorized=no}, author = {Baras,John S and Ball,Mike and Karne,Ramesh K and Kelley,Steve and Jang,Kap D and Plaisant, Catherine and Roussopoulos, Nick and Stathatos,Kostas and Vakhutinsky,Andrew and Jaibharat,Valluri and Whitefield,David} } @book {14225, title = {Interaction between 3D Shape and Motion: Theory and Applications}, year = {1996}, month = {1996/06//}, publisher = {Computer Vision Laboratory, University of Maryland}, organization = {Computer Vision Laboratory, University of Maryland}, abstract = {Research during the past few years has attempted to characterize the errors that arise in computing 3D motion (egomotion estimation) and in a scene{\textquoteright}s structure (structure from motion) from a sequence of images acquired by a moving sensor. This paper presents a new geometric framework that characterizes how the three-dimensional space recovered by a moving monocular observer, whose 3D motion is estimated with some error, is distorted. We characterize the space of distortions by its level sets, that is, by a family of iso-distortion surfaces, each of which describes the locus over which the depths of points in the scene are distorted by the same multiplicative factor. By analyzing the geometry of the regions where the distortion factor is negative, that is, where the visibility constraint is violated, we make explicit situations which are likely to give rise to ambiguities in motion estimation. We also apply our approach to a uniqueness analysis for 3D motion analysis from normal flow; we study the constraints on egomotion, object motion and depth for an independently moving object to be detectable by a moving observer; and we offer a quantitative account of the precision needed in an inertial sensor for accurate estimation of 3D motion.}, author = {Ferm{\"u}ller, Cornelia and Aloimonos, J. and Cheong,L.} } @book {13298, title = {Image Analysis and Processing: 8th International Conference, Iciap {\textquoteright}95, San Remo, Italy, September 13-15, 1995 : Proceedings}, year = {1995}, month = {1995/09/28/}, publisher = {Springer}, organization = {Springer}, abstract = {This book presents the proceedings of the 8th International Conference on Image Analysis and Processing, ICIAP {\textquoteright}95, held in Sanremo, Italy in September 1995 under the sponsorship of the International Association of Pattern Recognition IAPR. The volume presents 108 papers selected from more than 180 submissions together with six invited contributions. The papers are written by a total of 265 contributing authors and give a comprehensive state-of-the-art report on all current issues of image analysis and processing.
Theoretical aspects are addressed as well as systems design and advanced applications, particularly in medical imaging.}, keywords = {Artificial intelligence, COMPUTER AIDED DESIGN, Computer Graphics, Computer science, Computer vision, Computers / CAD-CAM, Computers / Computer Graphics, Computers / Computer Science, Computers / Computer Vision \& Pattern Recognition, Computers / Image Processing, Computers / Intelligence (AI) \& Semantics, Computers / Optical Data Processing, Computers / Software Development \& Engineering / General, Electronic books, IMAGE PROCESSING, Image processing/ Congresses, Imaging systems, Optical data processing, Optical pattern recognition, software engineering}, isbn = {9783540602989}, author = {Braccini,Carlo and De Floriani, Leila and Vernazza,Gianni} } @article {17222, title = {Image-browser taxonomy and guidelines for designers}, journal = {IEEE Software}, volume = {12}, year = {1995}, month = {1995/03//}, pages = {21 - 32}, abstract = {In many applications users must browse large images. Most designers merely use two one-dimensional scroll bars or ad hoc designs for two-dimensional scroll bars. However, the complexity of two-dimensional browsing suggests that more careful analysis, design, and evaluation might lead to significant improvements. Our exploration of existing 2D browsers has led us to identify many features and a wide variety of tasks performed with the browsers. We introduce an informal specification technique to describe 2D browsers and a task taxonomy, suggest design features and guidelines, and assess existing strategies. We focus on the tools to explore a selected image and so do not cover techniques to browse a series of images or to browse large-image databases.}, keywords = {analysis, Computer Graphics, design, designer guidelines, Equations, Europe, Evaluation, Formal specifications, Graphical user interfaces, Guidelines, IMAGE PROCESSING, image-browser taxonomy, informal specification technique, Laboratories, large image browsing, Layout, Road transportation, selected image exploration, SHAPE, Software design, task taxonomy, Taxonomy, tools, two-dimensional browsing, user interface management systems, visual databases}, isbn = {0740-7459}, doi = {10.1109/52.368260}, author = {Plaisant, Catherine and Carr,D. and Shneiderman, Ben} } @conference {15219, title = {Improved approximation algorithms for uniform connectivity problems}, booktitle = {Proceedings of the twenty-seventh annual ACM symposium on Theory of computing}, series = {STOC {\textquoteright}95}, year = {1995}, month = {1995///}, pages = {1 - 10}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-718-9}, doi = {10.1145/225058.225066}, url = {http://doi.acm.org/10.1145/225058.225066}, author = {Khuller, Samir and Raghavachari,Balaji} } @article {17605, title = {Improved approximation guarantees for packing and covering integer programs}, year = {1995}, month = {1995/09//}, abstract = {Several important NP-hard combinatorial optimization problems can be posed as packing/covering integer programs; the randomized rounding technique of Raghavan \& Thompson is a powerful tool to approximate them well. We present one elementary unifying property of all these integer programs (IPs), and use the FKG correlation inequality to derive an improved analysis of randomized rounding on them.
This also yields a pessimistic estimator, thus presenting deterministic polynomial-time algorithms for them with approximation guarantees significantly better than those known. }, author = {Srinivasan, Aravind} } @conference {17608, title = {Improved approximations of packing and covering problems}, booktitle = {Proceedings of the twenty-seventh annual ACM symposium on Theory of computing}, series = {STOC {\textquoteright}95}, year = {1995}, month = {1995///}, pages = {268 - 276}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-718-9}, doi = {10.1145/225058.225138}, url = {http://doi.acm.org/10.1145/225058.225138}, author = {Srinivasan, Aravind} } @article {17416, title = {The info superhighway: for the people}, journal = {Communications of the ACM}, volume = {38}, year = {1995}, month = {1995/01//}, pages = {162 - 162}, abstract = {The opportunities are attractive, but some pavers of the Information Superhighway (ISH) are too eager to pour concrete. They risk making rough roads that will alienate the very users they seek. These technologically oriented ISH devotees may be building dramatic overpasses and painting stripes without figuring out where the highway should be going. I believe greater attention should be paid to identifying appropriate services, designing a consistent user interface, and developing a clearer model of the diverse user communities.}, isbn = {0001-0782}, doi = {10.1145/204865.204898}, url = {http://doi.acm.org/10.1145/204865.204898}, author = {Shneiderman, Ben} } @conference {14171, title = {The information in the direction of image flow}, booktitle = {International Symposium on Computer Vision, 1995. Proceedings}, year = {1995}, month = {1995/11/21/23}, pages = {461 - 466}, publisher = {IEEE}, organization = {IEEE}, abstract = {If instead of the full motion field, we consider only the direction of the motion field due to a rigid motion, what can we say about the information regarding the three-dimensional motion? In this paper it is shown that considering as the imaging surface the whole sphere, independently of the scene in view, two different rigid motions cannot give rise to the same directional motion field. If we restrict the image to half of a sphere (or an infinitely large image plane) two different rigid motions with instantaneous translational and rotational velocities (t1, ω1) and (t2, ω2) cannot give rise to the same directional motion field unless the plane through t1 and t2 is perpendicular to the plane through ω1 and ω2 (i.e., (t1{\texttimes}t2){\textperiodcentered}(ω1{\texttimes}ω2)=0). In addition, in order to give a practical significance to these uniqueness results for the case of a limited field of view we also characterize the locations on the image where the motion vectors due to the different motions must have different directions. If (ω1{\texttimes}ω2){\textperiodcentered}(t1{\texttimes}t2)=0 and certain additional constraints are met, then the two rigid motions could produce motion fields with the same direction.
For this to happen the depth of each corresponding surface has to be within a certain range, defined by a second and a third order surface.}, keywords = {Automation, CAMERAS, Computer vision, Educational institutions, image flow, Image motion analysis, Image sequences, imaging surface, Laboratories, Layout, Motion analysis, Motion estimation, motion field, motion vectors, Optical imaging, rigid motion, rigid motions, three-dimensional motion}, isbn = {0-8186-7190-4}, doi = {10.1109/ISCV.1995.477071}, author = {Brodsky, T. and Ferm{\"u}ller, Cornelia and Aloimonos, J.} } @article {17839, title = {An integrated runtime and compile-time approach for parallelizing structured and block structured applications}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {6}, year = {1995}, month = {1995/07//}, pages = {747 - 754}, abstract = {In compiling applications for distributed memory machines, runtime analysis is required when data to be communicated cannot be determined at compile-time. One such class of applications requiring runtime analysis is block structured codes. These codes employ multiple structured meshes, which may be nested (for multigrid codes) and/or irregularly coupled (called multiblock or irregularly coupled regular mesh problems). In this paper, we present runtime and compile-time analysis for compiling such applications on distributed memory parallel machines in an efficient and machine-independent fashion. We have designed and implemented a runtime library which supports the runtime analysis required. The library is currently implemented on several different systems. We have also developed compiler analysis for determining data access patterns at compile time and inserting calls to the appropriate runtime routines. Our methods can be used by compilers for HPF-like parallel programming languages in compiling codes in which data distribution, loop bounds and/or strides are unknown at compile-time. To demonstrate the efficacy of our approach, we have implemented our compiler analysis in the Fortran 90D/HPF compiler developed at Syracuse University. We have experimented with a multiblock Navier-Stokes solver template and a multigrid code. Our experimental results show that our primitives have low runtime communication overheads and the compiler parallelized codes perform within 20\% of the codes parallelized by manually inserting calls to the runtime library.}, keywords = {Bandwidth, block structured applications, block structured codes, compile-time approach, compiling applications, data access patterns, Data analysis, Delay, distributed memory machines, distributed memory systems, FORTRAN, Fortran 90D/HPF compiler, High performance computing, HPF-like parallel programming languages, integrated runtime approach, irregularly coupled regular mesh problems, multigrid code, Navier-Stokes solver template, Parallel machines, parallel programming, Pattern analysis, performance evaluation, program compilers, Program processors, Runtime library, Uninterruptible power systems}, isbn = {1045-9219}, doi = {10.1109/71.395403}, author = {Agrawal,G.
and Sussman, Alan and Saltz, J.} } @inbook {17241, title = {Interacting with computers}, booktitle = {Human-computer interaction}, year = {1995}, month = {1995///}, pages = {399 - 400}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, address = {San Francisco}, isbn = {1-55860-246-1}, author = {Shneiderman, Ben} } @conference {18750, title = {Interactive feature recognition using multi-processor methods}, year = {1995}, month = {1995///}, author = {Regli,W. C. and Gupta,S.K. and Nau, Dana S.} } @article {16457, title = {Interoperable query processing from object to relational schemas based on a parameterized canonical representation}, journal = {International Journal of Cooperative Information Systems}, volume = {4}, year = {1995}, month = {1995///}, pages = {81 - 120}, author = {Raschid, Louiqa and Chang,Y.} } @conference {12031, title = {Iso-distortion contours and egomotion estimation}, booktitle = {Proceedings of International Symposium on Computer Vision, 1995}, year = {1995}, month = {1995/11/21/23}, pages = {55 - 60}, publisher = {IEEE}, organization = {IEEE}, abstract = {This paper introduces the framework of iso-distortion contours to deal with the problem of depth distortion due to erroneous motion estimates, and various related aspects such as the effectiveness of the visibility constraint. The framework can also be used to inquire into the uniqueness aspect of normal flow. Future work will examine the implications of the iso-distortion contours for the problem of multiple frame integration.}, keywords = {Automation, Computer vision, Degradation, depth distortion, Educational institutions, egomotion estimation, Equations, erroneous motion estimates, Error analysis, HUMANS, Image sequences, iso-distortion contours, Laboratories, Layout, Motion estimation, Robustness, visibility constraint}, isbn = {0-8186-7190-4}, doi = {10.1109/ISCV.1995.476977}, author = {LoongFah Cheong and Aloimonos, J.} } @article {17688, title = {An Iterative Method for Solving Linear Inequalities}, volume = {CS-TR-1833}, year = {1995}, month = {1995/02/06/}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {This paper describes and analyzes a method for finding nontrivial solutions of the inequality $Ax \geq 0$, where $A$ is an $m \times n$ matrix of rank $n$. The method is based on the observation that a certain function $f$ has a unique minimum if and only if the inequality {\it fails to have} a nontrivial solution. Moreover, if there is a solution, an attempt to minimize $f$ will produce a sequence that will diverge in a direction that converges to a solution of the inequality. The technique can also be used to solve inhomogeneous inequalities and hence linear programming problems, although no claims are made about competitiveness with existing methods. }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/355}, author = {Stewart, G.W.} } @conference {13789, title = {ILustrate: a MT Developers{\textquoteright} Tool with a Two-Component View of the Interlingua}, booktitle = {Proceedings of the First AMTA Conference,
Columbia, MD}, year = {1994}, month = {1994///}, pages = {40 - 47}, abstract = {The interlingua (IL) in machine translation (MT) systems can be defined in terms of two components: (i) {\textquotedblleft}lexical IL forms{\textquotedblright} within language-specific lexicons where each lexical entry has associated with it one or more lexical representations, and (ii) algorithms for creating and decomposing the instantiated {\textquotedblleft}pivot{\textquotedblright} representation. Within this framework, we examine five different approaches to the level of representation for the lexical IL forms and then discuss a tool, ILustrate, that we are building to develop and evaluate different IL representations coupled with their corresponding translation algorithms.}, author = {Dorr, Bonnie J and Voss,Clare R.} } @article {16215, title = {Image Browsers: Taxonomy, Guidelines, and Informal Specifications}, journal = {Institute for Systems Research Technical Reports}, year = {1994}, month = {1994///}, abstract = {Image browsing is necessary in numerous applications. Designers have merely used two one-dimensional scroll bars or they have made ad hoc designs for a two-dimensional scroll bar. However, the complexity of two-dimensional browsing suggests that more careful analysis, design, and evaluation might lead to significant improvements. We present a task taxonomy for image browsing, suggest design features and guidelines, assess existing strategies, and introduce an informal specification.}, keywords = {Graphics, Systems Integration, user interface}, url = {http://drum.lib.umd.edu/handle/1903/5591}, author = {Plaisant, Catherine and Carr,David A and Shneiderman, Ben} } @article {14120, title = {Inexact and Preconditioned Uzawa Algorithms for Saddle Point Problems}, journal = {SIAM Journal on Numerical Analysis}, volume = {31}, year = {1994}, month = {1994/12/01/}, pages = {1645 - 1661}, abstract = {Variants of the Uzawa algorithm for solving symmetric indefinite linear systems are developed and analyzed. Each step of this algorithm requires the solution of a symmetric positive-definite system of linear equations. It is shown that if this computation is replaced by an approximate solution produced by an arbitrary iterative method, then with relatively modest requirements on the accuracy of the approximate solution, the resulting inexact Uzawa algorithm is convergent, with a convergence rate close to that of the exact algorithm. In addition, it is shown that preconditioning can be used to improve performance. The analysis is illustrated and supplemented using several examples derived from mixed finite element discretization of the Stokes equations.}, isbn = {0036-1429}, url = {http://www.jstor.org/stable/2158371}, author = {Elman, Howard and Golub, Gene H.} } @article {15407, title = {The input dependent Preisach model with stochastic input as a model for aftereffect}, journal = {IEEE Transactions on Magnetics}, volume = {30}, year = {1994}, month = {1994/11//}, pages = {4368 - 4370}, abstract = {In a previous work, it has been suggested to use the Preisach model driven by stochastic inputs as a model for aftereffect. Here, we further extend this approach by employing the more accurate nonlinear input dependent Preisach model. The time evolution of the expected value of the output (magnetization) is derived in terms of the stochastic description of the input.
It is shown that the mathematical machinery of the {\textquotedblleft}exit problem{\textquotedblright} is instrumental for calculations of time evolutions of the expected value of the output of the nonlinear input dependent Preisach model.}, keywords = {aftereffect, input dependent Preisach model, magnetic hysteresis, stochastic input, time evolution}, isbn = {0018-9464}, doi = {10.1109/20.334090}, author = {Korman,C.E. and Mayergoyz, Issak D} } @article {13577, title = {Instrument Grasp: A Model and its Effects on Handwritten Strokes}, journal = {Pattern Recognition}, volume = {27}, year = {1994}, month = {1994///}, pages = {233 - 245}, author = {David Doermann and Varma,V. and Rosenfeld, A.} } @conference {11924, title = {Integrated approaches for improving the effectiveness of Plan Reuse (A Progress Report)}, booktitle = {ARPA/Rome Laboratory knowledge-based planning and scheduling initiative: workshop proceedings: Tucson, Arizona, February 21-24, 1994}, year = {1994}, month = {1994///}, pages = {325 - 325}, author = {Kambhampati,S. and Ihrig,L. and Katukam,S. and Chen,J. and Hendler,J. A and Agrawala, Ashok K.} } @article {18853, title = {Integrating DFM with CAD through Design Critiquing}, journal = {Concurrent Engineering}, volume = {2}, year = {1994}, month = {1994/06/01/}, pages = {85 - 95}, abstract = {The increasing focus on design for manufacturability (DFM) in research in concurrent engineering and engineering design is expanding the scope of traditional design activities in order to identify and eliminate manufacturing problems during the design stage. Manufacturing a product generally involves many different kinds of manufacturing activities, each having different characteristics. A design that is good for one kind of activity may not be good for another; for example, a design that is easy to assemble may not be easy to machine. One obstacle to DFM is the difficulty involved in building a single system that can handle the various manufacturing domains relevant to a design.}, keywords = {design critiquing, design for manufacturability, feature-based modeling, manufacturability analysis, multi-agent coordination.}, isbn = {1063-293X, 1531-2003}, doi = {10.1177/1063293X9400200202}, url = {http://cer.sagepub.com/content/2/2/85}, author = {Gupta, Satyandra K. and Regli,William C. and Nau, Dana S.} } @book {17244, title = {Interacting with virtual environments}, year = {1994}, month = {1994///}, publisher = {John Wiley \& Sons}, organization = {John Wiley \& Sons}, author = {MacDonald,L. and Vince,J. and Shneiderman, Ben} } @conference {17948, title = {Interactive visualization of weighted three-dimensional alpha hulls}, booktitle = {Proceedings of the tenth annual symposium on Computational geometry}, series = {SCG {\textquoteright}94}, year = {1994}, month = {1994///}, pages = {395 - 396}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {An interactive visualization of weighted three-dimensional α-hulls is presented for static and dynamic spheres. The α-hull is analytically computed and represented by a triangulated mesh. The entire surface is computed and displayed in real-time at interactive rates. The weighted three-dimensional α-hulls are equivalent to smooth molecular surfaces of biochemistry.
Biochemistry applications of interactive computation and display of α-hulls or smooth molecular surfaces are outlined.}, isbn = {0-89791-648-4}, doi = {10.1145/177424.178120}, url = {http://doi.acm.org/10.1145/177424.178120}, author = {Varshney, Amitabh and Brooks,Frederick P. and Wright,William V.} } @conference {16629, title = {Interpretation of Doppler blood flow velocity waveforms using neural networks}, booktitle = {Proceedings of the Annual Symposium on Computer Application in Medical Care}, year = {1994}, month = {1994///}, pages = {865 - 865}, author = {Baykal,N. and Reggia, James A. and Yalabik,N. and Erkmen,A. and Beksac,M. S.} } @article {13802, title = {Introduction: Special issue on building lexicons for machine translation}, journal = {Machine Translation}, volume = {9}, year = {1994}, month = {1994///}, pages = {151 - 153}, isbn = {0922-6567}, url = {http://dx.doi.org/10.1007/BF00980576}, author = {Dorr, Bonnie J and Klavans,Judith} } @article {14159, title = {Iterative methods for linear systems}, journal = {Large-Scale Matrix Problems and the Numerical Solution of Partial Differential Equations}, volume = {3}, year = {1994}, month = {1994///}, pages = {69 - 177}, author = {Elman, Howard} } @conference {13571, title = {Image based typographic analysis of documents}, booktitle = {Proceedings of the Second International Conference on Document Analysis and Recognition, 1993}, year = {1993}, month = {1993/10//}, pages = {769 - 773}, abstract = {An approach to image based typographic analysis of documents is provided. The problem requires a spatial understanding of the document layout as well as knowledge of the proper syntax. The system performs a page synthesis from the stream of formatting commands defined in a DVI file. Since the two-dimensional relationships between document components are not explicit in the page language, the authors develop a representation which preserves the two-dimensional layout, the read-order and the attributes of document components. From this hierarchical representation of the page layout we extract and analyze relevant typographic features such as margins, line and character spacing, and figure placement.}, keywords = {document image processing, image based typographic analysis, page synthesis, DVI file, formatting commands, page layout, hierarchical representation, document components, 2D relationships, read-order, margins, line and character spacing, figure placement, feature extraction, data structures, document description languages}, doi = {10.1109/ICDAR.1993.395624}, author = {David Doermann and Furuta,R.} } @article {17225, title = {Improving the accuracy of touchscreens: an experimental evaluation of three strategies}, journal = {Sparks of innovation in human-computer interaction}, year = {1993}, month = {1993///}, pages = {161 - 161}, author = {Potter,R.L. and Weldon,L.J. and Shneiderman, Ben} } @book {13793, title = {Information mediation techniques for problem solving with multiple knowledge servers}, year = {1993}, month = {1993///}, publisher = {University of Maryland}, organization = {University of Maryland}, abstract = {This paper describes two important information mediation problems that arise when multiple knowledge and data servers are accessed for problem solving. The first problem is building an intelligent interface between a knowledge server (KS) and a processor (KP) so that queries may be answered intelligently and completely.
The second problem is to provide interoperability among multiple KP/KS pairs so that a query may be answered using information from multiple sources. We present example scenarios which highlight each of these problems and then outline techniques and tasks that are applied towards obtaining a solution. These techniques draw upon disciplines such as query and transaction processing with knowledge bases and machine understanding and translation of natural language. The techniques for solving the information mediation problems described involve parameterized canonical representations (CR) for the KP/KS pairs. The CR will represent possible mappings between a query and the knowledge model and will be used during query transformation to produce a query which will provide more complete and correct answers. Parameterization will help in merging CRs to support interoperability for distributed problem solving. }, author = {Dorr, Bonnie J and Raschid, Louiqa} } @article {13799, title = {Interlingual machine translation A parameterized approach}, journal = {Artificial Intelligence}, volume = {63}, year = {1993}, month = {1993/10//}, pages = {429 - 492}, abstract = {The task of designing a machine translation system is difficult because, in order to achieve any degree of accuracy, such systems must capture language-independent information while still systematically processing many types of language-specific phenomena in each of the individual languages. This paper provides a catalog of certain types of distinctions among Spanish, English, and German, and describes a parameterized interlingual approach that characterizes these distinctions, both at the syntactic level and at the lexical-semantic level. The parameter-setting approach to machine translation is desirable because it simplifies the descriptions of natural grammars, facilitates the task of modifying and augmenting the system, accounts for cross-linguistic variation uniformly, and provides a more constrained theory of processing. The approach described here is implemented in a system called UNITRAN, an interlingual machine translation system that translates English, Spanish, and German bidirectionally.}, isbn = {0004-3702}, doi = {10.1016/0004-3702(93)90023-5}, url = {http://www.sciencedirect.com/science/article/pii/0004370293900235}, author = {Dorr, Bonnie J} } @conference {13800, title = {Interoperable query processing with multiple heterogeneous knowledge servers}, booktitle = {Proceedings of the second international conference on Information and knowledge management}, series = {CIKM {\textquoteright}93}, year = {1993}, month = {1993///}, pages = {461 - 470}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-626-3}, doi = {10.1145/170088.170395}, url = {http://doi.acm.org/10.1145/170088.170395}, author = {Raschid, Louiqa and Chang,Yahui and Dorr, Bonnie J} } @conference {13677, title = {The Interpretation and Recognition of Interfering Strokes}, booktitle = {IWFHR}, year = {1993}, month = {1993///}, pages = {41 - 50}, author = {David Doermann and Rosenfeld, A.} } @article {18854, title = {Interpreting Product Designs for Manufacturability Evaluation}, volume = {ISR; TR 1993-45}, year = {1993}, month = {1993///}, institution = {Institute for Systems Research, University of Maryland, College Park}, abstract = {The ability to quickly introduce new quality products is a decisive factor in capturing market share.
Because of pressing demands to reduce lead time, analyzing the manufacturability of the proposed design has become an important step in the design stage. In this paper we present an approach for evaluating the manufacturability of machined parts.

Evaluating manufacturability involves finding a way to manufacture the proposed design, and estimating the associated production cost and quality. However, there often can be several different ways to manufacture a proposed design - so to evaluate the manufacturability of the proposed design, we need to consider different ways to manufacture it, and determine which one best meets the manufacturing objectives.

In this paper we describe a methodology for systematically generating and evaluating alternative operation plans. As a first step, we identify all machining operations which can potentially be used to create the given design. Using these operations, we generate different operation plans for machining the part. Each time we generate a new operation plan, we assign it a manufacturability rating. The manufacturability rating for the design is the rating of the best operation plan.

We anticipate that by providing feedback about possible problems with the design, this work will be useful in providing a way to speed up the evaluation of new product designs in order to decide how or whether to manufacture them.}, keywords = {Automation, computer aided manufacturing, manufacturability, Manufacturing Systems}, url = {http://drum.lib.umd.edu//handle/1903/5390}, author = {Gupta, Satyandra K. and Nau, Dana S. and Zhang,G. M.} } @article {17267, title = {Investigating touchscreen typing: the effect of keyboard size on typing speed}, journal = {Behaviour \& Information Technology}, volume = {12}, year = {1993}, month = {1993///}, pages = {17 - 22}, abstract = {Two studies investigated the effect keyboard size has on typing speed and error rates for touchscreen keyboards using the lift-off strategy. A cursor appeared when users touched the screen and a key was selected when they lifted their finger from the screen. Four keyboard sizes were investigated ranging from 24.6 cm to 6.8 cm wide. Results indicate that novices can type approximately 10 words per minute (WPM) on the smallest keyboard and 20 WPM on the largest. Experienced users improved to 21 WPM on the smallest keyboard and 32 WPM on the largest. These results indicate that, although slower, small touchscreen keyboards can be used for limited data entry when the presence of a regular keyboard is not practical. Applications include portable pocket-sized or palmtop computers, messaging systems, and personal information resources. Results also suggest the increased importance of experience on these smaller keyboards. Research directions are suggested.}, isbn = {0144-929X}, doi = {10.1080/01449299308924362}, url = {http://www.tandfonline.com/doi/abs/10.1080/01449299308924362}, author = {Sears,Andrew and Revis,Doreen and Swatski,Janet and Crittenden,Rob and Shneiderman, Ben} } @inbook {15764, title = {Iterative Methods for Finding the Stationary Vector for Markov Chains}, booktitle = {Linear Algebra, Markov Chains, and Queuing Models}, volume = {48}, year = {1993}, month = {1993///}, pages = {125 - 136}, publisher = {Springer-Verlag}, organization = {Springer-Verlag}, series = {IMA Volumes in Mathematics and its Applications},
address = {New York}, author = {O{\textquoteright}Leary, Dianne P.}, editor = {Meyer,Carl and Plemmons,Robert} } @conference {18373, title = {An improved classification tree analysis of high cost modules based upon an axiomatic definition of complexity}, booktitle = {Proceedings of the Third International Symposium on Software Reliability Engineering, 1992}, year = {1992}, month = {1992/10//}, pages = {164 - 172}, abstract = {Identification of high cost modules has been viewed as one mechanism to improve overall system reliability, since such modules tend to produce more than their fair share of problems. A decision tree model has previously been used to identify such modules. In this paper, a previously developed axiomatic model of program complexity is merged with the previously developed decision tree process for an improvement in the ability to identify such modules. This improvement has been tested using data from the NASA Software Engineering Laboratory.}, keywords = {classification tree analysis, axiomatic definition of complexity, program complexity, decision tree model, high cost modules, software metrics, software reliability, system reliability, computational complexity, subroutines}, doi = {10.1109/ISSRE.1992.285848}, author = {Tian,Jianhui and Porter, Adam and Zelkowitz, Marvin V} } @conference {17611, title = {Improved distributed algorithms for coloring and network decomposition problems}, booktitle = {Proceedings of the twenty-fourth annual ACM symposium on Theory of computing}, series = {STOC {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {581 - 592}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-511-9}, doi = {10.1145/129712.129769}, url = {http://doi.acm.org/10.1145/129712.129769}, author = {Panconesi,Alessandro and Srinivasan, Aravind} } @article {15622, title = {Incremental construction and dynamic maintenance of constrained Delaunay triangulations}, journal = {Proceedings of the 4th Canadian Conference on Computational Geometry}, year = {1992}, month = {1992///}, pages = {170 - 175}, author = {Kao,T.C. and Mount, Dave} } @article {16727, title = {Incrementally maintained network{\textrightarrow}relational database mapping}, journal = {Software: Practice and Experience}, volume = {22}, year = {1992}, month = {1992/12/01/}, pages = {1099 - 1131}, abstract = {An incrementally maintained mapping from a network to a relational database is presented. This mapping may be established either to support the efficient retrieval of data from a network database through a relational interface, or as the first step in a gradual conversion of data and applications from a network to a relational database system. After the mapping has been established, the only data mapped from the network to the relational database are the increments resulting from updates on the network database. The mapping is therefore an efficient alternative to mappings that repeatedly map the results of retrievals through the relational interface from the network database to the relational database. This is in particular the case when the two databases reside on different hosts.
Applications on the network database may, under certain restrictions, gradually be moved to the relational database, while the mapping incrementally maintains the relational database for the applications that remain on the network database.}, keywords = {Database conversion, Database gateway, Database mapping, Incremental database mapping}, isbn = {1097-024X}, doi = {10.1002/spe.4380221205}, url = {http://onlinelibrary.wiley.com/doi/10.1002/spe.4380221205/abstract}, author = {Mark,Leo and Roussopoulos, Nick and Newsome,Tina and Laohapipattana,Piriya} } @article {15206, title = {On independent spanning trees}, journal = {Information Processing Letters}, volume = {42}, year = {1992}, month = {1992/07/24/}, pages = {321 - 323}, abstract = {We prove that if any k-vertex connected graph has k vertex independent spanning trees, then any k-edge connected graph has k edge independent spanning trees, thus answering a question raised by Zehavi and Itai [J. Graph Theory 13 (1989)] in the affirmative.}, keywords = {combinatorial problems, edge connectivity, spanning trees, vertex connectivity}, isbn = {0020-0190}, doi = {10.1016/0020-0190(92)90230-S}, url = {http://www.sciencedirect.com/science/article/pii/002001909290230S}, author = {Khuller, Samir and Schieber,Baruch} } @article {13576, title = {Instrument Grasp: A Model and its Effects on Handwritten Strokes}, volume = {CAR-TR-614}, year = {1992}, month = {1992///}, institution = {University of Maryland, College Park}, author = {David Doermann and Varma,V. and Rosenfeld, A.} } @conference {15626, title = {Intersection detection and separators for simple polygons}, booktitle = {Proceedings of the eighth annual symposium on Computational geometry}, series = {SCG {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {303 - 311}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {A simple algorithm is presented for detecting whether two preprocessed simple polygons intersect one another. Given a simple polygon, A, in O(n log n) time and O(n) space we preprocess A constructing an enveloping triangulation called a scaffold. To determine whether two preprocessed polygons A and B overlap one another, we start with these two envelopes and successively strip away overlapping triangles of the scaffolds until we either detect an intersection between the objects or until we have succeeded in separating them spatially. The running time of the intersection query depends on the complexity of the minimum link polygonal curve separating the two objects. Given two preprocessed simple polygons A and B, placed at arbitrary locations in the plane we can determine whether these polygons intersect one another in O(m log^2 n) time, where n is the total number of vertices and m is the complexity of a minimum link polygonal curve separating A from B.
We generalize this to the problem of computing arbitrary Boolean functions of two preprocessed polygons.}, isbn = {0-89791-517-8}, doi = {10.1145/142675.142737}, url = {http://doi.acm.org/10.1145/142675.142737}, author = {Mount, Dave} } @book {14942, title = {An introduction to parallel algorithms}, year = {1992}, month = {1992///}, publisher = {Addison Wesley Longman Publishing Co., Inc.}, organization = {Addison Wesley Longman Publishing Co., Inc.}, address = {Redwood City, CA, USA}, isbn = {0-201-54856-9}, author = {JaJa, Joseph F.} } @book {14978, title = {Introduction to Parallel Computing}, year = {1992}, month = {1992///}, publisher = {Addison-Wesley Publishing Co}, organization = {Addison-Wesley Publishing Co}, author = {JaJa, Joseph F.} } @conference {17218, title = {Identifying aggregates in hypertext structures}, booktitle = {Proceedings of the third annual ACM conference on Hypertext}, series = {HYPERTEXT {\textquoteright}91}, year = {1991}, month = {1991///}, pages = {63 - 74}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-461-9}, doi = {10.1145/122974.122981}, url = {http://doi.acm.org/10.1145/122974.122981}, author = {Botafogo,Rodrigo A. and Shneiderman, Ben} } @article {16764, title = {Incremental implementation model for relational databases with transaction time}, journal = {IEEE Transactions on Knowledge and Data Engineering}, volume = {3}, year = {1991}, month = {1991/12//}, pages = {461 - 473}, abstract = {An implementation model for the standard relational data model extended with transaction time is presented. The implementation model integrates techniques of view materialization, differential computation, and deferred update into a coherent whole. It is capable of storing any view (reflecting past or present states) and subsequently using stored views as outsets for incremental and decremental computations of requested views, making it more flexible than previously proposed partitioned storage models. The working and the expressiveness of the model are demonstrated by sample queries that show how historical data are retrieved}, keywords = {Computational modeling, Computer science, Data models, data retrieval, Database languages, Database systems, database theory, decremental computations, deferred update, Degradation, differential computation, historical data, History, incremental computations, Information retrieval, partitioned storage models, queries, relational data model, Relational databases, stored views, Transaction databases, transaction time, view materialization}, isbn = {1041-4347}, doi = {10.1109/69.109107}, author = {Jensen,C. S and Mark,L. and Roussopoulos, Nick} } @conference {14769, title = {The integration of application and system based metrics in a parallel program performance tool}, booktitle = {ACM Sigplan Notices}, volume = {26}, year = {1991}, month = {1991///}, pages = {189 - 200}, author = {Hollingsworth, Jeffrey K and Irvin, R. B and Miller, B. P} } @article {14141, title = {Iterative methods for cyclically reduced non-self-adjoint linear systems II}, journal = {Math. Comp}, volume = {56}, year = {1991}, month = {1991///}, pages = {215 - 242}, author = {Elman, Howard and Golub, G. 
@article {11988, title = {Image motion estimation by clustering}, journal = {International Journal of Imaging Systems and Technology}, volume = {2}, year = {1990}, month = {1990/12/01/}, pages = {345 - 355}, abstract = {Image motion is estimated by matching feature {\textquotedblleft}interest{\textquotedblright} points in different frames of video image sequences. The matching is based on local similarity of the displacement vectors. Clustering in the displacement vector space is used to determine the set of plausible match vectors. Subsequently, a similarity-based algorithm performs the actual matching. The feature points are computed using a multiple-filter image decomposition operator. The algorithm has been tested on synthetic as well as real video images. The novelty of the approach is that it handles multiple motions and performs motion segmentation.}, isbn = {1098-1098}, doi = {10.1002/ima.1850020409}, url = {http://onlinelibrary.wiley.com/doi/10.1002/ima.1850020409/abstract}, author = {Bandopadhay, Amit and Aloimonos, J.} } @book {16563, title = {Inductive inference model for diagnostic problem-solving}, year = {1990}, month = {1990///}, publisher = {Springer-Verlag}, organization = {Springer-Verlag}, author = {Peng,Y. and Reggia, James A.} } @article {16730, title = {Information interchange between self-describing databases}, journal = {Information Systems}, volume = {15}, year = {1990}, month = {1990///}, pages = {393 - 400}, abstract = {Within the framework of a self-describing database system we describe a set of data management tools and a data dictionary supporting information interchange. The concepts are based on our experience from a project on standardized information interchange at NASA.}, isbn = {0306-4379}, doi = {10.1016/0306-4379(90)90043-O}, url = {http://www.sciencedirect.com/science/article/pii/030643799090043O}, author = {Mark,Leo and Roussopoulos, Nick} } @article {16021, title = {Intentionality and defaults}, journal = {International J. of Expert Systems}, volume = {3}, year = {1990}, month = {1990///}, pages = {345 - 354}, author = {Perlis, Don} } @article {16783, title = {Interoperability of multiple autonomous databases}, journal = {ACM Computing Surveys}, volume = {22}, year = {1990}, month = {1990/09//}, pages = {267 - 293}, isbn = {0360-0300}, doi = {10.1145/96602.96608}, url = {http://dl.acm.org/citation.cfm?id=96608}, author = {Litwin,Witold and Mark,Leo and Roussopoulos, Nick} } @article {15388, title = {Investigation of the threshold voltage of MOSFETs with position and potential-dependent interface trap distributions using a fixed-point iteration method}, journal = {IEEE Transactions on Electron Devices}, volume = {37}, year = {1990}, month = {1990/04//}, pages = {1031 - 1038}, abstract = {Simulation results are presented for a MOSFET with position- and energy- (potential-) dependent interface trap distributions that may be typical for devices subjected to interface-trap-producing processes such as hot-electron degradation. The interface-trap distribution is modeled as a Gaussian peak at a given position along the channel, and the energy dependence is derived from C-V measurements from an MOS capacitor exposed to ionizing radiation. A novel fixed-point technique is used to solve the two-dimensional boundary-value problem. The technique is shown to be globally convergent for arbitrary distributions of interface traps.
A comparison of the convergence properties of the Newton and fixed-point methods is presented, and it is shown that for some important cases the Newton technique fails to converge while the fixed-point technique converges with a geometric convergence rate.}, keywords = {boundary-value problems, convergence of numerical methods, C-V characteristics, energy dependence, fixed-point iteration method, fixed-point methods, Gaussian peak, geometric convergence rate, global convergence, hot carriers, hot-electron degradation, insulated gate field effect transistors, interface states, iterative methods, MOSFET, position-dependent interface trap distribution, potential-dependent distributions, semiconductor device models, simulation, threshold voltage, two-dimensional boundary-value problem}, isbn = {0018-9383}, doi = {10.1109/16.52438}, author = {Gaitan,M. and Mayergoyz, Issak D and Korman,C.E.} } @article {14791, title = {IPS-2: the second generation of a parallel program measurement system}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {1}, year = {1990}, month = {1990/04//}, pages = {206 - 217}, abstract = {IPS, a performance measurement system for parallel and distributed programs, is currently running on its second implementation. IPS{\textquoteright}s model of parallel programs uses knowledge about the semantics of a program{\textquoteright}s structure to provide two important features. First, IPS provides a large amount of performance data about the execution of a parallel program, and this information is organized so that access to it is easy and intuitive. Second, IPS provides performance analysis techniques that help to guide the programmer automatically to the location of program bottlenecks. The first implementation of IPS was a testbed for the basic design concepts, providing experience with a hierarchical program and measurement model, interactive program analysis, and automatic guidance techniques. It was built on the Charlotte distributed operating system. The second implementation, IPS-2, extends the basic system with new instrumentation techniques, an interactive and graphical user interface, and new automatic guidance analysis techniques. This implementation runs on 4.3BSD UNIX systems, on the VAX, DECstation, Sun 4, and Sequent Symmetry multiprocessor.}, keywords = {4.3BSD UNIX systems, automatic guidance techniques, Automatic testing, Charlotte distributed operating system, CPA, DECstation, design concepts, distributed programs, graphical user interface, Graphical user interfaces, Instruments, interactive program analysis, IPS-2, measurement, message systems, network operating systems, Operating systems, parallel program measurement system, parallel programming, parallel programs, Performance analysis, performance analysis techniques, performance evaluation, performance measurement system, Power system modeling, program bottlenecks, program diagnostics, Programming profession, semantics, Sequent Symmetry multiprocessor, shared-memory systems, software tools, Springs, Sun, Sun 4, Unix, VAX}, isbn = {1045-9219}, doi = {10.1109/71.80132}, author = {Miller, B. P and Clark, M. and Hollingsworth, Jeffrey K and Kierstead, S. and Lim,S. -S and Torzewski, T.} } @article {14160, title = {Iterative methods for cyclically reduced non-self-adjoint linear systems}, journal = {Math. Comp.}, volume = {54}, year = {1990}, month = {1990///}, pages = {671 - 700}, author = {Elman, Howard and Golub, G. H.} }
@conference {16382, title = {Incremental computation via function caching}, booktitle = {Proceedings of the 16th ACM SIGPLAN-SIGACT symposium on Principles of programming languages}, year = {1989}, month = {1989///}, pages = {315 - 328}, author = {Pugh, William and Teitelbaum,T.} } @article {17253, title = {Interactive graphics interfaces in hypertext systems}, journal = {Proc. 28th Annual ACM DC Technical Symposium}, volume = {23}, year = {1989}, month = {1989///}, pages = {28 - 28}, abstract = {One of the key design aspects of hypertext systems is the rapid selection of items displayed on the screen in a direct manipulation manner. The user can select next or back page-turning icons or buttons, or select another item that might jump to a remote destination. Authors and browsers are confronted with the problem of recognizing and selecting these choices in textual and graphic databases. This paper discusses the problems and offers a variety of solutions.}, author = {Weiland,W. and Shneiderman, Ben} } @conference {16345, title = {An improved replacement strategy for function caching}, booktitle = {Proceedings of the 1988 ACM conference on LISP and functional programming - LFP {\textquoteright}88}, year = {1988}, month = {1988///}, pages = {269 - 276}, address = {Snowbird, Utah, United States}, doi = {10.1145/62678.62719}, url = {http://dl.acm.org/citation.cfm?id=62678.62719}, author = {Pugh, William} } @conference {14975, title = {Input sensitive VLSI layouts for graphs of arbitrary degree}, booktitle = {VLSI Algorithms and Architectures}, year = {1988}, month = {1988///}, pages = {268 - 277}, abstract = {A general method to find area-efficient VLSI layouts of graphs of arbitrary degree is presented. For graphs of maximum degree Δ, the layouts obtained are smaller by a factor of Δ^2 than those obtained using existing methods. Optimal planar layouts and near-optimal nonplanar layouts are also derived for planar graphs of arbitrary degree and gauge. The results span the spectrum between outerplanar graphs (gauge 1) and arbitrary planar graphs (gauge O(n)). Optimality is established by developing families of planar graphs of varying gauge and degree, and proving lower bounds on their layout area. These techniques can be combined to exhibit a trade-off between area and the number of contact cuts. The resulting scheme is sensitive to all three parameters that affect the area: the maximum degree, the gauge, and the number of contact cuts.}, doi = {10.1007/BFb0040394}, author = {Sherlekar,D. and JaJa, Joseph F.} } @article {15961, title = {Intentionality as internality}, journal = {Behavioral and Brain Sciences}, volume = {9}, year = {1986}, month = {1986///}, pages = {151 - 152}, author = {Perlis, Don and Hall,R.} } @article {17261, title = {Interfaces: multi-media and multi-user}, journal = {Computer Supported Cooperative Work: Proceedings of the 1986 ACM conference on Computer-supported cooperative work}, year = {1986}, month = {1986///}, author = {Shneiderman, Ben} } @conference {18110, title = {Introducing efficient parallelism into approximate string matching and a new serial algorithm}, booktitle = {Proceedings of the eighteenth annual ACM symposium on Theory of computing}, year = {1986}, month = {1986///}, pages = {220 - 230}, author = {Landau, G. M. and Vishkin, Uzi} }
@article {14972, title = {Information Transfer in Distributed Computing with Applications to VLSI}, journal = {Journal of the ACM (JACM)}, volume = {31}, year = {1984}, month = {1984/01//}, pages = {150 - 162}, isbn = {0004-5411}, doi = {10.1145/2422.322421}, url = {http://doi.acm.org/10.1145/2422.322421}, author = {JaJa, Joseph F. and Prasanna Kumar,V. K.} } @conference {16801, title = {An introduction to PSQL: A pictorial structured query language}, booktitle = {Proc. IEEE Workshop on Visual Languages}, year = {1984}, month = {1984///}, pages = {77 - 87}, author = {Roussopoulos, Nick and Leifker,D.} } @article {18135, title = {Implementation of simultaneous memory address access in models that forbid it}, journal = {Journal of Algorithms}, volume = {4}, year = {1983}, month = {1983///}, pages = {45 - 50}, author = {Vishkin, Uzi} } @article {15759, title = {Image Smoothing and Segmentation by Cost Minimization}, journal = {IEEE Transactions on Systems, Man, and Cybernetics}, volume = {SMC-12}, year = {1982}, month = {1982///}, pages = {91 - 96}, author = {Narayanan,K. A. and O{\textquoteright}Leary, Dianne P. and Rosenfeld,Azriel} } @conference {15633, title = {Isomorphism of graphs with bounded eigenvalue multiplicity}, booktitle = {Proceedings of the fourteenth annual ACM symposium on Theory of computing}, series = {STOC {\textquoteright}82}, year = {1982}, month = {1982///}, pages = {310 - 324}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We investigate the connection between the spectrum of a graph, i.e. the eigenvalues of the adjacency matrix, and the complexity of testing isomorphism. In particular we describe two polynomial time algorithms which test isomorphism of undirected graphs whose eigenvalues have bounded multiplicity. If X and Y are graphs of eigenvalue multiplicity m, then the isomorphism of X and Y can be tested by an O(n^{4m+c}) deterministic and by an O(n^{2m+c}) Las Vegas algorithm, where n is the number of vertices of X and Y.}, isbn = {0-89791-070-2}, doi = {10.1145/800070.802206}, url = {http://doi.acm.org/10.1145/800070.802206}, author = {Babai,L{\'a}szl{\'o} and Grigoryev,D. Yu. and Mount, Dave} } @article {14126, title = {Iterative methods for large, sparse, nonsymmetric systems of linear equations}, journal = {Dissertation Abstracts International Part B: Science and Engineering}, volume = {43}, year = {1982}, month = {1982///}, pages = {1982 - 1982}, author = {Elman, Howard} } @article {18397, title = {Implementation of language enhancements}, journal = {Computer Languages}, volume = {6}, year = {1981}, month = {1981///}, pages = {139 - 153}, abstract = {Data abstractions have been proposed as a means to enhance program modularity. Adding such new features to an existing language is typically handled either by rewriting large portions of an existing compiler or by using a preprocessor to translate the extensions into the standard language. The first technique is expensive to implement, while the latter is usually slow and clumsy to use. In this paper a data abstraction addition to PL/1 is described and a hybrid implementation is given.
A minimal set of primitive features is added to the compiler and the other extensions are added via an internal macro processor that expands the new syntax into the existing language.}, keywords = {Compiler design, Data abstraction, Language extensions, specifications}, isbn = {0096-0551}, doi = {10.1016/0096-0551(81)90026-6}, url = {http://www.sciencedirect.com/science/article/pii/0096055181900266}, author = {Zelkowitz, Marvin V and Lyle,James R.} } @article {18396, title = {Implementation of a Capability-Based Data Abstraction}, journal = {IEEE Transactions on Software Engineering}, volume = {SE-4}, year = {1978}, month = {1978/01//}, pages = {56 - 64}, abstract = {One important feature in programming language design is an appropriate data definitional facility. Criteria now recognized as important are the concepts of information hiding and data abstraction. The problem, however, is to embed these ideas into languages. Although including these ideas has often led to the design of a new language, that is not always necessary. Such facilities may be added to languages like PL/1 or Pascal. This report discusses the inclusion of such facilities within one PL/1 compiler. While the resulting system does not have the optimal set of protection features, it does have several advantages: the base language is known to a large class of programmers, there are many such compilers already written, and the system achieves almost as much protection as is needed.}, keywords = {Data abstraction, Pascal, PL/1, information hiding, pointer variables}, isbn = {0098-5589}, doi = {10.1109/TSE.1978.231466}, author = {Zelkowitz, Marvin V and Larsen,H.J.} } @article {17226, title = {Improving the human factors aspect of database interactions}, journal = {ACM Trans. Database Syst.}, volume = {3}, year = {1978}, month = {1978/12//}, pages = {417 - 439}, abstract = {The widespread dissemination of computer and information systems to nontechnically trained individuals requires a new approach to the design and development of database interfaces. This paper provides the motivational background for controlled psychological experimentation in exploring the person/machine interface. Frameworks for the reductionist approach are given, research methods discussed, research issues presented, and a small experiment is offered as an example of what can be accomplished. This experiment is a comparison of natural and artificial language query facilities. Although subjects posed approximately equal numbers of valid queries with either facility, natural language users made significantly more invalid queries which could not be answered from the database that was described.}, keywords = {Data models, Database systems, experimentation, human factors, natural language interfaces, Psychology, query languages}, isbn = {0362-5915}, doi = {10.1145/320289.320295}, url = {http://doi.acm.org/10.1145/320289.320295}, author = {Shneiderman, Ben} } @article {17232, title = {Information policy issues: selecting a policy framework and defining the schema horizon}, journal = {Information \& Management}, volume = {1}, year = {1978}, month = {1978///}, pages = {207 - 218}, abstract = {Technical advances in database management systems are rebalancing organizational structures as management seeks to accommodate these powerful new tools. Managers must participate in establishing a framework for policy decisions and in setting goals for database usage. First, responsibility and authority must be delegated to management and not technical personnel.
Second, the bounds of the application, called the schema horizon, should be carefully considered. This paper covers ten information policy issues within these two categories and attempts to clarify management tasks.}, keywords = {conceptual schema, data independence, data model theory, data submodels, database administrator, Database management, decision making, extended set theory, external schema, functional model, internal schema, management information systems, organizational behavior, organizational design, schema}, isbn = {0378-7206}, doi = {10.1016/0378-7206(78)90027-7}, url = {http://www.sciencedirect.com/science/article/pii/0378720678900277}, author = {Shneiderman, Ben} } @article {16973, title = {An internship in information systems: Combining computer science education with realistic problems}, journal = {SIGCSE Bull.}, volume = {8}, year = {1976}, month = {1976/07//}, pages = {80 - 83}, abstract = {Computer science graduates who become professional programmers will have a direct and substantial influence on the impact of applications, but little in the traditional computer science curriculum prepares them for this serious responsibility. Recognizing this situation, we designed a two-term sequence for advanced undergraduates and master{\textquoteright}s students which would provide them not only with the required academic knowledge but also with experience on realistic problems. The educational atmosphere that we tried to create resembles the internship phase followed in teacher training, medical schools, law schools, clinical psychology, and other disciplines.}, isbn = {0097-8418}, doi = {10.1145/952991.804761}, url = {http://doi.acm.org/10.1145/952991.804761}, author = {Buck,John and Shneiderman, Ben} } @article {18398, title = {Interactive PL/1}, journal = {SIGPLAN Not.}, volume = {9}, year = {1974}, month = {1974/09//}, pages = {29 - 32}, abstract = {An investigation of the PL/1 language in an interactive environment has led to the discovery of several deficiencies in the language when used as a pedagogical tool. How these deficiencies were eliminated (or avoided) in one particular implementation is discussed.}, isbn = {0362-1340}, doi = {10.1145/953214.953218}, url = {http://doi.acm.org/10.1145/953214.953218}, author = {Zelkowitz, Marvin V} } @article {18399, title = {It is not time to define "structured programming"}, journal = {SIGOPS Oper. Syst. Rev.}, volume = {8}, year = {1974}, month = {1974/04//}, pages = {7 - 8}, isbn = {0163-5980}, doi = {10.1145/775271.775272}, url = {http://doi.acm.org/10.1145/775271.775272}, author = {Zelkowitz, Marvin V} }