@article {19559, title = {System and Method for Optimal Verification of Operations on Dynamic Sets}, year = {Submitted}, abstract = {A system and method for cryptographically checking the correctness of outsourced set operations performed by an untrusted server over a dynamic collection of sets that are owned (and updated) by a trusted source is disclosed. The system and method provides new authentication mechanisms that allow any entity to publicly verify a proof attesting to the correctness of primitive set operations such as intersection, union, subset, and set difference. Based on a novel extension of the security properties of bilinear-map accumulators, as well as on a primitive called the accumulation tree, the system and method achieves optimal verification and proof complexity, as well as optimal update complexity, while incurring no extra asymptotic space overhead. The method provides an efficient proof construction, adding a logarithmic overhead to the computation of the answer of a set-operation query. Applications of interest include efficient verification of keyword search and database queries.}, url = {http://www.google.com/patents?id=mrcCAgAAEBAJ}, author = {Charalampos Papamanthou and Tamassia, Roberto and Triandopoulos, Nikolaos} } @article {20548, title = {Rapid, quantitative therapeutic screening for Alzheimer{\textquoteright}s enzymes enabled by optimal signal transduction with transistors}, journal = {The Analyst}, volume = {145}, year = {2020}, pages = {2925 - 2936}, issn = {0003-2654}, doi = {10.1039/C9AN01804B}, url = {http://xlink.rsc.org/?DOI=C9AN01804B}, author = {Le, Son T. and Morris, Michelle A. and Cardone, Antonio and Guros, Nicholas B. and Klauda, Jeffery B. and Sperling, Brent A. and Richter, Curt A. and Pant, Harish C. and Balijepalli, Arvind} } @article {20391, title = {Metagenome sequencing-based strain-level and functional characterization of supragingival microbiome associated with dental caries in children}, journal = {Journal of Oral Microbiology}, year = {2019}, month = {12/2018}, pages = {1557986}, abstract = {Studies of the microbiome associated with dental caries have largely relied on 16S rRNA sequence analysis, which is associated with PCR biases, low taxonomic resolution, and inability to accurately study functions. Here, we employed whole metagenome shotgun sequencing, coupled with a high-resolution analysis algorithm, to analyze supragingival microbiomes from 30 children with or without dental caries. A total of 726 bacterial strains belonging to 406 species, in addition to 34 bacteriophages, were identified. A core bacteriome was identified at the species and strain levels. Species of Prevotella, Veillonella, as yet unnamed Actinomyces, and Atopobium showed the strongest association with caries; Streptococcus sp. AS14 and Leptotrichia sp. Oral taxon 225, among others, were overabundant in caries-free children. For several species, the association was strain-specific. Furthermore, for some species, e.g., Streptococcus mitis and Streptococcus sanguinis, sister strains showed differential associations. Noteworthy, associations were also identified for phages: Streptococcus phage M102 with caries and Haemophilus phage HP1 with caries-free status.
Functionally, potentially relevant features were identified, including urate, vitamin K2, and polyamine biosynthesis in association with caries, and three deiminases and lactate dehydrogenase in association with health. The results demonstrate new associations between the microbiome and dental caries at the strain and functional levels that need further investigation.}, doi = {10.1080/20002297.2018.1557986}, url = {https://www.tandfonline.com/doi/full/10.1080/20002297.2018.1557986}, author = {Al-Hebshi, Nezar Noor and Baraniya, Divyashri and Chen, Tsute and Hill, Jennifer and Puri, Sumant and Tellez, Marisol and Hassan, Nur A. and Rita R Colwell and Ismail, Amid} } @article {20449, title = {Quantum Capacitance-Limited MoS2 Biosensors Enable Remote Label-Free Enzyme Measurements}, journal = {Nanoscale}, year = {2019}, month = {Jan-01-2019}, issn = {2040-3364}, doi = {10.1039/C9NR03171E}, url = {http://pubs.rsc.org/en/Content/ArticleLanding/2019/NR/C9NR03171E}, author = {Le, Son T and Guros, Nicholas B and Bruce, Robert C and Cardone, Antonio and Amin, Niranjana D and Zhang, Siyuan and Klauda, Jeffery and Pant, Harish C and Richter, Curt A and Balijepalli, Arvind} } @article {20288, title = {Characterization of the Microbiome at the World{\textquoteright}s Largest Potable Water Reuse Facility}, journal = {Frontiers in Microbiology}, year = {2018}, abstract = {Conventional water resources are not sufficient in many regions to meet the needs of growing populations. Due to cyclical weather patterns, drought, and climate change, water stress has increased worldwide, including in Southern California, which serves as a model for regions that integrate reuse of wastewater for both potable and non-potable use. The Orange County Water District (OCWD) Advanced Water Purification Facility (AWPF) is a highly engineered system designed to treat and produce up to 100 million gallons per day (MGD) of purified water from a municipal wastewater source for potable reuse. Routine facility microbial water quality analysis is limited to standard indicators at this and similar facilities. Given recent advances in high-throughput DNA sequencing techniques, complete microbial profiling of communities in water samples is now possible. By using 16S/18S rRNA gene sequencing, metagenomic and metatranscriptomic sequencing coupled to a highly accurate identification method, along with 16S rRNA gene qPCR, we describe a detailed view of the total microbial community throughout the facility. The total bacterial load of the water at stages of the treatment train ranged from 3.02 {\texttimes} 10$^{6}$ copies of the 16S rRNA gene/mL in source, unchlorinated wastewater feed to 5.49 {\texttimes} 10$^{1}$ copies/mL after treatment (consisting of microfiltration, reverse osmosis, and ultraviolet/advanced oxidation). Microbial diversity and load decreased by several orders of magnitude after microfiltration and reverse osmosis treatment, falling to almost non-detectable levels that more closely resembled controls of molecular grade laboratory water than the biomass detected in the source water. The presence of antibiotic resistance genes and viruses was also greatly reduced. Overall, system design performance was achieved, and comprehensive microbial community analysis was found to enable a more complete characterization of the water/wastewater microbial signature.
Keywords: water reuse, pathogens, water purification, metatranscriptomics, metagenomics}, doi = {10.3389/fmicb.2018.02435}, url = {https://www.frontiersin.org/articles/10.3389/fmicb.2018.02435/full}, author = {Stamps, Blake W. and Leddy, Menu B. and Plumlee, Megan H. and Hasan, Nur A. and Rita R Colwell and Spear, John R.} } @article {20300, title = {High-Throughput DNA Sequencing to Profile Microbial Water Quality of Potable Reuse}, year = {2018}, url = {https://www.wateronline.com/doc/high-throughput-dna-sequencing-to-profile-microbial-water-quality-of-potable-reuse-0001}, author = {Menu B. Leddy and Megan H. Plumlee and Rose S. Kantor and Kara L. Nelson and Scott E. Miller and Lauren C. Kennedy and Blake W. Stamps and John R. Spear and Nur A. Hasan and Rita R Colwell} } @article {20320, title = {Application of a paper-based device containing a new culture medium to detect Vibrio cholerae in water samples collected in Haiti}, journal = {Journal of Microbiological Methods}, volume = {133}, year = {2017}, month = {Jan-02-2017}, pages = {23 - 31}, issn = {0167-7012}, doi = {10.1016/j.mimet.2016.12.014}, url = {https://www.sciencedirect.com/science/article/pii/S0167701216303578?via\%3Dihub}, author = {Briquaire, Romain and Rita R Colwell and Boncy, Jacques and Rossignol, Emmanuel and Dardy, Aline and Pandini, Isabelle and Villeval, Fran{\c c}ois and Machuron, Jean-Louis and Huq, Anwar and Rashed, Shah and Vandevelde, Thierry and Rozand, Christine} } @article {20314, title = {Beyond cost-effectiveness: Using systems analysis for infectious disease preparedness}, journal = {Vaccine}, volume = {35}, year = {2017}, month = {Jan-01-2017}, pages = {A46 - A49}, abstract = {Until the recent outbreaks, Ebola vaccines ranked low in decision makers{\textquoteright} priority lists based on cost-effectiveness analysis and/or corporate profitability. Despite a relatively small number of Ebola-related cases and deaths (compared to other causes), Ebola vaccines suddenly leapt to the highest priority among international health agencies and vaccine developers. Clearly, earlier cost-effectiveness analyses badly missed some factors affecting real-world decisions. Multi-criteria systems analysis can improve evaluation and prioritization of vaccine development and also of many other health policy and investment decisions. Neither cost-effectiveness nor cost-benefit analysis can capture important aspects of problems such as Ebola or the emerging threat of Zika, especially issues of inequality and disparity{\textemdash}issues that dominate the planning of many global health and economic organizations. Cost-benefit analysis requires assumptions about the specific value of life{\textemdash}an idea objectionable to many analysts and policy makers. Additionally, standard cost-effectiveness calculations cannot generally capture effects on people uninfected with Ebola, for example, but nevertheless affected through such factors as contagion, herd immunity, fear of dread disease, reduction of travel and commerce, and even the hope of disease eradication.
Using SMART Vaccines, we demonstrate how systems analysis can visibly include important {\textquotedblleft}other factors{\textquotedblright} and more usefully guide decision making and beneficially alter priority-setting processes.}, issn = {0264-410X}, doi = {10.1016/j.vaccine.2016.08.090}, url = {https://linkinghub.elsevier.com/retrieve/pii/S0264410X16309501}, author = {Phelps, Charles and Madhavan, Guruprasad and Rappuoli, Rino and Rita R Colwell and Fineberg, Harvey} } @article {20448, title = {A Bioinformatics 3D Cellular Morphotyping Strategy for Assessing Biomaterial Scaffold Niches}, journal = {ACS Biomaterials Science \& Engineering}, volume = {3}, year = {2017}, month = {Sep-10-2017}, pages = {2302 - 2313}, issn = {2373-9878}, doi = {10.1021/acsbiomaterials.7b00473}, url = {http://pubs.acs.org/doi/10.1021/acsbiomaterials.7b00473}, author = {Florczyk, Stephen J. and Simon, Mylene and Juba, Derek and Pine, P. Scott and Sarkar, Sumona and Chen, Desu and Baker, Paula J. and Bodhak, Subhadip and Cardone, Antonio and Brady, Mary C. and Bajcsy, Peter and Simon, Carl G.} } @article {20305, title = {Characterization of Two Cryptic Plasmids Isolated in Haiti from Clinical Vibrio cholerae Non-O1/Non-O139}, journal = {Frontiers in Microbiology}, year = {2017}, doi = {10.3389/fmicb.2017.02283}, url = {http://journal.frontiersin.org/article/10.3389/fmicb.2017.02283}, author = {Ceccarelli, Daniela and Garriss, Genevieve and Choi, Seon Y. and Hasan, Nur A. and Stepanauskas, Ramunas and Pop, Mihai and Huq, Anwar and Rita R Colwell} } @article {20308, title = {Comprehensive benchmarking and ensemble approaches for metagenomic classifiers}, journal = {Genome Biology}, year = {2017}, month = {Jan-12-2017}, doi = {10.1186/s13059-017-1299-7}, url = {http://genomebiology.biomedcentral.com/articles/10.1186/s13059-017-1299-7}, author = {McIntyre, Alexa B. R. and Ounit, Rachid and Afshinnekoo, Ebrahim and Prill, Robert J. and H{\'e}naff, Elizabeth and Alexander, Noah and Minot, Samuel S. and Danko, David and Foox, Jonathan and Ahsanuddin, Sofia and Tighe, Scott and Hasan, Nur A. and Subramanian, Poorani and Moffat, Kelly and Levy, Shawn and Lonardi, Stefano and Greenfield, Nick and Rita R Colwell and Rosen, Gail L. and Mason, Christopher E.} } @article {20318, title = {CRISPR-Cas and Contact-Dependent Secretion Systems Present on Excisable Pathogenicity Islands with Conserved Recombination Modules}, journal = {Journal of Bacteriology}, year = {2017}, abstract = {Pathogenicity islands (PAIs) are mobile integrated genetic elements that contain a diverse range of virulence factors. PAIs integrate into the host chromosome at a tRNA locus that contains their specific bacterial attachment site, attB, via integrase-mediated site-specific recombination, generating attL and attR sites. We identified conserved recombination modules (integrases and att sites) previously described in choleragenic Vibrio cholerae PAIs but with novel cargo genes. Clustered regularly interspaced short palindromic repeat (CRISPR)-associated proteins (Cas proteins) and a type VI secretion system (T6SS) gene cluster were identified at the Vibrio pathogenicity island 1 (VPI-1) insertion site in 19 V. cholerae strains and contained the same recombination module. Two divergent type I-F CRISPR-Cas systems were identified, which differed in Cas protein homology and content. The CRISPR repeat sequence was identical among all V.
cholerae strains, but the CRISPR spacer sequences and the number of spacers varied. In silico analysis suggests that the CRISPR-Cas systems were active against phages and plasmids. A type III secretion system (T3SS) was present in 12 V. cholerae strains on a 68-kb island inserted at the same tRNA-serine insertion site as VPI-2 and contained the same recombination module. Bioinformatics analysis showed that two divergent T3SSs exist among the strains examined. Both the CRISPR and T3SS islands excised site-specifically from the bacterial chromosome as complete units, and the cognate integrases were essential for this excision. These data demonstrated that identical recombination modules that catalyze integration and excision from the chromosome can acquire diverse cargo genes, signifying a novel method of acquisition for both CRISPR-Cas systems and T3SSs.}, issn = {0021-9193}, doi = {10.1128/JB.00842-16}, url = {http://jb.asm.org/lookup/doi/10.1128/JB.00842-16}, author = {Carpenter, Megan R. and Kalburge, Sai S. and Borowski, Joseph D. and Peters, Molly C. and Rita R Colwell and Boyd, E. Fidelma}, editor = {DiRita, Victor J.} } @article {20313, title = {Genomic Methods and Microbiological Technologies for Profiling Novel and Extreme Environments for the Extreme Microbiome Project (XMP)}, journal = {Journal of Biomolecular Techniques: JBT}, volume = {28}, year = {2017}, month = {Jan-04-2017}, pages = {31 - 39}, abstract = {The Extreme Microbiome Project (XMP) is a project launched by the Association of Biomolecular Resource Facilities Metagenomics Research Group (ABRF MGRG) that focuses on whole genome shotgun sequencing of extreme and unique environments using a wide variety of biomolecular techniques. The goals are multifaceted, including development and refinement of new techniques for the following: 1) the detection and characterization of novel microbes, 2) the evaluation of nucleic acid techniques for extremophilic samples, and 3) the identification and implementation of the appropriate bioinformatics pipelines. Here, we highlight the different ongoing projects that we have been working on, as well as details on the various methods we use to characterize the microbiome and metagenome of these complex samples. In particular, we present data of a novel multienzyme extraction protocol that we developed, called Polyzyme or MetaPolyZyme. Presently, the XMP is characterizing sample sites around the world with the intent of discovering new species, genes, and gene clusters. Once a project site is complete, the resulting data will be publicly available. Sites include Lake Hillier in Western Australia, the {\textquotedblleft}Door to Hell{\textquotedblright} crater in Turkmenistan, deep ocean brine lakes of the Gulf of Mexico, deep ocean sediments from Greenland, permafrost tunnels in Alaska, ancient microbial biofilms from Antarctica, Blue Lagoon Iceland, Ethiopian toxic hot springs, and the acidic hypersaline ponds in Western Australia.}, issn = {1524-0215}, doi = {10.7171/jbt.17-2801-004}, url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5345951/}, author = {Tighe, Scott and Afshinnekoo, Ebrahim and Rock, Tara M. and McGrath, Ken and Alexander, Noah and McIntyre, Alexa and Ahsanuddin, Sofia and Bezdan, Daniela and Green, Stefan J. and Joye, Samantha and Stewart Johnson, Sarah and Baldwin, Don A. and Bivens, Nathan and Ajami, Nadim and Carmical, Joseph R.
and Herriott, Ian Charold and Rita R Colwell and Donia, Mohamed and Foox, Jonathan and Greenfield, Nick and Hunter, Tim and Hoffman, Jessica and Hyman, Joshua and Jorgensen, Ellen and Krawczyk, Diana and Lee, Jodie and Levy, Shawn and Garcia-Reyero, Nat{\`a}lia and Settles, Matthew and Thomas, Kelley and G{\'o}mez, Felipe and Schriml, Lynn and Kyrpides, Nikos and Zaikova, Elena and Penterman, Jon and Mason, Christopher E.} } @article {20309, title = {The microbiomes of blowflies and houseflies as bacterial transmission reservoirs}, journal = {Scientific Reports}, year = {2017}, month = {Jan-12-2017}, abstract = {Blowflies and houseflies are mechanical vectors inhabiting synanthropic environments around the world. They feed and breed in fecal and decaying organic matter, but the microbiome they harbour and transport is largely uncharacterized. We sampled 116 individual houseflies and blowflies from varying habitats on three continents and subjected them to high-coverage, whole-genome shotgun sequencing. This allowed for genomic and metagenomic analyses of the host-associated microbiome at the species level. Both fly host species segregate based on principal coordinate analysis of their microbial communities, but they also show an overlapping core microbiome. Legs and wings displayed the largest microbial diversity and were shown to be an important route for microbial dispersion. The environmental sequencing approach presented here detected a stochastic distribution of human pathogens, such as Helicobacter pylori, thereby demonstrating the potential of flies as proxies for environmental and public health surveillance.}, doi = {10.1038/s41598-017-16353-x}, url = {http://www.nature.com/articles/s41598-017-16353-x}, author = {Junqueira, AC and Ratan, Aakrosh and Acerbi, Enzo and Drautz-Moses, Daniela I. and Premkrishnan, BNV and Costea, PI and Linz, Bodo and Purbojati, Rikky W. and Paulo, Daniel F. and Gaultier, Nicolas E. and Subramanian, Poorani and Hasan, Nur A. and Rita R Colwell and Bork, Peer and Azeredo-Espin, Ana Maria L. and Bryant, Donald A. and Schuster, Stephan C.} } @conference {20065, title = {3D Cellular Morphotyping of Scaffold Niches}, booktitle = {2016 32nd Southern Biomedical Engineering Conference (SBEC)}, year = {2016}, publisher = {IEEE}, organization = {IEEE}, author = {Florczyk, Stephen J and Simon, Mylene and Juba, Derek and Pine, P Scott and Sarkar, Sumona and Chen, Desu and Baker, Paula J and Bodhak, Subhadip and Cardone, Antonio and Brady, Mary and others} } @article {20329, title = {Climate influence on Vibrio and associated human diseases during the past half-century in the coastal North Atlantic}, journal = {Proceedings of the National Academy of Sciences}, year = {2016}, pages = {E5062 - E5071}, abstract = {Climate change is having a dramatic impact on marine animal and plant communities but little is known of its influence on marine prokaryotes, which represent the largest living biomass in the world oceans and play a fundamental role in maintaining life on our planet. In this study, for the first time to our knowledge, experimental evidence is provided on the link between multidecadal climatic variability in the temperate North Atlantic and the presence and spread of an important group of marine prokaryotes, the vibrios, which are responsible for several infections in both humans and animals.
Using archived formalin-preserved plankton samples collected by the Continuous Plankton Recorder survey over the past half-century (1958{\textendash}2011), we assessed retrospectively the relative abundance of vibrios, including human pathogens, in nine areas of the North Atlantic and North Sea and showed correlation with climate and plankton changes. Generalized additive models revealed that the long-term increase in Vibrio abundance is promoted by increasing sea surface temperatures (up to \~{}1.5 {\textdegree}C over the past 54 y) and is positively correlated with the Northern Hemisphere Temperature (NHT) and Atlantic Multidecadal Oscillation (AMO) climatic indices (P < 0.001). Such increases are associated with an unprecedented occurrence of environmentally acquired Vibrio infections in the human population of Northern Europe and the Atlantic coast of the United States in recent years.}, issn = {0027-8424}, doi = {10.1073/pnas.1609157113}, url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1609157113}, author = {Vezzulli, Luigi and Grande, Chiara and Reid, Philip C. and H{\'e}laou{\"e}t, Pierre and Edwards, Martin and H{\"o}fle, Manfred G. and Brettar, Ingrid and Rita R Colwell and Pruzzo, Carla} } @article {20060, title = {Computational study of the inhibitory mechanism of the kinase CDK5 hyperactivity by peptide p5 and derivation of a pharmacophore}, journal = {Journal of Computer-Aided Molecular Design}, volume = {30}, year = {2016}, month = {Jan-06-2016}, pages = {513 - 521}, issn = {0920-654X}, doi = {10.1007/s10822-016-9922-3}, url = {http://link.springer.com/10.1007/s10822-016-9922-3}, author = {Cardone, Antonio and Brady, M. and Sriram, R. and Pant, H. C. and Hassan, S. A.} } @article {20336, title = {Cross-talk among flesh-eating Aeromonas hydrophila strains in mixed infection leading to necrotizing fasciitis}, journal = {Proceedings of the National Academy of Sciences}, volume = {113}, year = {2016}, pages = {722 - 727}, abstract = {Necrotizing fasciitis (NF) caused by flesh-eating bacteria is associated with high case fatality. In an earlier study, we reported infection of an immunocompetent individual with multiple strains of Aeromonas hydrophila (NF1{\textendash}NF4); the latter three constituted a clonal group, whereas NF1 was phylogenetically distinct. To understand the complex interactions of these strains in NF pathophysiology, a mouse model was used, whereby either single or mixed A. hydrophila strains were injected intramuscularly. NF2, which harbors the exotoxin A (exoA) gene, was highly virulent when injected alone, but its virulence was attenuated in the presence of NF1 (exoA-minus). NF1 alone, although not lethal to animals, became highly virulent when combined with NF2, its virulence augmented by cis-exoA expression when injected alone in mice. Based on metagenomics and microbiological analyses, it was found that, in mixed infection, NF1 selectively disseminated to mouse peripheral organs, whereas the other strains (NF2, NF3, and NF4) were confined to the injection site and eventually cleared. In vitro studies showed NF2 to be more effectively phagocytized and killed by macrophages than NF1.
NF1 inhibited growth of NF2 on solid media, but ExoA of NF2 augmented virulence of NF1, and the presence of NF1 facilitated clearance of NF2 from animals, either by enhanced priming of the host immune system or by direct killing via a contact-dependent mechanism.}, issn = {0027-8424}, doi = {10.1073/pnas.1523817113}, url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1523817113}, author = {Ponnusamy, Duraisamy and Kozlova, Elena V. and Sha, Jian and Erova, Tatiana E. and Azar, Sasha R. and Fitts, Eric C. and Kirtley, Michelle L. and Tiner, Bethany L. and Andersson, Jourdan A. and Grim, Christopher J. and Isom, Richard P. and Hasan, Nur A. and Rita R Colwell and Chopra, Ashok K.} } @article {20331, title = {In Plain View: A Transparent Systems Approach for Enhancing Health Policy Decisions}, year = {2016}, month = {07/2016}, url = {https://www.nae.edu/19582/Bridge/155266/155393.aspx}, author = {Guru Madhavan and Charles E. Phelps and Rita R Colwell and Rino Rappuoli and Harvey V. Fineberg} } @article {20333, title = {Strategic Planning in Population Health and Public Health Practice: A Call to Action for Higher Education}, journal = {The Milbank Quarterly}, volume = {94}, year = {2016}, month = {Jan-03-2016}, pages = {109 - 125}, abstract = {POLICY POINTS: Scarce resources, especially in population health and public health practice, underlie the importance of strategic planning. Public health agencies{\textquoteright} current planning and priority-setting efforts are often narrow, at times opaque, and focused on single metrics such as cost-effectiveness. As demonstrated by SMART Vaccines, a decision support software system developed by the Institute of Medicine and the National Academy of Engineering, new approaches to strategic planning allow the formal incorporation of multiple stakeholder views and multicriteria decision making that surpass even those sophisticated cost-effectiveness analyses widely recommended and used for public health planning. Institutions of higher education can and should respond by building on modern strategic planning tools as they teach their students how to improve population health and public health practice. CONTEXT: Strategic planning in population health and public health practice often uses single indicators of success or, when using multiple indicators, provides no mechanism for coherently combining the assessments. Cost-effectiveness analysis, the most complex strategic planning tool commonly applied in public health, uses only a single metric to evaluate programmatic choices, even though other factors often influence actual decisions. METHODS: Our work employed a multicriteria systems analysis approach{\textemdash}specifically, multiattribute utility theory{\textemdash}to assist in strategic planning and priority setting in a particular area of health care (vaccines), thereby moving beyond the traditional cost-effectiveness analysis approach. FINDINGS: (1) Multicriteria systems analysis provides more flexibility, transparency, and clarity in decision support for public health issues compared with cost-effectiveness analysis. (2) More sophisticated systems-level analyses will become increasingly important to public health as disease burdens increase and the resources to deal with them become scarcer. CONCLUSIONS: The teaching of strategic planning in public health must be expanded in order to fill a void in the profession{\textquoteright}s planning capabilities.
Public health training should actively incorporate model building, promote the interactive use of software tools, and explore planning approaches that transcend restrictive assumptions of cost-effectiveness analysis. The Strategic Multi-Attribute Ranking Tool for Vaccines (SMART Vaccines), which was recently developed by the Institute of Medicine and the National Academy of Engineering to help prioritize new vaccine development, is a working example of systems analysis as a basis for decision support.}, doi = {10.1111/1468-0009.12182}, url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4941964/}, author = {Phelps, Charles and Madhavan, Guruprasad and Rappuoli, Rino and Levin, Scott and Shortliffe, Edward and Rita R Colwell} } @article {20341, title = {Building Infectious Disease Research Programs to Promote Security and Enhance Collaborations with Countries of the Former Soviet Union}, journal = {Frontiers in Public Health}, volume = {3}, year = {2015}, doi = {10.3389/fpubh.2015.00271}, url = {http://journal.frontiersin.org/Article/10.3389/fpubh.2015.00271/abstract}, author = {Bartholomew, James C. and Pearson, Andrew D. and Stenseth, Nils C. and LeDuc, James W. and Hirschberg, David L. and Rita R Colwell} } @article {19903, title = {Detection and characterization of nonspecific, sparsely populated binding modes in the early stages of complexation}, journal = {Journal of Computational Chemistry}, year = {2015}, month = {2015 Mar 18}, abstract = {A method is proposed to study protein-ligand binding in a system governed by specific and nonspecific interactions. Strong associations lead to narrow distributions in the protein{\textquoteright}s configuration space; weak and ultraweak associations lead instead to broader distributions, a manifestation of nonspecific, sparsely populated binding modes with multiple interfaces. The method is based on the notion that a discrete set of preferential first-encounter modes are metastable states from which stable (prerelaxation) complexes at equilibrium evolve. The method can be used to explore alternative pathways of complexation with statistical significance and can be integrated into a general algorithm to study protein interaction networks. The method is applied to a peptide-protein complex. The peptide adopts several low-population conformers and binds in a variety of modes with a broad range of affinities. The system is thus well suited to analyze general features of binding, including conformational selection, multiplicity of binding modes, and nonspecific interactions, and to illustrate how the method can be applied to study these problems systematically. The equilibrium distributions can be used to generate biasing functions for simulations of multiprotein systems from which bulk thermodynamic quantities can be calculated.}, issn = {1096-987X}, doi = {10.1002/jcc.23883}, author = {Cardone, Antonio and Bornstein, Aaron and Pant, Harish C and Brady, Mary and Sriram, Ram and Hassan, Sergio A} } @conference {20067, title = {Segmentation and sub-cellular feature-based analysis of microscopy images}, booktitle = {BioImage Informatics Conference}, year = {2015}, month = {10/2015}, author = {Cardone, Antonio and Chalfoun, Joe and Peskin, Adele and Bajcsy, Peter and Kociolek, Marcin and Bhadriraju, Kiran and Brady, Mary} } @article {20062, title = {Survey statistics of automated segmentations applied to optical imaging of mammalian cells}, journal = {BMC Bioinformatics}, volume = {16}, year = {2015}, pages = {1}, author = {Bajcsy, Peter and Cardone, Antonio and Chalfoun, Joe and Halter, Michael and Juba, Derek and Kociolek, Marcin and Majurski, Michael and Peskin, Adele and Simon, Carl and Simon, Mylene and others} } @article {20340, title = {A unified initiative to harness Earth{\textquoteright}s microbiomes}, journal = {Science}, year = {2015}, pages = {507 - 508}, issn = {0036-8075}, doi = {10.1126/science.aac8480}, url = {http://www.sciencemag.org/cgi/doi/10.1126/science.aac8480}, author = {Alivisatos, A. P. and Blaser, M. J. and Brodie, E. L. and Chun, M. and Dangl, J. L. and Donohue, T. J. and Dorrestein, P. C. and Gilbert, J. A. and Green, J. L. and Jansson, J. K. and Knight, R. and Maxon, M. E. and McFall-Ngai, M. J. and Miller, J. F. and Pollard, K. S. and Ruby, E. G. and Taha, S. A. and Rita R Colwell} } @article {20359, title = {Vibrio metoecus sp. nov., a close relative of Vibrio cholerae isolated from coastal brackish ponds and clinical specimens}, journal = {International Journal of Systematic and Evolutionary Microbiology}, year = {2014}, month = {Jan-09-2014}, pages = {3208 - 3214}, issn = {1466-5026}, doi = {10.1099/ijs.0.060145-0}, url = {http://ijs.microbiologyresearch.org/content/journal/ijsem/10.1099/ijs.0.060145-0}, author = {Kirchberger, P. C. and Turnsek, M. and Hunt, D. E. and Haley, B. J. and Rita R Colwell and Polz, M. F. and Tarr, C. L. and Boucher, Y.} } @article {20357, title = {Phylodynamic Analysis of Clinical and Environmental Vibrio cholerae Isolates from Haiti Reveals Diversification Driven by Positive Selection}, journal = {mBio}, year = {2014}, abstract = {Phylodynamic analysis of genome-wide single-nucleotide polymorphism (SNP) data is a powerful tool to investigate underlying evolutionary processes of bacterial epidemics. The method was applied to investigate a collection of 65 clinical and environmental isolates of Vibrio cholerae from Haiti collected between 2010 and 2012. Characterization of isolates recovered from environmental samples identified a total of four toxigenic V. cholerae O1 isolates, four non-O1/non-O139 isolates, and a novel nontoxigenic V. cholerae O1 isolate with the classical tcpA gene. Phylogenies of strains were inferred from genome-wide SNPs using coalescent-based demographic models within a Bayesian framework. A close phylogenetic relationship between clinical and environmental toxigenic V. cholerae O1 strains was observed. As cholera spread throughout Haiti between October 2010 and August 2012, the population size initially increased and then fluctuated over time.
Selection analysis along internal branches of the phylogeny showed a steady accumulation of synonymous substitutions and a progressive increase of nonsynonymous substitutions over time, suggesting diversification likely was driven by positive selection. Short-term accumulation of nonsynonymous substitutions driven by selection may have significant implications for virulence, transmission dynamics, and even vaccine efficacy.}, doi = {10.1128/mBio.01824-14}, url = {http://mbio.asm.org/lookup/doi/10.1128/mBio.01824-14}, author = {Azarian, Taj and Ali, Afsar and Johnson, Judith A. and Mohr, David and Prosperi, Mattia and Veras, Nazle M. and Jubair, Mohammed and Strickland, Samantha L. and Rashid, Mohammad H. and Alam, Meer T. and Weppelmann, Thomas A. and Katz, Lee S. and Tarr, Cheryl L. and Rita R Colwell and Morris, J. Glenn and Salemi, Marco} } @inbook {20379, title = {The Human Environment Interface: Applying Ecosystem Concepts to Health}, booktitle = {One Health: The Human-Animal-Environment Interfaces in Emerging Infectious Diseases}, series = {Current Topics in Microbiology and Immunology}, volume = {365}, year = {2013}, pages = {83 - 100}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, isbn = {978-3-642-36888-2}, issn = {0070-217X}, doi = {10.1007/978-3-642-36889-9}, url = {http://link.springer.com/10.1007/978-3-642-36889-9}, author = {Preston, Nicholas D. and Daszak, Peter and Rita R Colwell}, editor = {Mackenzie, John S. and Jeggo, Martyn and Daszak, Peter and Richt, Juergen A.} } @conference {19700, title = {Improving public transit accessibility for blind riders by crowdsourcing bus stop landmark locations with Google Street View}, booktitle = {The 15th International ACM SIGACCESS Conference on Computers and Accessibility}, year = {2013}, pages = {16 - 8}, publisher = {ACM}, organization = {SIGACCESS, ACM Special Interest Group on Accessible Computing}, address = {New York, New York, USA}, abstract = {Low-vision and blind bus riders often rely on known physical landmarks to help locate and verify bus stop locations (e.g., by searching for a shelter, bench, newspaper bin). However, there are currently few, if any, methods to determine this information a priori via ...}, isbn = {9781450324052}, doi = {10.1145/2513383.2513448}, url = {http://dl.acm.org/citation.cfm?doid=2513383.2513448}, author = {Hara, Kotaro and Azenkot, Shiri and Campbell, Megan and Bennett, Cynthia L and Le, Vicki and Pannella, Sean and Moore, Robert and Minckler, Kelly and Ng, Rochelle H and Jon Froehlich} } @article {20368, title = {Ocean Warming and Spread of Pathogenic Vibrios in the Aquatic Environment}, journal = {Microbial Ecology}, year = {2013}, pages = {817 - 825}, abstract = {Vibrios are among the most common bacteria that inhabit surface waters throughout the world and are responsible for a number of severe infections both in humans and animals. Several reports recently showed that human Vibrio illnesses are increasing worldwide, including fatal acute diarrheal diseases, such as cholera, gastroenteritis, wound infections, and septicemia. Many scientists believe this increase may be associated with global warming and the rise in sea surface temperature (SST), although not enough evidence is available to support a causal link between emergence of Vibrio infections and climate warming. The effect of increased SST in promoting spread of vibrios in coastal and brackish waters is considered a causal factor explaining this trend.
Field and laboratory studies carried out over the past 40 years supported this hypothesis, clearly showing that temperature promotes Vibrio growth and persistence in the aquatic environment. Most recently, a long-term retrospective microbiological study carried out in the coastal waters of the southern North Sea provided the first experimental evidence for a positive and significant relationship between SST and Vibrio occurrence over a multidecadal time scale. As a future challenge, macroecological studies of the effects of ocean warming on Vibrio persistence and spread in the aquatic environment over large spatial and temporal scales would conclusively support evidence acquired to date, combined with studies of the impact of global warming on epidemiologically relevant variables, such as host susceptibility and exposure. Assessing a causal link between ongoing climate change and enhanced growth and spread of vibrios and related illness is expected to improve forecasts and mitigate future outbreaks associated with these pathogens.}, issn = {0095-3628}, doi = {10.1007/s00248-012-0163-2}, url = {http://link.springer.com/10.1007/s00248-012-0163-2}, author = {Vezzulli, Luigi and Rita R Colwell and Pruzzo, Carla} } @inbook {19620, title = {Parallel and Dynamic Searchable Symmetric Encryption}, booktitle = {Financial Cryptography and Data Security}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {258 - 274}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Searchable symmetric encryption (SSE) enables a client to outsource a collection of encrypted documents in the cloud and retain the ability to perform keyword searches without revealing information about the contents of the documents and queries. Although efficient SSE constructions are known, previous solutions are highly sequential. This is mainly due to the fact that, currently, the only method for achieving sub-linear time search is the inverted index approach (Curtmola, Garay, Kamara and Ostrovsky, CCS {\textquoteright}06), which requires the search algorithm to access a sequence of memory locations, each of which is unpredictable and stored at the previous location in the sequence. Motivated by advances in multi-core architectures, we present a new method for constructing sub-linear SSE schemes. Our approach is highly parallelizable and dynamic. With roughly a logarithmic number of cores in place, searches for a keyword w in our scheme execute in o(r) parallel time, where r is the number of documents containing keyword w (with more cores, this bound can go down to O(log n), i.e., independent of the result size r). Such time complexity outperforms the optimal Θ(r) sequential search time{\textemdash}a similar bound holds for the updates. Our scheme also achieves the following important properties: (a) it enjoys a strong notion of security, namely security against adaptive chosen-keyword attacks; (b) compared to existing sub-linear dynamic SSE schemes (e.g., Kamara, Papamanthou, Roeder, CCS {\textquoteright}12), updates in our scheme do not leak any information, apart from information that can be inferred from previous search tokens; (c) it can be implemented efficiently in external memory (with logarithmic I/O overhead). Our technique is simple and uses a red-black tree data structure; its security is proven in the random oracle model.}, keywords = {cloud storage, Computer Appl.
in Administrative Data Processing, Data Encryption, e-Commerce/e-business, parallel search, Searchable encryption, Systems and Data Security}, isbn = {978-3-642-39883-4, 978-3-642-39884-1}, url = {http://link.springer.com/chapter/10.1007/978-3-642-39884-1_22}, author = {Kamara, Seny and Charalampos Papamanthou}, editor = {Sadeghi, Ahmad-Reza} } @conference {19617, title = {Practical Dynamic Proofs of Retrievability}, booktitle = {CCS {\textquoteright}13 Proceedings of the 2013 ACM SIGSAC Conference on Computer \& Communications Security}, series = {CCS {\textquoteright}13}, year = {2013}, pages = {325 - 336}, publisher = {ACM}, organization = {ACM}, abstract = {Proofs of Retrievability (PoR), proposed by Juels and Kaliski in 2007, enable a client to store n file blocks with a cloud server so that later the server can prove possession of all the data in a very efficient manner (i.e., with constant computation and bandwidth). Although many efficient PoR schemes for static data have been constructed, only two dynamic PoR schemes exist. The scheme by Stefanov et al. (ACSAC 2012) uses a large amount of client storage and has a large audit cost. The scheme by Cash et al. (EUROCRYPT 2013) is mostly of theoretical interest, as it employs Oblivious RAM (ORAM) as a black box, leading to increased practical overhead (e.g., it requires about 300 times more bandwidth than our construction). We propose a dynamic PoR scheme with constant client storage whose bandwidth cost is comparable to a Merkle hash tree, thus being very practical. Our construction outperforms the constructions of Stefanov et al. and Cash et al., both in theory and in practice. Specifically, for $n$ outsourced blocks of $\beta$ bits each, writing a block requires $\beta + O(\lambda \log n)$ bandwidth and $O(\beta \log n)$ server computation ($\lambda$ is the security parameter). Audits are also very efficient, requiring $\beta + O(\lambda^2 \log n)$ bandwidth. We also show how to make our scheme publicly verifiable, providing the first dynamic PoR scheme with such a property. We finally provide a very efficient implementation of our scheme.}, keywords = {dynamic proofs of retrievability, erasure code, por}, isbn = {978-1-4503-2477-9}, url = {http://doi.acm.org/10.1145/2508859.2516669}, author = {Shi, Elaine and Stefanov, Emil and Charalampos Papamanthou} } @article {19666, title = {Primate Transcript and Protein Expression Levels Evolve Under Compensatory Selection Pressures}, journal = {Science}, volume = {342}, year = {2013}, month = {2013/11/29/}, pages = {1100 - 1104}, abstract = {Changes in gene regulation have likely played an important role in the evolution of primates. Differences in messenger RNA (mRNA) expression levels across primates have often been documented; however, it is not yet known to what extent measurements of divergence in mRNA levels reflect divergence in protein expression levels, which are probably more important in determining phenotypic differences. We used high-resolution, quantitative mass spectrometry to collect protein expression measurements from human, chimpanzee, and rhesus macaque lymphoblastoid cell lines and compared them to transcript expression data from the same samples. We found dozens of genes with significant expression differences between species at the mRNA level yet little or no difference in protein expression.
Overall, our data suggest that protein expression levels evolve under stronger evolutionary constraint than mRNA levels. Don{\textquoteright}t Ape Protein Variation: Changes in DNA and messenger RNA (mRNA) expression levels have been used to estimate evolutionary changes between species. However, protein expression levels may better reflect selection on divergent and constrained phenotypes. Khan et al. (p. 1100, published online 17 October; see the Perspective by Vogel) measured the differences among and within species between mRNA expression and protein levels in humans, chimpanzees, and rhesus macaques, identifying protein transcripts that seem to be under lineage-specific constraint between humans and chimpanzees.}, isbn = {0036-8075, 1095-9203}, url = {http://www.sciencemag.org/content/342/6162/1100}, author = {Zia Khan and Ford, Michael J. and Cusanovich, Darren A. and Mitrano, Amy and Pritchard, Jonathan K. and Gilad, Yoav} } @article {19371, title = {Segmenting time-lapse phase contrast images of adjacent NIH 3T3 cells}, journal = {Journal of Microscopy}, volume = {249}, year = {2013}, month = {2013 Jan}, pages = {41-52}, abstract = {We present a new method for segmenting phase contrast images of NIH 3T3 fibroblast cells that is accurate even when cells are physically in contact with each other. The problem of segmentation, when cells are in contact, poses a challenge to the accurate automation of cell counting, tracking and lineage modelling in cell biology. The segmentation method presented in this paper consists of (1) background reconstruction to obtain noise-free foreground pixels and (2) incorporation of biological insight about dividing and nondividing cells into the segmentation process to achieve reliable separation of foreground pixels defined as pixels associated with individual cells. The segmentation results for a time-lapse image stack were compared against 238 manually segmented images (8219 cells) provided by experts, which we consider as reference data. We chose two metrics to measure the accuracy of segmentation: the {\textquoteright}Adjusted Rand Index{\textquoteright}, which compares similarities at a pixel level between masks resulting from manual and automated segmentation, and the {\textquoteright}Number of Cells per Field{\textquoteright} (NCF), which compares the number of cells identified in the field by manual versus automated analysis.
Our results show that the automated segmentation, compared to manual segmentation, has an average Adjusted Rand Index of 0.96 (1 being a perfect match), with a standard deviation of 0.03, and an average difference of the two numbers of cells per field equal to 5.39\% with a standard deviation of 4.6\%.}, keywords = {Animals, Cell Adhesion, Cell Count, Cell Division, Cell Shape, Computational Biology, Fibroblasts, Image Processing, Computer-Assisted, Mice, Microscopy, Phase-Contrast, NIH 3T3 Cells, Reproducibility of Results, Sensitivity and Specificity, Time-Lapse Imaging}, issn = {1365-2818}, doi = {10.1111/j.1365-2818.2012.03678.x}, author = {Chalfoun, J and Kociolek, M and Dima, A and Halter, M and Cardone, Antonio and Peskin, A and Bajcsy, P and Brady, M.} } @inbook {19603, title = {Signatures of Correct Computation}, booktitle = {Theory of Cryptography}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {222 - 242}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We introduce Signatures of Correct Computation (SCC), a new model for verifying dynamic computations in cloud settings. In the SCC model, a trusted source outsources a function f to an untrusted server, along with a public key for that function (to be used during verification). The server can then produce a succinct signature σ vouching for the correctness of the computation of f, i.e., that some result v is indeed the correct outcome of the function f evaluated on some point a. There are two crucial performance properties that we want to guarantee in an SCC construction: (1) verifying the signature should take asymptotically less time than evaluating the function f; and (2) the public key should be efficiently updated whenever the function changes. We construct SCC schemes (satisfying the above two properties) supporting expressive manipulations over multivariate polynomials, such as polynomial evaluation and differentiation. Our constructions are adaptively secure in the random oracle model and achieve optimal updates, i.e., the function{\textquoteright}s public key can be updated in time proportional to the number of updated coefficients, without performing a linear-time computation (in the size of the polynomial). We also show that signatures of correct computation imply Publicly Verifiable Computation (PVC), a model recently introduced in several concurrent and independent works. Roughly speaking, in the SCC model, any client can verify the signature σ and be convinced of some computation result, whereas in the PVC model only the client that issued a query (or anyone who trusts this client) can verify that the server returned a valid signature (proof) for the answer to the query.
Our techniques can be readily adapted to construct PVC schemes with adaptive security, efficient updates, and security without the random oracle model.}, keywords = {Algorithm Analysis and Problem Complexity, Computation by Abstract Devices, Data Encryption, Systems and Data Security}, isbn = {978-3-642-36593-5, 978-3-642-36594-2}, url = {http://link.springer.com/chapter/10.1007/978-3-642-36594-2_13}, author = {Charalampos Papamanthou and Shi, Elaine and Tamassia, Roberto}, editor = {Sahai, Amit} } @article {19413, title = {Specific and Non-Specific Protein Association in Solution: Computation of Solvent Effects and Prediction of First-Encounter Modes for Efficient Configurational Bias Monte Carlo Simulations}, journal = {The Journal of Physical Chemistry B}, volume = {117}, year = {2013}, pages = {12360 - 12374}, issn = {1520-6106}, doi = {10.1021/jp4050594}, url = {http://pubs.acs.org/doi/abs/10.1021/jp4050594}, author = {Cardone, Antonio and Pant, Harish and Hassan, Sergio A.} } @inbook {19613, title = {Streaming Authenticated Data Structures}, booktitle = {Advances in Cryptology {\textendash} EUROCRYPT 2013}, series = {Lecture Notes in Computer Science}, year = {2013}, month = {2013/01/01/}, pages = {353 - 370}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We consider the problem of streaming verifiable computation, where both a verifier and a prover observe a stream of $n$ elements $x_1, x_2, \ldots, x_n$ and the verifier can later delegate some computation over the stream to the prover. The prover must return the output of the computation, along with a cryptographic proof to be used for verifying the correctness of the output. Due to the nature of the streaming setting, the verifier can only keep small local state (e.g., logarithmic), which must be updatable in a streaming manner and with no interaction with the prover. Such constraints make the problem particularly challenging and rule out applying existing verifiable computation schemes. We propose streaming authenticated data structures, a model that enables efficient verification of data structure queries on a stream. Compared to previous work, we achieve an exponential improvement in the prover{\textquoteright}s running time: While previous solutions have linear prover complexity (in the size of the stream), even for queries executing in sublinear time (e.g., set membership), we propose a scheme with $O(\log M \log n)$ prover complexity, where $n$ is the size of the stream and $M$ is the size of the universe of elements. Our schemes support a series of expressive queries, such as (non-)membership, successor, range search and frequency queries, over an ordered universe and even in higher dimensions. The central idea of our construction is a new authentication tree, called a generalized hash tree. We instantiate our generalized hash tree with a hash function based on lattice assumptions, showing that it enjoys suitable algebraic properties that traditional Merkle trees lack.
We exploit such properties to achieve our results.}, keywords = {Algorithm Analysis and Problem Complexity, Data Encryption, Discrete Mathematics in Computer Science, Systems and Data Security}, isbn = {978-3-642-38347-2, 978-3-642-38348-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-38348-9_22}, author = {Charalampos Papamanthou and Shi, Elaine and Tamassia, Roberto and Yi, Ke}, editor = {Johansson, Thomas and Nguyen, Phong Q.} } @article {19130, title = {AGORA: Assembly Guided by Optical Restriction Alignment}, journal = {BMC Bioinformatics}, volume = {13}, year = {2012}, abstract = {Genome assembly is difficult due to repeated sequences within the genome, which create ambiguities and cause the final assembly to be broken up into many separate sequences (contigs). Long-range linking information, such as mate-pairs or mapping data, is necessary to help assembly software resolve repeats, thereby leading to a more complete reconstruction of genomes. Prior work has used optical maps for validating assemblies and scaffolding contigs, after an initial assembly has been produced. However, optical maps have not previously been used within the genome assembly process. Here, we use optical map information within the popular de Bruijn graph assembly paradigm to eliminate paths in the de Bruijn graph which are not consistent with the optical map and help determine the correct reconstruction of the genome. We developed a new algorithm called AGORA: Assembly Guided by Optical Restriction Alignment. AGORA is the first algorithm to use optical map information directly within the de Bruijn graph framework to help produce an accurate assembly of a genome that is consistent with the optical map information provided. Our simulations on bacterial genomes show that AGORA is effective at producing assemblies closely matching the reference sequences. Additionally, we show that noise in the optical map can have a strong impact on the final assembly quality for some complex genomes, and we also measure how various characteristics of the starting de Bruijn graph may impact the quality of the final assembly. Lastly, we show that a proper choice of restriction enzyme for the optical map may substantially improve the quality of the final assembly. Our work shows that optical maps can be used effectively to assemble genomes within the de Bruijn graph assembly framework. Our experiments also provide insights into the characteristics of the mapping data that most affect the performance of our algorithm, indicating the potential benefit of more accurate optical mapping technologies, such as nano-coding.}, author = {Lin, H.C. and Goldstein, S. and Mendelowitz, L. and Zhou, S. and Wetzel, J. and Schwartz, D.C. and Pop, Mihai} } @article {19723, title = {Archaeosortases and exosortases are widely distributed systems linking membrane transit with posttranslational modification}, journal = {Journal of Bacteriology}, volume = {194}, year = {2012}, month = {2012 Jan}, pages = {36-48}, abstract = {Multiple new prokaryotic C-terminal protein-sorting signals were found that reprise the tripartite architecture shared by LPXTG and PEP-CTERM: motif, TM helix, basic cluster. Defining hidden Markov models were constructed for all. PGF-CTERM occurs in 29 archaeal species, some of which have more than 50 proteins that share the domain. PGF-CTERM proteins include the major cell surface protein in Halobacterium, a glycoprotein with a partially characterized diphytanylglyceryl phosphate linkage near its C terminus. Comparative genomics identifies a distant exosortase homolog, designated archaeosortase A (ArtA), as the likely protein-processing enzyme for PGF-CTERM. Proteomics suggests that the PGF-CTERM region is removed. Additional systems include VPXXXP-CTERM/archaeosortase B in two of the same archaea and PEF-CTERM/archaeosortase C in four others. Bacterial exosortases often fall into subfamilies that partner with very different cohorts of extracellular polymeric substance biosynthesis proteins; several species have multiple systems. Variant systems include the VPDSG-CTERM/exosortase C system unique to certain members of the phylum Verrucomicrobia, VPLPA-CTERM/exosortase D in several alpha- and deltaproteobacterial species, and a dedicated (single-target) VPEID-CTERM/exosortase E system in alphaproteobacteria. Exosortase-related families XrtF in the class Flavobacteria and XrtG in Gram-positive bacteria mark distinctive conserved gene neighborhoods. A picture emerges of an ancient and now well-differentiated superfamily of deeply membrane-embedded protein-processing enzymes. Their target proteins are destined to transit cellular membranes during their biosynthesis, during which most undergo additional posttranslational modifications such as glycosylation.}, keywords = {Amino Acid Sequence, Aminoacyltransferases, Archaeal Proteins, Bacterial Proteins, Cell Membrane, Cysteine Endopeptidases, Gene Expression Regulation, Archaeal, Gene Expression Regulation, Bacterial, Gene Expression Regulation, Enzymologic, Molecular Sequence Data, Protein Processing, Post-Translational}, issn = {1098-5530}, doi = {10.1128/JB.06026-11}, author = {Haft, Daniel H and Payne, Samuel H and Jeremy D Selengut} } @article {19135, title = {Bioinformatics for the Human Microbiome Project}, journal = {PLoS Computational Biology}, volume = {8}, year = {2012}, month = {11/2012}, pages = {e1002779}, doi = {10.1371/journal.pcbi.1002779}, author = {Gevers, Dirk and Pop, Mihai and Schloss, Patrick D. and Huttenhower, Curtis} } @conference {19218, title = {The design and evaluation of prototype eco-feedback displays for fixture-level water usage data}, year = {2012}, pages = {2367 - 2376}, publisher = {ACM}, organization = {ACM}, isbn = {145031015X}, author = {Jon Froehlich and Findlater, L. and Ostergren, M. and Ramanathan, S. and Peterson, J. and Wragg, I. and Larson, E. and Fu, F. and Bai, M. and Patel, S.} } @article {19253, title = {Design and Synthesis for Multimedia Systems Using the Targeted Dataflow Interchange Format}, journal = {IEEE Transactions on Multimedia}, volume = {14}, year = {2012}, pages = {630 - 640}, abstract = {Development of multimedia systems that can be targeted to different platforms is challenging due to the need for rigorous integration between high-level abstract modeling and low-level synthesis and optimization. In this paper, a new dataflow-based design tool called the targeted dataflow interchange format is introduced for retargetable design, analysis, and implementation of embedded software for multimedia systems. Our approach provides novel capabilities, based on principles of task-level dataflow analysis, for exploring and optimizing interactions across design components; object-oriented data structures for encapsulating contextual information for components; a novel model for representing parameterized schedules that are derived from repetitive graph structures; and automated code generation for programming interfaces and low-level customizations that are geared toward high-performance embedded-processing architectures.
We demonstrate our design tool for cross-platform application design, parameterized schedule representation, and associated dataflow graph-code generation using a case study centered around an image registration application.}, keywords = {associated dataflow graph-code generation, Computational modeling, contextual information encapsulation, cross-platform application design, data flow graphs, Data models, Data structures, Dataflow graphs, design components, design tools, Digital signal processing, electronic data interchange, embedded signal processing, Embedded software, high-level abstract modeling, high-performance embedded-processing architectures, Image coding, image registration application, image representation, low-level customizations, low-level optimization, low-level synthesis, Multimedia communication, multimedia systems, multimedia systems development, object-oriented data structures, object-oriented methods, parameterized schedule representation, programming interfaces, repetitive graph structures, retargetable design, Schedules, scheduling, software synthesis, Streaming media, targeted dataflow interchange format, task-level dataflow analysis}, isbn = {1520-9210}, author = {Chung-Ching Shen and Wu, Shenpei and Sane, N. and Wu, Hsiang-Huang and Plishker,W. and Bhattacharyya, Shuvra S.} } @article {12432, title = {Dictionary-based Face Recognition Under Variable Lighting and Pose}, journal = {IEEE Transactions on Information Forensics and Security}, volume = {PP}, year = {2012}, month = {2012/02/27/}, pages = {1 - 1}, abstract = {We present a face recognition algorithm based on simultaneous sparse approximations under varying illumination and pose. A dictionary is learned for each class based on given training examples which minimizes the representation error with a sparseness constraint. A novel test image is projected onto the span of the atoms in each learned dictionary. The resulting residual vectors are then used for classification. To handle variations in lighting conditions and pose, an image relighting technique based on pose-robust albedo estimation is used to generate multiple frontal images of the same person with variable lighting. As a result, the proposed algorithm has the ability to recognize human faces with high accuracy even when only a single or a very few images per person are provided for training. The efficiency of the proposed method is demonstrated using publicly available databases and it is shown that this method is efficient and can perform significantly better than many competitive face recognition algorithms.}, keywords = {Biometrics, dictionary learning, face recognition, illumination variation, outlier rejection}, isbn = {1556-6013}, doi = {10.1109/TIFS.2012.2189205}, author = {Patel, Vishal M. and Wu,T. and Biswas,S. and Phillips,P. and Chellapa, Rama} } @article {14328, title = {Disaggregated water sensing from a single, pressure-based sensor: An extended analysis of HydroSense using staged experiments}, journal = {Pervasive and Mobile Computing}, volume = {8}, year = {2012}, month = {2012/02//}, pages = {82 - 102}, abstract = {We present an extended analysis of our previous work on the HydroSense technology, which is a low-cost and easily installed single-point sensor of pressure for automatically disaggregating water usage activities in the home (Froehlich et~al., 2009~[53]).
We expand upon this work by providing a survey of existing and emerging water disaggregation techniques, a more comprehensive description of the theory of operation behind our approach, and an expanded analysis section that includes hot versus cold water valve usage classification and a comparison between two classification approaches: the template-based matching scheme used in Froehlich et~al. (2009)~[53] and a new stochastic approach using a Hidden Markov Model. We show that both are successful in identifying valve- and fixture-level water events with greater than 90\% accuracies. We conclude with a discussion of the limitations in our experimental methodology and open problems going forward.}, keywords = {Activity sensing, H1.2, H5.2, Infrastructure-mediated sensing, Sensors, Water usage}, isbn = {1574-1192}, doi = {10.1016/j.pmcj.2010.08.008}, url = {http://www.sciencedirect.com/science/article/pii/S1574119210000842}, author = {Larson,Eric and Jon Froehlich and Campbell,Tim and Haggerty,Conor and Atlas,Les and Fogarty,James and Patel,Shwetak N.} } @conference {14906, title = {Dynamic changes in motivation in collaborative citizen-science projects}, booktitle = {Proceedings of the ACM 2012 conference on Computer Supported Cooperative Work}, series = {CSCW {\textquoteright}12}, year = {2012}, month = {2012///}, pages = {217 - 226}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Online citizen science projects engage volunteers in collecting, analyzing, and curating scientific data. Existing projects have demonstrated the value of using volunteers to collect data, but few projects have reached the full collaborative potential of scientists and volunteers. Understanding the shared and unique motivations of these two groups can help designers establish the technical and social infrastructures needed to promote effective partnerships. We present findings from a study of the motivational factors affecting participation in ecological citizen science projects. We show that volunteers are motivated by a complex framework of factors that dynamically change throughout their cycle of work on scientific projects; this motivational framework is strongly affected by personal interests as well as external factors such as attribution and acknowledgment. Identifying the pivotal points of motivational shift and addressing them in the design of citizen-science systems will facilitate improved collaboration between scientists and volunteers.}, keywords = {citizen science, Collaboration, crowdsourcing, ecology, motivation, scientists, volunteers}, isbn = {978-1-4503-1086-4}, doi = {10.1145/2145204.2145238}, url = {http://doi.acm.org/10.1145/2145204.2145238}, author = {Rotman,Dana and Preece,Jenny and Hammock,Jen and Procita,Kezee and Hansen,Derek and Parr,Cynthia and Lewis,Darcy and Jacobs, David W.} } @conference {19599, title = {Dynamic Searchable Symmetric Encryption}, booktitle = {CCS {\textquoteright}12 Proceedings of the 2012 ACM Conference on Computer and Communications Security }, series = {CCS {\textquoteright}12}, year = {2012}, month = {2012///}, pages = {965 - 976}, publisher = {ACM}, organization = {ACM}, abstract = {Searchable symmetric encryption (SSE) allows a client to encrypt its data in such a way that this data can still be searched. The most immediate application of SSE is to cloud storage, where it enables a client to securely outsource its data to an untrusted cloud provider without sacrificing the ability to search over it. 
SSE has been the focus of active research and a multitude of schemes that achieve various levels of security and efficiency have been proposed. Any practical SSE scheme, however, should (at a minimum) satisfy the following properties: sublinear search time, security against adaptive chosen-keyword attacks, compact indexes and the ability to add and delete files efficiently. Unfortunately, none of the previously-known SSE constructions achieve all these properties at the same time. This severely limits the practical value of SSE and decreases its chance of deployment in real-world cloud storage systems. To address this, we propose the first SSE scheme to satisfy all the properties outlined above. Our construction extends the inverted index approach (Curtmola et al., CCS 2006) in several non-trivial ways and introduces new techniques for the design of SSE. In addition, we implement our scheme and conduct a performance evaluation, showing that our approach is highly efficient and ready for deployment.}, keywords = {Cloud computing, cloud storage, searchable symmetric encryption}, isbn = {978-1-4503-1651-4}, url = {http://doi.acm.org/10.1145/2382196.2382298}, author = {Kamara, Seny and Charalampos Papamanthou and Roeder, Tom} } @article {20387, title = {Ecology of Vibrio parahaemolyticus and Vibrio vulnificus in the Coastal and Estuarine Waters of Louisiana, Maryland, Mississippi, and Washington (United States)}, journal = {Applied and Environmental Microbiology}, year = {2012}, month = {Mar-10-2013}, pages = {7249 - 7257}, abstract = {Vibrio parahaemolyticus and Vibrio vulnificus, which are native to estuaries globally, are agents of seafood-borne or wound infections, both potentially fatal. Like all vibrios autochthonous to coastal regions, their abundance varies with changes in environmental parameters. Sea surface temperature (SST), sea surface height (SSH), and chlorophyll have been shown to be predictors of zooplankton and thus factors linked to vibrio populations. The contribution of salinity, conductivity, turbidity, and dissolved organic carbon to the incidence and distribution of Vibrio spp. has also been reported. Here, a multicoastal, 21-month study was conducted to determine relationships between environmental parameters and V. parahaemolyticus and V. vulnificus populations in water, oysters, and sediment in three coastal areas of the United States. Because ecologically unique sites were included in the study, it was possible to analyze individual parameters over wide ranges. Molecular methods were used to detect genes for thermolabile hemolysin (tlh), thermostable direct hemolysin (tdh), and tdh-related hemolysin (trh) as indicators of V. parahaemolyticus and the hemolysin gene vvhA for V. vulnificus. SST and suspended particulate matter were found to be strong predictors of total and potentially pathogenic V. parahaemolyticus and V. vulnificus. Other predictors included chlorophyll a, salinity, and dissolved organic carbon. For the ecologically unique sites included in the study, SST was confirmed as an effective predictor of annual variation in vibrio abundance, with other parameters explaining a portion of the variation not attributable to SST.}, issn = {0099-2240}, doi = {10.1128/AEM.01296-12}, url = {http://aem.asm.org/lookup/doi/10.1128/AEM.01296-12}, author = {Johnson, Crystal N. and Bowers, John C. and Griffitt, Kimberly J. and Molina, Vanessa and Clostio, Rachel W. and Pei, Shaofeng and Laws, Edward and Paranjpye, Rohinee N. and Strom, Mark S. 
and Chen, Arlene and Hasan, Nur A. and Huq, Anwar and Noriea, Nicholas F. and Grimes, D. Jay and Rita R Colwell} } @article {19612, title = {Efficient Verification of Web-content Searching Through Authenticated Web Crawlers}, journal = {Proc. VLDB Endow.}, volume = {5}, year = {2012}, month = {2012/06//}, pages = {920 - 931}, abstract = {We consider the problem of verifying the correctness and completeness of the result of a keyword search. We introduce the concept of an authenticated web crawler and present its design and prototype implementation. An authenticated web crawler is a trusted program that computes a specially-crafted signature over the web contents it visits. This signature enables (i) the verification of common Internet queries on web pages, such as conjunctive keyword searches---this guarantees that the output of a conjunctive keyword search is correct and complete; (ii) the verification of the content returned by such Internet queries---this guarantees that web data is authentic and has not been maliciously altered since the computation of the signature by the crawler. In our solution, the search engine returns a cryptographic proof of the query result. Both the proof size and the verification time are proportional only to the sizes of the query description and the query result, but do not depend on the number or sizes of the web pages over which the search is performed. As we experimentally demonstrate, the prototype implementation of our system provides a low communication overhead between the search engine and the user, and fast verification of the returned results by the user.}, isbn = {2150-8097}, url = {http://dl.acm.org/citation.cfm?id=2336664.2336666}, author = {Goodrich, Michael T. and Charalampos Papamanthou and Nguyen, Duy and Tamassia, Roberto and Lopes, Cristina Videira and Ohrimenko, Olga and Triandopoulos, Nikos} } @article {19126, title = {Exploiting sparseness in de novo genome assembly}, journal = {BMC bioinformatics}, volume = {13}, year = {2012}, month = {2012}, abstract = {The very large memory requirements for the construction of assembly graphs for de novo genome assembly limit current algorithms to super-computing environments. In this paper, we demonstrate that constructing a sparse assembly graph which stores only a small fraction of the observed k-mers as nodes and the links between these nodes allows the de novo assembly of even moderately-sized genomes (~500 M) on a typical laptop computer. We implement this sparse graph concept in a proof-of-principle software package, SparseAssembler, utilizing a new sparse k-mer graph structure evolved from the de Bruijn graph. We test our SparseAssembler with both simulated and real data, achieving ~90\% memory savings and retaining high assembly accuracy, without sacrificing speed in comparison to existing de novo assemblers. }, author = {Ye, C. and Ma, Z.S. and Cannon, C.H. and Pop, Mihai and Yu, D.W.} }
@article {19131, title = {A framework for human microbiome research}, journal = {Nature}, volume = {486}, year = {2012}, month = {2012}, pages = {215 - 221}, abstract = {A variety of microbial communities and their genes (the microbiome) exist throughout the human body, with fundamental roles in human health and disease. The National Institutes of Health (NIH)-funded Human Microbiome Project Consortium has established a population-scale framework to develop metagenomic protocols, resulting in a broad range of quality-controlled resources and data including standardized methods for creating, processing and interpreting distinct types of high-throughput metagenomic data available to the scientific community. Here we present resources from a population of 242 healthy adults sampled at 15 or 18 body sites up to three times, which have generated 5,177 microbial taxonomic profiles from 16S ribosomal RNA genes and over 3.5 terabases of metagenomic sequence so far. In parallel, approximately 800 reference strains isolated from the human body have been sequenced. Collectively, these data represent the largest resource describing the abundance and variety of the human microbiome, while providing a framework for current and future studies.}, author = {Meth{\'e}, B.A. and Nelson,K. E and Pop, Mihai and Creasy, H.H. and Giglio, M.G. and Huttenhower, C. and Gevers, D. and Petrosino, J.F. and Abubucker, S. and Badger, J.H.} } @article {16266, title = {GAGE: A Critical Evaluation of Genome Assemblies and Assembly Algorithms}, journal = {Genome Research}, volume = {22}, year = {2012}, month = {2012}, pages = {557 - 567}, abstract = {New sequencing technology has dramatically altered the landscape of whole-genome sequencing, allowing scientists to initiate numerous projects to decode the genomes of previously unsequenced organisms. The lowest-cost technology can generate deep coverage of most species, including mammals, in just a few days. The sequence data generated by one of these projects consist of millions or billions of short DNA sequences (reads) that range from 50 to 150 nt in length. These sequences must then be assembled de novo before most genome analyses can begin. Unfortunately, genome assembly remains a very difficult problem, made more difficult by shorter reads and unreliable long-range linking information. In this study, we evaluated several of the leading de novo assembly algorithms on four different short-read data sets, all generated by Illumina sequencers.
Our results describe the relative performance of the different assemblers as well as other significant differences in assembly difficulty that appear to be inherent in the genomes themselves. Three overarching conclusions are apparent: first, that data quality, rather than the assembler itself, has a dramatic effect on the quality of an assembled genome; second, that the degree of contiguity of an assembly varies enormously among different assemblers and different genomes; and third, that the correctness of an assembly also varies widely and is not well correlated with statistics on contiguity. To enable others to replicate our results, all of our data and methods are freely available, as are all assemblers used in this study.}, doi = {10.1101/gr.131383.111}, url = {http://genome.cshlp.org/content/22/3/557}, author = {Salzberg,Steven L. and Phillippy,Adam M and Zimin,Aleksey and Puiu,Daniela and Magoc,Tanja and Koren,Sergey and Treangen,Todd J and Schatz,Michael C and Delcher,Arthur L. and Roberts,Michael and Mar{\c c}ais,Guillaume and Pop, Mihai and Yorke,James A.} } @article {16267, title = {Gene Prediction with Glimmer for Metagenomic Sequences Augmented by Classification and Clustering}, journal = {Nucleic Acids Research}, volume = {40}, year = {2012}, month = {2012/01/01/}, pages = {e9}, abstract = {Environmental shotgun sequencing (or metagenomics) is widely used to survey the communities of microbial organisms that live in many diverse ecosystems, such as the human body. Finding the protein-coding genes within the sequences is an important step for assessing the functional capacity of a metagenome. In this work, we developed a metagenomics gene prediction system Glimmer-MG that achieves significantly greater accuracy than previous systems via novel approaches to a number of important prediction subtasks. First, we introduce the use of phylogenetic classifications of the sequences to model parameterization. We also cluster the sequences, grouping together those that likely originated from the same organism. Analogous to iterative schemes that are useful for whole genomes, we retrain our models within each cluster on the initial gene predictions before making final predictions. Finally, we model both insertion/deletion and substitution sequencing errors using a different approach than previous software, allowing Glimmer-MG to change coding frame or pass through stop codons by predicting an error. In a comparison among multiple gene finding methods, Glimmer-MG makes the most sensitive and precise predictions on simulated and real metagenomes for all read lengths and error rates tested.}, isbn = {0305-1048, 1362-4962}, doi = {10.1093/nar/gkr1067}, url = {http://nar.oxfordjournals.org/content/40/1/e9}, author = {Kelley,David R and Liu,Bo and Delcher,Arthur L. and Pop, Mihai and Salzberg,Steven L.} } @article {20386, title = {Genomic diversity of 2010 Haitian cholera outbreak strains}, journal = {Proceedings of the National Academy of Sciences}, year = {2012}, month = {05/2012}, pages = {E2010 - E2017}, issn = {0027-8424}, doi = {10.1073/pnas.1207359109}, url = {http://www.pnas.org/cgi/doi/10.1073/pnas.1207359109}, author = {Hasan, N. A. and Choi, S. Y. and Eppinger, M. and Clark, P. W. and Chen, A. and Alam, M. and Haley, B. J. and Taviani, E. and Hine, E. and Su, Q. and Tallon, L. J. and Prosper, J. B. and Furth, K. and Hoq, M. M. and Li, H. and Fraser-Liggett, C. M. and Cravioto, A. and Huq, A. and Ravel, J. and Cebula, T. A.
and Rita R Colwell} } @article {15917, title = {Gradient-based Image Recovery Methods from Incomplete Fourier Measurements}, journal = {IEEE Transactions on Image Processing}, volume = {PP}, year = {2012}, month = {2012///}, pages = {1 - 1}, abstract = {A major problem in imaging applications such as Magnetic Resonance Imaging (MRI) and Synthetic Aperture Radar (SAR) is the task of trying to reconstruct an image with the smallest possible set of Fourier samples, every single one of which has a potential time and/or power cost. The theory of Compressive Sensing (CS) points to ways of exploiting inherent sparsity in such images in order to achieve accurate recovery using sub-Nyquist sampling schemes. Traditional CS approaches to this problem consist of solving total-variation minimization programs with Fourier measurement constraints or other variations thereof. This paper takes a different approach: Since the horizontal and vertical differences of a medical image are each more sparse or compressible than the corresponding total-variational image, CS methods will be more successful in recovering these differences individually. We develop an algorithm called GradientRec that uses a CS algorithm to recover the horizontal and vertical gradients and then estimates the original image from these gradients. We present two methods of solving the latter inverse problem: one based on least squares optimization and the other based on a generalized Poisson solver. After a thorough derivation of our complete algorithm, we present the results of various experiments that compare the effectiveness of the proposed method against other leading methods.}, keywords = {Compressed sensing, Fourier transforms, Image coding, Image edge detection, Image reconstruction, L1{\textendash}minimization, minimization, Noise measurement, OPTIMIZATION, Poisson solver, Sparse recovery, Total variation, TV}, isbn = {1057-7149}, doi = {10.1109/TIP.2011.2159803}, author = {Patel, Vishal M. and Maleh,R. and Gilbert,A. C and Chellapa, Rama} } @article {19129, title = {Identification of Coli Surface Antigen 23, a Novel Adhesin of Enterotoxigenic Escherichia coli}, journal = {Infection and immunity}, volume = {80}, year = {2012}, month = {2012}, pages = {2791 - 2801}, abstract = {Enterotoxigenic Escherichia coli (ETEC) is an important cause of diarrhea, mainly in developing countries. Although there are 25 different ETEC adhesins described in strains affecting humans, between 15\% and 50\% of the clinical isolates from different geographical regions are negative for these adhesins, suggesting that additional unidentified adhesion determinants might be present. Here, we report the discovery of Coli Surface Antigen 23 (CS23), a novel adhesin expressed by an ETEC serogroup O4 strain (ETEC 1766a), which was negative for the previously known ETEC adhesins, although it has the ability to adhere to Caco-2 cells. CS23 is encoded by an 8.8-kb locus which contains 9 open reading frames (ORFs), 7 of them sharing significant identity with genes required for assembly of K88-related fimbriae. This gene locus, named aal (adhesion-associated locus), is required for the adhesion ability of ETEC 1766a and was able to confer this adhesive phenotype to a nonadherent E. coli HB101 strain. The CS23 major structural subunit, AalE, shares limited identity with known pilin proteins, and it is more closely related to the CS13 pilin protein CshE, carried by human ETEC strains.
Our data indicate that CS23 is a new member of the diverse adhesin repertoire used by ETEC strains.}, author = {Del Canto, F. and Botkin, D.J. and Valenzuela, P. and Popov, V. and Ruiz-Perez, F. and Nataro, J.P. and Levine, M.M. and Stine, O.C. and Pop, Mihai and Torres, A.G. and others} } @article {19721, title = {InterPro in 2011: new developments in the family and domain prediction database.}, journal = {Nucleic Acids Res}, volume = {40}, year = {2012}, month = {2012 Jan}, pages = {D306-12}, abstract = {

InterPro (http://www.ebi.ac.uk/interpro/) is a database that integrates diverse information about protein families, domains and functional sites, and makes it freely available to the public via Web-based interfaces and services. Central to the database are diagnostic models, known as signatures, against which protein sequences can be searched to determine their potential function. InterPro has utility in the large-scale analysis of whole genomes and meta-genomes, as well as in characterizing individual protein sequences. Herein we give an overview of new developments in the database and its associated software since 2009, including updates to database content, curation processes and Web and programmatic interfaces.

}, keywords = {Databases, Protein, Protein Structure, Tertiary, Proteins, Sequence Analysis, Protein, software, Terminology as Topic, User-Computer Interface}, issn = {1362-4962}, doi = {10.1093/nar/gkr948}, author = {Hunter, Sarah and Jones, Philip and Mitchell, Alex and Apweiler, Rolf and Attwood, Teresa K and Bateman, Alex and Bernard, Thomas and Binns, David and Bork, Peer and Burge, Sarah and de Castro, Edouard and Coggill, Penny and Corbett, Matthew and Das, Ujjwal and Daugherty, Louise and Duquenne, Lauranne and Finn, Robert D and Fraser, Matthew and Gough, Julian and Haft, Daniel and Hulo, Nicolas and Kahn, Daniel and Kelly, Elizabeth and Letunic, Ivica and Lonsdale, David and Lopez, Rodrigo and Madera, Martin and Maslen, John and McAnulla, Craig and McDowall, Jennifer and McMenamin, Conor and Mi, Huaiyu and Mutowo-Muellenet, Prudence and Mulder, Nicola and Natale, Darren and Orengo, Christine and Pesseat, Sebastien and Punta, Marco and Quinn, Antony F and Rivoire, Catherine and Sangrador-Vegas, Amaia and Jeremy D Selengut and Sigrist, Christian J A and Scheremetjew, Maxim and Tate, John and Thimmajanarthanan, Manjulapramila and Thomas, Paul D and Wu, Cathy H and Yeats, Corin and Yong, Siew-Yit} } @conference {13585, title = {Local Segmentation of Touching Characters using Contour based Shape Decomposition}, booktitle = {Document Analysis Systems}, year = {2012}, month = {2012///}, abstract = {We propose a contour based shape decomposition approach that provides local segmentation of touching characters. The shape contour is linearized into edgelets and edgelets are merged into boundary fragments. Connection cost between boundary fragments is obtained by considering local smoothness, connection length and a stroke-level property, Similar Stroke Rate. Samples of connections among boundary fragments are randomly generated and the one with the minimum global cost is selected to produce optimal segmentation of the shape. To obtain a binary segmentation using this approach, we make an iterative search for the parameters that yields two components on a shape. Experimental results on a number of synthetic shape images and the LTP dataset showed that this contour based shape decomposition technique is promising and it is effective in providing local segmentation of touching characters.}, author = {Kang,Le and David Doermann and Cao,Huiagu and Prasad,Rohit and Natarajan,Prem} } @article {20381, title = {Long-term effects of ocean warming on the prokaryotic community: evidence from the vibrios}, journal = {The ISME Journal}, volume = {6}, year = {2012}, month = {Jan-01-2012}, pages = {21 - 30}, abstract = {The long-term effects of ocean warming on prokaryotic communities are unknown because of lack of historical data. We overcame this gap by applying a retrospective molecular analysis to the bacterial community on formalin-fixed samples from the historical Continuous Plankton Recorder archive, which is one of the longest and most geographically extensive collections of marine biological samples in the world. We showed that during the last half century, ubiquitous marine bacteria of the Vibrio genus, including Vibrio cholerae, increased in dominance within the plankton-associated bacterial community of the North Sea, where an unprecedented increase in bathing infections related to these bacteria was recently reported.
Among environmental variables, increased sea surface temperature explained 45\% of the variance in Vibrio data, supporting the view that ocean warming is favouring the spread of vibrios and may be the cause of the globally increasing trend in their associated diseases.}, issn = {1751-7362}, doi = {10.1038/ismej.2011.89}, url = {http://www.nature.com/articles/ismej201189}, author = {Vezzulli, Luigi and Brettar, Ingrid and Pezzati, Elisabetta and Reid, Philip C and Rita R Colwell and H{\"o}fle, Manfred G and Pruzzo, Carla} } @article {13829, title = {Use of Modality and Negation in Semantically-Informed Syntactic MT}, journal = {Computational Linguistics}, year = {2012}, month = {2012///}, pages = {1 - 48}, abstract = {This paper describes the resource- and system-building efforts of an eight-week Johns Hopkins University Human Language Technology Center of Excellence Summer Camp for Applied Language Exploration (SCALE-2009) on Semantically-Informed Machine Translation (SIMT). We describe a new modality/negation (MN) annotation scheme, the creation of a (publicly available) MN lexicon, and two automated MN taggers that we built using the annotation scheme and lexicon. Our annotation scheme isolates three components of modality and negation: a trigger (a word that conveys modality or negation), a target (an action associated with modality or negation) and a holder (an experiencer of modality). We describe how our MN lexicon was semi-automatically produced and we demonstrate that a structure-based MN tagger results in precision around 86\% (depending on genre) for tagging of a standard LDC data set.}, isbn = {0891-2017}, doi = {10.1162/COLI_a_00099}, url = {http://dx.doi.org/10.1162/COLI_a_00099}, author = {Baker,Kathryn and Bloodgood,Michael and Dorr, Bonnie J and Callison-Burch,Chris and Filardo,Nathaniel W. and Piatko,Christine and Levin,Lori and Miller,Scott} } @article {19611, title = {Preserving Link Privacy in Social Network Based Systems}, journal = {arXiv:1208.6189 [cs]}, year = {2012}, note = {Comment: 16 pages, 15 figures}, month = {2012/08/30/}, abstract = {A growing body of research leverages social network based trust relationships to improve the functionality of the system. However, these systems expose users{\textquoteright} trust relationships, which is considered sensitive information in today{\textquoteright}s society, to an adversary. In this work, we make the following contributions. First, we propose an algorithm that perturbs the structure of a social graph in order to provide link privacy, at the cost of a slight reduction in the utility of the social graph. Second, we define general metrics for characterizing the utility and privacy of perturbed graphs. Third, we evaluate the utility and privacy of our proposed algorithm using real world social graphs.
Finally, we demonstrate the applicability of our perturbation algorithm on a broad range of secure systems, including Sybil defenses and secure routing.}, keywords = {Computer Science - Cryptography and Security, Computer Science - Social and Information Networks}, url = {http://arxiv.org/abs/1208.6189}, author = {Mittal, Prateek and Charalampos Papamanthou and Song, Dawn} } @article {19668, title = {Quantitative measurement of allele-specific protein expression in a diploid yeast hybrid by LC-MS}, journal = {Molecular Systems Biology}, volume = {8}, year = {2012}, month = {2012/01/01/}, abstract = {Understanding the genetic basis of gene regulatory variation is a key goal of evolutionary and medical genetics. Regulatory variation can act in an allele-specific manner (cis-acting) or it can affect both alleles of a gene (trans-acting). Differential allele-specific expression (ASE), in which the expression of one allele differs from another in a diploid, implies the presence of cis-acting regulatory variation. While microarrays and high-throughput sequencing have enabled genome-wide measurements of transcriptional ASE, methods for measurement of protein ASE (pASE) have lagged far behind. We describe a flexible, accurate, and scalable strategy for measurement of pASE by liquid chromatography-coupled mass spectrometry (LC-MS). We apply this approach to a hybrid between the yeast species Saccharomyces cerevisiae and Saccharomyces bayanus. Our results provide the first analysis of the relative contribution of cis-acting and trans-acting regulatory differences to protein expression divergence between yeast species. Synopsis: A novel strategy for the quantitative measurement of allele-specific protein expression is used to infer the contributions of cis- and trans-acting factors influencing the divergence of protein levels between yeast species. Rigorous experimental controls and analyses confirm the accuracy of the new strategy for the quantitative measurement of allele-specific protein expression by high-throughput mass spectrometry. Analysis of allele-specific protein expression in an interspecies yeast hybrid and protein expression differences between species reveals that both cis-effects and trans-effects contribute to protein expression divergence between two yeast species, Saccharomyces cerevisiae and Saccharomyces bayanus. }, keywords = {allele specific, divergence, mass spectrometry, protein expression, proteomics}, url = {http://msb.embopress.org/content/8/1/602}, author = {Zia Khan and Bloom, Joshua S. and Amini, Sasan and Singh, Mona and Perlman, David H. and Caudy, Amy A. and Kruglyak, Leonid} } @article {16030, title = {Querying event sequences by exact match or similarity search: Design and empirical evaluation}, journal = {Interacting with Computers}, volume = {24}, year = {2012}, month = {2012/03//}, pages = {55 - 68}, abstract = {Specifying event sequence queries is challenging even for skilled computer professionals familiar with SQL. Most graphical user interfaces for database search use an exact match approach, which is often effective, but near misses may also be of interest. We describe a new similarity search interface, in which users specify a query by simply placing events on a blank timeline and retrieve a similarity-ranked list of results. Behind this user interface is a new similarity measure for event sequences which the users can customize by four decision criteria, enabling them to adjust the impact of missing, extra, or swapped events or the impact of time shifts.
We describe a use case with Electronic Health Records based on our ongoing collaboration with hospital physicians. A controlled experiment with 18 participants compared exact match and similarity search interfaces. We report on the advantages and disadvantages of each interface and suggest a hybrid interface combining the best of both.}, keywords = {Event sequence, Similan, similarity measure, Similarity Search, temporal categorical data, Temporal query interface}, isbn = {0953-5438}, doi = {10.1016/j.intcom.2012.01.003}, url = {http://www.sciencedirect.com/science/article/pii/S0953543812000124}, author = {Wongsuphasawat,Krist and Plaisant, Catherine and Taieb-Maimon,Meirav and Shneiderman, Ben} } @article {20385, title = {Role of GbpA protein, an important virulence-related colonization factor, for Vibrio cholerae{\textquoteright}s survival in the aquatic environment}, journal = {Environmental Microbiology Reports}, year = {2012}, month = {Jan-08-2012}, pages = {439 - 445}, abstract = {Vibrio cholerae N-acetyl glucosamine-binding protein A (GbpA) is a chitin binding protein and a virulence factor involved in the colonization of human intestine. We investigated the distribution and genetic variations of gbpA in 488 V. cholerae strains of environmental and clinical origin, belonging to different serogroups and biotypes. We found that the gene is consistently present and highly conserved including an environmental V. cholerae-related strain of ancestral origin. The gene was also consistently expressed in a number of representative V. cholerae strains cultured in laboratory aquatic microcosms under conditions simulating those found in temperate marine environments. Functional analysis carried out on V. cholerae O1 El Tor N16961 showed that GbpA is not involved in adhesion to inorganic surfaces but promotes interaction with environmental biotic substrates (plankton and bivalve hepatopancreas cells) representing known marine reservoir or host for the bacterium. It is suggested that the ability of GbpA to colonize human intestinal cells most probably originated from its primary function in the aquatic environment.}, doi = {10.1111/j.1758-2229.2012.00356.x}, url = {http://doi.wiley.com/10.1111/j.1758-2229.2012.00356.x}, author = {Stauder, Monica and Huq, Anwar and Pezzati, Elisabetta and Grim, Christopher J. and Ramoino, Paola and Pane, Luigi and Rita R Colwell and Pruzzo, Carla and Vezzulli, Luigi} } @article {17929, title = {Speeding Up Particle Trajectory Simulations under Moving Force Fields using GPUs}, journal = {Journal of Computing and Information Science in Engineering}, year = {2012}, month = {2012///}, abstract = {In this paper, we introduce a GPU-based framework for simulating particle trajectories under both static and dynamic force fields. By exploiting the highly parallel nature of the problem and making efficient use of the available hardware, our simulator exhibits a significant speedup over its CPU-based analog. We apply our framework to a specific experimental simulation: the computation of trapping probabilities associated with micron-sized silica beads in optical trapping workbenches. When evaluating large numbers of trajectories (4096), we see approximately a 356 times speedup of the GPU-based simulator over its CPU-based counterpart. }, author = {Patro,R. and Dickerson,J. P. and Bista,S. and Gupta,S.K.
and Varshney, Amitabh} } @article {19220, title = {Systems and Methods for Energy Harvesting in a Contained Fluid Circuit}, year = {2012}, month = {2012}, publisher = {University of Washington through its Center for Commercialization}, abstract = {Systems and methods for harvesting energy from a closed fluid circuit, such as a water pipe system in a building. An energy harvester can be installed at a point in a water circuit and can generate energy from pressure differentials caused when a valve is opened or closed at any other point in the water circuit that is in fluid communication with the energy harvester. The energy can be used to power, for example, a sensor and/or a transmitter.}, keywords = {Systems and Methods for Energy Harvesting in a Contained Fluid Circuit}, isbn = {WO/2012/021551}, url = {http://patentscope.wipo.int/search/en/WO2012021551}, author = {Campbell,Tim and Larson,Eric and Cohn, Gabriel and Alcaide,Ramses and Jon Froehlich and Patel,Shwetak} } @article {14398, title = {TACI: Taxonomy-Aware Catalog Integration}, journal = {IEEE Transactions on Knowledge and Data Engineering (TKDE)}, year = {2012}, month = {2012///}, abstract = {A fundamental data integration task faced by online commercial portals and commerce search engines is the integration of products coming from multiple providers to their product catalogs. In this scenario, the commercial portal has its own taxonomy (the {\textquotedblleft}master taxonomy{\textquotedblright}), while each data provider organizes its products into a different taxonomy (the {\textquotedblleft}provider taxonomy{\textquotedblright}). In this paper, we consider the problem of categorizing products from the data providers into the master taxonomy, while making use of the provider taxonomy information. Our approach is based on a taxonomy-aware processing step that adjusts the results of a text-based classifier to ensure that products that are close together in the provider taxonomy remain close in the master taxonomy. We formulate this intuition as a structured prediction optimization problem. To the best of our knowledge, this is the first approach that leverages the structure of taxonomies in order to enhance catalog integration. We propose algorithms that are scalable and thus applicable to the large datasets that are typical on the Web. We evaluate our algorithms on real-world data and we show that taxonomy-aware classification provides a significant improvement over existing approaches.}, author = {Papadimitriou,P. and Tsaparas,P. and Fuxman,A.
and Getoor, Lise} } @conference {16031, title = {Towards event sequence representation, reasoning and visualization for EHR data}, booktitle = {Proceedings of the 2nd ACM SIGHIT International Health Informatics Symposium}, series = {IHI {\textquoteright}12}, year = {2012}, month = {2012///}, pages = {801 - 806}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Efficient analysis of event sequences and the ability to answer time-related, clinically important questions can accelerate clinical research in several areas such as causality assessments, decision support systems, and retrospective studies. The Clinical Narrative Temporal Reasoning Ontology (CNTRO)-based system is designed for semantically representing, annotating, and inferring temporal relations and constraints for clinical events in Electronic Health Records (EHR) represented in both structured and unstructured ways. The LifeFlow system is designed to support an interactive exploration of event sequences using visualization techniques. The combination of the two systems will provide a comprehensive environment for users to visualize inferred temporal relationships from EHR data. This paper discusses our preliminary efforts on connecting the two systems and the benefits we envision from such an environment.}, keywords = {ehr, semantic web, temporal relation reasoning, time trend visualization}, isbn = {978-1-4503-0781-9}, doi = {10.1145/2110363.2110461}, url = {http://doi.acm.org/10.1145/2110363.2110461}, author = {Tao,Cui and Wongsuphasawat,Krist and Clark,Kimberly and Plaisant, Catherine and Shneiderman, Ben and Chute,Christopher G.} } @article {19618, title = {Verifying Search Results Over Web Collections}, journal = {arXiv:1204.5446 [cs]}, year = {2012}, month = {2012/04/24/}, abstract = {Searching accounts for one of the most frequently performed computations over the Internet as well as one of the most important applications of outsourced computing, producing results that critically affect users{\textquoteright} decision-making behaviors. As such, verifying the integrity of Internet-based searches over vast amounts of web contents is essential. We provide the first solution to this general security problem. We introduce the concept of an authenticated web crawler and present the design and prototype implementation of this new concept. An authenticated web crawler is a trusted program that computes a special "signature" $s$ of a collection of web contents it visits. Subject to this signature, web searches can be verified to be correct with respect to the integrity of their produced results. This signature also allows the verification of complicated queries on web pages, such as conjunctive keyword searches. In our solution, along with the web pages that satisfy any given search query, the search engine also returns a cryptographic proof. This proof, together with the signature $s$, enables any user to efficiently verify that no legitimate web pages are omitted from the result computed by the search engine, and that no pages that are non-conforming with the query are included in the result. An important property of our solution is that the proof size and the verification time both depend solely on the sizes of the query description and the query result, but not on the number or sizes of the web pages over which the search is performed. Our authentication protocols are based on standard Merkle trees and the more involved bilinear-map accumulators. 
As we experimentally demonstrate, the prototype implementation of our system provides a low communication overhead between the search engine and the user, and allows for fast verification of the returned results on the user side.}, keywords = {Computer Science - Cryptography and Security}, url = {http://arxiv.org/abs/1204.5446}, author = {Goodrich, Michael T. and Nguyen, Duy and Ohrimenko, Olga and Charalampos Papamanthou and Tamassia, Roberto and Triandopoulos, Nikos and Lopes, Cristina Videira} } @article {19127, title = {We are what we eat: how the diet of infants affects their gut microbiome}, journal = {Genome Biology}, volume = {13}, year = {2012}, month = {2012}, abstract = {Simultaneous analysis of the gut microbiome and host gene expression in infants reveals the impact of diet (breastfeeding versus formula) on host-microbiome interactions.}, author = {Pop, Mihai} } @article {19717, title = {Whole genome analysis of Leptospira licerasiae provides insight into leptospiral evolution and pathogenicity.}, journal = {PLoS Negl Trop Dis}, volume = {6}, year = {2012}, month = {2012}, pages = {e1853}, abstract = {

The whole genome analysis of two strains of the first intermediately pathogenic leptospiral species to be sequenced (Leptospira licerasiae strains VAR010 and MMD0835) provides insight into their pathogenic potential and deepens our understanding of leptospiral evolution. Comparative analysis of eight leptospiral genomes shows the existence of a core leptospiral genome comprising 1547 genes and 452 conserved genes restricted to infectious species (including L. licerasiae) that are likely to be pathogenicity-related. Comparisons of the functional content of the genomes suggest that L. licerasiae retains several proteins related to nitrogen, amino acid and carbohydrate metabolism which might help to explain why these Leptospira grow well in artificial media compared with pathogenic species. L. licerasiae strains VAR010(T) and MMD0835 possess two prophage elements. While one element is circular and shares homology with LE1 of L. biflexa, the second is cryptic and homologous to a previously identified but unnamed region in L. interrogans serovars Copenhageni and Lai. We also report a unique O-antigen locus in L. licerasiae comprised of a 6-gene cluster that is unexpectedly short compared with L. interrogans, in which analogous regions may include >90 such genes. Sequence homology searches suggest that these genes were acquired by lateral gene transfer (LGT). Furthermore, seven putative genomic islands ranging in size from 5 to 36 kb are also present, suggestive of antecedent LGT. How Leptospira become naturally competent remains to be determined, but considering the phylogenetic origins of the genes comprising the O-antigen cluster and other putative laterally transferred genes, L. licerasiae must be able to exchange genetic material with non-invasive environmental bacteria. The data presented here demonstrate that L. licerasiae is genetically more closely related to pathogenic than to saprophytic Leptospira and provide insight into the genomic bases for its infectiousness and its unique antigenic characteristics.

}, keywords = {DNA, Bacterial, Evolution, Molecular, Gene Transfer, Horizontal, Genome, Bacterial, Genomic islands, HUMANS, Leptospira, Molecular Sequence Data, Multigene Family, Prophages, Sequence Analysis, DNA, Virulence factors}, issn = {1935-2735}, doi = {10.1371/journal.pntd.0001853}, author = {Ricaldi, Jessica N and Fouts, Derrick E and Jeremy D Selengut and Harkins, Derek M and Patra, Kailash P and Moreno, Angelo and Lehmann, Jason S and Purushe, Janaki and Sanka, Ravi and Torres, Michael and Webster, Nicholas J and Vinetz, Joseph M and Matthias, Michael A} } @article {14592, title = {Accelerated evolution of 3{\textquoteright} avian FOXE1 genes, and thyroid and feather specific expression of chicken FoxE1}, journal = {BMC Evolutionary Biology}, volume = {11}, year = {2011}, month = {2011/10/15/}, pages = {302 - 302}, abstract = {The forkhead transcription factor gene E1 (FOXE1) plays an important role in regulation of thyroid development, palate formation and hair morphogenesis in mammals. However, avian FOXE1 genes have not been characterized and as such, codon evolution of FOXE1 orthologs in a broader evolutionary context of mammals and birds is not known.}, isbn = {1471-2148}, doi = {10.1186/1471-2148-11-302}, url = {http://www.biomedcentral.com/1471-2148/11/302}, author = {Yaklichkin,Sergey Yu and Darnell,Diana K and Pier,Maricela V and Antin,Parker B and Hannenhalli, Sridhar} } @article {16241, title = {Accurate and fast estimation of taxonomic profiles from metagenomic shotgun sequences}, journal = {BMC Genomics}, volume = {12}, year = {2011}, month = {2011/07/27/}, pages = {S4 - S4}, abstract = {A major goal of metagenomics is to characterize the microbial composition of an environment. The most popular approach relies on 16S rRNA sequencing, however this approach can generate biased estimates due to differences in the copy number of the gene between even closely related organisms, and due to PCR artifacts. The taxonomic composition can also be determined from metagenomic shotgun sequencing data by matching individual reads against a database of reference sequences. One major limitation of prior computational methods used for this purpose is the use of a universal classification threshold for all genes at all taxonomic levels.}, isbn = {1471-2164}, doi = {10.1186/1471-2164-12-S2-S4}, url = {http://www.biomedcentral.com/1471-2164/12/S2/S4}, author = {Liu,Bo and Gibbons,Theodore and Ghodsi,Mohammad and Treangen,Todd and Pop, Mihai} } @article {19650, title = {Accurate proteome-wide protein quantification from high-resolution 15N mass spectra}, journal = {Genome Biology}, volume = {12}, year = {2011}, month = {2011/12/19/}, abstract = {In quantitative mass spectrometry-based proteomics, the metabolic incorporation of a single source of 15N-labeled nitrogen has many advantages over using stable isotope-labeled amino acids. However, the lack of a robust computational framework for analyzing the resulting spectra has impeded wide use of this approach. We have addressed this challenge by introducing a new computational methodology for analyzing 15N spectra in which quantification is integrated with identification. Application of this method to an Escherichia coli growth transition reveals significant improvement in quantification accuracy over previous methods.}, isbn = {1465-6906}, url = {http://genomebiology.com/2011/12/12/R122/abstract}, author = {Zia Khan and Amini, Sasan and Bloom, Joshua S. and Ruse, Cristian and Caudy, Amy A.
and Kruglyak, Leonid and Singh, Mona and Perlman, David H. and Tavazoie, Saeed} } @article {15158, title = {On Achieving the {\textquotedblleft}Best of Both Worlds{\textquotedblright} in Secure Multiparty Computation}, journal = {SIAM Journal on Computing}, volume = {40}, year = {2011}, month = {2011///}, pages = {122 - 122}, abstract = {Two settings are traditionally considered for secure multiparty computation, depending on whether or not a majority of the parties are assumed to be honest. Existing protocols that assume an honest majority provide {\textquotedblleft}full security{\textquotedblright} (and, in particular, guarantee output delivery and fairness) when this assumption holds, but are completely insecure if this assumption is violated. On the other hand, known protocols tolerating an arbitrary number of corruptions do not guarantee fairness or output delivery even if only a single party is dishonest. It is natural to wonder whether it is possible to achieve the {\textquotedblleft}best of both worlds{\textquotedblright}: namely, a single protocol that simultaneously achieves the best possible security in both the above settings. Here, we rule out this possibility (at least for general functionalities) and show some positive results regarding what can be achieved. }, author = {Ishai,Y. and Katz, Jonathan and Kushilevitz,E. and Lindell,Y. and Petrank,E.} } @conference {16042, title = {Active progress bars: facilitating the switch to temporary activities}, booktitle = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {1963 - 1968}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In this paper, we seek to find a better way of effective task management when a progress bar interrupts the user{\textquoteright}s primary activity. We propose to augment progress bars with user controlled functionalities facilitating the switch to temporary activities. We detail a taxonomy of waiting period contexts and possible temporary tasks, then report on 5 participatory design sessions, and a follow-up survey of 96 respondents. Finally, we describe an early prototype of active progress bars, and report on initial use.}, keywords = {Frustration, participatory design, progress bars, task switching}, isbn = {978-1-4503-0268-5}, doi = {10.1145/1979742.1979883}, url = {http://doi.acm.org/10.1145/1979742.1979883}, author = {Hurter,Christophe and Girouard,Audrey and Riche,Nathalie and Plaisant, Catherine} } @article {16044, title = {Analyzing Incident Management Event Sequences with Interactive Visualization}, journal = {Transportation Research Board 90th Annual Meeting Compendium of Papers}, year = {2011}, month = {2011///}, abstract = {While traditional safety and incident analysis has mostly focused on incident attributes data, such as the location and time of the incident, there are other aspects in incident response that are temporal in nature and are more difficult to analyze. We describe a visual analytics tool for temporal data exploration, called LifeFlow, used for the analysis of incident response data. LifeFlow provides user-controlled overviews of event sequences (e.g., notification, arrival, clearance, etc.). It allows analysts to interactively explore temporal patterns, find anomalies in sequences and compare management practices. This type of analysis can potentially lead to process improvements and save human lives.
We used NCHRP traffic incident data comprising more than 200,000 incidents reported by 8 different agencies over a period of about 28 months. Our experience suggests that even non-expert analysts can spot many anomalies in the data using the LifeFlow overviews, and are able to rapidly ask many questions and find differences between agencies. }, author = {Guerra G{\'o}mez,J. and Wongsuphasawat,K. and Wang,T. D and Pack,M. and Plaisant, Catherine} } @article {17745, title = {Annotated probabilistic temporal logic}, journal = {ACM Trans. Comput. Logic}, volume = {12}, year = {2011}, month = {2011/01//}, pages = {14:1{\textendash}14:44}, abstract = {The semantics of most logics of time and probability is given via a probability distribution over threads, where a thread is a structure specifying what will be true at different points in time (in the future). When assessing the probabilities of statements such as {\textquotedblleft}Event a will occur within 5 units of time of event b,{\textquotedblright} there are many different semantics possible, even when assessing the truth of this statement within a single thread. We introduce the syntax of annotated probabilistic temporal (APT) logic programs and axiomatically introduce the key notion of a frequency function (for the first time) to capture different types of intrathread reasoning, and then provide a semantics for intrathread and interthread reasoning in APT logic programs parameterized by such frequency functions. We develop a comprehensive set of complexity results for consistency checking and entailment in APT logic programs, together with sound and complete algorithms to check consistency and entailment. The basic algorithms use linear programming, but we then show how to substantially and correctly reduce the sizes of these linear programs to yield better computational properties. We describe a real world application we are developing using APT logic programs.}, keywords = {frequency functions, imprecise probabilities, Probabilistic and temporal reasoning, threads}, isbn = {1529-3785}, doi = {10.1145/1877714.1877720}, url = {http://doi.acm.org/10.1145/1877714.1877720}, author = {Shakarian,Paulo and Parker,Austin and Simari,Gerardo and V.S. Subrahmanian} } @conference {19269, title = {Applying graphics processor acceleration in a software defined radio prototyping environment}, booktitle = {2011 22nd IEEE International Symposium on Rapid System Prototyping (RSP)}, year = {2011}, month = {2011}, pages = {67 - 73}, abstract = {With higher bandwidth requirements and more complex protocols, software defined radio (SDR) has ever-growing computational demands. SDR applications have different levels of parallelism that can be exploited on multicore platforms, but design and programming difficulties have inhibited the adoption of specialized multicore platforms like graphics processors (GPUs). In this work we propose a new design flow that augments a popular existing SDR development environment (GNU Radio), with a dataflow foundation and a stand-alone GPU accelerated library. The approach gives an SDR developer the ability to prototype a GPU accelerated application and explore its design space quickly and effectively.
We demonstrate this design flow on a standard SDR benchmark and show that deciding how to utilize a GPU can be non-trivial for even relatively simple applications.}, keywords = {Acceleration, coprocessors, dataflow foundation, GNU radio, Graphics processing unit, graphics processor acceleration, Kernel, Libraries, multicore platforms, Multicore processing, PARALLEL PROCESSING, Pipelines, Protocols, software defined radio prototyping environment, software radio, stand-alone GPU accelerated library}, author = {Plishker,W. and Zaki, G.F. and Bhattacharyya, Shuvra S. and Clancy, C. and Kuykendall, J.} } @conference {17564, title = {Approximation algorithms for throughput maximization in wireless networks with delay constraints}, booktitle = {2011 Proceedings IEEE INFOCOM}, year = {2011}, month = {2011/04/10/15}, pages = {1116 - 1124}, publisher = {IEEE}, organization = {IEEE}, abstract = {We study the problem of throughput maximization in multi-hop wireless networks with end-to-end delay constraints for each session. This problem has received much attention starting with the work of Grossglauser and Tse (2002), and it has been shown that there is a significant tradeoff between the end-to-end delays and the total achievable rate. We develop algorithms to compute such tradeoffs with provable performance guarantees for arbitrary instances, with general interference models. Given a target delay-bound Δ(c) for each session c, our algorithm gives a stable flow vector with a total throughput within a factor of O(log Δm / log log Δm) of the maximum, so that the per-session (end-to-end) delay is O(((log Δm / log log Δm) Δ(c))^2), where Δm = max_c Δ(c); note that these bounds depend only on the delays, and not on the network size, and this is the first such result, to our knowledge.}, keywords = {Approximation algorithms, Approximation methods, approximation theory, Delay, delay constraints, delays, general interference model, Interference, multihop wireless networks, optimisation, Optimized production technology, radio networks, radiofrequency interference, target delay bound, Throughput, throughput maximization, Wireless networks}, isbn = {978-1-4244-9919-9}, doi = {10.1109/INFCOM.2011.5934887}, author = {Guanhong Pei and Anil Kumar,V. S and Parthasarathy,S. and Srinivasan, Aravind} } @article {18547, title = {Architecting for innovation}, journal = {SIGCOMM Comput. Commun. Rev.}, volume = {41}, year = {2011}, month = {2011///}, pages = {24 - 36}, abstract = {We argue that the biggest problem with the current Internet architecture is not a particular functional deficiency, but its inability to accommodate innovation. To address this problem we propose a minimal architectural "framework" in which comprehensive architectures can reside. The proposed Framework for Internet Innovation (FII) --- which is derived from the simple observation that network interfaces should be extensible and abstract --- allows for a diversity of architectures to coexist, communicate, and evolve. We demonstrate FII{\textquoteright}s ability to accommodate diversity and evolution with a detailed examination of how information flows through the architecture and with a skeleton implementation of the relevant interfaces.}, keywords = {diversity, Evolution, innovation, internet architecture}, isbn = {0146-4833}, doi = {10.1145/2002250.2002256}, url = {http://doi.acm.org/10.1145/2002250.2002256}, author = {Koponen,Teemu and Shenker,Scott and Balakrishnan,Hari and Feamster, Nick and Ganichev,Igor and Ghodsi,Ali and Godfrey,P. 
Brighten and McKeown,Nick and Parulkar,Guru and Raghavan,Barath and Rexford,Jennifer and Arianfar,Somaya and Kuptsov,Dmitriy} } @article {16246, title = {Assessing the benefits of using mate-pairs to resolve repeats in de novo short-read prokaryotic assemblies}, journal = {BMC Bioinformatics}, volume = {12}, year = {2011}, month = {2011/04/13/}, pages = {95}, abstract = {Next-generation sequencing technologies allow genomes to be sequenced more quickly and less expensively than ever before. However, as sequencing technology has improved, the difficulty of de novo genome assembly has increased, due in large part to the shorter reads generated by the new technologies. The use of mated sequences (referred to as mate-pairs) is a standard means of disambiguating assemblies to obtain a more complete picture of the genome without resorting to manual finishing. Here, we examine the effectiveness of mate-pair information in resolving repeated sequences in the DNA (a paramount issue to overcome). While it has been empirically accepted that mate-pairs improve assemblies, and a variety of assemblers use mate-pairs in the context of repeat resolution, the effectiveness of mate-pairs in this context has not been systematically evaluated in previous literature.}, isbn = {1471-2105}, doi = {10.1186/1471-2105-12-95}, url = {http://www.biomedcentral.com/1471-2105/12/95}, author = {Wetzel,Joshua and Kingsford, Carl and Pop, Mihai} } @conference {13069, title = {AVSS 2011 demo session: A large-scale benchmark dataset for event recognition in surveillance video}, booktitle = {Advanced Video and Signal-Based Surveillance (AVSS), 2011 8th IEEE International Conference on}, year = {2011}, month = {2011/09/30/2}, pages = {527 - 528}, abstract = {We introduce to the surveillance community the VIRAT Video Dataset [1], which is a new large-scale surveillance video dataset designed to assess the performance of event recognition algorithms in realistic scenes.}, doi = {10.1109/AVSS.2011.6027400}, author = {Oh,Sangmin and Hoogs,Anthony and Perera,Amitha and Cuntoor,Naresh and Chen,Chia-Chih and Lee,Jong Taek and Mukherjee,Saurajit and Aggarwal, JK and Lee,Hyungtae and Davis, Larry S. and Swears,Eran and Wang,Xiaoyang and Ji,Qiang and Reddy,Kishore and Shah,Mubarak and Vondrick,Carl and Pirsiavash,Hamed and Ramanan,Deva and Yuen,Jenny and Torralba,Antonio and Song,Bi and Fong,Anesco and Roy-Chowdhury,Amit and Desai,Mita} } @article {16247, title = {Bacillus Anthracis Comparative Genome Analysis in Support of the Amerithrax Investigation}, journal = {Proceedings of the National Academy of Sciences}, volume = {108}, year = {2011}, month = {2011/03/22/}, pages = {5027 - 5032}, abstract = {Before the anthrax letter attacks of 2001, the developing field of microbial forensics relied on microbial genotyping schemes based on a small portion of a genome sequence. Amerithrax, the investigation into the anthrax letter attacks, applied high-resolution whole-genome sequencing and comparative genomics to identify key genetic features of the letters{\textquoteright} Bacillus anthracis Ames strain. During systematic microbiological analysis of the spore material from the letters, we identified a number of morphological variants based on phenotypic characteristics and the ability to sporulate. The genomes of these morphological variants were sequenced and compared with that of the B. anthracis Ames ancestor, the progenitor of all B. anthracis Ames strains. 
Through comparative genomics, we identified four distinct loci with verifiable genetic mutations. Three of the four mutations could be directly linked to sporulation pathways in B. anthracis and more specifically to the regulation of the phosphorylation state of Spo0F, a key regulatory protein in the initiation of the sporulation cascade, thus linking phenotype to genotype. None of these variant genotypes were identified in single-colony environmental B. anthracis Ames isolates associated with the investigation. These genotypes were identified only in B. anthracis morphotypes isolated from the letters, indicating that the variants were not prevalent in the environment, not even the environments associated with the investigation. This study demonstrates the forensic value of systematic microbiological analysis combined with whole-genome sequencing and comparative genomics.}, isbn = {0027-8424, 1091-6490}, doi = {10.1073/pnas.1016657108}, url = {http://www.pnas.org/content/108/12/5027}, author = {Rasko,David A and Worsham,Patricia L and Abshire,Terry G and Stanley,Scott T and Bannan,Jason D and Wilson,Mark R and Langham,Richard J and Decker,R. Scott and Jiang,Lingxia and Read,Timothy D. and Phillippy,Adam M and Salzberg,Steven L. and Pop, Mihai and Van Ert,Matthew N and Kenefic,Leo J and Keim,Paul S and Fraser-Liggett,Claire M and Ravel,Jacques} } @article {19114, title = {Bambus 2: scaffolding metagenomes}, journal = {Bioinformatics}, volume = {27}, year = {2011}, month = {2011}, pages = {2964 - 2971}, author = {Koren, S. and Treangen, T.J. and Pop, Mihai} } @conference {18551, title = {Boosting the scalability of botnet detection using adaptive traffic sampling}, booktitle = {Proceedings of the 6th ACM Symposium on Information, Computer and Communications Security}, series = {ASIACCS {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {124 - 134}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Botnets pose a serious threat to the health of the Internet. Most current network-based botnet detection systems require deep packet inspection (DPI) to detect bots. Because DPI is a computationally costly process, such detection systems cannot handle large volumes of traffic typical of large enterprise and ISP networks. In this paper, we propose a system that aims to efficiently and effectively identify a small number of suspicious hosts that are likely bots. Their traffic can then be forwarded to DPI-based botnet detection systems for fine-grained inspection and accurate botnet detection. By using a novel adaptive packet sampling algorithm and a scalable spatial-temporal flow correlation approach, our system is able to substantially reduce the volume of network traffic that goes through DPI, thereby boosting the scalability of existing botnet detection systems. We implemented a proof-of-concept version of our system, and evaluated it using real-world legitimate and botnet-related network traces. 
Our experimental results are very promising and suggest that our approach can enable the deployment of botnet-detection systems in large, high-speed networks.}, keywords = {adaptive sampling, botnet, intrusion detection, NETWORK SECURITY}, isbn = {978-1-4503-0564-8}, doi = {10.1145/1966913.1966930}, url = {http://doi.acm.org/10.1145/1966913.1966930}, author = {Zhang,Junjie and Luo,Xiapu and Perdisci,Roberto and Gu,Guofei and Lee,Wenke and Feamster, Nick} } @article {18510, title = {Broadband internet performance: A view from the gateway}, journal = {SIGCOMM Computer Communication Review}, volume = {41}, year = {2011}, month = {2011///}, pages = {134}, abstract = {We present the first study of network access link performance measured directly from home gateway devices. Policymakers, ISPs, and users are increasingly interested in studying the performance of Internet access links. Because of many confounding factors in a home network or on end hosts, however, thoroughly understanding access network performance requires deploying measurement infrastructure in users{\textquoteright} homes as gateway devices. In conjunction with the Federal Communications Commission{\textquoteright}s study of broadband Internet access in the United States, we study the throughput and latency of network access links using longitudinal measurements from nearly 4,000 gateway devices across 8 ISPs from a deployment of over 4,200 devices. We study the performance users achieve and how various factors ranging from the user{\textquoteright}s choice of modem to the ISP{\textquoteright}s traffic shaping policies can affect performance. Our study yields many important findings about the characteristics of existing access networks. Our findings also provide insights into the ways that access network performance should be measured and presented to users, which can help inform ongoing broader efforts to benchmark the performance of access networks. }, author = {Sundaresan,S. and de Donato,W. and Feamster, Nick and Teixeira,R. and Crawford,S. and Pescap{\`e},A.} } @article {12984, title = {Can Deliberately Incomplete Gene Sample Augmentation Improve a Phylogeny Estimate for the Advanced Moths and Butterflies (Hexapoda: Lepidoptera)?}, journal = {Systematic Biology}, volume = {60}, year = {2011}, month = {2011/12/01/}, pages = {782 - 796}, abstract = {This paper addresses the question of whether one can economically improve the robustness of a molecular phylogeny estimate by increasing gene sampling in only a subset of taxa, without having the analysis invalidated by artifacts arising from large blocks of missing data. Our case study stems from an ongoing effort to resolve poorly understood deeper relationships in the large clade Ditrysia (>150,000 species) of the insect order Lepidoptera (butterflies and moths). Seeking to remedy the overall weak support for deeper divergences in an initial study based on five nuclear genes (6.6 kb) in 123 exemplars, we nearly tripled the total gene sample (to 26 genes, 18.4 kb) but only in a third (41) of the taxa. The resulting partially augmented data matrix (45\% intentionally missing data) consistently increased bootstrap support for groupings previously identified in the five-gene (nearly) complete matrix, while introducing no contradictory groupings of the kind that missing data have been predicted to produce. Our results add to growing evidence that data sets differing substantially in gene and taxon sampling can often be safely and profitably combined. 
The strongest overall support for nodes above the family level came from including all nucleotide changes, while partitioning sites into sets undergoing mostly nonsynonymous versus mostly synonymous change. In contrast, support for the deepest node for which any persuasive molecular evidence has yet emerged (78{\textendash}85\% bootstrap) was weak or nonexistent unless synonymous change was entirely excluded, a result plausibly attributed to compositional heterogeneity. This node (Gelechioidea + Apoditrysia), tentatively proposed by previous authors on the basis of four morphological synapomorphies, is the first major subset of ditrysian superfamilies to receive strong statistical support in any phylogenetic study. A {\textquotedblleft}more-genes-only{\textquotedblright} data set (41 taxa{\texttimes}26 genes) also gave strong signal for a second deep grouping (Macrolepidoptera) that was obscured, but not strongly contradicted, in more taxon-rich analyses.}, keywords = {Ditrysia, gene sampling, Hexapoda, Lepidoptera, missing data, molecular phylogenetics, nuclear genes, taxon sampling}, isbn = {1063-5157, 1076-836X}, doi = {10.1093/sysbio/syr079}, url = {http://sysbio.oxfordjournals.org/content/60/6/782}, author = {Cho,Soowon and Zwick,Andreas and Regier,Jerome C and Mitter,Charles and Cummings, Michael P. and Yao,Jianxiu and Du,Zaile and Zhao,Hong and Kawahara,Akito Y and Weller,Susan and Davis,Donald R and Baixeras,Joaquin and Brown,John W and Parr,Cynthia} } @article {17571, title = {Capacity of wireless networks under SINR interference constraints}, journal = {Wireless Networks}, volume = {17}, year = {2011}, month = {2011///}, pages = {1605 - 1624}, abstract = {A fundamental problem in wireless networks is to estimate their throughput capacity{\textemdash}given a set of wireless nodes and a set of connections, what is the maximum rate at which data can be sent on these connections. Most of the research in this direction has focused either on random distributions of points, or has assumed simple graph-based models for wireless interference. In this paper, we study the capacity estimation problem using a realistic Signal to Interference Plus Noise Ratio (SINR) model for interference, on arbitrary wireless networks without any assumptions on node distributions. The problem becomes much more challenging for this setting, because of the non-locality of the SINR model. Recent work by Moscibroda et al. (IEEE INFOCOM 2006, ACM MobiHoc 2006) has shown that the throughput achieved by using SINR models can differ significantly from that obtained by using graph-based models. In this work, we develop polynomial time algorithms to provably approximate the throughput capacity of wireless network under the SINR model.}, isbn = {1022-0038}, url = {http://dx.doi.org/10.1007/s11276-011-0367-2}, author = {Chafekar,Deepti and Anil Kumar,V. and Marathe,Madhav and Parthasarathy,Srinivasan and Srinivasan, Aravind} } @article {19369, title = {Cell cycle dependent TN-C promoter activity determined by live cell imaging.}, journal = {Cytometry. Part A : the journal of the International Society for Analytical Cytology}, volume = {79}, year = {2011}, month = {2011 Mar}, pages = {192-202}, abstract = {The extracellular matrix protein tenascin-C plays a critical role in development, wound healing, and cancer progression, but how it is controlled and how it exerts its physiological responses remain unclear. 
By quantifying the behavior of live cells with phase contrast and fluorescence microscopy, we examine the dynamic regulation of TN-C promoter activity. We employ an NIH 3T3 cell line stably transfected with the TN-C promoter ligated to the gene sequence for destabilized green fluorescent protein (GFP). Fully automated image analysis routines, validated by comparison with data derived from manual segmentation and tracking of single cells, are used to quantify changes in the cellular GFP in hundreds of individual cells throughout their cell cycle during live cell imaging experiments lasting 62 h. We find that individual cells vary substantially in their expression patterns over the cell cycle, but that on average TN-C promoter activity increases during the last 40\% of the cell cycle. We also find that the increase in promoter activity is proportional to the activity earlier in the cell cycle. This work illustrates the application of live cell microscopy and automated image analysis of a promoter-driven GFP reporter cell line to identify subtle gene regulatory mechanisms that are difficult to uncover using population-averaged measurements.}, keywords = {Animals, cell cycle, Gene Expression Regulation, Green Fluorescent Proteins, Image Processing, Computer-Assisted, Mice, Microscopy, Fluorescence, Microscopy, Phase-Contrast, NIH 3T3 Cells, Promoter Regions, Genetic, Tenascin}, issn = {1552-4930}, doi = {10.1002/cyto.a.21028}, author = {Halter, Michael and Sisan, Daniel R and Chalfoun, Joe and Stottrup, Benjamin L and Cardone, Antonio and Dima,Alden A. and Tona, Alessandro and Plant,Anne L. and Elliott, John T} } @conference {12447, title = {Component-based restoration of speckled images}, booktitle = {2011 18th IEEE International Conference on Image Processing (ICIP)}, year = {2011}, month = {2011/09/11/14}, pages = {2797 - 2800}, publisher = {IEEE}, organization = {IEEE}, abstract = {Many coherent imaging modalities are characterized by a multiplicative noise known as speckle, which often makes the interpretation of data difficult. In this paper, we present a speckle reduction algorithm based on separating the structure and texture components of SAR images. An iterative algorithm based on surrogate functionals is presented that solves the component optimization formulation. Experiments indicate that the proposed method performs favorably compared to state-of-the-art speckle reduction methods.}, keywords = {coherent imaging modalities, component optimization formulation, component-based restoration, Dictionaries, image restoration, iterative algorithm, iterative methods, multiplicative noise, NOISE, optimisation, radar imaging, SAR images, Speckle, speckle reduction algorithm, speckled images, structure components, surrogate functionals, synthetic aperture radar, texture components, transforms, TV}, isbn = {978-1-4577-1304-0}, doi = {10.1109/ICIP.2011.6116252}, author = {Patel, Vishal M. and Easley,G. R and Chellapa, Rama} } @article {18796, title = {A computational framework for authoring and searching product design specifications}, journal = {Advanced Engineering Informatics}, volume = {25}, year = {2011}, month = {2011/08//}, pages = {516 - 534}, abstract = {The development of product design specifications (PDS) is an important part of the product development process. Incompleteness, ambiguity, or inconsistency in the PDS can lead to problems during the design process and may require unnecessary design iterations. This generally results in increased design time and cost. 
Currently, in many organizations, PDS are written using word processors. Since documents written by different authors can be inconsistent in style and word choice, it is difficult to automatically search for specific requirements. Moreover, this approach does not allow the possibility of automated design verification and validation against the design requirements and specifications. In this paper, we present a computational framework and a software tool based on this framework for writing, annotating, and searching computer-interpretable PDS. Our approach allows authors to write requirement statements in natural language to be consistent with the existing authoring practice. However, using mathematical expressions, keywords from predefined taxonomies, and other metadata, the author of PDS can then annotate different parts of the requirement statements. This approach provides unambiguous meaning to the information contained in PDS, and helps to eliminate mistakes later in the process when designers must interpret requirements. Our approach also enables users to construct a new PDS document from the results of the search for requirements of similar devices and in similar contexts. This capability speeds up the process of creating PDS and helps authors write more detailed documents by utilizing previous, well-written PDS documents. Our approach also enables checking for internal inconsistencies in the requirement statements. }, keywords = {Engineering design, Product design specifications, Requirements engineering}, isbn = {1474-0346}, doi = {10.1016/j.aei.2011.02.001}, url = {http://www.sciencedirect.com/science/article/pii/S1474034611000061}, author = {Weissman,Alexander and Petrov,Martin and Gupta, Satyandra K.} } @article {19266, title = {Dataflow-based Design and Implementation of Image Processing Applications}, year = {2011}, month = {2011}, abstract = {Dataflow is a well-known computational model and is widely used for expressing the functionality of digital signal processing (DSP) applications, such as audio and video data stream processing, digital communications, and image processing. These applications usually require real-time processing capabilities and have critical performance constraints. Dataflow provides a formal mechanism for describing specifications of DSP applications, imposes minimal data-dependency constraints in specifications, and is effective in exposing and exploiting task or data level parallelism for achieving high performance implementations. To demonstrate dataflow-based design methods in a manner that is concrete and easily adapted to different platforms and back-end design tools, we present in this report a number of case studies based on the lightweight dataflow (LWDF) programming methodology. LWDF is designed as a "minimalistic" approach for integrating coarse grain dataflow programming structures into arbitrary simulation- or platform-oriented languages, such as C, C++, CUDA, MATLAB, SystemC, Verilog, and VHDL. In particular, LWDF requires minimal dependence on specialized tools or libraries. This feature --- together with the rigorous adherence to dataflow principles throughout the LWDF design framework --- allows designers to integrate and experiment with dataflow modeling approaches relatively quickly and flexibly into existing design methodologies and processes. 
}, keywords = {Technical Report}, isbn = {UMIACS-TR-2011-11}, doi = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/11403}, author = {Chung-Ching Shen and Plishker,William and Bhattacharyya, Shuvra S.} } @article {19273, title = {Dataflow-Based Implementation of Layered Sensing Applications}, year = {2011}, month = {2011}, abstract = {This report describes a new dataflow-based technology and associated design tools for high-productivity design, analysis, and optimization of layered sensing software for signal processing systems. Our approach provides novel capabilities, based on the principles of task-level dataflow analysis, for exploring and optimizing interactions across application behavior; operational context; high performance embedded processing platforms; and implementation constraints. In particular, we introduce and deliver novel software tools, called the targeted dataflow interchange format (TDIF) and Dataflow Interchange Format Markup Language (DIFML), for design and implementation of layered sensing and signal processing systems. The TDIF-CUDA (Compute Unified Device Architecture) environment is a graphics processing unit targeted software synthesis tool that provides a unique integration of dynamic dataflow modeling; retargetable actor construction; software synthesis; and instrumentation-based schedule evaluation and tuning. The DIFML package is a software package for the DIFML format, which is an Extensible Markup Language (XML)-based format for exchanging information between DIF and other tools.}, keywords = {*COMPUTER AIDED DESIGN, *DATA FUSION, *DATAFLOW, *LAYERS, *SOFTWARE TOOLS, COMPUTER PROGRAMMING AND SOFTWARE, DETECTION, High performance computing, LAYERED SENSING, OPTIMIZATION, Signal processing, synthesis, T2KA}, author = {Bhattacharyya, Shuvra S. and Chung-Ching Shen and Plishker,William and Sane, Nimish and Wu, Hsiang-Huang and Gu, Ruirui} } @conference {19260, title = {Design methods for Wireless Sensor Network Building Energy Monitoring Systems}, booktitle = {2011 IEEE 36th Conference on Local Computer Networks (LCN)}, year = {2011}, month = {2011}, pages = {974 - 981}, abstract = {In this paper, we present a new energy analysis method for evaluating energy consumption of embedded sensor nodes at the application level and the network level. Then we apply the proposed energy analysis method to develop new energy management schemes in order to maximize lifetime for Wireless Sensor Network Building Energy Monitoring Systems (WSNBEMS). At the application level, we develop a new design approach that uses dataflow techniques to model the application-level interfacing behavior between the processor and sensors on an embedded sensor node. At the network level, we analyze the energy consumption of the IEEE 802.15.4 MAC functionality. Based on our techniques for modeling and energy analysis, we have implemented an optimized WSNBEMS for a real building, and validated our energy analysis techniques through measurements on this implementation. The performance of our implementation is also evaluated in terms of monitoring accuracy and energy consumption savings. 
We have demonstrated that by applying the proposed scheme, system lifetime can be improved significantly without affecting monitoring accuracy.}, keywords = {Analytical models, application-level interfacing behavior, building energy monitoring system, Buildings, dataflow technique, embedded sensor node, energy analysis method, Energy consumption, energy management systems, Energy resolution, IEEE 802.15.4 MAC functionality, Monitoring, OPTIMIZATION, wireless sensor network, Wireless sensor networks, WSNBEMS, Zigbee}, author = {Cho, Inkeun and Chung-Ching Shen and Potbhare, S. and Bhattacharyya, Shuvra S. and Goldsman,N.} } @conference {19263, title = {A design tool for efficient mapping of multimedia applications onto heterogeneous platforms}, booktitle = {2011 IEEE International Conference on Multimedia and Expo (ICME)}, year = {2011}, month = {2011}, pages = {1 - 6}, abstract = {Development of multimedia systems on heterogeneous platforms is a challenging task with existing design tools due to a lack of rigorous integration between high level abstract modeling, and low level synthesis and analysis. In this paper, we present a new dataflow-based design tool, called the targeted dataflow interchange format (TDIF), for design, analysis, and implementation of embedded software for multimedia systems. Our approach provides novel capabilities, based on the principles of task-level dataflow analysis, for exploring and optimizing interactions across application behavior; operational context; heterogeneous platforms, including high performance embedded processing architectures; and implementation constraints.}, keywords = {Dataflow graphs, design tools, embedded signal processing, software synthesis}, author = {Chung-Ching Shen and Wu, Hsiang-Huang and Sane, N. and Plishker,W. and Bhattacharyya, Shuvra S.} } @article {14317, title = {Disaggregated End-Use Energy Sensing for the Smart Grid}, journal = {IEEE Pervasive Computing}, volume = {10}, year = {2011}, month = {2011/03//Jan}, pages = {28 - 39}, abstract = {This article surveys existing and emerging disaggregation techniques for energy-consumption data and highlights signal features that might be used to sense disaggregated data in an easily installed and cost-effective manner.}, keywords = {Calibration, disaggregated end-use energy sensing, Disaggregated energy sensing, disaggregation data techniques, Electricity, Energy consumption, Energy efficiency, energy-consumption data, Gas, Home appliances, Sensors, Smart grid, Smart grids, smart power grids, Sustainability, Water}, isbn = {1536-1268}, doi = {10.1109/MPRV.2010.74}, author = {Jon Froehlich and Larson,E. and Gupta,S. and Cohn,G. and Reynolds,M. and Patel,S.} } @article {16256, title = {DNACLUST: accurate and efficient clustering of phylogenetic marker genes}, journal = {BMC Bioinformatics}, volume = {12}, year = {2011}, month = {2011/06/30/}, pages = {271 - 271}, abstract = {Clustering is a fundamental operation in the analysis of biological sequence data. New DNA sequencing technologies have dramatically increased the rate at which we can generate data, resulting in datasets that cannot be efficiently analyzed by traditional clustering methods.}, isbn = {1471-2105}, doi = {10.1186/1471-2105-12-271}, url = {http://www.biomedcentral.com/1471-2105/12/271}, author = {Ghodsi,Mohammadreza and Liu,Bo and Pop, Mihai} } @conference {13541, title = {Document Image Classification and Labeling using Multiple Instance Learning}, booktitle = {Intl. Conf. 
on Document Analysis and Recognition (ICDAR 11)}, year = {2011}, month = {2011///}, pages = {1059 - 1063}, abstract = {The labeling of large sets of images for training or testing analysis systems can be a very costly and time-consuming process. Multiple instance learning (MIL) is a generalization of traditional supervised learning, which relaxes the need for exact labels on training instances. Instead, the labels are required only for a set of instances known as bags. In this paper, we apply MIL to the retrieval and localization of signatures and the retrieval of images containing machine-printed text, and show that a gain of 15-20\% in performance can be achieved over supervised learning with weak labeling. We also compare our approach to supervised learning with fully annotated training data and report a competitive accuracy for MIL. Through experiments on real-world datasets, we show that MIL is a good alternative when the training data has only document-level annotation.}, author = {Kumar,Jayant and Pillai,Jaishanker and David Doermann} } @article {19267, title = {The DSPCAD Integrative Command Line Environment: Introduction to DICE Version 1.1}, year = {2011}, month = {2011}, abstract = {DICE (the DSPCAD Integrative Command Line Environment) is a package of utilities that facilitates efficient management of software projects. Key areas of emphasis in DICE are cross-platform operation, support for projects that integrate heterogeneous programming languages, and support for applying and integrating different kinds of design and testing methodologies. The package is being developed at the University of Maryland to facilitate the research and teaching of methods for implementation, testing, evolution, and revision of engineering software. The package is also being developed as a foundation for developing experimental research software for techniques and tools in the area of computer-aided design (CAD) of digital signal processing (DSP) systems. The package is intended for cross-platform operation, and is currently being developed and used actively on the Linux, Mac OS, Solaris, and Windows (equipped with Cygwin) platforms. This report provides an introduction to DICE, and provides background on some of the key features in DICE Version 1.1. This report also gives a brief introduction to dicelang, which is a plug-in package for DICE that provides additional utilities, libraries, and tools for managing software projects in specific programming languages.}, keywords = {*SOFTWARE ENGINEERING, COMPUTER PROGRAMMING, COMPUTER PROGRAMMING AND SOFTWARE, COMPUTER PROGRAMS, DICE(COMPUTER PROGRAM), programming languages, Project management}, author = {Bhattacharyya, Shuvra S. and Plishker,William and Chung-Ching Shen and Sane, Nimish and Zaki, George} } @article {19257, title = {The DSPCAD Lightweight Dataflow Environment: Introduction to LIDE Version 0.1}, year = {2011}, month = {2011}, abstract = {LIDE (the DSPCAD Lightweight Dataflow Environment) is a flexible, lightweight design environment that allows designers to experiment with dataflow-based approaches for design and implementation of digital signal processing (DSP) systems. LIDE contains libraries of dataflow graph elements (primitive actors, hierarchical actors, and edges) and utilities that assist designers in modeling, simulating, and implementing DSP systems using formal dataflow techniques. 
The libraries of dataflow graph elements (mainly actors) contained in LIDE provide useful building blocks that can be used to construct signal processing applications, and that can be used as examples that designers can adapt to create their own, customized LIDE actors. Furthermore, by using LIDE along with the DSPCAD Integrative Command Line Environment (DICE), designers can efficiently create and execute unit tests for user-designed actors. This report provides an introduction to LIDE. The report includes details on the process for setting up the LIDE environment, and covers methods for using pre-designed libraries of graph elements, as well as creating user-designed libraries and associated utilities using the C language. The report also gives an introduction to the C language plug-in for dicelang. This plug-in, called dicelang-C, provides features for efficient C-based project development and maintenance that are useful to apply when working with LIDE. }, keywords = {Technical Report}, isbn = {UMIACS-TR-2011-17}, doi = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/12147}, author = {Chung-Ching Shen and Wang, Lai-Huei and Cho, Inkeun and Kim, Scott and Won, Stephen and Plishker,William and Bhattacharyya, Shuvra S.} } @conference {19022, title = {Efficient and secure threshold-based event validation for VANETs}, year = {2011}, month = {2011}, abstract = {Determining whether the number of vehicles reporting an event is above a threshold is an important mechanism for VANETs, because many applications rely on a threshold number of notifications to reach agreement among vehicles, to determine the validity of an event, or to prevent the abuse of emergency alarms. We present the first efficient and secure threshold-based event validation protocol for VANETs. Quite counter-intuitively, we found that the z-smallest approach offers the best tradeoff between security and efficiency since other approaches perform better for probabilistic counting. Analysis and simulation show that our protocol provides > 99\% accuracy despite the presence of attackers, collection and distribution of alerts in less than 1 second, and negligible impact on network performance. }, url = {http://www.eecs.berkeley.edu/~elaines/docs/hsiao_wisec2011.pdf}, author = {Hsiao, H. C. and Studer, A. and Dubey, R. and Elaine Shi and Perrig, A.} } @article {16040, title = {Empirical Studies in Information Visualization: Seven Scenarios}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {PP}, year = {2011}, month = {2011///}, pages = {1 - 1}, abstract = {We take a new, scenario-based look at evaluation in information visualization. Our seven scenarios, evaluating visual data analysis and reasoning, evaluating user performance, evaluating user experience, evaluating environments and work practices, evaluating communication through visualization, evaluating visualization algorithms, and evaluating collaborative data analysis were derived through an extensive literature review of over 800 visualization publications. These scenarios distinguish different study goals and types of research questions and are illustrated through example studies. Through this broad survey and the distillation of these scenarios we make two contributions. One, we encapsulate the current practices in the information visualization research community and, two, we provide a different approach to reaching decisions about what might be the most effective evaluation of a given information visualization. 
Scenarios can be used to choose appropriate research questions and goals and the provided examples can be consulted for guidance on how to design one{\textquoteright}s own study.}, isbn = {1077-2626}, doi = {10.1109/TVCG.2011.279}, author = {Lam,H. and Bertini,E. and Isenberg,P. and Plaisant, Catherine and Carpendale,S.} } @article {12440, title = {Example-Driven Manifold Priors for Image Deconvolution}, journal = {Image Processing, IEEE Transactions on}, volume = {20}, year = {2011}, month = {2011/11//}, pages = {3086 - 3096}, abstract = {Image restoration methods that exploit prior information about images to be estimated have been extensively studied, typically using the Bayesian framework. In this paper, we consider the role of prior knowledge of the object class in the form of a patch manifold to address the deconvolution problem. Specifically, we incorporate unlabeled image data of the object class, say natural images, in the form of a patch-manifold prior for the object class. The manifold prior is implicitly estimated from the given unlabeled data. We show how the patch-manifold prior effectively exploits the available sample class data for regularizing the deblurring problem. Furthermore, we derive a generalized cross-validation (GCV) function to automatically determine the regularization parameter at each iteration without explicitly knowing the noise variance. Extensive experiments show that this method performs better than many competitive image deconvolution methods.}, keywords = {automatic regularization parameter determination, Bayesian framework, deblurring, example-driven manifold priors, generalized cross-validation (GCV), image deconvolution, image restoration, iterative methods, natural images, patch-manifold prior, unlabeled data}, isbn = {1057-7149}, doi = {10.1109/TIP.2011.2145386}, author = {Ni,Jie and Turaga,P. and Patel, Vishal M. and Chellapa, Rama} } @article {16037, title = {Extracting Insights from Electronic Health Records: Case Studies, a Visual Analytics Process Model, and Design Recommendations}, journal = {Journal of Medical Systems}, volume = {35}, year = {2011}, month = {2011///}, pages = {1135 - 1152}, abstract = {Current electronic health record (EHR) systems facilitate the storage, retrieval, persistence, and sharing of patient data. However, the way physicians interact with EHRs has not changed much. More specifically, support for temporal analysis of a large number of EHRs has been lacking. A number of information visualization techniques have been proposed to alleviate this problem. Unfortunately, due to their limited application to a single case study, the results are often difficult to generalize across medical scenarios. We present the usage data of Lifelines2 (Wang et al. 2008), our information visualization system, and user comments, both collected over eight different medical case studies. We generalize our experience into a visual analytics process model for multiple EHRs. 
Based on our analysis, we make seven design recommendations for information visualization tools that explore EHR systems.}, isbn = {0148-5598}, url = {http://dx.doi.org/10.1007/s10916-011-9718-x}, author = {Wang,Taowei and Wongsuphasawat,Krist and Plaisant, Catherine and Shneiderman, Ben} } @conference {17175, title = {From slacktivism to activism: participatory culture in the age of social media}, booktitle = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {819 - 822}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Social networking sites (e.g. Facebook), microblogging services (e.g. Twitter), and content-sharing sites (e.g. YouTube and Flickr) have introduced the opportunity for wide-scale, online social participation. Visibility of national and international priorities such as public health, political unrest, disaster relief, and climate change has increased, yet we know little about the benefits - and possible costs - of engaging in social activism via social media. These powerful social issues introduce a need for scientific research into technology-mediated social participation. What are the actual, tangible benefits of "greening" Twitter profile pictures in support of the Iranian elections? Does cartooning a Facebook profile picture really raise awareness of child abuse? Are there unintended negative effects through low-risk, low-cost technology-mediated participation? And, is there a difference - in both outcome and engagement level - between different types of online social activism? This SIG will investigate technology-mediated social participation through a critical lens, discussing both the potential positive and negative outcomes of such participation. Approaches to designing for increased participation, evaluating effects of participation, and next steps in scientific research directions will be discussed.}, keywords = {activism, change, design, participation, slacktivism, social media}, isbn = {978-1-4503-0268-5}, doi = {10.1145/1979742.1979543}, url = {http://doi.acm.org/10.1145/1979742.1979543}, author = {Rotman,Dana and Vieweg,Sarah and Yardi,Sarita and Chi,Ed and Preece,Jenny and Shneiderman, Ben and Pirolli,Peter and Glaisyer,Tom} } @article {14613, title = {Gene Coexpression Network Topology of Cardiac Development, Hypertrophy, and Failure}, journal = {Circulation: Cardiovascular Genetics}, volume = {4}, year = {2011}, month = {2011/02/01/}, pages = {26 - 35}, abstract = {Background{\textemdash} Network analysis techniques allow a more accurate reflection of underlying systems biology to be realized than traditional unidimensional molecular biology approaches. Using gene coexpression network analysis, we define the gene expression network topology of cardiac hypertrophy and failure and the extent of recapitulation of fetal gene expression programs in failing and hypertrophied adult myocardium. Methods and Results{\textemdash} We assembled all myocardial transcript data in the Gene Expression Omnibus (n=1617). Because hierarchical analysis revealed species had primacy over disease clustering, we focused this analysis on the most complete (murine) dataset (n=478). 
Using gene coexpression network analysis, we derived functional modules, regulatory mediators, and higher-order topological relationships between genes and identified 50 gene coexpression modules in developing myocardium that were not present in normal adult tissue. We found that known gene expression markers of myocardial adaptation were members of upregulated modules but not hub genes. We identified ZIC2 as a novel transcription factor associated with coexpression modules common to developing and failing myocardium. Of 50 fetal gene coexpression modules, 3 (6\%) were reproduced in hypertrophied myocardium and 7 (14\%) were reproduced in failing myocardium. One fetal module was common to both failing and hypertrophied myocardium. Conclusions{\textemdash} Network modeling allows systems analysis of cardiovascular development and disease. Although we did not find evidence for a global coordinated program of fetal gene expression in adult myocardial adaptation, our analysis revealed specific gene expression modules active during both development and disease and specific candidates for their regulation. }, keywords = {fetal, Gene expression, heart failure, hypertrophy, myocardium}, isbn = {1942-325X, 1942-3268}, doi = {10.1161/CIRCGENETICS.110.941757}, url = {http://circgenetics.ahajournals.org/content/4/1/26}, author = {Dewey,Frederick E and Perez,Marco V and Wheeler,Matthew T and Watt,Clifton and Spin,Joshua and Langfelder,Peter and Horvath,Steve and Hannenhalli, Sridhar and Cappola,Thomas P. and Ashley,Euan A} } @article {14619, title = {Genome-Wide Survey of Natural Selection on Functional, Structural, and Network Properties of Polymorphic Sites in Saccharomyces Paradoxus}, journal = {Molecular Biology and Evolution}, volume = {28}, year = {2011}, month = {2011/09/01/}, pages = {2615 - 2627}, abstract = {Background. To characterize the genetic basis of phenotypic evolution, numerous studies have identified individual genes that have likely evolved under natural selection. However, phenotypic changes may represent the cumulative effect of similar evolutionary forces acting on functionally related groups of genes. Phylogenetic analyses of divergent yeast species have identified functional groups of genes that have evolved at significantly different rates, suggestive of differential selection on the functional properties. However, due to environmental heterogeneity over long evolutionary timescales, selection operating within a single lineage may be dramatically different, and it is not detectable via interspecific comparisons alone. Moreover, interspecific studies typically quantify selection on protein-coding regions using the Dn/Ds ratio, which cannot be extended easily to study selection on noncoding regions or synonymous sites. The population genetic-based analysis of selection operating within a single lineage ameliorates these limitations. Findings. We investigated selection on several properties associated with genes, promoters, or polymorphic sites, by analyzing the derived allele frequency spectrum of single nucleotide polymorphisms (SNPs) in 28 strains of Saccharomyces paradoxus. We found evidence for significant differential selection between many functionally relevant categories of SNPs, underscoring the utility of function-centric approaches for discovering signatures of natural selection. 
When comparable, our findings are largely consistent with previous studies based on interspecific comparisons, with one notable exception: our study finds that mutations from an ancient amino acid to a relatively new amino acid are selectively disfavored, whereas interspecific comparisons have found selection against ancient amino acids. Several of our findings have not been addressed through prior interspecific studies: we find that synonymous mutations from preferred to unpreferred codons are selected against and that synonymous SNPs in the linker regions of proteins are relatively less constrained than those within protein domains. Conclusions. We present the first global survey of selection acting on various functional properties in S. paradoxus. We found that selection pressures previously detected over long evolutionary timescales have also shaped the evolution of S. paradoxus. Importantly, we also make novel discoveries untenable via conventional interspecific analyses.}, keywords = {derived allele frequency, Evolution, natural selection, yeast}, isbn = {0737-4038, 1537-1719}, doi = {10.1093/molbev/msr085}, url = {http://mbe.oxfordjournals.org/content/28/9/2615}, author = {Vishnoi,Anchal and Sethupathy,Praveen and Simola,Daniel and Plotkin,Joshua B. and Hannenhalli, Sridhar} } @article {16273, title = {Hawkeye and AMOS: Visualizing and Assessing the Quality of Genome Assemblies}, journal = {Briefings in Bioinformatics}, year = {2011}, month = {2011/12/23/}, abstract = {Since its launch in 2004, the open-source AMOS project has released several innovative DNA sequence analysis applications including: Hawkeye, a visual analytics tool for inspecting the structure of genome assemblies; the Assembly Forensics and FRCurve pipelines for systematically evaluating the quality of a genome assembly; and AMOScmp, the first comparative genome assembler. These applications have been used to assemble and analyze dozens of genomes ranging in complexity from simple microbial species through mammalian genomes. Recent efforts have been focused on enhancing support for new data characteristics brought on by second- and now third-generation sequencing. This review describes the major components of AMOS in light of these challenges, with an emphasis on methods for assessing assembly quality and the visual analytics capabilities of Hawkeye. These interactive graphical aspects are essential for navigating and understanding the complexities of a genome assembly, from the overall genome structure down to individual bases. Hawkeye and AMOS are available open source at http://amos.sourceforge.net.}, keywords = {assembly forensics, DNA Sequencing, genome assembly, visual analytics}, isbn = {1467-5463, 1477-4054}, doi = {10.1093/bib/bbr074}, url = {http://bib.oxfordjournals.org/content/early/2011/12/23/bib.bbr074}, author = {Schatz,Michael C and Phillippy,Adam M and Sommer,Daniel D and Delcher,Arthur L. and Puiu,Daniela and Narzisi,Giuseppe and Salzberg,Steven L. and Pop, Mihai} } @inbook {19277, title = {Heterogeneous Design in Functional DIF}, booktitle = {Transactions on High-Performance Embedded Architectures and Compilers IV}, series = {Lecture Notes in Computer Science}, year = {2011}, month = {2011}, pages = {391 - 408}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Dataflow formalisms have provided designers of digital signal processing (DSP) systems with analysis and optimizations for many years. 
As system complexity increases, designers are relying on more types of dataflow models to describe applications while retaining these implementation benefits. The semantic range of DSP-oriented dataflow models has expanded to cover heterogeneous models and dynamic applications, but efficient design, simulation, and scheduling of such applications have not. To facilitate implementing heterogeneous applications, we utilize a new dataflow model of computation and show how actors designed in other dataflow models are directly supported by this framework, allowing system designers to immediately compose and simulate actors from different models. Using examples, we show how this approach can be applied to quickly describe and functionally simulate a heterogeneous dataflow-based application such that a designer may analyze and tune trade-offs among different models and schedules for simulation time, memory consumption, and schedule size.}, keywords = {Arithmetic and Logic Structures, Computer Communication Networks, Dataflow, heterogeneous, Input/Output and Data Communications, Logic Design, Processor Architectures, Programming Languages, Compilers, Interpreters, Signal processing}, isbn = {978-3-642-24567-1, 978-3-642-24568-8}, url = {http://link.springer.com/chapter/10.1007/978-3-642-24568-8_20}, author = {Plishker,William and Sane, Nimish and Kiemb, Mary and Bhattacharyya, Shuvra S.}, editor = {Stenstr{\"o}m, Per} } @conference {12446, title = {Illumination robust dictionary-based face recognition}, booktitle = {2011 18th IEEE International Conference on Image Processing (ICIP)}, year = {2011}, month = {2011/09/11/14}, pages = {777 - 780}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we present a face recognition method based on simultaneous sparse approximations under varying illumination. Our method consists of two main stages. In the first stage, a dictionary is learned for each face class from the given training examples, minimizing the representation error with a sparseness constraint. In the second stage, a test image is projected onto the span of the atoms in each learned dictionary. The resulting residual vectors are then used for classification. Furthermore, to handle changes in lighting conditions, we use a relighting approach based on a non-stationary stochastic filter to generate multiple images of the same person with different lighting. As a result, our algorithm has the ability to recognize human faces with good accuracy even when only a single image or very few images are provided for training. 
The effectiveness of the proposed method is demonstrated on publicly available databases and it is shown that this method is efficient and can perform significantly better than many competitive face recognition algorithms.}, keywords = {albedo, approximation theory, classification, competitive face recognition algorithms, Databases, Dictionaries, Face, face recognition, face recognition method, filtering theory, human face recognition, illumination robust dictionary-based face recognition, illumination variation, image representation, learned dictionary, learning (artificial intelligence), lighting, lighting conditions, multiple images, nonstationary stochastic filter, publicly available databases, relighting, relighting approach, representation error, residual vectors, Robustness, simultaneous sparse approximations, simultaneous sparse signal representation, sparseness constraint, Training, varying illumination, vectors}, isbn = {978-1-4577-1304-0}, doi = {10.1109/ICIP.2011.6116670}, author = {Patel, Vishal M. and Tao Wu and Biswas,S. and Phillips,P.J. and Chellapa, Rama} } @inbook {12456, title = {Image and Video-Based Biometrics}, booktitle = {Visual Analysis of Humans}, year = {2011}, month = {2011///}, pages = {437 - 454}, publisher = {Springer London}, organization = {Springer London}, abstract = {Biometrics deals with the problem of identifying individuals based on physiological or behavioral characteristics. Since many physical characteristics, such as face, iris, etc., and behavioral characteristics, such as voice, expression, etc., are unique to an individual, biometric analysis offers a reliable and natural solution to the problem of identity verification. In this chapter, we discuss image and video-based biometrics involving face, iris and gait. In particular, we discuss several recent approaches to physiological biometrics based on Sparse Representations and Compressed Sensing. Some of the most compelling challenges and issues that confront research in biometrics are also addressed.}, isbn = {978-0-85729-997-0}, url = {http://dx.doi.org/10.1007/978-0-85729-997-0_22}, author = {Patel, Vishal M. and Pillai,Jaishanker K. and Chellapa, Rama}, editor = {Moeslund,Thomas B. and Hilton,Adrian and Kr{\"u}ger,Volker and Sigal,Leonid} } @article {15762, title = {Implicitly-weighted total least squares}, journal = {Linear Algebra and its Applications}, volume = {435}, year = {2011}, month = {2011/08/01/}, pages = {560 - 577}, abstract = {In a total least squares (TLS) problem, we estimate an optimal set of model parameters X, so that (A - ΔA)X = B - ΔB, where A is the model matrix, B is the observed data, and ΔA and ΔB are corresponding corrections. When B is a single vector, Rao (1997) and Paige and Strako{\v s} (2002) suggested formulating standard least squares problems, for which ΔA = 0, and data least squares problems, for which ΔB = 0, as weighted and scaled TLS problems. In this work we define an implicitly-weighted TLS formulation (ITLS) that reparameterizes these formulations to make computation easier. We derive asymptotic properties of the estimates as the number of rows in the problem approaches infinity, handling the rank-deficient case as well. We discuss the role of the ratio between the variances of errors in A and B in choosing an appropriate parameter in ITLS. We also propose methods for computing the family of solutions efficiently and for choosing the appropriate solution if the ratio of variances is unknown. 
We provide experimental results on the usefulness of the ITLS family of solutions.}, keywords = {Data least squares, Errors in variables, least squares, Linear regression, total least squares, Variance estimation}, isbn = {0024-3795}, doi = {10.1016/j.laa.2010.06.020}, url = {http://www.sciencedirect.com/science/article/pii/S0024379510003162}, author = {Park,Sungwoo and O{\textquoteright}Leary,Dianne P.} } @article {16043, title = {Improved Identification and Visualization of Emergency Department Patient Visits}, journal = {Annals of Emergency Medicine}, volume = {58}, year = {2011}, month = {2011///}, pages = {S309 - S309}, author = {Hettinger,AZ and Rackoff,A. and Wongsuphasawat,K. and Cheng,H. and Fairbanks,RJ and Plaisant, Catherine and Smith,M. S} } @article {16032, title = {Information Visualization: State of the Field and New Research Directions}, journal = {Information Visualization}, volume = {10}, year = {2011}, month = {2011/10/01/}, pages = {269 - 270}, isbn = {1473-8716, 1473-8724}, doi = {10.1177/1473871611418138}, url = {http://ivi.sagepub.com/content/10/4/269}, author = {Kerren,Andreas and Plaisant, Catherine and Stasko,John T} } @conference {14226, title = {Language Models for Semantic Extraction and Filtering in Video Action Recognition}, booktitle = {Workshops at the Twenty-Fifth AAAI Conference on Artificial Intelligence}, year = {2011}, month = {2011/08/24/}, abstract = {The paper addresses the following issues: (a) how to represent semantic information from natural language so that a vision model can utilize it? (b) how to extract the salient textual information relevant to vision? For a given domain, we present a new model of semantic extraction that takes into account word relatedness as well as word disambiguation in order to apply to a vision model. We automatically process the text transcripts and perform syntactic analysis to extract dependency relations. We then perform semantic extraction on the output to filter semantic entities related to actions. The resulting data are used to populate a matrix of co-occurrences utilized by the vision processing modules. Results show that explicitly modeling the co-occurrence of actions and tools significantly improved performance.}, url = {https://www.aaai.org/ocs/index.php/WS/AAAIW11/paper/viewPaper/3919}, author = {Tzoukermann,Evelyne and Neumann, Jan and Kosecka,Jana and Ferm{\"u}ller, Cornelia and Perera,Ian and Ferraro,Frank and Sapp,Ben and Chaudhry,Rizwan and Singh,Gautam} } @conference {13074, title = {A large-scale benchmark dataset for event recognition in surveillance video}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2011 IEEE Conference on}, year = {2011}, month = {2011/06//}, pages = {3153 - 3160}, abstract = {We introduce a new large-scale video dataset designed to assess the performance of diverse visual event recognition algorithms with a focus on continuous visual event recognition (CVER) in outdoor areas with wide coverage. Previous datasets for action recognition are unrealistic for real-world surveillance because they consist of short clips showing one action by one individual [15, 8]. Datasets have been developed for movies [11] and sports [12], but these actions and scene conditions do not apply effectively to surveillance videos. Our dataset consists of many outdoor scenes with actions occurring naturally by non-actors in continuously captured videos of the real world.
The dataset includes large numbers of instances for 23 event types distributed throughout 29 hours of video. This data is accompanied by detailed annotations which include both moving object tracks and event examples, which will provide a solid basis for large-scale evaluation. Additionally, we propose different types of evaluation modes for visual recognition tasks and evaluation metrics along with our preliminary experimental results. We believe that this dataset will stimulate diverse aspects of computer vision research and help us to advance the CVER tasks in the years ahead.}, keywords = {computer vision; continuous visual event recognition; CVER; evaluation metrics; event recognition; image recognition; large-scale video dataset; moving object tracks; outdoor scenes; surveillance video; video databases; visual databases; visual recognition tasks; visual surveillance}, doi = {10.1109/CVPR.2011.5995586}, author = {Oh,Sangmin and Hoogs, A. and Perera,A. and Cuntoor, N. and Chen,Chia-Chih and Lee,Jong Taek and Mukherjee,S. and Aggarwal, JK and Lee,Hyungtae and Davis, Larry S. and Swears,E. and Wang,Xiaoyang and Ji,Qiang and Reddy,K. and Shah,M. and Vondrick,C. and Pirsiavash,H. and Ramanan,D. and Yuen,J. and Torralba,A. and Song,Bi and Fong,A. and Roy-Chowdhury, A. and Desai,M.} } @conference {16036, title = {LifeFlow: visualizing an overview of event sequences}, booktitle = {Proceedings of the 2011 annual conference on Human factors in computing systems}, series = {CHI {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {1747 - 1756}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Event sequence analysis is an important task in many domains: medical researchers may study the patterns of transfers within the hospital for quality control; transportation experts may study accident response logs to identify best practices. In many cases they deal with thousands of records. While previous research has focused on searching and browsing, overview tasks are often overlooked. We introduce a novel interactive visual overview of event sequences called \emph{LifeFlow}. LifeFlow is scalable, can summarize all possible sequences, and represents the temporal spacing of the events within sequences. Two case studies with healthcare and transportation domain experts are presented to illustrate the usefulness of LifeFlow.
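The aggregation step underlying a LifeFlow-style overview can be sketched as counting how many records share each event-sequence prefix; a toy version that ignores the temporal spacing LifeFlow also encodes (the event names are illustrative):

from collections import defaultdict

def build_overview(sequences):
    # Aggregate event sequences by shared prefixes: each node of the
    # resulting tree records how many sequences pass through that prefix.
    counts = defaultdict(int)
    for seq in sequences:
        for i in range(1, len(seq) + 1):
            counts[tuple(seq[:i])] += 1
    return counts

seqs = [["ER", "ICU", "Floor"], ["ER", "Floor"], ["ER", "ICU", "Floor"]]
for prefix, n in sorted(build_overview(seqs).items()):
    print("  " * (len(prefix) - 1), prefix[-1], n)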
A user study with ten participants confirmed that after 15 minutes of training novice users were able to rapidly answer questions about the prevalence and temporal characteristics of sequences, find anomalies, and gain significant insight from the data.}, keywords = {Information Visualization, overview visualization, temporal categorical data, timestamped event sequences}, isbn = {978-1-4503-0228-9}, doi = {10.1145/1978942.1979196}, url = {http://doi.acm.org/10.1145/1978942.1979196}, author = {Wongsuphasawat,Krist and Guerra G{\'o}mez,John Alexis and Plaisant, Catherine and Wang,Taowei David and Taieb-Maimon,Meirav and Shneiderman, Ben} } @conference {17280, title = {LifeFlow: visualizing an overview of event sequences (video preview)}, booktitle = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {507 - 510}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Event sequence analysis is an important task in many domains: medical researchers may study the patterns of transfers within the hospital for quality control; transportation experts may study accident response logs to identify best practices. In many cases they deal with thousands of records. While previous research has focused on searching and browsing, overview tasks are often overlooked. We introduce a novel interactive visual overview of event sequences called LifeFlow. LifeFlow is scalable, can summarize all possible sequences, and represents the temporal spacing of the events within sequences. In this video, we show an example of patient transfer data and briefly demonstrate how to analyze them with LifeFlow. Please see [11] or visit http://www.cs.umd.edu/hcil/lifeflow for more detail.}, keywords = {emergency room, healthcare, Information Visualization, overview visualization, temporal categorical data, timestamped event sequences}, isbn = {978-1-4503-0268-5}, doi = {10.1145/1979742.1979557}, url = {http://doi.acm.org/10.1145/1979742.1979557}, author = {Wongsuphasawat,Krist and Guerra G{\'o}mez,John Alexis and Plaisant, Catherine and Wang,Taowei and Taieb-Maimon,Meirav and Shneiderman, Ben} } @article {13082, title = {Local Response Context Applied to Pedestrian Detection}, journal = {Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications}, year = {2011}, month = {2011///}, pages = {181 - 188}, abstract = {Appearing as an important task in computer vision, pedestrian detection has been widely investigated in recent years. To design a robust detector, we propose a feature descriptor called Local Response Context (LRC). This descriptor captures discriminative information regarding the surrounding of the person{\textquoteright}s location by sampling the response map obtained by a generic sliding window detector. A partial least squares regression model using LRC descriptors is learned and employed as a second classification stage (after the execution of the generic detector to obtain the response map). Experiments based on the ETHZ pedestrian dataset show that the proposed approach improves significantly the results achieved by the generic detector alone and is comparable to the state-of-the-art methods.}, author = {Schwartz,W. and Davis, Larry S. and Pedrini,H.} } @article {14705, title = {LOCKSMITH: Practical static race detection for C}, journal = {ACM Trans. Program. Lang.
Syst.}, volume = {33}, year = {2011}, month = {2011/01//}, pages = {3:1 - 3:55}, abstract = {Locksmith is a static analysis tool for automatically detecting data races in C programs. In this article, we describe each of Locksmith{\textquoteright}s component analyses precisely, and present systematic measurements that isolate interesting trade-offs between precision and efficiency in each analysis. Using a benchmark suite comprising stand-alone applications and Linux device drivers totaling more than 200,000 lines of code, we found that a simple no-worklist strategy yielded the most efficient interprocedural dataflow analysis; that our sharing analysis was able to determine that most locations are thread-local, and therefore need not be protected by locks; that modeling C structs and void pointers precisely is key to both precision and efficiency; and that context sensitivity yields a much more precise analysis, though with decreased scalability. Put together, our results illuminate some of the key engineering challenges in building Locksmith and data race detection analyses in particular, and constraint-based program analyses in general.}, keywords = {context sensitivity, contextual effects, correlation inference, Data race, locksmith, race detection, sharing analysis, static analysis}, isbn = {0164-0925}, doi = {10.1145/1889997.1890000}, url = {http://doi.acm.org/10.1145/1889997.1890000}, author = {Pratikakis,Polyvios and Foster, Jeffrey S. and Hicks, Michael W.} } @inbook {19186, title = {A Longitudinal Study of Pressure Sensing to Infer Real-World Water Usage Events in the Home}, booktitle = {Pervasive Computing}, series = {Lecture Notes in Computer Science}, volume = {6696}, year = {2011}, month = {2011}, pages = {50 - 69}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We present the first longitudinal study of pressure sensing to infer real-world water usage events in the home (e.g., dishwasher, upstairs bathroom sink, downstairs toilet). In order to study the pressure-based approach out in the wild, we deployed a ground truth sensor network for five weeks in three homes and two apartments that directly monitored valve-level water usage by fixtures and appliances. We use this data to, first, demonstrate the practical challenges in constructing water usage activity inference algorithms and, second, to inform the design of a new probabilistic classification approach. Inspired by algorithms in speech recognition, our novel Bayesian approach incorporates template matching, a language model, grammar, and prior probabilities. We show that with a single pressure sensor, our probabilistic algorithm can classify real-world water usage at the fixture level with 90\% accuracy and at the fixture-category level with 96\% accuracy. With two pressure sensors, these accuracies increase to 94\% and 98\%.
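A toy rendering of the Bayesian combination just described, reduced to a Gaussian template-matching likelihood weighted by per-fixture priors (the full model also includes a language model and grammar; all fixture names, features, and numbers here are made up):

import numpy as np

def classify_event(feature, templates, priors, sigma=1.0):
    # Score each fixture by log-likelihood (Gaussian template match)
    # plus log-prior, and return the maximum a posteriori fixture.
    scores = {}
    for fixture, template in templates.items():
        log_lik = -np.sum((feature - template) ** 2) / (2 * sigma**2)
        scores[fixture] = log_lik + np.log(priors[fixture])
    return max(scores, key=scores.get)

templates = {"toilet": np.array([5.0, 1.0]), "sink": np.array([2.0, 0.5])}
priors = {"toilet": 0.3, "sink": 0.7}
print(classify_event(np.array([4.8, 1.1]), templates, priors))  # "toilet"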
Finally, we show how our new approach can be trained with fewer examples than a strict template-matching approach alone.}, isbn = {978-3-642-21725-8}, url = {http://dx.doi.org/10.1007/978-3-642-21726-5_4}, author = {Jon Froehlich and Larson,Eric and Saba,Elliot and Campbell,Tim and Atlas,Les and Fogarty,James and Patel,Shwetak}, editor = {Lyons,Kent and Hightower,Jeffrey and Huang,Elaine} } @article {12864, title = {Long-term effects of ocean warming on the prokaryotic community: evidence from the vibrios}, journal = {The ISME Journal}, volume = {6}, year = {2011}, month = {2011/07/14/}, pages = {21 - 30}, abstract = {The long-term effects of ocean warming on prokaryotic communities are unknown because of lack of historical data. We overcame this gap by applying a retrospective molecular analysis to the bacterial community on formalin-fixed samples from the historical Continuous Plankton Recorder archive, which is one of the longest and most geographically extensive collections of marine biological samples in the world. We showed that during the last half century, ubiquitous marine bacteria of the Vibrio genus, including Vibrio cholerae, increased in dominance within the plankton-associated bacterial community of the North Sea, where an unprecedented increase in bathing infections related to these bacteria was recently reported.
Among environmental variables, increased sea surface temperature explained 45\% of the variance in Vibrio data, supporting the view that ocean warming is favouring the spread of vibrios and may be the cause of the globally increasing trend in their associated diseases.}, keywords = {ecophysiology, ecosystems, environmental biotechnology, geomicrobiology, microbe interactions, microbial communities, microbial ecology, microbial engineering, microbial epidemiology, microbial genomics, microorganisms}, isbn = {1751-7362}, doi = {10.1038/ismej.2011.89}, url = {http://www.nature.com/ismej/journal/v6/n1/full/ismej201189a.html?WT.ec_id=ISMEJ-201201}, author = {Vezzulli,Luigi and Brettar,Ingrid and Pezzati,Elisabetta and Reid,Philip C. and Rita R Colwell and H{\"o}fle,Manfred G. and Pruzzo,Carla} } @article {17622, title = {Maximum bipartite flow in networks with adaptive channel width}, journal = {Theoretical Computer Science}, volume = {412}, year = {2011}, month = {2011/05/27/}, pages = {2577 - 2587}, abstract = {Traditionally, network optimization problems assume that each link in the network has a fixed capacity. Recent research in wireless networking has shown that it is possible to design networks where the capacity of the links can be changed adaptively to suit the needs of specific applications. In particular, one gets a choice of having a few high capacity outgoing links or many low capacity ones at any node of the network. This motivates us to have a re-look at classical network optimization problems and design algorithms to solve them in this new framework. In particular, we consider the problem of maximum bipartite flow, which has been studied extensively in the fixed-capacity network model. One of the motivations for studying this problem arises from the need to maximize the throughput of an infrastructure wireless network comprising base-stations (one set of vertices in the bipartition) and clients (the other set of vertices in the bipartition). We show that this problem has a significantly different combinatorial structure in this new network model from the fixed-capacity one. While there are several polynomial time algorithms for the maximum bipartite flow problem in traditional networks, we show that the problem is NP-hard in the new model. In fact, our proof extends to showing that the problem is APX-hard. We complement our lower bound by giving two algorithms for solving the problem approximately. The first algorithm is deterministic and achieves an approximation factor of O(log N), where N is the number of nodes in the network, while the second algorithm is randomized and achieves an approximation factor of e/(e - 1).}, keywords = {Adaptive channel width, graph algorithm, Linear program rounding, Maximum flow, Wireless networks}, isbn = {0304-3975}, doi = {10.1016/j.tcs.2010.10.023}, url = {http://www.sciencedirect.com/science/article/pii/S0304397510005852}, author = {Azar,Yossi and M{\k a}dry,Aleksander and Moscibroda,Thomas and Panigrahi,Debmalya and Srinivasan, Aravind} } @conference {17904, title = {MDMap: A system for data-driven layout and exploration of molecular dynamics simulations}, booktitle = {Biological Data Visualization (BioVis), 2011 IEEE Symposium on}, year = {2011}, month = {2011/10//}, pages = {111 - 118}, abstract = {Contemporary molecular dynamics simulations result in a glut of simulation data, making analysis and discovery a difficult and burdensome task.
We present MDMap, a system designed to summarize long-running molecular dynamics (MD) simulations. We represent a molecular dynamics simulation as a state transition graph over a set of intermediate (stable and semi-stable) states. The transitions amongst the states together with their frequencies represent the flow of a biomolecule through the trajectory space. MDMap automatically determines potential intermediate conformations and the transitions amongst them by analyzing the conformational space explored by the MD simulation. MDMap is an automated system to visualize MD simulations as state-transition diagrams, and can replace the current tedious manual layouts of biomolecular folding landscapes with an automated tool. The layout of the representative states and the corresponding transitions among them is presented to the user as a visual synopsis of the long-running MD simulation. We compare and contrast multiple presentations of the state transition diagrams, such as conformational embedding, and spectral, hierarchical, and force-directed graph layouts. We believe this system could provide a road-map for the visualization of other stochastic time-varying simulations in a variety of different domains.}, keywords = {biology computing; biomolecular folding landscapes; data-driven exploration; data layout; digital simulation; graph theory; MDMap; molecular dynamics method; state transition graph; stochastic processes; stochastic time-varying simulations; trajectory space}, doi = {10.1109/BioVis.2011.6094055}, author = {Patro,R. and Ip, Cheuk Yiu and Bista,S. and Cho,S.S. and Thirumalai,D. and Varshney, Amitabh} } @article {16039, title = {Medication Reconciliation: Work Domain Ontology, Prototype Development, and a Predictive Model}, journal = {AMIA Annual Symposium Proceedings}, volume = {2011}, year = {2011}, month = {2011///}, pages = {878 - 887}, abstract = {Medication errors can result from administration inaccuracies at any point of care and are a major cause for concern. To develop a successful Medication Reconciliation (MR) tool, we believe it necessary to build a Work Domain Ontology (WDO) for the MR process. A WDO defines the explicit, abstract, implementation-independent description of the task by separating the task from work context, application technology, and cognitive architecture. We developed a prototype based upon the WDO and designed to adhere to standard principles of interface design. The prototype was compared to Legacy Health System{\textquoteright}s and Pre-Admission Medication List Builder MR tools via a Keystroke-Level Model analysis for three MR tasks. The analysis found the prototype requires the fewest mental operations, completes tasks in the fewest steps, and completes tasks in the least amount of time. Accordingly, we believe that developing a MR tool, based upon the WDO and user interface guidelines, improves user efficiency and reduces cognitive load.}, isbn = {1942-597X}, author = {Markowitz,Eliz and Bernstam,Elmer V. and Herskovic,Jorge and Zhang,Jiajie and Shneiderman, Ben and Plaisant, Catherine and Johnson,Todd R.} } @article {19117, title = {MetaPath: identifying differentially abundant metabolic pathways in metagenomic datasets}, journal = {BMC proceedings}, volume = {5}, year = {2011}, month = {2011}, author = {Liu,B.
and Pop, Mihai} } @article {19276, title = {Model-based precision analysis and optimization for digital signal processors}, journal = {Proceedings of the European Signal Processing Conference}, year = {2011}, month = {2011}, pages = {506 - 510}, abstract = {Embedded signal processing has witnessed explosive growth in recent years in both scientific and consumer applications, driving the need for complex, high-performance signal processing systems that are largely application driven. In order to efficiently implement these systems on programmable platforms such as digital signal processors (DSPs), it is important to analyze and optimize the application design from early stages of the design process. A key performance concern for designers is choosing the data format. In this work, we propose a systematic and efficient design flow involving model-based design to analyze application data sets and precision requirements. We demonstrate this design flow with an exploration study into the required precision for eigenvalue decomposition (EVD) using the Jacobi algorithm. We demonstrate that with a high degree of structured analysis and automation, we are able to analyze the data set to derive an efficient data format, and optimize important parts of the algorithm with respect to precision.}, author = {Kedilaya, Soujanya and Plishker,William and Purkovic, Aleksandar and Johnson, Brian and Bhattacharyya, Shuvra S.} } @conference {19270, title = {A Model-Based Schedule Representation for Heterogeneous Mapping of Dataflow Graphs}, booktitle = {2011 IEEE International Symposium on Parallel and Distributed Processing Workshops and Phd Forum (IPDPSW)}, year = {2011}, month = {2011}, pages = {70 - 81}, abstract = {Dataflow-based application specifications are widely used in model-based design methodologies for signal processing systems. In this paper, we develop a new model called the dataflow schedule graph (DSG) for representing a broad class of dataflow graph schedules. The DSG provides a graphical representation of schedules based on dataflow semantics. In conventional approaches, applications are represented using dataflow graphs, whereas schedules for the graphs are represented using specialized notations, such as various kinds of sequences or looping constructs. In contrast, the DSG approach employs dataflow graphs for representing both application models and schedules that are derived from them. Our DSG approach provides a precise, formal framework for unambiguously representing, analyzing, manipulating, and interchanging schedules. We develop detailed formulations of the DSG representation, and present examples and experimental results that demonstrate the utility of DSGs in the context of heterogeneous signal processing system design.}, keywords = {Computational modeling, data flow graphs, dataflow schedule graph, dataflow semantics, dataflow-based application specifications, Dynamic scheduling, heterogeneous mapping, heterogeneous signal processing system design, model-based design methodologies, model-based schedule representation, Processor scheduling, Program processors, Schedules, semantics, Signal processing, synchronization}, author = {Wu, Hsiang-Huang and Chung-Ching Shen and Sane, N. and Plishker,W.
and Bhattacharyya, Shuvra S.} } @conference {19261, title = {Modeling and optimization of dynamic signal processing in resource-aware sensor networks}, booktitle = {2011 8th IEEE International Conference on Advanced Video and Signal-Based Surveillance (AVSS)}, year = {2011}, month = {2011}, pages = {449 - 454}, abstract = {Sensor node processing in resource-aware sensor networks is often critically dependent on dynamic signal processing functionality - i.e., signal processing functionality in which computational structure must be dynamically assessed and adapted based on time-varying environmental conditions, operating constraints or application requirements. In dynamic signal processing systems, it is important to provide flexibility for run-time adaptation of application behavior and execution characteristics, but in the domain of resource-aware sensor networks, such flexibility cannot come with significant costs in terms of power consumption overhead or reduced predictability. In this paper, we review a variety of complementary models of computation that are being developed as part of the dataflow interchange format (DIF) project to facilitate efficient and reliable implementation of dynamic signal processing systems. We demonstrate these methods in the context of resource-aware sensor networks.}, keywords = {Adaptation models, Aerodynamics, Computational modeling, data flow graphs, dataflow interchange format, Dynamic scheduling, dynamic signal processing, power consumption overhead, Program processors, resource-aware sensor networks, run-time adaptation, Schedules, sensor node processing, Signal processing, Wireless sensor networks}, author = {Bhattacharyya, Shuvra S. and Plishker,W. and Sane, N. and Chung-Ching Shen and Wu, Hsiang-Huang} } @conference {19621, title = {MOMMIE Knows Best: Systematic Optimizations for Verifiable Distributed Algorithms}, booktitle = {HotOS{\textquoteright}13 Proceedings of the 13th USENIX Conference on Hot Topics in Operating Systems }, series = {HotOS{\textquoteright}13}, year = {2011}, month = {2011///}, pages = {30 - 30}, publisher = {USENIX Association}, organization = {USENIX Association}, url = {http://dl.acm.org/citation.cfm?id=1991596.1991636}, author = {Maniatis, Petros and Dietz, Michael and Charalampos Papamanthou} } @article {19268, title = {Multithreaded Simulation for Synchronous Dataflow Graphs}, journal = {ACM Trans. Des. Autom. Electron. Syst.}, volume = {16}, year = {2011}, month = {2011}, pages = {25:1 - 25:23}, abstract = {For system simulation, Synchronous DataFlow (SDF) has been widely used as a core model of computation in design tools for digital communication and signal processing systems. The traditional approach for simulating SDF graphs is to compute and execute static schedules in single-processor desktop environments. Nowadays, however, multicore processors are increasingly popular desktop platforms for their potential performance improvements through thread-level parallelism. Without novel scheduling and simulation techniques that explicitly explore thread-level parallelism for executing SDF graphs, current design tools gain only minimal performance improvements on multicore platforms. In this article, we present a new multithreaded simulation scheduler, called MSS, to provide simulation runtime speedup for executing SDF graphs on multicore processors. 
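The static schedules mentioned above start from the SDF repetitions vector, obtained by solving the balance equations q[u]*prod = q[v]*cons over the rationals; a minimal sketch for connected, sample-rate-consistent graphs (not the MSS scheduler itself; actor and edge names are illustrative):

from fractions import Fraction
from math import lcm

def repetitions_vector(edges):
    # Each edge is (producer, consumer, tokens_produced, tokens_consumed).
    # Propagate rational firing rates from an arbitrary seed actor, then
    # scale to the smallest all-integer solution. Assumes the graph is
    # connected and consistent (no conflicting balance equations).
    q = {edges[0][0]: Fraction(1)}
    changed = True
    while changed:
        changed = False
        for u, v, prod, cons in edges:
            if u in q and v not in q:
                q[v] = q[u] * prod / cons
                changed = True
            elif v in q and u not in q:
                q[u] = q[v] * cons / prod
                changed = True
    scale = lcm(*(f.denominator for f in q.values()))
    return {actor: int(f * scale) for actor, f in q.items()}

# A -(2,3)-> B -(1,2)-> C : A fires 3x, B 2x, C 1x per graph iteration.
print(repetitions_vector([("A", "B", 2, 3), ("B", "C", 1, 2)]))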
MSS strategically integrates graph clustering, intracluster scheduling, actor vectorization, and intercluster buffering techniques to construct InterThread Communication (ITC) graphs at compile-time. MSS then applies efficient synchronization and dynamic scheduling techniques at runtime for executing ITC graphs in multithreaded environments. We have implemented MSS in the Advanced Design System (ADS) from Agilent Technologies. On an Intel dual-core, hyper-threading (4 processing units) processor, our results from this implementation demonstrate up to 3.5 times speedup in simulating modern wireless communication systems (e.g., WCDMA3G, CDMA 2000, WiMax, EDGE, and Digital TV).}, keywords = {multithreaded simulation, scheduling, Synchronous dataflow}, isbn = {1084-4309}, url = {http://doi.acm.org/10.1145/1970353.1970358}, author = {Chia-Jui Hsu and Pino, Jos{\'e} Luis and Bhattacharyya, Shuvra S.} } @article {19111, title = {Next Generation Sequence Assembly with AMOS}, journal = {Current Protocols in Bioinformatics}, volume = {11}, year = {2011}, month = {2011}, pages = {1 - 11}, author = {Treangen, T.J. and Sommer, D.D. and Angly, F.E. and Koren, S. and Pop, Mihai} } @conference {13070, title = {A novel feature descriptor based on the shearlet transform}, booktitle = {Image Processing (ICIP), 2011 18th IEEE International Conference on}, year = {2011}, month = {2011/09//}, pages = {1033 - 1036}, abstract = {Problems such as image classification, object detection and recognition rely on low-level feature descriptors to represent visual information. Several feature extraction methods have been proposed, including the Histograms of Oriented Gradients (HOG), which captures edge information by analyzing the distribution of intensity gradients and their directions. In addition to directions, the analysis of edges at different scales provides valuable information. Shearlet transforms provide a general framework for analyzing and representing data with anisotropic information at multiple scales. As a consequence, signal singularities, such as edges, can be precisely detected and located in images. Based on the idea of employing histograms to estimate the distribution of edge orientations and on the accurate multi-scale analysis provided by shearlet transforms, we propose a feature descriptor called Histograms of Shearlet Coefficients (HSC). Experimental results comparing HOG with HSC show that HSC provides significantly better results for the problems of texture classification and face identification.}, keywords = {edge detection; face identification; face recognition; feature extraction; histograms of oriented gradients; histograms of shearlet coefficients; image classification; image recognition; intensity gradients; multiscale analysis; object detection; object recognition; shearlet transform; signal singularities; texture classification}, doi = {10.1109/ICIP.2011.6115600}, author = {Schwartz, W.R. and da Silva,R.D. and Davis, Larry S. and Pedrini,H.} } @conference {14994, title = {NSF/IEEE-TCPP curriculum initiative on parallel and distributed computing: core topics for undergraduates}, booktitle = {Proceedings of the 42nd ACM technical symposium on Computer science education}, year = {2011}, month = {2011///}, pages = {617 - 618}, author = {Prasad,S. K. and Chtchelkanova,A. and Das,S. and Dehne,F. and Gouda,M. and Gupta,A. and JaJa, Joseph F. and Kant,K. and La Salle,A.
and LeBlanc,R. and others} } @conference {14902, title = {Odd Leaf Out: Improving Visual Recognition with Games}, booktitle = {Privacy, Security, Risk and Trust (PASSAT), 2011 IEEE Third International Conference on and 2011 IEEE Third International Conference on Social Computing (SocialCom)}, year = {2011}, month = {2011/10//}, pages = {87 - 94}, abstract = {A growing number of projects are solving complex computational and scientific tasks by soliciting human feedback through games. Many games with a purpose focus on generating textual tags for images. In contrast, we introduce a new game, Odd Leaf Out, which provides players with an enjoyable and educational game that serves the purpose of identifying misclassification errors in a large database of labeled leaf images. The game uses a novel mechanism to solicit useful information from players{\textquoteright} incorrect answers. A study of 165 players showed that game data can be used to identify mislabeled leaves much more quickly than would have been possible using a computer vision algorithm alone. Domain novices and experts were equally good at identifying mislabeled images, although domain experts enjoyed the game more. We discuss the successes and challenges of this new game, which can be applied to other domains with labeled image datasets.}, keywords = {biology computing; botany; complex computational tasks; computer games; computer vision; computer vision algorithm; educational game; human feedback; image classification; image recognition; labeled image datasets; leaf images; misclassification errors; object recognition; Odd Leaf Out; scientific tasks; textual tags; visual recognition}, doi = {10.1109/PASSAT/SocialCom.2011.225}, author = {Hansen,D. L and Jacobs, David W. and Lewis,D. and Biswas,A. and Preece,J. and Rotman,D. and Stevens,E.} } @inbook {19600, title = {Optimal Verification of Operations on Dynamic Sets}, booktitle = {Advances in Cryptology {\textendash} CRYPTO 2011}, series = {Lecture Notes in Computer Science}, year = {2011}, month = {2011/01/01/}, pages = {91 - 110}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We study the design of protocols for set-operation verification, namely the problem of cryptographically checking the correctness of outsourced set operations performed by an untrusted server over a dynamic collection of sets that are owned (and updated) by a trusted source. We present new authenticated data structures that allow any entity to publicly verify a proof attesting the correctness of primitive set operations such as intersection, union, subset and set difference. Based on a novel extension of the security properties of bilinear-map accumulators as well as on a primitive called accumulation tree, our protocols achieve optimal verification and proof complexity (i.e., only proportional to the size of the query parameters and the answer), as well as optimal update complexity (i.e., constant), while incurring no extra asymptotic space overhead. The proof construction is also efficient, adding a logarithmic overhead to the computation of the answer of a set-operation query. In contrast, existing schemes entail high communication and verification costs or high storage costs. Applications of interest include efficient verification of keyword search and database queries.
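For the flavor of publicly verifiable set queries, a far simpler stand-in than the bilinear-map accumulators used here is a Merkle-tree membership proof; a toy sketch only, without the set-operation, update, or optimality features of the actual protocols:

import hashlib

def h(*parts):
    return hashlib.sha256(b"|".join(parts)).hexdigest().encode()

def build_tree(leaves):
    # Bottom-up Merkle tree; returns all levels (level 0 = hashed leaves).
    levels = [[h(x) for x in leaves]]
    while len(levels[-1]) > 1:
        prev = levels[-1]
        if len(prev) % 2:                      # duplicate last node if odd
            prev = prev + [prev[-1]]
        levels.append([h(prev[i], prev[i + 1]) for i in range(0, len(prev), 2)])
    return levels

def prove(levels, index):
    # Authentication path: the sibling hash at every level plus a flag
    # saying whether the current node is a right child.
    path = []
    for level in levels[:-1]:
        if len(level) % 2:
            level = level + [level[-1]]
        path.append((level[index ^ 1], index % 2))
        index //= 2
    return path

def verify(root, element, path):
    node = h(element)
    for sibling, is_right in path:
        node = h(sibling, node) if is_right else h(node, sibling)
    return node == root

elements = [b"apple", b"banana", b"cherry", b"date"]
levels = build_tree(elements)
proof = prove(levels, 1)                        # membership of b"banana"
print(verify(levels[-1][0], b"banana", proof))  # True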
The security of our protocols is based on the bilinear q-strong Diffie-Hellman assumption.}, keywords = {Computer Communication Networks, computers and society, Data Encryption, Discrete Mathematics in Computer Science, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-642-22791-2, 978-3-642-22792-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-22792-9_6}, author = {Charalampos Papamanthou and Tamassia, Roberto and Triandopoulos, Nikos}, editor = {Rogaway, Phillip} } @conference {13617, title = {Overview of the FIRE 2011 RISOT Task}, booktitle = {FIRE}, year = {2011}, month = {2011///}, abstract = {RISOT was a pilot task in FIRE 2011 which focused on the retrieval of automatically recognized text from machine printed sources. The collection used for search was a subset of the FIRE 2008 and 2010 Bengali test collections that contained 92 topics and 62,825 documents. Two teams participated, submitting a total of 11 monolingual runs.}, author = {Garain,Utpal and Paik,Jiaul and Pal,Tamaltaru and Majumder,Prasenjit and David Doermann and Oard, Douglas} } @conference {14471, title = {A probabilistic approach for learning folksonomies from structured data}, booktitle = {Proceedings of the fourth ACM international conference on Web search and data mining}, series = {WSDM {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {555 - 564}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Learning structured representations has emerged as an important problem in many domains, including document and Web data mining, bioinformatics, and image analysis. One approach to learning complex structures is to integrate many smaller, incomplete and noisy structure fragments. In this work, we present an unsupervised probabilistic approach that extends affinity propagation [7] to combine the small ontological fragments into a collection of integrated, consistent, and larger folksonomies. This is a challenging task because the method must aggregate similar structures while avoiding structural inconsistencies and handling noise. We validate the approach on a real-world social media dataset, comprised of shallow personal hierarchies specified by many individual users, collected from the photosharing website Flickr. Our empirical results show that our proposed approach is able to construct deeper and denser structures, compared to an approach using only the standard affinity propagation algorithm.
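The standard affinity propagation baseline mentioned above is available off the shelf; a small sketch clustering tag fragments by character-bigram similarity (this is the plain baseline via scikit-learn's AffinityPropagation with a precomputed similarity matrix, not the paper's structural extension; the tags and the similarity choice are illustrative):

import numpy as np
from sklearn.cluster import AffinityPropagation

def bigrams(s):
    return {s[i:i + 2] for i in range(len(s) - 1)}

def jaccard(a, b):
    return len(a & b) / len(a | b)

# Tags drawn from hypothetical user hierarchies; similar names should merge.
tags = ["dog", "dogs", "puppy", "car", "cars", "automobile"]
grams = [bigrams(t) for t in tags]
S = np.array([[jaccard(a, b) for b in grams] for a in grams])

labels = AffinityPropagation(affinity="precomputed", random_state=0).fit(S).labels_
for tag, lab in zip(tags, labels):
    print(tag, lab)   # tags sharing a label ended up in the same cluster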
Additionally, the approach yields better overall integration quality than a state-of-the-art approach based on incremental relational clustering.}, keywords = {collective knowledge, data mining, folksonomies, social information processing, social metadata, taxonomies}, isbn = {978-1-4503-0493-1}, doi = {10.1145/1935826.1935905}, url = {http://doi.acm.org/10.1145/1935826.1935905}, author = {Plangprasopchok,Anon and Lerman,Kristina and Getoor, Lise} } @article {17340, title = {Realizing the value of social media requires innovative computing research}, journal = {Communications of the ACM}, volume = {54}, year = {2011}, month = {2011/09//}, pages = {34 - 37}, abstract = {How social media are expanding traditional research and development topics for computer and information scientists.}, isbn = {0001-0782}, doi = {10.1145/1995376.1995389}, url = {http://doi.acm.org/10.1145/1995376.1995389}, author = {Shneiderman, Ben and Preece,Jennifer and Pirolli,Peter} } @article {16038, title = {Reducing Missed Laboratory Results: Defining Temporal Responsibility, Generating User Interfaces for Test Process Tracking, and Retrospective Analyses to Identify Problems}, journal = {AMIA Annual Symposium Proceedings}, volume = {2011}, year = {2011}, month = {2011///}, pages = {1382 - 1391}, abstract = {Researchers have conducted numerous case studies reporting the details on how laboratory test results of patients were missed by the ordering medical providers. Given the importance of timely test results in an outpatient setting, there is limited discussion of electronic versions of test result management tools to help clinicians and medical staff with this complex process. This paper presents three ideas to reduce missed results with a system that facilitates tracking laboratory tests from order to completion as well as during follow-up: (1) define a workflow management model that clarifies responsible agents and associated time frame, (2) generate a user interface for tracking that could eventually be integrated into current electronic health record (EHR) systems, (3) help identify common problems in past orders through retrospective analyses.}, isbn = {1942-597X}, author = {Tarkan,Sureyya and Plaisant, Catherine and Shneiderman, Ben and Hettinger,A. Zachary} } @conference {17338, title = {Re-engineering health care with information technology: the role of computer-human interaction}, booktitle = {PART 2 {\textemdash} Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {451 - 454}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {There is a critical, nationwide need to improve health care and its cost. Health information technology has great promise that is yet to be realized.
In this panel, four noted experts will discuss key issues that should drive health IT, and the challenges for the CHI community to play a leading role.}, keywords = {clinical workflow, electronic medical records, health information technology visualization, healthcare informatics, participatory design, usability standards \& evaluation}, doi = {10.1145/1979482.1979490}, url = {http://doi.acm.org/10.1145/1979482.1979490}, author = {Butler,Keith and Payne,Thomas and Shneiderman, Ben and Brennan,Patricia and Zhang,Jiajie} } @article {12434, title = {Remote identification of faces: Problems, prospects, and progress}, journal = {Pattern Recognition Letters}, year = {2011}, month = {2011/12//}, abstract = {Face recognition in unconstrained acquisition conditions is one of the most challenging problems that has been actively researched in recent years. It is well known that many state-of-the-art still face recognition algorithms perform well, when constrained (frontal, well illuminated, high-resolution, sharp, and full) face images are acquired. However, their performance degrades significantly when the test images contain variations that are not present in the training images. In this paper, we highlight some of the key issues in remote face recognition. We define remote face recognition as one where faces are several tens of meters (10{\textendash}250 m) from the cameras. We then describe a remote face database which has been acquired in an unconstrained outdoor maritime environment. Recognition performance of a subset of existing still image-based face recognition algorithms is evaluated on the remote face data set. Further, we define the remote re-identification problem as matching a subject at one location with candidate sets acquired at a different location and over time in remote conditions. We provide preliminary experimental results on remote re-identification. It is demonstrated that in addition to applying a good classification algorithm, finding features that are robust to variations mentioned above and developing statistical models which can account for these variations are very important for remote face recognition.}, keywords = {Blur, illumination, low-resolution, pose variation, Re-identification, Remote face recognition}, isbn = {0167-8655}, doi = {10.1016/j.patrec.2011.11.020}, url = {http://www.sciencedirect.com/science/article/pii/S0167865511004107}, author = {Chellapa, Rama and Ni,Jie and Patel, Vishal M.} } @article {16033, title = {Research Directions in Data Wrangling: Visualizations and Transformations for Usable and Credible Data}, journal = {Information Visualization}, volume = {10}, year = {2011}, month = {2011/10/01/}, pages = {271 - 288}, abstract = {In spite of advances in technologies for working with data, analysts still spend an inordinate amount of time diagnosing data quality issues and manipulating data into a usable form. This process of {\textquoteleft}data wrangling{\textquoteright} often constitutes the most tedious and time-consuming aspect of analysis. Though data cleaning and integration are longstanding issues in the database community, relatively little research has explored how interactive visualization can advance the state of the art. In this article, we review the challenges and opportunities associated with addressing data quality issues. We argue that analysts might more effectively wrangle data through new interactive systems that integrate data verification, transformation, and visualization.
We identify a number of outstanding research questions, including how appropriate visual encodings can facilitate apprehension of missing data, discrepant values, and uncertainty; how interactive visualizations might facilitate data transform specification; and how recorded provenance and social interaction might enable wider reuse, verification, and modification of data transformations.}, keywords = {data cleaning, data quality, data transformation, Uncertainty, Visualization}, isbn = {1473-8716, 1473-8724}, doi = {10.1177/1473871611415994}, url = {http://ivi.sagepub.com/content/10/4/271}, author = {Kandel,Sean and Heer,Jeffrey and Plaisant, Catherine and Kennedy,Jessie and Van Ham,Frank and Riche,Nathalie Henry and Weaver,Chris and Lee,Bongshin and Brodbeck,Dominique and Buono,Paolo} } @article {15993, title = {Robotson Crusoe{\textendash}or{\textendash}What Is Common Sense?}, journal = {Logical Formalizations of Commonsense Reasoning {\textemdash} Papers from the AAAI 2011 Spring Symposium}, year = {2011}, month = {2011///}, abstract = {I will present a perspective on human-level commonsense behavior (HLCSB) that differs from commonsense reasoning (CSR) as the latter is often characterized in AI. I will argue that HLCSB is not far beyond the reach of current technology, and that it also provides solutions to some of the problems that plague CSR, most notably the brittleness problem. A key is the judicious use of metacognitive monitoring and control, especially in the area of automated learning.}, author = {Perlis, Don} } @article {17920, title = {A robust and rotationally invariant local surface descriptor with applications to non-local mesh processing}, journal = {Graphical Models}, volume = {73}, year = {2011}, month = {2011/09//}, pages = {231 - 242}, abstract = {In recent years, we have witnessed a striking increase in research concerning how to describe a meshed surface. These descriptors are commonly used to encode mesh properties or guide mesh processing, not to augment existing computations by replication. In this work, we first define a robust surface descriptor based on a local height field representation, and present a transformation via the extraction of Zernike moments. Unlike previous work, our local surface descriptor is innately rotationally invariant. Second, equipped with this novel descriptor, we present SAMPLE {\textendash} similarity augmented mesh processing using local exemplars {\textendash} a method which uses feature neighbourhoods to propagate mesh processing done in one part of the mesh, the local exemplar, to many others. Finally, we show that SAMPLE can be used in a number of applications, such as detail transfer and parameterization.}, keywords = {Local descriptors, Non-local mesh processing, shape analysis, Similarity processing}, isbn = {1524-0703}, doi = {10.1016/j.gmod.2011.05.002}, url = {http://www.sciencedirect.com/science/article/pii/S1524070311000166}, author = {Maximo, A. and Patro,R. and Varshney, Amitabh and Farias, R.} } @article {16041, title = {Seven guiding scenarios for information visualization evaluation}, volume = {2011-992-04}, year = {2011}, month = {2011///}, institution = {Department of Computer Science, University of Calgary}, abstract = {We take a new, scenario based look at evaluation in information visualization. 
Our seven scenarios, evaluating visual data analysis and reasoning, evaluating user performance, evaluating user experience, evaluating environments and work practices, evaluating communication through visualization, automated evaluation of visualizations, and evaluating collaborative data analysis, were derived through an extensive literature review of over 800 visualization publications. These scenarios are described through their goals, the types of questions they embody and illustrated through example studies. Through this broad survey and the distillation of these scenarios we make two contributions. One, we encapsulate the current practices in the information visualization research community and, two, we provide a different approach to reaching decisions about what might be the most effective evaluation of a given information visualization. For example, if the research goals or evaluative questions are known they can be used to map to specific scenarios, where practical existing examples can be considered for effective evaluation approaches.}, author = {Lam,H. and Bertini,E. and Isenberg,P. and Plaisant, Catherine and Carpendale,S.} } @conference {13643, title = {Shape Codebook based Handwritten and Machine Printed Text Zone Extraction}, booktitle = {Document Recognition and Retrieval}, year = {2011}, month = {2011/01//}, pages = {7874:1-8}, address = {San Francisco}, abstract = {We present a novel method for extracting handwritten and printed text zones from noisy document images with mixed content. We use Triple-Adjacent-Segment (TAS) based features which encode local shape characteristics of text in a consistent manner. We first construct two different codebooks of the shape features extracted from a set of handwritten and printed text documents. In the next step, we compute the normalized histogram of codewords for each segmented zone and use it to train a Support Vector Machine (SVM) classifier. Due to a codebook based approach, our method is robust to the background noise present in the image. The TAS features used are invariant to translation, scale and rotation of text. In our experimental results, we show that a pixel-weighted zone classification accuracy of 98\% can be achieved for noisy Arabic documents. Further, we demonstrate the effectiveness of our method for document page classification and show that a high precision can be achieved for machine printed documents. The proposed method is robust to the size of zones, which may contain text content at word, line or paragraph level.}, author = {Kumar,Jayant and Prasad,Rohit and Cao,Huaigu and Abd-Almageed, Wael and David Doermann and Natarajan,Prem} } @article {17909, title = {Social Snapshot: A System for Temporally Coupled Social Photography}, journal = {Computer Graphics and Applications, IEEE}, volume = {31}, year = {2011}, month = {2011/02//jan}, pages = {74 - 84}, abstract = {Social Snapshot actively acquires and reconstructs temporally dynamic data. The system enables spatiotemporal 3D photography using commodity devices, assisted by their auxiliary sensors and network functionality. It engages users, making them active rather than passive participants in data acquisition.}, keywords = {3D reconstruction; data acquisition; social computing; social photography; social sciences computing; Social Snapshot; spatiotemporal 3D photography; temporally coupled social photography}, isbn = {0272-1716}, doi = {10.1109/MCG.2010.107}, author = {Patro,R. and Ip, Cheuk Yiu and Bista,S.
and Varshney, Amitabh} } @article {15907, title = {Sparsity-motivated automatic target recognition}, journal = {Applied Optics}, volume = {50}, year = {2011}, month = {2011/04/01/}, pages = {1425 - 1433}, abstract = {We present an automatic target recognition algorithm using the recently developed theory of sparse representations and compressive sensing. We show how sparsity can be helpful for efficient utilization of data for target recognition. We verify the efficacy of the proposed algorithm in terms of the recognition rate and confusion matrices on the well known Comanche (Boeing{\textendash}Sikorsky, USA) forward-looking IR data set consisting of ten different military targets at different orientations.}, keywords = {IMAGE PROCESSING, Image recognition, algorithms and filters, pattern recognition, Vision - patterns and recognition}, doi = {10.1364/AO.50.001425}, url = {http://ao.osa.org/abstract.cfm?URI=ao-50-10-1425}, author = {Patel, Vishal M. and Nasrabadi,Nasser M. and Chellapa, Rama} } @article {12442, title = {Special Issue on Video Analysis on Resource-Limited Systems}, journal = {IEEE Transactions on Circuits and Systems for Video Technology}, volume = {21}, year = {2011}, month = {2011/10//}, pages = {1349 - 1352}, abstract = {The 17 papers in this special issue focus on resource-limited systems.}, keywords = {computational complexity, Image Enhancement, Special issues and sections, Video compression}, isbn = {1051-8215}, doi = {10.1109/TCSVT.2011.2165795}, author = {Chellapa, Rama and Cavallaro, A. and Wu,Y. and Shan, C. and Fu, Y. and Pulli, K.} } @article {19118, title = {Suppression subtractive hybridization PCR isolation of cDNAs from a Caribbean soft coral}, journal = {Electronic Journal of Biotechnology}, volume = {14}, year = {2011}, month = {2011}, pages = {8 - 9}, author = {Lopez, J.V. and Ledger, A. and Santiago-V{\'a}zquez, L.Z. and Pop, Mihai and Sommer, D.D. and Ranzer, L.K. and Feldman, R.A. and Russell, G.K.} } @conference {12441, title = {Synthesis-based recognition of low resolution faces}, booktitle = {2011 International Joint Conference on Biometrics (IJCB)}, year = {2011}, month = {2011/10/11/13}, pages = {1 - 6}, publisher = {IEEE}, organization = {IEEE}, abstract = {Recognition of low resolution face images is a challenging problem in many practical face recognition systems. Methods have been proposed in the face recognition literature for the problem when the probe is of low resolution, and a high resolution gallery is available for recognition. These methods modify the probe image such that the resultant image provides better discrimination. We formulate the problem differently by leveraging the information available in the high resolution gallery image and propose a generative approach for classifying the probe image. An important feature of our algorithm is that it can handle resolution changes along with illumination variations. The effectiveness of the proposed method is demonstrated using standard datasets and a challenging outdoor face dataset. It is shown that our method is efficient and can perform significantly better than many competitive low resolution face recognition algorithms.}, keywords = {Dictionaries, Face, face images, face recognition, face recognition literature, face recognition systems, illumination variations, image resolution, low resolution faces, Organizations, PROBES, support vector machines, synthesis based recognition}, isbn = {978-1-4577-1358-3}, doi = {10.1109/IJCB.2011.6117545}, author = {Shekhar, S.
and Patel, Vishal M. and Chellapa, Rama} } @article {17744, title = {SYSTEM AND METHOD FOR DATA MANAGEMENT IN LARGE DATA NETWORKS}, volume = {WO/2011/032077}, year = {2011}, month = {2011/03//}, abstract = {A system and method for storing an input data network, in the form of a graph, is provided. The system includes a master node and a plurality of slave nodes. The master node is operable to receive the data network in the form of a graph, the graph including a plurality of vertices connected by edges; calculate a probability of co-retrieval for each of the plurality of vertices; and assign each of the plurality of vertices to one of the plurality of compute nodes based on the calculated probability of co-retrieval. Another method and system are provided for converting a dataset into a graph-based index and storing the index on disk. Respective systems and methods of querying such data networks are also provided.}, author = {BROECHELER,M. and V.S. Subrahmanian and Pugliese, A.} } @article {16045, title = {A task taxonomy of network evolution analysis}, journal = {University of Maryland, Human-Computer Interaction Lab Tech Report HCIL-2011-09}, year = {2011}, month = {2011///}, abstract = {Visualization is a useful tool for understanding the nature of networks. The recent growth of social media requires more powerful visualization techniques beyond static network diagrams. One of the most important challenges is the visualization of temporal network evolution. In order to provide strong temporal visualization methods, we need to understand what tasks users accomplish. This study provides a taxonomy of the temporal network visualization tasks. We identify (1) the entities, (2) the properties to be visualized, and (3) the hierarchy of temporal features, which were extracted by surveying existing temporal network visualization systems. By building and examining the task taxonomy, we report which tasks have been covered so far and suggest additions for designing future visualizations. We also present example visualizations constructed using the task taxonomy for a social networking site in order to validate the quality of the taxonomy.}, author = {Ahn,JW and Plaisant, Catherine and Shneiderman, Ben} } @conference {19258, title = {Teaching cross-platform design and testing methods for embedded systems using DICE}, series = {WESE {\textquoteright}11}, year = {2011}, month = {2011}, pages = {38 - 45}, publisher = {ACM}, organization = {ACM}, abstract = {DICE (the DSPCAD Integrative Command Line Environment) is a package of utilities that facilitates efficient management of software projects. Key areas of emphasis in DICE are cross-platform operation, support for projects that integrate heterogeneous programming languages, and support for applying and integrating different kinds of design and testing methodologies. The package is being developed at the University of Maryland to facilitate the research and teaching of methods for implementation, testing, evolution, and revision of engineering software. The platform- and language-independent focus of DICE makes it an effective vehicle for teaching high-productivity, high-reliability methods for design and implementation of embedded systems for a variety of courses.
In this paper, we provide an overview of features of DICE --- particularly as they relate to testing driven design practices --- that are useful in embedded systems education, and discuss examples and experiences of applying the tool in courses at the University of Maryland aimed at diverse groups of students --- undergraduate programming concepts for engineers, graduate VLSI architectures (aimed at research-oriented students), and graduate FPGA system design (aimed at professional Master{\textquoteright}s students).}, isbn = {978-1-4503-1046-8}, url = {http://doi.acm.org/10.1145/2077370.2077376}, author = {Bhattacharyya, Shuvra S. and Plishker,William and Gupta, Ayush and Chung-Ching Shen} } @inbook {16046, title = {Temporal Visualization of Social Network Dynamics: Prototypes for Nation of Neighbors}, booktitle = {Social Computing, Behavioral-Cultural Modeling and Prediction}, series = {Lecture Notes in Computer Science}, volume = {6589}, year = {2011}, month = {2011///}, pages = {309 - 316}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Information visualization is a powerful tool for analyzing the dynamic nature of social communities. Using Nation of Neighbors community network as a testbed, we propose five principles of implementing temporal visualizations for social networks and present two research prototypes: NodeXL and TempoVis. Three different states are defined in order to visualize the temporal changes of social networks. We designed the prototypes to show the benefits of the proposed ideas by letting users interactively explore temporal changes of social networks.}, isbn = {978-3-642-19655-3}, url = {http://dx.doi.org/10.1007/978-3-642-19656-0_43}, author = {Ahn,Jae-wook and Taieb-Maimon,Meirav and Sopan,Awalin and Plaisant, Catherine and Shneiderman, Ben}, editor = {Salerno,John and Yang,Shanchieh and Nau, Dana S. and Chai,Sun-Ki} } @article {15450, title = {Towards incremental component compatibility testing}, journal = {Proceedings of the 14th international ACM Sigsoft symposium on Component based software engineering, CBSE}, volume = {11}, year = {2011}, month = {2011///}, pages = {119 - 128}, abstract = {Software components are increasingly assembled from other components. Each component may further depend on others, and each may have multiple active versions. The total number of configurations{\textemdash}combinations of components and their versions{\textemdash}deployed by end users can be very large. Component developers, therefore, spend considerable time and effort doing compatibility testing {\textendash} determining whether their components can be built correctly for all deployed configurations. In previous work we developed Rachet to support large-scale compatibility testing of components. In this paper, we describe and evaluate methods to enable Rachet to perform incremental compatibility testing. We describe algorithms to compute differences in component compatibilities between current and previous component builds, a formal test adequacy criterion based on covering the differences, and cache-aware configuration sampling and testing methods that attempt to reuse effort from previous testing sessions. We evaluate our approach using the 5-year evolution history of a scientific middleware component.
Our results show significant performance improvements over Rachet{\textquoteright}s previous retest-all approach, making the process of compatibility testing practical for evolving components.}, author = {Yoon,I. and Sussman, Alan and Memon, Atif M. and Porter, Adam} } @conference {17442, title = {TreeVersity: Comparing tree structures by topology and node{\textquoteright}s attributes differences}, booktitle = {2011 IEEE Conference on Visual Analytics Science and Technology (VAST)}, year = {2011}, month = {2011/10/23/28}, pages = {275 - 276}, publisher = {IEEE}, organization = {IEEE}, abstract = {It is common to classify data in hierarchies; they provide a comprehensible way of understanding large amounts of data. From budgets to organizational charts or even the stock market, trees are everywhere and people find them easy to use. However, when analysts need to compare two versions of the same tree structure, or two related taxonomies, the task is not so easy. Much work has been done on this topic, but almost all of it has been restricted to comparing the trees either by topology or by the node attribute values. With this project we are proposing TreeVersity, a framework for comparing tree structures, both by structural changes and by differences in the node attributes. This paper is based on our previous work on comparing traffic agencies using LifeFlow [1, 2] and on a first prototype of TreeVersity.}, keywords = {Computer science, data classification, Data visualization, Educational institutions, hierarchy, Image color analysis, LifeFlow, node attributes differences, Pattern classification, structural changes, Topology, topology attributes differences, traffic agencies, tree structures comparison, trees (mathematics), TreeVersity, Vegetation, Visualization}, isbn = {978-1-4673-0015-5}, doi = {10.1109/VAST.2011.6102471}, author = {Gomez,J.A.G. and Buck-Coleman,A. and Plaisant, Catherine and Shneiderman, Ben} }
@article {17462, title = {Usability and Accessibility in Consumer Health Informatics: Current Trends and Future Challenges}, journal = {American Journal of Preventive Medicine}, volume = {40}, year = {2011}, month = {2011/05//}, pages = {S187 - S197}, abstract = {It is a truism that, for innovative eHealth systems to have true value and impact, they must first and foremost be usable and accessible by clinicians, consumers, and other stakeholders. In this paper, current trends and future challenges in the usability and accessibility of consumer health informatics will be described. Consumer expectations of their healthcare providers and healthcare records in this new era of consumer-directed care will be explored, and innovative visualizations, assistive technologies, and other ways that healthcare information is currently being provided and/or shared will be described. Challenges for ensuring the usability of current and future systems will also be discussed. An innovative model for conducting systematic, timely, user-centered research on consumer-facing websites at the National Cancer Institute (NCI) and the ongoing efforts at the National Institute of Standards and Technology (NIST) to promote health information technology (HIT) usability standards and evaluation criteria will also be presented.}, isbn = {0749-3797}, doi = {10.1016/j.amepre.2011.01.009}, url = {http://www.sciencedirect.com/science/article/pii/S0749379711000869}, author = {Goldberg,Larry and Lide,Bettijoyce and Lowry,Svetlana and Massett,Holly A. and O{\textquoteright}Connell,Trisha and Preece,Jennifer and Quesenbery,Whitney and Shneiderman, Ben} } @article {14379, title = {Using classifier cascades for scalable e-mail classification}, journal = {Collaboration, Electronic Messaging, Anti-Abuse and Spam Conference, ACM International Conference Proceedings Series}, year = {2011}, month = {2011///}, abstract = {In many real-world scenarios, we must make judgments in the presence of computational constraints. One common computational constraint arises when the features used to make a judgment each have differing acquisition costs, but there is a fixed total budget for a set of judgments. Particularly when there are a large number of classifications that must be made in real time, an intelligent strategy for optimizing accuracy versus computational costs is essential. E-mail classification is an area where accurate and timely results require such a trade-off. We identify two scenarios where intelligent feature acquisition can improve classifier performance. In granular classification we seek to classify e-mails with increasingly specific labels structured in a hierarchy, where each level of the hierarchy requires a different trade-off between cost and accuracy. In load-sensitive classification, we classify a set of instances within an arbitrary total budget for acquiring features. Our method, Adaptive Classifier Cascades (ACC), designs a policy to combine a series of base classifiers with increasing computational costs given a desired trade-off between cost and accuracy. Using this method, we learn a relationship between feature costs and label hierarchies, for granular classification and cost budgets, for load-sensitive classification.
We evaluate our method on real-world e-mail datasets with realistic estimates of feature acquisition cost, and we demonstrate superior results when compared to baseline classifiers that do not have a granular, cost-sensitive feature acquisition policy.}, author = {Pujara,J. and Daum{\'e}, Hal and Getoor, Lise} } @article {19264, title = {Using the DSPCAD Integrative Command-Line Environment: User{\textquoteright}s Guide for DICE Version 1.1}, year = {2011}, month = {2011}, abstract = {This document provides instructions on setting up, starting up, and building DICE and its key companion packages, dicemin and dicelang. This installation process is based on a general set of conventions, which we refer to as the DICE organizational conventions, for software packages. The DICE organizational conventions are specified in this report. These conventions are applied in DICE, dicemin, and dicelang, and also to other software packages that are developed in the Maryland DSPCAD Research Group.}, keywords = {Technical Report}, isbn = {UMIACS-TR-2011-13}, url = {http://drum.lib.umd.edu//handle/1903/11804}, author = {Bhattacharyya, Shuvra S. and Chung-Ching Shen and Plishker,William and Sane, Nimish and Zaki, George} } @conference {12445, title = {Variable remapping of images from very different sources}, booktitle = {2011 18th IEEE International Conference on Image Processing (ICIP)}, year = {2011}, month = {2011/09/11/14}, pages = {1501 - 1504}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present a system which registers image sequences acquired by very different sources, so that multiple views could be transformed to the same coordinate system. This enables the functionality of automatic object identification and confirmation across views and platforms. The capability of the system comes from three ingredients: 1) image context enlargement through temporal integration; 2) robust motion estimation using the G-RANSAC framework with a relaxed correspondence criterion; 3) constrained motion estimation within the G-RANSAC framework. The proposed system has worked successfully on thousands of frames from multiple collections with significant variations in scale and resolution.}, keywords = {automatic object identification, Buildings, CAMERAS, Conferences, constrained motion estimation, coordinates system, Estimation, G-RANSAC framework, image context enlargement, Image Enhancement, image registration, image sequence registration, Image sequences, Motion estimation, Robustness, temporal integration, variable image remapping}, isbn = {978-1-4577-1304-0}, doi = {10.1109/ICIP.2011.6115729}, author = {Wei Zhang and Yanlin Guo and Meth, R. and Sokoloff, H. and Pope, A. and Strat, T. and Chellappa, Rama} } @conference {19259, title = {Vectorization and mapping of software defined radio applications on heterogeneous multi-processor platforms}, booktitle = {2011 IEEE Workshop on Signal Processing Systems (SiPS)}, year = {2011}, month = {2011}, pages = {31 - 36}, abstract = {A variety of multiprocessor architectures have proliferated even for off-the-shelf computing platforms. To improve performance and productivity for common heterogeneous systems, we have developed a workflow to generate efficient solutions. By starting with a formal description of an application and the mapping problem we are able to generate a range of designs that efficiently trade off latency and throughput.
In this approach, efficient utilization of SIMD cores is achieved by applying extensive block processing in conjunction with efficient mapping and scheduling. We demonstrate our approach through an integration into the GNU Radio environment for software defined radio system design.}, keywords = {Benchmark testing, block processing, Design methodology, formal description, GNU radio environment, Graphic Processor Unit, Graphics processing unit, heterogeneous multiprocessor platform, mapping problem, Multicore processing, Multiprocessing systems, multiprocessor architecture, multiprocessor scheduling, operating systems (computers), PARALLEL PROCESSING, Processor scheduling, Schedules, SIMD core, Software Defined Radio, software defined radio system design, software radio, telecommunication computing, Throughput, vectorization, workflow}, author = {Zaki, G.F. and Plishker,W. and Bhattacharyya, Shuvra S. and Clancy, C. and Kuykendall, J.} } @article {16035, title = {Visualizing Missing Data: Graph Interpretation User Study}, journal = {IFIP Lecture Notes in Computer Science (LNCS)}, volume = {3585}, year = {2011}, month = {2011/03/31/}, pages = {861 - 872}, abstract = {Visualizing Missing Data: Graph Interpretation User Study}, url = {http://dl.ifip.org/index.php/lncs/article/view/25927}, author = {Drizd,Terence and Eaton,Cyntrica and Plaisant, Catherine} } @article {15262, title = {Achieving anonymity via clustering}, journal = {ACM Trans. Algorithms}, volume = {6}, year = {2010}, month = {2010/07//}, pages = {49:1 - 49:19}, abstract = {Publishing data for analysis from a table containing personal records, while maintaining individual privacy, is a problem of increasing importance today. The traditional approach of deidentifying records is to remove identifying fields such as social security number, name, etc. However, recent research has shown that a large fraction of the U.S. population can be identified using nonkey attributes (called quasi-identifiers) such as date of birth, gender, and zip code. The k-anonymity model protects privacy by requiring that nonkey attributes that leak information are suppressed or generalized so that, for every record in the modified table, there are at least k-1 other records having exactly the same values for quasi-identifiers. We propose a new method for anonymizing data records, where quasi-identifiers of data records are first clustered and then cluster centers are published. To ensure privacy of the data records, we impose the constraint that each cluster must contain no fewer than a prespecified number of data records. This technique is more general since we have a much larger choice for cluster centers than k-anonymity. In many cases, it lets us release a lot more information without compromising privacy. We also provide constant factor approximation algorithms to come up with such a clustering. This is the first set of algorithms for the anonymization problem where the performance is independent of the anonymity parameter k. We further observe that a few outlier points can significantly increase the cost of anonymization. Hence, we extend our algorithms to allow an ε fraction of points to remain unclustered, that is, deleted from the anonymized publication. Thus, by not releasing a small fraction of the database records, we can ensure that the data published for analysis has less distortion and hence is more useful.
Our approximation algorithms for new clustering objectives are of independent interest and could be applicable in other clustering scenarios as well.}, keywords = {anonymity, Approximation algorithms, clustering, privacy}, isbn = {1549-6325}, doi = {10.1145/1798596.1798602}, url = {http://doi.acm.org/10.1145/1798596.1798602}, author = {Aggarwal,Gagan and Panigrahy,Rina and Feder,Tom{\'a}s and Thomas,Dilys and Kenthapadi,Krishnaram and Khuller, Samir and Zhu,An} } @article {13094, title = {Advanced tracking systems: computational approaches to be introduced to new series}, journal = {Augmented vision \& reality}, volume = {1}, year = {2010}, month = {2010///}, abstract = {Modern visual tracking systems implement a computational process that is often divided into several modules such as localization, tracking, recognition, behavior analysis and classification of events. This book will focus on recent advances in computational approaches for detection and tracking of human body, road boundaries and lane markers as well as on recognition of human activities, drowsiness and distraction state. This book is composed of seven distinct parts. Part I covers people localization algorithms in video sequences. Part II describes successful approaches for tracking people and body parts. The third part focuses on tracking of pedestrian and vehicles in outdoor images. Part IV describes recent methods to track lane markers and road boundaries. In part V, methods to track head, hand and facial features are reviewed. The last two parts cover the topics of automatic recognition and classification of activity, gesture, behavior, drowsiness and visual distraction state of humans.}, author = {HAMMOUD,R. and Porikli, F. and Davis, Larry S.} } @inbook {12503, title = {Aligning Spatio-Temporal Signals on a Special Manifold}, booktitle = {Computer Vision {\textendash} ECCV 2010}, series = {Lecture Notes in Computer Science}, volume = {6315}, year = {2010}, month = {2010///}, pages = {547 - 560}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We investigate the spatio-temporal alignment of videos or features/signals extracted from them. Specifically, we formally define an alignment manifold and formulate the alignment problem as an optimization procedure on this non-linear space by exploiting its intrinsic geometry. We focus our attention on semantically meaningful videos or signals, e.g., those describing or capturing human motion or activities, and propose a new formalism for temporal alignment accounting for executing rate variations among realizations of the same video event. By construction, we address this static and deterministic alignment task in a dynamic and stochastic manner: we regard the search for optimal alignment parameters as a recursive state estimation problem for a particular dynamic system evolving on the alignment manifold. Consequently, a Sequential Importance Sampling iteration on the alignment manifold is designed for effective and efficient alignment.
We demonstrate the performance on several types of input data that arise in vision problems.}, isbn = {978-3-642-15554-3}, url = {http://dx.doi.org/10.1007/978-3-642-15555-0_40}, author = {Ruonan Li and Chellappa, Rama}, editor = {Daniilidis,Kostas and Maragos,Petros and Paragios,Nikos} } @article {16242, title = {Alignment and clustering of phylogenetic markers - implications for microbial diversity studies}, journal = {BMC Bioinformatics}, volume = {11}, year = {2010}, month = {2010/03/24/}, pages = {152 - 152}, abstract = {Molecular studies of microbial diversity have provided many insights into the bacterial communities inhabiting the human body and the environment. A common first step in such studies is a survey of conserved marker genes (primarily 16S rRNA) to characterize the taxonomic composition and diversity of these communities. To date, however, there exists significant variability in analysis methods employed in these studies.}, isbn = {1471-2105}, doi = {10.1186/1471-2105-11-152}, url = {http://www.biomedcentral.com/1471-2105/11/152}, author = {White,James R and Navlakha,Saket and Nagarajan,Niranjan and Ghodsi,Mohammad-Reza and Kingsford, Carl and Pop, Mihai} } @article {19627, title = {Applications of Parameterized st-Orientations}, journal = {Journal of Graph Algorithms and Applications}, volume = {14}, year = {2010}, month = {2010///}, pages = {337 - 365}, isbn = {1526-1719}, url = {http://emis.um.ac.ir/journals/JGAA/getPaper-96.html?id=212}, author = {Charalampos Papamanthou and Tollis, Ioannis G.} } @inbook {12504, title = {Articulation-Invariant Representation of Non-planar Shapes}, booktitle = {Computer Vision {\textendash} ECCV 2010}, series = {Lecture Notes in Computer Science}, volume = {6313}, year = {2010}, month = {2010///}, pages = {286 - 299}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Given a set of points corresponding to a 2D projection of a non-planar shape, we would like to obtain a representation invariant to articulations (under no self-occlusions). It is a challenging problem since we need to account for the changes in 2D shape due to 3D articulations, viewpoint variations, as well as the varying effects of imaging process on different regions of the shape due to its non-planarity. By modeling an articulating shape as a combination of approximate convex parts connected by non-convex junctions, we propose to preserve distances between a pair of points by (i) estimating the parts of the shape through approximate convex decomposition, by introducing a robust measure of convexity and (ii) performing part-wise affine normalization by assuming a weak perspective camera model, and then relating the points using the inner distance which is insensitive to planar articulations.
We demonstrate the effectiveness of our representation on a dataset with non-planar articulations, and on standard shape retrieval datasets like MPEG-7.}, isbn = {978-3-642-15557-4}, url = {http://dx.doi.org/10.1007/978-3-642-15558-1_21}, author = {Gopalan,Raghuraman and Turaga,Pavan and Chellappa, Rama}, editor = {Daniilidis,Kostas and Maragos,Petros and Paragios,Nikos} } @article {16245, title = {Assembly complexity of prokaryotic genomes using short reads}, journal = {BMC Bioinformatics}, volume = {11}, year = {2010}, month = {2010/01/12/}, pages = {21 - 21}, abstract = {De Bruijn graphs are a theoretical framework underlying several modern genome assembly programs, especially those that deal with very short reads. We describe an application of de Bruijn graphs to analyze the global repeat structure of prokaryotic genomes.}, isbn = {1471-2105}, doi = {10.1186/1471-2105-11-21}, url = {http://www.biomedcentral.com/1471-2105/11/21}, author = {Kingsford, Carl and Schatz,Michael C and Pop, Mihai} } @conference {19284, title = {Automated generation of an efficient MPEG-4 Reconfigurable Video Coding decoder implementation}, booktitle = {2010 Conference on Design and Architectures for Signal and Image Processing (DASIP)}, year = {2010}, month = {2010}, pages = {265 - 272}, abstract = {This paper proposes an automatic design flow from user-friendly design to efficient implementation of video processing systems. This design flow starts with the use of coarse-grain dataflow representations based on the CAL language, which is a complete language for dataflow programming of embedded systems. Our approach integrates previously developed techniques for detecting synchronous dataflow (SDF) regions within larger CAL networks, and exploiting the static structure of such regions using analysis tools in The Dataflow interchange format Package (TDP). Using a new XML format that we have developed to exchange dataflow information between different dataflow tools, we explore systematic implementation of signal processing systems using CAL, SDF-like region detection, TDP-based static scheduling, and CAL-to-C (CAL2C) translation. Our approach, which is a novel integration of three complementary dataflow tools - the CAL parser, TDP, and CAL2C - is demonstrated on an MPEG Reconfigurable Video Coding (RVC) decoder.}, keywords = {automated generation, automatic design flow, CAL language, CAL networks, CAL-to-C translation, CAL2C translation, coarse-grain dataflow representations, Computational modeling, data flow computing, dataflow information, Dataflow programming, decoding, Digital signal processing, Libraries, MPEG-4 reconfigurable video coding decoder implementation, parallel languages, SDF detection, synchronous dataflow detection, TDP, TDP-based static scheduling, The Dataflow interchange format Package, Transform coding, user-friendly design, video coding, video processing systems, XML, XML format}, author = {Gu, Ruirui and Piat, J. and Raulet, M. and Janneck, J.W. and Bhattacharyya, Shuvra S.} } @conference {12471, title = {Automatic target recognition based on simultaneous sparse representation}, booktitle = {Image Processing (ICIP), 2010 17th IEEE International Conference on}, year = {2010}, month = {2010/09//}, pages = {1377 - 1380}, abstract = {In this paper, an automatic target recognition algorithm is presented based on a framework for learning dictionaries for simultaneous sparse signal representation and feature extraction.
The dictionary learning algorithm is based on class supervised simultaneous orthogonal matching pursuit while a matching pursuit-based similarity measure is used for classification. We show how the proposed framework can be helpful for efficient utilization of data, with the possibility of developing real-time, robust target classification. We verify the efficacy of the proposed algorithm using confusion matrices on the well known Comanche forward-looking infrared data set consisting of ten different military targets at different orientations.}, keywords = {automatic target recognition, simultaneous sparse representation, class supervised simultaneous orthogonal matching pursuit, dictionary learning (artificial intelligence), matching pursuit similarity measure, feature extraction, image classification, iterative methods, confusion matrix, forward-looking infrared data set, military targets, object recognition, target tracking}, doi = {10.1109/ICIP.2010.5652306}, author = {Patel, Vishal M. and Nasrabadi,N.M. and Chellappa, Rama} } @conference {18550, title = {Behavioral clustering of HTTP-based malware and signature generation using malicious network traces}, booktitle = {Proceedings of the 7th USENIX conference on Networked systems design and implementation}, series = {NSDI{\textquoteright}10}, year = {2010}, month = {2010///}, pages = {26 - 26}, publisher = {USENIX Association}, organization = {USENIX Association}, address = {Berkeley, CA, USA}, abstract = {We present a novel network-level behavioral malware clustering system. We focus on analyzing the structural similarities among malicious HTTP traffic traces generated by executing HTTP-based malware. Our work is motivated by the need to provide quality input to algorithms that automatically generate network signatures. Accordingly, we define similarity metrics among HTTP traces and develop our system so that the resulting clusters can yield high-quality malware signatures. We implemented a proof-of-concept version of our network-level malware clustering system and performed experiments with more than 25,000 distinct malware samples. Results from our evaluation, which includes real-world deployment, confirm the effectiveness of the proposed clustering system and show that our approach can aid the process of automatically extracting network signatures for detecting HTTP traffic generated by malware-compromised machines.}, url = {http://dl.acm.org/citation.cfm?id=1855711.1855737}, author = {Perdisci,Roberto and Lee,Wenke and Feamster, Nick} } @article {18499, title = {Building a dynamic reputation system for DNS}, journal = {19th Usenix Security Symposium}, year = {2010}, month = {2010///}, abstract = {The Domain Name System (DNS) is an essential protocol used by both legitimate Internet applications and cyber attacks. For example, botnets rely on DNS to support agile command and control infrastructures. An effective way to disrupt these attacks is to place malicious domains on a {\textquotedblleft}blocklist{\textquotedblright} (or {\textquotedblleft}blacklist{\textquotedblright}) or to add a filtering rule in a firewall or network intrusion detection system. To evade such security countermeasures, attackers have used DNS agility, e.g., by using new domains daily to evade static blacklists and firewalls. In this paper we propose Notos, a dynamic reputation system for DNS.
The premise of this system is that malicious, agile use of DNS has unique characteristics and can be distinguished from legitimate, professionally provisioned DNS services. Notos uses passive DNS query data and analyzes the network and zone features of domains. It builds models of known legitimate domains and malicious domains, and uses these models to compute a reputation score for a new domain indicative of whether the domain is malicious or legitimate. We have evaluated Notos in a large ISP{\textquoteright}s network with DNS traffic from 1.4 million users. Our results show that Notos can identify malicious domains with high accuracy (true positive rate of 96.8\%) and low false positive rate (0.38\%), and can identify these domains weeks or even months before they appear in public blacklists.}, author = {Antonakakis,M. and Perdisci,R. and Dagon,D. and Lee,W. and Feamster, Nick} } @conference {17572, title = {Cellular traffic offloading through opportunistic communications: a case study}, booktitle = {Proceedings of the 5th ACM workshop on Challenged networks}, series = {CHANTS {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {31 - 38}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Due to the increasing popularity of various applications for smartphones, 3G networks are currently overloaded by mobile data traffic. Offloading cellular traffic through opportunistic communications is a promising solution to partially solve this problem, because there is no monetary cost for it. As a case study, we investigate the target-set selection problem for information delivery in the emerging Mobile Social Networks (MoSoNets). We propose to exploit opportunistic communications to facilitate the information dissemination and thus reduce the amount of cellular traffic. In particular, we study how to select the target set with only k users, such that we can minimize the cellular data traffic. In this scenario, initially the content service providers deliver information over cellular networks to only users in the target set. Then through opportunistic communications, target-users will further propagate the information among all the subscribed users. Finally, service providers will send the information to users who fail to receive it before the delivery deadline (i.e., delay-tolerance threshold). We propose three algorithms, called Greedy, Heuristic, and Random, for this problem and evaluate their performance through an extensive trace-driven simulation study. The simulation results verify the efficiency of these algorithms for both synthetic and real-world mobility traces. For example, the Heuristic algorithm can offload cellular traffic by up to 73.66\% for a real-world mobility trace.}, keywords = {cellular traffic offloading, mobile social networks, opportunistic communications, target-set selection}, isbn = {978-1-4503-0139-8}, doi = {10.1145/1859934.1859943}, url = {http://doi.acm.org/10.1145/1859934.1859943}, author = {Han,Bo and Hui,Pan and Kumar,V. S. Anil and Marathe,Madhav V. and Guanhong Pei and Srinivasan, Aravind} } @article {15326, title = {Coherent turbulent motions in a Mach 3 boundary layer}, journal = {Bulletin of the American Physical Society}, volume = {55}, year = {2010}, month = {2010///}, author = {Beekman,I. and Kan,Y. C. and Priebe,S.
and Martin, M.P.} } @conference {15457, title = {Community-based, collaborative testing and analysis}, booktitle = {Proceedings of the FSE/SDP workshop on Future of software engineering research}, series = {FoSER {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {239 - 244}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This article proposes a research agenda aimed at enabling optimized testing and analysis processes and tools to support component-based software development communities. We hypothesize that de facto communities---sets of projects that provide, maintain and integrate many shared infrastructure components---are commonplace. Currently, community members, often unknown to each other, tend to work in isolation, duplicating work, failing to learn from each other{\textquoteright}s effort, and missing opportunities to efficiently improve the common infrastructure. We further hypothesize that as software integration continues to become the predominant mode of software development, there will be increasing value in tools and techniques that empower these communities to coordinate and optimize their development efforts, and to generate and broadly share information. Such tools and techniques will greatly improve the robustness, quality and usability of the common infrastructure which, in turn, will greatly reduce the time and effort needed to produce and use the end systems that are the true goal of the entire community.}, keywords = {component-based software development communities, testing and analysis}, isbn = {978-1-4503-0427-6}, doi = {10.1145/1882362.1882412}, url = {http://doi.acm.org/10.1145/1882362.1882412}, author = {Memon, Atif M. and Porter, Adam and Sussman, Alan} } @article {15912, title = {Compressed Synthetic Aperture Radar}, journal = {IEEE Journal of Selected Topics in Signal Processing}, volume = {4}, year = {2010}, month = {2010/04//}, pages = {244 - 254}, abstract = {In this paper, we introduce a new synthetic aperture radar (SAR) imaging modality which can provide a high-resolution map of the spatial distribution of targets and terrain using a significantly reduced number of needed transmitted and/or received electromagnetic waveforms. This new imaging scheme requires no new hardware components and allows the aperture to be compressed. It also presents many new applications and advantages which include strong resistance to countermeasures and interception, imaging much wider swaths and reduced on-board storage requirements.}, keywords = {Compressed sensing, compressed sensing (CS), compressive sensing, electromagnetic waveforms, image resolution, radar imaging, SAR imaging modality, synthetic aperture radar, Synthetic aperture radar (SAR), targets spatial distribution}, isbn = {1932-4553}, doi = {10.1109/JSTSP.2009.2039181}, author = {Patel, Vishal M. and Easley,G. R and Healy,D. M and Chellappa, Rama} } @inbook {12500, title = {Compressive Acquisition of Dynamic Scenes}, booktitle = {Computer Vision {\textendash} ECCV 2010}, series = {Lecture Notes in Computer Science}, volume = {6311}, year = {2010}, month = {2010///}, pages = {129 - 142}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Compressive sensing (CS) is a new approach for the acquisition and recovery of sparse signals and images that enables sampling rates significantly below the classical Nyquist rate.
Despite significant progress in the theory and methods of CS, little headway has been made in compressive video acquisition and recovery. Video CS is complicated by the ephemeral nature of dynamic events, which makes direct extensions of standard CS imaging architectures and signal models infeasible. In this paper, we develop a new framework for video CS for dynamic textured scenes that models the evolution of the scene as a linear dynamical system (LDS). This reduces the video recovery problem to first estimating the model parameters of the LDS from compressive measurements, from which the image frames are then reconstructed. We exploit the low-dimensional dynamic parameters (the state sequence) and high-dimensional static parameters (the observation matrix) of the LDS to devise a novel compressive measurement strategy that measures only the dynamic part of the scene at each instant and accumulates measurements over time to estimate the static parameters. This enables us to lower the compressive measurement rate considerably. We validate our approach with a range of experiments including classification experiments that highlight the effectiveness of the proposed approach.}, isbn = {978-3-642-15548-2}, url = {http://dx.doi.org/10.1007/978-3-642-15549-9_10}, author = {Sankaranarayanan,Aswin and Turaga,Pavan and Baraniuk,Richard and Chellappa, Rama}, editor = {Daniilidis,Kostas and Maragos,Petros and Paragios,Nikos} } @article {16254, title = {Computational Approaches for Genome Assembly Validation}, journal = {Biological Data Mining}, year = {2010}, month = {2010///}, pages = {163 - 163}, author = {Choi,J.H. and Tang,H. and Kim,S. and Pop, Mihai} } @conference {14449, title = {Constructing folksonomies by integrating structured metadata}, booktitle = {Proceedings of the 19th international conference on World wide web}, series = {WWW {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {1165 - 1166}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Aggregating many personal hierarchies into a common taxonomy, also known as a folksonomy, presents several challenges due to its sparseness, ambiguity, noise, and inconsistency. We describe an approach to folksonomy learning based on relational clustering that addresses these challenges by exploiting structured metadata contained in personal hierarchies. Our approach clusters similar hierarchies using their structure and tag statistics, then incrementally weaves them into a deeper, bushier tree. We study folksonomy learning using social metadata extracted from the photo-sharing site Flickr. We evaluate the learned folksonomy quantitatively by automatically comparing it to a reference taxonomy created by the Open Directory Project.
Our empirical results suggest that the proposed approach improves upon the state-of-the-art folksonomy learning method.}, keywords = {collective knowledge, data mining, folksonomies}, isbn = {978-1-60558-799-8}, doi = {10.1145/1772690.1772856}, url = {http://doi.acm.org/10.1145/1772690.1772856}, author = {Plangprasopchok,Anon and Lerman,Kristina and Getoor, Lise} } @conference {14502, title = {Constructing Folksonomies by Integrating Structured Metadata with Relational Clustering}, booktitle = {Workshops at the Twenty-Fourth AAAI Conference on Artificial Intelligence}, year = {2010}, month = {2010///}, abstract = {Many social Web sites allow users to annotate the content with descriptive metadata, such as tags, and more recently also to organize content hierarchically. These types of structured metadata provide valuable evidence for learning how a community organizes knowledge. For instance, we can aggregate many personal hierarchies into a common taxonomy, also known as a folksonomy, that will aid users in visualizing and browsing social content, and also to help them in organizing their own content. However, learning from social metadata presents several challenges: sparseness, ambiguity, noise, and inconsistency. We describe an approach to folksonomy learning based on relational clustering that addresses these challenges by exploiting structured metadata contained in personal hierarchies. Our approach clusters similar hierarchies using their structure and tag statistics, then incrementally weaves them into a deeper, bushier tree. We study folksonomy learning using social metadata extracted from the photo-sharing site Flickr. We evaluate the learned folksonomy quantitatively by automatically comparing it to a reference taxonomy. Our empirical results suggest that the proposed framework, which addresses the challenges listed above, improves on existing folksonomy learning methods.}, author = {Plangprasopchok,A. and Lerman,K. and Getoor, Lise} } @article {14509, title = {Context-Aware Query Recommendations}, volume = {12/408,726}, year = {2010}, month = {2010/09/23/}, abstract = {Described is a search-related technology in which context information regarding a user{\textquoteright}s prior search actions is used in making query recommendations for a current user action, such as a query or click. To determine whether each set or subset of context information is relevant to the user action, data obtained from a query log is evaluated. More particularly, a query transition (query-query) graph and a query click (query-URL) graph are extracted from the query log; vectors are computed for the current action and each context/sub-context and evaluated against vectors in the graphs to determine current action-to-context similarity.
Also described is using similar context to provide the query recommendations, using parameters to control the similarity strictness, and/or whether more recent context information is more relevant than less recent context information, and using context information to distinguish between user sessions.}, url = {http://www.google.com/patents?id=nubWAAAAEBAJ}, author = {Ntoulas,Alexandros and Hwang,Heasoo and Getoor, Lise and Paparizos,Stelios and Lauw,Hady Wirawan}, editor = {Microsoft Corporation} } @conference {17747, title = {COSI: Cloud Oriented Subgraph Identification in Massive Social Networks}, booktitle = {Advances in Social Networks Analysis and Mining (ASONAM), 2010 International Conference on}, year = {2010}, month = {2010/08//}, pages = {248 - 255}, abstract = {Subgraph matching is a key operation on graph data. Social network (SN) providers may want to find all subgraphs within their social network that match certain query graph patterns. Unfortunately, subgraph matching is NP-complete, making its application to massive SNs a major challenge. Past work has shown how to implement subgraph matching on a single processor when the graph has 10-25M edges. In this paper, we show how to use cloud computing in conjunction with such existing single processor methods to efficiently match complex subgraphs on graphs as large as 778M edges. A cloud consists of one master compute node and k slave compute nodes. We first develop a probabilistic method to estimate probabilities that a vertex will be retrieved by a random query and that a pair of vertices will be successively retrieved by a random query. We use these probability estimates to define edge weights in an SN and to compute minimal edge cuts to partition the graph amongst k slave nodes. We develop algorithms for both master and slave nodes that try to minimize communication overhead. The resulting COSI system can answer complex queries over real-world SN data containing over 778M edges very efficiently.}, keywords = {cloud computing, subgraph matching, social networks (online), graph data handling, graph theory, query answering, query processing, pattern matching, probability estimation, edge weight, NP-complete, computational complexity, Internet}, doi = {10.1109/ASONAM.2010.80}, author = {Bröcheler,M. and Pugliese, A. and V.S.
Subrahmanian} } @article {17045, title = {Cyberinfrastructure for Social Action on National Priorities}, journal = {Computer}, volume = {43}, year = {2010}, month = {2010/11//}, pages = {20 - 21}, abstract = {Extensive research is needed to build upon currently used media and tools to foster wider participation, address national priorities, and deal with potential dangers associated with technology-mediated social participation.}, keywords = {Collaborative tools, Peer to peer computing, Public policy, Research initiatives, Social network services, Special issues and sections, Technology-mediated social participation}, isbn = {0018-9162}, doi = {10.1109/MC.2010.315}, author = {Pirolli,Peter and Preece,Jenny and Shneiderman, Ben} } @article {18828, title = {Developing a Stochastic Dynamic Programming Framework for Optical Tweezer-Based Automated Particle Transport Operations}, journal = {Automation Science and Engineering, IEEE Transactions on}, volume = {7}, year = {2010}, month = {2010/04//}, pages = {218 - 227}, abstract = {Automated particle transport using optical tweezers requires the use of motion planning to move the particle while avoiding collisions with randomly moving obstacles. This paper describes a stochastic dynamic programming based motion planning framework developed by modifying the discrete version of an infinite-horizon partially observable Markov decision process algorithm. Sample trajectories generated by this algorithm are presented to highlight effectiveness in crowded scenes and flexibility. The algorithm is tested using silica beads in a holographic tweezer set-up and data obtained from the physical experiments are reported to validate various aspects of the planning simulation framework. This framework is then used to evaluate the performance of the algorithm under a variety of operating conditions.}, keywords = {holographic tweezer set-up, holography, infinite-horizon partially observable Markov decision process algorithm, Markov processes, motion planning framework, optical tweezer-based automated particle transport operations, optical tweezers, radiation pressure, silica beads, stochastic dynamic programming framework, stochastic programming}, isbn = {1545-5955}, doi = {10.1109/TASE.2009.2026056}, author = {Banerjee,A. G. and Pomerance,A. and Losert,W. and Gupta,S.K.} } @article {12876, title = {Diversity and distribution of cholix toxin, a novel ADP-ribosylating factor from Vibrio cholerae}, journal = {Environmental Microbiology Reports}, volume = {2}, year = {2010}, month = {2010/02/08/}, pages = {198 - 207}, abstract = {Non-toxigenic non-O1, non-O139 Vibrio cholerae strains isolated from both environmental and clinical settings carry a suite of virulence factors aside from cholera toxin. Among V. cholerae strains isolated from coastal waters of southern California, this includes cholix toxin, an ADP-ribosylating factor that is capable of halting protein synthesis in eukaryotic cells. The prevalence of the gene encoding cholix toxin, chxA, was assessed among a collection of 155 diverse V. cholerae strains originating from both clinical and environmental settings in Bangladesh and Mexico and other countries around the globe. The chxA gene was present in 47\% of 83 non-O1, non-O139 strains and 16\% of 72 O1/O139 strains screened as part of this study. A total of 86 chxA gene sequences were obtained, and phylogenetic analysis revealed that they fall into two distinct clades. 
These two clades were also observed in the phylogenies of several housekeeping genes, suggesting that the divergence observed in chxA extends to other regions of the V. cholerae genome, and most likely has arisen from vertical descent rather than horizontal transfer. Our results clearly indicate that ChxA is a major toxin of V. cholerae with a worldwide distribution that is preferentially associated with non-pandemic strains.}, isbn = {1758-2229}, doi = {10.1111/j.1758-2229.2010.00139.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1758-2229.2010.00139.x/abstract?userIsAuthenticated=false\&deniedAccessCustomisedMessage=}, author = {Purdy,Alexandra E. and Balch,Deborah and Liz{\'a}rraga-Partida,Marcial Leonardo and Islam,Mohammad Sirajul and Martinez-Urtaza,Jaime and Huq,Anwar and Rita R Colwell and Bartlett,Douglas H.} } @conference {15621, title = {A dynamic data structure for approximate range searching}, booktitle = {Proceedings of the 2010 annual symposium on Computational geometry}, series = {SoCG {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {247 - 256}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In this paper, we introduce a simple, randomized dynamic data structure for storing multidimensional point sets, called a quadtreap. This data structure is a randomized, balanced variant of a quadtree data structure. In particular, it defines a hierarchical decomposition of space into cells, which are based on hyperrectangles of bounded aspect ratio, each of constant combinatorial complexity. It can be viewed as a multidimensional generalization of the treap data structure of Seidel and Aragon. When inserted, points are assigned random priorities, and the tree is restructured through rotations as if the points had been inserted in priority order. In any fixed dimension d, we show it is possible to store a set of n points in a quadtreap of space O(n). The height h of the tree is O(log n) with high probability. It supports point insertion in time O(h). It supports point deletion in worst-case time O(h^2) and expected-case time O(h), averaged over the points of the tree. It can answer ε-approximate spherical range counting queries over groups and approximate nearest neighbor queries in time O(h + (1/ε)^{d-1}).}, keywords = {Approximation algorithms, dynamic data structures, geometric data structures, quadtrees, Range searching}, isbn = {978-1-4503-0016-2}, doi = {10.1145/1810959.1811002}, url = {http://doi.acm.org/10.1145/1810959.1811002}, author = {Mount, Dave and Park,Eunhui} } @conference {17750, title = {Efficient Policy-Based Inconsistency Management in Relational Knowledge Bases}, booktitle = {Scalable Uncertainty Management: 4th International Conference}, volume = {6379}, year = {2010}, month = {2010///}, pages = {264 - 264}, abstract = {Real-world databases are frequently inconsistent. Even though the users who work with a body of data are far more familiar not only with that data, but also with their own jobs and the risks they are willing to take and the inferences they are willing to make from inconsistent data, most DBMSs force them to use the policy embedded in the DBMS. Inconsistency management policies (IMPs) were introduced so that users can apply policies that they deem are appropriate for data they know and understand better than anyone else.
In this paper, we develop an efficient {\textquotedblleft}cluster table{\textquotedblright} method to implement IMPs and show that using cluster tables instead of a standard DBMS index is far more efficient when less than about 3\% of a table is involved in an inconsistency (which is hopefully the case in most real world DBs), while standard DBMS indexes perform better when the amount of inconsistency in a database is over 3\%.}, doi = {10.1007/978-3-642-15951-0_26}, author = {Martinez,M. V and Parisi,F. and Pugliese, A. and Simari,G. I and V.S. Subrahmanian} } @article {19293, title = {Energy-driven distribution of signal processing applications across wireless sensor networks}, journal = {ACM Trans. Sen. Netw.}, volume = {6}, year = {2010}, month = {2010}, pages = {24:1 - 24:32}, abstract = {Wireless sensor network (WSN) applications have been studied extensively in recent years. Such applications involve resource-limited embedded sensor nodes that have small size and low power requirements. Based on the need for extended network lifetimes in WSNs in terms of energy use, the energy efficiency of computation and communication operations in the sensor nodes becomes critical. Digital Signal Processing (DSP) applications typically require intensive data processing operations and as a result are difficult to implement directly in resource-limited WSNs. In this article, we present a novel design methodology for modeling and implementing computationally intensive DSP applications applied to wireless sensor networks. This methodology explores efficient modeling techniques for DSP applications, including data sensing and processing; derives formulations of Energy-Driven Partitioning (EDP) for distributing such applications across wireless sensor networks; and develops efficient heuristic algorithms for finding partitioning results that maximize the network lifetime. To address such an energy-driven partitioning problem, this article provides a new way of aggregating data and reducing communication traffic among nodes based on application analysis. By considering low data token delivery points and the distribution of computation in the application, our approach finds energy-efficient trade-offs between data communication and computation.}, keywords = {DSP, Energy efficiency, network lifetime, Speech recognition, Wireless sensor networks}, isbn = {1550-4859}, url = {http://doi.acm.org/10.1145/1754414.1754420}, author = {Chung-Ching Shen and Plishker, William L. and Ko,Dong-Ik and Bhattacharyya, Shuvra S. and Goldsman,Neil} } @article {12880, title = {Environmental reservoirs of Vibrio cholerae and their role in cholera}, journal = {Environmental Microbiology Reports}, volume = {2}, year = {2010}, month = {2010/01/15/}, pages = {27 - 33}, abstract = {In the aquatic environment, Vibrio cholerae has been reported to be associated with a variety of living organisms, including animals with an exoskeleton of chitin, aquatic plants, protozoa, bivalves, waterbirds, as well as abiotic substrates (e.g. sediments). Most of these are well-known or putative environmental reservoirs for the bacterium, defined as places where the pathogen lives over time, with the potential to be released and to cause human infection. Environmental reservoirs also serve as V. cholerae disseminators and vectors. They can be responsible for the start of an epidemic, may be critical to cholera endemicity, and affect the evolution of pathogen virulence. 
To date, in addition to the generally recognized role of zooplankton as the largest environmental reservoir for V. cholerae, other environmental reservoirs play some role in cholera epidemiology by favouring persistence of the pathogen during inter-epidemic periods. Little is known about the ecological factors affecting V. cholerae survival in association with aquatic substrates. Studies aimed at these aspects, i.e. understanding how environmental reservoirs interact, are affected by climate, and contribute to disease epidemiology, will be useful for understanding global implications of V. cholerae and the disease cholera.}, isbn = {1758-2229}, doi = {10.1111/j.1758-2229.2009.00128.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1758-2229.2009.00128.x/abstract?userIsAuthenticated=false\&deniedAccessCustomisedMessage=}, author = {Vezzulli,Luigi and Pruzzo,Carla and Huq,Anwar and Rita R Colwell} } @article {12426, title = {Evaluation of the Interaction of Cyclin-Dependent Kinase 5 with Activator p25 and with p25-Derived Inhibitor CIP}, journal = {Journal of Computational Biology}, volume = {17}, year = {2010}, month = {2010///}, pages = {707 - 721}, author = {Cardone, Antonio and Albers,R.W. and Sriram,R.D. and Pant,H.C.} } @inbook {12995, title = {Evolutionary framework for Lepidoptera model systems}, booktitle = {Genetics and Molecular Biology of Lepidoptera}, year = {2010}, month = {2010///}, pages = {1 - 24}, publisher = {Taylor \& Francis}, organization = {Taylor \& Francis}, address = {Boca Raton}, abstract = {{\textquotedblleft}Model systems{\textquotedblright} are specific organisms upon which detailed studies have been conducted examining a fundamental biological question. If the studies are robust, their results can be extrapolated among an array of organisms that possess features in common with the subject organism. The true power of model systems lies in the ability to extrapolate these details across larger groups of organisms. In order to generalize these results, comparative studies are essential and require that model systems be placed into their evolutionary or phylogenetic context. This chapter examines model systems in the insect order Lepidoptera from the perspective of several different superfamilies. Historically, many species of Lepidoptera have been essential in the development of invaluable model systems in the fields of development biology, genetics, molecular biology, physiology, co-evolution, population dynamics, and ecology.}, author = {Roe,A. and Weller,S. and Baixeras,J. and Brown,J. W and Cummings, Michael P. and Davis,DR and Horak,M and Kawahara,A. Y and Mitter,C and Parr,C.S. and Regier,J. C and Rubinoff,D and Simonsen,TJ and Wahlberg,N and Zwick,A.}, editor = {Goldsmith,M and Marec,F} } @inbook {14196, title = {An Experimental Study of Color-Based Segmentation Algorithms Based on the Mean-Shift Concept}, booktitle = {Computer Vision {\textendash} ECCV 2010}, series = {Lecture Notes in Computer Science}, volume = {6312}, year = {2010}, month = {2010///}, pages = {506 - 519}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We point out a difference between the original mean-shift formulation of Fukunaga and Hostetler and the common variant in the computer vision community, namely whether the pairwise comparison is performed with the original or with the filtered image of the previous iteration.
This leads to a new hybrid algorithm, called Color Mean Shift, that, roughly speaking, treats color as in Fukunaga{\textquoteright}s algorithm and spatial coordinates as in Comaniciu{\textquoteright}s algorithm. We perform experiments to evaluate how different kernel functions and color spaces affect the final filtering and segmentation results, and the computational speed, using the Berkeley and Weizmann segmentation databases. We conclude that the new method gives better results than existing mean shift ones on four standard comparison measures (including the RAND and BDE measures for color images), with slightly higher running times. Overall, the new method produces segmentations comparable in quality to the ones obtained with current state-of-the-art segmentation algorithms.}, isbn = {978-3-642-15551-2}, url = {http://dx.doi.org/10.1007/978-3-642-15552-9_37}, author = {Bitsakos,K. and Ferm{\"u}ller, Cornelia and Aloimonos, J.}, editor = {Daniilidis,Kostas and Maragos,Petros and Paragios,Nikos} } @conference {16054, title = {Exploring temporal patterns with information visualization: keynote}, booktitle = {Proceedings of Graphics Interface 2010}, series = {GI {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {1 - 2}, publisher = {Canadian Information Processing Society}, organization = {Canadian Information Processing Society}, address = {Toronto, Ont., Canada}, abstract = {After an overview of visualizations to explore temporal patterns, we will focus on interfaces for discovering temporal event patterns in electronic health records. Specifying event sequence queries is challenging even for skilled computer professionals familiar with SQL. Our novel interactive search strategies allow for aligning records on important events, ranking, and filtering combined with grouping of results to find common or rare events. A second approach is to use query-by-example, in which users specify a pattern and see a similarity-ranked list of results, but the similarity measure needs to be customized for different needs. Temporal summaries allow comparisons between groups. We will discuss the methods we use to evaluate the usefulness of our interfaces through collaborations with clinicians and hospital administrators on case studies. Finally, application of the techniques to other domains will be discussed.}, keywords = {electronic health records, search interfaces}, isbn = {978-1-56881-712-5}, url = {http://dl.acm.org/citation.cfm?id=1839214.1839216}, author = {Plaisant, Catherine} } @article {12494, title = {Face Recognition by Computers and Humans}, journal = {Computer}, volume = {43}, year = {2010}, month = {2010/02//}, pages = {46 - 55}, abstract = {This article discusses how the study of how humans perceive faces can be used to help design practical systems for face recognition. Besides applications related to identification and verification {\textendash} such as access control, law enforcement, ID and licensing, and surveillance {\textendash} face recognition has also proven useful in applications such as human-computer interaction, virtual reality, database retrieval, multimedia, and computer entertainment. Continuing research into face recognition will provide scientists and engineers with many vital projects, in areas such as homeland security, human-computer interaction, and numerous consumer applications.
The areas we are considering pursuing are recognition from unconstrained video sequences, incorporating familiarity into algorithms, modeling effects of aging, and developing biologically plausible models for human face recognition ability.}, keywords = {access control;aging;computer entertainment;database retrieval;face recognition;homeland security;human computer interaction;national security;unconstrained video sequences;virtual reality}, isbn = {0018-9162}, doi = {10.1109/MC.2010.37}, author = {Chellapa, Rama and Sinha, P. and Phillips,P.J.} } @article {16264, title = {Finding Biologically Accurate Clusterings in Hierarchical Tree Decompositions Using the Variation of Information}, journal = {Journal of Computational Biology}, volume = {17}, year = {2010}, month = {2010/03//}, pages = {503 - 516}, abstract = {Hierarchical clustering is a popular method for grouping together similar elements based on a distance measure between them. In many cases, annotations for some elements are known beforehand, which can aid the clustering process. We present a novel approach for decomposing a hierarchical clustering into the clusters that optimally match a set of known annotations, as measured by the variation of information metric. Our approach is general and does not require the user to enter the number of clusters desired. We apply it to two biological domains: finding protein complexes within protein interaction networks and identifying species within metagenomic DNA samples. For these two applications, we test the quality of our clusters by using them to predict complex and species membership, respectively. We find that our approach generally outperforms the commonly used heuristic methods.}, isbn = {1066-5277, 1557-8666}, doi = {10.1089/cmb.2009.0173}, url = {http://www.liebertonline.com/doi/abs/10.1089/cmb.2009.0173}, author = {Navlakha,Saket and White,James and Nagarajan,Niranjan and Pop, Mihai and Kingsford, Carl} } @article {16265, title = {Finishing genomes with limited resources: lessons from an ensemble of microbial genomes}, journal = {BMC Genomics}, volume = {11}, year = {2010}, month = {2010/04/16/}, pages = {242 - 242}, abstract = {While new sequencing technologies have ushered in an era where microbial genomes can be easily sequenced, the goal of routinely producing high-quality draft and finished genomes in a cost-effective fashion has still remained elusive. Due to shorter read lengths and limitations in library construction protocols, shotgun sequencing and assembly based on these technologies often result in fragmented assemblies. Correspondingly, while draft assemblies can be obtained in days, finishing can take many months and hence the time and effort can only be justified for high-priority genomes and in large sequencing centers. In this work, we revisit this issue in light of our own experience in producing finished and nearly-finished genomes for a range of microbial species in a small-lab setting.
These genomes were finished with surprisingly little investment in terms of time, computational effort, and lab work, suggesting that the increased access to sequencing might also eventually lead to a greater proportion of finished genomes from small labs and genomics cores.}, isbn = {1471-2164}, doi = {10.1186/1471-2164-11-242}, url = {http://www.biomedcentral.com/1471-2164/11/242}, author = {Nagarajan,Niranjan and Cook,Christopher and Di Bonaventura,Maria Pia and Ge,Hong and Richards,Allen and Bishop-Lilly,Kimberly A and DeSalle,Robert and Read,Timothy D. and Pop, Mihai} } @inbook {19194, title = {GasSense: Appliance-Level, Single-Point Sensing of Gas Activity in the Home}, booktitle = {Pervasive Computing}, series = {Lecture Notes in Computer Science}, volume = {6030}, year = {2010}, month = {2010}, pages = {265 - 282}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper presents GasSense, a low-cost, single-point sensing solution for automatically identifying gas use down to its source (e.g., water heater, furnace, fireplace). This work adds a complementary sensing solution to the growing body of work in infrastructure-mediated sensing. GasSense analyzes the acoustic response of a home{\textquoteright}s government-mandated gas regulator, which provides the unique capability of sensing both the individual appliance at which gas is currently being consumed and an estimate of the amount of gas flow. Our approach provides a number of appealing features, including the ability to be easily and safely installed without the need for a professional. We deployed our solution in nine different homes and initial results show that GasSense has an average accuracy of 95.2\% in identifying individual appliance usage.}, isbn = {978-3-642-12653-6}, url = {http://dx.doi.org/10.1007/978-3-642-12654-3_16}, author = {Cohn,Gabe and Gupta,Sidhant and Jon Froehlich and Larson,Eric and Patel,Shwetak}, editor = {Flor{\'e}en,Patrik and Kr{\"u}ger,Antonio and Spasojevic,Mirjana} } @article {19094, title = {Genomic characterization of the Yersinia genus}, journal = {Genome biology}, volume = {11}, year = {2010}, month = {2010}, author = {Chen, P.E. and Cook, C. and Stewart, A.C. and Nagarajan,N. and Sommer, D.D. and Pop, Mihai and Thomason, B. and Thomason, M.P.K. and Lentz, S. and Nolan, N. and others} } @conference {14446, title = {Growing a tree in the forest: constructing folksonomies by integrating structured metadata}, booktitle = {Proceedings of the 16th ACM SIGKDD international conference on Knowledge discovery and data mining}, series = {KDD {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {949 - 958}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Many social Web sites allow users to annotate the content with descriptive metadata, such as tags, and more recently to organize content hierarchically. These types of structured metadata provide valuable evidence for learning how a community organizes knowledge. For instance, we can aggregate many personal hierarchies into a common taxonomy, also known as a folksonomy, that will aid users in visualizing and browsing social content, and also help them in organizing their own content. However, learning from social metadata presents several challenges, since it is sparse, shallow, ambiguous, noisy, and inconsistent.
We describe an approach to folksonomy learning based on relational clustering, which exploits structured metadata contained in personal hierarchies. Our approach clusters similar hierarchies using their structure and tag statistics, then incrementally weaves them into a deeper, bushier tree. We study folksonomy learning using social metadata extracted from the photo-sharing site Flickr, and demonstrate that the proposed approach addresses the challenges. Moreover, compared to previous work, the approach produces larger, more accurate folksonomies, and in addition, scales better.}, keywords = {collective knowledge, data mining, folksonomies, relational clustering, social information processing, social metadata, taxonomies}, isbn = {978-1-4503-0055-1}, doi = {10.1145/1835804.1835924}, url = {http://doi.acm.org/10.1145/1835804.1835924}, author = {Plangprasopchok,Anon and Lerman,Kristina and Getoor, Lise} } @inbook {16274, title = {Identifying Differentially Abundant Metabolic Pathways in Metagenomic Datasets}, booktitle = {Bioinformatics Research and Applications}, series = {Lecture Notes in Computer Science}, volume = {6053}, year = {2010}, month = {2010///}, pages = {101 - 112}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Enabled by rapid advances in sequencing technology, metagenomic studies aim to characterize entire communities of microbes bypassing the need for culturing individual bacterial members. One major goal of such studies is to identify specific functional adaptations of microbial communities to their habitats. Here we describe a powerful analytical method (MetaPath) that can identify differentially abundant pathways in metagenomic data-sets, relying on a combination of metagenomic sequence data and prior metabolic pathway knowledge. We show that MetaPath outperforms other common approaches when evaluated on simulated datasets. We also demonstrate the power of our methods in analyzing two publicly available metagenomic datasets: a comparison of the gut microbiome of obese and lean twins; and a comparison of the gut microbiome of infant and adult subjects. We demonstrate that the subpathways identified by our method provide valuable insights into the biological activities of the microbiome.}, isbn = {978-3-642-13077-9}, url = {http://dx.doi.org/10.1007/978-3-642-13078-6_12}, author = {Liu,Bo and Pop, Mihai}, editor = {Borodovsky,Mark and Gogarten,Johann and Przytycka,Teresa and Rajasekaran,Sanguthevar} } @article {14510, title = {Identifying Modifiers in Web Queries Over Structured Data}, volume = {12/473,286}, year = {2010}, month = {2010/12/02/}, abstract = {Described is using modifiers in online search queries for queries that map to a database table. A modifier (e.g., an adjective or a preposition) specifies the intended meaning of a target, in which the target maps to a column in that table. The modifier thus corresponds to one or more functions that determine which rows of data in the column match the query, e.g., {\textquotedblleft}cameras under \$400{\textquotedblright} maps to a camera (or product) table, and {\textquotedblleft}under{\textquotedblright} is the modifier that represents a function (less than) that is used to evaluate a {\textquotedblleft}price{\textquotedblright} target/data column.
Also described are different classes of modifiers, and generating the dictionaries for a domain (corresponding to a table) via query log mining.}, url = {http://www.google.com/patents?id=gQTkAAAAEBAJ}, author = {Paparizos,Stelios and Joshi,Amrula Sadanand and Getoor, Lise and Ntoulas,Alexandros}, editor = {Microsoft Corporation} } @conference {12429, title = {Image classification of vascular smooth muscle cells}, booktitle = {Proceedings of the 1st ACM International Health Informatics Symposium}, series = {IHI {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {484 - 486}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {cell biology, digital image processing, machine learning}, isbn = {978-1-4503-0030-8}, doi = {10.1145/1882992.1883068}, url = {http://doi.acm.org/10.1145/1882992.1883068}, author = {Grasso,Michael A. and Mokashi,Ronil and Dalvi,Darshana and Cardone, Antonio and Dima,Alden A. and Bhadriraju,Kiran and Plant,Anne L. and Brady,Mary and Yesha,Yaacov and Yesha,Yelena} } @article {14487, title = {On the importance of sharing negative results}, journal = {SIGKDD explorations}, volume = {12}, year = {2010}, month = {2010///}, pages = {3 - 4}, author = {Giraud-Carrier,C. and Dunham,M.H. and Atreya,A. and Elkan,C. and Perlich,C. and Swirszcz,G. and Shi,X. and Philip,S.Y. and F{\"u}rnkranz,J. and Sima,J.F.} } @article {19614, title = {On the initialization methods of an exterior point algorithm for the assignment problem}, journal = {International Journal of Computer Mathematics}, volume = {87}, year = {2010}, month = {2010///}, pages = {1831 - 1846}, abstract = {In this paper, we present a theoretical investigation and an extensive computational study of exterior point simplex algorithm (EPSA) initialization methods for the assignment problem (AP). We describe the exterior point algorithm using three different initialization methods. Effective implementations are explored for each initialization method. Then we perform an experimental evaluation on a large set of benchmark problems from the TSPLib 95 and OR Library collections. The results obtained demonstrate the advantages of the three initialization methods. Finally, we give a theoretical justification of the initialization methods{\textquoteright} efficiency. We explain theoretically the computational ranking for these methods.}, isbn = {0020-7160}, url = {http://www.tandfonline.com/doi/abs/10.1080/00207160802524739}, author = {Charalampos Papamanthou and Paparrizos, K. and Samaras, N. and Sifaleras, A.} } @article {14401, title = {Integrating structured metadata with relational affinity propagation}, journal = {In proceedings of AAAI Workshop on Statistical Relational AI}, year = {2010}, month = {2010///}, abstract = {Structured and semi-structured data describing entities, taxonomies and ontologies appears in many domains. There is a huge interest in integrating structured information from multiple sources; however, integrating structured data to infer complex common structures is a difficult task because the integration must aggregate similar structures while avoiding structural inconsistencies that may appear when the data is combined. In this work, we study the integration of structured social metadata: shallow personal hierarchies specified by many individual users on the Social Web, and focus on inferring a collection of integrated, consistent taxonomies. We frame this task as an optimization problem with structural constraints.
We propose a new inference algorithm, which we refer to as Relational Affinity Propagation (RAP), that extends affinity propagation (Frey and Dueck 2007) by introducing structural constraints. We validate the approach on a real-world social media dataset, collected from the photo-sharing website Flickr. Our empirical results show that our proposed approach is able to construct deeper and denser structures compared to an approach using only the standard affinity propagation algorithm.}, author = {Plangprasopchok,A. and Lerman,K. and Getoor, Lise} } @article {16052, title = {Interactive information visualization for exploring and querying electronic health records: A systematic review}, year = {2010}, month = {2010///}, institution = {Human-Computer Interaction Lab, University of Maryland}, abstract = {To overcome the complexity and scale of making medical decisions based on electronic health records (EHRs), a variety of visual methods have been proposed. This paper surveys twelve state-of-the-art information visualization systems described in the scientific literature and compares them based on a set of carefully selected criteria. It aims to systematically examine the systems{\textquoteright} strengths and weaknesses to inform future information visualization designs. We select twelve state-of-the-art information visualization systems from information visualization, medical information systems and human-computer interaction conferences and journals. We compare the systems using the following criteria: (1) data types covered, (2) multivariate analysis support, (3) number of patient records used (one or many), and (4) user intents addressed. The review describes the twelve systems in detail and evaluates the systems using the aforementioned criteria. We discuss how the systems differ in their features and highlight how these differences are related to their design and affect the user intent model. Examples of findings include: (1) most systems handle numerical or categorical data but not both, (2) most systems are specifically designed for looking at a single patient or multiple patients but not both, (3) most systems utilize horizontal time lines to represent time, (4) only systems that handle multiple patient records have good support for Filter, and (5) some specific user intents (e.g. the Encode and Connect intents) are rarely addressed. Based on our review results, we believe that effective information visualization can facilitate analysis of patient records, and we encourage the information visualization community to study the application of their systems and conduct more in-depth evaluations. We identify potential future research topics in interactive support for data abstraction and medical tasks that involve looking at a single or multiple records. Finally, we propose to create a repository for data and tasks so benchmarks can be established for both academic and commercial patient record visualization systems. }, author = {Rind,A. and Wang,T. D and Aigner,W. and Miksch,S. and Wongsuphasawat,K.
and Plaisant, Catherine and Shneiderman, Ben} } @article {13798, title = {Interlingual Annotation of Parallel Text Corpora: A New Framework for Annotation and Evaluation}, journal = {Natural Language Engineering}, volume = {16}, year = {2010}, month = {2010///}, pages = {197 - 243}, abstract = {This paper focuses on an important step in the creation of a system of meaning representation and the development of semantically annotated parallel corpora, for use in applications such as machine translation, question answering, text summarization, and information retrieval. The work described below constitutes the first effort of any kind to annotate multiple translations of foreign-language texts with interlingual content. Three levels of representation are introduced: deep syntactic dependencies (IL0), intermediate semantic representations (IL1), and a normalized representation that unifies conversives, nonliteral language, and paraphrase (IL2). The resulting annotated, multilingually induced, parallel corpora will be useful as an empirical basis for a wide range of research, including the development and evaluation of interlingual NLP systems and paraphrase-extraction systems as well as a host of other research and development efforts in theoretical and applied linguistics, foreign language pedagogy, translation studies, and other related disciplines.}, doi = {10.1017/S1351324910000070}, author = {Dorr, Bonnie J and Passonneau,Rebecca J. and Farwell,David and Green,Rebecca and Habash,Nizar and Helmreich,Stephen and Hovy,Eduard and Levin,Lori and Miller,Keith J. and Mitamura,Teruko and Rambow,Owen and Siddharthan,Advaith} } @article {18522, title = {An Internet Wide View into DNS Lookup Patterns}, year = {2010}, month = {2010///}, institution = {VeriSign Labs, School of Computer Science, Georgia Tech}, abstract = {This paper analyzes the DNS lookup patterns from a large authoritative top-level domain server and characterizes how the lookup patterns for unscrupulous domains may differ from those for legitimate domains. We examine domains for phishing attacks and spam- and malware-related domains, and see how these lookup patterns vary in terms of both their temporal and spatial characteristics. We find that malicious domains tend to exhibit more variance in the networks that look up these domains, and we also find that these domains become popular considerably more quickly after their initial registration time. We also note that miscreant domains exhibit distinct clusters, in terms of the networks that look up these domains. The distinct spatial and temporal characteristics of these domains, and their tendency to exhibit similar lookup behavior, suggest that it may be possible to ultimately develop more effective blacklisting techniques based on these differing lookup patterns. }, author = {Hao,S. and Feamster, Nick and Pandrangi,R.} } @conference {19282, title = {A Lightweight Dataflow Approach for Design and Implementation of SDR Systems}, booktitle = {Wireless Innovation Conference and Product Exposition, Washington DC, USA}, year = {2010}, month = {2010}, abstract = {Model-based design methods based on dataflow models of computation are attractive for design and implementation of wireless communication systems because of their intuitive correspondence to communication system block diagrams, and the formal structure that is exposed through formal dataflow representations (e.g., see [2]).
In this paper, we introduce a novel lightweight dataflow (LWDF) programming model for model-based design and implementation of wireless communication and software-defined radio systems. The approach is suitable for improving the productivity of the design process; the agility with which designs can be retargeted across different platforms; and the quality of derived implementations. By {\textquotedblleft}lightweight{\textquotedblright}, we mean that the programming model is designed to be minimally intrusive on existing design processes, and to require minimal dependence on specialized tools or libraries. This allows designers to integrate and experiment with dataflow modeling approaches relatively quickly and flexibly into existing design methodologies and processes. }, url = {http://www.researchgate.net/publication/228788399_A_lightweight_dataflow_approach_for_design_and_implementation_of_SDR_systems/file/d912f511472fa25833.pdf}, author = {Chung-Ching Shen and Plishker,William and Wu, Hsiang-Huang and Bhattacharyya, Shuvra S.} } @conference {19290, title = {Loop transformations for interface-based hierarchies in SDF graphs}, booktitle = {2010 21st IEEE International Conference on Application-specific Systems Architectures and Processors (ASAP)}, year = {2010}, month = {2010}, pages = {341 - 344}, abstract = {Data-flow has proven to be an attractive computation model for programming digital signal processing (DSP) applications. A restricted version of data-flow, termed synchronous data-flow (SDF), offers strong compile-time predictability properties, but has limited expressive power. A new type of hierarchy (Interface-based SDF) has been proposed allowing more expressivity while maintaining its predictability. One of the main problems with this hierarchical SDF model is the lack of trade-off between parallelism and network clustering. This paper presents a systematic method for applying an important class of loop transformation techniques in the context of interface-based SDF semantics. The resulting approach provides novel capabilities for integrating parallelism extraction properties of the targeted loop transformations with the useful modeling, analysis, and code reuse properties provided by SDF.}, keywords = {Application software, code generation, Computer architecture, Computer interfaces, Data-Flow programming, Digital signal processing, Loop parallelization, PARALLEL PROCESSING, Power engineering computing, Power system modeling, Processor scheduling, Programming profession, scheduling, SDF graph, system recovery}, author = {Piat, J. and Bhattacharyya, Shuvra S. and Raulet, M.} } @article {16050, title = {Making sense of archived e-mail: Exploring the Enron collection with NetLens}, journal = {Journal of the American Society for Information Science and Technology}, volume = {61}, year = {2010}, month = {2010/04/01/}, pages = {723 - 744}, abstract = {Informal communications media pose new challenges for information-systems design, but the nature of informal interaction offers new opportunities as well. This paper describes NetLens-E-mail, a system designed to support exploration of the content-actor network in large e-mail collections. Unique features of NetLens-E-mail include close coupling of orientation, specification, restriction, and expansion, and introduction and incorporation of a novel capability for iterative projection between content and actor networks within the same collection.
Scenarios are presented to illustrate the intended employment of NetLens-E-mail, and design walkthroughs with two domain experts provide an initial basis for assessment of the suitability of the design by scholars and analysts.}, isbn = {1532-2890}, doi = {10.1002/asi.21275}, url = {http://onlinelibrary.wiley.com/doi/10.1002/asi.21275/full}, author = {Kang,Hyunmo and Plaisant, Catherine and Elsayed,Tamer and Oard, Douglas} } @conference {16055, title = {ManyNets: an interface for multiple network analysis and visualization}, booktitle = {Proceedings of the 28th international conference on Human factors in computing systems}, series = {CHI {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {213 - 222}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Traditional network analysis tools support analysts in studying a single network. ManyNets offers these analysts a powerful new approach that enables them to work on multiple networks simultaneously. Several thousand networks can be presented as rows in a tabular visualization, and then inspected, sorted and filtered according to their attributes. The networks to be displayed can be obtained by subdivision of larger networks. Examples of meaningful subdivisions used by analysts include ego networks, community extraction, and time-based slices. Cell visualizations and interactive column overviews allow analysts to assess the distribution of attributes within particular sets of networks. Details, such as traditional node-link diagrams, are available on demand. We describe a case study analyzing a social network geared towards film recommendations by means of decomposition. A small usability study provides feedback on the use of the interface on a set of tasks drawn from the case study.}, keywords = {exploratory analysis, graphical user interface, Information Visualization, interaction, network analysis, table interface}, isbn = {978-1-60558-929-9}, doi = {10.1145/1753326.1753358}, url = {http://doi.acm.org/10.1145/1753326.1753358}, author = {Freire,Manuel and Plaisant, Catherine and Shneiderman, Ben and Golbeck,Jen} } @conference {16025, title = {The Metacognitive Loop: An Architecture for Building Robust Intelligent Systems}, booktitle = {2010 AAAI Fall Symposium Series}, year = {2010}, month = {2010/03/11/}, keywords = {commonsense, ontologies, robust intelligent systems}, url = {http://www.aaai.org/ocs/index.php/FSS/FSS10/paper/view/2161}, author = {Shahri,Hamid Haidarian and Dinalankara,Wikum and Fults,Scott and Wilson,Shomir and Perlis, Don and Schmill,Matt and Oates,Tim and Josyula,Darsana and Anderson,Michael} } @conference {16278, title = {MetaPhyler: Taxonomic profiling for metagenomic sequences}, booktitle = {2010 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)}, year = {2010}, month = {2010/12/18/21}, pages = {95 - 100}, publisher = {IEEE}, organization = {IEEE}, abstract = {A major goal of metagenomics is to characterize the microbial diversity of an environment. The most popular approach relies on 16S rRNA sequencing; however, this approach can generate biased estimates due to differences in the copy number of the 16S rRNA gene between even closely related organisms, and due to PCR artifacts. The taxonomic composition can also be determined from whole-metagenome sequencing data by matching individual sequences against a database of reference genes.
One major limitation of prior methods used for this purpose is the use of a universal classification threshold for all genes at all taxonomic levels. We propose that better classification results can be obtained by tuning the taxonomic classifier to each matching length, reference gene, and taxonomic level. We present a novel taxonomic profiler, MetaPhyler, which uses marker genes as a taxonomic reference. Results on simulated datasets demonstrate that MetaPhyler outperforms other tools commonly used in this context (CARMA, Megan and PhymmBL). We also present interesting results obtained by applying MetaPhyler to a real metagenomic dataset.}, keywords = {Bioinformatics, CARMA comparison, Databases, Genomics, Linear regression, marker genes, matching length, Megan comparison, metagenomic sequences, metagenomics, MetaPhyler, microbial diversity, microorganisms, molecular biophysics, molecular configurations, Pattern classification, pattern matching, phylogenetic classification, Phylogeny, PhymmBL comparison, reference gene database, Sensitivity, sequence matching, taxonomic classifier, taxonomic level, taxonomic profiling, whole metagenome sequencing data}, isbn = {978-1-4244-8306-8}, doi = {10.1109/BIBM.2010.5706544}, author = {Liu,Bo and Gibbons,T. and Ghodsi,M. and Pop, Mihai} } @article {13708, title = {A modality lexicon and its use in automatic tagging}, journal = {Proceedings of the Seventh conference on International Language Resources and Evaluation (LREC{\textquoteright}10)}, year = {2010}, month = {2010///}, pages = {1402 - 1407}, abstract = {This paper describes our resource-building results for an eight-week JHU Human Language Technology Center of Excellence Summer Camp for Applied Language Exploration (SCALE-2009) on Semantically-Informed Machine Translation. Specifically, we describe the construction of a modality annotation scheme, a modality lexicon, and two automated modality taggers that were built using the lexicon and annotation scheme. Our annotation scheme is based on identifying three components of modality: a trigger, a target and a holder. We describe how our modality lexicon was produced semi-automatically, expanding from an initial hand-selected list of modality trigger words and phrases. The resulting expanded modality lexicon is being made publicly available. We demonstrate that one tagger{\textemdash}a structure-based tagger{\textemdash}results in precision around 86\% (depending on genre) for tagging of a standard LDC data set. In a machine translation application, using the structure-based tagger to annotate English modalities on an English-Urdu training corpus improved the translation quality score for Urdu by 0.3 Bleu points in the face of sparse training data. }, author = {Baker,K. and Bloodgood,M. and Dorr, Bonnie J and Filardo,N.W. and Levin,L. and Piatko,C.} } @inbook {15770, title = {Modelling Type Ia Supernova Light Curves}, booktitle = {Exponential Data Fitting and Its Applications}, year = {2010}, month = {2010///}, pages = {169 - 186}, publisher = {Bentham Science Publishers Ltd.}, organization = {Bentham Science Publishers Ltd.}, url = {http://www.bentham.org/ebooks/9781608050482/}, author = {Rust,Bert W. and O{\textquoteright}Leary, Dianne P. and Mullen,Katharine M.}, editor = {Pereyra,V.
and Scherer,G.} } @article {12488, title = {Multiscale directional filtering of noisy InSAR phase images}, journal = {Proceedings of SPIE}, volume = {7703}, year = {2010}, month = {2010/04/05/}, pages = {770308 - 770308-9}, abstract = {In this work, we present a new approach for the problem of interferometric phase noise reduction in synthetic aperture radar interferometry based on the shearlet representation. Shearlets provide a multidirectional and multiscale decomposition that has advantages over standard filtering methods when dealing with noisy phase fringes. Using a shearlet decomposition of a noisy phase image, we can adaptively estimate a phase representation in a multiscale and anisotropic fashion. Such denoised phase interferograms can be used to provide much better digital elevation maps (DEM). Experiments show that this method performs significantly better than many competitive methods.}, isbn = {0277786X}, doi = {10.1117/12.849576}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/7703/1/770308_1?isAuthorized=no}, author = {Patel, Vishal M. and Easley,Glenn R and Chellapa, Rama} } @article {16053, title = {New forms of Human-Computer Interaction for Visualizing Information}, journal = {Information Visualization}, year = {2010}, month = {2010///}, abstract = {The Graphical User Interface (GUI) {\textendash} although developed in research laboratories in the late 1970s {\textendash} is still the dominant interaction paradigm in Information Visualization. We propose a new interaction paradigm called Blended Interaction. It combines ideas of Embodied Cognition, Multimodal Interaction, Reality-Based Interaction \& Ubiquitous Computing. This is intended to stress that a single increase in the reality aspect of the interaction cannot go far enough. The particular challenge {\textendash} and from the user{\textquoteright}s standpoint, the key advantage {\textendash} lies in a meaningful marriage between the tested real-world options and the digital world. As a minimum, this marriage must exist on the levels of the interaction, communication, of the way we solve problems with conventional tools (workflows), and of the design of the space or the architecture of buildings and places. The digital world often offers entirely new possibilities and takes the form of interactive devices of various shapes but also of intelligent everyday objects (e.g. the {\textquoteright}Internet of things{\textquoteright}). In our view, interaction concepts can indeed offer a new quality of interaction, but only when the design of the interaction includes all these domains at the same time and with equal weighting. We test the suitability of our ideas of Blended Interaction concepts by using specific application examples that are being worked on as part of current research projects. Our experiences show that this new interaction paradigm also has great potential for interacting with visualization. For example, we have developed multi-touch scatter plots \& facet maps for tangible user interfaces supporting the searching \& browsing in Digital Libraries. We have embedded different visualizations into a Zoomable Object-oriented Information Landscape (ZOIL), which supports our vision of using visualizations on different displays of different size at the same time. We have developed a specific kind of search token that supports collaborative search activities.
For example, we try to address the following research questions: * What can future interactive InfoVis tools look like, especially in light of the idea of Blended Interaction? * How can future interactive InfoVis tools benefit from Multi-Displays \& Multimodal environments used by Multiple Users? * What are the specific design requirements for multi-touch visualizations? * How can we support the collaborative use of visualization tools?}, author = {Reiterer,H. and Kerren,A. and Plaisant, Catherine and Stasko,J.T.} } @article {16049, title = {Non-visual exploration of geographic maps: Does sonification help?}, journal = {Disability \& Rehabilitation: Assistive Technology}, volume = {5}, year = {2010}, month = {2010/05//}, pages = {164 - 174}, abstract = {Purpose. This study aims at evaluating the effectiveness of sonification as a means to provide access to geo-referenced information to users with visual impairments. Method. Thirty-five participants (10 congenitally blind, 10 with acquired blindness and 15 blindfolded sighted) completed four tasks of progressive difficulty. During each task, participants first explored a sonified map by using either a tablet or a keyboard to move across regions and listened to sounds giving information about the current location. Then the participants were asked to identify, among four tactile maps, the one that crossmodally corresponds to the sonified map they just explored. Finally, participants answered a self-report questionnaire of understanding and satisfaction. Results. Participants achieved high accuracy in all of the four tactile map discrimination tasks. No significant performance difference was found between subjects who used the keyboard and those who used the tablet, nor between the three groups of blind and sighted participants. Differences between groups and interfaces were found in the usage strategies. High levels of satisfaction and understanding of the tools and tasks emerged from users{\textquoteright} reports. }, isbn = {1748-3107, 1748-3115}, doi = {10.3109/17483100903100277}, url = {http://informahealthcare.com/doi/abs/10.3109/17483100903100277}, author = {Delogu,Franco and Palmiero,Massimiliano and Federici,Stefano and Plaisant, Catherine and Zhao,Haixia and Belardinelli,Olivetti} } @article {14513, title = {Object Classification Using Taxonomies}, volume = {12/414,065}, year = {2010}, month = {2010/07/22/}, abstract = {As provided herein, objects from a source catalog, such as a provider{\textquoteright}s catalog, can be added to a target catalog, such as an enterprise master catalog, in a scalable manner utilizing catalog taxonomies. A baseline classifier determines probabilities for source objects to target catalog classes. Source objects can be assigned to those classes with probabilities that meet a desired threshold and meet a desired rate. A classification cost for target classes can be determined for respective unassigned source objects, which can comprise determining an assignment cost and separation cost for the source objects for respective desired target classes. The separation and assignment costs can be combined to determine the classification cost, and the unassigned source objects can be assigned to those classes having a desired classification cost.}, url = {http://www.google.com/patents?id=oXDSAAAAEBAJ}, author = {Tsaparas,Panayiotis and Papadimitriou,Panagiotis and Fuxman,Ariel D.
and Getoor, Lise and Agrawal,Rakesh}, editor = {Microsoft Corporation} } @inbook {19604, title = {Optimal Authenticated Data Structures with Multilinear Forms}, booktitle = {Pairing-Based Cryptography - Pairing 2010}, series = {Lecture Notes in Computer Science}, year = {2010}, month = {2010/01/01/}, pages = {246 - 264}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Cloud computing and cloud storage are becoming increasingly prevalent. In this paradigm, clients outsource their data and computations to third-party service providers. Data integrity in the cloud therefore becomes an important factor for the functionality of these web services. Authenticated data structures, implemented with various cryptographic primitives, have been widely studied as a means of providing efficient solutions to data integrity problems (e.g., Merkle trees). In this paper, we introduce a new authenticated dictionary data structure that employs multilinear forms, a cryptographic primitive proposed by Silverberg and Boneh in 2003 [10], the construction of which, however, remains an open problem to date. Our authenticated dictionary is optimal, that is, it does not add any extra asymptotic cost to the plain dictionary data structure, yielding proofs of constant size, i.e., asymptotically equal to the size of the answer, while maintaining other relevant complexities logarithmic. Instead, solutions based on cryptographic hashing (e.g., Merkle trees) require proofs of logarithmic size [40]. Because multilinear forms are not known to exist yet, our result can be viewed from a different angle: if one could prove that optimal authenticated dictionaries cannot exist in the computational model, irrespective of cryptographic primitives, then our solution would imply that cryptographically interesting multilinear form generators cannot exist either (i.e., it can be viewed as a reduction). Thus, we provide an alternative avenue towards proving the nonexistence of multilinear form generators in the context of general lower bounds for authenticated data structures [40] and for memory checking [18], a model similar to the authenticated data structures model.}, keywords = {Algorithm Analysis and Problem Complexity, authenticated dictionary, Coding and Information Theory, Computer Communication Networks, Data Encryption, Discrete Mathematics in Computer Science, multilinear forms, Systems and Data Security}, isbn = {978-3-642-17454-4, 978-3-642-17455-1}, url = {http://link.springer.com/chapter/10.1007/978-3-642-17455-1_16}, author = {Charalampos Papamanthou and Tamassia, Roberto and Triandopoulos, Nikos}, editor = {Joye, Marc and Miyaji, Atsuko and Otsuka, Akira} } @article {12466, title = {PADS: A Probabilistic Activity Detection Framework for Video Data}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {32}, year = {2010}, month = {2010/12//}, pages = {2246 - 2261}, abstract = {There is now a growing need to identify various kinds of activities that occur in videos. In this paper, we first present a logical language called Probabilistic Activity Description Language (PADL) in which users can specify activities of interest. We then develop a probabilistic framework which assigns to any subvideo of a given video sequence a probability that the subvideo contains the given activity, and we finally develop two fast algorithms to detect activities within this framework.
OffPad finds all minimal segments of a video that contain a given activity with a probability exceeding a given threshold. In contrast, the OnPad algorithm examines a video during playout (rather than afterwards as OffPad does) and computes the probability that a given activity is occurring (even if the activity is only partially complete). Our prototype Probabilistic Activity Detection System (PADS) implements the framework and the two algorithms, building on top of existing image processing algorithms. We have conducted detailed experiments and compared our approach to four different approaches presented in the literature. We show that, for complex activity definitions, our approach outperforms all the other approaches.}, keywords = {PADL;PADS;image processing algorithms;offPad algorithm;onPad algorithm;probabilistic activity description language;probabilistic activity detection framework;video data;video sequence;image sequences;object detection;probability;video surveillance;Algorithms;Image Processing, Computer-Assisted;Models, Statistical;Movement;Pattern Recognition, Automated;Programming Languages;Video Recording}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2010.33}, author = {Albanese, M. and Chellapa, Rama and Cuntoor, N. and Moscato, V. and Picariello, A. and V.S. Subrahmanian and Udrea,O.} } @article {15788, title = {Portfolio Selection Using Tikhonov Filtering to Estimate the Covariance Matrix}, journal = {SIAM Journal on Financial Mathematics}, volume = {1}, year = {2010}, month = {2010///}, pages = {932 - 961}, abstract = {Markowitz{\textquoteright}s portfolio selection problem chooses weights for stocks in a portfolio based on an estimated covariance matrix of stock returns. Our study proposes reducing noise in the estimation by using a Tikhonov filter function. In addition, we prevent rank deficiency of the estimated covariance matrix and propose a method for effectively choosing the Tikhonov parameter, which determines the filtering intensity. We put previous estimators into a common framework and compare their filtering functions for eigenvalues of the correlation matrix. We demonstrate the effectiveness of our estimator using stock return data from 1958 through 2007.}, keywords = {covariance matrix estimate, Markowitz portfolio selection, ridge regression, Tikhonov regularization}, doi = {10.1137/090749372}, url = {http://link.aip.org/link/?SJF/1/932/1}, author = {Park,Sungwoo and O{\textquoteright}Leary, Dianne P.} } @article {16391, title = {The problem with zoning: nonlinear effects of interactions between location preferences and externalities on land use and utility}, journal = {Environment and Planning B: Planning and Design}, volume = {37}, year = {2010}, month = {2010///}, pages = {408 - 428}, author = {Zellner,M.L. and Riolo,R.L. and Rand, William and Brown,D.G. and Page,S.E. and Fernandez,L.E.} } @conference {19291, title = {Rapid prototyping for digital signal processing systems using Parameterized Synchronous Dataflow graphs}, booktitle = {2010 21st IEEE International Symposium on Rapid System Prototyping (RSP)}, year = {2010}, month = {2010}, pages = {1 - 7}, abstract = {Parameterized Synchronous Dataflow (PSDF) has been used previously for abstract scheduling and as a model for architecting embedded software and FPGA implementations. PSDF has been shown to be attractive for these purposes due to its support for flexible dynamic reconfiguration, and efficient quasi-static scheduling.
To integrate PSDF techniques more deeply into the design flow, support for comprehensive functional simulation and efficient hardware mapping is important. By building on the DIF (Dataflow Interchange Format), which is a design language and associated software package for developing and experimenting with dataflow-based design techniques for signal processing systems, we have developed a tool for functional simulation of PSDF specifications. This simulation tool allows designers to model applications in PSDF and simulate their functionality, including use of the dynamic parameter reconfiguration capabilities offered by PSDF. Based on this simulation tool, we also present a systematic design methodology for applying PSDF to the design and implementation of digital signal processing systems, with emphasis on FPGA-based systems for signal processing. We demonstrate capabilities for rapid and accurate prototyping offered by our proposed design methodology, along with its novel support for PSDF-based FPGA system implementation.}, keywords = {abstract scheduling, Computational modeling, Computer architecture, data flow graphs, dataflow based design, dataflow interchange format, design flow, design language, Digital signal processing, digital signal processing systems, dynamic parameter reconfiguration, Dynamic scheduling, efficient hardware mapping, efficient quasistatic scheduling, Embedded software, embedded systems, Field programmable gate arrays, flexible dynamic reconfiguration, FPGA based systems, FPGA implementations, functional simulation, Hardware, parameterized synchronous dataflow graphs, rapid prototyping, Schedules, scheduling, semantics, simulation tool, software package, systematic design methodology}, author = {Wu, Hsiang-Huang and Kee, Hojin and Sane, N. and Plishker,W. and Bhattacharyya, Shuvra S.} } @article {17950, title = {Saliency Guided Summarization of Molecular Dynamics Simulations}, journal = {Scientific Visualization: Advanced Concepts}, volume = {1}, year = {2010}, month = {2010///}, pages = {321 - 335}, abstract = {We present a novel method to measure saliency in molecular dynamics simulation data. This saliency measure is based on a multiscale center-surround mechanism, which is fast and efficient to compute. We explore the use of the saliency function to guide the selection of representative and anomalous timesteps for summarization of simulations. To this end, we also introduce a multiscale keyframe selection procedure which automatically provides keyframes representing the simulation at varying levels of coarseness. We compare our saliency guided keyframe approach against other methods, and show that it consistently selects superior keyframes as measured by their predictive power in reconstructing the simulation.}, author = {Patro,R. and Ip,C. Y and Varshney, Amitabh and Hagen,H.} } @conference {15918, title = {Sectored Random Projections for Cancelable Iris Biometrics}, booktitle = {2010 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)}, year = {2010}, month = {2010/03//}, pages = {1838 - 1841}, publisher = {IEEE}, organization = {IEEE}, abstract = {Privacy and security are essential requirements in practical biometric systems. In order to prevent the theft of biometric patterns, it is desired to modify them through revocable and non-invertible transformations called Cancelable Biometrics. In this paper, we propose an efficient algorithm for generating a Cancelable Iris Biometric based on Sectored Random Projections.
Our algorithm can generate a new pattern if the existing one is stolen, retain the original recognition performance and prevent extraction of useful information from the transformed patterns. Our method also addresses some of the drawbacks of existing techniques and is robust to degradations due to eyelids and eyelashes.}, keywords = {biometric pattern, Biometrics, Cancelable Biometrics, cancelable iris biometrics, data mining, data privacy, Degradation, Eyelashes, Eyelids, Iris, iris recognition, pattern recognition, privacy, random processes, Random Projections, Robustness, sectored random projection, Secure Biometrics, Security, security of data}, isbn = {978-1-4244-4295-9}, doi = {10.1109/ICASSP.2010.5495383}, author = {Pillai,J.K. and Patel, Vishal M. and Chellapa, Rama and Ratha,N. K} } @article {12501, title = {Secure and robust iris recognition using random projections and sparse representations}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, year = {2010}, month = {2010///}, pages = {1 - 1}, author = {Pillai, J. and Patel, Vishal M. and Chellapa, Rama and Ratha, N.} } @inbook {12507, title = {Semantic Video Content Analysis}, booktitle = {Video Search and Mining}, series = {Studies in Computational Intelligence}, volume = {287}, year = {2010}, month = {2010///}, pages = {147 - 176}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {In recent years, there has been significant interest in the area of automatically recognizing activities occurring in a camera{\textquoteright}s field of view and detecting abnormalities. The practical applications of such a system could include airport tarmac monitoring, or monitoring of activities in secure installations, to name a few. The difficulty of the problem is compounded by several factors: detection of primitive actions in spite of changes in illumination, occlusions and noise; complex multi-agent interaction; mapping of higher-level activities to lower-level primitive actions; variations in which the same semantic activity can be performed. In this chapter, we develop a theory of semantic activity analysis that addresses each of these issues in an integrated manner. Specifically, we discuss ontological representations of knowledge of a domain, integration of domain knowledge and statistical models for achieving semantic mappings, definition of logical languages to describe activities, and design of frameworks which integrate all the above aspects in a coherent way, thus laying the foundations of effective Semantic Video Content Analysis systems.}, isbn = {978-3-642-12899-8}, url = {http://dx.doi.org/10.1007/978-3-642-12900-1_6}, author = {Albanese,Massimiliano and Turaga,Pavan and Chellapa, Rama and Pugliese,Andrea and Subrahmanian,V.}, editor = {Schonfeld,Dan and Shan,Caifeng and Tao,Dacheng and Wang,Liang} } @article {19205, title = {Sensing Events Affecting Liquid Flow in a Liquid Distribution System}, year = {2010}, month = {2010}, publisher = {University of Washington}, abstract = {By monitoring pressure transients in a liquid within a liquid distribution system using only a single sensor, events such as the opening and closing of valves at specific fixtures are readily detected. The sensor, which can readily be coupled to a faucet bib, transmits an output signal to a computing device.
Each such event can be identified by the device by comparing characteristic features of the pressure transient waveform with previously observed characteristic features for events in the system. These characteristic features, which can include the varying pressure, derivative, and real Cepstrum of the pressure transient waveform, can be used to select a specific fixture where a valve open or close event has occurred. Flow to each fixture and leaks in the system can also be determined from the pressure transient signal. A second sensor disposed at a point disparate from the first sensor provides further event information.}, url = {http://www.google.com/patents?id=2JXnAAAAEBAJ}, author = {Patel,Shwetak N. and Fogarty,James A. and Jon Froehlich and Larson,Eric C.} } @inbook {16284, title = {Sequencing and Genome Assembly Using Next-Generation Technologies}, booktitle = {Computational Biology}, series = {Methods in Molecular Biology}, volume = {673}, year = {2010}, month = {2010///}, pages = {1 - 17}, publisher = {Humana Press}, organization = {Humana Press}, abstract = {Several sequencing technologies have been introduced in recent years that dramatically outperform the traditional Sanger technology in terms of throughput and cost. The data generated by these technologies are characterized by generally shorter read lengths (as low as 35 bp) and different error characteristics than Sanger data. Existing software tools for assembly and analysis of sequencing data are, therefore, ill-suited to handle the new types of data generated. This paper surveys the recent software packages aimed specifically at analyzing new generation sequencing data.}, isbn = {978-1-60761-842-3}, url = {http://dx.doi.org/10.1007/978-1-60761-842-3_1}, author = {Nagarajan,Niranjan and Pop, Mihai}, editor = {Feny{\"o},David} } @conference {19296, title = {Simulating dynamic communication systems using the core functional dataflow model}, booktitle = {2010 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)}, year = {2010}, month = {2010}, pages = {1538 - 1541}, abstract = {The latest communication technologies invariably consist of modules with dynamic behavior. There exist a number of design tools for communication system design with their foundation in dataflow modeling semantics. These tools must not only support the functional specification of dynamic communication modules and subsystems but also provide accurate estimation of resource requirements for efficient simulation and implementation. We explore this trade-off {\textendash} between flexible specification of dynamic behavior and accurate estimation of resource requirements {\textendash} using a representative application employing an adaptive modulation scheme. We propose an approach for precise modeling of such applications based on a recently-introduced form of dynamic dataflow called core functional dataflow. From our proposed modeling approach, we show how parameterized looped schedules can be generated and analyzed to simulate applications with low run-time overhead as well as guaranteed bounded memory execution.
We demonstrate our approach using the Advanced Design System from Agilent Technologies, Inc., which is a commercial tool for design and simulation of communication systems.}, keywords = {adaptive modulation, Analytical models, Application software, Computational modeling, core functional dataflow model, Dataflow, dataflow modeling semantics, design tools, Digital signal processing, dynamic communication systems, functional specification, Hardware, modeling and simulation, Power system modeling, Predictive models, Processor scheduling, Production, Signal processing, software tools, wireless communication}, author = {Sane, N. and Chia-Jui Hsu and Pino,J. L and Bhattacharyya, Shuvra S.} } @conference {15921, title = {Sparse representations and Random Projections for robust and cancelable biometrics}, booktitle = {Control Automation Robotics Vision (ICARCV), 2010 11th International Conference on}, year = {2010}, month = {2010/12//}, pages = {1 - 6}, abstract = {In recent years, the theories of Sparse Representation (SR) and Compressed Sensing (CS) have emerged as powerful tools for efficiently processing data in non-traditional ways. An area of promise for these theories is biometric identification. In this paper, we review the role of sparse representation and CS for efficient biometric identification. Algorithms to perform identification from face and iris data are reviewed. By applying Random Projections it is possible to purposively hide the biometric data within a template. This procedure can be effectively employed for securing and protecting personal biometric data against theft. Some of the most compelling challenges and issues that confront research in biometrics using sparse representations and CS are also addressed.}, keywords = {Biometric identification, Cancelable Biometrics, Compressed sensing, face data, face recognition, iris data, iris recognition, personal biometric data, Random Projections, robust biometrics, sparse representations}, doi = {10.1109/ICARCV.2010.5707955}, author = {Patel, Vishal M. and Chellapa, Rama and Tistarelli,M.} } @article {12487, title = {Sparsity inspired automatic target recognition}, journal = {Proceedings of SPIE}, volume = {7696}, year = {2010}, month = {2010/04/23/}, pages = {76960Q - 76960Q-8}, abstract = {In this paper, we develop a framework for using only the needed data for automatic target recognition (ATR) algorithms using the recently developed theory of sparse representations and compressive sensing (CS). We show how sparsity can be helpful for efficient utilization of data, with the possibility of developing real-time, robust target classification. We verify the efficacy of the proposed algorithm in terms of the recognition rate on the well known Comanche forward-looking infrared (FLIR) data set consisting of ten different military targets at different orientations.}, isbn = {0277786X}, doi = {10.1117/12.850533}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/7696/1/76960Q_1?isAuthorized=no}, author = {Patel, Vishal M. and Nasrabadi,Nasser M.
and Chellapa, Rama} } @article {19368, title = {Structural and dynamic determinants of ligand binding and regulation of cyclin-dependent kinase 5 by pathological activator p25 and inhibitory peptide CIP.}, journal = {Journal of molecular biology}, volume = {401}, year = {2010}, month = {2010 Aug 20}, pages = {478-92}, abstract = {The crystal structure of the cdk5/p25 complex has provided information on possible molecular mechanisms of the ligand binding, specificity, and regulation of the kinase. Comparative molecular dynamics simulations are reported here for physiological conditions. This study provides new insight into the mechanisms that modulate such processes, which may be exploited to control pathological activation by p25. The structural changes observed in the kinase are stabilized by a network of interactions involving highly conserved residues within the cyclin-dependent kinase (cdk) family. Collective motions of the proteins (cdk5, p25, and CIP) and their complexes are identified by principal component analysis, revealing two conformational states of the activation loop upon p25 complexation, which are absent in the uncomplexed kinase and not apparent from the crystal structure. Simulations of the uncomplexed inhibitor CIP show structural rearrangements and increased flexibility of the interfacial loop containing the critical residue E240, which becomes fully hydrated and available for interactions with one of several positively charged residues in the kinase. These changes provide a rationale for the observed high affinity and enhanced inhibitory action of CIP when compared to either p25 or the physiological activators of cdk5.}, keywords = {Crystallography, X-Ray, Cyclin-Dependent Kinase 5, Cyclin-Dependent Kinase Inhibitor Proteins, HUMANS, Ligands, molecular dynamics simulation, Nerve Tissue Proteins, Principal component analysis, Protein Binding, Protein Conformation}, issn = {1089-8638}, doi = {10.1016/j.jmb.2010.06.040}, author = {Cardone, Antonio and Hassan, S A and Albers,R.W. and Sriram,R.D. and Pant,H.C.} } @article {14362, title = {Summary of the first ACM SIGKDD workshop on knowledge discovery from uncertain data (U{\textquoteright}09)}, journal = {ACM SIGKDD Explorations Newsletter}, volume = {11}, year = {2010}, month = {2010/05//}, pages = {90 - 91}, isbn = {1931-0145}, doi = {10.1145/1809400.1809419}, url = {http://doi.acm.org/10.1145/1809400.1809419}, author = {Pei,Jian and Getoor, Lise and de Keijzer,Ander} } @article {13869, title = {System and method for analysis of an opinion expressed in documents with regard to a particular topic}, volume = {11/808,278}, year = {2010}, month = {2010//01/28}, abstract = {System and method for analysis of an opinion expressed in documents on a particular topic computes opinion strength on a continuous numeric scale, or qualitatively. A variety of opinion scoring techniques are plugged in to score opinion expressing words and sentences in documents. These scores are aggregated to measure the opinion intensity of documents. Multilingual opinion analysis is supported by the capability to concurrently identify and visualize the opinion intensity expressed in documents in multiple languages. A multi-dimensional representation of the measured opinion intensity is generated which is compatible with multi-lingual domains.}, url = {http://www.google.com/patents?id=j9fLAAAAEBAJ}, author = {V.S.
Subrahmanian and Picariello,Antonio and Dorr, Bonnie J and Reforgiato,Diego Recupero and Cesarano,Carmine and Sagoff,Amelia} } @inbook {17395, title = {Technology-Mediated Social Participation: Deep Science and Extreme Technology}, booktitle = {Active Media Technology}, series = {Lecture Notes in Computer Science}, volume = {6335}, year = {2010}, month = {2010///}, pages = {1 - 4}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The dramatic success of social media such as Facebook, Twitter, YouTube, Flickr, blogs, and traditional discussion groups empowers individuals to become active in local and global communities. With modest redesign, these technologies can be harnessed to support national priorities such as healthcare/wellness, disaster response, community safety, energy sustainability, etc. This talk describes a research agenda for these topics that develops deep science questions and extreme technology challenges.}, isbn = {978-3-642-15469-0}, url = {http://dx.doi.org/10.1007/978-3-642-15470-6_1}, author = {Shneiderman, Ben}, editor = {An,Aijun and Lingras,Pawan and Petty,Sheila and Huang,Runhe} } @article {16048, title = {Understanding Interactive Legends: a Comparative Evaluation with Standard Widgets}, journal = {Computer Graphics Forum}, volume = {29}, year = {2010}, month = {2010/06/01/}, pages = {1193 - 1202}, abstract = {Interactive information visualization systems rely on widgets to allow users to interact with the data and modify the representation. We define interactive legends as a class of controls combining the visual representation of static legends and interaction mechanisms of widgets. As interactive legends start to appear in popular websites, we categorize their designs for common data types and evaluate their effectiveness compared to standard widgets. Results suggest that 1) interactive legends can lead to faster perception of the mapping between data values and visual encodings and 2) interaction time is affected differently depending on the data type. Additionally, our study indicates the superiority of ordinal controls over numerical ones, both in terms of perception and interaction. Numerical techniques are mostly used in today{\textquoteright}s systems. By providing solutions that allow users to modify ranges interactively, we believe that interactive legends make it possible to increase the use of ordinal techniques for visual exploration.}, keywords = {H.5 [Information Interfaces and Presentation]: Miscellaneous}, isbn = {1467-8659}, doi = {10.1111/j.1467-8659.2009.01678.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1467-8659.2009.01678.x/abstract}, author = {Riche,Nathalie Henry and Lee,Bongshin and Plaisant, Catherine} } @conference {14306, title = {Using symbolic evaluation to understand behavior in configurable software systems}, booktitle = {Software Engineering, 2010 ACM/IEEE 32nd International Conference on}, volume = {1}, year = {2010}, month = {2010///}, pages = {445 - 454}, author = {Reisner,E. and Song,C. and Ma,K.K. and Foster, Jeffrey S. and Porter, Adam} } @article {19299, title = {Utilizing Hierarchical Multiprocessing for Medical Image Registration}, journal = {IEEE Signal Processing Magazine}, volume = {27}, year = {2010}, month = {2010}, pages = {61 - 68}, abstract = {This work discusses an approach to utilize hierarchical multiprocessing in the context of medical image registration.
By first organizing application parallelism into a domain-specific taxonomy, an algorithm is structured to target a set of multicore platforms. The approach is demonstrated on a cluster of graphics processing units (GPUs), requiring the use of two parallel programming environments to achieve fast execution times. There is negligible loss in accuracy for rigid registration when employing GPU acceleration, but it does adversely affect our nonrigid registration implementation due to our usage of a gradient descent approach.}, keywords = {Acceleration, application parallelism, Biomedical imaging, domain-specific taxonomy, GPU acceleration, gradient descent approach, Graphics processing unit, hierarchical multiprocessing, image registration, Magnetic resonance imaging, Medical diagnostic imaging, medical image processing, medical image registration, multicore platform set, Multicore processing, PARALLEL PROCESSING, parallel programming, Robustness, Signal processing algorithms, Ultrasonic imaging}, isbn = {1053-5888}, author = {Plishker,W. and Dandekar,O. and Bhattacharyya, Shuvra S. and Shekhar,R.} } @conference {16047, title = {VAST 2010 Challenge: Arms dealings and pandemics}, booktitle = {Visual Analytics Science and Technology (VAST), 2010 IEEE Symposium on}, year = {2010}, month = {2010/10//}, pages = {263 - 264}, abstract = {The 5th VAST Challenge consisted of three mini-challenges that involved both intelligence analysis and bioinformatics. Teams could solve one, two or all three mini-challenges and assess the overall situation to enter the Grand Challenge. Mini-challenge one involved text reports about people and events giving information about arms dealers, situations in various countries and linkages between different countries. Mini-challenge two involved hospital admission and death records from various countries providing information about the spread of a worldwide pandemic. Mini-challenge three involved genetic data to be used to identify the origin of the pandemic and the most dangerous viral mutations. The Grand Challenge was to determine how these various mini-challenges were connected. As always, the goal was to analyze the data and provide novel interactive visualizations useful in the analytic process. We received 58 submissions in total and gave 15 awards.}, keywords = {VAST 2010 challenge, arms dealings, bioinformatics, intelligence analysis, interactive visualizations, hospital admission data, death records, genetic data, dangerous viral mutations, pandemics, text reports, grand challenge, medical administrative data processing, weapons}, doi = {10.1109/VAST.2010.5649054}, author = {Grinstein,G. and Konecni,S. and Scholtz,J. and Whiting,M. and Plaisant, Catherine} } @conference {16051, title = {Visual information seeking in multiple electronic health records: design recommendations and a process model}, booktitle = {Proceedings of the 1st ACM International Health Informatics Symposium}, series = {IHI {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {46 - 55}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Current electronic health record (EHR) systems facilitate the storage, retrieval, persistence, and sharing of patient data. However, the way physicians interact with EHRs has not changed much. More specifically, support for temporal analysis of a large number of EHRs has been lacking. A number of information visualization techniques have been proposed to alleviate this problem.
Unfortunately, due to their limited application to a single case study, the results are often difficult to generalize across medical scenarios. We present the usage data of Lifelines2, our information visualization system, and user comments, both collected over eight different medical case studies. We generalize our experience into an information-seeking process model for multiple EHRs. Based on our analysis, we make recommendations to future information visualization designers for EHRs on design requirements and future research directions.}, keywords = {design requirements, electronic health records, human-computer interaction (hci), Information Visualization}, isbn = {978-1-4503-0030-8}, doi = {10.1145/1882992.1883001}, url = {http://doi.acm.org/10.1145/1882992.1883001}, author = {Wang,Taowei David and Wongsuphasawat,Krist and Plaisant, Catherine and Shneiderman, Ben} } @inbook {17503, title = {Visualizing Threaded Conversation Networks: Mining Message Boards and Email Lists for Actionable Insights}, booktitle = {Active Media Technology}, series = {Lecture Notes in Computer Science}, volume = {6335}, year = {2010}, month = {2010///}, pages = {47 - 62}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Analyzing complex online relationships is a difficult job, but new information visualization tools are enabling a wider range of users to gain actionable insights from the growing volume of online data. This paper describes the challenges and methods for conducting analyses of threaded conversations such as those found in enterprise message boards, email lists, and forums. After defining threaded conversation, we characterize the types of networks that can be extracted from them. We then provide three mini case studies to illustrate how actionable insights for community managers can be gained by applying the network analysis metrics and visualizations available in the free, open source NodeXL tool, which is a powerful, yet easy-to-use tool embedded in Excel 2007/2010.}, isbn = {978-3-642-15469-0}, url = {http://dx.doi.org/10.1007/978-3-642-15470-6_7}, author = {Hansen,Derek and Shneiderman, Ben and Smith,Marc}, editor = {An,Aijun and Lingras,Pawan and Petty,Sheila and Huang,Runhe} } @conference {14351, title = {WATTR: a method for self-powered wireless sensing of water activity in the home}, booktitle = {Proceedings of the 12th ACM international conference on Ubiquitous computing}, series = {Ubicomp {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {169 - 172}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {power harvesting, sensing, water conservation}, isbn = {978-1-60558-843-8}, doi = {10.1145/1864349.1864378}, url = {http://doi.acm.org/10.1145/1864349.1864378}, author = {Campbell,Tim and Larson,Eric and Cohn,Gabe and Jon Froehlich and Alcaide,Ramses and Patel,Shwetak N.} } @article {18925, title = {When is it better not to look ahead?}, journal = {Artificial Intelligence}, volume = {174}, year = {2010}, month = {2010/11//}, pages = {1323 - 1338}, abstract = {In situations where one needs to make a sequence of decisions, it is often believed that looking ahead will help produce better decisions. However, it was shown 30 years ago that there are {\textquotedblleft}pathological{\textquotedblright} situations in which looking ahead is counterproductive.
Two long-standing open questions are (a) what combinations of factors have the biggest influence on whether lookahead pathology occurs, and (b) whether it occurs in real-world decision-making. This paper includes simulation results for several synthetic game-tree models, and experimental results for three well-known board games: two chess endgames, kalah (with some modifications to facilitate experimentation), and the 8-puzzle. The simulations show the interplay between lookahead pathology and several factors that affect it, and the experiments confirm the trends predicted by the simulation models. The experiments also show that lookahead pathology is more common than has been thought: all three games contain situations where it occurs.}, keywords = {Game-tree search, Lookahead pathology, minimax}, isbn = {0004-3702}, doi = {10.1016/j.artint.2010.08.002}, url = {http://www.sciencedirect.com/science/article/pii/S0004370210001402}, author = {Nau, Dana S. and Lu{\v s}trek,Mitja and Parker,Austin and Bratko,Ivan and Gams,Matja{\v z}} } @inbook {18357, title = {Why Did the Person Cross the Road (There)? Scene Understanding Using Probabilistic Logic Models and Common Sense Reasoning}, booktitle = {Computer Vision {\textendash} ECCV 2010}, series = {Lecture Notes in Computer Science}, volume = {6312}, year = {2010}, month = {2010///}, pages = {693 - 706}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We develop a video understanding system for scene elements, such as bus stops, crosswalks, and intersections, that are characterized more by qualitative activities and geometry than by intrinsic appearance. The domain models for scene elements are not learned from a corpus of video, but instead, naturally elicited by humans, and represented as probabilistic logic rules within a Markov Logic Network framework. Human elicited models, however, represent object interactions as they occur in the 3D world rather than describing their appearance projection in some specific 2D image plane. We bridge this gap by recovering qualitative scene geometry to analyze object interactions in the 3D world and then reasoning about scene geometry, occlusions and common sense domain knowledge using a set of meta-rules. The effectiveness of this approach is demonstrated on a set of videos of public spaces.}, isbn = {978-3-642-15551-2}, url = {http://dx.doi.org/10.1007/978-3-642-15552-9_50}, author = {Kembhavi,Aniruddha and Tom Yeh and Davis, Larry S.}, editor = {Daniilidis,Kostas and Maragos,Petros and Paragios,Nikos} } @article {14650, title = {Young Proteins Experience More Variable Selection Pressures Than Old Proteins}, journal = {Genome Research}, volume = {20}, year = {2010}, month = {2010/11/01/}, pages = {1574 - 1581}, abstract = {It is well known that young proteins tend to experience weaker purifying selection and evolve more quickly than old proteins. Here, we show that, in addition, young proteins tend to experience more variable selection pressures over time than old proteins. We demonstrate this pattern in three independent taxonomic groups: yeast, Drosophila, and mammals. The increased variability of selection pressures on young proteins is highly significant even after controlling for the fact that young proteins are typically shorter and experience weaker purifying selection than old proteins.
The majority of our results are consistent with the hypothesis that the function of a young gene tends to change over time more readily than that of an old gene. At the same time, our results may be caused in part by young genes that serve constant functions over time, but nevertheless appear to evolve under changing selection pressures due to depletion of adaptive mutations. In either case, our results imply that the evolution of a protein-coding sequence is partly determined by its age and origin, and not only by the phenotypic properties of the encoded protein. We discuss, via specific examples, the consequences of these findings for understanding the sources of evolutionary novelty.}, isbn = {1088-9051, 1549-5469}, doi = {10.1101/gr.109595.110}, url = {http://genome.cshlp.org/content/20/11/1574}, author = {Vishnoi,Anchal and Kryazhimskiy,Sergey and Bazykin,Georgii A and Hannenhalli, Sridhar and Plotkin,Joshua B.} } @article {16059, title = {Advancing User-Centered Evaluation of Visual Analytic Environments Through Contests}, journal = {Information Visualization}, volume = {8}, year = {2009}, month = {2009/09/21/}, pages = {230 - 238}, abstract = {In this paper, the authors describe the Visual Analytics Science and Technology (VAST) Symposium contests run in 2006 and 2007 and the VAST 2008 and 2009 challenges. These contests were designed to provide researchers with a better understanding of the tasks and data that face potential end users. Access to these end users is limited because of time constraints and the classified nature of the tasks and data. In that respect, the contests serve as an intermediary, with the metrics and feedback serving as measures of utility to the end users. The authors summarize the lessons learned and the future directions for VAST Challenges.}, keywords = {metrics, synthetic data, user-centered evaluation, visual analytics}, isbn = {1473-8716, 1473-8724}, doi = {10.1057/ivs.2009.16}, url = {http://ivi.sagepub.com/content/8/3/230}, author = {Costello,Loura and Grinstein,Georges and Plaisant, Catherine and Scholtz,Jean} } @article {18784, title = {Algorithms for extraction of nanowire lengths and positions from optical section microscopy image sequence}, journal = {Journal of Computing and Information Science in Engineering}, volume = {9}, year = {2009}, month = {2009///}, pages = {041007}, author = {Peng,T. and Balijepalli,A. and Gupta,S.K. and LeBrun,T. W.} } @article {18811, title = {Algorithms for generating multi-stage molding plans for articulated assemblies}, journal = {Robotics and Computer-Integrated Manufacturing}, volume = {25}, year = {2009}, month = {2009/02//}, pages = {91 - 106}, abstract = {Multi-stage molding is capable of producing better-quality articulated products at a lower cost. During the multi-stage molding process, assembly operations are performed along with the molding operations. Hence, it gives rise to a new type of planning problem. It is difficult to perform the planning manually because it involves evaluating a large number of combinations and solving complex geometric reasoning problems. This paper investigates the problem of generating multi-stage molding plans for articulated assemblies. We present a planning framework that allows us to utilize constraints from experimentally proven molding plans. As a part of the planning problem, we determine the molding sequence and intermediate assembly configurations.
We present algorithms for all the steps in the planning problem and characterize their computational complexities. Finally, we illustrate our approach with representative examples.}, keywords = {In-mold assembly, mold design, Process planning}, isbn = {0736-5845}, doi = {10.1016/j.rcim.2007.10.002}, url = {http://www.sciencedirect.com/science/article/pii/S0736584507001044}, author = {Priyadarshi,Alok K. and Gupta, Satyandra K.} } @conference {16974, title = {Analyzing (social media) networks with NodeXL}, booktitle = {Proceedings of the fourth international conference on Communities and technologies}, series = {C\&T {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {255 - 264}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We present NodeXL, an extendible toolkit for network overview, discovery and exploration implemented as an add-in to the Microsoft Excel 2007 spreadsheet software. We demonstrate NodeXL data analysis and visualization features with a social media data sample drawn from an enterprise intranet social network. A sequence of NodeXL operations from data import to computation of network statistics and refinement of network visualization through sorting, filtering, and clustering functions is described. These operations reveal sociologically relevant differences in the patterns of interconnection among employee participants in the social media space. The tool and method can be broadly applied.}, keywords = {excel, network analysis, social media, social network, spreadsheet, Visualization}, isbn = {978-1-60558-713-4}, doi = {10.1145/1556460.1556497}, url = {http://doi.acm.org/10.1145/1556460.1556497}, author = {Smith,Marc A. and Shneiderman, Ben and Milic-Frayling,Natasa and Mendes Rodrigues,Eduarda and Barash,Vladimir and Dunne,Cody and Capone,Tony and Perer,Adam and Gleave,Eric} } @conference {15855, title = {Arabic Cross-Document Coreference Resolution}, booktitle = {Proceedings of the ACL-IJCNLP 2009 Conference Short Papers}, year = {2009}, month = {2009///}, pages = {357 - 360}, author = {Sayeed,A. and Elsayed,T. and Garera,N. and Alexander,D. and Xu,T. and Oard, Douglas and Yarowsky,D. and Piatko,C.} } @article {16244, title = {ARDB-Antibiotic Resistance Genes Database}, journal = {Nucleic Acids Research}, volume = {37}, year = {2009}, month = {2009/01/01/}, pages = {D443 - D447}, abstract = {The treatment of infections is increasingly compromised by the ability of bacteria to develop resistance to antibiotics through mutations or through the acquisition of resistance genes. Antibiotic resistance genes also have the potential to be used for bio-terror purposes through genetically modified organisms. In order to facilitate the identification and characterization of these genes, we have created a manually curated database{\textemdash}the Antibiotic Resistance Genes Database (ARDB){\textemdash}unifying most of the publicly available information on antibiotic resistance. Each gene and resistance type is annotated with rich information, including resistance profile, mechanism of action, ontology, COG and CDD annotations, as well as external links to sequence and protein databases. Our database also supports sequence similarity searches and implements an initial version of a tool for characterizing common mutations that confer antibiotic resistance.
The information we provide can be used as a compendium of antibiotic resistance factors as well as to identify the resistance genes of newly sequenced genes, genomes, or metagenomes. Currently, ARDB contains resistance information for 13,293 genes, 377 types, 257 antibiotics, 632 genomes, 933 species and 124 genera. ARDB is available at http://ardb.cbcb.umd.edu/.}, isbn = {0305-1048, 1362-4962}, doi = {10.1093/nar/gkn656}, url = {http://nar.oxfordjournals.org/content/37/suppl_1/D443.short}, author = {Liu,B. and Pop, Mihai} } @article {14139, title = {Block-diagonal preconditioning for spectral stochastic finite-element systems}, journal = {IMA Journal of Numerical Analysis}, volume = {29}, year = {2009}, month = {2009///}, pages = {350 - 350}, author = {Powell, C. E and Elman, Howard} } @article {15077, title = {Collusion-free multiparty computation in the mediated model}, journal = {Advances in Cryptology-CRYPTO 2009}, year = {2009}, month = {2009///}, pages = {524 - 540}, abstract = {Collusion-free protocols prevent subliminal communication (i.e., covert channels) between parties running the protocol. In the standard communication model, if one-way functions exist, then protocols satisfying any reasonable degree of privacy cannot be collusion-free. To circumvent this impossibility, Alwen, shelat and Visconti (CRYPTO 2008) recently suggested the mediated model where all communication passes through a mediator. The goal is to design protocols where collusion-freeness is guaranteed as long as the mediator is honest, while standard security guarantees hold if the mediator is dishonest. In this model, they gave constructions of collusion-free protocols for commitments and zero-knowledge proofs in the two-party setting. We strengthen the definition of Alwen et al., and resolve the main open questions in this area by showing a collusion-free protocol (in the mediated model) for computing any multi-party functionality.}, doi = {10.1007/978-3-642-03356-8_31}, author = {Alwen,J. and Katz, Jonathan and Lindell,Y. and Persiano,G. and Shelat,A. and Visconti,I.} } @article {14482, title = {Common effect of antipsychotics on the biosynthesis and regulation of fatty acids and cholesterol supports a key role of lipid homeostasis in schizophrenia}, journal = {Schizophrenia Research}, volume = {108}, year = {2009}, month = {2009/03//}, pages = {134 - 142}, abstract = {For decades, the dopamine hypothesis has gained the most attention in an attempt to explain the origin and the symptoms of schizophrenia. While this hypothesis offers an explanation for the relationship between psychotic symptoms and dopamine kinetics, it does not provide a direct explanation of the etiology of schizophrenia, which remains poorly understood. Consequently, current antipsychotics that target neurotransmitter receptors have limited and inconsistent efficacy. To gain insights into the mechanism of action of these drugs, we studied the expression profile of 12,490 human genes in a cell line treated with 18 antipsychotics, and compared it to that of a library of 448 other compounds used in a variety of disorders. Analysis reveals a common effect of antipsychotics on the biosynthesis and regulation of fatty acids and cholesterol, which is discussed in the context of a lipid hypothesis where alterations in lipid homeostasis might underlie the pathogenesis of schizophrenia.
This finding may help research aimed at the development of novel treatments for this devastating disease.}, keywords = {Antipsychotic action, Gene expression, Lipid homeostasis, Pathogenesis}, isbn = {0920-9964}, doi = {10.1016/j.schres.2008.11.025}, url = {http://www.sciencedirect.com/science/article/pii/S0920996408005306}, author = {Polymeropoulos,Mihael H. and Licamele,Louis and Volpi,Simona and Mack,Kendra and Mitkus,Shruti N. and Carstea,Eugene D. and Getoor, Lise and Thompson,Andrew and Lavedan,Christian} } @article {16253, title = {Complete Genome Sequence of Aggregatibacter (Haemophilus) aphrophilus NJ8700}, journal = {Journal of Bacteriology}, volume = {191}, year = {2009}, month = {2009/07/15/}, pages = {4693 - 4694}, abstract = {We report the finished and annotated genome sequence of Aggregatibacter aphrophilus strain NJ8700, a strain isolated from the oral flora of a healthy individual, and discuss characteristics that may affect its dual roles in human health and disease. This strain has a rough appearance, and its genome contains genes encoding a type VI secretion system and several factors that may participate in host colonization.}, isbn = {0021-9193, 1098-5530}, doi = {10.1128/JB.00447-09}, url = {http://jb.asm.org/content/191/14/4693}, author = {Di Bonaventura,Maria Pia and DeSalle,Rob and Pop, Mihai and Nagarajan,Niranjan and Figurski,David H and Fine,Daniel H and Kaplan,Jeffrey B and Planet,Paul J} } @article {13104, title = {A Comprehensive Evaluation Framework and a Comparative Study for Human Detectors}, journal = {Intelligent Transportation Systems, IEEE Transactions on}, volume = {10}, year = {2009}, month = {2009/09//}, pages = {417 - 427}, abstract = {We introduce a framework for evaluating human detectors that considers the practical application of a detector on a full image using multisize sliding-window scanning. We produce detection error tradeoff (DET) curves relating the miss detection rate and the false-alarm rate computed by deploying the detector on cropped windows and whole images, using, in the latter, either image resize or feature resize. Plots for cascade classifiers are generated based on confidence scores instead of on variation of the number of layers. To assess a method{\textquoteright}s overall performance on a given test, we use the average log miss rate (ALMR) as an aggregate performance score. To analyze the significance of the obtained results, we conduct 10-fold cross-validation experiments. We applied our evaluation framework to two state-of-the-art cascade-based detectors on the standard INRIA person dataset and a local dataset of near-infrared images. We used our evaluation framework to study the differences between the two detectors on the two datasets with different evaluation methods. Our results show the utility of our framework.
They also suggest that the descriptors used to represent features and the training window size are more important in predicting the detection performance than the nature of the imaging process, and that the choice between resizing images or features can have serious consequences.}, keywords = {average log miss rate, cascade classifier, comprehensive evaluation framework, cropped window, DET curve, detection error tradeoff, false-alarm rate, feature resize, human detection, image classification, image resize, infrared imaging, INRIA person dataset, miss detection rate, multisize sliding-window scanning, near-infrared image, object detection}, isbn = {1524-9050}, doi = {10.1109/TITS.2009.2026670}, author = {Hussein,M. and Porikli, F. and Davis, Larry S.} } @conference {15908, title = {Compressed sensing for Synthetic Aperture Radar imaging}, booktitle = {2009 16th IEEE International Conference on Image Processing (ICIP)}, year = {2009}, month = {2009/11//}, pages = {2141 - 2144}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we introduce a new Synthetic Aperture Radar (SAR) imaging modality that provides a high resolution map of the spatial distribution of targets and terrain based on a significant reduction in the number of transmitted and/or received electromagnetic waveforms. This new imaging scheme, which requires no new hardware components, allows the aperture to be compressed and presents many important applications and advantages, among which are resolving ambiguities, strong resistance to countermeasures and interception, and reduced on-board storage constraints.}, keywords = {Compressed sensing, High-resolution imaging, Image coding, image resolution, Optical imaging, Radar antennas, radar imaging, Radar polarimetry, Reflectivity, SAR, spatial distribution, synthetic aperture radar, synthetic aperture radar imaging}, isbn = {978-1-4244-5653-6}, doi = {10.1109/ICIP.2009.5414307}, author = {Patel, Vishal M. and Easley,G. R and Healy,D. M and Chellapa, Rama} } @article {15298, title = {Computational linguistics for metadata building (CLiMB): using text mining for the automatic identification, categorization, and disambiguation of subject terms for image metadata}, journal = {Multimedia Tools and Applications}, volume = {42}, year = {2009}, month = {2009///}, pages = {115 - 138}, abstract = {In this paper, we present a system using computational linguistic techniques to extract metadata for image access. We discuss the implementation, functionality and evaluation of an image catalogers{\textquoteright} toolkit, developed in the Computational Linguistics for Metadata Building (CLiMB) research project. We have tested components of the system, including phrase finding for the art and architecture domain, functional semantic labeling using machine learning, and disambiguation of terms in domain-specific text vis-{\`a}-vis a rich thesaurus of subject terms, geographic and artist names. We present specific results on disambiguation techniques and on the nature of the ambiguity problem given the thesaurus, resources, and domain-specific text resource, with a comparison of domain-general resources and text. Our primary user group for evaluation has been the cataloger expert with specific expertise in the fields of painting, sculpture, and vernacular and landscape architecture.}, author = {Klavans,J. L and Sheffield,C. and Abels,E. and Jimmy Lin and Passonneau,R. and Sidhu,T.
and Soergel,D.} } @conference {19443, title = {Computer Help at Home: Methods and Motivations for Informal Technical Support}, booktitle = {SIGCHI {\textquoteright}09}, series = {CHI {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {739 - 748}, publisher = {ACM}, organization = {ACM}, abstract = {Prior research suggests that people may ask their family and friends for computer help. But what influences whether and how a "helper" will provide help? To answer this question, we conducted a qualitative investigation of people who participated in computer support activities with family and friends in the past year. We describe how factors including maintenance of one{\textquoteright}s personal identity as a computer expert and accountability to one{\textquoteright}s social network determine who receives help and the quality of help provided. We also discuss the complex, fractured relationship between the numerous stakeholders involved in the upkeep of home computing infrastructures. Based on our findings, we provide implications for the design of systems to support informal help-giving in residential settings.}, keywords = {help-giving, help-seeking, Home computing, identity management, social networks}, isbn = {978-1-60558-246-7}, url = {http://doi.acm.org/10.1145/1518701.1518816}, author = {Poole, Erika Shehan and Marshini Chetty and Morgan, Tom and Grinter, Rebecca E. and Edwards, W. Keith} } @article {13279, title = {Computing and visualizing a graph-based decomposition for non-manifold shapes}, journal = {Graph-Based Representations in Pattern Recognition}, year = {2009}, month = {2009///}, pages = {62 - 71}, abstract = {Modeling and understanding complex non-manifold shapes is a key issue in shape analysis and retrieval. The topological structure of a non-manifold shape can be analyzed through its decomposition into a collection of components with a simpler topology. Here, we consider a decomposition of a non-manifold shape into components which are almost manifolds, and we present a novel graph representation which highlights the non-manifold singularities shared by the components as well as their connectivity relations. We describe an algorithm for computing the decomposition and its associated graph representation. We present a new tool for visualizing the shape decomposition and its graph as an effective support to modeling, analyzing and understanding non-manifold shapes.}, doi = {10.1007/978-3-642-02124-4_7}, author = {De Floriani, Leila and Panozzo,D. and Hui,A.} } @article {13701, title = {A cost-effective lexical acquisition process for large-scale thesaurus translation}, journal = {Language resources and evaluation}, volume = {43}, year = {2009}, month = {2009///}, pages = {27 - 40}, abstract = {Thesauri and controlled vocabularies facilitate access to digital collections by explicitly representing the underlying principles of organization. Translation of such resources into multiple languages is an important component for providing multilingual access. However, the specificity of vocabulary terms in most thesauri precludes fully-automatic translation using general-domain lexical resources. In this paper, we present an efficient process for leveraging human translations to construct domain-specific lexical resources. This process is illustrated on a thesaurus of 56,000 concepts used to catalog a large archive of oral histories. 
We elicited human translations on a small subset of concepts, induced a probabilistic phrase dictionary from these translations, and used the resulting resource to automatically translate the rest of the thesaurus. Two separate evaluations demonstrate the acceptability of the automatic translations and the cost-effectiveness of our approach.}, author = {Jimmy Lin and Murray,G. C and Dorr, Bonnie J and Haji{\v c},J. and Pecina,P.} } @article {13259, title = {A dimension-independent library for building and manipulating multiresolution triangulations}, volume = {DISI-TR-99-03}, year = {2009}, month = {2009///}, institution = {Department of Computer Science and Information Science, University of Genoa}, abstract = {A Multi-Triangulation (MT) is a general multiresolution model for representing k-dimensional geometric objects through simplicial complexes. An MT integrates several alternative representations of an object, and provides simple methods for handling representations at variable resolution efficiently, thus offering a basis for the development of applications that need to manage the level-of-detail of complex objects. In this paper, we present an object-oriented library that provides an open-ended tool for building and manipulating object representations based on the MT.}, author = {De Floriani, Leila and Magillo,P. and Puppo,E.} } @conference {17585, title = {Distributed Strategies for Channel Allocation and Scheduling in Software-Defined Radio Networks}, booktitle = {IEEE INFOCOM 2009}, year = {2009}, month = {2009/04/19/25}, pages = {1521 - 1529}, publisher = {IEEE}, organization = {IEEE}, abstract = {Equipping wireless nodes with multiple radios can significantly increase the capacity of wireless networks, by making these radios simultaneously transmit over multiple non-overlapping channels. However, due to the limited number of radios and available orthogonal channels, designing efficient channel assignment and scheduling algorithms in such networks is a major challenge. In this paper, we present provably good distributed algorithms for simultaneous channel allocation of individual links and packet scheduling in software-defined radio (SDR) wireless networks. Our distributed algorithms are very simple to implement, and do not require any coordination even among neighboring nodes. A novel access hash function or random oracle methodology is one of the key drivers of our results. With this access hash function, each radio can know the transmitters{\textquoteright} decisions for links in its interference set for each time slot without introducing any extra communication overhead between them. Further, by utilizing the inductive-scheduling technique, each radio can also back off appropriately to avoid collisions.
Extensive simulations demonstrate that our bounds are valid in practice.}, keywords = {access hash function, Channel allocation, channel assignment algorithm, channel capacity, collision avoidance, Computer science, cryptography, distributed algorithm, distributed algorithms, Educational institutions, inductive-scheduling technique, Interference, interference set, packet scheduling algorithm, Peer to peer computing, Radio network, radio networks, radiofrequency interference, random oracle methodology, scheduling, Scheduling algorithm, simultaneous channel allocation, software radio, software-defined radio wireless network capacity, telecommunication congestion control, telecommunication security, Throughput, wireless channels, Wireless networks}, isbn = {978-1-4244-3512-8}, doi = {10.1109/INFCOM.2009.5062069}, author = {Han,Bo and Kumar,V. S.A and Marathe,M. V and Parthasarathy,S. and Srinivasan, Aravind} } @conference {19597, title = {Dynamic Provable Data Possession}, booktitle = {CCS {\textquoteright}09 Proceedings of the 16th ACM Conference on Computer and Communications Security}, series = {CCS {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {213 - 222}, publisher = {ACM}, organization = {ACM}, abstract = {We consider the problem of efficiently proving the integrity of data stored at untrusted servers. In the provable data possession (PDP) model, the client preprocesses the data and then sends it to an untrusted server for storage, while keeping a small amount of meta-data. The client later asks the server to prove that the stored data has not been tampered with or deleted (without downloading the actual data). However, the original PDP scheme applies only to static (or append-only) files. We present a definitional framework and efficient constructions for dynamic provable data possession (DPDP), which extends the PDP model to support provable updates to stored data. We use a new version of authenticated dictionaries based on rank information. The price of dynamic updates is a performance change from O(1) to O(log n) (or O(n^ε log n)), for a file consisting of n blocks, while maintaining the same (or better, respectively) probability of misbehavior detection. Our experiments show that this slowdown is very low in practice (e.g. 415KB proof size and 30ms computational overhead for a 1GB file). We also show how to apply our DPDP scheme to outsourced file systems and version control systems (e.g. CVS).}, keywords = {authenticated data structures, Authentication, integrity checking, outsourced storage, proof of retrievability, provable data possession, skip list}, isbn = {978-1-60558-894-0}, url = {http://doi.acm.org/10.1145/1653662.1653688}, author = {Erway, Chris and K{\"u}p{\c c}{\"u}, Alptekin and Charalampos Papamanthou and Tamassia, Roberto} } @inbook {19635, title = {Efficient Robust Private Set Intersection}, booktitle = {Applied Cryptography and Network Security}, series = {Lecture Notes in Computer Science}, year = {2009}, month = {2009/01/01/}, pages = {125 - 142}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Computing Set Intersection privately and efficiently between two mutually mistrusting parties is an important basic procedure in the area of private data mining. Assuring robustness, namely, coping with potentially arbitrarily misbehaving (i.e., malicious) parties, while retaining protocol efficiency (rather than employing costly generic techniques) is an open problem.
In this work, the first solution to this problem is presented.}, keywords = {Coding and Information Theory, Computer Communication Networks, Cryptographic protocols, Data Encryption, Data Structures, Cryptology and Information Theory, Information Systems Applications (incl. Internet), Privacy Preserving Data Mining, Secure Two-party Computation, Set Intersection, Systems and Data Security}, isbn = {978-3-642-01956-2, 978-3-642-01957-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-01957-9_8}, author = {Dana Dachman-Soled and Malkin, Tal and Raykova, Mariana and Yung, Moti}, editor = {Abdalla, Michel and Pointcheval, David and Fouque, Pierre-Alain and Vergnaud, Damien} } @article {16404, title = {The emergence of zoning policy games in exurban jurisdictions: Informing collective action theory}, journal = {Land Use Policy}, volume = {26}, year = {2009}, month = {2009/04//}, pages = {356 - 367}, abstract = {Theoretical urban policy literature predicts the likelihood of free riding in the management of common goods such as forested open space; such an outcome is often characterized as a Prisoner{\textquoteright}s Dilemma game. Numerous cases exist in which neighboring jurisdictions cooperate to maintain public goods, challenging the expected results, yet theoretical explanations of these cases have not been fully developed. In this paper, we use an agent-based model to explore how underlying micro-behaviors affect the payoffs obtained by two neighboring municipalities in a hypothetical exurban area. Payoffs are measured in terms of regional forested space and of local tax revenue at the end of the agent-based simulations; the municipalities affect these payoffs through their choice of residential zoning policies and the spillover effect between the neighboring jurisdictions. Zoning restrictions influence the conversion of farmland into residential subdivisions of different types, and consequently the location of heterogeneous residential households in the region. Developers and residents respond to the changing landscape characteristics, thus establishing a feedback between early and future land-use patterns. The structure of the simulated payoffs is analyzed using standard game theory. Our analysis shows that a variety of games, in addition to Prisoner{\textquoteright}s Dilemma, can emerge between the neighboring jurisdictions. Other games encourage coordination or subsidization, offering some explanations for the unexpected observations. The game realized in any given context depends on the initial characteristics of the landscape, the value given to the objectives each township seeks to maximize, and the income distribution of the population.}, keywords = {Agent-based modeling, game theory, Land-use policy, Local government cooperation, Scale interaction}, isbn = {0264-8377}, doi = {10.1016/j.landusepol.2008.04.004}, url = {http://www.sciencedirect.com/science/article/pii/S0264837708000604}, author = {Zellner,Moira L. and Page,Scott E. and Rand, William and Brown,Daniel G. and Robinson,Derek T. and Nassauer,Joan and Low,Bobbi} } @conference {12511, title = {Enhancing sparsity using gradients for compressive sensing}, booktitle = {Image Processing (ICIP), 2009 16th IEEE International Conference on}, year = {2009}, month = {2009/11//}, pages = {3033 - 3036}, abstract = {In this paper, we propose a reconstruction method that recovers images assumed to have a sparse representation in a gradient domain by using partial measurement samples that are collected in the Fourier domain.
A key improvement of this technique is its use of a robust generalized Poisson solver, which greatly improves performance over similar proposed methods. Experiments also demonstrate that this new technique handles both random and restricted sampling scenarios more flexibly than its competitors.}, keywords = {compressive sensing, enhancing sparsity, Fourier analysis, Fourier domain, gradient domain, gradient methods, image reconstruction, image representation, partial measurement samples, robust generalized Poisson solver, sampling methods, sampling scenarios, sparse representation}, doi = {10.1109/ICIP.2009.5414411}, author = {Patel, Vishal M. and Easley,G. R and Chellapa, Rama and Healy,D. M} } @article {18701, title = {Evidence for Bidentate Substrate Binding as the Basis for the K48 Linkage Specificity of Otubain 1}, journal = {Journal of Molecular Biology}, volume = {386}, year = {2009}, month = {2009/03/06/}, pages = {1011 - 1023}, abstract = {Otubain 1 belongs to the ovarian tumor (OTU) domain class of cysteine protease deubiquitinating enzymes. We show here that human otubain 1 (hOtu1) is highly linkage-specific, cleaving Lys48 (K48)-linked polyubiquitin but not K63-, K29-, K6-, or K11-linked polyubiquitin, or linear α-linked polyubiquitin. Cleavage is not limited to either end of a polyubiquitin chain, and both free and substrate-linked polyubiquitin are disassembled. Intriguingly, cleavage of K48-diubiquitin by hOtu1 can be inhibited by diubiquitins of various linkage types, as well as by monoubiquitin. NMR studies and activity assays suggest that both the proximal and distal units of K48-diubiquitin bind to hOtu1. Reaction of Cys23 with ubiquitin-vinylsulfone identified a ubiquitin binding site that is distinct from the active site, which includes Cys91. Occupancy of the active site is needed to enable tight binding to the second site. We propose that distinct binding sites for the ubiquitins on either side of the scissile bond allow hOtu1 to discriminate among different isopeptide linkages in polyubiquitin substrates. Bidentate binding may be a general strategy used to achieve linkage-specific deubiquitination.}, keywords = {deubiquitination, isopeptide, linkage specificity, otubain, polyubiquitin}, isbn = {0022-2836}, doi = {10.1016/j.jmb.2008.12.085}, url = {http://www.sciencedirect.com/science/article/pii/S0022283608016124}, author = {Wang,Tao and Yin,Luming and Cooper,Eric M. and Lai,Ming-Yih and Dickey,Seth and Pickart,Cecile M. and Fushman, David and Wilkinson,Keith D. and Cohen,Robert E. and Wolberger,Cynthia} } @article {14573, title = {Evidence for Coregulation of Myocardial Gene Expression by MEF2 and NFAT in Human Heart Failure}, journal = {Circulation: Cardiovascular Genetics}, volume = {2}, year = {2009}, month = {2009/06/01/}, pages = {212 - 219}, abstract = {Background{\textemdash} Pathological stresses induce heart failure in animal models through activation of multiple cardiac transcription factors (TFs) working cooperatively. However, interactions among TFs in human heart failure are less understood. Here, we use genomic data to examine the evidence that 5 candidate TF families coregulate gene expression in human heart failure. Methods and Results{\textemdash} RNA isolates from failing (n=86) and nonfailing (n=16) human hearts were hybridized with Affymetrix HU133A arrays.
For each gene on the array, we determined conserved MEF2, NFAT, NKX, GATA, and FOX binding motifs within the -1-kb promoter region using human-murine sequence alignments and the TRANSFAC database. Across 9076 genes expressed in the heart, TF-binding motifs tended to cluster together in nonrandom patterns within promoters of specific genes (P values ranging from $10^{-2}$ to $10^{-21}$), suggesting coregulation. We then modeled differential expression as a function of TF combinations present in promoter regions. Several combinations predicted increased odds of differential expression in the failing heart, with the highest odds ratios noted for genes containing both MEF2 and NFAT binding motifs together in the same promoter region (peak odds ratio, 3.47; P=0.005). Conclusions{\textemdash} These findings provide genomic evidence for coregulation of myocardial gene expression by MEF2 and NFAT in human heart failure. In doing so, they extend the paradigm of combinatorial regulation of gene expression to the human heart and identify new target genes for mechanistic study. More broadly, we demonstrate how integrating diverse sources of genomic data yields novel insight into human cardiovascular disorders.}, doi = {10.1161/CIRCGENETICS.108.816686}, url = {http://circgenetics.ahajournals.org/content/2/3/212.abstract}, author = {Putt,Mary E. and Hannenhalli, Sridhar and Lu,Yun and Haines,Philip and Chandrupatla,Hareesh R. and Morrisey,Edward E. and Margulies,Kenneth B. and Cappola,Thomas P.} } @article {12997, title = {Extreme polymorphism in a vaccine antigen and risk of clinical malaria: implications for vaccine development}, journal = {Sci Transl Med}, volume = {1}, year = {2009}, month = {2009/10//}, pages = {2ra5}, abstract = {Vaccines directed against the blood stages of Plasmodium falciparum malaria are intended to prevent the parasite from invading and replicating within host cells. No blood-stage malaria vaccine has shown clinical efficacy in humans. Most malaria vaccine antigens are parasite surface proteins that have evolved extensive genetic diversity, and this diversity could allow malaria parasites to escape vaccine-induced immunity. We examined the extent and within-host dynamics of genetic diversity in the blood-stage malaria vaccine antigen apical membrane antigen-1 in a longitudinal study in Mali. Two hundred and fourteen unique apical membrane antigen-1 haplotypes were identified among 506 human infections, and amino acid changes near a putative invasion machinery binding site were strongly associated with the development of clinical symptoms, suggesting that these residues may be important to consider in designing polyvalent apical membrane antigen-1 vaccines and in assessing vaccine efficacy in field trials. This extreme diversity may pose a serious obstacle to an effective polyvalent recombinant subunit apical membrane antigen-1 vaccine.}, doi = {10.1126/scitranslmed.3000257}, author = {Takala,S. L and Coulibaly,D. and Thera,M. A and Batchelor,A. H and Cummings, Michael P. and Escalante,A. A and Ouattara,A. and Traor{\'e},K. and Niangaly,A. and Djimd{\'e},A. A and Doumbo,OK and Plowe,CV} } @article {15276, title = {Finding Biologically Accurate Clusterings in Hierarchical Decompositions Using the Variation of Information}, journal = {Lecture Notes in Computer Science: Research in Computational Molecular Biology}, volume = {5541}, year = {2009}, month = {2009///}, pages = {400 - 417}, author = {Navlakha,S. and White,J. R. and Nagarajan,N.
and Pop, Mihai and Kingsford, Carl} } @inbook {16263, title = {Finding Biologically Accurate Clusterings in Hierarchical Tree Decompositions Using the Variation of Information}, booktitle = {Research in Computational Molecular Biology}, series = {Lecture Notes in Computer Science}, volume = {5541}, year = {2009}, month = {2009///}, pages = {400 - 417}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Hierarchical clustering is a popular method for grouping together similar elements based on a distance measure between them. In many cases, annotations for some elements are known beforehand, which can aid the clustering process. We present a novel approach for decomposing a hierarchical clustering into the clusters that optimally match a set of known annotations, as measured by the variation of information metric. Our approach is general and does not require the user to enter the number of clusters desired. We apply it to two biological domains: finding protein complexes within protein interaction networks and identifying species within metagenomic DNA samples. For these two applications, we test the quality of our clusters by using them to predict complex and species membership, respectively. We find that our approach generally outperforms the commonly used heuristic methods.}, isbn = {978-3-642-02007-0}, url = {http://dx.doi.org/10.1007/978-3-642-02008-7_29}, author = {Navlakha,Saket and White,James and Nagarajan,Niranjan and Pop, Mihai and Kingsford, Carl}, editor = {Batzoglou,Serafim} } @article {14614, title = {Gene Profiling of Human Adipose Tissue During Evoked Inflammation In Vivo}, journal = {Diabetes}, volume = {58}, year = {2009}, month = {2009/10/01/}, pages = {2211 - 2219}, abstract = {OBJECTIVE Adipose inflammation plays a central role in obesity-related metabolic and cardiovascular complications. However, few human adipose-secreted proteins are known to mediate these processes. We hypothesized that microarray mRNA profiling of human adipose during evoked inflammation could identify novel adipocytokines. RESEARCH DESIGN AND METHODS Healthy human volunteers (n = 14) were treated with intravenous endotoxin (3 ng/kg lipopolysaccharide [LPS]) and underwent subcutaneous adipose biopsies before and after LPS. On Affymetrix U133Plus 2.0 arrays, adipose mRNAs modulated >1.5-fold (with P < 0.00001) were selected. SignalP 3.0 and SecretomeP 2.0 identified genes predicted to encode secreted proteins. Of these, 86 candidates were chosen for validation in adipose from an independent human endotoxemia protocol (N = 7, with 0.6 ng/kg LPS) and for exploration of cellular origin in primary human adipocytes and macrophages in vitro. RESULTS Microarray identified 776 adipose genes modulated by LPS; 298 were predicted to be secreted. Of detectable prioritized genes, 82 of 85 (96\% [95\% CI 90{\textendash}99]) were upregulated (fold changes >1.0) during the lower-dose (LPS 0.6 ng/kg) validation study and 51 of 85 (59\% [49{\textendash}70]) were induced greater than 1.5-fold. Treatment of primary adipocytes with LPS and macrophage polarization to M1 proinflammatory phenotype increased expression by 1.5-fold for 58 and 73\% of detectable genes, respectively. CONCLUSIONS We demonstrate that evoked inflammation of human adipose in vivo modulated expression of multiple genes likely secreted by adipocytes and monocytes.
These included established adipocytokines and chemokines implicated in recruitment and activation of lymphocytes, adhesion molecules, antioxidants, and several novel genes with unknown function. Such candidates may represent biomarkers and therapeutic targets for obesity-related complications.}, isbn = {0012-1797, 1939-327X}, doi = {10.2337/db09-0256}, url = {http://diabetes.diabetesjournals.org/content/58/10/2211}, author = {Shah,Rachana and Lu,Yun and Hinkle,Christine C and McGillicuddy,Fiona C and Kim,Roy and Hannenhalli, Sridhar and Cappola,Thomas P. and Heffron,Sean and Wang,XingMei and Mehta,Nehal N and Putt,Mary and Reilly,Muredach P} } @article {16269, title = {Genome Assembly Reborn: Recent Computational Challenges}, journal = {Briefings in Bioinformatics}, volume = {10}, year = {2009}, month = {2009/07/01/}, pages = {354 - 366}, abstract = {Research into genome assembly algorithms has experienced a resurgence due to new challenges created by the development of next generation sequencing technologies. Several genome assemblers have been published in recent years specifically targeted at the new sequence data; however, the ever-changing technological landscape leads to the need for continued research. In addition, the low cost of next generation sequencing data has led to an increased use of sequencing in new settings. For example, the new field of metagenomics relies on large-scale sequencing of entire microbial communities instead of isolate genomes, leading to new computational challenges. In this article, we outline the major algorithmic approaches for genome assembly and describe recent developments in this domain.}, keywords = {genome assembly, genome sequencing, next generation sequencing technologies}, isbn = {1467-5463, 1477-4054}, doi = {10.1093/bib/bbp026}, url = {http://bib.oxfordjournals.org/content/10/4/354}, author = {Pop, Mihai} } @inbook {19605, title = {Graph Drawing for Security Visualization}, booktitle = {Graph Drawing}, series = {Lecture Notes in Computer Science}, year = {2009}, month = {2009/01/01/}, pages = {2 - 13}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {With the number of devices connected to the internet growing rapidly and software systems being increasingly deployed on the web, security and privacy have become crucial properties for networks and applications. Due to the complexity and subtlety of cryptographic methods and protocols, software architects and developers often fail to incorporate security principles in their designs and implementations. Also, most users have minimal understanding of security threats. While several tools for developers, system administrators and security analysts are available, these tools typically provide information in the form of textual logs or tables, which are cumbersome to analyze. Thus, in recent years, the field of security visualization has emerged to provide novel ways to display security-related information so that it is easier to understand. In this work, we give a preliminary survey of approaches to the visualization of computer security concepts that use graph drawing techniques.}, keywords = {Algorithm Analysis and Problem Complexity, Computer Graphics, Data structures, Discrete Mathematics in Computer Science}, isbn = {978-3-642-00218-2, 978-3-642-00219-9}, url = {http://link.springer.com/chapter/10.1007/978-3-642-00219-9_2}, author = {Tamassia, Roberto and Palazzi, Bernardo and Charalampos Papamanthou}, editor = {Tollis, Ioannis G.
and Patrignani, Maurizio} } @conference {14321, title = {HydroSense: infrastructure-mediated single-point sensing of whole-home water activity}, booktitle = {Proceedings of the 11th international conference on Ubiquitous computing}, year = {2009}, month = {2009///}, author = {Jon Froehlich and Larson,E. and Campbell,T. and Haggerty,C. and Fogarty,J. and Patel,S.N.} } @conference {16317, title = {Incremental covering array failure characterization in large configuration spaces}, booktitle = {Proceedings of the eighteenth international symposium on Software testing and analysis}, year = {2009}, month = {2009///}, pages = {177 - 188}, author = {Fouch{\'e},S. and Cohen,M. B and Porter, Adam} } @article {14478, title = {Index interactions in physical design tuning: modeling, analysis, and applications}, journal = {Proceedings of the VLDB Endowment}, volume = {2}, year = {2009}, month = {2009/08//}, pages = {1234 - 1245}, abstract = {One of the key tasks of a database administrator is to optimize the set of materialized indices with respect to the current workload. To aid administrators in this challenging task, commercial DBMSs provide advisors that recommend a set of indices based on a sample workload. It is left for the administrator to decide which of the recommended indices to materialize and when. This decision requires some knowledge of how the indices benefit the workload, which may be difficult to understand if there are any dependencies or interactions among indices. Unfortunately, advisors do not provide this crucial information as part of the recommendation. Motivated by this shortcoming, we propose a framework and associated tools that can help an administrator understand the interactions within the recommended set of indices. We formalize the notion of index interactions and develop a novel algorithm to identify the interaction relationships that exist within a set of indices. We present experimental results with a prototype implementation over IBM DB2 that demonstrate the efficiency of our approach. We also describe two new database tuning tools that utilize information about index interactions. The first tool visualizes interactions based on a partitioning of the index-set into non-interacting subsets, and the second tool computes a schedule that materializes the indices over several maintenance windows with maximal overall benefit. In both cases, we provide strong analytical results showing that index interactions can enable enhanced functionality.}, isbn = {2150-8097}, url = {http://dl.acm.org/citation.cfm?id=1687627.1687766}, author = {Schnaitter,Karl and Polyzotis,Neoklis and Getoor, Lise} } @conference {16275, title = {Inexact Local Alignment Search over Suffix Arrays}, booktitle = {IEEE International Conference on Bioinformatics and Biomedicine, 2009. BIBM {\textquoteright}09}, year = {2009}, month = {2009/11/01/4}, pages = {83 - 87}, publisher = {IEEE}, organization = {IEEE}, abstract = {We describe an algorithm for finding approximate seeds for DNA homology searches. In contrast to previous algorithms that use exact or spaced seeds, our approximate seeds may contain insertions and deletions. We present a generalized heuristic for finding such seeds efficiently and prove that the heuristic does not affect sensitivity.
We show how to adapt this algorithm to work over the memory efficient suffix array with provably minimal overhead in running time. We demonstrate the effectiveness of our algorithm on two tasks: whole genome alignment of bacteria and alignment of the DNA sequences of 177 genes that are orthologous in human and mouse. We show our algorithm achieves better sensitivity and uses less memory than other commonly used local alignment tools.}, keywords = {bacteria, Bioinformatics, biology computing, Computational Biology, Costs, DNA, DNA homology searches, DNA sequences, Educational institutions, generalized heuristic, genes, Genetics, genome alignment, Genomics, human, inexact local alignment search, inexact seeds, local alignment, local alignment tools, memory efficient suffix array, microorganisms, molecular biophysics, mouse, Organisms, Sensitivity and Specificity, sequences, suffix array, USA Councils}, isbn = {978-0-7695-3885-3}, doi = {10.1109/BIBM.2009.25}, author = {Ghodsi,M. and Pop, Mihai} } @article {19460, title = {The Ins and Outs of Home Networking: The Case for Useful and Usable Domestic Networking}, journal = {ACM Trans. Comput.-Hum. Interact.}, volume = {16}, year = {2009}, month = {2009/06//}, pages = {8:1 - 8:28}, abstract = {Householders are increasingly adopting home networking as a solution to the demands created by the presence of multiple computers, devices, and the desire to access the Internet. However, current network solutions are derived from the world of work (and initially the military) and provide poor support for the needs of the home. We present the key findings to emerge from empirical studies of home networks in the UK and US. The studies reveal two key kinds of work that effective home networking relies upon: one, the technical work of setting up and maintaining the home network, and the other, the collaborative and socially organized work of the home which the network is embedded in and supports. The two are thoroughly intertwined and rely upon one another for their realization, yet neither is adequately supported by current networking technologies and applications. Explication of the {\textquotedblleft}work to make the home network work{\textquotedblright} opens up the design space for the continued integration of the home network in domestic life and elaboration of future support. Key issues for development include the development of networking facilities that do not require advanced networking knowledge, that are flexible and support the local social order of the home and the evolution of its routines, and which ultimately make the home network visible and accountable to household members.}, keywords = {home networking, Human computer interaction}, isbn = {1073-0516}, url = {http://doi.acm.org/10.1145/1534903.1534905}, author = {Grinter, Rebecca E. and Edwards, W. Keith and Marshini Chetty and Poole, Erika S. and Sung, Ja-Young and Yang, Jeonghwa and Crabtree, Andy and Tolmie, Peter and Rodden, Tom and Greenhalgh, Chris and Benford, Steve} } @article {17238, title = {Integrating Statistics and Visualization for Exploratory Power: From Long-Term Case Studies to Design Guidelines}, journal = {IEEE Computer Graphics and Applications}, volume = {29}, year = {2009}, month = {2009/06//May}, pages = {39 - 51}, abstract = {Evaluating visual-analytics systems is challenging because laboratory-based controlled experiments might not effectively represent analytical tasks. 
One such system, Social Action, integrates statistics and visualization in an interactive exploratory tool for social network analysis. This article describes results from long-term case studies with domain experts and extends established design goals for information visualization.}, keywords = {case studies, Control systems, Data analysis, data mining, data visualisation, Data visualization, data-mining, design guidelines, Employment, exploration, Filters, Guidelines, Information Visualization, insights, laboratory-based controlled experiments, Performance analysis, social network analysis, Social network services, social networking (online), social networks, SocialAction, statistical analysis, Statistics, visual analytics, visual-analytics systems, Visualization}, isbn = {0272-1716}, doi = {10.1109/MCG.2009.44}, author = {Perer,A. and Shneiderman, Ben} } @conference {15913, title = {Inverse halftoning using a shearlet representation}, booktitle = {Proceedings of SPIE}, volume = {7446}, year = {2009}, month = {2009///}, pages = {74460C - 74460C}, author = {Easley,G. R and Patel, Vishal M. and Healy Jr,D.M.} } @article {15552, title = {Maintaining nets and net trees under incremental motion}, journal = {Algorithms and Computation}, year = {2009}, month = {2009///}, pages = {1134 - 1143}, abstract = {The problem of maintaining geometric structures for points in motion has been well studied over the years. Much theoretical work to date has been based on the assumption that point motion is continuous and predictable, but in practice, motion is typically presented incrementally in discrete time steps and may not be predictable. We consider the problem of maintaining a data structure for a set of points undergoing such incremental motion. We present a simple online model in which two agents cooperate to maintain the structure. One defines the data structure and provides a collection of certificates, which guarantee the structure{\textquoteright}s correctness. The other checks that the motion over time satisfies these certificates and notifies the first agent of any violations.We present efficient online algorithms for maintaining both nets and net trees for a point set undergoing incremental motion in a space of constant dimension. We analyze our algorithms{\textquoteright} efficiencies by bounding their competitive ratios relative to an optimal algorithm. We prove a constant factor competitive ratio for maintaining a slack form of nets, and our competitive ratio for net trees is proportional to the square of the tree{\textquoteright}s height. }, doi = {10.1007/978-3-642-10631-6_114}, author = {Cho,M. and Mount, Dave and Park,E.} } @article {17917, title = {Modeling and visualization of human activities for multicamera networks}, journal = {EURASIP Journal on Image and Video Processing}, volume = {2009}, year = {2009}, month = {2009///}, author = {Sankaranarayanan,A. C and Patro,R. and Turaga,P. and Varshney, Amitabh and Chellapa, Rama} } @conference {14564, title = {Network-aware forward caching}, booktitle = {Proceedings of the 18th international conference on World wide web}, series = {WWW {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {291 - 300}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper proposes and evaluates a Network Aware Forward Caching approach for determining the optimal deployment strategy of forward caches to a network. 
A key advantage of this approach is that we can reduce the network costs associated with forward caching to maximize the benefit obtained from their deployment. We show in our simulation that a 37\% increase in net benefits could be achieved over the standard method of full cache deployment to cache all POPs traffic. In addition, we show that this maximal point occurs when only 68\% of the total traffic is cached. Another contribution of this paper is the analysis we use to motivate and evaluate this problem. We characterize the Internet traffic of 100K subscribers of a US residential broadband provider. We use both layer 4 and layer 7 analysis to investigate the traffic volumes of the flows as well as study the general characteristics of the applications used. We show that HTTP is a dominant protocol and accounts for 68\% of the total downstream traffic and that 34\% of that traffic is multimedia. In addition, we show that multimedia content using HTTP exhibits an 83\% annualized growth rate and other HTTP traffic has a 53\% growth rate versus the 26\% overall annual growth rate of broadband traffic. This shows that HTTP traffic will become ever more dominant and increase the potential caching opportunities. Furthermore, we characterize the core backbone traffic of this broadband provider to measure the distance travelled by content and traffic. We find that CDN traffic is much more efficient than P2P content and that there is large skew in the Air Miles between POPs in a typical network. Our findings show that there are many opportunities in broadband provider networks to optimize how traffic is delivered and cached.}, keywords = {caching, Web}, isbn = {978-1-60558-487-4}, doi = {10.1145/1526709.1526749}, url = {http://doi.acm.org/10.1145/1526709.1526749}, author = {Erman,Jeffrey and Gerber,Alexandre and Hajiaghayi, Mohammad T. and Pei,Dan and Spatscheck,Oliver} } @conference {13098, title = {Object detection via boosted deformable features}, booktitle = {Image Processing (ICIP), 2009 16th IEEE International Conference on}, year = {2009}, month = {2009/11//}, pages = {1445 - 1448}, abstract = {It is a common practice to model an object for detection tasks as a boosted ensemble of many models built on features of the object. In this context, features are defined as subregions with fixed relative locations and extents with respect to the object{\textquoteright}s image window. We introduce using deformable features with boosted ensembles. A deformable feature adapts its location depending on the visual evidence in order to match the corresponding physical feature. Therefore, deformable features can better handle deformable objects. We empirically show that boosted ensembles of deformable features perform significantly better than boosted ensembles of fixed features for human detection.}, keywords = {boosted ensembles, deformable features, feature extraction, human detection, object detection, statistics, visual evidence}, doi = {10.1109/ICIP.2009.5414561}, author = {Hussein,M. and Porikli, F. and Davis, Larry S.} } @article {16281, title = {Parametric Complexity of Sequence Assembly: Theory and Applications to Next Generation Sequencing}, journal = {Journal of Computational Biology}, volume = {16}, year = {2009}, month = {2009/07//}, pages = {897 - 908}, abstract = {In recent years, a flurry of new DNA sequencing technologies have altered the landscape of genomics, providing a vast amount of sequence information at a fraction of the costs that were previously feasible.
The task of assembling these sequences into a genome has, however, still remained an algorithmic challenge that is in practice answered by heuristic solutions. In order to design better assembly algorithms and exploit the characteristics of sequence data from new technologies, we need an improved understanding of the parametric complexity of the assembly problem. In this article, we provide a first theoretical study in this direction, exploring the connections between repeat complexity, read lengths, overlap lengths and coverage in determining the {\textquotedblleft}hard{\textquotedblright} instances of the assembly problem. Our work suggests at least two ways in which existing assemblers can be extended in a rigorous fashion, in addition to delineating directions for future theoretical investigations.}, isbn = {1066-5277, 1557-8666}, doi = {10.1089/cmb.2009.0005}, url = {http://www.liebertonline.com/doi/abs/10.1089/cmb.2009.0005}, author = {Nagarajan,Niranjan and Pop, Mihai} } @article {14713, title = {Passive aggressive measurement with MGRP}, journal = {SIGCOMM Comput. Commun. Rev.}, volume = {39}, year = {2009}, month = {2009/08//}, pages = {279 - 290}, abstract = {We present the Measurement Manager Protocol (MGRP), an in-kernel service that schedules and transmits probes on behalf of active measurement tools. Unlike prior measurement services, MGRP transparently piggybacks application packets inside the often significant amounts of empty padding contained in typical probes. Using MGRP thus combines the modularity, flexibility, and accuracy of standalone active measurement tools with the lower overhead of passive measurement techniques. Microbenchmark experiments show that the resulting bandwidth savings makes it possible to measure the network accurately, but faster and more aggressively than without piggybacking, and with few ill effects to piggybacked application or competing traffic. When using MGRP to schedule measurements on behalf of MediaNet, an overlay service that adaptively schedules media streams, we show MediaNet can achieve significantly higher streaming rates under the same network conditions.}, keywords = {active, available bandwidth, kernel module, passive, piggybacking, probing, streaming, transport protocol}, isbn = {0146-4833}, doi = {10.1145/1594977.1592601}, url = {http://doi.acm.org/10.1145/1594977.1592601}, author = {Papageorge,Pavlos and McCann,Justin and Hicks, Michael W.} } @article {13115, title = {Perception and Navigation for Autonomous Vehicles}, journal = {IEEE Transactions on Intelligent Transportation Systems}, volume = {10}, year = {2009}, month = {2009///}, pages = {417 - 427}, abstract = {We introduce a framework for evaluating human detectors that considers the practical application of a detector on a full image using multisize sliding-window scanning. We produce detection error tradeoff (DET) curves relating the miss detection rate and the false-alarm rate computed by deploying the detector on cropped windows and whole images, using, in the latter, either image resize or feature resize. Plots for cascade classifiers are generated based on confidence scores instead of on variation of the number of layers. To assess a method{\textquoteright}s overall performance on a given test, we use the average log miss rate (ALMR) as an aggregate performance score. To analyze the significance of the obtained results, we conduct 10-fold cross-validation experiments.
We applied our evaluation framework to two state-of-the-art cascade-based detectors on the standard INRIA Person dataset and a local dataset of near-infrared images. We used our evaluation framework to study the differences between the two detectors on the two datasets with different evaluation methods. Our results show the utility of our framework. They also suggest that the descriptors used to represent features and the training window size are more important in predicting the detection performance than the nature of the imaging process, and that the choice between resizing images or features can have serious consequences.}, author = {Hussein,M. and Porikli, F. and Davis, Larry S.} } @inbook {18916, title = {Planning for Interactions among Autonomous Agents}, booktitle = {Programming Multi-Agent Systems}, series = {Lecture Notes in Computer Science}, volume = {5442}, year = {2009}, month = {2009///}, pages = {1 - 23}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {AI planning research has traditionally focused on offline planning for static single-agent environments. In environments where an agent needs to plan its interactions with other autonomous agents, planning is much more complicated, because the actions of the other agents can induce a combinatorial explosion in the number of contingencies that the planner will need to consider. This paper discusses several ways to alleviate the combinatorial explosion, and illustrates their use in several different kinds of multi-agent planning domains.}, keywords = {Computer science}, isbn = {978-3-642-03277-6}, url = {http://www.springerlink.com/content/j258015ux2p38383/abstract/}, author = {Au,Tsz-Chiu and Kuter,Ugur and Nau, Dana S.}, editor = {Hindriks,Koen and Pokahr,Alexander and Sardina,Sebastian} } @inbook {13395, title = {PrDB: Managing Large-Scale Correlated Probabilistic Databases (Abstract)}, booktitle = {Scalable Uncertainty Management}, series = {Lecture Notes in Computer Science}, volume = {5785}, year = {2009}, month = {2009///}, pages = {1 - 1}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Increasing numbers of real-world application domains are generating data that is inherently noisy, incomplete, and probabilistic in nature. Statistical inference and probabilistic modeling often introduce another layer of uncertainty on top of that. Examples of such data include measurement data collected by sensor networks, observation data in the context of social networks, scientific and biomedical data, and data collected by various online cyber-sources. Over the last few years, numerous approaches have been proposed, and several systems built, to integrate uncertainty into databases. However, these approaches typically make simplistic and restrictive assumptions concerning the types of uncertainties that can be represented. Most importantly, they often make highly restrictive independence assumptions, and cannot easily model rich correlations among the tuples or attribute values.
Furthermore, they typically lack support for specifying uncertainties at different levels of abstraction, needed to handle large-scale uncertain datasets.}, isbn = {978-3-642-04387-1}, url = {http://dx.doi.org/10.1007/978-3-642-04388-8_1}, author = {Deshpande, Amol}, editor = {Godo,Llu{\'\i}s and Pugliese,Andrea} } @conference {15486, title = {Prioritizing component compatibility tests via user preferences}, booktitle = {Software Maintenance, 2009. ICSM 2009. IEEE International Conference on}, year = {2009}, month = {2009/09//}, pages = {29 - 38}, abstract = {Many software systems rely on third-party components during their build process. Because the components are constantly evolving, quality assurance demands that developers perform compatibility testing to ensure that their software systems build correctly over all deployable combinations of component versions, also called configurations. However, large software systems can have many configurations, and compatibility testing is often time and resource constrained. We present a prioritization mechanism that enhances compatibility testing by examining the {\textquotedblleft}most important{\textquotedblright} configurations first, while distributing the work over a cluster of computers. We evaluate our new approach on two large scientific middleware systems and examine tradeoffs between the new prioritization approach and a previously developed lowest-cost-configuration-first approach.}, keywords = {compatibility testing prioritization, component configurations, computer clusters, Middleware, Middleware systems, object-oriented programming, program testing, software engineering, Software systems, third-party components, user preferences}, doi = {10.1109/ICSM.2009.5306357}, author = {Yoon,Il-Chul and Sussman, Alan and Memon, Atif M. and Porter, Adam} } @article {18686, title = {Prioritizing Vulnerability Remediation by Determining Attacker-Targeted Vulnerabilities}, journal = {Security Privacy, IEEE}, volume = {7}, year = {2009}, month = {2009/02//jan}, pages = {42 - 48}, abstract = {This article attempts to empirically analyze which vulnerabilities attackers tend to target in order to prioritize vulnerability remediation. This analysis focuses on the link between malicious connections and vulnerabilities, where each connection is considered malicious. Attacks requiring multiple connections are counted as multiple attacks. As the number of connections increases, so does the cost of recovering from the intrusion. The authors deployed four honey pots for four months, each running a different Windows service pack with its associated set of vulnerabilities.
They then performed three empirical analyses to determine the relationship between the number of malicious connections and the total number of vulnerabilities, the number of malicious connections and the number of vulnerabilities for different services, and the number of known successful attacks and the number of vulnerabilities for different services.}, keywords = {attacker-targeted vulnerabilities, intrusion detection, malicious connections, security of data, vulnerability remediation, Windows service pack}, isbn = {1540-7993}, doi = {10.1109/MSP.2009.13}, author = {Michel Cukier and Panjwani,S.} } @book {14511, title = {Proceedings of the 1st ACM SIGKDD Workshop on Knowledge Discovery from Uncertain Data}, year = {2009}, month = {2009///}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The importance of uncertain data is growing quickly in many essential applications such as environmental surveillance, mobile object tracking and data integration. Recently, storing, collecting, processing, and analyzing uncertain data has attracted increasing attention from both academia and industry. Analyzing and mining uncertain data needs collaboration and joint effort from multiple research communities including reasoning under uncertainty, uncertain databases and mining uncertain data. For example, statistics and probabilistic reasoning can provide support with models for representing uncertainty. The uncertain database community can provide methods for storing and managing uncertain data, while research in mining uncertain data can provide data analysis tasks and methods. It is important to build connections among those communities to tackle the overall problem of analyzing and mining uncertain data. There are many common challenges among the communities. One is to understand the different modeling assumptions made, and how they impact the methods, both in terms of accuracy and efficiency. Different researchers hold different assumptions and this is one of the major obstacles in the research of mining uncertain data. Another is the scalability of proposed management and analysis methods. Finally, to make analysis and mining useful and practical, we need real data sets for testing. Unfortunately, uncertain data sets are often hard to get. The goal of the First ACM SIGKDD Workshop on Knowledge Discovery from Uncertain Data (U{\textquoteright}09) is to discuss in depth the challenges, opportunities and techniques on the topic of analyzing and mining uncertain data. The theme of this workshop is to make connections among the research areas of uncertain databases, probabilistic reasoning, and data mining, as well as to build bridges among the aspects of models, data, applications, novel mining tasks and effective solutions. By making connections among different communities, we aim at understanding each other in terms of scientific foundation as well as commonality and differences in research methodology. The workshop program is very stimulating and exciting. We are pleased to feature two invited talks by pioneers in mining uncertain data. Christopher Jermaine will give an invited talk titled {\textquotedblleft}Managing and Mining Uncertain Data: What Might We Do Better?{\textquotedblright} Matthias Renz will address the topic {\textquotedblleft}Querying and Mining Uncertain Data: Methods, Applications, and Challenges{\textquotedblright}.
Moreover, 8 accepted papers in 4 full presentations and 4 concise presentations will cover a range of interesting topics and on-going research projects about uncertain data mining.}, isbn = {978-1-60558-675-5}, editor = {Pei,Jian and Getoor, Lise and de Keijzer,Ander} } @article {16061, title = {Querying timestamped event sequences by exact search or similarity-based search: design and empirical evaluation}, volume = {HCIL-2009-20}, year = {2009}, month = {2009///}, institution = {Human-Computer Interaction Lab, University of Maryland}, author = {Wongsuphasawat,K. and Plaisant, Catherine and Shneiderman, Ben} } @article {17421, title = {The reader-to-leader framework: Motivating technology-mediated social participation}, journal = {AIS Transactions on Human-Computer Interaction}, volume = {1}, year = {2009}, month = {2009///}, pages = {13 - 32}, abstract = {Billions of people participate in online social activities. Most users participate as readers of discussion boards, searchers of blogposts, or viewers of photos. A fraction of users become contributors of user-generated content by writing consumer product reviews, uploading travel photos, or expressing political opinions. Some users move beyond such individual efforts to become collaborators, forming tightly connected groups with lively discussions whose outcome might be a Wikipedia article or a carefully edited YouTube video. A small fraction of users becomes leaders, who participate in governance by setting and upholding policies, repairing vandalized materials, or mentoring novices. We analyze these activities and offer the Reader-to-Leader Framework with the goal of helping researchers, designers, and managers understand what motivates technology-mediated social participation. This will enable them to improve interface design and social support for their companies, government agencies, and non-governmental organizations. These improvements could reduce the number of failed projects, while accelerating the application of social media for national priorities such as healthcare, energy sustainability, emergency response, economic development, education, and more. }, author = {Preece,J. and Shneiderman, Ben} } @article {17655, title = {Scheduling on Unrelated Machines under Tree-Like Precedence Constraints}, journal = {Algorithmica}, volume = {55}, year = {2009}, month = {2009///}, pages = {205 - 226}, abstract = {We present polylogarithmic approximations for the $R|\mathrm{prec}|C_{\max}$ and $R|\mathrm{prec}|\sum_j w_j C_j$ problems, when the precedence constraints are {\textquotedblleft}treelike{\textquotedblright}{\textemdash}i.e., when the undirected graph underlying the precedences is a forest. These are the first non-trivial generalizations of the job shop scheduling problem to scheduling with precedence constraints that are not just chains. These are also the first non-trivial results for the weighted completion time objective on unrelated machines with precedence constraints of any kind. We obtain improved bounds for the weighted completion time and flow time for the case of chains with restricted assignment{\textemdash}this generalizes the job shop problem to these objective functions. We use the same lower bound of {\textquotedblleft}congestion + dilation{\textquotedblright}, as in other job shop scheduling approaches (e.g. Shmoys, Stein and Wein, SIAM J. Comput. 23, 617{\textendash}632, 1994).
The first step in our algorithm for the $R|\mathrm{prec}|C_{\max}$ problem with treelike precedences involves using the algorithm of Lenstra, Shmoys and Tardos to obtain a processor assignment with the congestion + dilation value within a constant factor of the optimal. We then show how to generalize the random-delays technique of Leighton, Maggs and Rao to the case of trees. For the special case of chains, we show a dependent rounding technique which leads to a bicriteria approximation algorithm for minimizing the flow time, a notoriously hard objective function.}, isbn = {0178-4617}, url = {http://dx.doi.org/10.1007/s00453-007-9004-y}, author = {Anil Kumar,V. and Marathe,Madhav and Parthasarathy,Srinivasan and Srinivasan, Aravind} } @article {16283, title = {Searching for SNPs with cloud computing}, journal = {Genome Biology}, volume = {10}, year = {2009}, month = {2009/11/20/}, pages = {R134 - R134}, abstract = {As DNA sequencing outpaces improvements in computer speed, there is a critical need to accelerate tasks like alignment and SNP calling. Crossbow is a cloud-computing software tool that combines the aligner Bowtie and the SNP caller SOAPsnp. Executing in parallel using Hadoop, Crossbow analyzes data comprising 38-fold coverage of the human genome in three hours using a 320-CPU cluster rented from a cloud computing service for about \$85. Crossbow is available from http://bowtie-bio.sourceforge.net/crossbow/.}, isbn = {1465-6906}, doi = {10.1186/gb-2009-10-11-r134}, url = {http://genomebiology.com/2009/10/11/R134}, author = {Langmead,Ben and Schatz,Michael C and Jimmy Lin and Pop, Mihai and Salzberg,Steven L.} } @article {13326, title = {Semantic-based segmentation and annotation of 3D models}, journal = {Image Analysis and Processing{\textendash}ICIAP 2009}, year = {2009}, month = {2009///}, pages = {103 - 112}, abstract = {3D objects have become widely available and used in different application domains. Thus, it is becoming fundamental to use, integrate and develop techniques for extracting and maintaining their embedded knowledge. These techniques should be encapsulated in portable and intelligent systems able to semantically annotate the 3D object models in order to improve their usability and indexing, especially in innovative web cooperative environments. Lately, we are moving in this direction, with the definition and development of data structures, methods and interfaces for structuring and semantically annotating 3D complex models (and scenes) - even changing in time - according to ontology-driven metadata and following ontology-driven processes. Here, we concentrate on the tools for segmenting manifold 3D models and on the underlying structural representation that we build and manipulate. We also describe the first prototype of an annotation tool which allows a hierarchical semantic-driven tagging of the segmented model and provides an interface from which the user can inspect and browse the entire segmentation graph.}, doi = {10.1007/978-3-642-04146-4_13}, author = {Papaleo,L. and De Floriani, Leila} } @proceedings {19206, title = {Sensing opportunities for personalized feedback technology to reduce consumption}, year = {2009}, month = {2009}, author = {Jon Froehlich and Everitt,K. and Fogarty,J. and Patel,S.
and Landay,J.} } @article {13271, title = {A Set of Tools for Representing, Decomposing and Visualizing Non-Manifold Cellular Complexes}, volume = {DISI-TR-09-06}, year = {2009}, month = {2009///}, institution = {Department of Computer Science and Information Science, University of Genoa}, abstract = {Modeling and understanding complex non-manifold shapes is a key issue in shape analysis and retrieval. The topological structure of a non-manifold shape can be analyzed through its decomposition into a collection of components with a simpler topology. Here, we consider a decomposition of a non-manifold shape into components which are almost manifolds, and we present a novel graph representation which highlights the non-manifold singularities shared by the components as well as their connectivity relations. We describe an algorithm for computing the decomposition and its associated graph representation. We present a new tool for visualizing the shape decomposition and its graph as an effective support to modeling, analyzing and understanding non-manifold shapes. We describe a new data structure for non-manifold simplicial complexes that we used in our decomposition software and we provide a complete description of all functionalities of the library we developed.}, author = {De Floriani, Leila and Panozzo,D. and Hui,A.} } @article {15909, title = {Shearlet-Based Deconvolution}, journal = {IEEE Transactions on Image Processing}, volume = {18}, year = {2009}, month = {2009/12//}, pages = {2673 - 2685}, abstract = {In this paper, a new type of deconvolution algorithm is proposed that is based on estimating the image from a shearlet decomposition. Shearlets provide a multidirectional and multiscale decomposition that has been mathematically shown to represent distributed discontinuities such as edges better than traditional wavelets. Constructions such as curvelets and contourlets share similar properties, yet their implementations are significantly different from that of shearlets. Taking advantage of unique properties of a new M-channel implementation of the shearlet transform, we develop an algorithm that allows for the approximation inversion operator to be controlled on a multiscale and multidirectional basis. A key improvement over closely related approaches such as ForWaRD is the automatic determination of the threshold values for the noise shrinkage for each scale and direction without explicit knowledge of the noise variance using a generalized cross validation (GCV). Various tests show that this method can perform significantly better than many competitive deconvolution algorithms.}, keywords = {approximation inversion operator, contourlets, curvelets, Deconvolution, distributed discontinuities, generalized cross validation, image restoration, M-channel implementation, shearlet-based deconvolution, shearlets, Wavelet transforms, wavelets}, isbn = {1057-7149}, doi = {10.1109/TIP.2009.2029594}, author = {Patel, Vishal M. and Easley,G. R and Healy,D. M} } @conference {15922, title = {Sparsity inspired selection and recognition of iris images}, booktitle = {Biometrics: Theory, Applications, and Systems, 2009. BTAS {\textquoteright}09. IEEE 3rd International Conference on}, year = {2009}, month = {2009/09//}, pages = {1 - 6}, abstract = {Iris images acquired from a partially cooperating subject often suffer from blur, occlusion due to eyelids, and specular reflections. The performance of existing iris recognition systems degrades significantly on these images.
Hence it is essential to select good images from the incoming iris video stream, before they are input to the recognition algorithm. In this paper, we propose a sparsity based algorithm for selection of good iris images and their subsequent recognition. Unlike most existing algorithms for iris image selection, our method can handle segmentation errors and a wider range of acquisition artifacts common in iris image capture. We perform selection and recognition in a single step which is more efficient than devising separate specialized algorithms for the two. Recognition from partially cooperating users is a significant step towards deploying iris systems in a wide variety of applications.}, keywords = {biometrics (access control), image recognition, Image segmentation, iris image recognition, iris image selection, iris video stream, sparsity inspired selection, video signal processing, video streaming}, doi = {10.1109/BTAS.2009.5339067}, author = {Pillai,J.K. and Patel, Vishal M. and Chellapa, Rama} } @article {17754, title = {SPOT Databases: Efficient Consistency Checking and Optimistic Selection in Probabilistic Spatial Databases}, journal = {Knowledge and Data Engineering, IEEE Transactions on}, volume = {21}, year = {2009}, month = {2009/01//}, pages = {92 - 107}, abstract = {Spatial probabilistic temporal (SPOT) databases are a paradigm for reasoning with probabilistic statements about where a vehicle may be now or in the future. They express statements of the form {\textquotedblleft}Object O is in spatial region R at some time t with some probability in the interval [L,U].{\textquotedblright} Past work on SPOT databases has developed selection operators based on selecting SPOT atoms that are entailed by the SPOT database; we call this {\textquotedblleft}cautious{\textquotedblright} selection. In this paper, we study several problems. First, we note that the runtime of consistency checking and cautious selection algorithms in past work is influenced greatly by the granularity of the underlying Cartesian space. We first introduce the notion of {\textquotedblleft}optimistic{\textquotedblright} selection, where we are interested in returning all SPOT atoms in a database that are consistent with respect to a query, rather than having an entailment relationship. We then develop an approach to scaling SPOT databases that has three main contributions: 1) We develop methods to eliminate variables from the linear programs used in past work, thus greatly reducing the size of the linear programs used; the resulting advances apply to consistency checking, optimistic selection, and cautious selection. 2) We develop a host of theorems to show how we can prune the search space when we are interested in optimistic selection. 3) We use the above contributions to build an efficient index to execute optimistic selection queries over SPOT databases. Our approach is superior to past work in two major respects: First, it makes fewer assumptions than all but one past work on this topic. Second, our experiments, which are based on real-world data about ship movements, show that our algorithms are much more efficient than those in past work.}, keywords = {Cartesian space, cautious selection, consistency checking, data integrity, database indexing, inference mechanisms, linear programming, optimistic selection, probabilistic databases, probability, query processing, search problems, spatial probabilistic temporal databases, temporal reasoning, visual databases}, isbn = {1041-4347}, doi = {10.1109/TKDE.2008.93}, author = {Parker,A. and Infantes,G.
and Grant,J. and V.S. Subrahmanian} } @article {16287, title = {Statistical Methods for Detecting Differentially Abundant Features in Clinical Metagenomic Samples}, journal = {PLoS Comput Biology}, volume = {5}, year = {2009}, month = {2009/04/10/}, pages = {e1000352 - e1000352}, abstract = {The emerging field of metagenomics aims to understand the structure and function of microbial communities solely through DNA analysis. Current metagenomics studies comparing communities resemble large-scale clinical trials with multiple subjects from two general populations (e.g. sick and healthy). To improve analyses of this type of experimental data, we developed a statistical methodology for detecting differentially abundant features between microbial communities, that is, features that are enriched or depleted in one population versus another. We show our methods are applicable to various metagenomic data ranging from taxonomic information to functional annotations. We also provide an assessment of taxonomic differences in gut microbiota between lean and obese humans, as well as differences between the functional capacities of mature and infant gut microbiomes, and those of microbial and viral metagenomes. Our methods are the first to statistically address differential abundance in comparative metagenomics studies with multiple subjects, and we hope will give researchers a more complete picture of how exactly two environments differ.}, doi = {10.1371/journal.pcbi.1000352}, url = {http://dx.doi.org/10.1371/journal.pcbi.1000352}, author = {White,James Robert and Nagarajan,Niranjan and Pop, Mihai} } @article {16062, title = {The Story of One: Humanity scholarship with visualization and text analysis}, journal = {Relation}, volume = {10}, year = {2009}, month = {2009///}, pages = {8485 - 8485}, author = {Clement,T. and Plaisant, Catherine and Vuillemot,R.} } @conference {19045, title = {TACKing Together Efficient Authentication, Revocation, and Privacy in VANETs}, year = {2009}, month = {2009}, pages = {1 - 9}, abstract = {Vehicular ad hoc networks (VANETs) require a mechanism to help authenticate messages, identify valid vehicles, and remove malevolent vehicles. A public key infrastructure (PKI) can provide this functionality using certificates and fixed public keys. However, fixed keys allow an eavesdropper to associate a key with a vehicle and a location, violating drivers{\textquoteright} privacy. In this work we propose a VANET key management scheme based on temporary anonymous certified keys (TACKs). Our scheme efficiently prevents eavesdroppers from linking a vehicle{\textquoteright}s different keys and provides timely revocation of misbehaving participants while maintaining the same or less overhead for vehicle-to-vehicle communication as the current IEEE 1609.2 standard for VANET security.}, keywords = {ad hoc networks, data privacy, eavesdropper, IEEE 1609.2 standard, Message authentication, mobile radio, public key cryptography, public key infrastructure, remove malevolent vehicle, telecommunication security, temporary anonymous certified key, valid vehicle identification, VANET key management, VANET security, vehicle-to-vehicle communication, Vehicles, vehicular ad hoc network}, author = {Studer, A.
and Elaine Shi and Bai, Fan and Perrig, A.} } @article {18921, title = {Task decomposition on abstract states, for planning under nondeterminism}, journal = {Artificial Intelligence}, volume = {173}, year = {2009}, month = {2009/04//}, pages = {669 - 695}, abstract = {Although several approaches have been developed for planning in nondeterministic domains, solving large planning problems is still quite difficult. In this work, we present a new planning algorithm, called Yoyo, for solving planning problems in fully observable nondeterministic domains. Yoyo combines an HTN-based mechanism for constraining its search and a Binary Decision Diagram (BDD) representation for reasoning about sets of states and state transitions. We provide correctness theorems for Yoyo, and an experimental comparison of it with MBP and ND-SHOP2, the two previously-best algorithms for planning in nondeterministic domains. In our experiments, Yoyo could easily deal with problem sizes that neither MBP nor ND-SHOP2 could scale up to, and could solve problems about 100 to 1000 times faster than MBP and ND-SHOP2.}, keywords = {Binary decision diagrams, Hierarchical task-network (HTN) planning, Planning in nondeterministic domains}, isbn = {0004-3702}, doi = {10.1016/j.artint.2008.11.012}, url = {http://www.sciencedirect.com/science/article/pii/S0004370208001987}, author = {Kuter,Ugur and Nau, Dana S. and Pistore,Marco and Traverso,Paolo} } @article {17397, title = {Temporal Summaries: Supporting Temporal Categorical Searching, Aggregation and Comparison}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {15}, year = {2009}, month = {2009/12//Nov}, pages = {1049 - 1056}, abstract = {When analyzing thousands of event histories, analysts often want to see the events as an aggregate to detect insights and generate new hypotheses about the data. An analysis tool must emphasize both the prevalence and the temporal ordering of these events. Additionally, the analysis tool must also support flexible comparisons to allow analysts to gather visual evidence. In a previous work, we introduced align, rank, and filter (ARF) to accentuate temporal ordering. In this paper, we present temporal summaries, an interactive visualization technique that highlights the prevalence of event occurrences. Temporal summaries dynamically aggregate events in multiple granularities (year, month, week, day, hour, etc.) for the purpose of spotting trends over time and comparing several groups of records. They provide affordances for analysts to perform temporal range filters. We demonstrate the applicability of this approach in two extensive case studies with analysts who applied temporal summaries to search, filter, and look for patterns in electronic health records and academic records.}, keywords = {Aggregates, Collaborative work, Computational Biology, Computer Graphics, Data analysis, data visualisation, Data visualization, Databases, Factual, Displays, Event detection, Filters, Heparin, History, Human computer interaction, Human-computer interaction, Humans, Information Visualization, Interaction design, interactive visualization technique, Medical Records Systems, Computerized, Pattern Recognition, Automated, Performance analysis, Springs, temporal categorical data visualization, temporal categorical searching, temporal ordering, temporal summaries, Thrombocytopenia, Time factors}, isbn = {1077-2626}, doi = {10.1109/TVCG.2009.187}, author = {Wang,T.
D and Plaisant, Catherine and Shneiderman, Ben and Spring, Neil and Roseman,D. and Marchand,G. and Mukherjee,V. and Smith,M.} } @article {19726, title = {Three genomes from the phylum Acidobacteria provide insight into the lifestyles of these microorganisms in soils.}, journal = {Appl Environ Microbiol}, volume = {75}, year = {2009}, month = {2009 Apr}, pages = {2046-56}, abstract = {

The complete genomes of three strains from the phylum Acidobacteria were compared. Phylogenetic analysis placed them as a unique phylum. They share genomic traits with members of the Proteobacteria, the Cyanobacteria, and the Fungi. The three strains appear to be versatile heterotrophs. Genomic and culture traits indicate the use of carbon sources that span simple sugars to more complex substrates such as hemicellulose, cellulose, and chitin. The genomes encode low-specificity major facilitator superfamily transporters and high-affinity ABC transporters for sugars, suggesting that they are best suited to low-nutrient conditions. They appear capable of nitrate and nitrite reduction but not N(2) fixation or denitrification. The genomes contained numerous genes that encode siderophore receptors, but no evidence of siderophore production was found, suggesting that they may obtain iron via interaction with other microorganisms. The presence of cellulose synthesis genes and a large class of novel high-molecular-weight excreted proteins suggests potential traits for desiccation resistance, biofilm formation, and/or contribution to soil structure. Polyketide synthase and macrolide glycosylation genes suggest the production of novel antimicrobial compounds. Genes that encode a variety of novel proteins were also identified. The abundance of acidobacteria in soils worldwide and the breadth of potential carbon use by the sequenced strains suggest significant and previously unrecognized contributions to the terrestrial carbon cycle. Combining our genomic evidence with available culture traits, we postulate that cells of these isolates are long-lived, divide slowly, exhibit slow metabolic rates under low-nutrient conditions, and are well equipped to tolerate fluctuations in soil hydration.

}, keywords = {Anti-Bacterial Agents, bacteria, Biological Transport, Carbohydrate Metabolism, Cyanobacteria, DNA, Bacterial, Fungi, Genome, Bacterial, Macrolides, Molecular Sequence Data, Nitrogen, Phylogeny, Proteobacteria, Sequence Analysis, DNA, sequence homology, Soil Microbiology}, issn = {1098-5336}, doi = {10.1128/AEM.02294-08}, author = {Ward, Naomi L and Challacombe, Jean F and Janssen, Peter H and Henrissat, Bernard and Coutinho, Pedro M and Wu, Martin and Xie, Gary and Haft, Daniel H and Sait, Michelle and Badger, Jonathan and Barabote, Ravi D and Bradley, Brent and Brettin, Thomas S and Brinkac, Lauren M and Bruce, David and Creasy, Todd and Daugherty, Sean C and Davidsen, Tanja M and DeBoy, Robert T and Detter, J Chris and Dodson, Robert J and Durkin, A Scott and Ganapathy, Anuradha and Gwinn-Giglio, Michelle and Han, Cliff S and Khouri, Hoda and Kiss, Hajnalka and Kothari, Sagar P and Madupu, Ramana and Nelson, Karen E and Nelson, William C and Paulsen, Ian and Penn, Kevin and Ren, Qinghu and Rosovitz, M J and Jeremy D Selengut and Shrivastava, Susmita and Sullivan, Steven A and Tapia, Roxanne and Thompson, L Sue and Watkins, Kisha L and Yang, Qi and Yu, Chunhui and Zafar, Nikhat and Zhou, Liwei and Kuske, Cheryl R} } @article {13026, title = {Toward reconstructing the evolution of advanced moths and butterflies (Lepidoptera: Ditrysia): an initial molecular study}, journal = {BMC Evol Biol}, volume = {9}, year = {2009}, month = {2009///}, pages = {280 - 280}, abstract = {BACKGROUND: In the mega-diverse insect order Lepidoptera (butterflies and moths; 165,000 described species), deeper relationships are little understood within the clade Ditrysia, to which 98\% of the species belong. To begin addressing this problem, we tested the ability of five protein-coding nuclear genes (6.7 kb total), and character subsets therein, to resolve relationships among 123 species representing 27 (of 33) superfamilies and 55 (of 100) families of Ditrysia under maximum likelihood analysis. RESULTS: Our trees show broad concordance with previous morphological hypotheses of ditrysian phylogeny, although most relationships among superfamilies are weakly supported. There are also notable surprises, such as a consistently closer relationship of Pyraloidea than of butterflies to most Macrolepidoptera. Monophyly is significantly rejected by one or more character sets for the putative clades Macrolepidoptera as currently defined (P < 0.05) and Macrolepidoptera excluding Noctuoidea and Bombycoidea sensu lato (P <= 0.005), and nearly so for the superfamily Drepanoidea as currently defined (P < 0.08). Superfamilies are typically recovered or nearly so, but usually without strong support. Relationships within superfamilies and families, however, are often robustly resolved. We provide some of the first strong molecular evidence on deeper splits within Pyraloidea, Tortricoidea, Geometroidea, Noctuoidea and others. Separate analyses of mostly synonymous versus non-synonymous character sets revealed notable differences (though not strong conflict), including a marked influence of compositional heterogeneity on apparent signal in the third codon position (nt3). As available model partitioning methods cannot correct for this variation, we assessed overall phylogeny resolution through separate examination of trees from each character set.
Exploration of "tree space" with GARLI, using grid computing, showed that hundreds of searches are typically needed to find the best-feasible phylogeny estimate for these data. CONCLUSION: Our results (a) corroborate the broad outlines of the current working phylogenetic hypothesis for Ditrysia, (b) demonstrate that some prominent features of that hypothesis, including the position of the butterflies, need revision, and (c) resolve the majority of family and subfamily relationships within superfamilies as thus far sampled. Much further gene and taxon sampling will be needed, however, to strongly resolve individual deeper nodes.}, doi = {10.1186/1471-2148-9-280}, author = {Regier,J. C and Zwick,A. and Cummings, Michael P. and Kawahara,A. Y and Cho,S. and Weller,S. and Roe,A. and Baixeras,J. and Brown,J. W and Parr,C. and Davis,DR and Epstein,M and Hallwachs,W and Hausmann,A and Janzen,DH and Kitching,IJ and Solis,MA and Yen,S-H and Bazinet,A. L and Mitter,C} } @article {14301, title = {Triaging Checklists: a Substitute for a PhD in Static Analysis}, journal = {Evaluation and Usability of Programming Languages and Tools (PLATEAU) PLATEAU 2009}, year = {2009}, month = {2009///}, abstract = {Static analysis tools have achieved great success in recentyears in automating the process of detecting defects in soft- ware. However, these sophisticated tools have yet to gain widespread adoption, since many of these tools remain too difficult to understand and use. In previous work, we dis- covered that even with an effective code visualization tool, users still found it hard to determine if warnings reported by these tools were true errors or false warnings. The fun- damental problem users face is to understand enough of the underlying algorithm to determine if a warning is caused by imprecision in the algorithm, a challenge that even experts with PhDs may take a while to achieve. In our current work, we propose to use triaging checklists to provide users with systematic guidance to identify false warnings by taking into account specific sources of imprecision in the particular tool. Additionally, we plan to provide checklist assistants, which is a library of simple analyses designed to aid users in an- swering checklist questions. }, author = {Phang,K.Y. and Foster, Jeffrey S. and Hicks, Michael W. and Sazawal,V.} } @article {16292, title = {Ultrafast and memory-efficient alignment of short DNA sequences to the human genome}, journal = {Genome Biology}, volume = {10}, year = {2009}, month = {2009/03/04/}, pages = {R25 - R25}, abstract = {Bowtie is an ultrafast, memory-efficient alignment program for aligning short DNA sequence reads to large genomes. For the human genome, Burrows-Wheeler indexing allows Bowtie to align more than 25 million reads per CPU hour with a memory footprint of approximately 1.3 gigabytes. Bowtie extends previous Burrows-Wheeler techniques with a novel quality-aware backtracking algorithm that permits mismatches. Multiple processor cores can be used simultaneously to achieve even greater alignment speeds. Bowtie is open source http://bowtie.cbcb.umd.edu.}, isbn = {1465-6906}, doi = {10.1186/gb-2009-10-3-r25}, url = {http://genomebiology.com/2009/10/3/R25}, author = {Langmead,Ben and Trapnell,Cole and Pop, Mihai and Salzberg,Steven L.} } @article {17549, title = {A unified approach to scheduling on unrelated parallel machines}, journal = {J. 
ACM}, volume = {56}, year = {2009}, month = {2009/08//}, pages = {28:1{\textendash}28:31}, abstract = {We develop a single rounding algorithm for scheduling on unrelated parallel machines; this algorithm works well with the known linear programming-, quadratic programming-, and convex programming-relaxations for scheduling to minimize completion time, makespan, and other well-studied objective functions. This algorithm leads to the following applications for the general setting of unrelated parallel machines: (i) a bicriteria algorithm for a schedule whose weighted completion-time and makespan simultaneously exhibit the current-best individual approximations for these criteria; (ii) better-than-two approximation guarantees for scheduling to minimize the $L_p$ norm of the vector of machine-loads, for all 1 < p < $\infty$; and (iii) the first constant-factor multicriteria approximation algorithms that can handle the weighted completion-time and any given collection of integer $L_p$ norms. Our algorithm has a natural interpretation as a melding of linear-algebraic and probabilistic approaches. Via this view, it yields a common generalization of rounding theorems due to Karp et al. [1987] and Shmoys \& Tardos [1993], and leads to improved approximation algorithms for the problem of scheduling with resource-dependent processing times introduced by Grigoriev et al. [2007].}, keywords = {Approximation algorithms, Randomized rounding, scheduling under multiple criteria}, isbn = {0004-5411}, doi = {10.1145/1552285.1552289}, url = {http://doi.acm.org/10.1145/1552285.1552289}, author = {Kumar,V. S. Anil and Marathe,Madhav V. and Parthasarathy,Srinivasan and Srinivasan, Aravind} } @conference {16058, title = {VAST 2009 challenge: An insider threat}, booktitle = {Visual Analytics Science and Technology, 2009. VAST 2009. IEEE Symposium on}, year = {2009}, month = {2009/10//}, pages = {243 - 244}, abstract = {The 4th VAST Challenge centered on a cyber analytics scenario and offered three mini-challenges with datasets of badge and network traffic data, a social network including geospatial information, and security video. Teams could also enter the Grand Challenge, which combined all three datasets. In this paper, we summarize the dataset, the overall scenario and the questions asked in the challenges. We describe the judging process and new infrastructure developed to manage the submissions and compute accuracy measures in the social network mini-challenge. We received 49 entries from 30 teams, and gave 23 different awards to a total of 16 teams.}, keywords = {VAST 2009 challenge; cyber analytics; data visualisation; geospatial information; graphical user interfaces; human interaction; network traffic data; security video; social network; visual analytics}, doi = {10.1109/VAST.2009.5334454}, author = {Grinstein,G. and Scholtz,J. and Whiting,M. and Plaisant, Catherine} } @article {13112, title = {Video Compression and Retrieval of Moving Object Location Applied to Surveillance}, journal = {Image Analysis and Recognition}, year = {2009}, month = {2009///}, pages = {906 - 916}, abstract = {A major problem in surveillance systems is the storage requirements for video archival; videos are recorded continuously for long periods of time, resulting in large amounts of data. Therefore, it is essential to apply efficient compression techniques. Additionally, it is useful to be able to index the archived videos based on events.
In general, such events are defined by the interaction among moving objects in the scene. Consequently, besides data compression, efficient ways of storing moving objects should be considered. We present a method that exploits both temporal and spatial redundancy of videos captured from static cameras to perform compression and subsequently allows fast retrieval of moving object locations directly from the compressed data. Experimental results show that the approach achieves high compression ratios compared to other existing video compression techniques without significant quality degradation and is fast due to the simplicity of the operations required for compression and decompression.}, author = {Schwartz,W. and Pedrini,H. and Davis, Larry S.} } @article {16060, title = {Visual-Analytics Evaluation}, journal = {Computer Graphics and Applications, IEEE}, volume = {29}, year = {2009}, month = {2009/06//may}, pages = {16 - 17}, abstract = {Visual analytics (VA) is the science of analytical reasoning facilitated by interactive visual interfaces. Assessing VA technology{\textquoteright}s effectiveness is challenging because VA tools combine several disparate components, both low and high level, integrated in complex interactive systems used by analysts, emergency responders, and others. These components include analytical reasoning, visual representations, computer-human interaction techniques, data representations and transformations, collaboration tools, and especially tools for communicating the results of their use. VA tool users{\textquoteright} activities can be exploratory and can take place over days, weeks, or months. Users might not follow a predefined or even linear work flow. They might work alone or in groups. To understand these complex behaviors, an evaluation can target the component level, the system level, or the work environment level, and requires realistic data and tasks. Traditional evaluation metrics such as task completion time, number of errors, or recall and precision are insufficient to quantify the utility of VA tools, and new research is needed to improve our VA evaluation methodology.}, keywords = {visual analytics evaluation; analytic reasoning; cognition; data visualisation; interactive systems; synthetic-data-set generation; user interfaces; visual tools}, isbn = {0272-1716}, doi = {10.1109/MCG.2009.56}, author = {Plaisant, Catherine and Grinstein,G. and Scholtz,J.} } @article {15998, title = {What puts the {\textquotedblleft}meta{\textquotedblright} in metacognition?}, journal = {Behavioral and Brain Sciences}, volume = {32}, year = {2009}, month = {2009///}, pages = {138 - 139}, author = {Anderson,M. L and Perlis, Don} } @conference {16057, title = {What{\textquoteright}s being said near "Martha"? Exploring name entities in literary text collections}, booktitle = {Visual Analytics Science and Technology, 2009. VAST 2009. IEEE Symposium on}, year = {2009}, month = {2009/10//}, pages = {107 - 114}, abstract = {A common task in literary analysis is to study characters in a novel or collection. Automatic entity extraction, text analysis and effective user interfaces facilitate character analysis. Using our interface, called POSvis, the scholar uses word clouds and self-organizing graphs to review vocabulary, to filter by part of speech, and to explore the network of characters located near characters under review. Further, visualizations show word usages within an analysis window (i.e.
a book chapter), which can be compared with a reference window (i.e. the whole book). We describe the interface and report on an early case study with a humanities scholar.}, keywords = {POSvis; automatic entity extraction; character analysis; data visualisation; humanities scholar; information filtering; linguistics; literary analysis; literary text collection; name entity; part-of-speech filtering; self-organizing graph; text analysis; user interface; vocabulary; word clouds; word usage}, doi = {10.1109/VAST.2009.5333248}, author = {Vuillemot,R. and Clement,T. and Plaisant, Catherine and Kumar,A.} } @conference {18926, title = {Within epsilon of optimal play in the cultaptation social learning game}, series = {AAMAS {\textquoteright}09}, year = {2009}, month = {2009///}, pages = {1327 - 1328}, publisher = {International Foundation for Autonomous Agents and Multiagent Systems}, organization = {International Foundation for Autonomous Agents and Multiagent Systems}, address = {Richland, SC}, abstract = {Social learning, in which members of a society learn by observing the behavior of others, is an important foundation for human culture, and is observed in many other species as well. It seems natural to assume that social learning evolved due to the inherent superiority of copying others{\textquoteright} success rather than learning on one{\textquoteright}s own via trial-and-error innovation. However, there has also been substantial work questioning this intuition [3, 5, 1, 6, 4]. For example, blindly copying information from others is not useful if the information is wrong, or if it once was right but has since become outdated. Under what conditions does social learning outperform trial-and-error learning, and what kinds of social-learning strategies are likely to perform well?}, isbn = {978-0-9817381-7-8}, url = {http://dl.acm.org/citation.cfm?id=1558109.1558276}, author = {Carr,Ryan and Raboin,Eric and Parker,Austin and Nau, Dana S.} } @article {13874, title = {The ACL Anthology Reference Corpus: A reference dataset for bibliographic research in computational linguistics}, journal = {Proc. of the 6th International Conference on Language Resources and Evaluation (LREC{\textquoteright}08)}, year = {2008}, month = {2008///}, pages = {1755 - 1759}, abstract = {The ACL Anthology is a digital archive of conference and journal papers in natural language processing and computational linguistics. Its primary purpose is to serve as a reference repository of research results, but we believe that it can also be an object of study and a platform for research in its own right. We describe an enriched and standardized reference corpus derived from the ACL Anthology that can be used for research in scholarly document processing. This corpus, which we call the ACL Anthology Reference Corpus (ACL ARC), brings together the recent activities of a number of research groups around the world. Our goal is to make the corpus widely available, and to encourage other researchers to use it as a standard testbed for experiments in both bibliographic and bibliometric research.}, author = {Bird,S. and Dale,R. and Dorr, Bonnie J and Gibson,B. and Joseph,M.T. and Kan,M.Y. and Lee,D. and Powley,B. and Radev,D.R.
and Tan,Y.F.} } @article {15954, title = {Active logic semantics for a single agent in a static world}, journal = {Artificial Intelligence}, volume = {172}, year = {2008}, month = {2008/05//}, pages = {1045 - 1063}, abstract = {For some time we have been developing, and have had significant practical success with, a time-sensitive, contradiction-tolerant logical reasoning engine called the active logic machine (ALMA). The current paper details a semantics for a general version of the underlying logical formalism, active logic. Central to active logic are special rules controlling the inheritance of beliefs in general (and of beliefs about the current time in particular), very tight controls on what can be derived from direct contradictions (P\&{\textlnot}P), and mechanisms allowing an agent to represent and reason about its own beliefs and past reasoning. Furthermore, inspired by the notion that until an agent notices that a set of beliefs is contradictory, that set seems consistent (and the agent therefore reasons with it as if it were consistent), we introduce an "apperception function" that represents an agent{\textquoteright}s limited awareness of its own beliefs, and serves to modify inconsistent belief sets so as to yield consistent sets. Using these ideas, we introduce a new definition of logical consequence in the context of active logic, as well as a new definition of soundness such that, when reasoning with consistent premises, all classically sound rules remain sound in our new sense. However, not everything that is classically sound remains sound in our sense, for by classical definitions, all rules with contradictory premises are vacuously sound, whereas in active logic not everything follows from a contradiction.}, keywords = {Active logic, Autonomous agents, Brittleness, Logic, Nonmonotonic logic, Paraconsistent logic, semantics, Soundness, TIME}, isbn = {0004-3702}, doi = {10.1016/j.artint.2007.11.005}, url = {http://www.sciencedirect.com/science/article/pii/S0004370207001993}, author = {Anderson,Michael L. and Gomaa,Walid and Grant,John and Perlis, Don} } @article {15835, title = {Advances in multilingual and multimodal information retrieval}, journal = {Lecture Notes in Computer Science}, volume = {5152}, year = {2008}, month = {2008///}, author = {Peters,C. and Jijkoun,V. and Mandl,T. and M{\"u}ller,H. and Oard, Douglas and Pe{\~n}as,A. and Santos,D.} } @conference {17763, title = {An AGM-based belief revision mechanism for probabilistic spatio-temporal logics}, booktitle = {Proceedings of the 23rd national conference on Artificial intelligence}, year = {2008}, month = {2008///}, pages = {511 - 516}, author = {Parker,A. and Infantes,G. and V.S. Subrahmanian and Grant,J.} } @article {19608, title = {Algorithms for computing a parameterized st-orientation}, journal = {Theoretical Computer Science}, volume = {408}, year = {2008}, month = {2008/11/28/}, pages = {224 - 240}, abstract = {st-orientations (st-numberings) or bipolar orientations of undirected graphs are central to many graph algorithms and applications. Several algorithms have been proposed in the past to compute an st-orientation of a biconnected graph. In this paper, we present new algorithms that compute such orientations with certain (parameterized) characteristics in the final st-oriented graph, such as the length of the longest path.
This work has many applications, including Graph Drawing and Network Routing, where the length of the longest path is vital in deciding certain features of the final solution. This work applies to other difficult problems as well, such as graph coloring and, of course, longest path. We present extended theoretical and experimental results which show that our technique is efficient and performs well in practice.}, keywords = {Graph algorithms, Longest path, Planar graphs, st-numberings}, isbn = {0304-3975}, url = {http://www.sciencedirect.com/science/article/pii/S0304397508005653}, author = {Charalampos Papamanthou and Tollis, Ioannis G.} } @article {18782, title = {Algorithms for Generating Adaptive Projection Patterns for 3D Shape Measurement}, journal = {Journal of Computing and Information Science in Engineering}, volume = {8}, year = {2008}, month = {2008///}, pages = {031009 - 031009}, author = {Peng,T. and Gupta,S.K.} } @inbook {19602, title = {Algorithms for Location Estimation Based on RSSI Sampling}, booktitle = {Algorithmic Aspects of Wireless Sensor Networks}, series = {Lecture Notes in Computer Science}, year = {2008}, month = {2008/01/01/}, pages = {72 - 86}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {In this paper, we re-examine the RSSI measurement model for location estimation and provide the first detailed formulation of the probability distribution of the position of a sensor node. We also show how to use this probabilistic model to efficiently compute a good estimation of the position of the sensor node by sampling multiple readings from the beacons (where we do not merely use the mean of the samples) and then minimizing a function with an acceptable computational effort. The results of the simulation of our method in TOSSIM indicate that the location of the sensor node can be computed in a small amount of time and that the quality of the solution is competitive with previous approaches.}, keywords = {Algorithm Analysis and Problem Complexity, Computer Communication Networks, Data structures, Discrete Mathematics in Computer Science, Information Systems and Communication Service}, isbn = {978-3-540-92861-4, 978-3-540-92862-1}, url = {http://link.springer.com/chapter/10.1007/978-3-540-92862-1_7}, author = {Charalampos Papamanthou and Preparata, Franco P. and Tamassia, Roberto}, editor = {Fekete, S{\'a}ndor P.} } @conference {16069, title = {Aligning temporal data by sentinel events: discovering patterns in electronic health records}, booktitle = {Proceedings of the twenty-sixth annual SIGCHI conference on Human factors in computing systems}, series = {CHI {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {457 - 466}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Electronic Health Records (EHRs) and other temporal databases contain hidden patterns that reveal important cause-and-effect phenomena. Finding these patterns is a challenge when using traditional query languages and tabular displays. We present an interactive visual tool that complements query formulation by providing operations to align, rank and filter the results, and to visualize estimates of the intervals of validity of the data. Display of patient histories aligned on sentinel events (such as a first heart attack) enables users to spot precursor, co-occurring, and aftereffect events. A controlled study demonstrates the benefits of providing alignment (with a 61\% speed improvement for complex tasks).
A qualitative study and interviews with medical professionals demonstrate that the interface can be learned quickly and seems to address their needs.}, keywords = {electronic health record, Evaluation, Information Visualization, search, temporal data, Uncertainty}, isbn = {978-1-60558-011-1}, doi = {10.1145/1357054.1357129}, url = {http://doi.acm.org/10.1145/1357054.1357129}, author = {Wang,Taowei David and Plaisant, Catherine and Quinn,Alexander J. and Stanchak,Roman and Murphy,Shawn and Shneiderman, Ben} } @conference {17559, title = {Approximation Algorithms for Computing Capacity of Wireless Networks with SINR Constraints}, booktitle = {IEEE INFOCOM 2008. The 27th Conference on Computer Communications}, year = {2008}, month = {2008/04/13/18}, pages = {1166 - 1174}, publisher = {IEEE}, organization = {IEEE}, abstract = {A fundamental problem in wireless networks is to estimate their throughput capacity: given a set of wireless nodes and a set of connections, what is the maximum rate at which data can be sent on these connections? Most of the research in this direction has focused on either random distributions of points, or has assumed simple graph-based models for wireless interference. In this paper, we study the capacity estimation problem using the more general Signal to Interference Plus Noise Ratio (SINR) model for interference, on arbitrary wireless networks. The problem becomes much harder in this setting, because of the non-locality of the SINR model. Recent work by Moscibroda et al. (2006) has shown that the throughput in this model can differ from graph-based models significantly. We develop polynomial time algorithms to provably approximate the total throughput in this setting.}, keywords = {Algorithm design and analysis, approximation algorithm, Approximation algorithms, approximation theory, Computer networks, Computer science, graph theory, graph-based model, Interference constraints, polynomial time algorithm, Propagation losses, Protocols, radio networks, radiofrequency interference, signal to interference plus noise ratio, Signal to noise ratio, Throughput, wireless interference, wireless network, Wireless networks}, isbn = {978-1-4244-2025-4}, doi = {10.1109/INFOCOM.2008.172}, author = {Chafekar,D. and Kumar,V. S.A and Marathe,M. V and Parthasarathy,S. and Srinivasan, Aravind} } @article {19619, title = {Athos: Efficient Authentication of Outsourced File Systems}, journal = {Lecture Notes in Computer Science}, year = {2008}, month = {2008///}, pages = {80 - 96}, isbn = {03029743}, author = {Triandopoulos, Nikolaos and Goodrich, Michael T. and Charalampos Papamanthou and Tamassia, Roberto} } @conference {19598, title = {Authenticated Hash Tables}, booktitle = {CCS {\textquoteright}08 Proceedings of the 15th ACM Conference on Computer and Communications Security}, series = {CCS {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {437 - 448}, publisher = {ACM}, organization = {ACM}, abstract = {Hash tables are fundamental data structures that optimally answer membership queries. Suppose a client stores n elements in a hash table that is outsourced at a remote server so that the client can save space or achieve load balancing. Authenticating the hash table functionality, i.e., verifying the correctness of queries answered by the server and ensuring the integrity of the stored data, is crucial because the server, lying outside the administrative control of the client, can be malicious.
We design efficient and secure protocols for optimally authenticating membership queries on hash tables: for any fixed constants 0 < ε < 1 and κ > 1/ε, the server can provide a proof of integrity of the answer to a (non-)membership query in constant time, requiring O(n^ε / log^{κε-1} n) time to treat updates, yet keeping the communication and verification costs constant. This is the first construction for authenticating a hash table with constant query cost and sublinear update cost. Our solution employs the RSA accumulator in a nested way over the stored data, strictly improving upon previous accumulator-based solutions. Our construction applies to two concrete data authentication models and lends itself to a scheme that achieves different trade-offs, namely constant update time and O(n^ε / log^{κε} n) query time for fixed ε > 0 and κ > 0. An experimental evaluation of our solution shows very good scalability.}, keywords = {Authentication, hash tables, rsa accumulator, verification}, isbn = {978-1-59593-810-7}, url = {http://doi.acm.org/10.1145/1455770.1455826}, author = {Charalampos Papamanthou and Tamassia, Roberto and Triandopoulos, Nikos} } @conference {16073, title = {BELIV{\textquoteright}08: Beyond time and errors: novel evaluation methods for information visualization}, booktitle = {CHI {\textquoteright}08 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {3913 - 3916}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Information visualization systems allow users to produce insights, innovations and discoveries. Evaluating such tools is a challenging task, and the goal of BELIV{\textquoteright}08 is to take a step forward in the understanding of such a complex activity. Current evaluation methods exhibit noticeable limitations, and researchers in the area experience some frustration with evaluation processes that are time consuming and too often lead to unsatisfactory results. The most commonly used evaluation metrics, such as task time completion and number of errors, appear insufficient to quantify the quality of an information visualization system; thus the name of the workshop: "beyond time and errors".}, keywords = {Evaluation, Information Visualization}, isbn = {978-1-60558-012-8}, doi = {10.1145/1358628.1358955}, url = {http://doi.acm.org/10.1145/1358628.1358955}, author = {Bertini,Enrico and Perer,Adam and Plaisant, Catherine and Santucci,Giuseppe} } @article {16240, title = {Bioinformatics challenges of new sequencing technology}, journal = {Trends in Genetics}, volume = {24}, year = {2008}, month = {2008/03//}, pages = {142 - 149}, abstract = {New DNA sequencing technologies can sequence up to one billion bases in a single day at low cost, putting large-scale sequencing within the reach of many scientists. Many researchers are forging ahead with projects to sequence a range of species using the new technologies. However, these new technologies produce read lengths as short as 35{\textendash}40 nucleotides, posing challenges for genome assembly and annotation. Here we review the challenges and describe some of the bioinformatics systems that are being proposed to solve them.
We specifically address issues arising from using these technologies in assembly projects, both de novo and for resequencing purposes, as well as efforts to improve genome annotation in the fragmented assemblies produced by short read lengths.}, isbn = {0168-9525}, doi = {10.1016/j.tig.2007.12.006}, url = {http://www.sciencedirect.com/science/article/pii/S016895250800022X}, author = {Pop, Mihai and Salzberg,Steven L.} } @conference {17570, title = {Capacity of Asynchronous Random-Access Scheduling in Wireless Networks}, booktitle = {IEEE INFOCOM 2008. The 27th Conference on Computer Communications}, year = {2008}, month = {2008/04/13/18}, pages = {1148 - 1156}, publisher = {IEEE}, organization = {IEEE}, abstract = {We study the throughput capacity of wireless networks which employ (asynchronous) random-access scheduling as opposed to deterministic scheduling. The central question we answer is: how should we set the channel-access probability for each link in the network so that the network operates close to its optimal throughput capacity? We design simple and distributed channel-access strategies for random-access networks which are provably competitive with respect to the optimal scheduling strategy, which is deterministic, centralized, and computationally infeasible. We show that the competitiveness of our strategies is nearly the best achievable via random-access scheduling, thus establishing fundamental limits on the performance of random access. A notable outcome of our work is that random access compares well with deterministic scheduling when link transmission durations differ by small factors, and much worse otherwise. The distinguishing aspects of our work include modeling and rigorous analysis of asynchronous communication, asymmetry in link transmission durations, and hidden terminals under arbitrary link-conflict based wireless interference models.}, keywords = {asynchronous random-access scheduling, channel access probability, Computer networks, Computer science, Educational institutions, Interference, Optimal scheduling, Peer to peer computing, probability, Processor scheduling, radio link, radio links, radio networks, Routing, scheduling, Throughput, throughput capacity, wireless channels, Wireless networks}, isbn = {978-1-4244-2025-4}, doi = {10.1109/INFOCOM.2008.170}, author = {Chafekar,D. and Levin,D. and Kumar,V. S.A and Marathe,M. V and Parthasarathy,S. and Srinivasan, Aravind} } @conference {16329, title = {CiCUTS: Combining System Execution Modeling Tools with Continuous Integration Environments}, booktitle = {Engineering of Computer-Based Systems, IEEE International Conference on the}, year = {2008}, month = {2008///}, pages = {66 - 75}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {System execution modeling (SEM) tools provide an effective means to evaluate the quality of service (QoS) of enterprise distributed real-time and embedded (DRE) systems. SEM tools facilitate testing and resolving performance issues throughout the entire development life-cycle, rather than waiting until final system integration. SEM tools have not historically focused on effective testing. New techniques are therefore needed to help bridge the gap between the early integration capabilities of SEM tools and testing so developers can focus on resolving strategic integration and performance issues, as opposed to wrestling with tedious and error-prone low-level testing concerns.
This paper provides two contributions to research on using SEM tools to address enterprise DRE system integration challenges. First, we evaluate several approaches for combining continuous integration environments with SEM tools and describe CiCUTS, which combines the CUTS SEM tool with the CruiseControl .NET continuous integration environment. Second, we present a case study that shows how CiCUTS helps reduce the time and effort required to manage and execute integration tests that evaluate QoS metrics for a representative DRE system from the domain of shipboard computing. The results of our case study show that CiCUTS helps developers and testers ensure the performance of an example enterprise DRE system is within its QoS specifications throughout development, instead of waiting until system integration time to evaluate QoS.}, keywords = {continuous integration, integration testing, serialized phasing, system execution modeling}, isbn = {978-0-7695-3141-0}, doi = {http://doi.ieeecomputersociety.org/10.1109/ECBS.2008.20}, author = {Hill,James H. and Schmidt,Douglas C. and Porter, Adam and Slaby,John M.} } @article {17021, title = {Community response grid (CRG) for a university campus: Design requirements and implications}, journal = {Proceedings of the 5th International ISCRAM Conference}, year = {2008}, month = {2008///}, pages = {34 - 43}, abstract = {This paper describes the initial stages of the participatory design of a community-oriented emergency response system for a university campus. After reviewing related work and the current University emergency response system, this paper describes our participatory design process, discusses initial findings from a design requirement survey and from our interactions with different stakeholders, and proposes a Web interface design for a community response grid system. The prototyping of the system demonstrates the possibility of fostering social-network-based community participation in emergency response, and also identifies concerns raised by potential users and by the professional responder community.}, author = {Wu,P.F. and Qu,Y. and Preece,J. and Fleischmann,K. and Golbeck,J. and Jaeger,P. and Shneiderman, Ben} } @article {12898, title = {The Complete Genome Sequence of Thermococcus Onnurineus NA1 Reveals a Mixed Heterotrophic and Carboxydotrophic Metabolism}, journal = {Journal of Bacteriology}, volume = {190}, year = {2008}, month = {2008/11/15/}, pages = {7491 - 7499}, abstract = {Members of the genus Thermococcus, sulfur-reducing hyperthermophilic archaea, are ubiquitously present in various deep-sea hydrothermal vent systems and are considered to play a significant role in the microbial consortia. We present the complete genome sequence and feature analysis of Thermococcus onnurineus NA1 isolated from a deep-sea hydrothermal vent area, which reveal clues to its physiology. Based on results of genomic analysis, T. onnurineus NA1 possesses the metabolic pathways for organotrophic growth on peptides, amino acids, or sugars. More interesting was the discovery that the genome encoded unique proteins that are involved in carboxydotrophy to generate energy by oxidation of CO to CO2, thereby providing a mechanistic basis for growth with CO as a substrate. This lithotrophic feature in combination with carbon fixation via RuBisCO (ribulose 1,5-bisphosphate carboxylase/oxygenase) introduces a new strategy with a complementing energy supply for T.
onnurineus NA1, potentially allowing it to cope with nutrient stress in the surroundings of hydrothermal vents, providing the first genomic evidence for carboxydotrophy in Thermococcus.}, isbn = {0021-9193, 1098-5530}, doi = {10.1128/JB.00746-08}, url = {http://jb.asm.org/content/190/22/7491}, author = {Lee,Hyun Sook and Kang,Sung Gyun and Bae,Seung Seob and Lim,Jae Kyu and Cho,Yona and Kim,Yun Jae and Jeon,Jeong Ho and Cha,Sun-Shin and Kwon,Kae Kyoung and Kim,Hyung-Tae and Park,Cheol-Joo and Lee,Hee-Wook and Kim,Seung Il and Jongsik Chun and Rita R Colwell and Kim,Sang-Jin and Lee,Jung-Hyun} } @article {12549, title = {A Constrained Probabilistic Petri Net Framework for Human Activity Detection in Video}, journal = {Multimedia, IEEE Transactions on}, volume = {10}, year = {2008}, month = {2008/10//}, pages = {982 - 996}, abstract = {Recognition of human activities in restricted settings such as airports, parking lots and banks is of significant interest in security and automated surveillance systems. In such settings, data is usually in the form of surveillance videos with wide variation in quality and granularity. Interpretation and identification of human activities requires an activity model that a) is rich enough to handle complex multi-agent interactions, b) is robust to uncertainty in low-level processing and c) can handle ambiguities in the unfolding of activities. We present a computational framework for human activity representation based on Petri nets. We propose an extension, Probabilistic Petri Nets (PPN), and show how this model is well suited to address each of the above requirements in a wide variety of settings. We then focus on answering two types of questions: (i) what are the minimal sub-videos in which a given activity is identified with a probability above a certain threshold and (ii) for a given video, which activity from a given set occurred with the highest probability? We provide the PPN-MPS algorithm for the first problem, as well as two different algorithms (naive PPN-MPA and PPN-MPA) to solve the second. Our experimental results on a dataset consisting of bank surveillance videos and an unconstrained TSA tarmac surveillance dataset show that our algorithms are both fast and provide high quality results.}, keywords = {human activity detection; human activity representation; probabilistic Petri nets; constrained probabilistic Petri net; image representation; low-level processing; multiagent interactions; automated surveillance systems; security systems; bank surveillance videos; TSA tarmac surveillance dataset; video signal processing}, isbn = {1520-9210}, doi = {10.1109/TMM.2008.2001369}, author = {Albanese, M. and Chellappa, Rama and Moscato, V. and Picariello, A. and V.S. Subrahmanian and Turaga,P. and Udrea,O.} } @conference {14669, title = {Contextual effects for version-consistent dynamic software updating and safe concurrent programming}, booktitle = {Proceedings of the 35th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages}, series = {POPL {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {37 - 49}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper presents a generalization of standard effect systems that we call contextual effects. A traditional effect system computes the effect of an expression e. Our system additionally computes the effects of the computational context in which e occurs.
More specifically, we compute the effect of the computation that has already occurred (the prior effect) and the effect of the computation yet to take place (the future effect). Contextual effects are useful when the past or future computation of the program is relevant at various program points. We present two substantial examples. First, we show how prior and future effects can be used to enforce transactional version consistency (TVC), a novel correctness property for dynamic software updates. TVC ensures that programmer-designated transactional code blocks appear to execute entirely at the same code version, even if a dynamic update occurs in the middle of the block. Second, we show how future effects can be used in the analysis of multi-threaded programs to find thread-shared locations. This is an essential step in applications such as data race detection.}, keywords = {computation effects, contextual effects, data race detection, dynamic software updating, type and effect systems, version consistency}, isbn = {978-1-59593-689-9}, doi = {10.1145/1328438.1328447}, url = {http://doi.acm.org/10.1145/1328438.1328447}, author = {Neamtiu,Iulian and Hicks, Michael W. and Foster, Jeffrey S. and Pratikakis,Polyvios} } @article {16065, title = {Data Sonification for Users with Visual Impairment: A Case Study with Georeferenced Data}, journal = {ACM Trans. Comput.-Hum. Interact.}, volume = {15}, year = {2008}, month = {2008/05//}, pages = {4:1{\textendash}4:28}, abstract = {We describe the development and evaluation of a tool, iSonic, to assist users with visual impairment in exploring georeferenced data using coordinated maps and tables, augmented with nontextual sounds and speech output. Our in-depth case studies with 7 blind users during 42 hours of data collection showed that iSonic enabled them to find facts and discover trends in georeferenced data, even in unfamiliar geographical contexts, without special devices. Our design was guided by an Action-by-Design-Component (ADC) framework, which was also applied to scatterplots to demonstrate its generalizability. Video and download are available at www.cs.umd.edu/hcil/iSonic/.}, keywords = {auditory user interfaces, information seeking, Interactive sonification, universal usability, users with visual impairment}, isbn = {1073-0516}, doi = {10.1145/1352782.1352786}, url = {http://doi.acm.org/10.1145/1352782.1352786}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben and Lazar,Jonathan} } @article {17063, title = {Designing community-based emergency communication system: A preliminary study}, journal = {Proceedings of the American Society for Information Science and Technology}, volume = {45}, year = {2008}, month = {2008///}, pages = {1 - 3}, author = {Fei Wu,P. and Qu,Y. and Fleischmann,K. and Golbeck,J. and Jaeger,P. and Preece,J. and Shneiderman, Ben} } @article {13292, title = {Discrete Distortion in Triangulated 3-Manifolds}, journal = {Computer Graphics Forum}, volume = {27}, year = {2008}, month = {2008/09/29/}, pages = {1333 - 1340}, abstract = {We introduce a novel notion, that we call discrete distortion, for a triangulated 3-manifold. Discrete distortion naturally generalizes the notion of concentrated curvature defined for triangulated surfaces and provides a powerful tool to understand the local geometry and topology of 3-manifolds. Discrete distortion can be viewed as a discrete approach to Ricci curvature for singular flat manifolds.
We distinguish between two kinds of distortion, namely, vertex distortion, which is associated with the vertices of the tetrahedral mesh decomposing the 3-manifold, and bond distortion, which is associated with the edges of the tetrahedral mesh. We investigate properties of vertex and bond distortions. As an example, we visualize vertex distortion on manifold hypersurfaces in R^4 defined by a scalar field on a 3D mesh.}, keywords = {I.3.3 [Computer Graphics], I.3.5 [Computational Geometry and Object Modeling]}, isbn = {1467-8659}, doi = {10.1111/j.1467-8659.2008.01272.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1467-8659.2008.01272.x/abstract}, author = {Mesmoudi,Mohammed Mostefa and De Floriani, Leila and Port,Umberto} } @article {19615, title = {Distortion lower bounds for line embeddings}, journal = {Information Processing Letters}, volume = {108}, year = {2008}, month = {2008/10/31/}, pages = {175 - 178}, abstract = {In this paper, we show how to derive lower bounds and also compute the exact distortion for line embeddings of some special metrics, especially trees and graphs with certain structure. Using linear programming to formulate a simpler version of the problem gives an interesting intuition and direction concerning the computation of general lower bounds for distortion into the line. We also show that our lower bounds on special cases of metrics are considerably better than previous lower bounds.}, keywords = {combinatorial problems, Theory of computation}, isbn = {0020-0190}, url = {http://www.sciencedirect.com/science/article/pii/S0020019008001464}, author = {Mathieu, Claire and Charalampos Papamanthou} } @article {12904, title = {Dual role colonization factors connecting Vibrio cholerae{\textquoteright}s lifestyles in human and aquatic environments open new perspectives for combating infectious diseases}, journal = {Current Opinion in Biotechnology}, volume = {19}, year = {2008}, month = {2008/06//}, pages = {254 - 259}, abstract = {Vibrio cholerae exhibits two distinctive lifestyles, one inside the milieu of the human intestine and the other in the aquatic environment. Recently, the existence of V. cholerae ligands involved in colonization of both human intestine and environmental chitin surfaces via the same binding specificity has been shown. Such molecules, here named {\textquoteleft}dual role colonization factors (DRCFs){\textquoteright}, are an example of the tight connection between V. cholerae{\textquoteright}s two lifestyles. It is suggested that DRCFs and, more generally, bacterial factors and pathways having roles both in pathogenesis and in life outside the human body may be promising targets for development of novel prophylactic or therapeutic interventions that may also affect V.
cholerae fitness in its environmental reservoirs.}, isbn = {0958-1669}, doi = {10.1016/j.copbio.2008.04.002}, url = {http://www.sciencedirect.com/science/article/pii/S0958166908000426}, author = {Vezzulli,Luigi and Guzm{\'a}n,Carlos A and Rita R Colwell and Pruzzo,Carla} } @conference {15452, title = {Effective and scalable software compatibility testing}, booktitle = {Proceedings of the 2008 international symposium on Software testing and analysis}, series = {ISSTA {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {63 - 74}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Today{\textquoteright}s software systems are typically composed of multiple components, each with different versions. Software compatibility testing is a quality assurance task aimed at ensuring that multi-component based systems build and/or execute correctly across all their versions{\textquoteright} combinations, or configurations. Because there are complex and changing interdependencies between components and their versions, and because there are such a large number of configurations, it is generally infeasible to test all potential configurations. Consequently, in practice, compatibility testing examines only a handful of default or popular configurations to detect problems; as a result costly errors can and do escape to the field. This paper presents a new approach to compatibility testing, called Rachet. We formally model the entire configuration space for software systems and use the model to generate test plans to sample a portion of the space. In this paper, we test all direct dependencies between components and execute the test plan efficiently in parallel. We present empirical results obtained by applying our approach to two large-scale scientific middleware systems. The results show that for these systems Rachet scaled well and discovered incompatibilities between components, and that testing only direct dependences did not compromise test quality.}, keywords = {component-based software system, software compatibility testing}, isbn = {978-1-60558-050-0}, doi = {10.1145/1390630.1390640}, url = {http://doi.acm.org/10.1145/1390630.1390640}, author = {Yoon,Il-Chul and Sussman, Alan and Memon, Atif M. and Porter, Adam} } @inbook {19607, title = {Effective Visualization of File System Access-Control}, booktitle = {Visualization for Computer Security}, series = {Lecture Notes in Computer Science}, year = {2008}, month = {2008/01/01/}, pages = {18 - 25}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {In this paper, we present a visual representation of access control permissions in a standard hierarchical file system. Our visualization of file permissions leverages treemaps, a popular graphical representation of hierarchical data. In particular, we present a visualization of access control for the NTFS file system that can help a non-expert user understand and manipulate file system permissions in a simple and effective way. 
While our examples are based on NTFS, our approach can be used for many other hierarchical file systems as well.}, keywords = {Computer Communication Networks, Computer Imaging, Vision, Pattern Recognition and Graphics, data mining and knowledge discovery, Data Structures, Cryptology and Information Theory, Visualization}, isbn = {978-3-540-85931-4, 978-3-540-85933-8}, url = {http://link.springer.com/chapter/10.1007/978-3-540-85933-8_2}, author = {Heitzmann, Alexander and Palazzi, Bernardo and Charalampos Papamanthou and Tamassia, Roberto}, editor = {Goodall, John R. and Conti, Gregory and Ma, Kwan-Liu} } @conference {19601, title = {Efficient Integrity Checking of Untrusted Network Storage}, booktitle = {StorageSS {\textquoteright}08 Proceedings of the 4th ACM International Workshop on Storage Security and Survivability }, series = {StorageSS {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {43 - 54}, publisher = {ACM}, organization = {ACM}, abstract = {Outsourced storage has become more and more practical in recent years. Users can now store large amounts of data in multiple servers at a relatively low price. An important issue for outsourced storage systems is to design an efficient scheme to assure users that their data stored at remote servers has not been tampered with. This paper presents a general method and a practical prototype application for verifying the integrity of files in an untrusted network storage service. The verification process is managed by an application running in a trusted environment (typically on the client) that stores just one cryptographic hash value of constant size, corresponding to the "digest" of an authenticated data structure. The proposed integrity verification service can work with any storage service since it is transparent to the storage technology used. Experimental results show that our integrity verification method is efficient and practical for network storage systems.}, keywords = {authenticated data structures, data integrity, Security, untrusted outsourced storage}, isbn = {978-1-60558-299-3}, url = {http://doi.acm.org/10.1145/1456469.1456479}, author = {Heitzmann, Alexander and Palazzi, Bernardo and Charalampos Papamanthou and Tamassia, Roberto} } @article {17132, title = {Enabling teachers to explore grade patterns to identify individual needs and promote fairer student assessment}, journal = {Computers \& Education}, volume = {51}, year = {2008}, month = {2008/12//}, pages = {1467 - 1485}, abstract = {Exploring student test, homework, and other assessment scores is a challenge for most teachers, especially when attempting to identify cross-assessment weaknesses and produce final course grades. During the course, teachers need to identify subject weaknesses in order to help students who are struggling with a particular topic. This identification often needs to happen across multiple assessment data points and should be considered in comparison to the class{\textquoteright}s progress as a whole. When determining grades, fairness to all is essential, but there are special needs for students who did poorly on one exam or had a steadily increasing grasp of the subject. We present eduViz, a visualization tool designed to help teachers explore and assign grades. Teachers can see the trajectory of student scores, the relationship of a particular student to the class, and use categories they have defined in order to filter their assessment information. Query response is immediate and all logical comparisons are possible. 
Teachers can easily compare their query to the class or per student average as well as view scores by raw point total or percentage. Additionally, eduViz provides a grade assignment interface which allows teachers to view sorted student scores in a scatterplot. This scatterplot is coupled with a unique partition slider which allows users to move color coordinated bands on the scatterplot to indicate grade ranges. As these grade ranges are set, a histogram is updated to show the number of students assigned to each grade range. These features give teachers new and powerful ways to explore and assign grades so that they can better understand student strengths and weaknesses and make the most of the time they have available. Interviews with 16 expert teachers indicate that eduViz is a success across fields, provides teachers with a useful tool to understand and help their classes, and encourages reflective practice.}, keywords = {Evaluation methodologies, Human{\textendash}computer interface, Improving classroom teaching, Pedagogical issues, Teaching/learning strategies}, isbn = {0360-1315}, doi = {10.1016/j.compedu.2008.01.005}, url = {http://www.sciencedirect.com/science/article/pii/S0360131508000353}, author = {Friedler,Sorelle A. and Tan,Yee Lin and Peer,Nir J. and Shneiderman, Ben} } @article {16063, title = {Enhancing In-Car Navigation Systems with Personal Experience}, journal = {Transportation Research Record: Journal of the Transportation Research Board}, volume = {2064}, year = {2008}, month = {2008/12/01/}, pages = {33 - 42}, abstract = {Computers are extremely powerful for data processing but less adept at handling problems that involve subjective reasoning. People, on the other hand, are good at such tasks. A framework is presented for adding subjective human experience to in-car navigation systems. People often rely on personal experience when planning trips, choosing the route that is fastest, prettiest, or recommended by a friend. A set of methods was developed to help people record personal driving history, add textual annotations describing subjective experiences, and share data with friends and family or even the broader community. Users can then learn from personal data or harness the multiplicity of individual experiences to enjoy new routes. This approach can be used in conjunction with traditional in-car navigation systems.}, doi = {10.3141/2064-06}, url = {http://dx.doi.org/10.3141/2064-06}, author = {Bederson, Benjamin B. and Clamage,Aaron and Plaisant, Catherine} } @article {16066, title = {Evaluating Visual Analytics at the 2007 VAST Symposium Contest}, journal = {Computer Graphics and Applications, IEEE}, volume = {28}, year = {2008}, month = {2008/04//march}, pages = {12 - 21}, abstract = {In this article, we report on the contest{\textquoteright}s data set and tasks, the judging criteria, the winning tools, and the overall lessons learned in the competition. We believe that by organizing these contests, we{\textquoteright}re creating useful resources for researchers and are beginning to understand how to better evaluate VA tools. Competitions encourage the community to work on difficult problems, improve their tools, and develop baselines for others to build or improve upon. 
We continue to evolve a collection of data sets, scenarios, and evaluation methodologies that reflect the richness of the many VA tasks and applications.}, keywords = {VAST 2007 Symposium Contest;Visual Analytics Science and Technology;data visualization;visual analytics;data visualisation}, isbn = {0272-1716}, doi = {10.1109/MCG.2008.27}, author = {Plaisant, Catherine and Grinstein,G. and Scholtz,J. and Whiting,M. and O{\textquoteright}Connell,T. and Laskowski,S. and Chien,L. and Tat,A. and Wright,W. and Gorg,C. and Zhicheng Liu and Parekh,N. and Singhal,K. and Stasko,J.} } @article {18670, title = {Experiences with building an intrusion-tolerant group communication system}, journal = {Software: Practice and Experience}, volume = {38}, year = {2008}, month = {2008///}, pages = {639 - 666}, abstract = {There are many group communication systems (GCSs) that provide consistent group membership and reliable, ordered multicast properties in the presence of crash faults. However, relatively few GCS implementations are able to provide these properties in the presence of malicious faults resulting from intrusions. We describe the systematic transformation of a crash-tolerant GCS, namely C-Ensemble, into an intrusion-tolerant GCS, the ITUA GCS. To perform the transformation, we devised intrusion-tolerant versions of key group communication protocols. We then inserted implementations of the protocols into C-Ensemble and made significant changes to the rest of the C-Ensemble protocol stack to make the stack intrusion tolerant. We quantify the cost of providing intrusion-tolerant group communication in two ways. First, we quantify the implementation effort by presenting a detailed analysis of the amount of change required to the original C-Ensemble system. In doing so, we provide insight into the choice of building an intrusion-tolerant GCS from scratch versus building one by leveraging a crash-tolerant implementation. Second, we quantify the run-time performance cost of tolerating intrusions by presenting results from an experimental evaluation of the main intrusion-tolerant microprotocols. The results are analyzed to identify the parts that contribute the most overhead while providing intrusion tolerance during both normal operation and recovery from intrusions. Copyright {\textcopyright} 2007 John Wiley \& Sons, Ltd.}, keywords = {distributed protocols, experimental evaluation, Fault tolerance, Group communication, intrusion tolerance}, isbn = {1097-024X}, doi = {10.1002/spe.848}, url = {http://onlinelibrary.wiley.com/doi/10.1002/spe.848/abstract}, author = {Ramasamy,HariGovind V. and Pandey,Prashant and Michel Cukier and Sanders,William H.} } @article {16396, title = {Exurbia from the bottom-up: Confronting empirical challenges to characterizing a complex system}, journal = {Geoforum}, volume = {39}, year = {2008}, month = {2008/03//}, pages = {805 - 818}, abstract = {We describe empirical results from a multi-disciplinary project that support modeling complex processes of land-use and land-cover change in exurban parts of Southeastern Michigan. Based on two different conceptual models, one describing the evolution of urban form as a consequence of residential preferences and the other describing land-cover changes in an exurban township as a consequence of residential preferences, local policies, and a diversity of development types, we describe a variety of empirical data collected to support the mechanisms that we encoded in computational agent-based models.
We used multiple methods, including social surveys, remote sensing, and statistical analysis of spatial data, to collect data that could be used to validate the structure of our models, calibrate their specific parameters, and evaluate their output. The data were used to investigate this system in the context of several themes from complexity science, including (a) macro-level patterns; (b) autonomous decision making entities (i.e., agents); (c) heterogeneity among those entities; (d) social and spatial interactions that operate across multiple scales; and (e) nonlinear feedback mechanisms. The results point to the importance of collecting data on agents and their interactions when producing agent-based models, the general validity of our conceptual models, and some changes that we needed to make to these models following data analysis. The calibrated models have been and are being used to evaluate landscape dynamics and the effects of various policy interventions on urban land-cover patterns.}, keywords = {Ecological effects, Land-cover change, Land-use change, spatial modeling, Urban sprawl}, isbn = {0016-7185}, doi = {10.1016/j.geoforum.2007.02.010}, url = {http://www.sciencedirect.com/science/article/pii/S0016718507000371}, author = {Brown,Daniel G. and Robinson,Derek T. and An,Li and Nassauer,Joan I. and Zellner,Moira and Rand, William and Riolo,Rick and Page,Scott E. and Low,Bobbi and Wang,Zhifang} } @article {16262, title = {Figaro: A Novel Statistical Method for Vector Sequence Removal}, journal = {Bioinformatics}, volume = {24}, year = {2008}, month = {2008/02/15/}, pages = {462 - 467}, abstract = {Motivation: Sequences produced by automated Sanger sequencing machines frequently contain fragments of the cloning vector on their ends. Software tools currently available for identifying and removing the vector sequence require knowledge of the vector sequence, specific splice sites and any adapter sequences used in the experiment{\textemdash}information often omitted from public databases. Furthermore, the clipping coordinates themselves are missing or incorrectly reported. As an example, within the \~{}1.24 billion shotgun sequences deposited in the NCBI Trace Archive, as many as \~{}735 million (\~{}60\%) lack vector clipping information. Correct clipping information is essential to scientists attempting to validate, improve and even finish the increasingly large number of genomes released at a {\textquoteleft}draft{\textquoteright} quality level. Results: We present here Figaro, a novel software tool for identifying and removing the vector from raw sequence data without prior knowledge of the vector sequence. The vector sequence is automatically inferred by analyzing the frequency of occurrence of short oligo-nucleotides using Poisson statistics. We show that Figaro achieves 99.98\% sensitivity when tested on \~{}1.5 million shotgun reads from Drosophila pseudoobscura. We further explore the impact of accurate vector trimming on the quality of whole-genome assemblies by re-assembling two bacterial genomes from shotgun sequences deposited in the Trace Archive. Designed as a module in large computational pipelines, Figaro is fast, lightweight and flexible. Availability: Figaro is released under an open-source license through the AMOS package (http://amos.sourceforge.net/Figaro).
Contact: mpop@umiacs.umd.edu }, isbn = {1367-4803, 1460-2059}, doi = {10.1093/bioinformatics/btm632}, url = {http://bioinformatics.oxfordjournals.org/content/24/4/462}, author = {White,James Robert and Roberts,Michael and Yorke,James A. and Pop, Mihai} } @inbook {14698, title = {Formalizing Soundness of Contextual Effects}, booktitle = {Theorem Proving in Higher Order Logics}, series = {Lecture Notes in Computer Science}, volume = {5170}, year = {2008}, month = {2008///}, pages = {262 - 277}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {A contextual effect system generalizes standard type and effect systems: where a standard effect system computes the effect of an expression e, a contextual effect system additionally computes the prior and future effect of e, which characterize the behavior of computation prior to and following, respectively, the evaluation of e. This paper describes the formalization and proof of soundness of contextual effects, which we mechanized using the Coq proof assistant. Contextual effect soundness is an unusual property because the prior and future effect of a term e depends not on e itself (or its evaluation), but rather on the evaluation of the context in which e appears. Therefore, to state and prove soundness we must {\textquotedblleft}match up{\textquotedblright} a subterm in the original typing derivation with the possibly-many evaluations of that subterm during the evaluation of the program, in a way that is robust under substitution. We do this using a novel typed operational semantics. We conjecture that our approach could prove useful for reasoning about other properties of derivations that rely on the context in which that derivation appears.}, isbn = {978-3-540-71065-3}, url = {http://dx.doi.org/10.1007/978-3-540-71067-7_22}, author = {Pratikakis,Polyvios and Foster, Jeffrey S. and Hicks, Michael W. and Neamtiu,Iulian}, editor = {Mohamed,Otmane and Mu{\~n}oz,C{\'e}sar and Tahar,Sofi{\`e}ne} } @article {16268, title = {Genome assembly forensics: finding the elusive mis-assembly}, journal = {Genome Biology}, volume = {9}, year = {2008}, month = {2008/03/14/}, pages = {R55 - R55}, abstract = {We present the first collection of tools aimed at automated genome assembly validation. This work formalizes several mechanisms for detecting mis-assemblies, and describes their implementation in our automated validation pipeline, called amosvalidate. We demonstrate the application of our pipeline in both bacterial and eukaryotic genome assemblies, and highlight several assembly errors in both draft and finished genomes. The software described is compatible with common assembly formats and is released, open-source, at http://amos.sourceforge.net.}, isbn = {1465-6906}, doi = {10.1186/gb-2008-9-3-r55}, url = {http://genomebiology.com/2008/9/3/R55}, author = {Phillippy,Adam M and Schatz,Michael C and Pop, Mihai} } @article {14617, title = {Genome-Wide Analysis of Natural Selection on Human Cis-Elements}, journal = {PLoS ONE}, volume = {3}, year = {2008}, month = {2008///}, pages = {e3137 - e3137}, abstract = {It has been speculated that the polymorphisms in the non-coding portion of the human genome underlie much of the phenotypic variability among humans and between humans and other primates. If so, these genomic regions may be undergoing rapid evolutionary change, due in part to natural selection.
However, the non-coding region is a heterogeneous mix of functional and non-functional regions. Furthermore, the functional regions are comprised of a variety of different types of elements, each under potentially different selection regimes. Using the HapMap and Perlegen polymorphism data that map to a stringent set of putative binding sites in human proximal promoters, we apply the Derived Allele Frequency distribution test of neutrality to provide evidence that many human-specific and primate-specific binding sites are likely evolving under positive selection. We also discuss inherent limitations of publicly available human SNP datasets that complicate the inference of selection pressures. Finally, we show that the genes whose proximal binding sites contain high frequency derived alleles are enriched for positive regulation of protein metabolism and developmental processes. Thus our genome-scale investigation provides evidence for positive selection on putative transcription factor binding sites in human proximal promoters.}, doi = {10.1371/journal.pone.0003137}, url = {http://dx.doi.org/10.1371/journal.pone.0003137}, author = {Sethupathy,Praveen and Giang,Hoa and Plotkin,Joshua B. and Hannenhalli, Sridhar} } @article {16271, title = {Genome-Wide Analysis of Repetitive Elements in Papaya}, journal = {Tropical Plant Biology}, volume = {1}, year = {2008}, month = {2008///}, pages = {191 - 201}, abstract = {Papaya (Carica papaya L.) is an important fruit crop cultivated in tropical and subtropical regions worldwide. A first draft of its genome sequence has been recently released. Together with Arabidopsis, rice, poplar, grapevine and other genomes in the pipeline, it represents a good opportunity to gain insight into the organization of plant genomes. Here we report a detailed analysis of repetitive elements in the papaya genome, including transposable elements (TEs), tandemly-arrayed sequences, and high copy number genes. These repetitive sequences account for \~{}56\% of the papaya genome with TEs being the most abundant at 52\%, tandem repeats at 1.3\% and high copy number genes at 3\%. Most common types of TEs are represented in the papaya genome with retrotransposons being the dominant class, accounting for 40\% of the genome. The most prevalent retrotransposons are Ty3-gypsy (27.8\%) and Ty1-copia (5.5\%). Among the tandem repeats, microsatellites are the most abundant in number, but represent only 0.19\% of the genome. Minisatellites and satellites are less abundant, but represent 0.68\% and 0.43\% of the genome, respectively, due to greater repeat length. Despite an overall smaller gene repertoire in papaya than many other angiosperms, a significant fraction of genes (\>2\%) are present in large gene families with copy number greater than 20.
This repeat database clarified a major part of the papaya genome organization and partly explained the lower gene repertoire in papaya than in Arabidopsis.}, isbn = {1935-9756}, url = {http://dx.doi.org/10.1007/s12042-008-9015-0}, author = {Nagarajan,Niranjan and Navajas-P{\'e}rez,Rafael and Pop, Mihai and Alam,Maqsudul and Ming,Ray and Paterson,Andrew and Salzberg,Steven} } @article {12903, title = {Global impact of Vibrio cholerae interactions with chitin}, journal = {Environmental Microbiology}, volume = {10}, year = {2008}, month = {2008/06/01/}, pages = {1400 - 1410}, abstract = {The interaction of Vibrio cholerae with chitin exemplifies for microbial ecology a successful bacteria{\textendash}substrate interaction with complex and significant influence on the lifestyle of the bacterium. Chitin is one of the most abundant polymers on earth and possibly the most abundant in the aquatic environment, where its association with V.~cholerae has provided the microorganism with a number of advantages, including food availability, adaptation to environmental nutrient gradients, tolerance to stress and protection from predators. Emergent properties of V.~cholerae{\textendash}chitin interactions occur at multiple hierarchical levels in the environment and include cell metabolic and physiological responses, e.g. chemotaxis, cell multiplication, induction of competence, biofilm formation, commensal and symbiotic relationship with higher organisms, cycling of nutrients, and pathogenicity for humans and aquatic animals. As factors mediating virulence of V.~cholerae for humans and aquatic animals derive from mechanisms of adaptation to its environment, at different levels of hierarchical scale, V.~cholerae interactions with chitin represent a useful model for examination of the role of primary habitat selection in the development of traits that have been identified as virulence factors in human disease.}, isbn = {1462-2920}, doi = {10.1111/j.1462-2920.2007.01559.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1462-2920.2007.01559.x/full}, author = {Pruzzo,Carla and Vezzulli,Luigi and Rita R Colwell} } @article {14135, title = {H(div) preconditioning for a mixed finite element formulation of the stochastic diffusion problem}, year = {2008}, month = {2008///}, institution = {Citeseer}, author = {Elman, Howard and Furnival, D. G and Powell, C. E} } @article {18492, title = {Hosting virtual networks on commodity hardware}, volume = {GT-CS-07-10}, year = {2008}, month = {2008///}, institution = {Georgia Institute of Technology}, abstract = {This paper describes Trellis, a software platform for hosting multiple virtual networks on shared commodity hardware. Trellis allows each virtual network to define its own topology, control protocols, and forwarding tables, which lowers the barrier for deploying custom services on an isolated, reconfigurable, and programmable network, while amortizing costs by sharing the physical infrastructure. Trellis synthesizes two container-based virtualization technologies, VServer and NetNS, as well as a new tunneling mechanism, EGRE, into a coherent platform that enables high-speed virtual networks. We describe the design and implementation of Trellis, including kernel-level performance optimizations, and evaluate its supported packet-forwarding rates against other virtualization technologies. We are in the process of upgrading the VINI facility to use Trellis.
We also plan to release Trellis as part of MyVINI, a standalone software distribution that allows researchers and application developers to deploy their own virtual network hosting platforms.}, author = {Bhatia,S. and Motiwala,M. and Muhlbauer,W. and Valancius,V. and Bavier,A. and Feamster, Nick and Peterson,L. and Rexford,J.} } @inbook {14700, title = {Implicit Flows: Can{\textquoteright}t Live with {\textquoteleft}Em, Can{\textquoteright}t Live without {\textquoteleft}Em}, booktitle = {Information Systems Security}, series = {Lecture Notes in Computer Science}, volume = {5352}, year = {2008}, month = {2008///}, pages = {56 - 70}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Verifying that programs trusted to enforce security actually do so is a practical concern for programmers and administrators. However, there is a disconnect between the kinds of tools that have been successfully applied to real software systems (such as taint mode in Perl and Ruby), and information-flow compilers that enforce a variant of the stronger security property of noninterference. Tools that have been successfully used to find security violations have focused on explicit flows of information, where high-security information is directly leaked to output. Analysis tools that enforce noninterference also prevent implicit flows of information, where high-security information can be inferred from a program{\textquoteright}s flow of control. However, these tools have seen little use in practice, despite the stronger guarantees that they provide. To better understand why, this paper experimentally investigates the explicit and implicit flows identified by the standard algorithm for establishing noninterference. When applied to implementations of authentication and cryptographic functions, the standard algorithm discovers many real implicit flows of information, but also reports an extremely high number of false alarms, most of which are due to conservative handling of unchecked exceptions (e.g., null pointer exceptions). After a careful analysis of all sources of true and false alarms, due to both implicit and explicit flows, the paper concludes with some ideas to improve the false alarm rate, toward making stronger security analysis more practical.}, isbn = {978-3-540-89861-0}, url = {http://dx.doi.org/10.1007/978-3-540-89862-7_4}, author = {King,Dave and Hicks,Boniface and Hicks, Michael W. and Jaeger,Trent}, editor = {Sekar,R. and Pujari,Arun} } @conference {17766, title = {Inconsistency management policies}, booktitle = {Proc. 2008 Intl. Conference on Knowledge Representation and Reasoning (KR 2008)}, year = {2008}, month = {2008///}, abstract = {Though there is much work on how inconsistency in databases should be managed, there is good reason to believe that end users will want to bring their domain expertise and needs to bear in how to deal with inconsistencies. In this paper, we propose the concept of inconsistency management policies (IMPs). We show that IMPs are rich enough to specify many types of inconsistency management methods proposed previously, but provide end users with tools that allow them to use the policies that they want. Our policies are also capable of allowing inconsistency to persist in the database or of eliminating more than a minimal subset of tuples involved in the inconsistency.
We present a formal axiomatic definition of IMPs and present appropriate complexity results, together with results linking different IMPs together. We extend the relational algebra (RA) to incorporate IMPs and present theoretical results showing how IMPs and classical RA operators interact.}, author = {Martinez,M. V and Parisi,F. and Pugliese, A. and Simari,G. I and V.S. Subrahmanian} } @conference {17239, title = {Integrating statistics and visualization: case studies of gaining clarity during exploratory data analysis}, booktitle = {Proceedings of the twenty-sixth annual SIGCHI conference on Human factors in computing systems}, series = {CHI {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {265 - 274}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Although both statistical methods and visualizations have been used by network analysts, exploratory data analysis remains a challenge. We propose that a tight integration of these technologies in an interactive exploratory tool could dramatically speed insight development. To test the power of this integrated approach, we created a novel social network analysis tool, SocialAction, and conducted four long-term case studies with domain experts, each working on unique data sets with unique problems. The structured replicated case studies show that the integrated approach in SocialAction led to significant discoveries by a political analyst, a bibliometrician, a healthcare consultant, and a counter-terrorism researcher. Our contributions demonstrate that the tight integration of statistics and visualizations improves exploratory data analysis, and that our evaluation methodology for long-term case studies captures the research strategies of data analysts.}, keywords = {case studies, Evaluation, exploratory data analysis, Information Visualization, social networks, Statistics}, isbn = {978-1-60558-011-1}, doi = {10.1145/1357054.1357101}, url = {http://doi.acm.org/10.1145/1357054.1357101}, author = {Perer,Adam and Shneiderman, Ben} } @article {16068, title = {Interactive auditory data exploration: A framework and evaluation with geo-referenced data sonification}, journal = {ACM Transactions on Computer-Human Interaction}, year = {2008}, month = {2008///}, abstract = {We describe an Action-by-Design-Component (ADC) framework to guide auditory interface designers for exploratory data analysis. The framework characterizes data interaction in the auditory mode as a set of Auditory Information Seeking Actions (AISAs). Contrasting AISAs with actions in visualizations, the framework also discusses design considerations for a set of Design Components to support AISAs. Applying the framework to geo-referenced data, we systematically explored and evaluated its design space. A data exploration tool, iSonic, was built for blind users. In-depth case studies with 7 blind users, with over 42 hours of data collection, showed that iSonic enabled them to find facts and discover trends of geo-referenced data, even in unfamiliar geographical contexts, without special devices. The results also showed that blind users dramatically benefited from the rich set of task-oriented actions (AISAs) and the use of multiple highly coordinated data views provided by the ADC framework. Some widely used techniques in visualization, with appropriate adaptation, also work in the auditory mode.
The application of the framework to scatterplots shows that the framework can be generalized and can lead to the design of a unified auditory workspace for general exploratory data analysis. Readers can view a supplementary video demonstration of iSonic by visiting www.cs.umd.edu/hcil/iSonic/}, author = {Zhao,H. and Plaisant, Catherine and Shneiderman, Ben and Lazar,J.} } @conference {13264, title = {A Java3D framework for inspecting and segmenting 3D models}, booktitle = {Proceedings of the 13th international symposium on 3D web technology}, series = {Web3D {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {67 - 74}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Models of 3D objects have become widely accessible in several disciplines within academia and industry, spanning from scientific visualization to entertainment. In the last few years, 3D models are often organized into digital libraries accessible over the network, and thus semantic annotation of such models becomes an important issue. A fundamental step in annotating a 3D model is to segment it into meaningful parts. In this work, we present a Java3D framework for inspecting and segmenting 3D objects represented in X3D format. In particular, we present a combination of segmentation and merging techniques for producing a feasible decomposition of the boundary of a 3D object. We represent such a decomposition as a graph, which we call the segmentation graph, and which is the basis for semantic annotation. We also describe the interface we have developed to allow visualization and browsing of both the decomposition and the segmentation graph in order to understand the topological structure of the resulting decomposition.}, keywords = {Java3D, object manipulation, Object segmentation, shape semantics, X3D}, isbn = {978-1-60558-213-9}, doi = {10.1145/1394209.1394225}, url = {http://doi.acm.org/10.1145/1394209.1394225}, author = {De Floriani, Leila and Papaleo,Laura and Carissimi,Nicol{\'o}} } @conference {13122, title = {Kernel integral images: A framework for fast non-uniform filtering}, booktitle = {Computer Vision and Pattern Recognition, 2008. CVPR 2008. IEEE Conference on}, year = {2008}, month = {2008/06//}, pages = {1 - 8}, abstract = {Integral images are commonly used in computer vision and computer graphics applications. Evaluation of box filters via integral images can be performed in constant time, regardless of the filter size. Although Heckbert (1986) extended the integral image approach for more complex filters, its usage has been very limited, in practice. In this paper, we present an extension to integral images that allows for application of a wide class of non-uniform filters. Our approach is superior to Heckbert{\textquoteright}s in terms of precision requirements and suitability for parallelization. We explain the theoretical basis of the approach and instantiate two concrete examples: filtering with bilinear interpolation, and filtering with approximated Gaussian weighting. Our experiments show the significant speedups we achieve, and the higher accuracy of our approach compared to Heckbert{\textquoteright}s.}, keywords = {approximation theory;filtering theory;image processing;integral equations;interpolation;computer graphics;computer vision;bilinear interpolation;fast nonuniform filtering;kernel integral images;approximated Gaussian weighting}, doi = {10.1109/CVPR.2008.4587641}, author = {Hussein,M. and Porikli, F.
and Davis, Larry S.} } @article {14569, title = {Maternal depletion of CTCF reveals multiple functions during oocyte and preimplantation embryo development}, journal = {Development}, volume = {135}, year = {2008}, month = {2008///}, pages = {2729 - 2738}, abstract = {CTCF is a multifunctional nuclear factor involved in epigenetic regulation. Despite recent advances that include the systematic discovery of CTCF-binding sites throughout the mammalian genome, the in vivo roles of CTCF in adult tissues and during embryonic development are largely unknown. Using transgenic RNAi, we depleted maternal stores of CTCF from growing mouse oocytes, and identified hundreds of misregulated genes. Moreover, our analysis suggests that CTCF predominantly activates or derepresses transcription in oocytes. CTCF depletion causes meiotic defects in the egg, and mitotic defects in the embryo that are accompanied by defects in zygotic gene expression, and culminate in apoptosis. Maternal pronuclear transfer and CTCF mRNA microinjection experiments indicate that CTCF is a mammalian maternal effect gene, and that persistent transcriptional defects rather than persistent chromosomal defects perturb early embryonic development. This is the first study detailing a global and essential role for CTCF in mouse oocytes and preimplantation embryos.}, doi = {10.1242/dev.024539}, url = {http://dev.biologists.org/content/135/16/2729.abstract}, author = {Wan,Le-Ben and Pan,Hua and Hannenhalli, Sridhar and Cheng,Yong and Ma,Jun and Fedoriw,Andrew and Lobanenkov,Victor and Latham,Keith E. and Schultz,Richard M. and Bartolomei,Marisa S.} } @conference {19457, title = {More Than Meets the Eye: Transforming the User Experience of Home Network Management}, booktitle = {Proceedings of the 7th ACM conference on Designing interactive systems}, series = {DIS {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {455 - 464}, publisher = {ACM}, organization = {ACM}, abstract = {As computing migrates from the workplace to the home, householders must tackle problems of home network maintenance. Often they lack the technical knowledge or motivation to complete these tasks, making the user experience of home network maintenance frustrating. In response to these difficulties, many householders rely on handwritten reminders or interactive networking tools that are ill-suited for the home environment. In this paper, we seek to understand how to design better home network management tools through a study of sketches created by 40 people in 18 households. In our study, we obtained information about householders{\textquoteright} knowledge, practices and needs with respect to home networking. Based on our results, we present guidelines for transforming the user experience of home network management.}, keywords = {home networks, sketching, troubleshooting}, isbn = {978-1-60558-002-9}, url = {http://doi.acm.org/10.1145/1394445.1394494}, author = {Poole, Erika Shehan and Marshini Chetty and Grinter, Rebecca E. and Edwards, W. Keith} } @article {18707, title = {Mutations in the Hydrophobic Core of Ubiquitin Differentially Affect Its Recognition by Receptor Proteins}, journal = {Journal of Molecular Biology}, volume = {375}, year = {2008}, month = {2008/01/25/}, pages = {979 - 996}, abstract = {Ubiquitin (Ub) is one of the most highly conserved signaling proteins in eukaryotes. 
In carrying out its myriad functions, Ub conjugated to substrate proteins interacts with dozens of receptor proteins that link the Ub signal to various biological outcomes. Here we report mutations in conserved residues of Ub{\textquoteright}s hydrophobic core that have surprisingly potent and specific effects on molecular recognition. Mutant Ubs bind tightly to the Ub-associated domain of the receptor proteins Rad23 and hHR23A but fail to bind the Ub-interacting motif present in the receptors Rpn10 and S5a. Moreover, chains assembled on target substrates with mutant Ubs are unable to support substrate degradation by the proteasome in vitro or sustain viability of yeast cells. The mutations have relatively little effect on Ub{\textquoteright}s overall structure but reduce its rigidity and cause a slight displacement of the C-terminal β-sheet, thereby compromising association with Ub-interacting motif but not with Ub-associated domains. These studies emphasize an unexpected role for Ub{\textquoteright}s core in molecular recognition and suggest that the diversity of protein{\textendash}protein interactions in which Ub engages placed enormous constraints on its evolvability.}, keywords = {hydrophobic core mutation, molecular recognition, proteasomal degradation, ubiquitin, ubiquitin receptors}, isbn = {0022-2836}, doi = {10.1016/j.jmb.2007.11.016}, url = {http://www.sciencedirect.com/science/article/pii/S0022283607014763}, author = {Haririnia,Aydin and Verma,Rati and Purohit,Nisha and Twarog,Michael Z. and Deshaies,Raymond J. and Bolon,Dan and Fushman, David} } @conference {15916, title = {A new multiresolution generalized directional filter bank design and application in image enhancement}, booktitle = {15th IEEE International Conference on Image Processing, 2008. ICIP 2008}, year = {2008}, month = {2008/10//}, pages = {2816 - 2819}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we present an image enhancement technique based on a new multiscale generalized directional filter bank design. The design presented is a shift-invariant overcomplete representation, which is well suited to extracting geometric features such as edges. Special cases of this design method can be made to reduce to different and improved implementations of the shearlet and the contourlet transforms, which are known to represent certain classes of images optimally. Use of this new filter bank design has proven itself competitive in image restoration for noisy images and is well suited for distinguishing noise from weak edges. Experimental results show that our unique image enhancement technique out-performs wavelet and contourlet based enhancement methods.}, keywords = {Algorithm design and analysis, Approximation error, Channel bank filters, contourlet transform, Design methodology, Discrete transforms, Feature extraction, Filter bank, Frequency, geometric feature extraction, Image Enhancement, IMAGE PROCESSING, image resolution, image restoration, Multidimensional digital filters, Multidimensional systems, multiresolution generalized directional filter bank design, shearlet transform, shift-invariant overcomplete representation, transforms, Wavelet transforms}, isbn = {978-1-4244-1765-0}, doi = {10.1109/ICIP.2008.4712380}, author = {Patel, Vishal M. and Easley,G. R and Healy,D. 
M} } @conference {16839, title = {NewsStand: a new view on news}, booktitle = {Proceedings of the 16th ACM SIGSPATIAL international conference on Advances in geographic information systems}, series = {GIS {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {18:1 - 18:10}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {News articles contain a wealth of implicit geographic content that, if exposed to readers, improves understanding of today{\textquoteright}s news. However, most articles are not explicitly geotagged with their geographic content, and few news aggregation systems expose this content to users. A new system named NewsStand is presented that collects, analyzes, and displays news stories in a map interface, thus leveraging their implicit geographic content. NewsStand monitors RSS feeds from thousands of online news sources and retrieves articles within minutes of publication. It then extracts geographic content from articles using a custom-built geotagger, and groups articles into story clusters using a fast online clustering algorithm. By panning and zooming in NewsStand{\textquoteright}s map interface, users can retrieve stories based on both topical significance and geographic region, and see substantially different stories depending on position and zoom level.}, keywords = {clustering, geotagging, knowledge discovery, text mining}, isbn = {978-1-60558-323-5}, doi = {10.1145/1463434.1463458}, url = {http://doi.acm.org/10.1145/1463434.1463458}, author = {Teitler,Benjamin E. and Lieberman,Michael D. and Panozzo,Daniele and Sankaranarayanan,Jagan and Samet, Hanan and Sperling,Jon} } @article {15339, title = {Nitric oxide production from surface recombination of oxygen and nitrogen atoms}, journal = {Journal of Thermophysics and Heat Transfer}, volume = {22}, year = {2008}, month = {2008///}, pages = {178 - 186}, author = {Pejakovi{\'c},D. A and Marschall,J. and Duan,L. and Martin, M.P} } @article {16338, title = {Pooled ANOVA}, journal = {Computational Statistics \& Data Analysis}, volume = {52}, year = {2008}, month = {2008/08/15/}, pages = {5215 - 5228}, abstract = {We introduce Pooled ANOVA, a greedy algorithm to sequentially select the rare important factors from a large set of factors. Problems such as computer simulations and software performance tuning involve a large number of factors, few of which have an important effect on the outcome or performance measure. We pool multiple factors together, and test the pool for significance. If the pool has a significant effect we retain the factors for deconfounding. If not, we either declare that none of the factors are important, or retain them for follow-up decoding, depending on our assumptions and stage of testing. The sparser important factors are, the bigger the savings. Pooled ANOVA requires fewer assumptions than other, similar methods (e.g.~sequential bifurcation), such as not requiring all important effects to have the same sign.
We demonstrate savings of 25\%-35\% when compared to a conventional ANOVA, and also the ability to work in a setting where Sequential Bifurcation fails.}, isbn = {0167-9473}, doi = {10.1016/j.csda.2008.04.024}, url = {http://www.sciencedirect.com/science/article/pii/S0167947308002168}, author = {Last,Michael and Luta,Gheorghe and Orso,Alex and Porter, Adam and Young,Stan} } @article {16067, title = {Promoting Insight-Based Evaluation of Visualizations: From Contest to Benchmark Repository}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {14}, year = {2008}, month = {2008/02//jan}, pages = {120 - 134}, abstract = {Information visualization (InfoVis) is now an accepted and growing field, but questions remain about the best uses for and the maturity of novel visualizations. Usability studies and controlled experiments are helpful, but generalization is difficult. We believe that the systematic development of benchmarks will facilitate the comparison of techniques and help identify their strengths under different conditions. We were involved in the organization and management of three InfoVis contests for the 2003, 2004, and 2005 IEEE InfoVis Symposia, which requested teams to report on insights gained while exploring data. We give a summary of the state of the art of evaluation in InfoVis, describe the three contests, summarize their results, discuss outcomes and lessons learned, and conjecture the future of visualization contests. All materials produced by the contests are archived in the InfoVis benchmark repository.}, keywords = {InfoVis contest;benchmark repository;information visualization;data visualisation;Algorithms;Benchmarking;Computer Graphics;Databases, Factual;Evaluation Studies as Topic;Image Interpretation, Computer-Assisted;Software;Software Validation;User-Computer Interface}, isbn = {1077-2626}, doi = {10.1109/TVCG.2007.70412}, author = {Plaisant, Catherine and Fekete,J.-D. and Grinstein,G.} } @inbook {17668, title = {The Randomized Coloring Procedure with Symmetry-Breaking}, booktitle = {Automata, Languages and Programming}, series = {Lecture Notes in Computer Science}, volume = {5125}, year = {2008}, month = {2008///}, pages = {306 - 319}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {A basic randomized coloring procedure has been used in probabilistic proofs to obtain remarkably strong results on graph coloring. These results include the asymptotic version of the List Coloring Conjecture due to Kahn, the extensions of Brooks{\textquoteright} Theorem to sparse graphs due to Kim and Johansson, and Luby{\textquoteright}s fast parallel and distributed algorithms for graph coloring. The most challenging aspect of a typical probabilistic proof is showing adequate concentration bounds for key random variables. In this paper, we present a simple symmetry-breaking augmentation to the randomized coloring procedure that works well in conjunction with Azuma{\textquoteright}s Martingale Inequality to easily yield the requisite concentration bounds. We use this approach to obtain a number of results in two areas: frugal coloring and weighted equitable coloring. A β-frugal coloring of a graph G is a proper vertex-coloring of G in which no color appears more than β times in any neighborhood. Let G = (V, E) be a vertex-weighted graph with weight function w : V {\textrightarrow} [0, 1] and let W = ∑_{v ∈ V} w(v).
A weighted equitable coloring of G is a proper k-coloring such that the total weight of every color class is {\textquotedblleft}large{\textquotedblright}, i.e., {\textquotedblleft}not much smaller{\textquotedblright} than W/k; this notion is useful in obtaining tail bounds for sums of dependent random variables.}, isbn = {978-3-540-70574-1}, url = {http://dx.doi.org/10.1007/978-3-540-70575-8_26}, author = {Pemmaraju,Sriram and Srinivasan, Aravind}, editor = {Aceto,Luca and Damg{\r a}rd,Ivan and Goldberg,Leslie and Halld{\'o}rsson,Magn{\'u}s and Ing{\'o}lfsd{\'o}ttir,Anna and Walukiewicz,Igor} } @article {16071, title = {A range of indicators for the evaluation of state health department Web-based data query systems}, journal = {Issues in Evaluating Health Department Web-based Data Query Systems: Working Papers}, year = {2008}, month = {2008///}, pages = {39 - 39}, abstract = {This chapter explores the wide range of possible indicators (also called metrics) that can be used to evaluate state health department Web-based data query systems (WDQS). While the list of indicators we propose is not exhaustive, it strives to cast a wide net on the range of issues that should be considered when evaluating WDQS, from the richness of functionalities provided by the WDQS, to the usability of the WDQS, the level of human resources required to develop and maintain the WDQS, and the frequency and intensity of WDQS use.}, author = {Plaisant, Catherine} } @conference {16010, title = {The role of metacognition in robust AI systems}, booktitle = {AAAI-08 Workshop on Metareasoning (Chicago, IL)}, year = {2008}, month = {2008///}, author = {Schmill,M. D and Oates,T. and Anderson,M. and Fults,S. and Josyula,D. and Perlis, Don and Wilson,S.} } @article {14077, title = {Role of transposable elements in trypanosomatids}, journal = {Microbes and Infection}, volume = {10}, year = {2008}, month = {2008/05//}, pages = {575 - 581}, abstract = {Transposable elements constitute 2-5\% of the genome content in trypanosomatid parasites. Some of them are involved in critical cellular functions, such as the regulation of gene expression in Leishmania spp. In this review, we highlight the remarkable role extinct transposable elements can play as the source of potential new functions.}, keywords = {Cellular function, Domestication, Evolution, Gene expression, Leishmania, Regulation of mRNA stability, Retroposon, Transposable element, Trypanosoma}, isbn = {1286-4579}, doi = {10.1016/j.micinf.2008.02.009}, url = {http://www.sciencedirect.com/science/article/pii/S1286457908000464}, author = {Bringaud,Fr{\'e}d{\'e}ric and Ghedin,Elodie and El-Sayed, Najib M. and Papadopoulou,Barbara} } @article {16282, title = {Scaffolding and Validation of Bacterial Genome Assemblies Using Optical Restriction Maps}, journal = {Bioinformatics}, volume = {24}, year = {2008}, month = {2008/05/15/}, pages = {1229 - 1235}, abstract = {Motivation: New, high-throughput sequencing technologies have made it feasible to cheaply generate vast amounts of sequence information from a genome of interest. The computational reconstruction of the complete sequence of a genome is complicated by specific features of these new sequencing technologies, such as the short length of the sequencing reads and absence of mate-pair information.
In this article we propose methods to overcome such limitations by incorporating information from optical restriction maps. Results: We demonstrate the robustness of our methods to sequencing and assembly errors using extensive experiments on simulated datasets. We then present the results obtained by applying our algorithms to data generated from two bacterial genomes, Yersinia aldovae and Yersinia kristensenii. The resulting assemblies contain a single scaffold covering a large fraction of the respective genomes, suggesting that the careful use of optical maps can provide a cost-effective framework for the assembly of genomes. Availability: The tools described here are available as an open-source package at ftp://ftp.cbcb.umd.edu/pub/software/soma Contact: mpop@umiacs.umd.edu }, isbn = {1367-4803, 1460-2059}, doi = {10.1093/bioinformatics/btn102}, url = {http://bioinformatics.oxfordjournals.org/content/24/10/1229}, author = {Nagarajan,Niranjan and Read,Timothy D. and Pop, Mihai} } @conference {17761, title = {Scaling RDF with Time}, booktitle = {Proceedings of the 17th international conference on World Wide Web}, series = {WWW {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {605 - 614}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The World Wide Web Consortium{\textquoteright}s RDF standard primarily consists of (subject, property, object) triples that specify the value that a given subject has for a given property. However, it is frequently the case that even for a fixed subject and property, the value varies with time. As a consequence, efforts have been made to annotate RDF triples with "valid time" intervals. However, to date, no proposals exist for efficient indexing of such temporal RDF databases. It is clearly beneficial to store RDF data in a relational DB - however, standard relational indexes are inadequately equipped to handle RDF{\textquoteright}s graph structure. In this paper, we propose the tGRIN index structure that builds a specialized index for temporal RDF that is physically stored in an RDBMS. Past efforts to store RDF in relational stores include Jena2 from HP, Sesame from OpenRDF.org, and 3store from the University of Southampton. We show that even when these efforts are augmented with well known temporal indexes like R+ trees, SR-trees, ST-index, and MAP21, the tGRIN index exhibits superior performance. In terms of index build time, tGRIN takes two thirds or less of the time used by any other system, and it uses a comparable amount of memory and less disk space than Jena, Sesame and 3store. More importantly, tGRIN can answer queries three to six times faster for average query graph patterns and five to ten times faster for complex queries than these systems.}, keywords = {RDF indexing, resource description framework, temporal RDF}, isbn = {978-1-60558-085-2}, doi = {10.1145/1367497.1367579}, url = {http://doi.acm.org/10.1145/1367497.1367579}, author = {Pugliese,Andrea and Udrea,Octavian and V.S.
Subrahmanian} } @article {16070, title = {Searching Electronic Health Records for Temporal Patterns in Patient Histories: A Case Study with Microsoft Amalga}, journal = {AMIA Annual Symposium Proceedings}, volume = {2008}, year = {2008}, month = {2008///}, pages = {601 - 605}, abstract = {As electronic health records (EHR) become more widespread, they enable clinicians and researchers to pose complex queries that can benefit immediate patient care and deepen understanding of medical treatment and outcomes. However, current query tools make complex temporal queries difficult to pose, and physicians have to rely on computer professionals to specify the queries for them. This paper describes our efforts to develop a novel query tool implemented in a large operational system at the Washington Hospital Center (Microsoft Amalga, formerly known as Azyxxi). We describe our design of the interface to specify temporal patterns and the visual presentation of results, and report on a pilot user study looking for adverse reactions following radiology studies using contrast.}, isbn = {1942-597X}, author = {Plaisant, Catherine and Lam,Stanley and Shneiderman, Ben and Smith,Mark S. and Roseman,David and Marchand,Greg and Gillam,Michael and Feied,Craig and Handler,Jonathan and Rappaport,Hank} } @article {15965, title = {A self-help guide for autonomous systems}, journal = {AI Magazine}, volume = {29}, year = {2008}, month = {2008///}, pages = {67 - 67}, author = {Anderson,M. L and Fults,S. and Josyula,D. P and Oates,T. and Perlis, Don and Wilson,S. and Wright,D.} } @article {14574, title = {Sex and Age Dimorphism of Myocardial Gene Expression in Nonischemic Human Heart Failure}, journal = {Circulation: Cardiovascular Genetics}, volume = {1}, year = {2008}, month = {2008/12/01/}, pages = {117 - 125}, abstract = {Background{\textemdash} We report the first comprehensive analysis of gene expression differences by sex and age in left ventricular samples from 102 patients with dilated cardiomyopathy. Methods and Results{\textemdash} Gene expression data (HG-U133A gene chip, Affymetrix) were analyzed from 30 females and 72 males from 3 separate centers. More than 1800 genes displayed sexual dimorphism in the heart (adjusted P value <0.05). A significant number of these genes were highly represented in gene ontology pathways involved in ion transport and G-protein-coupled receptor signaling. Localization of these genes revealed enrichment on both the sex chromosomes as well as chromosomes 3, 4, and 14. The second goal of this study was to determine the effect of age on gene expression. Within the female cohort, >140 genes were differentially expressed in the <55 years age group compared with the >55 years age group. These genes were highly represented in gene ontology pathways involved in DNA damage. In contrast, zero genes in the male cohort <55 years met statistical significance when compared with the >55 years age group. Conclusions{\textemdash} Gene expression in dilated cardiomyopathy displayed evidence of sexual dimorphism similar to other somatic tissues and age dimorphism within the female cohort.}, doi = {10.1161/CIRCGENETICS.108.802652}, url = {http://circgenetics.ahajournals.org/content/1/2/117.abstract}, author = {Fermin,David R. and Barac,Ana and Lee,Sangjin and Polster,Sean P. and Hannenhalli, Sridhar and Bergemann,Tracy L. and Grindle,Suzanne and Dyke,David B. and Pagani,Francis and Miller,Leslie W. and Tan,Sarah and dos Remedios,Cris and Cappola,Thomas P. and Margulies,Kenneth B.
and Hall,Jennifer L.} } @conference {17392, title = {Systematic yet flexible discovery: guiding domain experts through exploratory data analysis}, booktitle = {Proceedings of the 13th international conference on Intelligent user interfaces}, series = {IUI {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {109 - 118}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {During exploratory data analysis, visualizations are often useful for making sense of complex data sets. However, as data sets increase in size and complexity, static information visualizations decrease in comprehensibility. Interactive techniques can yield valuable discoveries, but current data analysis tools typically support only opportunistic exploration that may be inefficient and incomplete. We present a refined architecture that uses systematic yet flexible (SYF) design goals to guide domain expert users through complex exploration of data over days, weeks and months. The SYF system aims to support exploratory data analysis with some of the simplicity of an e-commerce check-out while providing added flexibility to pursue insights. The SYF system provides an overview of the analysis process, suggests unexplored states, allows users to annotate useful states, supports collaboration, and enables reuse of successful strategies. The affordances of the SYF system are demonstrated by integrating it into a social network analysis tool employed by social scientists and intelligence analysts. The SYF system is a tool-independent component and can be incorporated into other data analysis tools.}, keywords = {exploratory data analysis, guides, Information Visualization, social networks, systematic yet flexible, wizards}, isbn = {978-1-59593-987-6}, doi = {10.1145/1378773.1378788}, url = {http://doi.acm.org/10.1145/1378773.1378788}, author = {Perer,Adam and Shneiderman, Ben} } @conference {15062, title = {A topic-based Document Correlation Model}, booktitle = {Machine Learning and Cybernetics, 2008 International Conference on}, volume = {5}, year = {2008}, month = {2008/07//}, pages = {2487 - 2491}, abstract = {Document correlation analysis is now a focus of study in text mining. This paper proposes a Document Correlation Model to capture the correlation between documents at the topic level. The model represents the document correlation as the Optimal Matching of a bipartite graph, of which each partition is a document, each node is a topic, and each edge is the similarity between two topics. The topics of each document are retrieved by the Latent Dirichlet Allocation model and Gibbs sampling.
Experiments on correlated document search show that the Document Correlation Model outperforms the Vector Space Model on two aspects: 1) it has higher average retrieval precision; 2) it needs less space to store a document{\textquoteright}s information.}, keywords = {bipartite graph optimal matching, data mining, document correlation analysis, document retrieval, Gibbs sampling, Information retrieval, latent Dirichlet allocation model, text analysis, text mining, topic-based document correlation model}, doi = {10.1109/ICMLC.2008.4620826}, author = {Jia,Xi-Ping and Peng,Hong and Zheng,Qi-Lun and Zhuolin Jiang and Li,Zhao} } @conference {18867, title = {Towards Design and Fabrication of a Miniature MRI-Compatible Robot for Applications in Neurosurgery}, year = {2008}, month = {2008///}, pages = {747 - 754}, publisher = {ASME}, organization = {ASME}, abstract = {Brain tumors are among the most feared complications of cancer and they occur in 20{\textendash}40\% of adult cancer patients. Despite numerous advances in treatment, the prognosis for these patients is poor, with a median survival of 4{\textendash}8 months. The primary reasons for poor survival rate are the lack of good continuous imaging modality for intraoperative intracranial procedures and the inability to remove the complete tumor tissue due to its placement in the brain and the corresponding space constraints to reach it. Intraoperative magnetic resonance imaging (MRI) supplements the surgeon{\textquoteright}s visual and tactile senses in a way that no other imaging device can achieve, resulting in less trauma to surrounding healthy brain tissue during surgery. To minimize the trauma to surrounding healthy brain tissue, it would be beneficial to operate through a narrow surgical corridor dissected by the neurosurgeon. Facilitating tumor removal by accessing regions outside the direct {\textquotedblleft}line-of-sight{\textquotedblright} of the neurosurgical corridor will require a highly dexterous, small cross section, and MRI-compatible robot. Developing such a robot is an extremely challenging task. In this paper we report a preliminary design of a 6-DOF robot for possible application in neurosurgery. The robot actuators and body parts are constructed from MRI compatible materials. The current prototype is 0.36{\textquotedblright} in diameter and weighs only 0.0289 N (2.95 grams). The device was actuated using Flexinol{\textregistered} which is a shape memory alloy manufactured by Dynalloy, Inc. The end-effector forces ranged from 12 mN to 50 mN depending on the robot configuration. The end-effector force to robot weight ratio varied from 0.41 to 1.73. During trials the robot motion was repeatable and the range of motion of the robot was about 90 degrees for the end-effector when one side shape memory alloy (SMA) channel was actuated. The actuation time from the start to finish was about 2.5 s.}, isbn = {978-0-7918-4326-0}, doi = {10.1115/DETC2008-49587}, url = {http://link.aip.org/link/ASMECP/v2008/i43260/p747/s1\&Agg=doi}, author = {Pappafotis,Nicholas and Bejgerowski,Wojciech and Gullapalli,Rao and Simard,J. Marc and Gupta, Satyandra K.
and Desai,Jaydev P.} } @conference {18608, title = {Trellis: a platform for building flexible, fast virtual networks on commodity hardware}, booktitle = {Proceedings of the 2008 ACM CoNEXT Conference}, series = {CoNEXT {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {72:1 - 72:6}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We describe Trellis, a platform for hosting virtual networks on shared commodity hardware. Trellis allows each virtual network to define its own topology, control protocols, and forwarding tables, while amortizing costs by sharing the physical infrastructure. Trellis synthesizes two container-based virtualization technologies, VServer and NetNS, as well as a new tunneling mechanism, EGRE, into a coherent platform that enables high-speed virtual networks. We describe the design and implementation of Trellis and evaluate its packet-forwarding rates relative to other virtualization technologies and native kernel forwarding performance.}, isbn = {978-1-60558-210-8}, doi = {10.1145/1544012.1544084}, url = {http://doi.acm.org/10.1145/1544012.1544084}, author = {Bhatia,Sapan and Motiwala,Murtaza and Muhlbauer,Wolfgang and Mundada,Yogesh and Valancius,Vytautas and Bavier,Andy and Feamster, Nick and Peterson,Larry and Rexford,Jennifer} } @conference {15332, title = {Upstream and downstream in unsteadiness of STBLI using DNS data in two configurations}, booktitle = {46th AIAA Aerospace Sciences Meeting}, year = {2008}, month = {2008///}, abstract = {Statistical analysis of the upstream and downstream flow influence on shock unsteadiness in shock and turbulent boundary layer interactions is performed using DNS data of a compression corner (Wu \& Martin [1]) and a reflected shock case interaction. For both cases, the scaling proposed by Dussauge et al. [2] for the characteristic low frequency applies. The statistical analysis for the compression corner shows that the unsteadiness of the shock is dominated by the downstream flow. The same analysis applied to the reflected shock case also indicates downstream influence. Additional studies are required to fully characterize the reflected shock case DNS data.}, author = {Martin, M.P and Priebe,S. and Wu,M.} } @article {15356, title = {Upstream and downstream influence on the unsteadiness of STBLI using DNS data in two configurations}, journal = {AIAA Paper 2008-0719, 46th AIAA Aerospace Sciences Meeting}, year = {2008}, month = {2008///}, author = {Martin, M.P and Priebe,S. and Wu,M.} } @conference {16064, title = {VAST 2008 Challenge: Introducing mini-challenges}, booktitle = {Visual Analytics Science and Technology, 2008. VAST {\textquoteright}08. IEEE Symposium on}, year = {2008}, month = {2008/10//}, pages = {195 - 196}, abstract = {Visual analytics experts realize that one effective way to push the field forward and to develop metrics for measuring the performance of various visual analytics components is to hold an annual competition. The VAST 2008 Challenge is the third year that such a competition was held in conjunction with the IEEE Visual Analytics Science and Technology (VAST) symposium. The authors restructured the contest format used in 2006 and 2007 to reduce the barriers to participation and offered four mini-challenges and a Grand Challenge. Mini Challenge participants were to use visual analytic tools to explore one of four heterogeneous data collections to analyze specific activities of a fictitious, controversial movement.
Questions asked in the Grand Challenge required the participants to synthesize data from all four data sets. In this paper we give a brief overview of the data sets, the tasks, the participation, the judging, and the results.}, keywords = {VAST 2008 Challenge, heterogeneous data collections, visual analytics, data visualisation}, doi = {10.1109/VAST.2008.4677383}, author = {Grinstein,G. and Plaisant, Catherine and Laskowski,S. and O{\textquoteright}Connell,T. and Scholtz,J. and Whiting,M.} } @inbook {16072, title = {Visualizing Functional Data with an Application to eBay{\textquoteright}s Online Auctions}, booktitle = {Handbook of Data Visualization}, series = {Springer Handbooks of Computational Statistics}, year = {2008}, month = {2008///}, pages = {873 - 898}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Technological advances in the measurement, collection, and storage of data have led to more and more complex data structures. Examples of such structures include measurements of the behavior of individuals over time, digitized two- or three-dimensional images of the brain, and recordings of three- or even four-dimensional movements of objects traveling through space and time. Such data, although recorded in a discrete fashion, are usually thought of as continuous objects that are represented by functional relationships. This gave rise to functional data analysis (FDA), which was made popular by the monographs of Ramsay and Silverman (1997, 2002), where the center of interest is a set of curves, shapes, objects, or, more generally, a set of functional observations, in contrast to classical statistics where interest centers on a set of data vectors. In that sense, functional data is not only different from the data structure studied in classical statistics, but it actually generalizes it. Many of these new data structures require new statistical methods to unveil the information that they carry.}, isbn = {978-3-540-33037-0}, url = {http://dx.doi.org/10.1007/978-3-540-33037-0_34}, author = {Chen,Chun-houh and H{\"a}rdle,Wolfgang and Unwin,Antony and Jank,Wolfgang and Shmueli,Galit and Plaisant, Catherine and Shneiderman, Ben} } @article {15831, title = {Why Do Hubs in the Yeast Protein Interaction Network Tend To Be Essential: Reexamining the Connection between the Network Topology and Essentiality}, journal = {PLoS Comput Biol}, volume = {4}, year = {2008}, month = {2008///}, pages = {e1000140}, abstract = {Analysis of protein interaction networks in the budding yeast Saccharomyces cerevisiae has revealed that a small number of proteins, the so-called hubs, interact with a disproportionately large number of other proteins. Furthermore, many hub proteins have been shown to be essential for survival of the cell{\textemdash}that is, in optimal conditions, yeast cannot grow and multiply without them. This relation between essentiality and the number of neighbors in the protein{\textendash}protein interaction network has been termed the centrality-lethality rule. However, why are such hubs essential? Jeong and colleagues [1] suggested that overrepresentation of essential proteins among high-degree nodes can be attributed to the central role that hubs play in mediating interactions among numerous, less connected proteins.
Another view, proposed by He and Zhang, suggested that the majority of proteins are essential due to their involvement in one or more essential protein{\textendash}protein interactions that are distributed uniformly at random along the network edges [2]. We find that none of the above reasons determines essentiality. Instead, the majority of hubs are essential due to their involvement in Essential Complex Biological Modules, a group of densely connected proteins with shared biological function that are enriched in essential proteins. This study sheds new light on the topological complexity of protein interaction networks.}, doi = {10.1371/journal.pcbi.1000140}, url = {http://dx.doi.org/10.1371/journal.pcbi.1000140}, author = {Zotenko,Elena and Mestre,Julian and O{\textquoteright}Leary, Dianne P. and Przytycka,Teresa M.} } @article {19616, title = {Worst case examples of an exterior point algorithm for the assignment problem}, journal = {Discrete Optimization}, volume = {5}, year = {2008}, month = {2008/08//}, pages = {605 - 614}, abstract = {An efficient exterior point simplex type algorithm for the assignment problem has been developed by Paparrizos~[K. Paparrizos, An infeasible (exterior point) simplex algorithm for assignment problems, Math. Program. 51 (1991) 45{\textendash}54]. This algorithm belongs to the category of forest algorithms and solves an n {\texttimes} n assignment problem in at most n(n-1)/2 iterations and in at most O(n^3) time. In this paper worst case examples are presented. Specifically, a systematic procedure to construct worst case assignment problems is presented for the exterior point algorithm. The algorithm applied to these examples executes exactly n(n-1)/2 iterations. This result verifies that the bound O(n^3) is the best possible for the above-mentioned algorithm.}, keywords = {Assignment problem, Exterior point algorithm, Worst case examples}, isbn = {1572-5286}, url = {http://www.sciencedirect.com/science/article/pii/S1572528608000030}, author = {Charalampos Papamanthou and Paparrizos, Konstantinos and Samaras, Nikolaos and Stergiou, Konstantinos} } @article {16918, title = {911.gov}, journal = {Science (Washington)}, volume = {315}, year = {2007}, month = {2007///}, pages = {944}, author = {Shneiderman, Ben and Preece,J.} } @inbook {17770, title = {Aggregates in Generalized Temporally Indeterminate Databases}, booktitle = {Scalable Uncertainty Management}, series = {Lecture Notes in Computer Science}, volume = {4772}, year = {2007}, month = {2007///}, pages = {171 - 186}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Dyreson and Snodgrass as well as Dekhtyar et al. have provided a probabilistic model (as well as compelling example applications) for why there may be temporal indeterminacy in databases. In this paper, we first propose a formal model for aggregate computation in such databases when there is uncertainty not just in the temporal attribute, but also in the ordinary (non-temporal) attributes. We identify two types of aggregates: event-correlated aggregates and non-event-correlated aggregates, and provide efficient algorithms for both.
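As a toy illustration of the kind of computation involved (a sketch of the general idea only, not the paper's algorithms), the expected value of a COUNT aggregate over uncertain tuples reduces, under a tuple-independence assumption, to a sum of validity probabilities; the events and probabilities below are invented:

```python
# Expected COUNT over temporally indeterminate tuples: each tuple carries the
# probability that it is actually valid at the query time. Under a
# tuple-independence assumption the expected count is the sum of those
# probabilities (linearity of expectation). All values here are hypothetical.
tuples = [("event1", 0.9), ("event2", 0.5), ("event3", 0.25)]

expected_count = sum(p for _, p in tuples)
print(expected_count)  # 1.65
```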
We prove that our algorithms are correct, and we present experimental results showing that the algorithms work well in practice.}, keywords = {Computer Science}, isbn = {978-3-540-75407-7}, url = {http://dx.doi.org/10.1007/978-3-540-75410-7_13}, author = {Udrea,Octavian and Majki{\'c},Zoran and Subrahmanian,V.}, editor = {Prade,Henri and Subrahmanian,V.} } @article {18734, title = {Algorithms for on-line monitoring of micro spheres in an optical tweezers-based assembly cell}, journal = {Journal of Computing and Information Science in Engineering}, volume = {7}, year = {2007}, month = {2007///}, pages = {330}, abstract = {Optical tweezers have emerged as a powerful tool for micro- and nanomanipulation. Using optical tweezers to perform automated assembly requires on-line monitoring of components in the assembly workspace. This paper presents algorithms for estimating positions and orientations of microscale and nanoscale components in the 3-Dimensional assembly workspace. Algorithms presented in this paper use images obtained by optical section microscopy. The images are first segmented to locate areas of interest and then image gradient information from the areas of interest is used to generate probable locations and orientations of components in the XY-plane. Finally, signature curves are computed and utilized to obtain component locations and orientations in 3-D space. We have tested these algorithms with silica micro-spheres as well as metallic nanowires. We believe that the algorithms described in this paper will provide the foundation for realizing automated assembly operations in optical tweezers-based assembly cells.}, author = {Peng,T. and Balijepalli,A. and Gupta,S.K. and LeBrun,T.} } @conference {16019, title = {Application of MCL in a dialog agent}, booktitle = {Third Language and Technology Conference}, year = {2007}, month = {2007///}, author = {Josyula,D. P and Fults,S. and Anderson,M. L and Wilson,S. and Perlis, Don} } @conference {14664, title = {Automated detection of persistent kernel control-flow attacks}, booktitle = {Proceedings of the 14th ACM conference on Computer and communications security}, series = {CCS {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {103 - 115}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper presents a new approach to dynamically monitoring operating system kernel integrity, based on a property called state-based control-flow integrity (SBCFI). Violations of SBCFI signal a persistent, unexpected modification of the kernel{\textquoteright}s control-flow graph. We performed a thorough analysis of 25 Linux rootkits and found that 24 (96\%) employ persistent control-flow modifications; an informal study of Windows rootkits yielded similar results. We have implemented SBCFI enforcement as part of the Xen and VMware virtual machine monitors. Our implementation detected all the control-flow modifying rootkits we could install, while imposing unnoticeable overhead for both a typical web server workload and CPU-intensive workloads when operating at 10 second intervals.}, keywords = {CFI, integrity, Kernel, rootkit, virtualization}, isbn = {978-1-59593-703-2}, doi = {10.1145/1315245.1315260}, url = {http://doi.acm.org/10.1145/1315245.1315260}, author = {Petroni,Jr.,Nick L.
and Hicks, Michael W.} } @article {16076, title = {BELIV{\textquoteright}06: beyond time and errors: novel evaluation methods for information visualization}, journal = {interactions}, volume = {14}, year = {2007}, month = {2007/05//}, pages = {59 - 60}, isbn = {1072-5520}, doi = {10.1145/1242421.1242460}, url = {http://doi.acm.org/10.1145/1242421.1242460}, author = {Bertini,Enrico and Plaisant, Catherine and Santucci,Giuseppe} } @article {16238, title = {Characterization of Ehp, a Secreted Complement Inhibitory Protein from Staphylococcus aureus}, journal = {Journal of Biological Chemistry}, volume = {282}, year = {2007}, month = {2007/10/12/}, pages = {30051 - 30061}, abstract = {We report here the discovery and characterization of Ehp, a new secreted Staphylococcus aureus protein that potently inhibits the alternative complement activation pathway. Ehp was identified through a genomic scan as an uncharacterized secreted protein from S. aureus, and immunoblotting of conditioned S. aureus culture medium revealed that the Ehp protein was secreted at the highest levels during log-phase bacterial growth. The mature Ehp polypeptide is composed of 80 residues and is 44\% identical to the complement inhibitory domain of S. aureus Efb (extracellular fibrinogen-binding protein). We observed preferential binding by Ehp to native and hydrolyzed C3 relative to fully active C3b and found that Ehp formed a subnanomolar affinity complex with these various forms of C3 by binding to its thioester-containing C3d domain. Site-directed mutagenesis demonstrated that Arg75 and Asn82 are important in forming the Ehp{\textperiodcentered}C3d complex, but loss of these side chains did not completely disrupt Ehp/C3d binding. This suggested the presence of a second C3d-binding site in Ehp, which was mapped to the proximity of Ehp Asn63. Further molecular level details of the Ehp/C3d interaction were revealed by solving the 2.7-{\r A} crystal structure of an Ehp{\textperiodcentered}C3d complex in which the low affinity site had been mutationally inactivated. Ehp potently inhibited C3b deposition onto sensitized surfaces by the alternative complement activation pathway. This inhibition was directly related to Ehp/C3d binding and was more potent than that seen for Efb-C. An altered conformation in Ehp-bound C3 was detected by monoclonal antibody C3-9, which is specific for a neoantigen exposed in activated forms of C3. Our results suggest that increased inhibitory potency of Ehp relative to Efb-C is derived from the second C3-binding site in this new protein.}, doi = {10.1074/jbc.M704247200}, url = {http://www.jbc.org/content/282/41/30051.abstract}, author = {Hammel,Michal and Sfyroera,Georgia and Pyrpassopoulos,Serapion and Ricklin,Daniel and Ramyar,Kasra X. and Pop, Mihai and Jin,Zhongmin and Lambris,John D. and Geisbrecht,Brian V.} } @conference {13147, title = {Classifying Computer Generated Charts}, booktitle = {Content-Based Multimedia Indexing, 2007. CBMI {\textquoteright}07. International Workshop on}, year = {2007}, month = {2007/06//}, pages = {85 - 92}, abstract = {We present an approach for classifying images of charts based on the shape and spatial relationships of their primitives. Five categories are considered: bar-charts, curve-plots, pie-charts, scatter-plots and surface-plots. We introduce two novel features to represent the structural information based on (a) region segmentation and (b) curve saliency. 
The local shape is characterized using the Histograms of Oriented Gradients (HOG) and the Scale Invariant Feature Transform (SIFT) descriptors. Each image is represented by sets of feature vectors of each modality. The similarity between two images is measured by the overlap in the distribution of the features, measured using the Pyramid Match algorithm. A test image is classified based on its similarity with training images from the categories. The approach is tested with a database of images collected from the Internet.}, keywords = {Internet, bar-chart, curve-plot, image classification, image database, image matching, image representation, image segmentation, pie-chart, pyramid match algorithm, scale invariant feature transform, scatter-plot, spatial relationship, surface-plot, statistical analysis, visual databases}, doi = {10.1109/CBMI.2007.385396}, author = {Prasad,V. S.N and Siddiquie,B. and Golbeck,J. and Davis, Larry S.} } @article {17023, title = {Community response grids: E-government, social networks, and effective emergency management}, journal = {Telecommunications Policy}, volume = {31}, year = {2007}, month = {2007/11//}, pages = {592 - 604}, abstract = {This paper explores the concept of developing community response grids (CRGs) for community emergency response and the policy implications of such a system. CRGs make use of the Internet and mobile communication devices, allowing residents and responders to share information, communicate, and coordinate activities in response to a major disaster. This paper explores the viability of using mobile communication technologies and the Web, including e-government, to develop response systems that would aid communities before, during, and after a major disaster, providing channels for contacting residents and responders, uploading information, distributing information, coordinating the responses of social networks, and facilitating resident-to-resident assistance. Drawing upon research from computer science, information studies, public policy, emergency management, and several other disciplines, the paper elaborates on the concept of and need for CRGs, examines related current efforts that can inform the development of CRGs, discusses how research about community networks can be used to instill trust and social capital in CRGs, and examines the issues of public policy, telecommunications, and e-government related to such a system.}, keywords = {Community response grid, E-government, Emergency response, Mobile communications, Public policy, social networks}, isbn = {0308-5961}, doi = {10.1016/j.telpol.2007.07.008}, url = {http://www.sciencedirect.com/science/article/pii/S0308596107000699}, author = {Jaeger,Paul T. and Shneiderman, Ben and Fleischmann,Kenneth R. and Preece,Jennifer and Qu,Yan and Fei Wu,Philip} } @article {17022, title = {Community response grids for older adults: Motivations, usability, and sociability}, journal = {Proceedings of the 13th Americas Conference on Information Systems}, year = {2007}, month = {2007///}, abstract = {This paper discusses the motivation for a Community Response Grid (CRG) to help older adults improve their capability for coping with emergency situations. We define and discuss the concept of a CRG, briefly review the limits of current emergency response systems, and identify usability and sociability guidelines for CRGs for older adults based on existing research. The paper ends with a call to action and suggestions for future research directions.
}, author = {Wu,P.F. and Preece,J. and Shneiderman, Ben and Jaeger,P. T and Qu,Y.} } @article {17024, title = {Community Response Grids: Using Information Technology to Help Communities Respond to Bioterror Emergencies}, journal = {Biosecurity and Bioterrorism: Biodefense Strategy, Practice, and Science}, volume = {5}, year = {2007}, month = {2007/12//}, pages = {335 - 346}, abstract = {Access to accurate and trusted information is vital in preparing for, responding to, and recovering from an emergency. To facilitate response in large-scale emergency situations, Community Response Grids (CRGs) integrate Internet and mobile technologies to enable residents to report information, professional emergency responders to disseminate instructions, and residents to assist one another. CRGs use technology to help residents and professional emergency responders to work together in community response to emergencies, including bioterrorism events. In a time of increased danger from bioterrorist threats, the application of advanced information and communication technologies to community response is vital in confronting such threats. This article describes CRGs, their underlying concepts, development efforts, their relevance to biosecurity and bioterrorism, and future research issues in the use of technology to facilitate community response.}, isbn = {1538-7135, 1557-850X}, doi = {10.1089/bsp.2007.0034}, url = {http://www.liebertonline.com/doi/abs/10.1089/bsp.2007.0034}, author = {Jaeger,Paul T. and Fleischmann,Kenneth R. and Preece,Jennifer and Shneiderman, Ben and Fei Wu,Philip and Qu,Yan} } @conference {18646, title = {A Comparison between Internal and External Malicious Traffic}, year = {2007}, month = {2007///}, pages = {109 - 114}, abstract = {This paper empirically compares malicious traffic originating inside an organization (i.e., internal traffic) with malicious traffic originating outside an organization (i.e., external traffic). Two honeypot target computers were deployed to collect malicious traffic data over a period of fifteen weeks. In the first study we showed that there was a weak correlation between internal and external traffic based on the number of malicious connections. In the second study, since the type of malicious activity is linked to the port that was targeted, we focused on the most frequently targeted ports; we observed that internal malicious traffic often contained different malicious content compared to that of external traffic. In the third study, we discovered that the volume of malicious traffic was linked to the day of the week. We showed that internal and external malicious activities differ: where the external malicious activity is quite stable over the week, the internal traffic varied as a function of the users{\textquoteright} activity profile.}, keywords = {Computer networks, Data analysis, external traffic, honeypot target computers, internal traffic, malicious traffic data, security of data, user activity profile}, doi = {10.1109/ISSRE.2007.32}, author = {Michel Cukier and Panjwani,S.} } @article {18966, title = {A computational survey of candidate exonic splicing enhancer motifs in the model plant Arabidopsis thaliana}, journal = {BMC Bioinformatics}, volume = {8}, year = {2007}, month = {2007/05/21/}, pages = {159}, abstract = {Algorithmic approaches to splice site prediction have relied mainly on the consensus patterns found at the boundaries between protein coding and non-coding regions.
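Consensus-pattern approaches of this kind are commonly realized as position weight matrices; the following is a minimal sketch with a made-up matrix, not the scoring scheme used in the paper:

```python
import math

# Minimal position-weight-matrix scorer for a 4-base window around a
# candidate splice site. The matrix below is made up for illustration;
# real matrices are estimated from aligned, annotated splice sites.
pwm = [
    {"A": 0.60, "C": 0.10, "G": 0.20, "T": 0.10},
    {"A": 0.10, "C": 0.10, "G": 0.70, "T": 0.10},
    {"A": 0.05, "C": 0.05, "G": 0.85, "T": 0.05},
    {"A": 0.10, "C": 0.10, "G": 0.10, "T": 0.70},
]
BACKGROUND = 0.25  # uniform background base frequency

def score(site):
    """Log-odds score of a candidate site against the background model."""
    return sum(math.log2(pwm[i][base] / BACKGROUND) for i, base in enumerate(site))

print(round(score("AGGT"), 2))  # higher means closer to the consensus
```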
However, exonic splicing enhancers have been shown to enhance the utilization of nearby splice sites.}, isbn = {1471-2105}, doi = {10.1186/1471-2105-8-159}, url = {http://www.biomedcentral.com/1471-2105/8/159/abstract}, author = {Pertea,Mihaela and Mount, Stephen M. and Salzberg,Steven L.} } @inbook {19606, title = {On the Cost of Persistence and Authentication in Skip Lists}, booktitle = {Experimental Algorithms}, series = {Lecture Notes in Computer Science}, year = {2007}, month = {2007/01/01/}, pages = {94 - 107}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We present an extensive experimental study of authenticated data structures for dictionaries and maps implemented with skip lists. We consider realizations of these data structures that allow us to study the performance overhead of authentication and persistence. We explore various design decisions and analyze the impact of garbage collection and virtual memory paging as well. Our empirical study confirms the efficiency of authenticated skip lists and offers guidelines for incorporating them in various applications.}, keywords = {Algorithm Analysis and Problem Complexity, algorithms, Computer Graphics, Data structures, Discrete Mathematics in Computer Science, Numeric Computing}, isbn = {978-3-540-72844-3, 978-3-540-72845-0}, url = {http://link.springer.com/chapter/10.1007/978-3-540-72845-0_8}, author = {Goodrich, Michael T. and Charalampos Papamanthou and Tamassia, Roberto}, editor = {Demetrescu, Camil} } @article {12922, title = {Creating a nationwide wireless detection sensor network for chemical, biological and radiological threats}, journal = {Gentag White Paper}, year = {2007}, month = {2007///}, author = {Rita R Colwell and Peeters,J.} } @conference {17582, title = {Cross-layer latency minimization in wireless networks with SINR constraints}, booktitle = {Proceedings of the 8th ACM international symposium on Mobile ad hoc networking and computing}, series = {MobiHoc {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {110 - 119}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Recently, there has been substantial interest in the design of cross-layer protocols for wireless networks. These protocols optimize certain performance metric(s) of interest (e.g. latency, energy, rate) by jointly optimizing the performance of multiple layers of the protocol stack. Algorithm designers often use geometric-graph-theoretic models for radio interference to design such cross-layer protocols. In this paper we study the problem of designing cross-layer protocols for multi-hop wireless networks using a more realistic Signal to Interference plus Noise Ratio (SINR) model for radio interference. The following cross-layer latency minimization problem is studied: Given a set V of transceivers, and a set of source-destination pairs, (i) choose power levels for all the transceivers, (ii) choose routes for all connections, and (iii) construct an end-to-end schedule such that the SINR constraints are satisfied at each time step so as to minimize the make-span of the schedule (the time by which all packets have reached their respective destinations). We present a polynomial-time algorithm with provable worst-case performance guarantee for this cross-layer latency minimization problem.
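The SINR constraint referenced in this abstract has a standard form: a receiver decodes successfully when the received signal power divided by noise plus aggregate interference exceeds a threshold beta. A minimal feasibility check with illustrative numbers follows (the paper's scheduling algorithm itself is not reproduced here):

```python
# Feasibility check for one time step of a schedule under the SINR model:
# every scheduled link i must satisfy
#   power_i * gain[i][i] / (noise + sum_{j != i} power_j * gain[j][i]) >= beta.
# The powers, gains, noise floor, and threshold below are illustrative only.

def sinr_feasible(powers, gain, noise, beta):
    n = len(powers)
    for i in range(n):
        signal = powers[i] * gain[i][i]
        interference = sum(powers[j] * gain[j][i] for j in range(n) if j != i)
        if signal < beta * (noise + interference):
            return False
    return True

gain = [[1.0, 0.05],   # gain[j][i]: attenuation from sender j to receiver i
        [0.08, 1.0]]
print(sinr_feasible([1.0, 1.2], gain, noise=0.1, beta=2.0))  # True
```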
As corollaries of the algorithmic technique we show that a number of variants of the cross-layer latency minimization problem can also be approximated efficiently in polynomial time. Our work extends the results of Kumar et al. (Proc. SODA, 2004) and Moscibroda et al. (Proc. MOBIHOC, 2006). Although our algorithm considers multiple layers of the protocol stack, it can naturally be viewed as compositions of tasks specific to each layer --- this allows us to improve the overall performance while preserving the modularity of the layered structure.}, keywords = {cross-layer design, end-to-end scheduling, Interference, SINR model, Wireless networks}, isbn = {978-1-59593-684-4}, doi = {10.1145/1288107.1288123}, url = {http://doi.acm.org/10.1145/1288107.1288123}, author = {Chafekar,Deepti and Kumar,V. S. Anil and Marathe,Madhav V. and Parthasarathy,Srinivasan and Srinivasan, Aravind} } @article {18697, title = {Crystal Structure and Solution NMR Studies of Lys48-linked Tetraubiquitin at Neutral pH}, journal = {Journal of Molecular Biology}, volume = {367}, year = {2007}, month = {2007/03/16/}, pages = {204 - 211}, abstract = {Ubiquitin modification of proteins is used as a signal in many cellular processes. Lysine side-chains can be modified by a single ubiquitin or by a polyubiquitin chain, which is defined by an isopeptide bond between the C terminus of one ubiquitin and a specific lysine in a neighboring ubiquitin. Polyubiquitin conformations that result from different lysine linkages presumably differentiate their roles and ability to bind specific targets and enzymes. However, conflicting results have been obtained regarding the precise conformation of Lys48-linked tetraubiquitin. We report the crystal structure of Lys48-linked tetraubiquitin at near-neutral pH. The two tetraubiquitin complexes in the asymmetric unit show the complete connectivity of the chain and the molecular details of the interactions. This tetraubiquitin conformation is consistent with our NMR data as well as with previous studies of diubiquitin and tetraubiquitin in solution at neutral pH. The structure provides a basis for understanding Lys48-linked polyubiquitin recognition under physiological conditions.}, keywords = {crystal structure, Lys48-linked, polyubiquitin chains, tetraubiquitin, ubiquitin}, isbn = {0022-2836}, doi = {10.1016/j.jmb.2006.12.065}, url = {http://www.sciencedirect.com/science/article/pii/S0022283606017554}, author = {Eddins,Michael J. and Varadan,Ranjani and Fushman, David and Pickart,Cecile M. and Wolberger,Cynthia} } @conference {17782, title = {Detecting stochastically scheduled activities in video}, booktitle = {International Joint Conference on Artificial Intelligence}, year = {2007}, month = {2007///}, pages = {1802 - 1807}, abstract = {The ability to automatically detect activities in video is of increasing importance in applications such as bank security, airport tarmac security, baggage area security and building site surveillance. We present a stochastic activity model composed of atomic actions which are directly observable through image understanding primitives. We focus on answering two types of questions: (i) what are the minimal sub-videos in which a given action is identified with probability above a certain threshold and (ii) for a given video, can we decide which activity from a given set most likely occurred? We provide the MPS algorithm for the first problem, as well as two different algorithms (naiveMPA and MPA) to solve the second.
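A toy version of question (i) can be sketched with a sliding window, under a frame-independence assumption that the paper's MPS algorithm does not make; the per-frame probabilities here are invented:

```python
# Toy version of question (i): shortest frame window in which the action is
# detected with probability above a threshold. Unlike the MPS algorithm, this
# sketch assumes independent per-frame detections; probabilities are invented.
frame_probs = [0.2, 0.6, 0.7, 0.1, 0.8, 0.75]

def shortest_window(probs, threshold):
    best = None
    for i in range(len(probs)):
        miss = 1.0
        for j in range(i, len(probs)):
            miss *= 1.0 - probs[j]           # P(action missed in frames i..j)
            if 1.0 - miss >= threshold:      # P(detected at least once)
                if best is None or j - i + 1 < best[2]:
                    best = (i, j, j - i + 1)
                break
    return best

print(shortest_window(frame_probs, 0.9))  # (4, 5, 2)
```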
Our experimental results on a dataset consisting of staged bank robbery videos (described in [Vu et al., 2003]) show that our algorithms are both fast and provide high quality results when compared to human reviewers.}, author = {Albanese, M. and Moscato, V. and Picariello, A. and V.S. Subrahmanian and Udrea,O.} } @conference {15461, title = {Direct-dependency-based software compatibility testing}, booktitle = {Proceedings of the twenty-second IEEE/ACM international conference on Automated software engineering}, series = {ASE {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {409 - 412}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Software compatibility testing is an important quality assurance task aimed at ensuring that component-based software systems build and/or execute properly across a broad range of user system configurations. Because each configuration can involve multiple components with different versions, and because there are complex and changing interdependencies between components and their versions, it is generally infeasible to test all potential configurations. Therefore, compatibility testing usually means examining only a handful of default or popular configurations to detect problems, and as a result costly errors can and do escape to the field. This paper presents an improved approach to compatibility testing called RACHET. We formally model the configuration space for component-based systems and use the model to generate test plans covering a user-specified portion of the space; the example in this paper covers all direct dependencies between components. The test plan is executed efficiently in parallel, by distributing work so as to best utilize test resources. We conducted experiments and simulation studies applying our approach to a large-scale data management middleware system. The results showed that for this system RACHET discovered incompatibilities between components at a small fraction of the cost for exhaustive testing without compromising test quality.}, keywords = {compatibility testing, component-based software system}, isbn = {978-1-59593-882-4}, doi = {10.1145/1321631.1321696}, url = {http://doi.acm.org/10.1145/1321631.1321696}, author = {Yoon,Il-Chul and Sussman, Alan and Memon, Atif M. and Porter, Adam} } @conference {16079, title = {Discovering interesting usage patterns in text collections: integrating text mining with visualization}, booktitle = {Proceedings of the sixteenth ACM conference on Conference on information and knowledge management}, series = {CIKM {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {213 - 222}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper addresses the problem of making text mining results more comprehensible to humanities scholars, journalists, intelligence analysts, and other researchers, in order to support the analysis of text collections. Our system, FeatureLens, visualizes a text collection at several levels of granularity and enables users to explore interesting text patterns. The current implementation focuses on frequent itemsets of n-grams, as they capture the repetition of exact or similar expressions in the collection. Users can find meaningful co-occurrences of text patterns by visualizing them within and across documents in the collection. This also permits users to identify the temporal evolution of usage such as increasing, decreasing or sudden appearance of text patterns.
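The frequent n-gram features underlying FeatureLens can be approximated in a few lines; a minimal sketch over toy documents (not the system's implementation):

```python
from collections import Counter

# Count word 3-grams per document; n-grams that recur within and across
# documents are the kind of repeated expressions FeatureLens surfaces.
# The two documents are toy data.
docs = ["to be or not to be that is the question",
        "to be or not to be whether tis nobler"]

def ngrams(text, n=3):
    words = text.split()
    return [" ".join(words[i:i + n]) for i in range(len(words) - n + 1)]

per_doc = [Counter(ngrams(d)) for d in docs]
total = sum(per_doc, Counter())
for gram, count in total.most_common(3):
    print(count, gram, [c[gram] for c in per_doc])
```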
The interface could be used to explore other text features as well. Initial studies suggest that FeatureLens helped a literary scholar and 8 users generate new hypotheses and interesting insights using 2 text collections.}, keywords = {digital humanities, frequent closed itemsets, n-grams, text mining, user interface}, isbn = {978-1-59593-803-9}, doi = {10.1145/1321440.1321473}, url = {http://doi.acm.org/10.1145/1321440.1321473}, author = {Don,Anthony and Zheleva,Elena and Gregory,Machon and Tarkan,Sureyya and Auvil,Loretta and Clement,Tanya and Shneiderman, Ben and Plaisant, Catherine} } @inbook {17584, title = {Distributed Ranked Search}, booktitle = {High Performance Computing {\textendash} HiPC 2007High Performance Computing {\textendash} HiPC 2007}, series = {Lecture Notes in Computer Science}, volume = {4873}, year = {2007}, month = {2007///}, pages = {7 - 20}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {P2P deployments are a natural infrastructure for building distributed search networks. Proposed systems support locating and retrieving all results, but lack the information necessary to rank them. Users, however, are primarily interested in the most relevant results, not necessarily all possible results. Using random sampling, we extend a class of well-known information retrieval ranking algorithms such that they can be applied in this decentralized setting. We analyze the overhead of our approach, and quantify how our system scales with increasing number of documents, system size, document to node mapping (uniform versus non-uniform), and types of queries (rare versus popular terms). Our analysis and simulations show that a) these extensions are efficient, and scale with little overhead to large systems, and b) the accuracy of the results obtained using distributed ranking is comparable to that of a centralized implementation.}, isbn = {978-3-540-77219-4}, url = {http://dx.doi.org/10.1007/978-3-540-77220-0_6}, author = {Gopalakrishnan,Vijay and Morselli,Ruggero and Bhattacharjee, Bobby and Keleher,Pete and Srinivasan, Aravind}, editor = {Aluru,Srinivas and Parashar,Manish and Badrinath,Ramamurthy and Prasanna,Viktor} } @article {16257, title = {Draft Genome of the Filarial Nematode Parasite Brugia Malayi}, journal = {Science}, volume = {317}, year = {2007}, month = {2007/09/21/}, pages = {1756 - 1760}, abstract = {Parasitic nematodes that cause elephantiasis and river blindness threaten hundreds of millions of people in the developing world. We have sequenced the \~{}90 megabase (Mb) genome of the human filarial parasite Brugia malayi and predict \~{}11,500 protein coding genes in 71 Mb of robustly assembled sequence. Comparative analysis with the free-living, model nematode Caenorhabditis elegans revealed that, despite these genes having maintained little conservation of local synteny during \~{}350 million years of evolution, they largely remain in linkage on chromosomal units. More than 100 conserved operons were identified. Analysis of the predicted proteome provides evidence for adaptations of B. malayi to niches in its human and vector hosts and insights into the molecular basis of a mutualistic relationship with its Wolbachia endosymbiont. 
These findings offer a foundation for rational drug design.}, isbn = {0036-8075, 1095-9203}, doi = {10.1126/science.1145406}, url = {http://www.sciencemag.org/content/317/5845/1756}, author = {Ghedin,Elodie and Wang,Shiliang and Spiro,David and Caler,Elisabet and Zhao,Qi and Crabtree,Jonathan and Allen,Jonathan E and Delcher,Arthur L. and Guiliano,David B and Miranda-Saavedra,Diego and Angiuoli,Samuel V and Creasy,Todd and Amedeo,Paolo and Haas,Brian and El-Sayed, Najib M. and Wortman,Jennifer R. and Feldblyum,Tamara and Tallon,Luke and Schatz,Michael and Shumway,Martin and Koo,Hean and Salzberg,Steven L. and Schobel,Seth and Pertea,Mihaela and Pop, Mihai and White,Owen and Barton,Geoffrey J and Carlow,Clotilde K. S and Crawford,Michael J and Daub,Jennifer and Dimmic,Matthew W and Estes,Chris F and Foster,Jeremy M and Ganatra,Mehul and Gregory,William F and Johnson,Nicholas M and Jin,Jinming and Komuniecki,Richard and Korf,Ian and Kumar,Sanjay and Laney,Sandra and Li,Ben-Wen and Li,Wen and Lindblom,Tim H and Lustigman,Sara and Ma,Dong and Maina,Claude V and Martin,David M. A and McCarter,James P and McReynolds,Larry and Mitreva,Makedonka and Nutman,Thomas B and Parkinson,John and Peregr{\'\i}n-Alvarez,Jos{\'e} M and Poole,Catherine and Ren,Qinghu and Saunders,Lori and Sluder,Ann E and Smith,Katherine and Stanke,Mario and Unnasch,Thomas R and Ware,Jenna and Wei,Aguan D and Weil,Gary and Williams,Deryck J and Zhang,Yinhua and Williams,Steven A and Fraser-Liggett,Claire and Slatko,Barton and Blaxter,Mark L and Scott,Alan L} } @article {12186, title = {EcoLens: Integration and interactive visualization of ecological datasets}, journal = {Ecological Informatics}, volume = {2}, year = {2007}, month = {2007/01/01/}, pages = {61 - 69}, abstract = {Complex multi-dimensional datasets are now pervasive in science and elsewhere in society. Better interactive tools are needed for visual data exploration so that patterns in such data may be easily discovered, data can be proofread, and subsets of data can be chosen for algorithmic analysis. In particular, synthetic research such as ecological interaction research demands effective ways to examine multiple datasets. This paper describes our integration of hundreds of food-web datasets into a common platform, and the visualization software, EcoLens, we developed for exploring this information. This publicly-available application and integrated dataset have been useful for our research predicting large complex food webs, and EcoLens is favorably reviewed by other researchers. Many habitats are not well represented in our large database. We confirm earlier results about the small size and lack of taxonomic resolution in early food webs but find that they and a non-food-web source provide trophic information about a large number of taxa absent from more modern studies. Corroboration of Tuesday Lake trophic links across studies is usually possible, but lack of links among congeners may have several explanations. 
While EcoLens does not provide all kinds of analytical support, its label- and item-based approach is effective at addressing concerns about the comparability and taxonomic resolution of food-web data.}, keywords = {Data integration, Food webs, Taxonomy, Visualization}, isbn = {1574-9541}, doi = {10.1016/j.ecoinf.2007.03.005}, url = {http://www.sciencedirect.com/science/article/pii/S1574954107000118}, author = {Parr,Cynthia Sims and Lee,Bongshin and Bederson, Benjamin B.} } @article {12357, title = {Efficient simulation of critical synchronous dataflow graphs}, journal = {ACM Transactions on Design Automation of Electronic Systems (TODAES)}, volume = {12}, year = {2007}, month = {2007///}, pages = {1 - 28}, author = {Hsu,C. J and Ko,M. Y and Bhattacharyya, Shuvra S. and Ramasubbu,S. and Pino,J. L} } @article {18006, title = {Electron beam and optical proximity effect reduction for nanolithography: New results}, journal = {Journal of Vacuum Science \& Technology B}, volume = {25}, year = {2007}, month = {2007///}, pages = {2288 - 2294}, abstract = {Proximity effect correction by dose modulation is widely practiced in electron-beam lithography. Optical proximity control is also possible using a combination of shape adjustment and phase control. Assigning {\textquotedblleft}the right{\textquotedblright} dose (or fill factor and phase for optics) is a well-known mathematical inverse problem. Linear programming, by definition, is the appropriate method for determining dose. In the past, the technique was too slow for full-scale implementation in mask making. Here, the authors discuss how recent developments in computer speed and architecture have improved the prospects for full-scale implementation. In addition, the authors discuss some numerical techniques, analogous to gridding and relaxation, that make linear programming more attractive in mask making.}, keywords = {electron beam lithography, Linear programming, masks, nanolithography, proximity effect (lithography)}, doi = {10.1116/1.2806967}, url = {http://link.aip.org/link/?JVB/25/2288/1}, author = {Peckerar,Martin and Sander,David and Srivastava,Ankur and Foli,Adakou and Vishkin, Uzi} } @conference {12330, title = {Energy-aware data compression for wireless sensor networks}, booktitle = {Acoustics, Speech and Signal Processing, 2007. ICASSP 2007. IEEE International Conference on}, volume = {2}, year = {2007}, month = {2007///}, pages = {II{\textendash}45}, author = {Puthenpurayil,S. and Gu,R. and Bhattacharyya, Shuvra S.} } @conference {12382, title = {An energy-driven design methodology for distributing DSP applications across wireless sensor networks}, booktitle = {Proceedings of RTSS}, year = {2007}, month = {2007///}, pages = {214 - 226}, author = {Shen,C. C and Plishker,W. and Bhattacharyya, Shuvra S. and Goldsman,N.} } @article {12600, title = {Estimation of contour motion and deformation for nonrigid object tracking}, journal = {Journal of the Optical Society of America A}, volume = {24}, year = {2007}, month = {2007///}, pages = {2109 - 2121}, abstract = {We present an algorithm for nonrigid contour tracking in heavily cluttered background scenes. Based on the properties of nonrigid contour movements, a sequential framework for estimating contour motion and deformation is proposed. We solve the nonrigid contour tracking problem by decomposing it into three subproblems: motion estimation, deformation estimation, and shape regulation.
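The first of these subproblems, as the abstract goes on to explain, is handled with a particle filter over global motion parameters; below is a generic predict-weight-resample step for a translation-only state, with a synthetic likelihood standing in for image cues (purely illustrative, not the authors' tracker):

```python
import random

# One predict-weight-resample step of a particle filter estimating a global
# 2-D translation (tx, ty). The Gaussian motion noise and the synthetic
# likelihood stand in for the image-based cues a real contour tracker uses.
random.seed(0)
particles = [(0.0, 0.0)] * 200
true_motion = (2.0, -1.0)  # hidden state the filter should recover

def likelihood(p):
    dx, dy = p[0] - true_motion[0], p[1] - true_motion[1]
    return 1.0 / (1.0 + dx * dx + dy * dy)  # peaked at the true motion

# Predict: diffuse each particle with motion noise.
particles = [(x + random.gauss(0, 2), y + random.gauss(0, 2)) for x, y in particles]
# Weight: score each particle by the (synthetic) observation likelihood.
weights = [likelihood(p) for p in particles]
# Resample: draw particles in proportion to their weights.
particles = random.choices(particles, weights=weights, k=len(particles))
estimate = (sum(x for x, _ in particles) / len(particles),
            sum(y for _, y in particles) / len(particles))
print(estimate)  # close to (2.0, -1.0)
```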
First, we employ a particle filter to estimate the global motion parameters of the affine transform between successive frames. Then we generate a probabilistic deformation map to deform the contour. To improve robustness, multiple cues are used for deformation probability estimation. Finally, we use a shape prior model to constrain the deformed contour. This enables us to retrieve the occluded parts of the contours and accurately track them while allowing shape changes specific to the given object types. Our experiments show that the proposed algorithm significantly improves the tracker performance.}, keywords = {Motion detection, VISION}, doi = {10.1364/JOSAA.24.002109}, url = {http://josaa.osa.org/abstract.cfm?URI=josaa-24-8-2109}, author = {Jie Shao and Porikli,Fatih and Chellappa, Rama} } @article {16670, title = {Evaluating a cross-cultural children{\textquoteright}s online book community: Lessons learned for sociability, usability, and cultural exchange}, journal = {Interacting with Computers}, volume = {19}, year = {2007}, month = {2007/07//}, pages = {494 - 511}, abstract = {The use of computers for human-to-human communication among adults has been studied for many years, but using computer technology to enable children from all over the world to talk to each other has rarely been discussed by researchers. The goal of our research is to fill this gap and explore the design and evaluation of children{\textquoteright}s cross-language online communities via a case study of the International Children{\textquoteright}s Digital Library Communities (ICDLCommunities). This project supports the development of communities for children (ages 7{\textendash}11) that form around the International Children{\textquoteright}s Digital Library (ICDL) book collection. In this community the children can learn about each other{\textquoteright}s cultures and make friends even if they do not speak the same language. They can also read and create stories and ask and answer questions about these. From this evaluation study we learned that: (i) children are very interested in their counterparts in other countries and a remarkable amount of communication takes place even when they do not share a common language; (ii) representing their identity online in many different forms is particularly important to children when communicating in an online community; (iii) children enjoy drawing but representing stories in a sequence of diagrams is challenging and needs support; and (iv) asking and answering questions without language is possible using graphical templates.
In this paper we present our findings and make recommendations for designing children{\textquoteright}s cross-cultural online communities.}, keywords = {children, Cross-cultural, Evaluation and design, International Children{\textquoteright}s Digital Library, Online communities}, isbn = {0953-5438}, doi = {10.1016/j.intcom.2007.03.001}, url = {http://www.sciencedirect.com/science/article/pii/S0953543807000240}, author = {Komlodi,Anita and Hou,Weimin and Preece,Jenny and Druin, Allison and Golub,Evan and Alburo,Jade and Liao,Sabrina and Elkiss,Aaron and Resnik, Philip} } @article {16261, title = {Evolution of genes and genomes on the Drosophila phylogeny}, journal = {Nature}, volume = {450}, year = {2007}, month = {2007/11/08/}, pages = {203 - 218}, abstract = {Comparative analysis of multiple genomes in a phylogenetic framework dramatically improves the precision and sensitivity of evolutionary inference, producing more robust results than single-genome analyses can provide. The genomes of 12 Drosophila species, ten of which are presented here for the first time (sechellia, simulans, yakuba, erecta, ananassae, persimilis, willistoni, mojavensis, virilis and grimshawi), illustrate how rates and patterns of sequence divergence across taxa can illuminate evolutionary processes on a genomic scale. These genome sequences augment the formidable genetic tools that have made Drosophila melanogaster a pre-eminent model for animal genetics, and will further catalyse fundamental research on mechanisms of development, cell biology, genetics, disease, neurobiology, behaviour, physiology and evolution. Despite remarkable similarities among these Drosophila species, we identified many putatively non-neutral changes in protein-coding genes, non-coding RNA genes, and cis-regulatory regions. These may prove to underlie differences in the ecology and behaviour of these diverse species.}, isbn = {0028-0836}, doi = {10.1038/nature06341}, url = {http://www.nature.com/nature/journal/v450/n7167/full/nature06341.html}, author = {Clark,Andrew G. and Eisen,Michael B. and Smith,Douglas R. and Bergman,Casey M. and Oliver,Brian and Markow,Therese A. and Kaufman,Thomas C. and Kellis,Manolis and Gelbart,William and Iyer,Venky N. and Pollard,Daniel A. and Sackton,Timothy B. and Larracuente,Amanda M. and Singh,Nadia D. and Abad,Jose P. and Abt,Dawn N. and Adryan,Boris and Aguade,Montserrat and Akashi,Hiroshi and Anderson,Wyatt W. and Aquadro,Charles F. and Ardell,David H. and Arguello,Roman and Artieri,Carlo G. and Barbash,Daniel A. and Barker,Daniel and Barsanti,Paolo and Batterham,Phil and Batzoglou,Serafim and Begun,Dave and Bhutkar,Arjun and Blanco,Enrico and Bosak,Stephanie A. and Bradley,Robert K. and Brand,Adrianne D. and Brent,Michael R. and Brooks,Angela N. and Brown,Randall H. and Butlin,Roger K. and Caggese,Corrado and Calvi,Brian R. and Carvalho,A. Bernardo de and Caspi,Anat and Castrezana,Sergio and Celniker,Susan E. and Chang,Jean L. and Chapple,Charles and Chatterji,Sourav and Chinwalla,Asif and Civetta,Alberto and Clifton,Sandra W. and Comeron,Josep M. and Costello,James C. and Coyne,Jerry A. and Daub,Jennifer and David,Robert G. and Delcher,Arthur L. and Delehaunty,Kim and Do,Chuong B. and Ebling,Heather and Edwards,Kevin and Eickbush,Thomas and Evans,Jay D. and Filipski,Alan and Findei{\ss},Sven and Freyhult,Eva and Fulton,Lucinda and Fulton,Robert and Garcia,Ana C. L. and Gardiner,Anastasia and Garfield,David A. and Garvin,Barry E.
and Gibson,Greg and Gilbert,Don and Gnerre,Sante and Godfrey,Jennifer and Good,Robert and Gotea,Valer and Gravely,Brenton and Greenberg,Anthony J. and Griffiths-Jones,Sam and Gross,Samuel and Guigo,Roderic and Gustafson,Erik A. and Haerty,Wilfried and Hahn,Matthew W. and Halligan,Daniel L. and Halpern,Aaron L. and Halter,Gillian M. and Han,Mira V. and Heger,Andreas and Hillier,LaDeana and Hinrichs,Angie S. and Holmes,Ian and Hoskins,Roger A. and Hubisz,Melissa J. and Hultmark,Dan and Huntley,Melanie A. and Jaffe,David B. and Jagadeeshan,Santosh and Jeck,William R. and Johnson,Justin and Jones,Corbin D. and Jordan,William C. and Karpen,Gary H. and Kataoka,Eiko and Keightley,Peter D. and Kheradpour,Pouya and Kirkness,Ewen F. and Koerich,Leonardo B. and Kristiansen,Karsten and Kudrna,Dave and Kulathinal,Rob J. and Kumar,Sudhir and Kwok,Roberta and Lander,Eric and Langley,Charles H. and Lapoint,Richard and Lazzaro,Brian P. and Lee,So-Jeong and Levesque,Lisa and Li,Ruiqiang and Lin,Chiao-Feng and Lin,Michael F. and Lindblad-Toh,Kerstin and Llopart,Ana and Long,Manyuan and Low,Lloyd and Lozovsky,Elena and Lu,Jian and Luo,Meizhong and Machado,Carlos A. and Makalowski,Wojciech and Marzo,Mar and Matsuda,Muneo and Matzkin,Luciano and McAllister,Bryant and McBride,Carolyn S. and McKernan,Brendan and McKernan,Kevin and Mendez-Lago,Maria and Minx,Patrick and Mollenhauer,Michael U. and Montooth,Kristi and Mount, Stephen M. and Mu,Xu and Myers,Eugene and Negre,Barbara and Newfeld,Stuart and Nielsen,Rasmus and Noor,Mohamed A. F. and O{\textquoteright}Grady,Patrick and Pachter,Lior and Papaceit,Montserrat and Parisi,Matthew J. and Parisi,Michael and Parts,Leopold and Pedersen,Jakob S. and Pesole,Graziano and Phillippy,Adam M and Ponting,Chris P. and Pop, Mihai and Porcelli,Damiano and Powell,Jeffrey R. and Prohaska,Sonja and Pruitt,Kim and Puig,Marta and Quesneville,Hadi and Ram,Kristipati Ravi and Rand,David and Rasmussen,Matthew D. and Reed,Laura K. and Reenan,Robert and Reily,Amy and Remington,Karin A. and Rieger,Tania T. and Ritchie,Michael G. and Robin,Charles and Rogers,Yu-Hui and Rohde,Claudia and Rozas,Julio and Rubenfield,Marc J. and Ruiz,Alfredo and Russo,Susan and Salzberg,Steven L. and Sanchez-Gracia,Alejandro and Saranga,David J. and Sato,Hajime and Schaeffer,Stephen W. and Schatz,Michael C and Schlenke,Todd and Schwartz,Russell and Segarra,Carmen and Singh,Rama S. and Sirot,Laura and Sirota,Marina and Sisneros,Nicholas B. and Smith,Chris D. and Smith,Temple F. and Spieth,John and Stage,Deborah E. and Stark,Alexander and Stephan,Wolfgang and Strausberg,Robert L. and Strempel,Sebastian and Sturgill,David and Sutton,Granger and Sutton,Granger G. and Tao,Wei and Teichmann,Sarah and Tobari,Yoshiko N. and Tomimura,Yoshihiko and Tsolas,Jason M. and Valente,Vera L. S. and Venter,Eli and Venter,J. Craig and Vicario,Saverio and Vieira,Filipe G. and Vilella,Albert J. and Villasante,Alfredo and Walenz,Brian and Wang,Jun and Wasserman,Marvin and Watts,Thomas and Wilson,Derek and Wilson,Richard K. and Wing,Rod A. and Wolfner,Mariana F. and Wong,Alex and Wong,Gane Ka-Shu and Wu,Chung-I and Wu,Gabriel and Yamamoto,Daisuke and Yang,Hsiao-Pei and Yang,Shiaw-Pyng and Yorke,James A. and Yoshida,Kiyohito and Zdobnov,Evgeny and Zhang,Peili and Zhang,Yu and Zimin,Aleksey V. 
and Baldwin,Jennifer and Abdouelleil,Amr and Abdulkadir,Jamal and Abebe,Adal and Abera,Brikti and Abreu,Justin and Acer,St Christophe and Aftuck,Lynne and Alexander,Allen and An,Peter and Anderson,Erica and Anderson,Scott and Arachi,Harindra and Azer,Marc and Bachantsang,Pasang and Barry,Andrew and Bayul,Tashi and Berlin,Aaron and Bessette,Daniel and Bloom,Toby and Blye,Jason and Boguslavskiy,Leonid and Bonnet,Claude and Boukhgalter,Boris and Bourzgui,Imane and Brown,Adam and Cahill,Patrick and Channer,Sheridon and Cheshatsang,Yama and Chuda,Lisa and Citroen,Mieke and Collymore,Alville and Cooke,Patrick and Costello,Maura and D{\textquoteright}Aco,Katie and Daza,Riza and Haan,Georgius De and DeGray,Stuart and DeMaso,Christina and Dhargay,Norbu and Dooley,Kimberly and Dooley,Erin and Doricent,Missole and Dorje,Passang and Dorjee,Kunsang and Dupes,Alan and Elong,Richard and Falk,Jill and Farina,Abderrahim and Faro,Susan and Ferguson,Diallo and Fisher,Sheila and Foley,Chelsea D. and Franke,Alicia and Friedrich,Dennis and Gadbois,Loryn and Gearin,Gary and Gearin,Christina R. and Giannoukos,Georgia and Goode,Tina and Graham,Joseph and Grandbois,Edward and Grewal,Sharleen and Gyaltsen,Kunsang and Hafez,Nabil and Hagos,Birhane and Hall,Jennifer and Henson,Charlotte and Hollinger,Andrew and Honan,Tracey and Huard,Monika D. and Hughes,Leanne and Hurhula,Brian and Husby,M Erii and Kamat,Asha and Kanga,Ben and Kashin,Seva and Khazanovich,Dmitry and Kisner,Peter and Lance,Krista and Lara,Marcia and Lee,William and Lennon,Niall and Letendre,Frances and LeVine,Rosie and Lipovsky,Alex and Liu,Xiaohong and Liu,Jinlei and Liu,Shangtao and Lokyitsang,Tashi and Lokyitsang,Yeshi and Lubonja,Rakela and Lui,Annie and MacDonald,Pen and Magnisalis,Vasilia and Maru,Kebede and Matthews,Charles and McCusker,William and McDonough,Susan and Mehta,Teena and Meldrim,James and Meneus,Louis and Mihai,Oana and Mihalev,Atanas and Mihova,Tanya and Mittelman,Rachel and Mlenga,Valentine and Montmayeur,Anna and Mulrain,Leonidas and Navidi,Adam and Naylor,Jerome and Negash,Tamrat and Nguyen,Thu and Nguyen,Nga and Nicol,Robert and Norbu,Choe and Norbu,Nyima and Novod,Nathaniel and O{\textquoteright}Neill,Barry and Osman,Sahal and Markiewicz,Eva and Oyono,Otero L. and Patti,Christopher and Phunkhang,Pema and Pierre,Fritz and Priest,Margaret and Raghuraman,Sujaa and Rege,Filip and Reyes,Rebecca and Rise,Cecil and Rogov,Peter and Ross,Keenan and Ryan,Elizabeth and Settipalli,Sampath and Shea,Terry and Sherpa,Ngawang and Shi,Lu and Shih,Diana and Sparrow,Todd and Spaulding,Jessica and Stalker,John and Stange-Thomann,Nicole and Stavropoulos,Sharon and Stone,Catherine and Strader,Christopher and Tesfaye,Senait and Thomson,Talene and Thoulutsang,Yama and Thoulutsang,Dawa and Topham,Kerri and Topping,Ira and Tsamla,Tsamla and Vassiliev,Helen and Vo,Andy and Wangchuk,Tsering and Wangdi,Tsering and Weiand,Michael and Wilkinson,Jane and Wilson,Adam and Yadav,Shailendra and Young,Geneva and Yu,Qing and Zembek,Lisa and Zhong,Danni and Zimmer,Andrew and Zwirko,Zac and Jaffe,David B. 
and Alvarez,Pablo and Brockman,Will and Butler,Jonathan and Chin,CheeWhye and Gnerre,Sante and Grabherr,Manfred and Kleber,Michael and Mauceli,Evan and MacCallum,Iain} } @inbook {18904, title = {Finding Most Probable Worlds of Probabilistic Logic Programs}, booktitle = {Scalable Uncertainty Management}, series = {Lecture Notes in Computer Science}, volume = {4772}, year = {2007}, month = {2007///}, pages = {45 - 59}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Probabilistic logic programs have primarily studied the problem of entailment of probabilistic atoms. However, there are some interesting applications where we are interested in finding a possible world that is most probable. Our first result shows that the problem of computing such {\textquotedblleft}maximally probable worlds{\textquotedblright} (MPW) is intractable. We subsequently show that we can often greatly reduce the size of the linear program used in past work (by Ng and Subrahmanian) and yet solve the problem exactly. However, the intractability results still make exact computation infeasible in general. We therefore also develop several heuristics to solve the MPW problem and report extensive experimental results on the accuracy and efficiency of such heuristics.}, keywords = {Computer science}, isbn = {978-3-540-75407-7}, url = {http://www.springerlink.com/content/e4463p4rv4k01u93/abstract/}, author = {Khuller, Samir and Martinez,Vanina and Nau, Dana S. and Simari,Gerardo and Sliva,Amy and Subrahmanian,V.}, editor = {Prade,Henri and Subrahmanian,V.} } @conference {18838, title = {Generating Multi-Stage Molding Plans for Articulated Assemblies}, year = {2007}, month = {2007/07//}, pages = {56 - 63}, abstract = {Multi-stage molding is capable of producing better-quality articulated products at a lower cost. During the multi-stage molding process, assembly operations are performed along with the molding operations. Hence, it adds new constraints to the assembly planning problem. This paper introduces the problem of generating multi-stage molding plans for articulated assemblies. We present detailed algorithms for determining the molding sequence and intermediate assembly configurations.}, keywords = {articulated assemblies, articulated products, assembly planning, assembly planning problem, intermediate assembly configuration, moulding, multistage molding}, doi = {10.1109/ISAM.2007.4288449}, author = {Priyadarshi,A. K. and Gupta,S.K.} } @article {15279, title = {Genome Analysis Linking Recent European and African Influenza (H5N1) Viruses}, journal = {Emerging Infectious Diseases}, volume = {13}, year = {2007}, month = {2007/05//}, pages = {713 - 718}, abstract = {Although linked, these viruses are distinct from earlier outbreak strains. To better understand the ecology and epidemiology of the highly pathogenic avian influenza virus in its transcontinental spread, we sequenced and analyzed the complete genomes of 36 recent influenza A (H5N1) viruses collected from birds in Europe, northern Africa, and southeastern Asia. These sequences, among the first complete genomes of influenza (H5N1) viruses outside Asia, clearly depict the lineages now infecting wild and domestic birds in Europe and Africa and show the relationships among these isolates and other strains affecting both birds and humans. The isolates fall into 3 distinct lineages, 1 of which contains all known non-Asian isolates.
This new Euro-African lineage, which was the cause of several recent (2006) fatal human infections in Egypt and Iraq, has been introduced at least 3 times into the European-African region and has split into 3 distinct, independently evolving sublineages. One isolate provides evidence that 2 of these sublineages have recently reassorted.}, isbn = {1080-6040}, doi = {10.3201/eid1305.070013}, author = {Salzberg,Steven L. and Kingsford, Carl and Cattoli,Giovanni and Spiro,David J. and Janies,Daniel A. and Aly,Mona Mehrez and Brown,Ian H. and Couacy-Hymann,Emmanuel and De Mia,Gian Mario and Dung,Do Huu and Guercio,Annalisa and Joannis,Tony and Ali,Ali Safar Maken and Osmani,Azizullah and Padalino,Iolanda and Saad,Magdi D. and Savi{\'c},Vladimir and Sengamalay,Naomi A. and Yingst,Samuel and Zaborsky,Jennifer and Zorman-Rojs,Olga and Ghedin,Elodie and Capua,Ilaria} } @conference {17771, title = {GRIN: A graph based RDF index}, booktitle = {Proceedings of the National Conference on Artificial Intelligence}, volume = {22}, year = {2007}, month = {2007///}, pages = {1465}, abstract = {RDF ({\textquotedblleft}Resource Description Framework{\textquotedblright}) is now a widely used World Wide Web Consortium standard. However, methods to index large volumes of RDF data are still in their infancy. In this paper, we focus on providing a very lightweight indexing mechanism for certain kinds of RDF queries, namely graph-based queries where there is a need to traverse edges in the graph determined by an RDF database. Our approach uses the idea of drawing circles around selected {\textquotedblleft}center{\textquotedblright} vertices in the graph where the circle would encompass those vertices in the graph that are within a given distance of the {\textquotedblleft}center{\textquotedblright} vertex. We come up with methods of finding such {\textquotedblleft}center{\textquotedblright} vertices and identifying the radius of the circles and then leverage this to build an index called GRIN. We compare GRIN with three existing RDF indexes: Jena, Sesame, and RDFBroker. We compared (i) the time to answer graph based queries, (ii) memory needed to store the index, and (iii) the time to build the index. GRIN outperforms Jena, Sesame and RDFBroker on all three measures for graph based queries (for other types of queries, it may be worth building one of these other indexes and using it), at the expense of using a larger amount of memory when answering queries.}, author = {Udrea,O. and Pugliese, A. and V.S. Subrahmanian} } @conference {19449, title = {Grow and Know: Understanding Record-keeping Needs for Tracking the Development of Young Children}, booktitle = {SIGCHI {\textquoteright}07}, series = {CHI {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {1351 - 1360}, publisher = {ACM}, organization = {ACM}, abstract = {From birth through age five, children undergo rapid development and learn skills that will influence them their entire lives. Regular visits to the pediatrician and detailed record-keeping can ensure that children are progressing and can identify early warning signs of developmental delay or disability. However, new parents are often overwhelmed with new responsibilities, and we believe there is an opportunity for computing technology to assist in this process. In this paper, we present a qualitative study aimed at uncovering some specific needs for record-keeping and analysis for new parents and their network of caregivers.
Through interviews and focus groups, we have confirmed assumptions about the rationales parents have and the functions required for using technology for record-keeping. We also identify new themes, potential prototypes, and design guidelines for this domain.}, keywords = {children, design requirements, developmental delay, healthcare, qualitative study}, isbn = {978-1-59593-593-9}, url = {http://doi.acm.org/10.1145/1240624.1240830}, author = {Kientz, Julie A. and Arriaga, Rosa I. and Marshini Chetty and Hayes, Gillian R. and Richardson, Jahmeilah and Patel, Shwetak N. and Abowd, Gregory D.} } @article {17183, title = {Hawkeye: an interactive visual analytics tool for genome assemblies}, journal = {Genome Biology}, volume = {8}, year = {2007}, month = {2007/03/09/}, pages = {R34 - R34}, abstract = {Genome sequencing remains an inexact science, and genome sequences can contain significant errors if they are not carefully examined. Hawkeye is our new visual analytics tool for genome assemblies, designed to aid in identifying and correcting assembly errors. Users can analyze all levels of an assembly along with summary statistics and assembly metrics, and are guided by a ranking component towards likely mis-assemblies. Hawkeye is freely available and released as part of the open source AMOS project http://amos.sourceforge.net/hawkeye.}, isbn = {1465-6906}, doi = {10.1186/gb-2007-8-3-r34}, url = {http://genomebiology.com/2007/8/3/R34}, author = {Schatz,Michael C and Phillippy,Adam M and Shneiderman, Ben and Salzberg,Steven L.} } @conference {15986, title = {Hood College, Master of Business Administration, 2005 Hood College, Master of Science (Computer Science), 2001 Hood College, Bachelor of Science (Computer Science), 1998 Frederick Community College, Associate in Arts (Business Administration), 1993}, booktitle = {Proceedings of the Workshop on Metareasoning in Agent-Based Systems}, year = {2007}, month = {2007///}, author = {Anderson,M. L and Schmill,M. and Oates,T. and Perlis, Don and Josyula,D. and Wright,D. and Human,S. W.T.D.N and Metacognition,L. and Fults,S. and Josyula,D. P} } @article {13151, title = {Human activity understanding using visibility context}, journal = {IEEE/RSJ IROS Workshop: From sensors to human spatial concepts (FS2HSC)}, year = {2007}, month = {2007///}, abstract = {Visibility in architectural layouts affects human navigation, so a suitable representation of visibility context is useful in understanding human activity. Motivated by studies of spatial behavior, we use a set of features from visibility analysis to represent spatial context in the interpretation of human activity. An agent{\textquoteright}s goal, belief about the world, trajectory and visible layout are considered to be random variables that evolve with time during the agent{\textquoteright}s movement, and are modeled in a Bayesian framework. We design a search-based task in a sprite-world, and compare the results of our framework to those of human subject experiments. Our findings confirm that knowledge of spatial layout improves human interpretations of the trajectories (implying that visibility context is useful in this task). Since our framework demonstrates performance close to that of human subjects with knowledge of spatial layout, our findings confirm that our model makes adequate use of visibility context. In addition, the representation we use for visibility context allows our model to generalize well when presented with new scenes. }, author = {Morariu,V.I. and Prasad,V.
S.N and Davis, Larry S.} } @inbook {17208, title = {Human Values for Shaping the Made World}, booktitle = {Human-Computer Interaction {\textendash} INTERACT 2007}, series = {Lecture Notes in Computer Science}, volume = {4662}, year = {2007}, month = {2007///}, pages = {1 - 1}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Interface design principles have been effective in shaping new desktop applications, web-based resources, and mobile devices. Usability and sociability promote successful online communities and social network services. The contributions of human-computer interaction researchers have been effective in raising the quality of design of many products and services. As our influence grows, we can play an even more profound role in guaranteeing that enduring human values are embedded in the next generation of technology. This talk identifies which goals are realistic, such as universality, responsibility, trust, empathy, and privacy, and how we might ensure that they become part of future services and systems.}, isbn = {978-3-540-74794-9}, url = {http://dx.doi.org/10.1007/978-3-540-74796-3_1}, author = {Shneiderman, Ben}, editor = {Baranauskas,C{\'e}cilia and Palanque,Philippe and Abascal,Julio and Barbosa,Simone} } @conference {14701, title = {Improving software quality with static analysis}, booktitle = {Proceedings of the 7th ACM SIGPLAN-SIGSOFT workshop on Program analysis for software tools and engineering}, series = {PASTE {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {83 - 84}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {At the University of Maryland, we have been working to improve the reliability and security of software by developing new, effective static analysis tools. These tools scan software for bug patterns or show that the software is free from a particular class of defects. There are two themes common to our different projects: 1. Our ultimate focus is on utility: can a programmer actually improve the quality of his or her software using an analysis tool? The important first step toward answering this question is to engineer tools so that they can analyze existing, nontrivial programs, and to carefully report the results of such analyses experimentally. The desire to better understand a more human-centered notion of utility underlies much of our future work. 2. We release all of our tools open source. This allows other researchers to verify our results, and to reuse some or all of our implementations, which often required significant effort to engineer. We believe that releasing source code is important for accelerating the pace of research on software quality, and just as importantly allows feedback from the wider community. In this research group presentation, we summarize some recent work and sketch future directions.}, keywords = {bug patterns, bugs, C, Data races, FFIs, java, modularity, network protocols, Software quality}, isbn = {978-1-59593-595-3}, doi = {10.1145/1251535.1251549}, url = {http://doi.acm.org/10.1145/1251535.1251549}, author = {Foster, Jeffrey S. and Hicks, Michael W.
and Pugh, William} } @conference {16854, title = {Knowledge discovery using the sand spatial browser}, booktitle = {Proceedings of the 8th annual international conference on Digital government research: bridging disciplines \& domains}, series = {dg.o {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {284 - 285}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {The use of the SAND Internet Browser as a knowledge discovery tool for epidemiological cartography is highlighted by recreating the results of Dr. John Snow{\textquoteright}s study of the 1854 Cholera epidemic in Soho, London.}, keywords = {distance semi-join, knowledge discovery, sand database system, snow cholera map}, isbn = {1-59593-599-1}, url = {http://dl.acm.org/citation.cfm?id=1248460.1248521}, author = {Samet, Hanan and Phillippy,Adam and Sankaranarayanan,Jagan} } @conference {17769, title = {MAGIC: A Multi-Activity Graph Index for Activity Detection}, booktitle = {Information Reuse and Integration, 2007. IRI 2007. IEEE International Conference on}, year = {2007}, month = {2007/08//}, pages = {267 - 272}, abstract = {Suppose we are given a set A of activities of interest, a set O of observations, and a probability threshold p. We are interested in finding the set of all pairs (a, O{\textquoteright}), where $a \in A$ and $O' \subseteq O$, that minimally validate the fact that an instance of activity a occurs in O with probability p or more. The novel contribution of this paper is the notion of the multi-activity graph index (MAGIC), which can index very large numbers of observations from interleaved activities and quickly retrieve completed instances of the monitored activities. We introduce two complexity reducing restrictions of the problem (which takes exponential time) and develop algorithms for each. We experimentally evaluate our exponential algorithm as well as the restricted algorithms on both synthetic data and a real (depersonalized) travel data set consisting of 5.5 million observations. Our experiments show that MAGIC consumes reasonable amounts of memory and can retrieve completed instances of activities in just a few seconds. We also report appropriate statistical significance results validating our experimental hypotheses.}, keywords = {MAGIC; activity detection; complexity reducing restrictions; computational complexity; exponential time; graph theory; multiactivity graph index; probability threshold}, doi = {10.1109/IRI.2007.4296632}, author = {Albanese, M. and Pugliese, A. and V.S. Subrahmanian and Udrea,O.} } @article {18859, title = {Manufacturing multi-material articulated plastic products using in-mold assembly}, journal = {The International Journal of Advanced Manufacturing Technology}, volume = {32}, year = {2007}, month = {2007///}, pages = {350 - 365}, abstract = {In-mold assembly can be used to create plastic products with articulated joints. This process eliminates the need for post-molding assembly and reduces the number of parts being used in the product, hence improving the product quality. However, designing both products and molds is significantly more challenging in the case of in-mold assembly. Currently, a systematic methodology does not exist for developing products and processes to exploit potential benefits of in-mold assembly for creating articulated joints. This paper is a step towards creating such a methodology and reports the following three results.
First, it presents a model for designing assemblies and molding processes so that the joint clearances and variation in the joint clearances can meet the performance goals. Second, it describes proven mold design templates for realizing revolute, prismatic, and spherical joints. Third, it describes a mold design methodology for designing molds for products that contain articulated joints and will be produced using the in-mold assembly process. Three case studies are also presented to illustrate how the in-mold assembly process can be used to create articulated devices.}, keywords = {engineering}, isbn = {0268-3768}, doi = {10.1007/s00170-005-0343-z}, url = {http://www.springerlink.com/content/q77nl78827r7w461/abstract/}, author = {Priyadarshi,Alok and Gupta, Satyandra K. and Gouker,Regina and Krebs,Florian and Shroeder,Martin and Warth,Stefan} } @conference {15910, title = {An M-channel directional filter bank compatible with the contourlet and shearlet frequency tiling}, booktitle = {Proceedings of SPIE}, volume = {6701}, year = {2007}, month = {2007///}, pages = {67010C - 67010C}, author = {Easley,G. R and Patel, Vishal M. and Healy Jr,D.M.} } @conference {13828, title = {Measuring variability in sentence ordering for news summarization}, booktitle = {Proceedings of the Eleventh European Workshop on Natural Language Generation}, series = {ENLG {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {81 - 88}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {The issue of sentence ordering is an important one for natural language tasks such as multi-document summarization, yet there has not been a quantitative exploration of the range of acceptable sentence orderings for short texts. We present results of a sentence reordering experiment with three experimental conditions. Our findings indicate a very high degree of variability in the orderings that the eighteen subjects produce. In addition, the variability of reorderings is significantly greater when the initial ordering seen by subjects is different from the original summary. We conclude that evaluation of sentence ordering should use multiple reference orderings. Our evaluation presents several metrics that might prove useful in assessing against multiple references. We conclude with a deeper set of questions: (a) what sorts of independent assessments of quality of the different reference orderings could be made and (b) whether a large enough test set would obviate the need for such independent means of quality assessment.}, url = {http://dl.acm.org/citation.cfm?id=1610163.1610177}, author = {Madnani,Nitin and Passonneau,Rebecca and Ayan,Necip Fazil and Conroy,John M. and Dorr, Bonnie J and Klavans,Judith L. and O{\textquoteright}Leary, Dianne P. and Schlesinger,Judith D.} } @article {14088, title = {Microarray analysis of gene expression induced by sexual contact in Schistosoma mansoni}, journal = {BMC Genomics}, volume = {8}, year = {2007}, month = {2007///}, pages = {181 - 181}, abstract = {BACKGROUND: The parasitic trematode Schistosoma mansoni is one of the major causative agents of Schistosomiasis, a disease that affects approximately 200 million people, mostly in developing countries. Since much of the pathology is associated with eggs laid by the female worm, understanding the mechanisms involved in oogenesis and sexual maturation is an important step towards the discovery of new targets for effective drug therapy.
It is known that the adult female worm only develops fully in the presence of a male worm and that the rates of oviposition and maturation of eggs are significantly increased by mating. In order to study gene transcripts associated with sexual maturation and oviposition, we compared the gene expression profiles of sexually mature and immature parasites using DNA microarrays. RESULTS: For each experiment, three amplified RNA microarray hybridizations and their dye swaps were analyzed. Our results show that 265 transcripts are differentially expressed in adult females and 53 in adult males when mature and immature worms are compared. Of the genes differentially expressed, 55\% are expressed at higher levels in paired females while the remaining 45\% are more expressed in unpaired ones and 56.6\% are expressed at higher levels in paired male worms while the remaining 43.4\% are more expressed in immature parasites. Real-time RT-PCR analysis validated the microarray results. Several new maturation associated transcripts were identified. Genes that were up-regulated in single-sex females were mostly related to energy generation (i.e. carbohydrate and protein metabolism, generation of precursor metabolites and energy, cellular catabolism, and organelle organization and biogenesis) while genes that were down-regulated related to RNA metabolism, reactive oxygen species metabolism, electron transport, organelle organization and biogenesis and protein biosynthesis. CONCLUSION: Our results confirm previous observations related to gene expression induced by sexual maturation in female schistosome worms. They also increase the list of S. mansoni maturation associated transcripts considerably, therefore opening new and exciting avenues for the study of the conjugal biology and development of new drugs against schistosomes.}, isbn = {1471-2164}, doi = {10.1186/1471-2164-8-181}, url = {http://www.biomedcentral.com/1471-2164/8/181}, author = {Waisberg,Michael and Lobo,Francisco and Cerqueira,Gustavo and Passos,Liana and Carvalho,Omar and Franco,Gloria and El-Sayed, Najib M.} } @article {16279, title = {Minimus: a fast, lightweight genome assembler}, journal = {BMC Bioinformatics}, volume = {8}, year = {2007}, month = {2007/02/26/}, pages = {64 - 64}, abstract = {Genome assemblers have grown very large and complex in response to the need for algorithms to handle the challenges of large whole-genome sequencing projects. Many of the most common uses of assemblers, however, are best served by a simpler type of assembler that requires fewer software components, uses less memory, and is far easier to install and run.}, isbn = {1471-2105}, doi = {10.1186/1471-2105-8-64}, url = {http://www.biomedcentral.com/1471-2105/8/64}, author = {Sommer,Daniel D and Delcher,Arthur L. and Salzberg,Steven L. and Pop, Mihai} } @article {18735, title = {Model and algorithms for point cloud construction using digital projection patterns}, journal = {Journal of Computing and Information Science in Engineering}, volume = {7}, year = {2007}, month = {2007///}, pages = {372 - 372}, author = {Peng,T. and Gupta,S.K.} } @conference {19036, title = {Multi-Dimensional Range Query over Encrypted Data}, year = {2007}, month = {2007}, pages = {350 - 364}, abstract = {We design an encryption scheme called multi-dimensional range query over encrypted data (MRQED), to address the privacy concerns related to the sharing of network audit logs and various other applications.
Our scheme allows a network gateway to encrypt summaries of network flows before submitting them to an untrusted repository. When network intrusions are suspected, an authority can release a key to an auditor, allowing the auditor to decrypt flows whose attributes (e.g., source and destination addresses, port numbers, etc.) fall within specific ranges. However, the privacy of all irrelevant flows is still preserved. We formally define the security for MRQED and prove the security of our construction under the decision bilinear Diffie-Hellman and decision linear assumptions in certain bilinear groups. We study the practical performance of our construction in the context of network audit logs. Apart from network audit logs, our scheme also has interesting applications for financial audit logs, medical privacy, untrusted remote storage, etc. In particular, we show that MRQED implies a solution to its dual problem, which enables investors to trade stocks through a broker in a privacy-preserving manner.}, keywords = {auditing, cryptography, data privacy, decision bilinear Diffie-Hellman, decision linear assumptions, encrypted data, multi-dimensional range query, network audit logs, network gateway, network intrusions}, author = {Elaine Shi and Bethencourt, J. and Chan, T.-H.H. and Song, Dawn and Perrig, A.} } @article {12214, title = {NetLens: iterative exploration of content-actor network data}, journal = {Information Visualization}, volume = {6}, year = {2007}, month = {2007///}, pages = {18 - 31}, author = {Kang,H. and Plaisant, Catherine and Lee,B. and Bederson, Benjamin B.} } @article {16077, title = {NetLens: Iterative Exploration of Content-Actor Network Data}, journal = {Information Visualization}, volume = {6}, year = {2007}, month = {2007/03/20/}, pages = {18 - 31}, abstract = {Networks have remained a challenge for information retrieval and visualization because of the rich set of tasks that users want to accomplish. This paper offers an abstract Content-Actor network data model, a classification of tasks, and a tool to support them. The NetLens interface was designed around the abstract Content-Actor network data model to allow users to pose a series of elementary queries and iteratively refine visual overviews and sorted lists. This enables the support of complex queries that are traditionally hard to specify. NetLens is general and scalable in that it applies to any data set that can be represented with our abstract data model. This paper describes the use of NetLens with a subset of the ACM Digital Library consisting of about 4000 papers from the CHI conference written by about 6000 authors, and reports on a usability study with nine participants.}, keywords = {content-actor network data, digital library, incremental data exploration, iterative query refinement, User interfaces}, isbn = {1473-8716, 1473-8724}, doi = {10.1057/palgrave.ivs.9500143}, url = {http://ivi.sagepub.com/content/6/1/18}, author = {Kang,Hyunmo and Plaisant, Catherine and Lee,Bongshin and Bederson, Benjamin B.} } @conference {15997, title = {Ontologies for reasoning about failures in AI systems}, booktitle = {Proceedings from the Workshop on Metareasoning in Agent Based Systems at the Sixth International Joint Conference on Autonomous Agents and Multiagent Systems}, year = {2007}, month = {2007///}, author = {Schmill,M. and Josyula,D. and Anderson,M. L and Wilson,S. and Oates,T.
and Perlis, Don and Fults,S.} } @article {16860, title = {Out-of-core Multi-resolution Terrain Modeling}, journal = {Spatial data on the Web: modeling and management}, year = {2007}, month = {2007///}, pages = {43 - 43}, author = {Danovaro,E. and De Floriani, Leila and Puppo,E. and Samet, Hanan} } @article {12352, title = {Parameterized Looped Schedules for Compact Representation of Execution Sequences in DSP Hardware and Software Implementation}, journal = {IEEE Transactions on Signal Processing}, volume = {55}, year = {2007}, month = {2007/06//}, pages = {3126 - 3138}, abstract = {In this paper, we present a technique for compact representation of execution sequences in terms of efficient looping constructs. Here, by a looping construct, we mean a compact way of specifying a finite repetition of a set of execution primitives. Such compaction, which can be viewed as a form of hierarchical run-length encoding (RLE), has application in many very large scale integration (VLSI) signal processing contexts, including efficient control generation for Kahn processes on field-programmable gate arrays (FPGAs), and software synthesis for static dataflow models of computation. In this paper, we significantly generalize previous models for loop-based code compaction of digital signal processing (DSP) programs to yield a configurable code compression methodology that exhibits a broad range of achievable tradeoffs. Specifically, we formally develop and apply to DSP hardware and software synthesis a parameterizable loop scheduling approach with compact format, dynamic reconfigurability, and low-overhead decompression}, keywords = {Application software, array signal processing, code compression methodology, compact representation, Compaction, data compression, Design automation, Digital signal processing, digital signal processing chips, DSP, DSP hardware, embedded systems, Encoding, Field programmable gate arrays, field-programmable gate arrays (FPGAs), FPGA, Hardware, hierarchical runlength encoding, high-level synthesis, Kahn process, loop-based code compaction, looping construct, parameterized loop schedules, program compilers, reconfigurable design, runlength codes, scheduling, Signal generators, Signal processing, Signal synthesis, software engineering, software implementation, static dataflow models, Very large scale integration, VLSI}, isbn = {1053-587X}, doi = {10.1109/TSP.2007.893964}, author = {Ming-Yung Ko and Zissulescu,C. and Puthenpurayil,S. and Bhattacharyya, Shuvra S. and Kienhuis,B. and Deprettere,E. F} } @inbook {19623, title = {Parameterized st-Orientations of Graphs: Algorithms and Experiments}, booktitle = {Graph Drawing}, series = {Lecture Notes in Computer Science}, year = {2007}, month = {2007/01/01/}, pages = {220 - 233}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {st-orientations (st-numberings) or bipolar orientations of undirected graphs are central to many graph algorithms and applications. Several algorithms have been proposed in the past to compute an st-orientation of a biconnected graph. However, as indicated in [1], the computation of more than one st-orientation is very important for many applications in multiple research areas, such as that of Graph Drawing. In this paper we show how to compute such orientations with certain (parameterized) characteristics in the final st-oriented graph, such as the length of the longest path.
Apart from Graph Drawing, this work applies in other areas such as Network Routing and in tackling difficult problems such as Graph Coloring and Longest Path. We present primary approaches to the problem of computing longest path parameterized st-orientations of graphs, an analytical presentation (together with proof of correctness) of a new $O(m\log^5 n)$ ($O(m\log n)$ for planar graphs) time algorithm that computes such orientations (and which was used in [1]) and extensive computational results that reveal the robustness of the algorithm.}, keywords = {Algorithm Analysis and Problem Complexity, Computer Graphics, Data structures, Discrete Mathematics in Computer Science}, isbn = {978-3-540-70903-9, 978-3-540-70904-6}, url = {http://link.springer.com/chapter/10.1007/978-3-540-70904-6_22}, author = {Charalampos Papamanthou and Tollis, Ioannis G.}, editor = {Kaufmann, Michael and Wagner, Dorothea} } @conference {19040, title = {Portcullis: protecting connection setup from denial-of-capability attacks}, series = {SIGCOMM {\textquoteright}07}, year = {2007}, month = {2007}, pages = {289 - 300}, publisher = {ACM}, organization = {ACM}, abstract = {Systems using capabilities to provide preferential service to selected flows have been proposed as a defense against large-scale network denial-of-service attacks. While these systems offer strong protection for established network flows, the Denial-of-Capability (DoC) attack, which prevents new capability-setup packets from reaching the destination, limits the value of these systems. Portcullis mitigates DoC attacks by allocating scarce link bandwidth for connection establishment packets based on per-computation fairness. We prove that a legitimate sender can establish a capability with high probability regardless of an attacker{\textquoteright}s resources or strategy and that no system can improve on our guarantee. We simulate full and partial deployments of Portcullis on an Internet-scale topology to confirm our theoretical results and demonstrate the substantial benefits of using per-computation fairness.}, keywords = {network capability, per-computation fairness}, isbn = {978-1-59593-713-1}, url = {http://doi.acm.org/10.1145/1282380.1282413}, author = {Parno, Bryan and Wendlandt, Dan and Elaine Shi and Perrig, Adrian and Maggs, Bruce and Hu, Yih-Chun} } @conference {18886, title = {Probabilistic go theories}, year = {2007}, month = {2007///}, abstract = {There are numerous cases where we need to reason about vehicles whose intentions and itineraries are not known in advance to us. For example, Coast Guard agents tracking boats don{\textquoteright}t always know where they are headed. Likewise, in drug enforcement applications, it is not always clear where drug-carrying airplanes (which do often show up on radar) are headed, and how legitimate planes with an approved flight manifest can avoid them. Likewise, traffic planners may want to understand how many vehicles will be on a given road at a given time. Past work on reasoning about vehicles (such as the {\textquotedblleft}logic of motion{\textquotedblright} by Yaman et al. [Yaman et al., 2004]) only deals with vehicles whose plans are known in advance and doesn{\textquoteright}t capture such situations. In this paper, we develop a formal probabilistic extension of their work and show that it captures both vehicles whose itineraries are known, and those whose itineraries are not known. We show how to correctly answer certain queries against a set of statements about such vehicles.
A prototype implementation shows our system to work efficiently in practice.}, url = {http://www.aaai.org/Papers/IJCAI/2007/IJCAI07-079.pdf}, author = {Parker,A. and Yaman,F. and Nau, Dana S. and V.S. Subrahmanian} } @inbook {14410, title = {Probabilistic Relational Models}, booktitle = {Introduction to Statistical Relational Learning}, year = {2007}, month = {2007///}, pages = {129 - 129}, author = {Getoor, Lise and Friedman,N. and Koller,D. and Pfeffer,A. and Taskar,B.} } @article {17644, title = {Provable algorithms for joint optimization of transport, routing and MAC layers in wireless ad hoc networks}, journal = {Proc. DialM-POMC Workshop on Foundations of Mobile Computing}, year = {2007}, month = {2007///}, author = {Kumar,V. S.A and Marathe,M. V and Parthasarathy,S. and Srinivasan, Aravind} } @article {12919, title = {Recovery in culture of viable but nonculturable Vibrio parahaemolyticus: regrowth or resuscitation?}, journal = {The ISME Journal}, volume = {1}, year = {2007}, month = {2007/05/10/}, pages = {111 - 120}, abstract = {The objective of this study was to explore the recovery of culturability of viable but nonculturable (VBNC) Vibrio parahaemolyticus after temperature upshift and to determine whether regrowth or resuscitation occurred. A clinical strain of V. parahaemolyticus Vp5 was rendered VBNC by exposure to artificial seawater (ASW) at 4{\textdegree}C. Aliquots of the ASW suspension of cells (0.1, 1 and 10 ml) were subjected to increased temperatures of 20{\textdegree}C and 37{\textdegree}C. Culturability of the cells in the aliquots was monitored for colony formation on a rich medium and changes in morphology were measured by scanning (SEM) and transmission (TEM) electron microscopy. Samples of VBNC cells were fixed and examined by SEM, revealing a heterogeneous population comprising small cells and larger, flattened cells. Forty-eight hours after temperature upshift to 20{\textdegree}C or 37{\textdegree}C, both elongation and division by binary fission of the cells were observed, employing SEM and TEM, but only in the 10-ml aliquots. The results suggest that a portion of VBNC cells is able to undergo cell division. It is concluded that a portion of VBNC cells of V. parahaemolyticus subjected to cold temperatures remains viable. After temperature upshift, regrowth of those cells, rather than resuscitation of all bacteria of the initial inoculum, appears to be responsible for recovery of culturability of VBNC cells of V. parahaemolyticus.
Nutrient in filtrates of VBNC cells is hypothesized to allow growth of the temperature-responsive cells, with cell division occurring via binary fission, but also including an atypical, asymmetric cell division.}, keywords = {ecophysiology, ecosystems, environmental biotechnology, geomicrobiology, ISME J, microbe interactions, microbial communities, microbial ecology, microbial engineering, microbial epidemiology, microbial genomics, microorganisms}, isbn = {1751-7362}, doi = {10.1038/ismej.2007.1}, url = {http://www.nature.com/ismej/journal/v1/n2/full/ismej20071a.html}, author = {Coutard,Fran{\c{c}}ois and Crassous,Philippe and Droguet,Micka{\"e}l and Gobin,Eric and Rita R Colwell and Pommepuy,Monique and Hervio-Heath,Dominique} } @article {15476, title = {Reliable Effects Screening: A Distributed Continuous Quality Assurance Process for Monitoring Performance Degradation in Evolving Software Systems}, journal = {Software Engineering, IEEE Transactions on}, volume = {33}, year = {2007}, month = {2007/02//}, pages = {124 - 141}, abstract = {Developers of highly configurable performance-intensive software systems often use in-house performance-oriented "regression testing" to ensure that their modifications do not adversely affect their software{\textquoteright}s performance across its large configuration space. Unfortunately, time and resource constraints can limit in-house testing to a relatively small number of possible configurations, followed by unreliable extrapolation from these results to the entire configuration space. As a result, many performance bottlenecks escape detection until systems are fielded. In our earlier work, we improved the situation outlined above by developing an initial quality assurance process called "main effects screening". This process 1) executes formally designed experiments to identify an appropriate subset of configurations on which to base the performance-oriented regression testing, 2) executes benchmarks on this subset whenever the software changes, and 3) provides tool support for executing these actions on in-the-field and in-house computing resources. Our initial process had several limitations, however, since it was manually configured (which was tedious and error-prone) and relied on strong and untested assumptions for its accuracy (which made its use unacceptably risky in practice). This paper presents a new quality assurance process called "reliable effects screening" that provides three significant improvements to our earlier work. First, it allows developers to economically verify key assumptions during process execution. Second, it integrates several model-driven engineering tools to make process configuration and execution much easier and less error prone. Third, we evaluate this process via several feasibility studies of three large, widely used performance-intensive software frameworks.
Our results indicate that reliable effects screening can detect performance degradation in large-scale systems more reliably and with significantly fewer resources than conventional techniques}, keywords = {configuration subset, distributed continuous quality assurance process, evolving software systems, in house testing, main effects screening, performance bottlenecks, performance degradation monitoring, performance intensive software systems, process configuration, process execution, program testing, regression testing, reliable effects screening, software benchmarks, Software performance, software performance evaluation, Software quality, software reliability, tool support}, isbn = {0098-5589}, doi = {10.1109/TSE.2007.20}, author = {Yilmaz,C. and Porter, Adam and Krishna,A. S and Memon, Atif M. and Schmidt,D. C and Gokhale,A.S. and Natarajan,B.} } @book {17768, title = {Scalable Uncertainty Management: First International Conference, SUM 2007, Washington, DC, USA, October 10-12, 2007 : Proceedings}, year = {2007}, month = {2007/11/14/}, publisher = {Springer}, organization = {Springer}, abstract = {This book constitutes the refereed proceedings of the First International Conference on Scalable Uncertainty Management, SUM 2007, held in Washington, DC, USA, in October 2007. The 20 revised full papers presented were carefully reviewed and selected from numerous submissions for inclusion in the book. The papers address artificial intelligence researchers, database researchers, and practitioners, demonstrating theoretical techniques required to manage the uncertainty that arises in large-scale real-world applications. The papers deal with the following topics: uncertainty models, probabilistic logics, fuzzy logics, and annotated logics, inconsistency logics, database algebras and calculi, scalable database systems, spatial, temporal, mobile and multimedia databases, as well as implementations, and applications.}, keywords = {Artificial intelligence, Computers / Information Theory, Computers / Intelligence (AI) \& Semantics, Electronic books, Mathematics / Logic, Uncertainty (Information theory)}, isbn = {9783540754077}, author = {Prade,Henri and V.S. Subrahmanian} } @conference {13270, title = {A semantic web environment for digital shapes understanding}, booktitle = {Proceedings of the semantic and digital media technologies 2nd international conference on Semantic Multimedia}, series = {SAMT{\textquoteright}07}, year = {2007}, month = {2007///}, pages = {226 - 239}, publisher = {Springer-Verlag}, organization = {Springer-Verlag}, address = {Berlin, Heidelberg}, abstract = {In the last few years, the volume of multimedia content available on the Web significantly increased. This led to the need for techniques to handle such data. In this context, we see a growing interest in considering the Semantic Web in action and in the definition of tools capable of analyzing and organizing digital shape models. In this paper, we present a Semantic Web environment, be-SMART, for inspecting 3D shapes and for structuring and annotating such shapes according to ontology-driven metadata. Specifically, we describe in detail the first module of be-SMART, the Geometry and Topology Analyzer, and the algorithms we have developed for extracting geometrical and topological information from 3D shapes. We also describe the second module, the Topological Decomposer, which produces a graph-based representation of the decomposition of the shape into manifold components.
This is successively modified by the third and the fourth modules, which perform the automatic and manual segmentation of the manifold parts.}, keywords = {digital shapes, semantic annotation, semantic web, shape analysis, shape understanding}, isbn = {3-540-77033-X, 978-3-540-77033-6}, url = {http://dl.acm.org/citation.cfm?id=1780533.1780565}, author = {De Floriani, Leila and Hui,Annie and Papaleo,Laura and Huang,May and Hendler,James} } @conference {17780, title = {Sentiment analysis: Adjectives and adverbs are better than adjectives alone}, booktitle = {Proceedings of the International Conference on Weblogs and Social Media (ICWSM)}, year = {2007}, month = {2007///}, abstract = {Most past work on determining the strength of subjective expressions within a sentence or a document uses specific parts of speech such as adjectives, verbs and nouns. To date, there is almost no work on the use of adverbs in sentiment analysis, nor has there been any work on the use of adverb-adjective combinations (AACs). We propose an AAC-based sentiment analysis technique that uses a linguistic analysis of adverbs of degree. We define a set of general axioms (based on a classification of adverbs of degree into five categories) that all adverb scoring techniques must satisfy. Instead of aggregating scores of both adverbs and adjectives using simple scoring functions, we propose an axiomatic treatment of AACs based on the linguistic classification of adverbs. Three specific AAC scoring methods that satisfy the axioms are presented. We describe the results of experiments on an annotated set of 200 news articles (annotated by 10 students) and compare our algorithms with some existing sentiment analysis algorithms. We show that our results lead to higher accuracy based on Pearson correlation with human subjects. }, author = {Benamara,F. and Cesarano,C. and Picariello, A. and Reforgiato,D. and V.S. Subrahmanian} } @article {12572, title = {Signal Processing for Biometric Systems [DSP Forum]}, journal = {Signal Processing Magazine, IEEE}, volume = {24}, year = {2007}, month = {2007/11//}, pages = {146 - 152}, abstract = {This IEEE signal processing magazine (SPM) forum discusses signal processing applications, technologies, requirements, and standardization of biometric systems. The forum members bring their expert insights into issues such as biometric security, privacy, and multibiometric and fusion techniques. The invited forum members are Prof. Anil K. Jain of Michigan State University, Prof. Rama Chellappa of the University of Maryland, Dr. Stark C. Draper of the University of Wisconsin in Madison, Prof. Nasir Memon of Polytechnic University, and Dr. P. Jonathon Phillips of the National Institute of Standards and Technology. The moderator of the forum is Dr. Anthony Vetro of Mitsubishi Electric Research Labs, and associate editor of SPM.}, keywords = {biometrics (access control); biometric systems; biometric security; signal processing; signal processing magazine forum; standardization; fusion technique; multibiometric technique; signal technology}, isbn = {1053-5888}, doi = {10.1109/MSP.2007.905886}, author = {Jain, A.K. and Chellapa, Rama and Draper, S.C. and Memon, N. and Phillips,P.J. and Vetro, A.} } @conference {17360, title = {Similarity-Based Forecasting with Simultaneous Previews: A River Plot Interface for Time Series Forecasting}, booktitle = {Information Visualization, 2007. IV {\textquoteright}07.
11th International Conference}, year = {2007}, month = {2007/07/04/6}, pages = {191 - 196}, publisher = {IEEE}, organization = {IEEE}, abstract = {Time-series forecasting has a large number of applications. Users with a partial time series for auctions, new stock offerings, or industrial processes desire estimates of the future behavior. We present a data driven forecasting method and interface called similarity-based forecasting (SBF). A pattern matching search in an historical time series dataset produces a subset of curves similar to the partial time series. The forecast is displayed graphically as a river plot showing statistical information about the SBF subset. A forecasting preview interface allows users to interactively explore alternative pattern matching parameters and see multiple forecasts simultaneously. User testing with 8 users demonstrated advantages and led to improvements.}, keywords = {data driven forecasting method, data visualisation, Data visualization, Economic forecasting, forecasting preview interface, Graphical user interfaces, historical time series dataset, Laboratories, new stock offerings, partial time series, pattern matching, pattern matching search, Predictive models, river plot interface, Rivers, similarity-based forecasting, Smoothing methods, Technological innovation, Testing, time series, time series forecasting, Weather forecasting}, isbn = {0-7695-2900-3}, doi = {10.1109/IV.2007.101}, author = {Buono,P. and Plaisant, Catherine and Simeone,A. and Aris,A. and Shneiderman, Ben and Shmueli,G. and Jank,W.} } @article {15475, title = {Skoll: A Process and Infrastructure for Distributed Continuous Quality Assurance}, journal = {Software Engineering, IEEE Transactions on}, volume = {33}, year = {2007}, month = {2007/08//}, pages = {510 - 525}, abstract = {Software engineers increasingly emphasize agility and flexibility in their designs and development approaches. They increasingly use distributed development teams, rely on component assembly and deployment rather than green field code writing, rapidly evolve the system through incremental development and frequent updating, and use flexible product designs supporting extensive end-user customization. While agility and flexibility have many benefits, they also create an enormous number of potential system configurations built from rapidly changing component implementations. Since today{\textquoteright}s quality assurance (QA) techniques do not scale to handle highly configurable systems, we are developing and validating novel software QA processes and tools that leverage the extensive computing resources of user and developer communities in a distributed, continuous manner to improve software quality significantly. This paper provides several contributions to the study of distributed, continuous QA (DCQA). First, it shows the structure and functionality of Skoll, which is an environment that defines a generic around-the-world, around-the-clock QA process and several sophisticated tools that support this process. Second, it describes several novel QA processes built using the Skoll environment. Third, it presents two studies using Skoll: one involving user testing of the Mozilla browser and another involving continuous build, integration, and testing of the ACE+TAO communication software package. The results of our studies suggest that the Skoll environment can manage and control distributed continuous QA processes more effectively than conventional QA processes. 
For example, our DCQA processes rapidly identified problems that had taken the ACE+TAO developers much longer to find, several of which they had not found at all. Moreover, the automatic analysis of QA results provided developers with information that enabled them to quickly find the root causes of problems}, keywords = {ACE+TAO communication software package, component assembly, component deployment, distributed continuous quality assurance, distributed development teams, distributed processing, end-user customization, flexible product design, incremental development, object-oriented programming, Skoll, software engineering, Software quality, systems analysis}, isbn = {0098-5589}, doi = {10.1109/TSE.2007.70719}, author = {Porter, Adam and Yilmaz,C. and Memon, Atif M. and Schmidt,D. C and Natarajan,B.} } @conference {16299, title = {Software configuration management using ontologies}, booktitle = {3rd International Workshop on Semantic Web Enabled Software Engineering (SWESE 2007), Innsbruck, Austria}, year = {2007}, month = {2007///}, author = {Shahri,H. H and Hendler,J. A and Porter, Adam} } @article {16080, title = {Something that is interesting is interesting then: Using text mining and visualizations to aid interpreting repetition in Gertrude Stein{\textquoteright}s The Making of Americans}, journal = {Proceedings of the Digital Humanities Conference}, year = {2007}, month = {2007///}, pages = {40 - 44}, author = {Clement,T. and Don,A. and Plaisant, Catherine and Auvil,L. and Pape,G. and Goren,V.} } @article {16078, title = {Special Issue in Honor of Ben Shneiderman{\textquoteright}s 60th Birthday: Reflections on Human-Computer Interaction}, journal = {International Journal of Human-Computer Interaction}, volume = {23}, year = {2007}, month = {2007///}, pages = {195 - 204}, isbn = {1044-7318}, doi = {10.1080/10447310701702766}, url = {http://www.tandfonline.com/doi/abs/10.1080/10447310701702766}, author = {Plaisant, Catherine and North,Chris} } @article {17779, title = {Story creation from heterogeneous data sources}, journal = {Multimedia Tools and Applications}, volume = {33}, year = {2007}, month = {2007///}, pages = {351 - 377}, abstract = {There are numerous applications where there is a need to rapidly infer a story about a given subject from a given set of potentially heterogeneous data sources. In this paper, we formally define a story to be a set of facts about a given subject that satisfies a {\textquotedblleft}story length{\textquotedblright} constraint. An optimal story is a story that maximizes the value of an objective function measuring the goodness of a story. We present algorithms to extract stories from text and other data sources. We also develop an algorithm to compute an optimal story, as well as three heuristic algorithms to rapidly compute a suboptimal story. We run experiments to show that constructing stories can be efficiently performed and that the stories constructed by these heuristic algorithms are high-quality stories. We have built a prototype STORY system based on our model{\textemdash}we briefly describe the prototype as well as one application in this paper.}, doi = {10.1007/s11042-007-0100-4}, author = {Fayzullin,M. and V.S. Subrahmanian and Albanese, M. and Cesarano,C.
and Picariello, A.} } @article {15812, title = {Structural Footprinting in Protein Structure Comparison: The Impact of Structural Fragments}, journal = {BMC Structural Biology}, volume = {7}, year = {2007}, month = {2007///}, abstract = {Background: One approach for speeding up protein structure comparison is the projection approach, where a protein structure is mapped to a high-dimensional vector and structural similarity is approximated by distance between the corresponding vectors. Structural footprinting methods are projection methods that employ the same general technique to produce the mapping: first select a representative set of structural fragments as models and then map a protein structure to a vector in which each dimension corresponds to a particular model and "counts" the number of times the model appears in the structure. The main difference between any two structural footprinting methods is in the set of models they use; in fact a large number of methods can be generated by varying the type of structural fragments used and the amount of detail in their representation. How do these choices affect the ability of the method to detect various types of structural similarity? Results: To answer this question we benchmarked three structural footprinting methods that vary significantly in their selection of models against the CATH database. In the first set of experiments we compared the methods{\textquoteright} ability to detect structural similarity characteristic of evolutionarily related structures, i.e., structures within the same CATH superfamily. In the second set of experiments we tested the methods{\textquoteright} agreement with the boundaries imposed by classification groups at the Class, Architecture, and Fold levels of the CATH hierarchy. Conclusion: In both experiments we found that the method which uses secondary structure information has the best performance on average, but no one method performs consistently the best across all groups at a given classification level. We also found that combining the methods{\textquoteright} outputs significantly improves the performance. Moreover, our new techniques to measure and visualize the methods{\textquoteright} agreement with the CATH hierarchy, including the thresholded affinity graph, are useful beyond this work. In particular, they can be used to expose a similar composition of different classification groups in terms of structural fragments used by the method and thus provide an alternative demonstration of the continuous nature of the protein structure universe.}, url = {DOI:10.1186/1472-6807-7-53}, author = {Zotenko,Elena and Dogan,Rezarta Islamaj and Wilbur,W. John and O{\textquoteright}Leary, Dianne P. and Przytycka,Teresa M.} } @conference {13334, title = {Surface Segmentation through Concentrated Curvature}, booktitle = {Image Analysis and Processing, 2007. ICIAP 2007. 14th International Conference on}, year = {2007}, month = {2007/09//}, pages = {671 - 676}, abstract = {Curvature is one of the most relevant notions that links the metric properties of a surface to its geometry and to its topology (Gauss-Bonnet theorem). In the literature, a variety of approaches exist to compute curvatures in the discrete case. Several techniques are computationally intensive or suffer from convergence problems. In this paper, we discuss the notion of concentrated curvature, introduced by Troyanov [24].
We discuss properties of this curvature and compare with a widely used technique that estimates the Gaussian curvatures on a triangulated surface. We apply our STD method [13] for terrain segmentation to segment a surface by using different curvature approaches and we illustrate our comparisons through examples.}, keywords = {concentrated curvature; convergence problems; Gauss-Bonnet theorem; Gaussian curvatures; Gaussian processes; image segmentation; surface segmentation; terrain segmentation; triangulated surface}, doi = {10.1109/ICIAP.2007.4362854}, author = {Mesmoudi,M. M. and Danovaro,E. and De Floriani, Leila and Port,U.} } @article {16523, title = {Swarm Intelligence Systems Using Guided Self-Organization for Collective Problem Solving}, journal = {Advances in Complex Systems}, volume = {10}, year = {2007}, month = {2007///}, pages = {5 - 34}, author = {Rodriguez,A. and Grushin,A. and Reggia, James A. and Hauptman,Ami and Sipper,M. and Pan,Z. and Gao,D. and Darabos,C. and Giacobini,M. and others} } @conference {15906, title = {Task-based interaction with an integrated multilingual, multimedia information system: a formative evaluation}, booktitle = {Proceedings of the 7th ACM/IEEE-CS joint conference on Digital libraries}, series = {JCDL {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {117 - 126}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper describes a formative evaluation of an integrated multilingual, multimedia information system, a series of user studies designed to guide system development. The system includes automatic speech recognition for English, Chinese, and Arabic, automatic translation from Chinese and Arabic into English, and query-based and profile-based search options. The study design emphasizes repeated evaluation with the same (increasingly experienced) participants, exploration of alternative task designs, rich qualitative and quantitative data collection, and rapid analysis to provide the timely feedback needed to support iterative and responsive development. Results indicate that users presented with materials in a language that they do not know can generate remarkably useful work products, but that integration of transcription, translation, search and profile management poses challenges that would be less evident were each technology to be evaluated in isolation.}, keywords = {Cross-language information retrieval, multimedia, User studies}, isbn = {978-1-59593-644-8}, doi = {10.1145/1255175.1255199}, url = {http://doi.acm.org/10.1145/1255175.1255199}, author = {Zhang,Pengyi and Plettenberg,Lynne and Klavans,Judith L. and Oard, Douglas and Soergel,Dagobert} } @conference {12368, title = {A taxonomy for medical image registration acceleration techniques}, booktitle = {Life Science Systems and Applications Workshop, 2007. LISA 2007. IEEE/NIH}, year = {2007}, month = {2007///}, pages = {160 - 163}, author = {Plishker,W. and Dandekar,O. and Bhattacharyya, Shuvra S. and Shekhar,R.} } @article {16322, title = {Techniques for Classifying Executions of Deployed Software to Support Software Engineering Tasks}, journal = {IEEE Transactions on Software Engineering}, volume = {33}, year = {2007}, month = {2007///}, pages = {287 - 304}, abstract = {There is an increasing interest in techniques that support analysis and measurement of fielded software systems.
These techniques typically deploy numerous instrumented instances of a software system, collect execution data when the instances run in the field, and analyze the remotely collected data to better understand the system{\textquoteright}s in-the-field behavior. One common need for these techniques is the ability to distinguish execution outcomes (e.g., to collect only data corresponding to some behavior or to determine how often and under which condition a specific behavior occurs). Most current approaches, however, do not perform any kind of classification of remote executions and either focus on easily observable behaviors (e.g., crashes) or assume that outcomes{\textquoteright} classifications are externally provided (e.g., by the users). To address the limitations of existing approaches, we have developed three techniques for automatically classifying execution data as belonging to one of several classes. In this paper, we introduce our techniques and apply them to the binary classification of passing and failing behaviors. Our three techniques impose different overheads on program instances and, thus, each is appropriate for different application scenarios. We performed several empirical studies to evaluate and refine our techniques and to investigate the trade-offs among them. Our results show that 1) the first technique can build very accurate models, but requires a complete set of execution data; 2) the second technique produces slightly less accurate models, but needs only a small fraction of the total execution data; and 3) the third technique allows for even further cost reductions by building the models incrementally, but requires some sequential ordering of the software instances{\textquoteright} instrumentation.}, keywords = {execution classification, remote analysis/measurement.}, isbn = {0098-5589}, doi = {http://doi.ieeecomputersociety.org/10.1109/TSE.2007.1004}, author = {Haran,Murali and Karr,Alan and Last,Michael and Orso,Alessandro and Porter, Adam and Sanil,Ashish and Fouch{\'e},Sandro} } @inbook {19622, title = {Time and Space Efficient Algorithms for Two-Party Authenticated Data Structures}, booktitle = {Information and Communications Security}, series = {Lecture Notes in Computer Science}, year = {2007}, month = {2007/01/01/}, pages = {1 - 15}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Authentication is increasingly relevant to data management. Data is being outsourced to untrusted servers and clients want to securely update and query their data. For example, in database outsourcing, a client{\textquoteright}s database is stored and maintained by an untrusted server. Also, in simple storage systems, clients can store very large amounts of data but at the same time, they want to assure their integrity when they retrieve them. In this paper, we present a model and protocol for two-party authentication of data structures. Namely, a client outsources its data structure and verifies that the answers to the queries have not been tampered with. We provide efficient algorithms to securely outsource a skip list with logarithmic time overhead at the server and client and logarithmic communication cost, thus providing an efficient authentication primitive for outsourced data, both structured (e.g., relational databases) and semi-structured (e.g., XML documents). In our technique, the client uses only a constant amount of space, which is optimal.
Our two-party authentication framework can be deployed on top of existing storage applications, thus providing an efficient authentication service. Finally, we present experimental results that demonstrate the practical efficiency and scalability of our scheme.}, keywords = {Algorithm Analysis and Problem Complexity, Computer Communication Networks, computers and society, Data Encryption, Management of Computing and Information Systems, Systems and Data Security}, isbn = {978-3-540-77047-3, 978-3-540-77048-0}, url = {http://link.springer.com/chapter/10.1007/978-3-540-77048-0_1}, author = {Charalampos Papamanthou and Tamassia, Roberto}, editor = {Qing, Sihan and Imai, Hideki and Wang, Guilin} } @conference {16008, title = {Toward domain-neutral human-level metacognition}, booktitle = {AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning}, year = {2007}, month = {2007///}, author = {Anderson,M. L and Schmill,M. and Oates,T. and Perlis, Don and Josyula,D. and Wright,D. and Wilson,S.} } @conference {12331, title = {Towards a heterogeneous medical image registration acceleration platform}, booktitle = {Biomedical Circuits and Systems Conference, 2007. BIOCAS 2007. IEEE}, year = {2007}, month = {2007///}, pages = {231 - 234}, author = {Plishker,W. and Dandekar,O. and Bhattacharyya, Shuvra S. and Shekhar,R.} } @conference {13340, title = {Towards a semantic web system for understanding real world representations}, booktitle = {Proceedings of the Tenth International Conference on Computer Graphics and Artificial Intelligence}, year = {2007}, month = {2007///}, abstract = {As the volume of multimedia content available on the Web continues to increase, there is a clear need for more advanced techniques for effective retrieval and management of such data. In this context, we see a growing interest in exploiting the potential of the Semantic Web in different research fields. But, on the other hand, there is a general lack of tools able to analyse, organize and understand digital shape models (especially 3D models) for easily populating repositories with complete and well-detailed metadata. Several steps are necessary to create and associate semantics with a shape and some of them are clearly context-dependent. In this paper, we present the design details of our new system, which we call be-SMART, for inspecting digital 3D shapes by extracting geometrical and topological information from them and for structuring and annotating these shapes using ontology-driven metadata. In particular, we describe the general structure of the system, the different modules and their mutual relations. We then concentrate on the first two modules of the system showing how the 3D models are annotated following ontology-driven metadata. We also provide motivations for further work in developing new techniques for both annotating and managing 3D models on the Web. }, author = {Papaleo,L. and De Floriani, Leila and Hendler,J. and Hui,A.} } @article {18869, title = {Towards the development of a virtual environment-based training system for mechanical assembly operations}, journal = {Virtual Reality}, volume = {11}, year = {2007}, month = {2007///}, pages = {189 - 206}, abstract = {In this paper, we discuss the development of Virtual Training Studio (VTS), a virtual environment-based training system that allows training supervisors to create training instructions and allows trainees to learn assembly operations in a virtual environment.
Our system is mainly focused on the cognitive side of training so that trainees can learn to recognize parts, remember assembly sequences, and correctly orient the parts during assembly operations. Our system enables users to train using the following three training modes: (1) Interactive Simulation, (2) 3D Animation, and (3) Video. Implementing these training modes required us to develop several new system features. This paper presents an overview of the VTS system and describes a few main features of the system. We also report user test results that show how people train using our system. The user test results indicate that the system is able to support a wide variety of training preferences and works well to support training for assembly operations.}, keywords = {Computer science}, isbn = {1359-4338}, doi = {10.1007/s10055-007-0076-4}, url = {http://www.springerlink.com/content/g7760422m13l83x1/abstract/}, author = {Brough,John and Schwartz,Maxim and Gupta, Satyandra K. and Anand,Davinder and Kavetsky,Robert and Pettersen,Ralph} } @article {14572, title = {Variola virus topoisomerase: DNA cleavage specificity and distribution of sites in Poxvirus genomes}, journal = {Virology}, volume = {365}, year = {2007}, month = {2007/08/15/}, pages = {60 - 69}, abstract = {Topoisomerase enzymes regulate superhelical tension in DNA resulting from transcription, replication, repair, and other molecular transactions. Poxviruses encode an unusual type IB topoisomerase that acts only at conserved DNA sequences containing the core pentanucleotide 5{\textquoteright}-(T/C)CCTT-3{\textquoteright}. In X-ray structures of the variola virus topoisomerase bound to DNA, protein-DNA contacts were found to extend beyond the core pentanucleotide, indicating that the full recognition site has not yet been fully defined in functional studies. Here we report quantitation of DNA cleavage rates for an optimized 13~bp site and for all possible single base substitutions (40 total sites), with the goals of understanding the molecular mechanism of recognition and mapping topoisomerase sites in poxvirus genome sequences. The data allow a precise definition of enzyme-DNA interactions and the energetic contributions of each. We then used the resulting "action matrix" to show that favorable topoisomerase sites are distributed all along the length of poxvirus DNA sequences, consistent with a requirement for local release of superhelical tension in constrained topological domains. In orthopox genomes, an additional central cluster of sites was also evident. A negative correlation of predicted topoisomerase sites was seen relative to early terminators, but no correlation was seen with early or late promoters. These data define the full variola virus topoisomerase recognition site and provide a new window on topoisomerase function in vivo.}, keywords = {Annotation of topoisomerase sites, Sequence specific recognition, Topoisomerase IB, Variola virus}, isbn = {0042-6822}, doi = {10.1016/j.virol.2007.02.037}, url = {http://www.sciencedirect.com/science/article/pii/S0042682207001225}, author = {Minkah,Nana and Hwang,Young and Perry,Kay and Van Duyne,Gregory D. and Hendrickson,Robert and Lefkowitz,Elliot J. and Hannenhalli, Sridhar and Bushman,Frederic D.} } @conference {16074, title = {VAST 2007 Contest - Blue Iguanodon}, booktitle = {Visual Analytics Science and Technology, 2007. VAST 2007.
IEEE Symposium on}, year = {2007}, month = {2007/11/30/1}, pages = {231 - 232}, abstract = {Visual analytics experts realize that one effective way to push the field forward and to develop metrics for measuring the performance of various visual analytics components is to hold an annual competition. The second visual analytics science and technology (VAST) contest was held in conjunction with the 2007 IEEE VAST symposium. In this contest participants were to use visual analytic tools to explore a large heterogeneous data collection to construct a scenario and find evidence buried in the data of illegal and terrorist activities that were occurring. A synthetic data set was made available as well as tasks. In this paper we describe some of the advances we have made from the first competition held in 2006.}, keywords = {data visualisation, graphical user interfaces, illegal activities, large heterogeneous data collection, terrorist activities, visual analytic tools}, doi = {10.1109/VAST.2007.4389032}, author = {Grinstein,G. and Plaisant, Catherine and Laskowski,S. and O{\textquoteright}Connell,T. and Scholtz,J. and Whiting,M.} } @conference {15244, title = {Achieving anonymity via clustering}, booktitle = {Proceedings of the twenty-fifth ACM SIGMOD-SIGACT-SIGART symposium on Principles of database systems}, series = {PODS {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {153 - 162}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Publishing data for analysis from a table containing personal records, while maintaining individual privacy, is a problem of increasing importance today. The traditional approach of de-identifying records is to remove identifying fields such as social security number, name etc. However, recent research has shown that a large fraction of the US population can be identified using non-key attributes (called quasi-identifiers) such as date of birth, gender, and zip code [15]. Sweeney [16] proposed the k-anonymity model for privacy where non-key attributes that leak information are suppressed or generalized so that, for every record in the modified table, there are at least k-1 other records having exactly the same values for quasi-identifiers. We propose a new method for anonymizing data records, where quasi-identifiers of data records are first clustered and then cluster centers are published. To ensure privacy of the data records, we impose the constraint that each cluster must contain no fewer than a pre-specified number of data records. This technique is more general since we have a much larger choice for cluster centers than k-Anonymity. In many cases, it lets us release a lot more information without compromising privacy. We also provide constant-factor approximation algorithms to come up with such a clustering. This is the first set of algorithms for the anonymization problem where the performance is independent of the anonymity parameter k. We further observe that a few outlier points can significantly increase the cost of anonymization. Hence, we extend our algorithms to allow an ε fraction of points to remain unclustered, i.e., deleted from the anonymized publication. Thus, by not releasing a small fraction of the database records, we can ensure that the data published for analysis has less distortion and hence is more useful.
Our approximation algorithms for new clustering objectives are of independent interest and could be applicable in other clustering scenarios as well.}, keywords = {anonymity, Approximation algorithms, clustering, privacy}, isbn = {1-59593-318-2}, doi = {10.1145/1142351.1142374}, url = {http://doi.acm.org/10.1145/1142351.1142374}, author = {Aggarwal,Gagan and Feder,Tom{\'a}s and Kenthapadi,Krishnaram and Khuller, Samir and Panigrahy,Rina and Thomas,Dilys and Zhu,An} } @inbook {12623, title = {Acquisition of Articulated Human Body Models Using Multiple Cameras}, booktitle = {Articulated Motion and Deformable Objects}, series = {Lecture Notes in Computer Science}, volume = {4069}, year = {2006}, month = {2006///}, pages = {78 - 89}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Motion capture is an important application in different areas such as biomechanics, computer animation, and human-computer interaction. Current motion capture methods typically use human body models in order to guide pose estimation and tracking. We model the human body as a set of tapered super-quadrics connected in an articulated structure and propose an algorithm to automatically estimate the parameters of the model using video sequences obtained from multiple calibrated cameras. Our method is based on the fact that the human body is constructed of several articulated chains that can be visualised as essentially 1-D segments embedded in 3-D space and connected at specific joint locations. The proposed method first computes a voxel representation from the images and maps the voxels to a high dimensional space in order to extract the 1-D structure. A bottom-up approach is then suggested in order to build a parametric (spline-based) representation of a general articulated body in the high dimensional space followed by a top-down probabilistic approach that registers the segments to the known human body model. We then present an algorithm to estimate the parameters of our model using the segmented and registered voxels.}, isbn = {978-3-540-36031-5}, url = {http://dx.doi.org/10.1007/11789239_9}, author = {Sundaresan,Aravind and Chellappa, Rama}, editor = {Perales,Francisco and Fisher,Robert} } @conference {18758, title = {Algorithms for on-line monitoring of components in an optical tweezers-based assembly cell}, year = {2006}, month = {2006///}, pages = {1 - 13}, abstract = {Optical tweezers have emerged as a powerful tool for micro- and nanomanipulation. Using optical tweezers to perform automated assembly requires on-line monitoring of components in the assembly workspace. This paper presents algorithms for estimating positions and orientations of microscale and nanoscale components in the 3-Dimensional assembly workspace. Algorithms presented in this paper use images obtained by optical section microscopy. The images are first segmented to locate areas of interest and then image gradient information from the areas of interest is used to generate probable locations and orientations of components in the XY-plane. Finally, signature curves are computed and utilized to obtain component locations and orientations in 3-D space. We have tested these algorithms with silica micro-spheres as well as metallic nanowires. We believe that the algorithms described in this paper will provide the foundation for realizing automated assembly operations in optical tweezers-based assembly cells.
}, url = {http://www.glue.umd.edu/~skgupta/Publication/CIE06_Peng1.pdf}, author = {Peng,T. and Balijepalli,A. and Gupta,S.K. and LeBrun,T. W.} } @conference {13723, title = {Annotation compatibility working group report}, booktitle = {Proceedings of the Workshop on Frontiers in Linguistically Annotated Corpora 2006}, series = {LAC {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {38 - 53}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {This report explores the question of compatibility between annotation projects including translating annotation formalisms to each other or to common forms. Compatibility issues are crucial for systems that use the results of multiple annotation projects. We hope that this report will begin a concerted effort in the field to track the compatibility of annotation schemes for part of speech tagging, time annotation, treebanking, role labeling and other phenomena.}, isbn = {1-932432-78-7}, url = {http://dl.acm.org/citation.cfm?id=1641991.1641997}, author = {Meyers,A. and Fang,A. C. and Ferro,L. and K{\"u}bler,S. and Jia-Lin,T. and Palmer,M. and Poesio,M. and Dolbey,A. and Schuler,K. K. and Loper,E. and Zinsmeister,H. and Penn,G. and Xue,N. and Hinrichs,E. and Wiebe,J. and Pustejovsky,J. and Farwell,D. and Hajicova,E. and Dorr, Bonnie J and Hovy,E. and Onyshkevych,B. A. and Levin,L.} } @article {19020, title = {Anonymous multi-attribute encryption with range query and conditional decryption}, year = {2006}, month = {2006}, institution = {Carnegie Mellon University}, abstract = {We introduce the concept of Anonymous Multi-Attribute Encryption with Range Query and Conditional Decryption (AMERQCD). In AMERQCD, a plaintext is encrypted under a point in multi-dimensional space. To a computationally bounded adversary, the ciphertext hides both the plaintext and the point under which it is encrypted. In a range query, a master key owner releases the decryption key for an arbitrary hyper-rectangle in space, thus allowing decryption of ciphertexts previously encrypted under any point within the hyper-rectangle. However, a computationally bounded adversary cannot learn any information on ciphertexts outside the range covered by the decryption key (except the fact that they do not lie within this range). We give an efficient construction based on the Decision Bilinear Diffie-Hellman (D-BDH) and Decision Linear (D-Linear) assumption. }, number = {CMU-CS-06-135}, type = {Technical Report}, url = {http://reports-archive.adm.cs.cmu.edu/anon/anon/home/ftp/2006/CMU-CS-06-135.pdf}, author = {Bethencourt, J. and Chan, H. and Perrig, A. and Elaine Shi and Song,D.} } @inbook {19624, title = {Applications of Parameterized st-Orientations in Graph Drawing Algorithms}, booktitle = {Graph Drawing}, series = {Lecture Notes in Computer Science}, year = {2006}, month = {2006/01/01/}, pages = {355 - 367}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {Many graph drawing algorithms use st-numberings (st-orientations or bipolar orientations) as a first step. An st-numbering of a biconnected undirected graph defines a directed graph with no cycles, one single source s and one single sink t.
As there exist exponentially many st-numberings that correspond to a certain undirected graph G, using different st-numberings in various graph drawing algorithms can result in aesthetically different drawings with different area bounds. In this paper, we present results concerning new algorithms for parameterized st-orientations, their impact on graph drawing algorithms and especially in visibility representations.}, keywords = {Algorithm Analysis and Problem Complexity, Computer Graphics, Data structures, Discrete Mathematics in Computer Science}, isbn = {978-3-540-31425-7, 978-3-540-31667-1}, url = {http://link.springer.com/chapter/10.1007/11618058_32}, author = {Charalampos Papamanthou and Tollis, Ioannis G.}, editor = {Healy, Patrick and Nikolov, Nikola S.} } @conference {12093, title = {Applying flow-sensitive CQUAL to verify MINIX authorization check placement}, booktitle = {Proceedings of the 2006 workshop on Programming languages and analysis for security}, series = {PLAS {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {3 - 6}, publisher = {ACM}, organization = {ACM}, address = {Ottawa, Ontario, Canada}, abstract = {We present the first use of flow-sensitive CQUAL to verify the placement of operating system authorization checks. Our analysis of MINIX 3 system servers and discovery of a non-exploitable Time-Of-Check/Time-Of-Use bug demonstrate the effectiveness of flow sensitive CQUAL and its advantage over earlier flow-insensitive versions. We also identify and suggest alternatives to current CQUAL usability features that encourage analysts to make omissions that cause the otherwise sound tool to produce false-negative results.}, keywords = {access controls, cqual, minix}, isbn = {1-59593-374-3}, doi = {10.1145/1134744.1134747}, author = {Fraser,Timothy and Petroni,Jr. and Arbaugh, William A.} } @article {18653, title = {An architecture for adaptive intrusion-tolerant applications}, journal = {Software: Practice and Experience}, volume = {36}, year = {2006}, month = {2006///}, pages = {1331 - 1354}, abstract = {Applications that are part of a mission-critical information system need to maintain a usable level of key services through ongoing cyber-attacks. In addition to the well-publicized denial of service (DoS) attacks, these networked and distributed applications are increasingly threatened by sophisticated attacks that attempt to corrupt system components and violate service integrity. While various approaches have been explored to deal with DoS attacks, corruption-inducing attacks remain largely unaddressed. We have developed a collection of mechanisms based on redundancy, Byzantine fault tolerance, and adaptive middleware that help distributed, object-based applications tolerate corruption-inducing attacks. In this paper, we present the ITUA architecture, which integrates these mechanisms in a framework for auto-adaptive intrusion-tolerant systems, and we describe our experience in using the technology to defend a critical application that is part of a larger avionics system as an example. We also motivate the adaptive responses that are key to intrusion tolerance, and explain the use of the ITUA architecture to support them in an architectural framework. 
Copyright {\textcopyright} 2006 John Wiley \& Sons, Ltd.}, keywords = {adaptive defense, adaptive middleware, Byzantine fault tolerance, intrusion tolerance, redundancy, survivability architecture}, isbn = {1097-024X}, doi = {10.1002/spe.747}, url = {http://onlinelibrary.wiley.com/doi/10.1002/spe.747/abstract}, author = {Pal,Partha and Rubel,Paul and Atighetchi,Michael and Webber,Franklin and Sanders,William H. and Seri,Mouna and Ramasamy,HariGovind and Lyons,James and Courtney,Tod and Agbaria,Adnan and Michel Cukier and Gossett,Jeanna and Keidar,Idit} } @conference {12085, title = {An architecture for specification-based detection of semantic integrity violations in kernel dynamic data}, booktitle = {Proceedings of the 15th conference on USENIX Security Symposium}, year = {2006}, month = {2006///}, pages = {20 - 20}, author = {Petroni Jr,N. L and Fraser,T. and Walters,A. A and Arbaugh, William A.} } @article {16352, title = {Atomic instructions in Java}, journal = {ECOOP 2002{\textemdash}Object-Oriented Programming}, year = {2006}, month = {2006///}, pages = {5 - 16}, author = {Hovemeyer,D. and Pugh, William and Spacco,J.} } @article {18874, title = {Automatic web services composition using SHOP2}, year = {2006}, month = {2006///}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {Semantic markup of Web services will enable the automation of various kinds of tasks, including discovery, composition, and execution of Web services. We describe how an AI planning system (SHOP2) can be used with DAML-S Web service descriptions to automatically compose Web services.}, url = {http://www.stormingmedia.us/96/9697/A969744.html}, author = {Hendler,J. and Nau, Dana S. and Parsia,B. and Sirin,E. and Wu,D.} } @article {16983, title = {Balancing Systematic and Flexible Exploration of Social Networks}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {12}, year = {2006}, month = {2006/10//Sept}, pages = {693 - 700}, abstract = {Social network analysis (SNA) has emerged as a powerful method for understanding the importance of relationships in networks. However, interactive exploration of networks is currently challenging because: (1) it is difficult to find patterns and comprehend the structure of networks with many nodes and links, and (2) current systems are often a medley of statistical methods and overwhelming visual output which leaves many analysts uncertain about how to explore in an orderly manner. This results in exploration that is largely opportunistic. Our contributions are techniques to help structural analysts understand social networks more effectively. We present SocialAction, a system that uses attribute ranking and coordinated views to help users systematically examine numerous SNA measures. Users can (1) flexibly iterate through visualizations of measures to gain an overview, filter nodes, and find outliers, (2) aggregate networks using link structure, find cohesive subgroups, and focus on communities of interest, and (3) untangle networks by viewing different link types separately, or find patterns across different link types using a matrix overview. For each operation, a stable node layout is maintained in the network visualization so users can make comparisons.
SocialAction offers analysts a strategy beyond opportunism, as it provides systematic, yet flexible, techniques for exploring social networks}, keywords = {Aggregates, algorithms, attribute ranking, Cluster Analysis, Computer Graphics, Computer simulation, Coordinate measuring machines, coordinated views, Data analysis, data visualisation, Data visualization, exploratory data analysis, Filters, Gain measurement, graph theory, Graphical user interfaces, Information Storage and Retrieval, interactive graph visualization, matrix algebra, matrix overview, Models, Biological, Navigation, network visualization, Pattern analysis, Population Dynamics, Social Behavior, social network analysis, Social network services, social networks, social sciences computing, Social Support, SocialAction, software, statistical analysis, statistical methods, User-Computer Interface}, isbn = {1077-2626}, doi = {10.1109/TVCG.2006.122}, author = {Perer,A. and Shneiderman, Ben} } @article {13175, title = {Ballistic hand movements}, journal = {Articulated Motion and Deformable Objects}, year = {2006}, month = {2006///}, pages = {153 - 164}, abstract = {Common movements like reaching, striking, etc. observed during surveillance have highly variable target locations. This puts appearance-based techniques at a disadvantage for modelling and recognizing them. Psychological studies indicate that these actions are ballistic in nature. Their trajectories have simple structures and are determined to a great degree by the starting and ending positions. We present an approach for movement recognition that explicitly considers their ballistic nature. This enables the decoupling of recognition from the movement{\textquoteright}s trajectory, allowing generalization over a range of target-positions. A given movement is first analyzed to determine if it is ballistic. Ballistic movements are further classified into reaching, striking, etc. The proposed approach was tested with motion capture data obtained from the CMU MoCap database.}, author = {Prasad,V. and Kellokumpu,V. and Davis, Larry S.} } @article {17013, title = {Classifying science: Phenomena, data, theory, method, practice: Book Reviews}, journal = {J. Am. Soc. Inf. Sci. Technol.}, volume = {57}, year = {2006}, month = {2006/12//}, pages = {1977 - 1978}, abstract = {Due to e-mail{\textquoteright}s ubiquitous nature, millions of users are intimate with the technology; however, most users are only familiar with managing their own e-mail, which is an inherently different task from exploring an e-mail archive. Historians and social scientists believe that e-mail archives are important artifacts for understanding the individuals and communities they represent. To understand the conversations evidenced in an archive, context is needed. In this article, we present a new way to gain this necessary context: analyzing the temporal rhythms of social relationships. We provide methods for constructing meaningful rhythms from the e-mail headers by identifying relationships and interpreting their attributes. With these visualization techniques, e-mail archive explorers can uncover insights that may have been otherwise hidden in the archive. We apply our methods to an individual{\textquoteright}s 15-year e-mail archive, which consists of about 45,000 messages and over 4,000 relationships. 
{\textcopyright} 2006 Wiley Periodicals, Inc.}, keywords = {corpus_analysis, email, hci, project--email, text_analysis, Visualization}, isbn = {1532-2882}, url = {http://dx.doi.org/10.1002/asi.v57:14}, author = {Perer,Adam and Shneiderman, Ben and Oard, Douglas} } @conference {12316, title = {Compression techniques for minimum energy consumption}, booktitle = {25th Army Science Conference}, year = {2006}, month = {2006///}, author = {Puthenpurayil,S. and Gu,R. and Bhattacharyya, Shuvra S.} } @conference {18780, title = {A computational framework for point cloud construction using digital projection patterns}, year = {2006}, month = {2006///}, address = {Philadelphia, USA}, abstract = {Many reverse engineering and inspection applications require generation of point clouds representing faces of physical objects. This paper describes a computational framework for constructing point clouds using digital projection patterns. The basic principle behind the approach is to project known patterns on the object using a digital projector. A digital camera is then used to take images of the object with the known projection patterns imposed on it. Due to the presence of 3-D faces of the object, the projection patterns appear distorted in the images. The images are analyzed to construct the 3-D point cloud that is capable of introducing the observed distortions in the images. The approach described in this paper presents three advances over the previously developed approaches. First, it is capable of working with the projection patterns that have variable fringe widths and curved fringes and hence can provide improved accuracy. Second, our algorithm minimizes the number of images needed for creating the 3-D point cloud. Finally, we use a hybrid approach that uses a combination of reference plane images and estimated system parameters to construct the point cloud. This approach provides good run-time computational performance and simplifies the system calibration. }, url = {http://www.glue.umd.edu/~skgupta/Publication/CIE06_Peng2.pdf}, author = {Peng,T. and Gupta,S.K.} } @article {14668, title = {Context-sensitive correlation analysis for detecting races}, journal = {Proceedings of the ACM Conference on Programming Language Design and Implementation (PLDI)}, year = {2006}, month = {2006///}, pages = {320 - 331}, author = {Pratikakis,P. and Foster, Jeffrey S. and Hicks, Michael W.} } @article {16315, title = {Covering arrays for efficient fault characterization in complex configuration spaces}, journal = {IEEE Transactions on Software Engineering}, year = {2006}, month = {2006///}, pages = {20 - 34}, author = {Yilmaz,C. and Cohen,M. B and Porter, Adam} } @article {17042, title = {Creativity Support Tools: Report From a U.S. National Science Foundation Sponsored Workshop}, journal = {International Journal of Human-Computer Interaction}, volume = {20}, year = {2006}, month = {2006///}, pages = {61 - 77}, abstract = {Creativity support tools is a research topic with high risk but potentially very high payoff. The goal is to develop improved software and user interfaces that empower users to be not only more productive but also more innovative. Potential users include software and other engineers, diverse scientists, product and graphic designers, architects, educators, students, and many others. Enhanced interfaces could enable more effective searching of intellectual resources, improved collaboration among teams, and more rapid discovery processes.
These advanced interfaces should also provide potent support in hypothesis formation, speedier evaluation of alternatives, improved understanding through visualization, and better dissemination of results. For creative endeavors that require composition of novel artifacts (e.g., computer programs, scientific papers, engineering diagrams, symphonies, artwork), enhanced interfaces could facilitate exploration of alternatives, prevent unproductive choices, and enable easy backtracking. This U.S. National Science Foundation sponsored workshop brought together 25 research leaders and graduate students to share experiences, identify opportunities, and formulate research challenges. Two key outcomes emerged: (a) encouragement to evaluate creativity support tools through multidimensional in-depth longitudinal case studies and (b) formulation of 12 principles for design of creativity support tools.}, isbn = {1044-7318}, doi = {10.1207/s15327590ijhc2002_1}, url = {http://www.tandfonline.com/doi/abs/10.1207/s15327590ijhc2002_1}, author = {Shneiderman, Ben and Fischer,Gerhard and Czerwinski,Mary and Resnick,Mitch and Myers,Brad and Candy,Linda and Edmonds,Ernest and Eisenberg,Mike and Giaccardi,Elisa and Hewett,Tom and Jennings,Pamela and Kules,Bill and Nakakoji,Kumiyo and Nunamaker,Jay and Pausch,Randy and Selker,Ted and Sylvan,Elisabeth and Terry,Michael} } @inbook {13751, title = {Cross-Language Access to Recorded Speech in the MALACH Project}, booktitle = {Text, Speech and Dialogue}, series = {Lecture Notes in Computer Science}, volume = {2448}, year = {2006}, month = {2006///}, pages = {197 - 212}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The MALACH project seeks to help users find information in a vast multilingual collection of untranscribed oral history interviews. This paper introduces the goals of the project and focuses on supporting access by users who are unfamiliar with the interview language.
It begins with a review of the state of the art in cross-language speech retrieval; approaches that will be investigated in the project are then described. Czech was selected as the first non-English language to be supported, so results of an initial experiment with Czech/English cross-language retrieval are reported.}, isbn = {978-3-540-44129-8}, url = {http://dx.doi.org/10.1007/3-540-46154-X_8}, author = {Oard, Douglas and Demner-Fushman,Dina and Haji{\v c},Jan and Ramabhadran,Bhuvana and Gustman,Samuel and Byrne,William and Soergel,Dagobert and Dorr, Bonnie J and Resnik, Philip and Picheny,Michael}, editor = {Sojka,Petr and Kopecek,Ivan and Pala,Karel} } @conference {12317, title = {Dataflow transformations in high-level DSP system design}, booktitle = {Proceedings of the International Symposium on System-on-Chip, Tampere, Finland}, year = {2006}, month = {2006///}, pages = {131 - 136}, author = {Saha,S. and Puthenpurayil,S. and Bhattacharyya, Shuvra S.} } @article {15252, title = {Dependent rounding and its applications to approximation algorithms}, journal = {Journal of the ACM}, volume = {53}, year = {2006}, month = {2006/05//}, pages = {324 - 360}, abstract = {We develop a new randomized rounding approach for fractional vectors defined on the edge-sets of bipartite graphs. We show various ways of combining this technique with other ideas, leading to improved (approximation) algorithms for various problems. These include:---low congestion multi-path routing;---richer random-graph models for graphs with a given degree-sequence;---improved approximation algorithms for: (i) throughput-maximization in broadcast scheduling, (ii) delay-minimization in broadcast scheduling, as well as (iii) capacitated vertex cover; and---fair scheduling of jobs on unrelated parallel machines.}, keywords = {broadcast scheduling, Randomized rounding}, isbn = {0004-5411}, doi = {10.1145/1147954.1147956}, url = {http://doi.acm.org/10.1145/1147954.1147956}, author = {Gandhi,Rajiv and Khuller, Samir and Parthasarathy,Srinivasan and Srinivasan, Aravind} } @article {16812, title = {Dwarf cube architecture for reducing storage sizes of multidimensional data}, volume = {10/157,960}, year = {2006}, month = {2006/11/07/}, abstract = {The invention relates to data warehouses and the ability to create and maintain data cubes of multi-dimensional data. More specifically, the invention pertains to data cube architectures that permit significant reduction of storage, exhibit very efficient retrieval and provide a very efficient incremental update of the data cubes.}, url = {http://www.google.com/patents?id=PGl7AAAAEBAJ}, author = {Roussopoulos, Nick and Sismanis,John and Deligiannakis,Antonios}, editor = {The University of Maryland College Park} } @article {13160, title = {Edge affinity for pose-contour matching}, journal = {Computer Vision and Image Understanding}, volume = {104}, year = {2006}, month = {2006/10//}, pages = {36 - 47}, abstract = {We present an approach for whole-body pose-contour matching. Contour matching in natural images in the absence of foreground{\textendash}background segmentation is difficult. Usually an asymmetric approach is adopted, where a contour is said to match well if it aligns with a subset of the image{\textquoteright}s gradients. This leads to problems as the contour can match with a portion of an object{\textquoteright}s outline and ignore the remainder. We present a model for using edge continuity to address this issue.
Pairs of edge elements in the image are linked with affinities if they are likely to belong to the same object. A contour that matches with a set of image gradients is constrained to also match with other gradients having high affinities with the chosen ones. Experimental results show that this improves matching performance.}, keywords = {Contour matching, Edge continuity}, isbn = {1077-3142}, doi = {10.1016/j.cviu.2006.06.008}, url = {http://www.sciencedirect.com/science/article/pii/S1077314206000737}, author = {Prasad,V. S.N and Davis, Larry S. and Tran,Son Dinh and Elgammal,Ahmed} } @conference {12348, title = {Efficient simulation of critical synchronous dataflow graphs}, booktitle = {Proceedings of the 43rd annual Design Automation Conference}, year = {2006}, month = {2006///}, pages = {893 - 898}, author = {Hsu,C. J and Ramasubbu,S. and Ko,M. Y and Pino,J. L and Bhattacharyya, Shuvra S.} } @article {15932, title = {Enhancing reinforcement learning with metacognitive monitoring and control for improved perturbation tolerance}, journal = {Journal of Experimental and Theoretical Artificial Intelligence}, volume = {18}, year = {2006}, month = {2006///}, pages = {387 - 411}, author = {Anderson,M. L and Oates,T. and Chong,W. and Perlis, Don} } @conference {15852, title = {Evaluation of Multilingual and Multi-modal Information Retrieval-Seventh Workshop of the Cross-Language Evaluation Forum, CLEF 2006, Alicante, Spain, September 19-21, 2006, Revised Selected Papers}, booktitle = {Seventh Workshop of the Cross-Language Evaluation Forum, CLEF 2006}, year = {2006}, month = {2006///}, address = {Alicante, Spain}, author = {Peters,C. and Clough,P. and Gey,F. and Karlgren,J. and Magnini,B. and Oard, Douglas and De Rijke,M. and Stempfhuber,M.} } @inbook {14693, title = {Existential Label Flow Inference Via CFL Reachability}, booktitle = {Static Analysis}, series = {Lecture Notes in Computer Science}, volume = {4134}, year = {2006}, month = {2006///}, pages = {88 - 106}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {In programming languages, existential quantification is useful for describing relationships among members of a structured type. For example, we may have a list in which there exists some mutual exclusion lock l in each list element such that l protects the data stored in that element. With this information, a static analysis can reason about the relationship between locks and locations in the list even when the precise identity of the lock and/or location is unknown. To facilitate the construction of such static analyses, this paper presents a context-sensitive label flow analysis algorithm with support for existential quantification. Label flow analysis is a core part of many static analysis systems. Following Rehof et al., we use context-free language (CFL) reachability to develop an efficient O(n{\textsuperscript{3}}) label flow inference algorithm. We prove the algorithm sound by reducing its derivations to those in a system based on polymorphically-constrained types, in the style of Mossin. We have implemented a variant of our analysis as part of a data race detection tool for C programs.}, isbn = {978-3-540-37756-6}, url = {http://dx.doi.org/10.1007/11823230_7}, author = {Pratikakis,Polyvios and Foster, Jeffrey S.
and Hicks, Michael W.}, editor = {Yi,Kwangkeun} } @article {16081, title = {Exploring auction databases through interactive visualization}, journal = {Decision Support Systems}, volume = {42}, year = {2006}, month = {2006/12//}, pages = {1521 - 1538}, abstract = {We introduce AuctionExplorer, a suite of tools for exploring databases of online auctions. The suite combines tools for collecting, processing, and interactively exploring auction attributes (e.g., seller rating), and the bid history (price evolution represented as a time series). Part of AuctionExplorer{\textquoteright}s power comes from its coupling of the two information structures, thereby allowing exploration of relationships between them. Exploration can be directed by hypothesis testing or exploratory data analysis. We propose a process for visual data analysis and illustrate AuctionExplorer{\textquoteright}s operations with a dataset of eBay auctions. Insights may improve seller, bidder, auction house, and other vendors{\textquoteright} understanding of the market, thereby assisting their decision making process.}, keywords = {Auction dynamics, Bid history, Online auctions, time series, user interface}, isbn = {0167-9236}, doi = {10.1016/j.dss.2006.01.001}, url = {http://www.sciencedirect.com/science/article/pii/S0167923606000042}, author = {Shmueli,Galit and Jank,Wolfgang and Aris,Aleks and Plaisant, Catherine and Shneiderman, Ben} } @conference {16085, title = {Exploring content-actor paired network data using iterative query refinement with NetLens}, booktitle = {Digital Libraries, 2006. JCDL {\textquoteright}06. Proceedings of the 6th ACM/IEEE-CS Joint Conference on}, year = {2006}, month = {2006/06//}, pages = {372 - 372}, abstract = {Networks have remained a challenge for information retrieval and visualization because of the rich set of tasks that users want to accomplish. This paper demonstrates a tool, NetLens, to explore a content-actor paired network data model. The NetLens interface was designed to allow users to pose a series of elementary queries and iteratively refine visual overviews and sorted lists. This enables the support of complex queries that are traditionally hard to specify in node-link visualizations. NetLens is general and scalable in that it applies to any dataset that can be represented with our abstract content-actor data model.}, keywords = {content-actor pair network data model, data models, data visualisation, data visualization, digital libraries, information retrieval, iterative query refinement, NetLens, query processing, user interface management systems}, doi = {10.1145/1141753.1141868}, author = {Lee,Bongshin and Kang,Hyunmo and Plaisant, Catherine and Bederson, Benjamin B.} } @conference {16087, title = {Exploring erotics in Emily Dickinson{\textquoteright}s correspondence with text mining and visual interfaces}, booktitle = {Proceedings of the 6th ACM/IEEE-CS joint conference on Digital libraries}, series = {JCDL {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {141 - 150}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper describes a system to support humanities scholars in their interpretation of literary work. It presents a user interface and web architecture that integrates text mining, a graphical user interface and visualization, while attempting to remain easy to use by non-specialists.
Users can interactively read and rate documents found in a digital library collection, prepare training sets, review results of classification algorithms and explore possible indicators and explanations. Initial evaluation steps suggest that there is a rationale for "provocational" text mining in literary interpretation.}, keywords = {case studies, humanities, literary criticism, text mining, user interface, Visualization}, isbn = {1-59593-354-9}, doi = {10.1145/1141753.1141781}, url = {http://doi.acm.org/10.1145/1141753.1141781}, author = {Plaisant, Catherine and Rose,James and Yu,Bei and Auvil,Loretta and Kirschenbaum,Matthew G. and Smith,Martha Nell and Clement,Tanya and Lord,Greg} } @inbook {18834, title = {Finding Mold-Piece Regions Using Computer Graphics Hardware}, booktitle = {Geometric Modeling and Processing - GMP 2006}, series = {Lecture Notes in Computer Science}, volume = {4077}, year = {2006}, month = {2006///}, pages = {655 - 662}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {An important step in the mold design process that ensures disassembly of mold pieces consists of identifying various regions on the part that will be formed by different mold pieces. This paper presents an efficient and robust algorithm to find and highlight the mold-piece regions on a part. The algorithm can be executed on current-generation computer graphics hardware. The complexity of the algorithm solely depends on the time to render the given part. By using a system that can quickly find various mold-piece regions on a part, designers can easily optimize the part and mold design and if needed make appropriate corrections upfront, streamlining the subsequent design steps.}, keywords = {Computer science}, isbn = {978-3-540-36711-6}, url = {http://www.springerlink.com/content/t8t3588924263p12/abstract/}, author = {Priyadarshi,Alok and Gupta, Satyandra K.}, editor = {Kim,Myung-Soo and Shimada,Kenji} } @article {12814, title = {High-confidence medical device software and systems}, journal = {Computer}, volume = {39}, year = {2006}, month = {2006/04//}, pages = {33 - 38}, abstract = {Given the shortage of caregivers and the increase in an aging US population, the future of US healthcare quality does not look promising and definitely is unlikely to be cheaper. Advances in health information systems and healthcare technology offer a tremendous opportunity for improving the quality of care while reducing costs. The development and production of medical device software and systems is a crucial issue, both for the US economy and for ensuring safe advances in healthcare delivery. As devices become increasingly smaller in physical terms but larger in software terms, the design, testing, and eventual Food and Drug Administration (FDA) device approval is becoming much more expensive for medical device manufacturers both in terms of time and cost. Furthermore, the number of devices that have recently been recalled due to software and hardware problems is increasing at an alarming rate.
As medical devices are becoming increasingly networked, ensuring even the same level of health safety seems a challenge.}, keywords = {Aging, biomedical equipment, Clinical software engineering, Costs, FDA device, Food and Drug Administration device, health and safety, health care, health information system, health safety, healthcare delivery, Healthcare technology, Information systems, medical computing, medical device manufacturing, medical device software development, medical device systems development, medical information systems, Medical services, Medical software, Medical tests, networked medical devices, Production systems, Software design, Software safety, Software systems, Software testing, US healthcare quality}, isbn = {0018-9162}, doi = {10.1109/MC.2006.127}, author = {Lee,I. and Pappas,G. J and Cleaveland, Rance and Hatcliff,J. and Krogh,B. H and Lee,P. and Rubin,H. and Sha,L.} } @article {19661, title = {How Multirobot Systems Research will Accelerate our Understanding of Social Animal Behavior}, journal = {Proceedings of the IEEE}, volume = {94}, year = {2006}, month = {2006/07//}, pages = {1445 - 1463}, abstract = {Our understanding of social insect behavior has significantly influenced artificial intelligence (AI) and multirobot systems{\textquoteright} research (e.g., ant algorithms and swarm robotics). In this work, however, we focus on the opposite question: "How can multirobot systems research contribute to the understanding of social animal behavior?" As we show, we are able to contribute at several levels. First, using algorithms that originated in the robotics community, we can track animals under observation to provide essential quantitative data for animal behavior research. Second, by developing and applying algorithms originating in speech recognition and computer vision, we can automatically label the behavior of animals under observation. In some cases the automatic labeling is more accurate and consistent than manual behavior identification. Our ultimate goal, however, is to automatically create, from observation, executable models of behavior. An executable model is a control program for an agent that can run in simulation (or on a robot). The representation for these executable models is drawn from research in multirobot systems programming. In this paper we present the algorithms we have developed for tracking, recognizing, and learning models of social animal behavior, details of their implementation, and quantitative experimental results using them to study social insects}, keywords = {Acceleration, Animal behavior, ant movement tracking, Artificial intelligence, biology computing, Computer vision, control engineering computing, Insects, Intelligent robots, Labeling, monkey movement tracking, multi-robot systems, multirobot systems, robotics algorithms, Robotics and automation, social animal behavior, social animals, social insect behavior, Speech recognition, tracking}, isbn = {0018-9219}, author = {Balch, T. and Dellaert, F. and Feldman, A. and Guillory, A. and Isbell, C.L. and Zia Khan and Pratt, S.C. and Stein, A.N. 
and Wilde, H.} } @article {14623, title = {Identification and cross-species comparison of canine osteoarthritic gene regulatory cis-elements}, journal = {Osteoarthritis and Cartilage}, volume = {14}, year = {2006}, month = {2006/08//}, pages = {830 - 838}, abstract = {Objective: To better understand transcription regulation of osteoarthritis (OA) by examining common promoter motifs in canine osteoarthritic genes, to identify other genes containing these motifs and to assess the conservation of these motifs between canine, human, mouse and rat. Design: Differentially expressed transcripts in canine OA were mapped to the human genome. We thus identified 20 orthologous human transcripts representing 19 up-regulated genes and 62 orthologous transcripts representing 60 down-regulated genes. The 5 kbp upstream regions of these transcripts were used to identify binding sites and build promoter models based on those sites. The human genome was subsequently searched for other transcripts likely to be regulated by the same promoter models. Orthologous transcripts were then identified in canine, rat and mouse for determination of potential cross-species conservation of binding sites comprising the promoter model. Results: Four promoter models containing 5{\textendash}6 transcripts and 5{\textendash}8 common transcription factor binding sites were developed. They include binding sites for AP-4, AP-2α and γ, and E2F. Several hundred other human genes were found to contain these promoter motifs. Furthermore these motifs were significantly over represented in the orthologous genes in canine, rat and mouse genomes. Conclusions: We have developed and applied a computational methodology to identify common promoter elements implicated in OA and shared amongst four higher vertebrates. The transcription factors associated with these binding sites and other genes driven by these promoter motifs have been implicated in OA, chondrocyte development and with other biological factors involved in the disease. }, keywords = {Chondrocyte, Gene expression, Osteoarthritis, Promoter}, isbn = {1063-4584}, doi = {10.1016/j.joca.2006.02.007}, url = {http://www.sciencedirect.com/science/article/pii/S1063458406000379}, author = {Hannenhalli, Sridhar and Middleton,R.P. and Levy,S. and Perroud,B. and Holzwarth,J.A. and McDonald,K. and Hannah,S.S.} } @article {18577, title = {In VINI veritas: realistic and controlled network experimentation}, journal = {SIGCOMM Comput. Commun. Rev.}, volume = {36}, year = {2006}, month = {2006/08//}, pages = {3 - 14}, abstract = {This paper describes VINI, a virtual network infrastructure that allows network researchers to evaluate their protocols and services in a realistic environment that also provides a high degree of control over network conditions. VINI allows researchers to deploy and evaluate their ideas with real routing software, traffic loads, and network events. To provide researchers flexibility in designing their experiments, VINI supports simultaneous experiments with arbitrary network topologies on a shared physical infrastructure. This paper tackles the following important design question: What set of concepts and techniques facilitate flexible, realistic, and controlled experimentation (e.g., multiple topologies and the ability to tweak routing algorithms) on a fixed physical infrastructure? We first present VINI{\textquoteright}s high-level design and the challenges of virtualizing a single network.
We then present PL-VINI, an implementation of VINI on PlanetLab, running the "Internet In a Slice". Our evaluation of PL-VINI shows that it provides a realistic and controlled environment for evaluating new protocols and services.}, keywords = {architecture, experimentation, Internet, Routing, virtualization}, isbn = {0146-4833}, doi = {10.1145/1151659.1159916}, url = {http://doi.acm.org/10.1145/1151659.1159916}, author = {Bavier,Andy and Feamster, Nick and Huang,Mark and Peterson,Larry and Rexford,Jennifer} } @article {16088, title = {Integrating data and interfaces to enhance understanding of government statistics: toward the national statistical knowledge network project briefing}, journal = {Proceedings of 7th Annual International Conference on Digital Libraries (DG06)}, year = {2006}, month = {2006///}, pages = {21 - 24}, abstract = {This paper reports the results of work in the final no-cost extension year of a digital government project that developed user interface models and prototypes to help people find and understand government statistics; proposed a Statistical Knowledge Network architecture that supports cross agency information access; and demonstrated models for government-academic collaboration. }, author = {Marchionini,G. and Haas,S. and Plaisant, Catherine and Shneiderman, Ben} } @article {16091, title = {Interactive sonification for blind people exploration of geo-referenced data: comparison between a keyboard-exploration and a haptic-exploration interfaces}, journal = {Cognitive Processing}, volume = {7}, year = {2006}, month = {2006///}, pages = {178 - 179}, isbn = {1612-4782}, url = {http://dx.doi.org/10.1007/s10339-006-0137-8}, author = {Delogu,Franco and Belardinelli,Marta and Palmiero,Massimiliano and Pasqualotto,Emanuele and Zhao,Haixia and Plaisant, Catherine and Federici,Stefano} } @article {13812, title = {Leveraging recurrent phrase structure in large-scale ontology translation}, journal = {Proceedings of the 11th Annual Conference of the European Association for Machine Translation}, year = {2006}, month = {2006///}, abstract = {This paper presents a process for leveraging structural relationships and reusable phrases when translating large-scale ontologies. Digital libraries are becoming more and more prevalent. An important step in providing universal access to such material is to provide multi-lingual access to the underlying principles of organization via ontologies, thesauri, and controlled vocabularies. Machine translation of these resources requires high accuracy and a deep vocabulary. Human input is often required, but full manual translation can be slow and expensive. We report on a cost-effective approach to ontology translation. We describe our technique of prioritization, our process of collecting aligned translations and generating a new lexicon, and the resulting improvement to translation system output. Our preliminary evaluation indicates that this technique provides significant cost savings for human-assisted translation. The process we developed can be applied to ontologies in other domains and is easily incorporated into other translation systems. }, author = {Murray,G. C and Dorr, Bonnie J and Jimmy Lin and Haji{\v c},J.
and Pecina,P.} } @conference {13813, title = {Leveraging reusability: cost-effective lexical acquisition for large-scale ontology translation}, booktitle = {Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics}, series = {ACL-44}, year = {2006}, month = {2006///}, pages = {945 - 952}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {Thesauri and ontologies provide important value in facilitating access to digital archives by representing underlying principles of organization. Translation of such resources into multiple languages is an important component for providing multilingual access. However, the specificity of vocabulary terms in most ontologies precludes fully-automated machine translation using general-domain lexical resources. In this paper, we present an efficient process for leveraging human translations when constructing domain-specific lexical resources. We evaluate the effectiveness of this process by producing a probabilistic phrase dictionary and translating a thesaurus of 56,000 concepts used to catalogue a large archive of oral histories. Our experiments demonstrate a cost-effective technique for accurate machine translation of large ontologies.}, doi = {10.3115/1220175.1220294}, url = {http://dx.doi.org/10.3115/1220175.1220294}, author = {Murray,G. Craig and Dorr, Bonnie J and Jimmy Lin and Haji{\v c},Jan and Pecina,Pavel} } @conference {14703, title = {Lock inference for atomic sections}, booktitle = {Proceedings of the First ACM SIGPLAN Workshop on Languages, Compilers, and Hardware Support for Transactional Computing}, year = {2006}, month = {2006///}, abstract = {To prevent unwanted interactions in multithreaded programs, programmers have traditionally employed pessimistic, blocking concurrency primitives. Using such primitives correctly and efficiently is notoriously difficult. To simplify the problem, recent research proposes that programmers specify atomic sections of code whose executions should be atomic with respect to one another, without dictating exactly how atomicity is enforced. Much work has explored using optimistic concurrency, or software transactions, as a means to implement atomic sections. This paper proposes to implement atomic sections using a static whole-program analysis to insert necessary uses of pessimistic concurrency primitives. Given a program that contains programmer-specified atomic sections and thread creations, our mutex inference algorithm efficiently infers a set of locks for each atomic section that should be acquired (released) upon entering (exiting) the atomic section. The key part of this algorithm is determining which memory locations in the program could be shared between threads, and using this information to generate the necessary locks. To determine sharing, our analysis uses the notion of continuation effects to track the locations accessed after each program point. As continuation effects are flow sensitive, a memory location may be thread-local before a thread creation and thread-shared afterward. We prove that our algorithm is correct, and provides parallelism according to the precision of the points-to analysis. While our algorithm also attempts to reduce the number of locks while preserving parallelism, we show that minimizing the number of locks is NP-hard.}, author = {Hicks, Michael W.
and Foster, Jeffrey S. and Pratikakis,P.} } @article {14704, title = {LOCKSMITH: context-sensitive correlation analysis for race detection}, journal = {SIGPLAN Not.}, volume = {41}, year = {2006}, month = {2006/06//}, pages = {320 - 331}, abstract = {One common technique for preventing data races in multi-threaded programs is to ensure that all accesses to shared locations are consistently protected by a lock. We present a tool called LOCKSMITH for detecting data races in C programs by looking for violations of this pattern. We call the relationship between locks and the locations they protect consistent correlation, and the core of our technique is a novel constraint-based analysis that infers consistent correlation context-sensitively, using the results to check that locations are properly guarded by locks. We present the core of our algorithm for a simple formal language λ▷, which we have proven sound, and discuss how we scale it up to an algorithm that aims to be sound for all of C. We develop several techniques to improve the precision and performance of the analysis, including a sharing analysis for inferring thread locality; existential quantification for modeling locks in data structures; and heuristics for modeling unsafe features of C such as type casts. When applied to several benchmarks, including multi-threaded servers and Linux device drivers, LOCKSMITH found several races while producing a modest number of false alarms.}, keywords = {context-sensitivity, correlation, locksmith, multi-threaded programming, race detection, Type inference}, isbn = {0362-1340}, doi = {10.1145/1133255.1134019}, url = {http://doi.acm.org/10.1145/1133255.1134019}, author = {Pratikakis,Polyvios and Foster, Jeffrey S. and Hicks, Michael W.} } @article {16006, title = {The metacognitive loop I: Enhancing reinforcement learning with metacognitive monitoring and control for improved perturbation tolerance}, journal = {Journal of Experimental \& Theoretical Artificial Intelligence}, volume = {18}, year = {2006}, month = {2006///}, pages = {387 - 411}, author = {Anderson,M. L and Oates,T. and Chong,W. and Perlis, Don} } @article {16276, title = {Metagenomic Analysis of the Human Distal Gut Microbiome}, journal = {Science}, volume = {312}, year = {2006}, month = {2006/06/02/}, pages = {1355 - 1359}, abstract = {The human intestinal microbiota is composed of $10^{13}$ to $10^{14}$ microorganisms whose collective genome ({\textquotedblleft}microbiome{\textquotedblright}) contains at least 100 times as many genes as our own genome. We analyzed \~{}78 million base pairs of unique DNA sequence and 2062 polymerase chain reaction{\textendash}amplified 16S ribosomal DNA sequences obtained from the fecal DNAs of two healthy adults. Using metabolic function analyses of identified genes, we compared our human genome with the average content of previously sequenced microbial genomes. Our microbiome has significantly enriched metabolism of glycans, amino acids, and xenobiotics; methanogenesis; and 2-methyl-d-erythritol 4-phosphate pathway{\textendash}mediated biosynthesis of vitamins and isoprenoids. Thus, humans are superorganisms whose metabolism represents an amalgamation of microbial and human attributes.}, isbn = {0036-8075, 1095-9203}, doi = {10.1126/science.1124234}, url = {http://www.sciencemag.org/content/312/5778/1355}, author = {Gill,Steven R. and Pop, Mihai and DeBoy,Robert T.
and Eckburg,Paul B and Turnbaugh,Peter J and Samuel,Buck S and Gordon,Jeffrey I and Relman,David A and Fraser-Liggett,Claire M and Nelson,Karen E.} } @article {13266, title = {A multi-resolution representation for terrain morphology}, journal = {Geographic Information Science}, year = {2006}, month = {2006///}, pages = {33 - 46}, abstract = {Mesh-based terrain representations provide accurate descriptions of a terrain, but fail to capture its morphological structure. The morphology of a terrain is defined by its critical points and by the critical lines joining them, which form a so-called surface network. Because of the large size of current terrain data sets, a multi-resolution representation of the terrain morphology is crucial. Here, we address the problem of representing the morphology of a terrain at different resolutions. The basis of the multi-resolution terrain model, which we call a Multi-resolution Surface Network (MSN), is a generalization operator on a surface network, which produces a simplified representation incrementally. An MSN is combined with a multi-resolution mesh-based terrain model, which encompasses the terrain morphology at different resolutions. We show how variable-resolution representations can be extracted from an MSN, and we also present an implementation of an MSN in a compact encoding data structure.}, doi = {10.1007/11863939_3}, author = {Danovaro,E. and De Floriani, Leila and Papaleo,L. and Vitali,M.} } @article {17309, title = {Nuclear Envelope Dystrophies Show a Transcriptional Fingerprint Suggesting Disruption of Rb{\textendash}MyoD Pathways in Muscle Regeneration}, journal = {Brain}, volume = {129}, year = {2006}, month = {2006/04/01/}, pages = {996 - 1013}, abstract = {Mutations of lamin A/C (LMNA) cause a wide range of human disorders, including progeria, lipodystrophy, neuropathies and autosomal dominant Emery{\textendash}Dreifuss muscular dystrophy (EDMD). EDMD is also caused by X-linked recessive loss-of-function mutations of emerin, another component of the inner nuclear lamina that directly interacts with LMNA. One model for disease pathogenesis of LMNA and emerin mutations is cell-specific perturbations of the mRNA transcriptome in terminally differentiated cells. To test this model, we studied 125 human muscle biopsies from 13 diagnostic groups (125 U133A, 125 U133B microarrays), including EDMD patients with LMNA and emerin mutations. A Visual and Statistical Data Analyzer (VISDA) algorithm was used to statistically model cluster hierarchy, resulting in a tree of phenotypic classifications. Validations of the diagnostic tree included permutations of U133A and U133B arrays, and use of two probe set algorithms (MAS5.0 and MBEI). This showed that the two nuclear envelope defects (EDMD LMNA, EDMD emerin) were highly related disorders and were also related to fascioscapulohumeral muscular dystrophy (FSHD). FSHD has recently been hypothesized to involve abnormal interactions of chromatin with the nuclear envelope. To identify disease-specific transcripts for EDMD, we applied a leave-one-out (LOO) cross-validation approach using LMNA patient muscle as a test data set, with reverse transcription{\textendash}polymerase chain reaction (RT{\textendash}PCR) validations in both LMNA and emerin patient muscle.
A high proportion of top-ranked and validated transcripts were components of the same transcriptional regulatory pathway involving Rb1 and MyoD during muscle regeneration (CRI-1, CREBBP, Nap1L1, ECREBBP/p300), where each was specifically upregulated in EDMD. Using a muscle regeneration time series (27 time points) we develop a transcriptional model for downstream consequences of LMNA and emerin mutations. We propose that key interactions between the nuclear envelope and Rb and MyoD fail in EDMD at the point of myoblast exit from the cell cycle, leading to poorly coordinated phosphorylation and acetylation steps. Our data is consistent with mutations of nuclear lamina components leading to destabilization of the transcriptome in differentiated cells.}, keywords = {EDMD = Emery{\textendash}Dreifuss muscular dystrophy, emerin, Emery-Dreifuss muscular dystrophy, FSHD = fascioscapulohumeral muscular dystrophy, IDG = individual discriminatory genes, JDG = jointly discriminatory genes, lamin A/C, LGMD = limb-girdle muscular dystrophy, LOO = leave-one-out, RT{\textendash}PCR = reverse transcription{\textendash}polymerase chain reaction, VISDA = Visual and Statistical Data Analyzer, Skeletal muscle, wFC = weighted Fisher criterion}, isbn = {0006-8950, 1460-2156}, doi = {10.1093/brain/awl023}, url = {http://brain.oxfordjournals.org/content/129/4/996}, author = {Bakay,Marina and Wang,Zuyi and Melcon,Gisela and Schiltz,Louis and Xuan,Jianhua and Zhao,Po and Sartorelli,Vittorio and Seo,Jinwook and Pegoraro,Elena and Angelini,Corrado and Shneiderman, Ben and Escolar,Diana and Chen,Yi-Wen and Winokur,Sara T and Pachman,Lauren M and Fan,Chenguang and Mandler,Raul and Nevo,Yoram and Gordon,Erynn and Zhu,Yitan and Dong,Yibin and Wang,Yue and Hoffman,Eric P} } @article {12150, title = {Observations about software development for high end computing}, journal = {CTWatch Quarterly}, volume = {2}, year = {2006}, month = {2006///}, pages = {33 - 38}, abstract = {In this paper, we have summarized our findings from a series of case studies conducted with ten ASC and MP Codes as a series of observations. Due to the different environments in which each code is developed, some of the observations are consistent across code teams while others vary across code teams. Overall, we found high consistency among the ASC Codes and the MP Codes. Due to the different environments and foci of these projects, this result is both surprising and positive. In addition, despite the fact that a large majority of the developers on these teams have little or no formal training in software engineering, they have been able to make use of some basic software engineering principles. Further education and motivation could increase the use of these principles and further increase the quality of scientific and engineering software that has already demonstrated its value. Based on the positive results thus far, we have plans to conduct additional case studies to gather more data in support of or in contradiction to the observations presented in this paper. In future case studies, we will strive to investigate codes from additional domains, thereby allowing broader, more inclusive conclusions to be drawn.}, author = {Carver, J. C and Hochstein, L. M and Kendall,R. P and Nakamura, T. and Zelkowitz, Marvin V and Basili, Victor R. and Post,D. E} } @conference {17788, title = {Opinion Analysis in Document Databases}, booktitle = {Proc.
AAAI Spring Symposium on Computational Approaches to Analyzing Weblogs, Stanford, CA}, year = {2006}, month = {2006///}, abstract = {There are numerous applications in which we would like to assess what opinions are being expressed in text documents. For example, Martha Stewart{\textquoteright}s company may have wished to assess the degree of harshness of news articles about her in the recent past. Likewise, a World Bank official may wish to assess the degree of criticism of a proposed dam in Bangladesh. The ability to gauge opinion on a given topic is therefore of critical interest. In this paper, we develop a suite of algorithms which take as input a set D of documents as well as a topic t, and gauge the degree of opinion expressed about topic t in the set D of documents. Our algorithms can return both a number (the larger the number, the more positive the opinion) as well as a qualitative opinion (e.g. harsh, complimentary). We assess the accuracy of these algorithms via human experiments and show that the best of these algorithms can accurately reflect human opinions. We have also conducted performance experiments showing that our algorithms are computationally fast.}, author = {Cesarano,C. and Dorr, Bonnie J and Picariello, A. and Reforgiato,D. and Sagoff,A. and V.S. Subrahmanian} } @article {16243, title = {An optimized system for expression and purification of secreted bacterial proteins}, journal = {Protein Expression and Purification}, volume = {46}, year = {2006}, month = {2006/03//}, pages = {23 - 32}, abstract = {In this report, we describe an optimized system for the efficient overexpression, purification, and refolding of secreted bacterial proteins. Candidate secreted proteins were produced recombinantly in Escherichia coli as Tobacco Etch Virus protease-cleavable hexahistidine-c-myc epitope fusion proteins. Without regard to their initial solubility, recombinant fusion proteins were extracted from whole cells with guanidium chloride, purified under denaturing conditions by immobilized metal affinity chromatography, and refolded by rapid dilution into a solution containing only Tris buffer and sodium chloride. Following concentration on the same resin under native conditions, each protein was eluted for further purification and/or characterization. Preliminary studies on a test set of 12 secreted proteins ranging in size from 13 to 130 kDa yielded between 10 and 50 mg of fusion protein per liter of induced culture at greater than 90\% purity, as judged by Coomassie-stained SDS{\textendash}PAGE. Of the nine proteins further purified, analytical gel filtration chromatography indicated that each was a monomer in solution and circular dichroism spectroscopy revealed that each had adopted a well-defined secondary structure. While there are many potential applications for this system, the results presented here suggest that it will be particularly useful for investigators employing structural approaches to understand protein function, as attested to by the crystal structures of three proteins purified using this methodology (B.V. Geisbrecht, B.Y. Hamaoka, B. Perman, A. Zemla, D.J. Leahy, J. Biol. Chem. 280 (2005) 17243{\textendash}17250).}, keywords = {Pathogens, Secreted proteins, Toxins, Virulence factors}, isbn = {1046-5928}, doi = {10.1016/j.pep.2005.09.003}, url = {http://www.sciencedirect.com/science/article/pii/S1046592805003128}, author = {Geisbrecht,Brian V.
and Bouyain,Samuel and Pop, Mihai} } @article {13320, title = {Out-of-Core Multiresolution Terrain Modeling}, journal = {Modeling and Management of Geographical Data over Distributed Architectures. Springer-Verlag}, year = {2006}, month = {2006///}, author = {Danovaro,E. and De Floriani, Leila and Puppo,E. and Samet, Hanan} } @conference {17786, title = {Overconfidence or paranoia? search in imperfect-information games}, booktitle = {PROCEEDINGS OF THE NATIONAL CONFERENCE ON ARTIFICIAL INTELLIGENCE}, volume = {21}, year = {2006}, month = {2006///}, pages = {1045 - 1045}, abstract = {We derive a recursive formula for expected utility values in imperfect-information game trees, and an imperfect-information game tree search algorithm based on it. The formula and algorithm are general enough to incorporate a wide variety of opponent models. We analyze two opponent models. The {\textquotedblleft}paranoid{\textquotedblright} model is an information-set analog of the minimax rule used in perfect-information games. The {\textquotedblleft}overconfident{\textquotedblright} model assumes the opponent moves randomly. Our experimental tests in the game of kriegspiel chess (an imperfect-information variant of chess) produced surprising results: (1) against each other, and against one of the kriegspiel algorithms presented at IJCAI-05, the overconfident model usually outperformed the paranoid model; (2) the performance of both models depended greatly on how well the model corresponded to the opponent{\textquoteright}s behavior. These results suggest that the usual assumption of perfect-information game tree search{\textemdash}that the opponent will choose the best possible move{\textemdash}isn{\textquoteright}t as useful in imperfect-information games.}, url = {http://www.aaai.org/Papers/AAAI/2006/AAAI06-164.pdf}, author = {Parker,A. and Nau, Dana S. and V.S. Subrahmanian} } @article {19018, title = {OverDoSe: A generic DDoS protection service using an overlay network}, year = {2006}, month = {2006}, institution = {School of Computer Science, Carnegie Mellon University}, abstract = {We present the design and implementation of OverDoSe, an overlay network offering generic DDoS protection for targeted sites. OverDoSe clients and servers are isolated at the IP level. Overlay nodes route packets between a client and a server, and regulate traffic according to the server{\textquoteright}s instructions. Through the use of light-weight security primitives, OverDoSe achieves resilience against compromised overlay nodes with a minimal performance overhead. OverDoSe can be deployed by a single ISP who wishes to offer DDoS protection as a value-adding service to its customers.}, isbn = {CMU-CS-06-114}, doi = {Technical Report}, url = {http://repository.cmu.edu/cgi/viewcontent.cgi?article=1073\&context=compsci}, author = {Elaine Shi and Stoica,I. and Andersen,D.G. and Perrig, A.} } @article {17783, title = {The priority curve algorithm for video summarization}, journal = {Information Systems}, volume = {31}, year = {2006}, month = {2006/11//}, pages = {679 - 695}, abstract = {In this paper, we introduce the concept of a priority curve associated with a video. We then provide an algorithm that can use the priority curve to create a summary (of a desired length) of any video. The summary thus created exhibits nice continuity properties and also avoids repetition.
We have implemented the priority curve algorithm (PriCA) and compared it with other summarization algorithms in the literature with respect to both performance and the output quality. The quality of summaries was evaluated by a group of 200 students in Naples, Italy, who watched soccer videos. We show that PriCA is faster than existing algorithms and also produces better quality summaries. We also briefly describe a soccer video summarization system we have built using the PriCA architecture and various (classical) image processing algorithms.}, keywords = {Content based retrieval, Video databases, Video summarization}, isbn = {0306-4379}, doi = {10.1016/j.is.2005.12.003}, url = {http://www.sciencedirect.com/science/article/pii/S0306437905001250}, author = {Albanese, M. and Fayzullin,M. and Picariello, A. and V.S. Subrahmanian} } @article {17645, title = {Provable algorithms for parallel generalized sweep scheduling}, journal = {Journal of Parallel and Distributed Computing}, volume = {66}, year = {2006}, month = {2006///}, pages = {807 - 821}, author = {Anil Kumar,V. S and Marathe,M. V and Parthasarathy,S. and Srinivasan, Aravind and Zust,S.} } @article {15805, title = {Secondary Structure Spatial Conformation Footprint: A Novel Method for Fast Protein Structure Comparison and Classification}, journal = {BMC Structural Biology}, volume = {6}, year = {2006}, month = {2006///}, abstract = {Recently a new class of methods for fast protein structure comparison has emerged. We call the methods in this class projection methods as they rely on a mapping of protein structure into a high-dimensional vector space. Once the mapping is done, the structure comparison is reduced to distance computation between corresponding vectors. As structural similarity is approximated by distance between projections, the success of any projection method depends on how well its mapping function is able to capture the salient features of protein structure. There is no agreement on what constitutes a good projection technique and the three currently known projection methods utilize very different approaches to the mapping construction, both in terms of what structural elements are included and how this information is integrated to produce a vector representation. Results: In this paper we propose a novel projection method that uses secondary structure information to produce the mapping. First, a diverse set of spatial arrangements of triplets of secondary structure elements, a set of structural models, is automatically selected. Then, each protein structure is mapped into a high-dimensional vector of "counts" or footprint, where each count corresponds to the number of times a given structural model is observed in the structure, weighted by the precision with which the model is reproduced. We perform the first comprehensive evaluation of our method together with all other currently known projection methods. Conclusion: The results of our evaluation suggest that the type of structural information used by a projection method affects the ability of the method to detect structural similarity. In particular, our method that uses the spatial conformations of triplets of secondary structure elements outperforms other methods in most of the tests.}, url = {DOI:10.1186/1472-6807-6-12}, author = {Zotenko,Elena and O{\textquoteright}Leary, Dianne P.
and Przytycka,Teresa M.} } @conference {12608, title = {Shape-Regulated Particle Filtering for Tracking Non-Rigid Objects}, booktitle = {Image Processing, 2006 IEEE International Conference on}, year = {2006}, month = {2006/10//}, pages = {2813 - 2816}, abstract = {This paper presents an active contour based algorithm for tracking non-rigid objects in heavily cluttered scenes. We decompose the non-rigid contour tracking problem into three subproblems: 2D motion estimation, deformation detection, and shape regulation. First, we employ a particle filter to estimate the affine transform parameters between successive frames. Second, by using a dynamic object model, we generate a probabilistic map of deformation to reshape its contour. Finally, we project the updated model onto a trained shape subspace to constrain deformations to be within possible object appearances. Our experiments show that the proposed algorithm significantly improves the performance of the tracker.}, keywords = {2D motion estimation, active contour, affine transforms, cluttered scenes, deformation detection, dynamic object model, edge detection, nonrigid object tracking, parameter estimation, particle filtering, probabilistic map, shape regulation, tracking}, doi = {10.1109/ICIP.2006.312993}, author = {Jie Shao and Chellapa, Rama and Porikli, F.} } @article {16084, title = {Shared family calendars: Promoting symmetry and accessibility}, journal = {ACM Trans. Comput.-Hum. Interact.}, volume = {13}, year = {2006}, month = {2006/09//}, pages = {313 - 346}, abstract = {We describe the design and use of a system facilitating the sharing of calendar information between remotely located, multi-generational family members. Most previous work in this area involves software enabling younger family members to monitor their parents. We have found, however, that older adults are equally if not more interested in the activities of younger family members. The major obstacle preventing them from participating in information sharing is the technology itself. Therefore, we developed a multi-layered interface approach that offers simple interaction to older users. In our system, users can choose to enter information into a computerized calendar or write it by hand on digital paper calendars. All of the information is automatically shared among everyone in the distributed family. By making the interface more accessible to older users, we promote symmetrical sharing of information among both older and younger family members. We present our participatory design process, describe the user interface, and report on an exploratory field study in three households of an extended family.}, keywords = {calendar, digital paper, elderly, family technology, Home, layered interface, privacy, universal usability}, isbn = {1073-0516}, doi = {10.1145/1183456.1183458}, url = {http://doi.acm.org/10.1145/1183456.1183458}, author = {Plaisant, Catherine and Clamage,Aaron and Hutchinson,Hilary Browne and Bederson, Benjamin B. and Druin, Allison} } @article {12605, title = {Special Issue on Biometrics: Algorithms and Applications}, journal = {Proceedings of the IEEE}, volume = {94}, year = {2006}, month = {2006/11//}, pages = {1912 - 1914}, isbn = {0018-9219}, doi = {10.1109/JPROC.2006.886016}, author = {Chellapa, Rama and Phillips, J.
and Reynolds, D.} } @conference {18649, title = {A Statistical Analysis of Attack Data to Separate Attacks}, year = {2006}, month = {2006/06//}, pages = {383 - 392}, abstract = {This paper analyzes malicious activity collected from a test-bed, consisting of two target computers dedicated solely to the purpose of being attacked, over a 109-day time period. We separated port scans, ICMP scans, and vulnerability scans from the malicious activity. In the remaining attack data, over 78\% (i.e., 3,677 attacks) targeted port 445, which was then statistically analyzed. The goal was to find the characteristics that most efficiently separate the attacks. First, we separated the attacks by analyzing their messages. Then we separated the attacks by clustering characteristics using the K-Means algorithm. The comparison between the analysis of the messages and the outcome of the K-Means algorithm showed that 1) the mean of the distributions of packets, bytes and message lengths over time are poor characteristics to separate attacks and 2) the number of bytes, the mean of the distribution of bytes and message lengths as a function of the number of packets are the best characteristics for separating attacks.}, keywords = {attack data statistical analysis, attack separation, computer crime, Data analysis, data mining, ICMP scans, K-Means algorithm, pattern clustering, port scans, statistical analysis, vulnerability scans}, doi = {10.1109/DSN.2006.9}, author = {Michel Cukier and Berthier,R. and Panjwani,S. and Tan,S.} } @conference {16086, title = {Strategies for evaluating information visualization tools: multi-dimensional in-depth long-term case studies}, booktitle = {Proceedings of the 2006 AVI workshop on BEyond time and errors: novel evaluation methods for information visualization}, series = {BELIV {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {1 - 7}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {After an historical review of evaluation methods, we describe an emerging research method called Multi-dimensional In-depth Long-term Case studies (MILCs) which seems well adapted to study the creative activities that users of information visualization systems engage in. We propose that the efficacy of tools can be assessed by documenting 1) usage (observations, interviews, surveys, logging etc.) and 2) expert users{\textquoteright} success in achieving their professional goals. We summarize lessons from related ethnography methods used in HCI and provide guidelines for conducting MILCs for information visualization. We suggest ways to refine the methods for MILCs in modest sized projects and then envision ambitious projects with 3-10 researchers working over 1-3 years to understand individual and organizational use of information visualization by domain experts working at the frontiers of knowledge in their fields.}, isbn = {1-59593-562-2}, doi = {10.1145/1168149.1168158}, url = {http://doi.acm.org/10.1145/1168149.1168158}, author = {Shneiderman, Ben and Plaisant, Catherine} } @conference {12181, title = {Target size study for one-handed thumb use on small touchscreen devices}, booktitle = {Proceedings of the 8th conference on Human-computer interaction with mobile devices and services}, year = {2006}, month = {2006///}, pages = {203 - 210}, author = {Parhi,P. and Karlson,A.K.
and Bederson, Benjamin B.} } @conference {16089, title = {Task taxonomy for graph visualization}, booktitle = {Proceedings of the 2006 AVI workshop on BEyond time and errors: novel evaluation methods for information visualization}, series = {BELIV {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {1 - 5}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Our goal is to define a list of tasks for graph visualization that has enough detail and specificity to be useful to: 1) designers who want to improve their system and 2) evaluators who want to compare graph visualization systems. In this paper, we suggest a list of tasks we believe are commonly encountered while analyzing graph data. We define graph specific objects and demonstrate how all complex tasks could be seen as a series of low-level tasks performed on those objects. We believe that our taxonomy, associated with benchmark datasets and specific tasks, would help evaluators generalize results collected through a series of controlled experiments.}, keywords = {Evaluation, graph visualization, task taxonomy}, isbn = {1-59593-562-2}, doi = {10.1145/1168149.1168168}, url = {http://doi.acm.org/10.1145/1168149.1168168}, author = {Lee,Bongshin and Plaisant, Catherine and Parr,Cynthia Sims and Fekete,Jean-Daniel and Henry,Nathalie} } @article {15487, title = {Techniques and processes for improving the quality and performance of open-source software}, journal = {Software Process: Improvement and Practice}, volume = {11}, year = {2006}, month = {2006///}, pages = {163 - 176}, abstract = {Open-source development processes have emerged as an effective approach to reduce cycle-time and decrease design, implementation, and quality assurance (QA) costs for certain types of software, particularly systems infrastructure software, such as operating systems (OS), compilers and language processing tools, text and drawing editors, and middleware. This article presents two contributions to the study of open-source software processes. First, we describe key challenges of open-source software and illustrate how QA processes{\textemdash}specifically those tailored to open-source development{\textemdash}help mitigate these challenges better than traditional closed-source processes do. Second, we summarize results of empirical studies that evaluate how our Skoll distributed continuous quality assurance (DCQA) techniques and processes help to resolve key challenges in developing and validating open-source software. Our results show that: (a) using models to configure and guide the DCQA process improves developer understanding of open-source software, (b) improving the diversity of platform configurations helps QA engineers find defects missed during conventional testing, and (c) centralizing control of QA activities helps to eliminate redundant work.}, keywords = {distributed continuous quality assurance, open-source software development and testing}, isbn = {1099-1670}, doi = {10.1002/spip.260}, url = {http://onlinelibrary.wiley.com/doi/10.1002/spip.260/abstract}, author = {Porter, Adam and Yilmaz,Cemal and Memon, Atif M. and Krishna,Arvind S. and Schmidt,Douglas C.
and Gokhale,Aniruddha} } @article {16022, title = {Theory and Application of Self-Reference: Logic and Beyond}, journal = {Self-reference}, volume = {178}, year = {2006}, month = {2006///}, pages = {121 - 121}, author = {Perlis, Don} } @conference {14442, title = {Is there a grand challenge or X-prize for data mining?}, booktitle = {Proceedings of the 12th ACM SIGKDD international conference on Knowledge discovery and data mining}, series = {KDD {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {954 - 956}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This panel will discuss possible exciting and motivating Grand Challenge problems for Data Mining, focusing on bioinformatics, multimedia mining, link mining, text mining, and web mining.}, keywords = {Bioinformatics, data mining, grand challenge, image mining, link mining, multimedia mining, text mining, video mining, web mining, X-prize}, isbn = {1-59593-339-5}, doi = {10.1145/1150402.1150535}, url = {http://doi.acm.org/10.1145/1150402.1150535}, author = {Piatetsky-Shapiro,Gregory and Grossman,Robert and Djeraba,Chabane and Feldman,Ronen and Getoor, Lise and Zaki,Mohammed} } @conference {13339, title = {Topology-based reasoning on non-manifold shapes}, booktitle = {Proceedings of the 1st International Symposium on Shapes and Semantics, Matsushima, Japan}, year = {2006}, month = {2006///}, abstract = {Topological information is a promising resource for research in shape understanding, as it provides a high-level description of the characteristics of a shape, and such high-level description often has strong association with the semantics of an object. The Shape Acquisition and Processing (SAP) ontology has been designed to maintain useful information of a model. We propose here an extension to the SAP ontology that addresses the non-manifold properties of a model. Useful information about the connectivity of an object can be obtained based on an analysis of its non-manifold properties, because the structure of a non-manifold object can be considered as a graph of manifold parts connected together at non-manifold joints. The manifold parts are often pieces that have strong semantic associations. In this work, we describe the types of non-manifold properties, the various types of connected components in a non-manifold object and their semantic significance. We address how the Euler characteristic of a non-manifold object can be found based on such information. All such information is extractable from a model using TopMesh, a tool that we have developed. In this work, we also describe the features of TopMesh, namely, all the topological properties it extracts.}, author = {De Floriani, Leila and Hui,A. and Papaleo,L.} } @conference {16327, title = {Towards Dependability in Everyday Software Using Software Telemetry}, booktitle = {Engineering of Autonomic and Autonomous Systems, IEEE International Workshop on}, year = {2006}, month = {2006///}, pages = {9 - 18}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {Application-level software dependability is difficult to ensure. Thus it{\textquoteright}s typically used only in custom systems and is achieved using one-of-a-kind, handcrafted solutions. We are interested in understanding whether and how these techniques can be applied to more common, lower-end systems.
To this end, we have adapted a condition-based maintenance (CBM) approach called the Multivariate State Estimation Technique (MSET). This approach automatically creates sophisticated statistical models that predict system failure well before failures occur, leading to simpler and more successful recoveries. We have packaged this approach in the Software Dependability Framework (SDF). The SDF consists of instrumentation and data management libraries, a CBM module, performance visualization tools, and a software architecture that supports system designers. Finally, we evaluated our framework on a simple video game application. Our results suggest that we can cheaply and reliably predict impending runtime failures and respond to them in time to improve the system{\textquoteright}s dependability.}, isbn = {0-7695-2544-X}, doi = {http://doi.ieeecomputersociety.org/10.1109/EASE.2006.21}, author = {Gross,Kenny C. and Urmanov,Aleksey and Votta,Lawrence G. and McMaster,Scott and Porter, Adam} } @article {14585, title = {Transcriptional Genomics Associates FOX Transcription Factors With Human Heart Failure}, journal = {Circulation}, volume = {114}, year = {2006}, month = {2006///}, pages = {1269 - 1276}, abstract = {Background{\textemdash} Specific transcription factors (TFs) modulate cardiac gene expression in murine models of heart failure, but their relevance in human subjects remains untested. We developed and applied a computational approach called transcriptional genomics to test the hypothesis that a discrete set of cardiac TFs is associated with human heart failure. Methods and Results{\textemdash} RNA isolates from failing (n=196) and nonfailing (n=16) human hearts were hybridized with Affymetrix HU133A arrays, and differentially expressed heart failure genes were determined. TF binding sites overrepresented in the -5-kb promoter sequences of these heart failure genes were then determined with the use of public genome sequence databases. Binding sites for TFs identified in murine heart failure models (MEF2, NKX, NF-AT, and GATA) were significantly overrepresented in promoters of human heart failure genes (P<0.002; false discovery rate 2\% to 4\%). In addition, binding sites for FOX TFs showed substantial overrepresentation in both advanced human and early murine heart failure (P<0.002 and false discovery rate <4\% for each). A role for FOX TFs was supported further by expression of FOXC1, C2, P1, P4, and O1A in failing human cardiac myocytes at levels similar to established hypertrophic TFs and by abundant FOXP1 protein in failing human cardiac myocyte nuclei. Conclusions{\textemdash} Our results provide the first evidence that specific TFs identified in murine models (MEF2, NKX, NFAT, and GATA) are associated with human heart failure. Moreover, these data implicate specific members of the FOX family of TFs (FOXC1, C2, P1, P4, and O1A) not previously suggested in heart failure pathogenesis. These findings provide a crucial link between animal models and human disease and suggest a specific role for FOX signaling in modulating the hypertrophic response of the heart to stress in humans.}, doi = {10.1161/CIRCULATIONAHA.106.632430}, url = {http://circ.ahajournals.org/content/114/12/1269.abstract}, author = {Hannenhalli, Sridhar and Putt,Mary E. and Gilmore,Joan M. and Wang,Junwen and Parmacek,Michael S. and Epstein,Jonathan A. and Morrisey,Edward E. and Margulies,Kenneth B.
and Cappola,Thomas P.} } @article {16082, title = {TreePlus: Interactive Exploration of Networks with Enhanced Tree Layouts}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {12}, year = {2006}, month = {2006/12//nov}, pages = {1414 - 1426}, abstract = {Despite extensive research, it is still difficult to produce effective interactive layouts for large graphs. Dense layout and occlusion make food webs, ontologies and social networks difficult to understand and interact with. We propose a new interactive visual analytics component called TreePlus that is based on a tree-style layout. TreePlus reveals the missing graph structure with visualization and interaction while maintaining good readability. To support exploration of the local structure of the graph and gathering of information from the extensive reading of labels, we use a guiding metaphor of "plant a seed and watch it grow." It allows users to start with a node and expand the graph as needed, which complements the classic overview techniques that can be effective at (but often limited to) revealing clusters. We describe our design goals, describe the interface and report on a controlled user study with 28 participants comparing TreePlus with a traditional graph interface for six tasks. In general, the advantage of TreePlus over the traditional interface increased as the density of the displayed data increased. Participants also reported higher levels of confidence in their answers with TreePlus and most of them preferred TreePlus.}, keywords = {Algorithms, Computer Graphics, data visualisation, graph structure, graphical user interfaces, hidden feature removal, interactive systems, interactive visual analytics, occlusion, Pattern Recognition (Automated), Software, tree-style layout, trees (mathematics), User-Computer Interface}, isbn = {1077-2626}, doi = {10.1109/TVCG.2006.106}, author = {Lee,B. and Parr,C.S. and Plaisant, Catherine and Bederson, Benjamin B. and Veksler,V.D. and Gray,W.D. and Kotfila,C.} } @article {17518, title = {Using PlanetLab for network research}, journal = {ACM SIGOPS Operating Systems Review}, volume = {40}, year = {2006}, month = {2006/01/01/}, pages = {17 - 17}, isbn = {01635980}, doi = {10.1145/1113361.1113368}, url = {http://dl.acm.org/citation.cfm?id=1113368}, author = {Spring, Neil and Peterson,Larry and Bavier,Andy and Pai,Vivek} } @article {17480, title = {Using rhythms of relationships to understand e-mail archives}, journal = {Journal of the American Society for Information Science and Technology}, volume = {57}, year = {2006}, month = {2006/12/01/}, pages = {1936 - 1948}, abstract = {Due to e-mail{\textquoteright}s ubiquitous nature, millions of users are intimate with the technology; however, most users are only familiar with managing their own e-mail, which is an inherently different task from exploring an e-mail archive. Historians and social scientists believe that e-mail archives are important artifacts for understanding the individuals and communities they represent. To understand the conversations evidenced in an archive, context is needed. In this article, we present a new way to gain this necessary context: analyzing the temporal rhythms of social relationships. We provide methods for constructing meaningful rhythms from the e-mail headers by identifying relationships and interpreting their attributes.
With these visualization techniques, e-mail archive explorers can uncover insights that may have been otherwise hidden in the archive. We apply our methods to an individual{\textquoteright}s 15-year e-mail archive, which consists of about 45,000 messages and over 4,000 relationships.}, isbn = {1532-2890}, doi = {10.1002/asi.20387}, url = {http://onlinelibrary.wiley.com/doi/10.1002/asi.20387/abstract}, author = {Perer,Adam and Shneiderman, Ben and Oard, Douglas} } @conference {16083, title = {VAST 2006 Contest - A Tale of Alderwood}, booktitle = {Visual Analytics Science And Technology, 2006 IEEE Symposium On}, year = {2006}, month = {2006/11/31/2}, pages = {215 - 216}, abstract = {Visual analytics experts realize that one effective way to push the field forward and to develop metrics for measuring the performance of various visual analytics components is to hold an annual competition. The first visual analytics science and technology (VAST) contest was held in conjunction with the 2006 IEEE VAST Symposium. The competition entailed the identification of possible political shenanigans in the fictitious town of Alderwood. A synthetic data set was made available as well as tasks. We summarize how we prepared and advertised the contest, developed some initial metrics for evaluation, and selected the winners. The winners were invited to participate at an additional live competition at the symposium to provide them with feedback from senior analysts.}, keywords = {Alderwood, data analysis, data visualisation, human information interaction, sense making, visual analytics science and technology contest}, doi = {10.1109/VAST.2006.261420}, author = {Grinstein,G. and O{\textquoteright}Connell,T. and Laskowski,S. and Plaisant, Catherine and Scholtz,J. and Whiting,M.} } @inbook {12638, title = {Video Mensuration Using a Stationary Camera}, booktitle = {Computer Vision {\textendash} ECCV 2006}, series = {Lecture Notes in Computer Science}, volume = {3953}, year = {2006}, month = {2006///}, pages = {164 - 176}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper presents a method for video mensuration using a single stationary camera. The problem we address is simple, i.e., the mensuration of any arbitrary line segment on the reference plane using multiple frames with minimal calibration. Unlike previous solutions that are based on planar rectification, our approach is based on fitting the image of multiple concentric circles on the plane. Further, the proposed method aims to minimize the error in mensuration. Hence we can calculate the mensuration of the line segments not lying on the reference plane. Using an algorithm for detecting and tracking wheels of an automobile, we have implemented a fully automatic system for wheel base mensuration. The mensuration results are accurate enough that they can be used to determine the vehicle classes.
Furthermore, we measure the line segment between any two points on the vehicle and plot them in top and side views.}, isbn = {978-3-540-33836-9}, url = {http://dx.doi.org/10.1007/11744078_13}, author = {Guo,Feng and Chellapa, Rama}, editor = {Leonardis,Ale{\v s} and Bischof,Horst and Pinz,Axel} } @article {12627, title = {View Invariance for Human Action Recognition}, journal = {International Journal of Computer Vision}, volume = {66}, year = {2006}, month = {2006///}, pages = {83 - 101}, abstract = {This paper presents an approach for viewpoint invariant human action recognition, an area that has received scant attention so far, relative to the overall body of work in human action recognition. It has been established previously that there exist no invariants for 3D to 2D projection. However, there exists a wealth of techniques in 2D invariance that can be used to advantage in 3D to 2D projection. We exploit these techniques and model actions in terms of view-invariant canonical body poses and trajectories in 2D invariance space, leading to a simple and effective way to represent and recognize human actions from a general viewpoint. We first evaluate the approach theoretically and show why a straightforward application of the 2D invariance idea will not work. We describe strategies designed to overcome inherent problems in the straightforward approach and outline the recognition algorithm. We then present results on 2D projections of publicly available human motion capture data as well as on manually segmented real image sequences. In addition to robustness to viewpoint change, the approach is robust enough to handle different people, minor variabilities in a given action, and the speed of action (and hence, frame-rate) while encoding sufficient distinction among actions.}, isbn = {0920-5691}, url = {http://dx.doi.org/10.1007/s11263-005-3671-4}, author = {Parameswaran,Vasu and Chellapa, Rama} } @conference {13481, title = {Visualization Support for Fusing Relational, Spatio-Temporal Data: Building Career Histories}, booktitle = {Information Fusion, 2006 9th International Conference on}, year = {2006}, month = {2006/07//}, pages = {1 - 7}, publisher = {IEEE}, organization = {IEEE}, abstract = {Many real-world domains resist analysis because they are best characterized by a variety of data types, including relational, spatial, and temporal components. Examples of such domains include disease outbreaks, criminal networks, and the World-Wide Web. We present two types of visualizations based on physical metaphors that facilitate fusion, analysis, and deep understanding of relational, spatio-temporal data. The first visualization is based on the metaphor of fluid flow through elastic pipes, and the second on wave propagation.
We discuss both types of visualizations in the context of fusing information about the activities of scientists over time with the goal of constructing career histories}, isbn = {1-4244-0953-5, 0-9721844-6-5}, doi = {10.1109/ICIF.2006.301772}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4086058}, author = {Blythe,Jim and Patwardhan,Mithila and Oates,Tim and desJardins, Marie and Rheingans,Penny} } @inbook {16090, title = {Visualizing Graphs as Trees: Plant a Seed and Watch it Grow}, booktitle = {Graph Drawing}, series = {Lecture Notes in Computer Science}, volume = {3843}, year = {2006}, month = {2006///}, pages = {516 - 518}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {TreePlus is a graph browsing technique based on a tree-style layout. It shows the missing graph structure using interaction techniques and enables users to start with a specific node and incrementally explore the local structure of graphs. We believe that it supports particularly well tasks that require rapid reading of labels.}, isbn = {978-3-540-31425-7}, url = {http://dx.doi.org/10.1007/11618058_50}, author = {Lee,Bongshin and Parr,Cynthia and Plaisant, Catherine and Bederson, Benjamin B.}, editor = {Healy,Patrick and Nikolov,Nikola} } @inbook {19210, title = {Voting with Your Feet: An Investigative Study of the Relationship Between Place Visit Behavior and Preference}, booktitle = {UbiComp 2006: Ubiquitous Computing}, series = {Lecture Notes in Computer Science}, volume = {4206}, year = {2006}, month = {2006}, pages = {333 - 350}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Real world recommendation systems, personalized mobile search, and online city guides could all benefit from data on personal place preferences. However, collecting explicit rating data of locations as users travel from place to place is impractical. This paper investigates the relationship between explicit place ratings and implicit aspects of travel behavior such as visit frequency and travel time. We conducted a four-week study with 16 participants using a novel sensor-based experience sampling tool, called My Experience (Me), which we developed for mobile phones. Over the course of the study Me was used to collect 3,458 in-situ questionnaires on 1,981 place visits. Our results show that, first, sensor-triggered experience sampling is a useful methodology for collecting targeted information in situ. Second, despite the complexities underlying travel routines and visit behavior, there exist positive correlations between place preference and automatically detectable features like visit frequency and travel time. And, third, we found that when combined, visit frequency and travel time result in stronger correlations with place rating than when measured individually.
Finally, we found no significant difference in place ratings due to the presence of others.}, isbn = {978-3-540-39634-5}, url = {http://dx.doi.org/10.1007/11853565_20}, author = {Jon Froehlich and Chen,Mike and Smith,Ian and Potter,Fred}, editor = {Dourish,Paul and Friday,Adrian} } @inbook {14276, title = {Wavelet-Based Super-Resolution Reconstruction: Theory and Algorithm}, booktitle = {Computer Vision {\textendash} ECCV 2006}, series = {Lecture Notes in Computer Science}, volume = {3954}, year = {2006}, month = {2006///}, pages = {295 - 307}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We present a theoretical analysis and a new algorithm for the problem of super-resolution imaging: the reconstruction of HR (high-resolution) images from a sequence of LR (low-resolution) images. Super-resolution imaging entails solutions to two problems. One is the alignment of image frames. The other is the reconstruction of an HR image from multiple aligned LR images. Our analysis of the latter problem reveals insights into the theoretical limits of super-resolution reconstruction. We find that at best we can reconstruct an HR image blurred by a specific low-pass filter. Based on the analysis we present a new wavelet-based iterative reconstruction algorithm which is very robust to noise. Furthermore, it has a computationally efficient built-in denoising scheme with a nearly optimal risk bound. Roughly speaking, our method could be described as a better-conditioned iterative back-projection scheme with a fast and optimal regularization criterion in each iteration step. Experiments with both simulated and real data demonstrate that our approach has significantly better performance than existing super-resolution methods. It has the ability to remove even large amounts of mixed noise without creating smoothing artifacts.}, isbn = {978-3-540-33838-3}, url = {http://dx.doi.org/10.1007/11744085_23}, author = {Hui Ji and Ferm{\"u}ller, Cornelia}, editor = {Leonardis,Ale{\v s} and Bischof,Horst and Pinz,Axel} } @article {14363, title = {What are the grand challenges for data mining? - KDD-2006 panel report}, journal = {ACM SIGKDD Explorations Newsletter}, volume = {8}, year = {2006}, month = {2006///}, pages = {70 - 77}, abstract = {We discuss what makes exciting and motivating Grand Challenge problems for Data Mining, and propose criteria for a good Grand Challenge. We then consider possible GC problems from multimedia mining, link mining, large-scale modeling, text mining, and proteomics. This report is the result of a panel held at KDD-2006 conference.}, author = {Piatetsky-Shapiro,G. and Djeraba,C. and Getoor, Lise and Grossman,R. and Feldman,R.
and Zaki,M.} } @conference {16353, title = {What do high-level memory models mean for transactions?}, booktitle = {Proceedings of the 2006 workshop on Memory system performance and correctness - MSPC {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {62 - 62}, address = {San Jose, California}, doi = {10.1145/1178597.1178609}, url = {http://dl.acm.org/citation.cfm?id=1178609}, author = {Grossman,Dan and Manson,Jeremy and Pugh, William} } @inbook {12642, title = {What Is the Range of Surface Reconstructions from a Gradient Field?}, booktitle = {Computer Vision {\textendash} ECCV 2006}, series = {Lecture Notes in Computer Science}, volume = {3951}, year = {2006}, month = {2006///}, pages = {578 - 591}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We propose a generalized equation to represent a continuum of surface reconstruction solutions of a given non-integrable gradient field. We show that common approaches such as Poisson solver and Frankot-Chellappa algorithm are special cases of this generalized equation. For an N {\texttimes} N pixel grid, the subspace of all integrable gradient fields is of dimension $N^2 - 1$. Our framework can be applied to derive a range of meaningful surface reconstructions from this high dimensional space. The key observation is that the range of solutions is related to the degree of anisotropy in applying weights to the gradients in the integration process. While common approaches use isotropic weights, we show that by using a progression of spatially varying anisotropic weights, we can achieve significant improvement in reconstructions. We propose (a) α-surfaces using binary weights, where the parameter α allows a trade-off between smoothness and robustness, (b) M-estimators and edge preserving regularization using continuous weights and (c) Diffusion using affine transformation of gradients. We provide results on photometric stereo, compare with previous approaches and show that anisotropic treatment discounts noise while recovering salient features in reconstructions.}, isbn = {978-3-540-33832-1}, url = {http://dx.doi.org/10.1007/11744023_45}, author = {Agrawal,Amit and Raskar,Ramesh and Chellapa, Rama}, editor = {Leonardis,Ale{\v s} and Bischof,Horst and Pinz,Axel} } @inbook {12679, title = {3D Facial Pose Tracking in Uncalibrated Videos}, booktitle = {Pattern Recognition and Machine Intelligence}, series = {Lecture Notes in Computer Science}, volume = {3776}, year = {2005}, month = {2005///}, pages = {515 - 520}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper presents a method to recover the 3D configuration of a face in each frame of a video. The 3D configuration consists of the 3 translational parameters and the 3 orientation parameters which correspond to the yaw, pitch and roll of the face, which is important for applications like face modeling, recognition, expression analysis, etc. The approach combines the structural advantages of geometric modeling with the statistical advantages of a particle-filter based inference. The face is modeled as the curved surface of a cylinder which is free to translate and rotate arbitrarily. The geometric modeling takes care of pose and self-occlusion while the statistical modeling handles moderate occlusion and illumination variations.
Experimental results on multiple datasets are provided to show the efficacy of the approach. The insensitivity of our approach to calibration parameters (focal length) is also shown.}, isbn = {978-3-540-30506-4}, url = {http://dx.doi.org/10.1007/11590316_81}, author = {Aggarwal,Gaurav and Veeraraghavan,Ashok and Chellappa, Rama}, editor = {Pal,Sankar and Bandyopadhyay,Sanghamitra and Biswas,Sambhunath} } @inbook {19610, title = {3D Visualization of Semantic Metadata Models and Ontologies}, booktitle = {Graph Drawing}, series = {Lecture Notes in Computer Science}, year = {2005}, month = {2005/01/01/}, pages = {377 - 388}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We propose an algorithm for the 3D visualization of general ontology models used in many applications, such as the semantic web, entity-relationship diagrams and other database models. The visualization places entities in the 3D space. Previous techniques produce drawings that are 2-dimensional, which are often complicated and hard to comprehend. Our technique uses the third dimension almost exclusively for the display of the isa relationships (links) while the property relationships (links) are placed on some layer (plane). Thus the semantic difference between isa links and property links, which should be as vertical or as horizontal as possible, respectively, is emphasized. Special reference is made to a certain model, the CIDOC Conceptual Reference Model.}, keywords = {Algorithm Analysis and Problem Complexity, Computer Graphics, Data structures, Discrete Mathematics in Computer Science}, isbn = {978-3-540-24528-5, 978-3-540-31843-9}, url = {http://link.springer.com/chapter/10.1007/978-3-540-31843-9_38}, author = {Charalampos Papamanthou and Tollis, Ioannis G. and Doerr, Martin}, editor = {Pach, J{\'a}nos} } @article {17683, title = {Algorithm 844: Computing sparse reduced-rank approximations to sparse matrices}, journal = {ACM Transactions on Mathematical Software (TOMS)}, volume = {31}, year = {2005}, month = {2005///}, pages = {252 - 269}, author = {Berry,M. W and Pulatova,S. A and Stewart, G.W.} } @conference {17550, title = {Algorithmic aspects of capacity in wireless networks}, booktitle = {Proceedings of the 2005 ACM SIGMETRICS international conference on Measurement and modeling of computer systems}, series = {SIGMETRICS {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {133 - 144}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper considers two inter-related questions: (i) Given a wireless ad-hoc network and a collection of source-destination pairs {(s_i,t_i)}, what is the maximum throughput capacity of the network, i.e. the rate at which data from the sources to their corresponding destinations can be transferred in the network? (ii) Can network protocols be designed that jointly route the packets and schedule transmissions at rates close to the maximum throughput capacity? Much of the earlier work focused on random instances and proved analytical lower and upper bounds on the maximum throughput capacity. Here, in contrast, we consider arbitrary wireless networks. Further, we study the algorithmic aspects of the above questions: the goal is to design provably good algorithms for arbitrary instances.
We develop analytical performance evaluation models and distributed algorithms for routing and scheduling which incorporate fairness, energy and dilation (path-length) requirements and provide a unified framework for utilizing the network close to its maximum throughput capacity. Motivated by certain popular wireless protocols used in practice, we also explore "shortest-path like" path selection strategies which maximize the network throughput. The theoretical results naturally suggest an interesting class of congestion aware link metrics which can be directly plugged into several existing routing protocols such as AODV, DSR, etc. We complement the theoretical analysis with extensive simulations. The results indicate that routes obtained using our congestion aware link metrics consistently yield higher throughput than hop-count based shortest path metrics.}, keywords = {capacity modeling, end-to-end scheduling, Linear programming, Wireless networks}, isbn = {1-59593-022-1}, doi = {10.1145/1064212.1064228}, url = {http://doi.acm.org/10.1145/1064212.1064228}, author = {Kumar,V. S. Anil and Marathe,Madhav V. and Parthasarathy,Srinivasan and Srinivasan, Aravind} } @article {18752, title = {Algorithms for constructing 3-D point clouds using multiple digital fringe projection patterns}, journal = {Computer-Aided Design and Applications}, volume = {2}, year = {2005}, month = {2005///}, pages = {737 - 746}, abstract = {This paper describes algorithms for generating 3-D point clouds from a set of digital images obtained by projecting phase-shifted sinusoidal fringe patterns onto an object. In this paper, a mathematical model is introduced for describing the geometric relationship between the fringe patterns being projected, the image captured and the shape of the object being measured. This model allows considerable flexibility in the spatial configuration of the shape measurement system. The algorithms for point cloud construction described in this paper present an improvement over the existing algorithms in terms of accuracy, ease of system calibration, and sensitivity to parameter errors. These algorithms have been incorporated in a shape measurement system and shown to have a very good performance.}, url = {http://cadanda.homestead.com/V2No6_04.pdf}, author = {Peng,T. and Gupta,S.K. and Lau,K.} } @conference {17895, title = {And away we go: understanding the complexity of launching complex HPC applications}, booktitle = {Proceedings of the second international workshop on Software engineering for high performance computing system applications}, series = {SE-HPCS {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {45 - 49}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Although not well-studied, launching HPC applications is extremely complex. To better understand the launching process, we conducted a simple case study. Based in part on this study and an examination of existing toolkits, we have begun to develop a prototype environment to support HPC application launching.}, keywords = {development, high-performance, Productivity, software}, isbn = {1-59593-117-1}, doi = {10.1145/1145319.1145333}, url = {http://doi.acm.org/10.1145/1145319.1145333}, author = {Yoon,Il-Chul and Sussman, Alan and Porter, Adam} } @conference {14325, title = {Are GSM phones THE solution for localization?}, booktitle = {Proceedings of the
7th IEEE Workshop on Mobile Computing Systems and Applications (WMCSA {\textquoteright}06)}, year = {2005}, month = {2005///}, pages = {34 - 42}, author = {Varshavsky,A. and Chen,M.Y. and de Lara,E. and Jon Froehlich and Haehnel,D. and Hightower,J. and LaMarca,A. and Potter,F. and Sohn,T. and Tang,K. and others} } @conference {19027, title = {BIND: a fine-grained attestation service for secure distributed systems}, year = {2005}, month = {2005}, pages = {154 - 168}, abstract = {In this paper we propose BIND (binding instructions and data), a fine-grained attestation service for securing distributed systems. Code attestation has recently received considerable attention in trusted computing. However, current code attestation technology is relatively immature. First, due to the great variability in software versions and configurations, verification of the hash is difficult. Second, the time-of-use and time-of-attestation discrepancy remains to be addressed, since the code may be correct at the time of the attestation, but it may be compromised by the time of use. The goal of BIND is to address these issues and make code attestation more usable in securing distributed systems. BIND offers the following properties: (1) BIND performs fine-grained attestation. Instead of attesting to the entire memory content, BIND attests only to the piece of code we are concerned about. This greatly simplifies verification. (2) BIND narrows the gap between time-of-attestation and time-of-use. BIND measures a piece of code immediately before it is executed and uses a sandboxing mechanism to protect the execution of the attested code. (3) BIND ties the code attestation with the data that the code produces, such that we can pinpoint what code has been run to generate that data. In addition, by incorporating the verification of input data integrity into the attestation, BIND offers transitive integrity verification, i.e., through one signature, we can vouch for the entire chain of processes that have performed transformations over a piece of data. BIND offers a general solution toward establishing a trusted environment for distributed system designers.}, keywords = {BIND, binding instructions and data, code attestation, data integrity, digital signatures, distributed processing, fine-grained attestation service, input data integrity, program verification, sandboxing mechanism, secure distributed systems, signature, time-of-attestation, time-of-use, transitive integrity verification, trusted computing}, author = {Elaine Shi and Perrig, A. and Van Doorn, L.} } @article {16100, title = {Broadening Access to Large Online Databases by Generalizing Query Previews (2000)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {Companies, government agencies, and other types of organizations are making their large databases available to the world over the Internet. Current database front-ends do not give users information about the distribution of data. This leads many users to waste time and network resources posing queries that have either zero-hit or mega-hit result sets. Query previews form a novel visual approach for browsing large databases. Query previews supply data distribution information about the database that is being searched and give continuous feedback about the size of the result set for the query as it is being formed. On the other hand, query previews use only a few pre-selected attributes of the database. The distribution information is displayed only on these attributes.
Unfortunately, many databases are formed of numerous relations and attributes. This paper introduces a generalization of query previews. We allow users to browse all of the relations and attributes of a database using a hierarchical browser. Any of the attributes can be used to display the distribution information, making query previews applicable to many public online databases.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6474}, author = {Tanin,Egemen and Plaisant, Catherine and Shneiderman, Ben} } @article {16110, title = {The Challenge of Personal Information Management}, journal = {INTERACT{\textquoteright}05: Communicating Naturally through Computers (Adjunct Proceedings)}, year = {2005}, month = {2005///}, author = {Dix,A. and Jones,W. and Czerwinski,M. and Teevan,J. and Plaisant, Catherine and Moran,T.P.} } @conference {13277, title = {Clustering Techniques for Out-of-Core Multi-resolution Modeling}, booktitle = {Visualization Conference, IEEE}, year = {2005}, month = {2005///}, pages = {113 - 113}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {Thanks to improvements in simulation tools, high resolution scanning facilities and multidimensional medical imaging, huge datasets are commonly available. Multi-resolution models manage the complexity of such data sets by varying resolution and focusing detail in specific areas of interest. Since many currently available data sets cannot fit in main memory, the need arises to design data structures, construction and query algorithms for multi-resolution models which work in secondary memory.}, isbn = {0-7803-9462-3}, doi = {10.1109/VIS.2005.15}, author = {Danovaro,Emanuele and De Floriani, Leila and Puppo,Enrico and Samet, Hanan} } @article {14078, title = {Comparative Genomics of Trypanosomatid Parasitic Protozoa}, journal = {Science}, volume = {309}, year = {2005}, month = {2005/07/15/}, pages = {404 - 409}, abstract = {A comparison of gene content and genome architecture of Trypanosoma brucei, Trypanosoma cruzi, and Leishmania major, three related pathogens with different life cycles and disease pathology, revealed a conserved core proteome of about 6200 genes in large syntenic polycistronic gene clusters. Many species-specific genes, especially large surface antigen families, occur at nonsyntenic chromosome-internal and subtelomeric regions. Retroelements, structural RNAs, and gene family expansion are often associated with syntenic discontinuities that{\textemdash}along with gene divergence, acquisition and loss, and rearrangement within the syntenic regions{\textemdash}have shaped the genomes of each parasite. Contrary to recent reports, our analyses reveal no evidence that these species are descended from an ancestor that contained a photosynthetic endosymbiont.}, doi = {10.1126/science.1112181}, url = {http://www.sciencemag.org/content/309/5733/404.abstract}, author = {El-Sayed, Najib M. and Myler,Peter J. and Blandin,Ga{\"e}lle and Berriman,Matthew and Crabtree,Jonathan and Aggarwal,Gautam and Caler,Elisabet and Renauld,Hubert and Worthey,Elizabeth A. and Hertz-Fowler,Christiane and Ghedin,Elodie and Peacock,Christopher and Bartholomeu,Daniella C. and Haas,Brian J. and Tran,Anh-Nhi and Wortman,Jennifer R. and Alsmark,U. Cecilia M. and Angiuoli,Samuel and Anupama,Atashi and Badger,Jonathan and Bringaud,Frederic and Cadag,Eithon and Carlton,Jane M. and Cerqueira,Gustavo C.
and Creasy,Todd and Delcher,Arthur L. and Djikeng,Appolinaire and Embley,T. Martin and Hauser,Christopher and Ivens,Alasdair C. and Kummerfeld,Sarah K. and Pereira-Leal,Jose B. and Nilsson,Daniel and Peterson,Jeremy and Salzberg,Steven L. and Shallom,Joshua and Silva,Joana C. and Sundaram,Jaideep and Westenberger,Scott and White,Owen and Melville,Sara E. and Donelson,John E. and Andersson,Bj{\"o}rn and Stuart,Kenneth D. and Hall,Neil} } @article {17030, title = {Content Index to Volume 18}, journal = {International Journal of Human{\textendash}Computer Interaction}, volume = {18}, year = {2005}, month = {2005///}, pages = {367 - 368}, author = {Kuniavsky,M. and Vaughan,M. and Bederson, Benjamin B. and Shneiderman, Ben and Rau,P.L.P. and Lane,D.M. and Napier,H.A. and Peres,S.C. and S{\'a}ndor,A.} } @article {17797, title = {The CPR model for summarizing video}, journal = {Multimedia Tools and Applications}, volume = {26}, year = {2005}, month = {2005///}, pages = {153 - 173}, abstract = {Most past work on video summarization has been based on selecting key frames from videos. We propose a model of video summarization based on three important parameters: Priority (of frames), Continuity (of the summary), and non-Repetition (of the summary). In short, a summary must include high priority frames and must be continuous and non-repetitive. An optimal summary is one that maximizes an objective function based on these three parameters. We show examples of how CPR parameters can be computed and provide algorithms to find optimal summaries based on the CPR approach. Finally, we briefly report on the performance of these algorithms.}, doi = {10.1007/s11042-005-0451-7}, author = {Fayzullin,M. and V.S. Subrahmanian and Picariello, A. and Sapino,M. L.} } @article {12989, title = {Data sharing in ecology and evolution}, journal = {Trends in Ecology \& Evolution}, volume = {20}, year = {2005}, month = {2005/07//}, pages = {362 - 363}, isbn = {0169-5347}, doi = {10.1016/j.tree.2005.04.023}, url = {http://www.sciencedirect.com/science/article/pii/S0169534705001308}, author = {Parr,Cynthia S. and Cummings, Michael P.} } @book {16101, title = {Designing The User Interface}, year = {2005}, month = {2005///}, publisher = {Pearson Addison Wesley, USA}, organization = {Pearson Addison Wesley, USA}, author = {Shneiderman, Ben and Plaisant, Catherine} } @conference {19032, title = {Detection of denial-of-message attacks on sensor network broadcasts}, year = {2005}, month = {2005}, pages = {64 - 78}, abstract = {So far sensor network broadcast protocols assume a trustworthy environment. However, in safety- and mission-critical sensor networks, this assumption may not be valid and some sensor nodes might be adversarial. In these environments, malicious sensor nodes can deprive other nodes from receiving a broadcast message. We call this attack a denial-of-message attack (DoM). In this paper we model and analyze this attack, and present countermeasures. We present SIS, a secure implicit sampling scheme that permits a broadcasting base station to probabilistically detect the failure of nodes to receive its broadcast, even if these failures result from an attacker motivated to induce these failures undetectably. SIS works by eliciting authenticated acknowledgments from a subset of nodes per broadcast, where the subset is unpredictable to the attacker and tunable so as to mitigate acknowledgment implosion on the base station.
We use a game-theoretic approach to evaluate this scheme in the face of an optimal attacker that attempts to maximize the number of nodes it denies the broadcast while remaining undetected by the base station, and show that SIS significantly constrains such an attacker even in sensor networks exhibiting high intrinsic loss rates. We also discuss extensions that permit more targeted detection capabilities.}, keywords = {authenticated acknowledgments, broadcast channels, broadcast protocols, broadcasting base station, countermeasures, denial-of-message attacks, DoM, game theory, game-theoretic approach, malicious sensor nodes, Mobile computing, optimal attacker, probabilistic detection, probability, Protocols, Sampling methods, secure implicit sampling, sensor network broadcasts, SIS, telecommunication security, Wireless sensor networks}, author = {McCune, J.M. and Elaine Shi and Perrig, A. and Reiter, M.K.} } @conference {16309, title = {Distributed performance testing using statistical modeling}, booktitle = {Proceedings of the 1st international workshop on Advances in model-based testing}, year = {2005}, month = {2005///}, pages = {1 - 7}, author = {Karr,A. F and Porter, Adam} } @article {18699, title = {Diverse polyubiquitin interaction properties of ubiquitin-associated domains}, journal = {Nature Structural \& Molecular Biology}, volume = {12}, year = {2005}, month = {2005///}, pages = {708 - 714}, abstract = {The ubiquitin-associated (UBA) domain occurs frequently in proteins involved in ubiquitin-dependent signaling pathways. Although polyubiquitin chain binding is considered to be a defining feature of the UBA domain family, the generality of this property has not been established. Here we have surveyed the polyubiquitin interaction properties of 30 UBA domains, including 16 of 17 occurrences in budding yeast. The UBA domains sort into four classes that include linkage-selective polyubiquitin binders and domains that bind different chains (and monoubiquitin) in a nondiscriminatory manner; one notable class (30\%) did not bind any ubiquitin ligand surveyed. The properties of a given UBA domain are conserved from yeast to mammals. Their functional relevance is further suggested by the ability of an ectopic UBA domain to alter the specificity of a deubiquitylating enzyme in a predictable manner. 
Conversely, non-UBA sequences can modulate the interaction properties of a UBA domain.}, isbn = {1545-9993}, doi = {10.1038/nsmb962}, url = {http://www.nature.com/nsmb/journal/v12/n8/full/nsmb962.html}, author = {Raasi,Shahri and Varadan,Ranjani and Fushman, David and Pickart,Cecile M.} } @article {16295, title = {An empirical study of regression test application frequency}, journal = {Software Testing, Verification and Reliability}, volume = {15}, year = {2005}, month = {2005///}, pages = {257 - 279}, author = {Kim,J. M and Porter, Adam and Rothermel,G.} } @conference {16372, title = {Evaluating and tuning a static analysis to find null pointer bugs}, booktitle = {Proceedings of the 6th ACM SIGPLAN-SIGSOFT workshop on Program analysis for software tools and engineering}, year = {2005}, month = {2005///}, pages = {13 - 19}, author = {Hovemeyer,D. and Spacco,J. and Pugh, William} } @inbook {16105, title = {Evaluation Methodologies for Visual Analytics}, booktitle = {Illuminating the Path: The Research and Development Agenda for Visual Analytics}, year = {2005}, month = {2005///}, pages = {150 - 157}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, url = {http://nvac.pnl.gov/agenda.stm$\#$book}, author = {Laskowski,S. and Plaisant, Catherine}, editor = {Thomas,James and Cook,Kristin} } @article {16613, title = {Evolutionary discovery of arbitrary self-replicating structures}, journal = {Computational Science{\textendash}ICCS 2005}, year = {2005}, month = {2005///}, pages = {404 - 411}, author = {Pan,Z. and Reggia, James A.} } @article {16389, title = {EvoSTOC Contributions}, journal = {Applications of evolutionary computing: EvoWorkshops 2005, EvoBIO, EvoCOMNET, EvoHOT, EvoIASP, EvoMUSART, and EvoSTOC}, year = {2005}, month = {2005///}, author = {Merkle,D. and Middendorf,M. and Scheidler,A. and Avigad,G. and Moshaiov,A. and Brauner,N. and Parsopoulos,K.E. and Vrahatis,M.N. and Rand, William and Riolo,R} } @conference {18657, title = {An experimental evaluation to determine if port scans are precursors to an attack}, year = {2005}, month = {2005/07/01/june}, pages = {602 - 611}, abstract = {This paper describes an experimental approach to determine the correlation between port scans and attacks. Discussions in the security community often state that port scans should be considered as precursors to an attack. However, very few studies have been conducted to quantify the validity of this hypothesis. In this paper, attack data were collected using a test-bed dedicated to monitoring attackers.
The data collected consist of port scans, ICMP scans, vulnerability scans, successful attacks and management traffic. Two experiments were performed to validate the hypothesis of linking port scans and vulnerability scans to the number of packets observed per connection. Customized scripts were then developed to filter the collected data and group them on the basis of scans and attacks between a source and destination IP address pair. The correlation of the filtered data groups was assessed. The analyzed data consists of forty-eight days of data collection for two target computers on a heavily utilized subnet.}, keywords = {attack data collection, computer crime, filtered data groups, ICMP scans, IP address, IP networks, management traffic, port scans, telecommunication security, Telecommunication traffic, vulnerability scans}, doi = {10.1109/DSN.2005.18}, author = {Panjwani,S. and Tan,S. and Jarrin,K.M. and Michel Cukier} } @article {16111, title = {Extending the Utility of Treemaps with Flexible Hierarchy (2004)}, year = {2005}, month = {2005///}, abstract = {Treemaps is a visualization technique for presenting hierarchical information on two dimensional displays. Prior implementations limit the visualization to pre-defined static hierarchies. Flexible hierarchy, a new capability of Treemap 4.0, enables users to define various hierarchies through dynamically selecting a series of data attributes so that they can discover patterns, clusters and outliers. This paper describes the design and implementation issues of flexible hierarchy. It then reports on a usability study which led to enhancements to the interface.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6514}, author = {Chintalapani,Gouthami and Plaisant, Catherine and Shneiderman, Ben} } @conference {13194, title = {Extracting regions of symmetry}, booktitle = {Proceedings of the IEEE International Conference on Image Processing (ICIP 2005)}, volume = {3}, year = {2005}, month = {2005/09//}, pages = {III - 133-6}, abstract = {This paper presents an approach for extending the normalized-cut (n-cut) segmentation algorithm to find symmetric regions present in natural images. We use an existing algorithm to quickly detect possible symmetries present in an image. The detected symmetries are then individually verified using the modified n-cut algorithm to eliminate spurious detections. The weights of the n-cut algorithm are modified so as to include both symmetric and spatial affinities. A global parameter is defined to model the tradeoff between spatial coherence and symmetry. Experimental results indicate that the symmetric quality measure for a region segmented by our algorithm is a good indicator for the significance of the principal axis of symmetry.}, doi = {10.1109/ICIP.2005.1530346}, author = {Gupta,A. and Prasad,V. S.N and Davis, Larry S.} } @conference {13197, title = {Fast illumination-invariant background subtraction using two views: error analysis, sensor placement and applications}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR 2005)}, volume = {1}, year = {2005}, month = {2005/06//}, pages = {1071 - 1078 vol.
1}, abstract = {Background modeling and subtraction to detect new or moving objects in a scene is an important component of many intelligent video applications. Compared to a single camera, the use of multiple cameras leads to better handling of shadows, specularities and illumination changes due to the utilization of geometric information. Although the result of stereo matching can be used as the feature for detection, it has been shown that the detection process can be made much faster by a simple subtraction of the intensities observed at stereo-generated conjugate pairs in the two views. The methodology, however, suffers from false and missed detections due to some geometric considerations. In this paper, we perform a detailed analysis of such errors. Then, we propose a sensor configuration that eliminates false detections. Algorithms are also proposed that effectively eliminate most detection errors due to missed detections, specular reflections and objects being geometrically close to the background. Experiments on several scenes illustrate the utility and enhanced performance of the proposed approach compared to existing techniques.}, doi = {10.1109/CVPR.2005.155}, author = {Lim,Ser-Nam and Mittal,A. and Davis, Larry S. and Paragios,N.} } @article {15982, title = {Formal approaches to teamwork}, journal = {We Will Show Them: Essays in Honour of Dov Gabbay}, volume = {1}, year = {2005}, month = {2005///}, pages = {39 - 68}, author = {Grant,J. and Kraus,S. and Perlis, Don} } @article {12969, title = {A framework for set-oriented computation in inductive logic programming and its application in generalizing inverse entailment}, journal = {Inductive Logic Programming}, year = {2005}, month = {2005///}, pages = {69 - 86}, author = {Corrada Bravo, Hector and Page,D. and Ramakrishnan,R. and Shavlik,J. and Costa,V. S} } @inbook {12678, title = {Fusing Depth and Video Using Rao-Blackwellized Particle Filter}, booktitle = {Pattern Recognition and Machine Intelligence}, series = {Lecture Notes in Computer Science}, volume = {3776}, year = {2005}, month = {2005///}, pages = {521 - 526}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We address the problem of fusing sparse and noisy depth data obtained from a range finder with features obtained from intensity images to estimate ego-motion and refine 3D structure of a scene using a Rao-Blackwellized particle filter. For scenes with low depth variability, the algorithm shows an alternate way of performing Structure from Motion (SfM) starting with a flat depth map. Instead of using 3D depths, we formulate the problem using 2D image domain parallax and show that conditioned on non-linear motion parameters, the parallax magnitude with respect to the projection of the vanishing point forms a linear subsystem independent of camera motion and their distributions can be analytically integrated. Thus, the structure is obtained by estimating parallax with respect to the given depths using a Kalman filter and only the ego-motion is estimated using a particle filter.
Hence, the required number of particles becomes independent of the number of feature points, which is an improvement over previous algorithms. Experimental results on both synthetic and real data show the effectiveness of our approach.}, isbn = {978-3-540-30506-4}, url = {http://dx.doi.org/10.1007/11590316_82}, author = {Agrawal,Amit and Chellappa, Rama}, editor = {Pal,Sankar and Bandyopadhyay,Sanghamitra and Biswas,Sambhunath} } @conference {18875, title = {Game-tree search with combinatorially large belief states}, volume = {19}, year = {2005}, month = {2005///}, pages = {254 - 254}, abstract = {In games such as kriegspiel chess (a chess variant where players have no direct knowledge of the opponent{\textquoteright}s pieces{\textquoteright} locations) the belief state{\textquoteright}s sizes dwarf those of other partial information games like bridge, scrabble, and poker{\textendash}and there is no easy way to generate states satisfying the given observations. We show that statistical sampling approaches can be developed to do well in such games. We show that it is not necessary for the random sample to consist only of game boards that satisfy each and every one of a player{\textquoteright}s observations. In fact, we win 24\% more often by beginning with such completely consistent boards and gradually switching (as the game progresses) to boards that are merely consistent with the latest observation. This surprising result is explained by noting that as the game progresses, a board that is consistent with the last move becomes more and more likely to be consistent with the entire set of observations, even if we have no idea what sequence of moves might have actually generated this board.}, url = {http://www.ijcai.org/papers/0878.pdf}, author = {Parker,A. and Nau, Dana S. and V.S. Subrahmanian} } @article {16291, title = {The Genome Sequence of Trypanosoma cruzi, Etiologic Agent of Chagas Disease}, journal = {Science}, volume = {309}, year = {2005}, month = {2005/07/15/}, pages = {409 - 415}, abstract = {Whole-genome sequencing of the protozoan pathogen Trypanosoma cruzi revealed that the diploid genome contains a predicted 22,570 proteins encoded by genes, of which 12,570 represent allelic pairs. Over 50\% of the genome consists of repeated sequences, such as retrotransposons and genes for large families of surface molecules, which include trans-sialidases, mucins, gp63s, and a large novel family (>1300 copies) of mucin-associated surface protein (MASP) genes. Analyses of the T. cruzi, T. brucei, and Leishmania major (Tritryp) genomes imply differences from other eukaryotes in DNA repair and initiation of replication and reflect their unusual mitochondrial DNA. Although the Tritryp lack several classes of signaling molecules, their kinomes contain a large and diverse set of protein kinases and phosphatases; their size and diversity imply previously unknown interactions and regulatory processes, which may be targets for intervention.}, isbn = {0036-8075, 1095-9203}, doi = {10.1126/science.1112631}, url = {http://www.sciencemag.org/content/309/5733/409}, author = {El-Sayed, Najib M. and Myler,Peter J. and Bartholomeu,Daniella C. and Nilsson,Daniel and Aggarwal,Gautam and Tran,Anh-Nhi and Ghedin,Elodie and Worthey,Elizabeth A. and Delcher,Arthur L. and Blandin,Ga{\"e}lle and Westenberger,Scott J. and Caler,Elisabet and Cerqueira,Gustavo C.
and Branche,Carole and Haas,Brian and Anupama,Atashi and Arner,Erik and {\r A}slund,Lena and Attipoe,Philip and Bontempi,Esteban and Bringaud,Fr{\'e}d{\'e}ric and Burton,Peter and Cadag,Eithon and Campbell,David A. and Carrington,Mark and Crabtree,Jonathan and Darban,Hamid and da Silveira,Jose Franco and de Jong,Pieter and Edwards,Kimberly and Englund,Paul T. and Fazelina,Gholam and Feldblyum,Tamara and Ferella,Marcela and Frasch,Alberto Carlos and Gull,Keith and Horn,David and Hou,Lihua and Huang,Yiting and Kindlund,Ellen and Klingbeil,Michele and Kluge,Sindy and Koo,Hean and Lacerda,Daniela and Levin,Mariano J. and Lorenzi,Hernan and Louie,Tin and Machado,Carlos Renato and McCulloch,Richard and McKenna,Alan and Mizuno,Yumi and Mottram,Jeremy C. and Nelson,Siri and Ochaya,Stephen and Osoegawa,Kazutoyo and Pai,Grace and Parsons,Marilyn and Pentony,Martin and Pettersson,Ulf and Pop, Mihai and Ramirez,Jose Luis and Rinta,Joel and Robertson,Laura and Salzberg,Steven L. and Sanchez,Daniel O. and Seyler,Amber and Sharma,Reuben and Shetty,Jyoti and Simpson,Anjana J. and Sisk,Ellen and Tammi,Martti T. and Tarleton,Rick and Teixeira,Santuza and Van Aken,Susan and Vogt,Christy and Ward,Pauline N. and Wickstead,Bill and Wortman,Jennifer and White,Owen and Fraser,Claire M. and Stuart,Kenneth D. and Andersson,Bj{\"o}rn} }
@conference {13335, title = {The half-edge tree: a compact data structure for level-of-detail tetrahedral meshes}, booktitle = {Proceedings of the 2005 International Conference on Shape Modeling and Applications}, year = {2005}, month = {2005/06//}, pages = {332 - 337}, abstract = {We propose a new data structure for the compact encoding of a level-of-detail (LOD) model of a three-dimensional scalar field based on unstructured tetrahedral meshes. Such a data structure, called a half-edge tree (HET), is built through the iterative application of a half-edge collapse, i.e. by contracting an edge to one of its endpoints. We also show that selectively refined meshes extracted from an HET contain on average about 34\% and up to 75\% fewer tetrahedra than those extracted from an LOD model built through a general edge collapse.}, doi = {10.1109/SMI.2005.47}, author = {Danovaro,E. and De Floriani, Leila and Magillo,P. and Puppo,E. and Sobrero,D. and Sokolovsky,N.} } @conference {18872, title = {A hierarchical task-network planner based on symbolic model checking}, year = {2005}, month = {2005///}, pages = {300 - 309}, abstract = {Although several approaches have been developed for planning in nondeterministic domains, solving large planning problems is still quite difficult. In this work, we present a novel algorithm, called YoYo, for planning in nondeterministic domains under the assumption of full observability. This algorithm enables us to combine the power of search-control strategies as in Planning with Hierarchical Task Networks (HTNs) with techniques from the Planning via Symbolic Model-Checking (SMC). Our experimental evaluation confirms the potentialities of our approach, demonstrating that it combines the advantages of these paradigms.}, url = {https://www.aaai.org/Papers/ICAPS/2005/ICAPS05-031.pdf}, author = {Kuter,U. and Nau, Dana S. and Pistore,M. and Traverso,P.} } @article {12658, title = {Human action-recognition using mutual invariants}, journal = {Computer Vision and Image Understanding}, volume = {98}, year = {2005}, month = {2005/05//}, pages = {294 - 324}, abstract = {Static and temporally varying 3D invariants are proposed for capturing the spatio-temporal dynamics of a general human action to enable its representation in a compact, view-invariant manner.
Two variants of the representation are presented and studied: (1) a restricted-3D version, whose theory and implementation are simple and efficient but which can be applied only to a restricted class of human action, and (2) a full-3D version, whose theory and implementation are more complex but which can be applied to any general human action. A detailed analysis of the two representations is presented. We show why a straightforward implementation of the key ideas does not work well in the general case, and present strategies designed to overcome inherent weaknesses in the approach. What results is an approach for human action modeling and recognition that is not only invariant to viewpoint, but is also robust enough to handle different people, different speeds of action (and hence, frame rate) and minor variabilities in a given action, while encoding sufficient distinction among actions. Results on 2D projections of human motion capture and on manually segmented real image sequences demonstrate the effectiveness of the approach.}, keywords = {Human action-recognition, Model based invariants, Mutual invariants, View invariance}, isbn = {1077-3142}, doi = {10.1016/j.cviu.2004.09.002}, url = {http://www.sciencedirect.com/science/article/pii/S107731420400147X}, author = {Parameswaran,Vasu and Chellappa, Rama} } @conference {16099, title = {"I hear the pattern": interactive sonification of geographical data patterns}, booktitle = {CHI {\textquoteright}05 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {1905 - 1908}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Interactive sonification (non-speech sound) is a novel strategy to present the geographical distribution patterns of statistical data to vision impaired users. We discuss the design space with dimensions of interaction actions, data representation forms, input devices, navigation structures, and sound feedback encoding. Two interfaces were designed, one using a keyboard and another using a smooth surface touch tablet. A study with three blind users shows that they are able to perceive patterns of 5-category values on both familiar and unknown maps, and learn new map geography, in both interfaces.}, keywords = {auditory user interfaces, information seeking, sonification, universal usability, vision impairment}, isbn = {1-59593-002-7}, doi = {10.1145/1056808.1057052}, url = {http://doi.acm.org/10.1145/1056808.1057052}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben} } @article {16107, title = {Immediate Usability: A Case Study of Public Access Design for a Community Photo Library (2003)}, year = {2005}, month = {2005///}, abstract = {This paper describes a novel instantiation of a digital photo library in a public access system. It demonstrates how designers can utilize characteristics of a target user community (social constraints, trust, and a lack of anonymity) to provide capabilities that would be impractical in other types of public access systems. It also presents a compact set of design principles and guidelines for ensuring the immediate usability of public access information systems. These principles and guidelines were derived from our experience developing PhotoFinder Kiosk, a community photo library.
Attendees of a major HCI conference (CHI 2001 Conference on Human Factors in Computing Systems) successfully used the tool to browse and annotate collections of photographs spanning 20 years of HCI-related conferences, producing a richly annotated photo history of the field of human-computer interaction. Observations and log data were used to evaluate the tool and develop the guidelines. They provide specific guidance for practitioners, as well as a useful framework for additional research in public access interfaces.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6504}, author = {Kules,Bill and Kang,Hyunmo and Plaisant, Catherine and Rose,Anne and Shneiderman, Ben} } @article {16106, title = {Immediate Usability: Kiosk design principles from the CHI 2001 Photo Library (2001)}, year = {2005}, month = {2005///}, abstract = {This paper describes a novel set of design principles and guidelines for ensuring the immediate usability of public access systems. These principles and guidelines were formulated while developing PhotoFinder Kiosk, a community photo library. Attendees of CHI 2001 successfully used the tool to browse and annotate collections of photographs spanning 20 years of CHI and related conferences, producing a richly annotated photo history of the field of human-computer interaction. We used observations and log data to evaluate the tool and refine the guidelines. They provide specific guidance for practitioners, as well as a useful framework for additional research in public access interfaces.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6488}, author = {Kules,Bill and Kang,Hyunmo and Plaisant, Catherine and Rose,Anne and Shneiderman, Ben} } @article {17223, title = {Improving Accessibility and Usability of Geo-referenced Statistical Data (2003)}, year = {2005}, month = {2005///}, abstract = {Several technology breakthroughs are needed to achieve the goals of universal accessibility and usability. These goals are especially challenging in the case of geo-referenced statistical data that many U.S. government agencies supply. We present technical and user-interface design challenges in accommodating users with low-end technology (slow network connection and low-end machine) and users who are blind or vision-impaired. Our solutions are presented and future work is discussed.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6502}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben} } @article {18910, title = {Information gathering during planning for Web Service composition}, journal = {Web Semantics: Science, Services and Agents on the World Wide Web}, volume = {3}, year = {2005}, month = {2005/10//}, pages = {183 - 205}, abstract = {Hierarchical task network (HTN) based planning techniques have been applied to the problem of composing Web Services, especially when described using the OWL-S service ontologies. Many of the existing Web Services are either exclusively information providing or crucially depend on information-providing services. Thus, many interesting service compositions involve collecting information either during execution or during the composition process itself. In this paper, we focus on the latter issue. In particular, we present ENQUIRER, an HTN-planning algorithm designed for planning domains in which the information about the initial state of the world may not be complete, but it is discoverable through plan-time information-gathering queries.
We have shown that ENQUIRER is sound and complete, and derived several mathematical relationships among the amount of available information, the likelihood of the planner finding a plan, and the quality of the plan found. We have performed experimental tests that confirmed our theoretical results and that demonstrated how ENQUIRER can be used for Web Service composition.}, keywords = {HTN planning, Information gathering, Web Service composition}, isbn = {1570-8268}, doi = {10.1016/j.websem.2005.07.001}, url = {http://www.sciencedirect.com/science/article/pii/S1570826805000168}, author = {Kuter,Ugur and Sirin,Evren and Parsia,Bijan and Nau, Dana S. and Hendler,James} } @inbook {16103, title = {Information Visualization and the Challenge of Universal Usability}, booktitle = {Exploring Geovisualization}, year = {2005}, month = {2005///}, pages = {53 - 82}, publisher = {Elsevier}, organization = {Elsevier}, address = {Oxford}, abstract = {Information Visualization aims to provide compact graphical presentations and user interfaces for interactively manipulating large numbers of items. We present a simple {\textquotedblleft}data by tasks taxonomy{\textquotedblright}, then discuss the challenges of providing universal usability, with example applications using geo-referenced data. Information Visualization has been shown to be a powerful visual thinking or decision tool but it is becoming important for services to reach and empower every citizen. Technological advances are needed to deal with user diversity (age, language, disabilities, etc.) but also with the variety of technology used (screen size, network speed, etc.) and the gaps in user{\textquoteright}s knowledge (general knowledge, knowledge of the application domain, of the interface syntax or semantics). We present examples that illustrate how those challenges can be addressed.}, isbn = {978-0-08-044531-1}, url = {http://www.sciencedirect.com/science/article/pii/B9780080445311504218}, author = {Plaisant, Catherine}, editor = {Jason Dykes and Alan M. MacEachren and Menno-Jan Kraak} } @article {17255, title = {Interactive pattern search in time series}, journal = {Proceedings of SPIE}, volume = {5669}, year = {2005}, month = {2005/03/11/}, pages = {175 - 186}, abstract = {The need for pattern discovery in long time series data led researchers to develop algorithms for similarity search. Most of the literature about time series focuses on algorithms that index time series and bring the data into the main storage, thus providing fast information retrieval on large time series. This paper reviews the state of the art in visualizing time series, and focuses on techniques that enable users to visually and interactively query time series. Then, it presents TimeSearcher 2, a tool that enables users to explore multidimensional data using synchronized tables and graphs with overview+detail, filter the time series data to reduce the scope of the search, select an existing pattern to find similar occurrences, and interactively adjust similarity parameters to narrow the result set.
This tool is an extension of previous work, TimeSearcher 1, which uses graphical timeboxes to interactively query time series data.}, isbn = {0277-786X}, doi = {10.1117/12.587537}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/5669/1/175_1?isAuthorized=no}, author = {Buono,Paolo and Aris,Aleks and Plaisant, Catherine and Khella,Amir and Shneiderman, Ben} } @article {17256, title = {Interactive Pattern Search in Time Series (2004)}, year = {2005}, month = {2005///}, abstract = {The need for pattern discovery in long time series data led researchers to develop algorithms for similarity search. Most of the literature about time series focuses on algorithms that index time series and bring the data into the main storage, thus providing fast information retrieval on large time series. This paper reviews the state of the art in visualizing time series, and focuses on techniques that enable users to interactively query time series. Then it presents TimeSearcher 2, a tool that enables users to explore multidimensional data using coordinated tables and graphs with overview+detail, filter the time series data to reduce the scope of the search, select an existing pattern to find similar occurrences, and interactively adjust similarity parameters to narrow the result set. This tool is an extension of previous work, TimeSearcher 1, which uses graphical timeboxes to interactively query time series data.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6519}, author = {Buono,Paolo and Aris,Aleks and Plaisant, Catherine and Khella,Amir and Shneiderman, Ben} } @article {17258, title = {Interactive sonification of choropleth maps}, journal = {IEEE Multimedia}, volume = {12}, year = {2005}, month = {2005/06//April}, pages = {26 - 35}, abstract = {Auditory information is an important channel for the visually impaired. Effective sonification (the use of non-speech audio to convey information) promotes equal working opportunities for people with vision impairments by helping them explore data collections for problem solving and decision making. Interactive sonification systems can make georeferenced data accessible to people with vision impairments. The authors compare methods for using sound to encode georeferenced data patterns and for navigating maps.}, keywords = {audio signal processing, audio user interfaces, Auditory (non-speech) feedback, auditory information, cartography, choropleth maps, data collections, decision making, Evaluation, Feedback, georeferenced data, Guidelines, handicapped aids, Hardware, HUMANS, information resources, interaction style, Interactive sonification, interactive systems, Navigation, nonspeech audio, problem solving, Problem-solving, sound, universal usability, US Government, User interfaces, vision impairments, World Wide Web}, isbn = {1070-986X}, doi = {10.1109/MMUL.2005.28}, author = {Zhao,Haixia and Smith,B. K and Norman,K. and Plaisant, Catherine and Shneiderman, Ben} } @article {16095, title = {InterSon: Interactive Sonification for Geo-referenced Data Exploration for the Vision Impaired}, journal = {Tech Report HCIL-2005-13}, year = {2005}, month = {2005///}, abstract = {InterSon is an interactive sonification tool that allows vision impaired users to explore complex geo-referenced statistical data for fact finding, problem solving and decision making. Examples include maps of population density, crime rates or housing prices.
The integrated use of sounds and speech allows users to hear the overall distribution of values on maps and to explore the map to get more details. Users can use the standard computer keyboard, or take advantage of special devices such as a touchpad when they are available. Synchronized auditory and visual displays allow the use of residual vision and facilitate collaboration with sighted colleagues.}, author = {Zhao,H. and Plaisant, Catherine} } @article {16017, title = {Introduction to the special review issue}, journal = {Artificial Intelligence}, volume = {169}, year = {2005}, month = {2005///}, pages = {103 - 103}, author = {Perlis, Don and Norvig,P.} } @conference {16098, title = {iSonic: interactive sonification for non-visual data exploration}, booktitle = {Proceedings of the 7th international ACM SIGACCESS conference on Computers and accessibility}, series = {Assets {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {194 - 195}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {iSonic is an interactive sonification tool for vision impaired users to explore geo-referenced statistical data, such as population or crime rates by geographical regions. Users use a keyboard or a smooth surface touchpad to interact with coordinated map and table views of the data. The integrated use of musical sounds and speech allows users to grasp the overall data trends and to explore the data to get more details. Scenarios of use are described.}, keywords = {auditory user interfaces, information seeking, sonification, universal usability, vision impairment}, isbn = {1-59593-159-7}, doi = {10.1145/1090785.1090826}, url = {http://doi.acm.org/10.1145/1090785.1090826}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben} } @conference {16384, title = {The Java memory model}, booktitle = {Proceedings of the 32nd ACM SIGPLAN-SIGACT symposium on Principles of programming languages}, series = {POPL {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {378 - 391}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper describes the new Java memory model, which has been revised as part of Java 5.0. The model specifies the legal behaviors for a multithreaded program; it defines the semantics of multithreaded Java programs and partially determines legal implementations of Java virtual machines and compilers.The new Java model provides a simple interface for correctly synchronized programs -- it guarantees sequential consistency to data-race-free programs. Its novel contribution is requiring that the behavior of incorrectly synchronized programs be bounded by a well defined notion of causality. The causality requirement is strong enough to respect the safety and security properties of Java and weak enough to allow standard compiler and hardware optimizations. To our knowledge, other models are either too weak because they do not provide for sufficient safety/security, or are too strong because they rely on a strong notion of data and control dependences that precludes some standard compiler transformations.Although the majority of what is currently done in compilers is legal, the new model introduces significant differences, and clearly defines the boundaries of legal transformations. 
For example, the commonly accepted definition for control dependence is incorrect for Java, and transformations based on it may be invalid. In addition to providing the official memory model for Java, we believe the model described here could prove to be a useful basis for other programming languages that currently lack well-defined models, such as C++ and C$\#$.}, keywords = {concurrency, java, memory model, Multithreading}, isbn = {1-58113-830-X}, doi = {10.1145/1040305.1040336}, url = {http://doi.acm.org/10.1145/1040305.1040336}, author = {Manson,Jeremy and Pugh, William and Adve,Sarita V.} } @article {17282, title = {Listening to Maps: User Evaluation of Interactive Sonifications of Geo-Referenced Data (2004)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {In this paper, we summarize the Auditory Information Seeking Principle (AISP) (gist, navigate, filter, and details-on-demand). To improve blind access to geo-referenced statistical data, we developed several interactive sonifications, adhering to the above AISP. Two user studies are presented. In the first user study with nine sighted subjects, a preliminary map design is compared with an enhanced table design. The study shows subjects can recognize geographic data distribution patterns on a real map with 51 geographic regions, in both designs. The map-based design was strongly preferred. The study also shows evidence that AISP conforms to people{\textquoteright}s information seeking strategies. Based on the observations from the first user study, a second user study was conducted with forty-eight sighted subjects comparing four map designs. The effects of using sound to encode vertical geographic positions and two map navigation methods were compared. The result is presented and future work is discussed.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6518}, author = {Zhao,Haixia and Smith,Benjamin K and Norman,Kent L and Plaisant, Catherine and Shneiderman, Ben} } @article {15990, title = {Logic, self-awareness and self-improvement: The metacognitive loop and the problem of brittleness}, journal = {Journal of Logic and Computation}, volume = {15}, year = {2005}, month = {2005///}, pages = {21 - 21}, author = {Anderson,M. L and Perlis, Don} } @article {15956, title = {A logic-based model of intention formation and action for multi-agent subcontracting}, journal = {Artificial Intelligence}, volume = {163}, year = {2005}, month = {2005/04//}, pages = {163 - 201}, abstract = {We present a formalism for representing the formation of intentions by agents engaged in cooperative activity. We use a syntactic approach presenting a formal logical calculus that can be regarded as a meta-logic that describes the reasoning and activities of the agents. Our central focus is on the evolving intentions of agents over time, and the conditions under which an agent can adopt and maintain an intention. In particular, the reasoning time and the time taken to subcontract are modeled explicitly in the logic. We axiomatize the concept of agent interactions in the meta-language, show that the meta-theory is consistent and describe the unique intended model of the meta-theory. In this context we deal both with subcontracting between agents and the presence of multiple recipes, that is, multiple ways of accomplishing tasks. We show that under various initial conditions and known facts about agent beliefs and abilities, the meta-theory representation yields good results.}, keywords = {Cooperative agents, intentions, Minimal model semantics, Subcontracting, Syntactic logic}, isbn = {0004-3702}, doi = {10.1016/j.artint.2004.11.003}, url = {http://www.sciencedirect.com/science/article/pii/S0004370204001924}, author = {Grant,John and Kraus,Sarit and Perlis, Don} } @conference {15453, title = {Main effects screening: a distributed continuous quality assurance process for monitoring performance degradation in evolving software systems}, booktitle = {Proceedings of the 27th international conference on Software engineering}, series = {ICSE {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {293 - 302}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Developers of highly configurable performance-intensive software systems often use a type of in-house performance-oriented "regression testing" to ensure that their modifications have not adversely affected their software{\textquoteright}s performance across its large configuration space.
Unfortunately, time and resource constraints often limit developers to in-house testing of a small number of configurations and unreliable extrapolation from these results to the entire configuration space, which allows many performance bottlenecks and sources of QoS degradation to escape detection until systems are fielded. To improve performance assessment of evolving systems across large configuration spaces, we have developed a distributed continuous quality assurance (DCQA) process called main effects screening that uses in-the-field resources to execute formally designed experiments to help reduce the configuration space, thereby allowing developers to perform more targeted in-house QA. We have evaluated this process via several feasibility studies on several large, widely-used performance-intensive software systems. Our results indicate that main effects screening can detect key sources of performance degradation in large-scale systems with significantly less effort than conventional techniques.}, keywords = {design of experiment theory, distributed continuous quality assurance, performance-oriented regression testing}, isbn = {1-58113-963-2}, doi = {10.1145/1062455.1062515}, url = {http://doi.acm.org/10.1145/1062455.1062515}, author = {Yilmaz,Cemal and Krishna,Arvind S. and Memon, Atif M. and Porter, Adam and Schmidt,Douglas C. and Gokhale,Aniruddha and Natarajan,Balachandran} } @article {19499, title = {MEAD: support for Real-Time Fault-Tolerant CORBA}, journal = {Concurrency and Computation: Practice and Experience}, volume = {17}, year = {2005}, month = {2005///}, pages = {1527 - 1545}, abstract = {The OMG{\textquoteright}s Real-Time CORBA (RT-CORBA) and Fault-Tolerant CORBA (FT-CORBA) specifications make it possible for today{\textquoteright}s CORBA implementations to exhibit either real-time or fault tolerance in isolation. While real-time requires a priori knowledge of the system{\textquoteright}s temporal operation, fault tolerance necessarily deals with faults that occur unexpectedly, and with possibly unpredictable fault recovery times. The MEAD (Middleware for Embedded Adaptive Dependability) system attempts to identify and to reconcile the conflicts between real-time and fault tolerance, in a resource-aware manner, for distributed CORBA applications. MEAD supports transparent yet tunable fault tolerance in real-time, proactive dependability, resource-aware system adaptation to crash, communication and timing faults with bounded fault detection and fault recovery. Copyright {\textcopyright} 2005 John Wiley \& Sons, Ltd.}, keywords = {CORBA, Fault tolerance, non-determinism, predictability, real-time, recovery, trade-offs}, isbn = {1532-0634}, url = {http://onlinelibrary.wiley.com/doi/10.1002/cpe.882/abstract}, author = {Narasimhan, P. and Tudor Dumitras and Paulos, A. M. and Pertet, S. M. and Reverte, C. F. and Slember, J. G. and Srivastava, D.} } @inbook {14707, title = {Merging Network Measurement with Data Transport}, booktitle = {Passive and Active Network Measurement}, series = {Lecture Notes in Computer Science}, volume = {3431}, year = {2005}, month = {2005///}, pages = {368 - 371}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The tasks of measurement and data transport are often treated independently, but we believe there are benefits to bringing them together.
This paper proposes the simple idea of a transport agent to encapsulate useful data within probe packets in place of useless padding.}, isbn = {978-3-540-25520-8}, url = {http://dx.doi.org/10.1007/978-3-540-31966-5_36}, author = {Papageorgiou,Pavlos and Hicks, Michael W.}, editor = {Dovrolis,Constantinos} } @conference {15924, title = {Metacognition for dropping and reconsidering intentions}, booktitle = {AAAI Spring Symposium on Metacognition in Computation}, year = {2005}, month = {2005///}, author = {Josyula,D. P and Anderson,M. L and Perlis, Don} } @article {13707, title = {A methodology for extrinsic evaluation of text summarization: Does ROUGE correlate}, journal = {Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, year = {2005}, month = {2005///}, pages = {1 - 8}, abstract = {This paper demonstrates the usefulness of summaries in an extrinsic task of relevance judgment based on a new method for measuring agreement, Relevance-Prediction, which compares subjects{\textquoteright} judgments on summaries with their own judgments on full text documents. We demonstrate that, because this measure is more reliable than previous gold-standard measures, we are able to make stronger statistical statements about the benefits of summarization. We found positive correlations between ROUGE scores and two different summary types, where only weak or negative correlations were found using other agreement measures. However, we show that ROUGE may be sensitive to the choice of summarization style. We discuss the importance of these results and the implications for future summarization evaluations.}, author = {Dorr, Bonnie J and Monz,C. and President,S. and Schwartz,R. and Zajic, David} } @article {13307, title = {Morse-Smale decompositions for modeling terrain knowledge}, journal = {Spatial Information Theory}, year = {2005}, month = {2005///}, pages = {426 - 444}, abstract = {In this paper, we describe, analyze and compare techniques for extracting spatial knowledge from a terrain model. Specifically, we investigate techniques for extracting a morphological representation from a terrain model based on an approximation of a Morse-Smale complex. A Morse-Smale complex defines a decomposition of a topographic surface into regions with vertices at the critical points and bounded by integral lines which connect passes to pits and peaks. This provides a terrain representation which encompasses the knowledge on the salient characteristics of the terrain. We classify the various techniques for computing a Morse-Smale complex based on the underlying terrain model, a Regular Square Grid (RSG) or a Triangulated Irregular Network (TIN), and based on the algorithmic approach they apply. Finally, we discuss hierarchical terrain representations based on a Morse-Smale decomposition.}, doi = {10.1007/11556114_27}, author = {{\v C}omi{\'c},L. and De Floriani, Leila and Papaleo,L.} } @inbook {12670, title = {Moving Object Detection and Compression in IR Sequences}, booktitle = {Computer Vision Beyond the Visible Spectrum}, series = {Advances in Pattern Recognition}, year = {2005}, month = {2005///}, pages = {141 - 165}, publisher = {Springer London}, organization = {Springer London}, abstract = {We consider the problem of remote surveillance using infrared (IR) sensors.
The aim is to use IR image sequences to detect moving objects (humans or vehicles), and to transmit a few {\textquotedblleft}best-view images{\textquotedblright} of every new object that is detected. Since the available bandwidth is usually low, if the object chip is big, it needs to be compressed before being transmitted. Due to the low computational power of computing devices attached to the sensor, the algorithms should be computationally simple. We present two approaches for object detection {\textemdash} one which specifically solves the more difficult long-range object detection problem, and the other for objects at short range. For objects at short range, we also present techniques for selecting a single best-view object chip and computationally simple techniques for compressing it to very low bit rates due to the channel bandwidth constraint. A fast image chip compression scheme implemented in the wavelet domain by combining a non-iterative zerotree coding method with 2D-DPCM for both low- and high-frequency subbands is presented. Comparisons with some existing schemes are also included. The object detection and compression algorithms have been implemented in C/C++ and their performance has been evaluated using Hitachi{\textquoteright}s SH4 platform with software simulation.}, isbn = {978-1-84628-065-8}, url = {http://dx.doi.org/10.1007/1-84628-065-6_5}, author = {Vaswani,Namrata and Agrawal,Amit and Qinfen Zheng and Chellapa, Rama}, editor = {Bhanu,Bir and Pavlidis,Ioannis} } @conference {13310, title = {Multi-resolution out-of-core modeling of terrain and geological data}, booktitle = {Proceedings of the 13th annual ACM international workshop on Geographic information systems}, series = {GIS {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {143 - 152}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Multi-resolution is a useful tool for managing the complexity of huge terrain and geological data sets. Since encoding large data sets may easily exceed main memory capabilities, data structures and algorithms capable of efficiently working in external memory are needed. In our work, we aim at developing a dimension-independent out-of-core multi-resolution model that can be used both for terrains, represented by Triangulated Irregular Networks (TINs), and for 3D data, such as geological data, represented by tetrahedral meshes. We have based our approach on a general multi-resolution model, which we have proposed in our previous work, and which supports the extraction of variable-resolution representations. As a first step, we have developed, in a prototype simulation system, a large number of clustering techniques for the modifications in a multi-resolution model. Here, we describe such techniques, and analyze and evaluate them experimentally.
The result of this investigation has led us to select a specific clustering approach as the basis for an efficient out-of-core data structure.}, keywords = {Multi-resolution, out-of-core, terrain models}, isbn = {1-59593-146-5}, doi = {10.1145/1097064.1097085}, url = {http://doi.acm.org/10.1145/1097064.1097085}, author = {Danovaro,Emanuele and De Floriani, Leila and Puppo,Enrico and Samet, Hanan} } @article {19609, title = {A parametric visualization software for the assignment problem}, journal = {Yugoslav Journal of Operations Research}, volume = {15}, year = {2005}, month = {2005///}, pages = {147 - 158}, isbn = {0354-0243}, url = {http://www.doiserbia.nb.rs/Article.aspx?id=0354-02430501147P\&AspxAutoDetectCookieSupport=1}, author = {Charalampos Papamanthou and Paparrizos, Konstantinos and Samaras, Nikolaos} } @article {16398, title = {Path dependence and the validation of agent-based spatial models of land use}, journal = {International Journal of Geographical Information Science}, volume = {19}, year = {2005}, month = {2005///}, pages = {153 - 174}, abstract = {In this paper, we identify two distinct notions of accuracy of land-use models and highlight a tension between them. A model can have predictive accuracy: its predicted land-use pattern can be highly correlated with the actual land-use pattern. A model can also have process accuracy: the process by which locations or land-use patterns are determined can be consistent with real world processes. To balance these two potentially conflicting motivations, we introduce the concept of the invariant region, i.e., the area where land-use type is almost certain, and thus path independent; and the variant region, i.e., the area where land use depends on a particular series of events, and is thus path dependent. We demonstrate our methods using an agent-based land-use model and using multi-temporal land-use data collected for Washtenaw County, Michigan, USA. The results indicate that, using the methods we describe, researchers can improve their ability to communicate how well their model performs, the situations or instances in which it does not perform well, and the cases in which it is relatively unlikely to predict well because of either path dependence or stochastic uncertainty.}, isbn = {1365-8816}, doi = {10.1080/13658810410001713399}, url = {http://www.tandfonline.com/doi/abs/10.1080/13658810410001713399}, author = {Brown,Daniel G. and Page,Scott and Riolo,Rick and Zellner,Moira and Rand, William} } @article {12933, title = {Pathogenic Vibrio species in the marine and estuarine environment}, journal = {Oceans and health: pathogens in the marine environment}, year = {2005}, month = {2005///}, pages = {217 - 252}, abstract = {The genus Vibrio includes more than 30 species, at least 12 of which are pathogenic to humans and/or have been associated with foodborne diseases (Chakraborty et al., 1997). Among these species, Vibrio cholerae, serogroups O1 and O139, are the most important, since they are associated with epidemic and pandemic diarrhea outbreaks in many parts of the world (Centers for Disease Control and Prevention, 1995; Kaper et al., 1995).
However, other species of vibrios capable of causing diarrheal disease in humans have received greater attention in the last decade. These include Vibrio parahaemolyticus, a leading cause of foodborne disease outbreaks in Japan and Korea (Lee et al., 2001), Vibrio vulnificus, Vibrio alginolyticus, Vibrio damsela, Vibrio fluvialis, Vibrio furnissii, Vibrio hollisae, Vibrio metschnikovii, and Vibrio mimicus (Altekruse et al., 2000; H{\o}i et al., 1997). In the USA, Vibrio species have been estimated to be the cause of about 8000 illnesses annually (Mead et al., 1999).}, doi = {10.1007/0-387-23709-7_9}, author = {Pruzzo,C. and Huq,A. and Rita R Colwell and Donelli,G.} } @inbook {12676, title = {Pattern Recognition in Video}, booktitle = {Pattern Recognition and Machine Intelligence}, series = {Lecture Notes in Computer Science}, volume = {3776}, year = {2005}, month = {2005///}, pages = {11 - 20}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Images constitute data that live in a very high dimensional space, typically of the order of hundred thousand dimensions. Drawing inferences from correlated data of such high dimensions often becomes intractable. Therefore traditionally several of these problems like face recognition, object recognition, scene understanding etc. have been approached using techniques in pattern recognition. Such methods in conjunction with methods for dimensionality reduction have been highly popular and successful in tackling several image processing tasks. Of late, the advent of cheap, high quality video cameras has generated new interest in extending still image-based recognition methodologies to video sequences. The added temporal dimension in these videos makes problems like face and gait-based human recognition, event detection, activity recognition addressable. Our research has focussed on solving several of these problems through a pattern recognition approach. Of course, in video streams patterns refer to both patterns in the spatial structure of image intensities around interest points and temporal patterns that arise either due to camera motion or object motion. In this paper, we discuss the applications of pattern recognition in video to problems like face and gait-based human recognition, behavior classification, activity recognition and activity based person identification.}, isbn = {978-3-540-30506-4}, url = {http://dx.doi.org/10.1007/11590316_2}, author = {Chellapa, Rama and Veeraraghavan,Ashok and Aggarwal,Gaurav}, editor = {Pal,Sankar and Bandyopadhyay,Sanghamitra and Biswas,Sambhunath} } @article {19039, title = {Pioneer: verifying code integrity and enforcing untampered code execution on legacy systems}, journal = {ACM SIGOPS Operating Systems Review}, volume = {39}, year = {2005}, month = {2005}, pages = {1 - 16}, abstract = {We propose a primitive, called Pioneer, as a first step towards verifiable code execution on untrusted legacy hosts. Pioneer does not require any hardware support such as secure co-processors or CPU-architecture extensions. We implement Pioneer on an Intel Pentium IV Xeon processor. Pioneer can be used as a basic building block to build security systems.
We demonstrate this by building a kernel rootkit detector.}, keywords = {dynamic root of trust, rootkit detection, self-checksumming code, software-based code attestation, verifiable code execution}, isbn = {0163-5980}, url = {http://doi.acm.org/10.1145/1095809.1095812}, author = {Seshadri, Arvind and Luk, Mark and Elaine Shi and Perrig, Adrian and van Doorn, Leendert and Khosla, Pradeep} } @article {18462, title = {Processing of reverberant speech for time-delay estimation}, journal = {IEEE Transactions on Speech and Audio Processing}, volume = {13}, year = {2005}, month = {2005/11//}, pages = {1110 - 1118}, abstract = {In this paper, we present a method of extracting the time-delay between speech signals collected at two microphone locations. Time-delay estimation from microphone outputs is the first step for many sound localization algorithms, and also for enhancement of speech. For time-delay estimation, speech signals are normally processed using short-time spectral information (either magnitude or phase or both). The spectral features are affected by degradations in speech caused by noise and reverberation. Features corresponding to the excitation source of the speech production mechanism are robust to such degradations. We show that these source features can be extracted reliably from the speech signal. The time-delay estimate can be obtained using the features extracted even from short segments (50-100 ms) of speech from a pair of microphones. The proposed method for time-delay estimation is found to perform better than the generalized cross-correlation (GCC) approach. A method for enhancement of speech is also proposed using the knowledge of the time-delay and the information of the excitation source.}, keywords = {Acoustic noise, acoustic signal processing, array signal processing, data mining, Degradation, delay estimation, Feature extraction, Hilbert envelope, localization algorithm, microphone arrays, microphone location, Microphones, Phase estimation, reverberation, short-time spectral information, Signal processing, source features, source information excitation, speech enhancement, Speech processing, speech production mechanism, speech signal, time-delay, time-delay estimation}, isbn = {1063-6676}, doi = {10.1109/TSA.2005.853005}, author = {Yegnanarayana,B. and Prasanna,S. R.M and Duraiswami, Ramani and Zotkin,Dmitry N} } @conference {15994, title = {On the reasoning of real-world agents: Toward a semantics for active logic}, booktitle = {7-th Annual Symposium on the Logical Formalization of Commonsense Reasoning}, year = {2005}, month = {2005///}, author = {Anderson,M. L and Gomaa,W. and Grant,J. and Perlis, Don} } @inbook {16114, title = {Representing Unevenly-Spaced Time Series Data for Visualization and Interactive Exploration}, booktitle = {Human-Computer Interaction - INTERACT 2005}, series = {Lecture Notes in Computer Science}, volume = {3585}, year = {2005}, month = {2005///}, pages = {835 - 846}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Visualizing time series is useful to support discovery of relations and patterns in financial, genomic, medical and other applications. Often, measurements are equally spaced over time. We discuss the challenges of unevenly-spaced time series and present four representation methods: sampled events, aggregated sampled events, event index and interleaved event index.
We developed these methods while studying eBay auction data with TimeSearcher. We describe the advantages, disadvantages, choices for algorithms and parameters, and compare the different methods for different tasks. Interaction issues such as screen resolution, response time for dynamic queries, and learnability are governed by these decisions.}, isbn = {978-3-540-28943-2}, url = {http://dx.doi.org/10.1007/11555261_66}, author = {Aris,Aleks and Shneiderman, Ben and Plaisant, Catherine and Shmueli,Galit and Jank,Wolfgang}, editor = {Costabile,Maria and Patern{\`o},Fabio} } @article {16113, title = {Representing Unevenly-Spaced Time Series Data for Visualization and Interactive Exploration (2005)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {Visualizing time series data is useful to support discovery of relations and patterns in financial, genomic, medical and other applications. In most time series, measurements are equally spaced over time. This paper discusses the challenges for unevenly-spaced time series data and presents four methods to represent them: sampled events, aggregated sampled events, event index and interleaved event index. We developed these methods while studying eBay auction data with TimeSearcher. We describe the advantages, disadvantages, choices for algorithms and parameters, and compare the different methods. Since each method has its advantages, this paper provides guidance for choosing the right combination of methods, algorithms, and parameters to solve a given problem for unevenly-spaced time series. Interaction issues such as screen resolution, response time for dynamic queries, and meaning of the visual display are governed by these decisions.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6537}, author = {Aris,Aleks and Shneiderman, Ben and Plaisant, Catherine and Shmueli,Galit and Jank,Wolfgang} } @article {16094, title = {Robotic apparatus and wireless communication system}, volume = {10/085,821}, year = {2005}, month = {2005/05/17/}, abstract = {A robotic apparatus and system adapted to communicate with a wireless sensor. The apparatus may be either physical or virtual in nature and is adapted to communicate physical movements with a wireless sensor. Data received from the sensor and/or robotic apparatus may be reviewed in a real-time mode, or may be saved for review at a later time. In addition, the apparatus may be controlled through an operator that is in local or remote communication with the apparatus. The robotic system may include pre-programmed interactive platforms for enabling communication between a user and the apparatus in a dynamic mode. In addition, the system may allow an operator to program a game/story for use as an interactive platform. Accordingly, the apparatus and system provides a platform for rehabilitative exercise of a patient as well as an entertainment device.}, url = {http://www.google.com/patents?id=5-0VAAAAEBAJ}, author = {Lathan,Corinna E. and Tracey,Michael R. and Vice,Jack M. and Druin, Allison and Plaisant, Catherine} } @article {15939, title = {The roots of self-awareness}, journal = {Phenomenology and the Cognitive Sciences}, volume = {4}, year = {2005}, month = {2005///}, pages = {297 - 333}, author = {Anderson,M. L and Perlis, Don} } @inbook {17656, title = {Scheduling on Unrelated Machines Under Tree-Like Precedence Constraints}, booktitle = {Approximation, Randomization and Combinatorial Optimization. 
Algorithms and Techniques}, series = {Lecture Notes in Computer Science}, volume = {3624}, year = {2005}, month = {2005///}, pages = {609 - 609}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We present polylogarithmic approximations for the $R\,|\,prec\,|\,C_{\max}$ and $R\,|\,prec\,|\,\sum_j w_j C_j$ problems, when the precedence constraints are {\textquotedblleft}treelike{\textquotedblright} {\textendash} i.e., when the undirected graph underlying the precedences is a forest. We also obtain improved bounds for the weighted completion time and flow time for the case of chains with restricted assignment {\textendash} this generalizes the job shop problem to these objective functions. We use the same lower bound of {\textquotedblleft}congestion+dilation{\textquotedblright}, as in other job shop scheduling approaches. The first step in our algorithm for the $R\,|\,prec\,|\,C_{\max}$ problem with treelike precedences involves using the algorithm of Lenstra, Shmoys and Tardos to obtain a processor assignment with the congestion + dilation value within a constant factor of the optimal. We then show how to generalize the random delays technique of Leighton, Maggs and Rao to the case of trees. For the weighted completion time, we show a certain type of reduction to the makespan problem, which dovetails well with the lower bound we employ for the makespan problem. For the special case of chains, we show a dependent rounding technique which leads to improved bounds on the weighted completion time and new bicriteria bounds for the flow time.}, isbn = {978-3-540-28239-6}, url = {http://dx.doi.org/10.1007/11538462_13}, author = {Kumar, V. and Marathe,Madhav and Parthasarathy,Srinivasan and Srinivasan, Aravind}, editor = {Chekuri,Chandra and Jansen,Klaus and Rolim,Jos{\'e} and Trevisan,Luca} } @article {16285, title = {Serendipitous discovery of Wolbachia genomes in multiple Drosophila species}, journal = {Genome Biology}, volume = {6}, year = {2005}, month = {2005/02/22/}, pages = {R23 - R23}, abstract = {The Trace Archive is a repository for the raw, unanalyzed data generated by large-scale genome sequencing projects. The existence of this data offers scientists the possibility of discovering additional genomic sequences beyond those originally sequenced. In particular, if the source DNA for a sequencing project came from a species that was colonized by another organism, then the project may yield substantial amounts of genomic DNA, including near-complete genomes, from the symbiotic or parasitic organism.}, isbn = {1465-6906}, doi = {10.1186/gb-2005-6-3-r23}, url = {http://genomebiology.com/2005/6/3/R23}, author = {Salzberg,Steven L. and Hotopp,Julie CD and Delcher,Arthur L. and Pop, Mihai and Smith,Douglas R. and Eisen,Michael B. and Nelson,William C.} } @conference {17359, title = {Show Me! Guidelines for producing recorded demonstrations}, booktitle = {2005 IEEE Symposium on Visual Languages and Human-Centric Computing}, year = {2005}, month = {2005/09/20/24}, pages = {171 - 178}, publisher = {IEEE}, organization = {IEEE}, abstract = {Although recorded demonstrations (screen capture animations with narration) have become a popular form of instruction for user interfaces, little work has been done to describe guidelines for their design.
Based on our experience in several projects, we offer a starting set of guidelines for the design of visually appealing and cognitively effective recorded demonstrations. Technical guidelines encourage users to keep file sizes small, strive for universal usability, and ensure user control, and they provide tips to achieve those goals. Content guidelines include: create short demonstrations that focus on tasks, highlight each step with auditory and visual cues, synchronize narration and animation carefully, and create demonstrations with a clear beginning, middle, and end.}, keywords = {animation, auditory cues, Computer aided instruction, Computer animation, Computer Graphics, Computer interfaces, computer literacy, content guidelines, documentation, Government, Graphical user interfaces, Guidelines, Laboratories, narration, recorded demonstrations, screen capture animation, technical guidelines, usability, User interfaces, visual appeal, visual cues}, isbn = {0-7695-2443-5}, doi = {10.1109/VLHCC.2005.57}, author = {Plaisant, Catherine and Shneiderman, Ben} } @article {16102, title = {Show Me! Guidelines for Producing Recorded Demonstrations (2005)}, year = {2005}, month = {2005///}, abstract = {Although recorded demonstrations (screen capture animations with narration) have become a popular form of instruction for user interfaces, little work has been done to describe guidelines for their design. Based on our experience in several projects, we offer a starting set of guidelines for the design of recorded demonstrations. Technical guidelines encourage users to keep file sizes small, strive for universal usability, and ensure user control, and they provide tips to achieve those goals. Content guidelines include: create short demonstrations that focus on tasks, highlight each step with auditory and visual cues, synchronize narration and animation carefully, and create demonstrations with a clear beginning, middle, and end.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6533}, author = {Plaisant, Catherine and Shneiderman, Ben} } @article {17841, title = {A simulation and data analysis system for large-scale, data-driven oil reservoir simulation studies}, journal = {Concurrency and Computation: Practice and Experience}, volume = {17}, year = {2005}, month = {2005///}, pages = {1441 - 1467}, author = {Kurc, T. and Catalyurek,U. and Zhang,X. and Saltz, J. and Martino,R. and Wheeler,M. and Peszy{\'n}ska,M. and Sussman, Alan and Hansen,C. and Sen,M. and others} } @article {16357, title = {Software repository mining with Marmoset: an automated programming project snapshot and testing system}, journal = {ACM SIGSOFT Software Engineering Notes}, volume = {30}, year = {2005}, month = {2005///}, pages = {1 - 5}, author = {Spacco,J. and Strecker,J. and Hovemeyer,D. and Pugh, William} } @article {14023, title = {Speaker Localization Using Excitation Source Information in Speech}, journal = {Speech and Audio Processing, IEEE Transactions on}, volume = {13}, year = {2005}, month = {2005/09//}, pages = {751 - 761}, abstract = {This paper presents the results of simulation and real room studies for localization of a moving speaker using information about the excitation source of speech production. The first step in localization is the estimation of time-delay from speech collected by a pair of microphones. Methods for time-delay estimation generally use spectral features that correspond mostly to the shape of the vocal tract during speech production.
Spectral features are affected by degradations due to noise and reverberation. This paper proposes a method for localizing a speaker using features that arise from the excitation source during speech production. Experiments were conducted by simulating different noise and reverberation conditions to compare the performance of the time-delay estimation and source localization using the proposed method with the results obtained using the spectrum-based generalized cross correlation (GCC) methods. The results show that the proposed method yields a smaller number of discrepancies in the estimated time-delays. The bias, variance and root mean square error (RMSE) of the proposed method are consistently equal to or smaller than those of the GCC methods. The locations of a moving speaker estimated using the time-delays obtained by the proposed method are closer to the actual values than those obtained by the GCC method.}, keywords = {excitation source information, generalized cross correlation, root mean square error, speaker localization, speech processing, speech production, time-delay estimation}, isbn = {1063-6676}, doi = {10.1109/TSA.2005.851907}, author = {Raykar,V.C. and Yegnanarayana,B. and Prasanna,S. R.M and Duraiswami, Ramani} } @article {18718, title = {Structural Determinants for Selective Recognition of a Lys48-Linked Polyubiquitin Chain by a UBA Domain}, journal = {Molecular Cell}, volume = {18}, year = {2005}, month = {2005/06/10/}, pages = {687 - 698}, abstract = {Although functional diversity in polyubiquitin chain signaling has been ascribed to the ability of differently linked chains to bind in a distinctive manner to effector proteins, structural models of such interactions have been lacking. Here, we use NMR to unveil the structural basis of selective recognition of Lys48-linked di- and tetraubiquitin chains by the UBA2 domain of hHR23A. Although the interaction of UBA2 with Lys48-linked diubiquitin involves the same hydrophobic surface on each ubiquitin unit as that utilized in monoubiquitin:UBA complexes, our results show how the {\textquotedblleft}closed{\textquotedblright} conformation of Lys48-linked diubiquitin is crucial for high-affinity binding. Moreover, recognition of Lys48-linked diubiquitin involves a unique epitope on UBA, which allows the formation of a sandwich-like diubiquitin:UBA complex. Studies of the UBA-tetraubiquitin interaction suggest that this mode of UBA binding to diubiquitin is relevant for longer chains.}, isbn = {1097-2765}, doi = {10.1016/j.molcel.2005.05.013}, url = {http://www.sciencedirect.com/science/article/pii/S1097276505013195}, author = {Varadan,Ranjani and Assfalg,Michael and Raasi,Shahri and Pickart,Cecile and Fushman, David} } @inbook {14739, title = {Tagged Sets: A Secure and Transparent Coordination Medium}, booktitle = {Coordination Models and Languages}, series = {Lecture Notes in Computer Science}, volume = {3454}, year = {2005}, month = {2005///}, pages = {193 - 205}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {A simple and effective way of coordinating distributed, mobile, and parallel applications is to use a virtual shared memory (VSM), such as a Linda tuple-space. In this paper, we propose a new kind of VSM, called a tagged set. Each element in the VSM is a value with an associated tag, and values are read or removed from the VSM by matching the tag.
Tagged sets exhibit three properties useful for VSMs: (1) Ease of use. A tagged value naturally corresponds to the notion that data has certain attributes, expressed by the tag, which can be used for later retrieval. (2) Flexibility. Tags are implemented as propositional logic formulae, and selection as logical implication, so the resulting system is quite powerful. Tagged sets naturally support a variety of applications, such as shared data repositories (e.g., for media or e-mail), message passing, and publish/subscribe algorithms; they are powerful enough to encode existing VSMs, such as Linda spaces. (3) Security. Our notion of tags naturally corresponds to keys, or capabilities: a user may not select data in the set unless she presents a legal key or keys. Normal tags correspond to symmetric keys, and we introduce asymmetric tags that correspond to public and private key pairs. Treating tags as keys permits users to easily specify protection criteria for data at a fine granularity. This paper motivates our approach, sketches its basic theory, and places it in the context of other data management strategies.}, isbn = {978-3-540-25630-4}, url = {http://dx.doi.org/10.1007/11417019_17}, author = {Oriol,Manuel and Hicks, Michael W.}, editor = {Jacquet,Jean-Marie and Picco,Gian} } @article {12932, title = {Temperature-Driven Campylobacter Seasonality in England and Wales}, journal = {Applied and Environmental Microbiology}, volume = {71}, year = {2005}, month = {2005/01/01/}, pages = {85 - 92}, abstract = {Campylobacter incidence in England and Wales between 1990 and 1999 was examined in conjunction with weather conditions. Over the 10-year interval, the average annual rate was determined to be 78.4 {\textpm} 15.0 cases per 100,000, with an upward trend. Rates were higher in males than in females, regardless of age, and highest in children less than 5 years old. Major regional differences were detected, with the highest rates in Wales and the southwest and the lowest in the southeast. The disease displayed a seasonal pattern, and increased campylobacter rates were found to be correlated with temperature. The most marked seasonal effect was observed for children under the age of 5. The seasonal pattern of campylobacter infections indicated a linkage with environmental factors rather than food sources. Therefore, public health interventions should not be restricted to food-borne approaches, and the epidemiology of the seasonal peak in human campylobacter infections may best be understood through studies in young children.}, isbn = {0099-2240, 1098-5336}, doi = {10.1128/AEM.71.1.85-92.2005}, url = {http://aem.asm.org/content/71/1/85}, author = {Louis,Val{\'e}rie R. and Gillespie,Iain A. and O{\textquoteright}Brien,Sarah J. and Russek-Cohen,Estelle and Pearson,Andrew D. and Rita R Colwell} } @article {16104, title = {Toward a Statistical Knowledge Network (2004)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {Statistics support planning and decision making and enormous efforts are made to collect data and produce statistics at all levels of governance. An important principle of democratic societies is that government statistics should be accessible to the broadest possible constituencies to empower better plans and decisions in all aspects of life.
Given the potential of near-ubiquitous Internet access in homes and workplaces and efforts by government agencies to mount websites, physical access to large volumes of government statistics is close to a fait accompli. What remains a significant challenge is enabling access to the right statistical information at the right time and in the right form.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6522}, author = {Marchionini,Gary and Haas,Stephanie and Plaisant, Catherine and Shneiderman, Ben and Hert,Carol} } @article {16109, title = {Visualizing missing data: classification and empirical study}, journal = {Proceedings of INTERACT}, year = {2005}, month = {2005///}, pages = {861 - 872}, abstract = {Most visualization tools fail to provide support for missing data. We identify sources of missing data, and categorize data visualization techniques based on the impact missing data have on the display: region dependent, attribute dependent, and neighbor dependent. We then report on a user study with 30 participants that compared three design variants. A between-subject graph interpretation study provides strong evidence for the need of indicating the presence of missing information, and some direction for addressing the problem.}, author = {Eaton,C. and Plaisant, Catherine and Drizd,T.} } @inbook {16108, title = {Visualizing Missing Data: Graph Interpretation User Study}, booktitle = {Human-Computer Interaction - INTERACT 2005}, series = {Lecture Notes in Computer Science}, volume = {3585}, year = {2005}, month = {2005///}, pages = {861 - 872}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Most visualization tools fail to provide support for missing data. In this paper, we identify sources of missing data and describe three levels of impact missing data can have on the visualization: perceivable, invisible or propagating. We then report on a user study with 30 participants that compared three design variants. A between-subject graph interpretation study provides strong evidence for the need of indicating the presence of missing information, and some direction for addressing the problem.}, isbn = {978-3-540-28943-2}, url = {http://dx.doi.org/10.1007/11555261_68}, author = {Eaton,Cyntrica and Plaisant, Catherine and Drizd,Terence}, editor = {Costabile,Maria and Patern{\`o},Fabio} } @conference {16003, title = {Active logic for more effective human-computer interaction and other commonsense applications}, booktitle = {Proceedings of the Workshop Empirically Successful First-Order Reasoning, International Joint Conference on Automated Reasoning}, year = {2004}, month = {2004///}, author = {Anderson,M. L and Josyula,D. and Perlis, Don and Purang,K.} } @conference {14559, title = {Adaptive limited-supply online auctions}, booktitle = {Proceedings of the 5th ACM Conference on Electronic Commerce}, year = {2004}, month = {2004///}, pages = {71 - 80}, author = {Hajiaghayi, Mohammad T. and Kleinberg,R. and Parkes,D. C} } @article {16392, title = {Agent-based and analytical modeling to evaluate the effectiveness of greenbelts}, journal = {Environmental Modelling \& Software}, volume = {19}, year = {2004}, month = {2004/12//}, pages = {1097 - 1109}, abstract = {We present several models of residential development at the rural{\textendash}urban fringe to evaluate the effectiveness of a greenbelt located beside a developed area, for delaying development outside the greenbelt.
First, we develop a mathematical model, under two assumptions about the distributions of service centers, that represents the trade-off between greenbelt placement and width, their effects on the rate of development beyond the greenbelt, and how these interact with spatial patterns of aesthetic quality and the locations of services. Next, we present three agent-based models (ABMs) that include agents with the potential for heterogeneous preferences and a landscape with the potential for heterogeneous attributes. Results from experiments run with a one-dimensional ABM agree with the starkest of the results from the mathematical model, strengthening the support for both models. Further, we present two different two-dimensional ABMs and conduct a series of experiments to supplement our mathematical analysis. These include examining the effects of heterogeneous agent preferences, multiple landscape patterns, incomplete or imperfect information available to agents, and a positive aesthetic quality impact of the greenbelt on neighboring locations. These results suggest how width and location of the greenbelt could help determine the effectiveness of greenbelts for slowing sprawl, but that these relationships are sensitive to the patterns of landscape aesthetic quality and assumptions about service center locations.}, keywords = {Agent-based modeling, Land-use change, Landscape ecology, Urban sprawl}, isbn = {1364-8152}, doi = {10.1016/j.envsoft.2003.11.012}, url = {http://www.sciencedirect.com/science/article/pii/S1364815203002664}, author = {Brown,Daniel G. and Page,Scott E. and Riolo,Rick and Rand, William} } @article {15439, title = {Applying Model-based Distributed Continuous Quality Assurance Processes to Enhance Persistent Software Attributes}, journal = {IEEE Software}, volume = {21}, year = {2004}, month = {2004///}, pages = {32 - 40}, abstract = {Time and resource constraints often force developers of highly configurable quality of service (QoS)-intensive software systems to guarantee their system{\textquoteright}s persistent software attributes (PSAs) (e.g., functional correctness, portability, efficiency, and QoS) on very few platform configurations and to extrapolate from these configurations to the entire configuration space, which allows many sources of degradation to escape detection until systems are fielded. This article illustrates how distributed continuous quality assurance (DCQA) processes help improve the assessment of these PSAs across large QoS-intensive software system configuration spaces. We also illustrate how model-based DCQA processes enable developers to run formally-designed screening experiments that isolate the most significant configuration options, such as different workload parameters, operating system, compiler flags, feature sets, and/or run-time optimization controls. Our empirical results show that DCQA processes can be used to monitor, safeguard, and enforce PSAs at an acceptable level of cost and effort.}, author = {Krishna,A. S and Yilmaz,C. and Memon, Atif M. and Porter, Adam and Schmidt,D. C and Gokhale,A.
and Natarajan,B.} } @article {13508, title = {Automatic recognition of spontaneous speech for access to multilingual oral history archives}, journal = {IEEE Transactions on Speech and Audio Processing, Special Issue on Spontaneous Speech Processing}, volume = {12}, year = {2004}, month = {2004/07//}, pages = {420 - 435}, abstract = {The MALACH project has the goal of developing the technologies needed to facilitate access to large collections of spontaneous speech. Its aim is to dramatically improve the state of the art in key Automatic Speech Recognition (ASR) and Natural Language Processing (NLP) technologies for use in large-scale retrieval systems. The project leverages a unique collection of oral history interviews with survivors of the Holocaust that has been assembled and extensively annotated by the Survivors of the Shoah Visual History Foundation. This paper describes the collection, 116,000 hours of interviews in 32 languages, and the way in which system requirements have been discerned through user studies. It discusses ASR methods for very difficult speech (heavily accented, emotional, and elderly spontaneous speech), including transcription to create training data and methods for language modeling and speaker adaptation. Results are presented for English and Czech. NLP results are presented for named entity tagging, topic segmentation, and supervised topic classification, and the architecture of an integrated search system that uses these results is described.}, author = {Byrne,W. and David Doermann and Franz,M. and Gustman,S. and Hajic,J. and Oard, Douglas and Picheny,M. and Psutka,J. and Ramabhadran,B.} } @inbook {14203, title = {Bias in Shape Estimation}, booktitle = {Computer Vision - ECCV 2004}, series = {Lecture Notes in Computer Science}, volume = {3023}, year = {2004}, month = {2004///}, pages = {405 - 416}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper analyses the uncertainty in the estimation of shape from motion and stereo. It is shown that there are computational limitations of a statistical nature that previously have not been recognized. Because there is noise in all the input parameters, we cannot avoid bias. The analysis rests on a new constraint which relates image lines and rotation to shape. Because the human visual system has to cope with bias as well, it makes errors. This explains the underestimation of slant found in computational and psychophysical experiments, and demonstrated here for an illusory display. We discuss properties of the best known estimators with regard to the problem, as well as possible avenues for visual systems to deal with the bias.}, isbn = {978-3-540-21982-8}, url = {http://dx.doi.org/10.1007/978-3-540-24672-5_32}, author = {Hui Ji and Ferm{\"u}ller, Cornelia}, editor = {Pajdla,Tom{\'a}{\v s} and Matas,Jir{\'\i}} } @conference {16118, title = {The challenge of information visualization evaluation}, booktitle = {Proceedings of the working conference on Advanced visual interfaces}, series = {AVI {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {109 - 116}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {As the field of information visualization matures, the tools and ideas described in our research publications are reaching users.
The reports of usability studies and controlled experiments are helpful to understand the potential and limitations of our tools, but we need to consider other evaluation approaches that take into account the long exploratory nature of users{\textquoteright} tasks, the value of potential discoveries or the benefits of overall awareness. We need better metrics and benchmark repositories to compare tools, and we should also seek reports of successful adoption and demonstrated utility.}, keywords = {adoption, Evaluation, return on investment, technology transfer, usability, usefulness, User studies, Visualization}, isbn = {1-58113-867-9}, doi = {10.1145/989863.989880}, url = {http://doi.acm.org/10.1145/989863.989880}, author = {Plaisant, Catherine} } @article {14597, title = {CHARACTERIZATION OF Ath17, A QUANTITATIVE TRAIT LOCUS FOR ATHEROSCLEROSIS SUSCEPTIBILITY BETWEEN C57BL/6J AND 129S1/SvImJ; SINGLE-NUCLEOTIDE POLYMORPHISMS HAVE IMPORTANT IMPLICATIONS ON IDENTIFYING ATHEROSCLEROSIS MODIFIER GENES}, journal = {Cardiovascular Pathology}, volume = {13}, year = {2004}, month = {2004/06//May}, pages = {5 - 6}, abstract = {Although identifying quantitative trait loci (QTL) for atherosclerosis susceptibility in experimental murine models has helped us better understand the pathophysiology of atherosclerosis, identifying the genes underlying these QTL has been a slow and difficult process. We are currently in the process of identifying the gene that underlies Ath17, an atherosclerosis-susceptibility QTL we discovered in a (C57BL/6J{\texttimes}129S1/SvImJ)F2 intercross. Ath17 maps to a 6 cM interval region on chromosome 10 (D10Mit31, LOD score 6.6) and contains 46 annotated and 133 predicted genes. We searched the Celera Discovery System database for single nucleotide polymorphisms (SNPs) between C57BL/6J (B6) and 129S1/SvImJ in the Ath17 region and found defined blocks of high and low diversity and one polymorphic region of over 4 Mb surrounded by nonpolymorphic regions. Four of the genes, including the annotated Desrt, in the polymorphic region contained mis-sense mutations. The Desrt coding region contained four mis-sense mutations: B6 lysine to 129 glutamic acid at aa382, B6 asparagine to 129 serine at aa388, B6 threonine to 129 alanine at aa836, and B6 glycine to 129 serine at aa964. In addition, intron 5 of Desrt contained a SNP which co-localized with a putative transcription factor binding site. Desrt is a DNA binding protein, widely expressed in adult tissues, and likely plays a role in cell proliferation, differentiation, and development, making Desrt a strong candidate gene for Ath17. The murine SNP database dramatically reduced the percentage of the murine genome we had to search to find viable candidate genes for Ath17.}, isbn = {1054-8807}, doi = {10.1016/j.carpath.2004.03.010}, url = {http://www.sciencedirect.com/science/article/pii/S1054880704000377}, author = {Ishimori,Naoki and Walsh,Kenneth and Zheng,Xiangqun and Lu,Fu and Hannenhalli, Sridhar and Nusskern,Deborah and Mural,Richard and Paigen,Beverly} } @article {16249, title = {Comparative Genome Assembly}, journal = {Briefings in Bioinformatics}, volume = {5}, year = {2004}, month = {2004/09/01/}, pages = {237 - 248}, abstract = {One of the most complex and computationally intensive tasks of genome sequence analysis is genome assembly. Even today, few centres have the resources, in both software and hardware, to assemble a genome from the thousands or millions of individual sequences generated in a whole-genome shotgun sequencing project.
With the rapid growth in the number of sequenced genomes has come an increase in the number of organisms for which two or more closely related species have been sequenced. This has created the possibility of building a comparative genome assembly algorithm, which can assemble a newly sequenced genome by mapping it onto a reference genome. We describe here a novel algorithm for comparative genome assembly that can accurately assemble a typical bacterial genome in less than four minutes on a standard desktop computer. The software is available as part of the open-source AMOS project.}, keywords = {Assembly, comparative genomics, open source, shotgun sequencing}, isbn = {1467-5463, 1477-4054}, doi = {10.1093/bib/5.3.237}, url = {http://bib.oxfordjournals.org/content/5/3/237}, author = {Pop, Mihai and Phillippy,Adam and Delcher,Arthur L. and Salzberg,Steven L.} } @article {19626, title = {Computational experience with exterior point algorithms for the transportation problem}, journal = {Applied Mathematics and Computation}, volume = {158}, year = {2004}, month = {2004/11/05/}, pages = {459 - 475}, abstract = {An experimental computational study to compare the classical primal simplex algorithm and the exterior point algorithms for the transportation problem (TP) is presented. In total, four algorithms are compared on uniformly randomly generated test problems. The results are very encouraging for one of the competitive algorithms. In particular, a dual forest exterior point algorithm is on average up to 4.5 times faster than the network simplex algorithm on TPs of size 300 {\texttimes} 300 and for all classes. This result leads to corresponding savings in computational time. From the computational performance we conclude that as the problem size increases, the exterior point algorithms get relatively faster.}, keywords = {Algorithm evaluation, Experimental computational study, Exterior point simplex algorithms, Network simplex algorithm, Transportation problem}, isbn = {0096-3003}, url = {http://www.sciencedirect.com/science/article/pii/S0096300303010294}, author = {Charalampos Papamanthou and Paparrizos, Konstantinos and Samaras, Nikolaos} } @conference {15647, title = {A computational framework for incremental motion}, booktitle = {Proceedings of the twentieth annual symposium on Computational geometry}, series = {SCG {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {200 - 209}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We propose a generic computational framework for maintaining a discrete geometric structure defined by a collection of static and mobile objects. We assume that the mobile objects move incrementally, that is, in discrete time steps. We assume that the structure to be maintained is a function of the current locations of the mobile and static objects (independent of their prior motion). Unlike other models for kinetic computation, we place no restrictions on the motion nor on its predictability. In order to handle unrestricted incremental motion, our framework is based on the coordination of two computational entities. The first is the incremental motion algorithm. It is responsible for maintaining the structure and a set of certificates, or conditions, that prove the structure{\textquoteright}s correctness.
The other entity, called the motion processor, is responsible for handling all the low-level aspects of motion, including computing and/or tracking the motion of the mobile objects, answering queries about their current positions and velocities, and validating that the object motions satisfy simple motion estimates, which are generated by the incremental motion algorithm. Computational efficiency is measured in terms of the number of interactions between these two entities.}, keywords = {competitive analysis, incremental motion, kinetic data structures}, isbn = {1-58113-885-7}, doi = {10.1145/997817.997849}, url = {http://doi.acm.org/10.1145/997817.997849}, author = {Mount, Dave and Netanyahu,Nathan S. and Piatko,Christine D. and Silverman,Ruth and Wu,Angela Y.} } @conference {12069, title = {Copilot - a coprocessor-based kernel runtime integrity monitor}, booktitle = {Proceedings of the 13th conference on USENIX Security Symposium - Volume 13}, series = {SSYM{\textquoteright}04}, year = {2004}, month = {2004///}, pages = {13 - 13}, publisher = {USENIX Association}, organization = {USENIX Association}, address = {San Diego, CA}, abstract = {Copilot is a coprocessor-based kernel integrity monitor for commodity systems. Copilot is designed to detect malicious modifications to a host{\textquoteright}s kernel and has correctly detected the presence of 12 real-world rootkits, each within 30 seconds of their installation with less than a 1\% penalty to the host{\textquoteright}s performance. Copilot requires no modifications to the protected host{\textquoteright}s software and can be expected to operate correctly even when the host kernel is thoroughly compromised - an advantage over traditional monitors designed to run on the host itself.}, keywords = {design, management, MONITORS, Security, security and protection}, url = {http://portal.acm.org/citation.cfm?id=1251375.1251388}, author = {Petroni,Jr. and Fraser,Timothy and Molina,Jesus and Arbaugh, William A.} } @conference {16311, title = {Covering arrays for efficient fault characterization in complex configuration spaces}, booktitle = {ACM SIGSOFT Software Engineering Notes}, volume = {29}, year = {2004}, month = {2004///}, pages = {45 - 54}, author = {Yilmaz,C. and Cohen,M. B and Porter, Adam} } @article {19031, title = {Designing secure sensor networks}, journal = {Wireless Communications, IEEE}, volume = {11}, year = {2004}, month = {2004}, pages = {38 - 43}, abstract = {Sensor networks are expected to play an essential role in the upcoming age of pervasive computing. Due to their constraints in computation, memory, and power resources, their susceptibility to physical capture, and use of wireless communications, security is a challenge in these networks. The scale of deployments of wireless sensor networks requires careful decisions and trade-offs among various security measures.
The authors discuss these issues and consider mechanisms to achieve secure communication in these networks.}, keywords = {Pervasive computing, secure sensor network design, telecommunication security, Ubiquitous Computing, wireless sensor network, Wireless sensor networks}, isbn = {1536-1284}, author = {Elaine Shi and Perrig, A.} } @book {16119, title = {Designing the user interface: Strategies for Effective Human-Computer Interaction}, year = {2004}, month = {2004///}, publisher = {Pearson Higher Education}, organization = {Pearson Higher Education}, author = {Plaisant, Catherine and Shneiderman, Ben} } @book {16304, title = {Detecting and correcting a failure sequence in a computer system before a failure occurs}, year = {2004}, month = {2004/02//}, publisher = {Google Patents}, organization = {Google Patents}, author = {Gross,K. C and Votta,L. G. and Porter, Adam} } @conference {16302, title = {A distributed continuous quality assurance process to manage variability in performance-intensive software}, booktitle = {19th ACM OOPSLA Workshop on Component and Middleware Performance}, year = {2004}, month = {2004///}, author = {Krishna,A. and Yilmaz,C. and Memon, Atif M. and Porter, Adam and Schmidt,D. C and Gokhale,A. and Natarajan,B.} } @conference {15989, title = {Domain-independent reason-enhanced controller for task-oriented systems-DIRECTOR}, booktitle = {PROCEEDINGS OF THE NATIONAL CONFERENCE ON ARTIFICIAL INTELLIGENCE}, year = {2004}, month = {2004///}, pages = {1014 - 1015}, author = {Josyula,D. P and Anderson,M. L and Perlis, Don} } @article {19004, title = {The Drosophila U1-70K Protein Is Required for Viability, but Its Arginine-Rich Domain Is Dispensable}, journal = {Genetics}, volume = {168}, year = {2004}, month = {2004/12/01/}, pages = {2059 - 2065}, abstract = {The conserved spliceosomal U1-70K protein is thought to play a key role in RNA splicing by linking the U1 snRNP particle to regulatory RNA-binding proteins. Although these protein interactions are mediated by repeating units rich in arginines and serines (RS domains) in vitro, tests of this domain{\textquoteright}s importance in intact multicellular organisms have not been carried out. Here we report a comprehensive genetic analysis of U1-70K function in Drosophila. Consistent with the idea that U1-70K is an essential splicing factor, we find that loss of U1-70K function results in lethality during embryogenesis. Surprisingly, and contrary to the current view of U1-70K function, animals carrying a mutant U1-70K protein lacking the arginine-rich domain, which includes two embedded sets of RS dipeptide repeats, have no discernible mutant phenotype. Through double-mutant studies, however, we show that the U1-70K RS domain deletion no longer supports viability when combined with a viable mutation in another U1 snRNP component. Together our studies demonstrate that while the protein interactions mediated by the U1-70K RS domain are not essential for viability, they nevertheless contribute to an essential U1 snRNP function.}, isbn = {0016-6731, 1943-2631}, doi = {10.1534/genetics.104.032532}, url = {http://www.genetics.org/content/168/4/2059}, author = {Salz,Helen K. and Mancebo,Ricardo S. Y. and Nagengast,Alexis A.
and Speck,Olga and Psotka,Mitchell and Mount, Stephen M.} } @conference {17593, title = {End-to-end packet-scheduling in wireless ad-hoc networks}, booktitle = {Proceedings of the fifteenth annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {1021 - 1030}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, abstract = {Packet-scheduling is a particular challenge in wireless networks due to interference from nearby transmissions. A distance-2 interference model serves as a useful abstraction here, and we study packet routing and scheduling under this model. The main focus of our work is the development of fully-distributed (decentralized) protocols. We present polylogarithmic/constant factor approximation algorithms for various families of disk graphs (which capture the geometric nature of wireless-signal propagation), as well as near-optimal approximation algorithms for general graphs. The packet-scheduling work by Leighton, Maggs and Rao (Combinatorica, 1994) and a basic distributed coloring procedure, originally due to Luby (J. Computer and System Sciences, 1993), underlie many of our algorithms. Experimental work of Finocchi, Panconesi, and Silvestri (SODA 2002) showed that a natural modification of Luby{\textquoteright}s algorithm leads to improved performance, and a rigorous explanation of this was left as an open question; we prove that the modified algorithm is provably better in the worst case. Finally, using simulations, we study the impact of the routing strategy and the choice of parameters on the performance of our distributed algorithm for unit disk graphs.}, isbn = {0-89871-558-X}, url = {http://dl.acm.org/citation.cfm?id=982792.982945}, author = {Kumar,V. S. Anil and Marathe,Madhav V. and Parthasarathy,Srinivasan and Srinivasan, Aravind} } @conference {16462, title = {Exploiting multiple paths to express scientific queries}, booktitle = {16th International Conference on Scientific and Statistical Database Management, 2004. Proceedings}, year = {2004}, month = {2004/06/21/23}, pages = {357 - 360}, publisher = {IEEE}, organization = {IEEE}, abstract = {The purpose of this demonstration is to present the main features of the BioNavigation system. Scientific data collection needed in various stages of scientific discovery is typically performed manually. For each scientific object of interest (e.g., a gene, a sequence), scientists query a succession of Web resources following links between retrieved entries. Each of the steps provides part of the intended characterization of the scientific object. This process is sometimes partially supported by hard-coded scripts or complex queries that will be evaluated by a mediation-based data integration system or against a data warehouse. These approaches fail to guide the scientists during the collection process. In contrast, the BioNavigation approach presented in the paper provides the scientists with information on the available alternative resources, their provenance, and the costs of data collection.
The BioNavigation system enhances a mediation-based integration system and provides scientists with support for the following: to ask queries at a high conceptual level; to visualize the multiple alternative resources that may be exploited to execute their data collection queries; to choose the final execution path to evaluate their queries.}, keywords = {access protocols, biology computing, BioNavigation system, complex queries, Costs, Data analysis, data handling, Data visualization, data warehouse, Data warehouses, Databases, diseases, distributed databases, hard-coded scripts, information resources, Information retrieval, mediation-based data integration system, multiple paths, query evaluation, Query processing, scientific data collection, scientific discovery, scientific information, scientific information systems, scientific object of interest, scientific queries, sequences, Web resources}, isbn = {0-7695-2146-0}, doi = {10.1109/SSDM.2004.1311231}, author = {Lacroix,Z. and Moths,T. and Parekh,K. and Raschid, Louiqa and Vidal,M. -E} } @conference {17155, title = {Extending the utility of treemaps with flexible hierarchy}, booktitle = {Eighth International Conference on Information Visualisation, 2004. IV 2004. Proceedings}, year = {2004}, month = {2004/07/14/16}, pages = {335 - 344}, publisher = {IEEE}, organization = {IEEE}, abstract = {Treemaps are a visualization technique for presenting hierarchical information on two-dimensional displays. Prior implementations limit the visualization to pre-defined static hierarchies. Flexible hierarchy, a new capability of Treemap 4.0, enables users to define various hierarchies through dynamically selecting a series of data attributes so that they can discover patterns, clusters and outliers. This work describes the design and implementation issues of flexible hierarchy. It then reports on a usability study, which led to enhancements to the interface.}, keywords = {2D displays, Computer displays, Computer science, data visualisation, Data visualization, Educational institutions, flexible hierarchy, graphical user interface, Graphical user interfaces, hierarchical information, Nominations and elections, Switches, Tree data structures, Tree graphs, Treemap 4.0, Two dimensional displays, usability, visualization technique}, isbn = {0-7695-2177-0}, doi = {10.1109/IV.2004.1320166}, author = {Chintalapani,G. and Plaisant, Catherine and Shneiderman, Ben} } @article {13771, title = {Extrinsic Evaluation of Automatic Metrics for Summarization}, year = {2004}, month = {2004/07/20/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {This paper describes extrinsic-task evaluation of summarization. We show that it is possible to save time using summaries for relevance assessment without adversely impacting the degree of accuracy that would be possible with full documents. In addition, we demonstrate that the extrinsic task we have selected exhibits a high degree of interannotator agreement, i.e., consistent relevance decisions across subjects. We also conducted a composite experiment that better reflects the actual document selection process and found that using a surrogate improves the processing speed over reading the entire document. Finally, we have found a small yet statistically significant correlation between some of the intrinsic measures and a user{\textquoteright}s performance in an extrinsic task.
The overall conclusion we can draw at this point is that ROUGE-1 does correlate with precision and to a somewhat lesser degree with accuracy, but that it remains to be investigated how stable these correlations are and how differences in ROUGE-1 translate into significant differences in human performance in an extrinsic task.}, keywords = {*ABSTRACTS, *ACCURACY, *DATA SUMMARIZATION, *DOCUMENT SUMMARIES, *DOCUMENTS, *READING MACHINES, *SOFTWARE METRICS, *STATISTICAL ANALYSIS, ABSTRACT SELECTION, ANNOTATIONS, Automation, COMPUTER PROGRAMMING AND SOFTWARE, COMPUTER SYSTEMS, CORRELATION TECHNIQUES, DATA PROCESSING, DOCUMENT SUMMARIES, EXTRINSIC TASKS, Information retrieval, INFORMATION SCIENCE, PERFORMANCE(HUMAN), PRECISION, RELEVANCY, ROUGE-1 COMPUTER PROGRAM, STATISTICS AND PROBABILITY, TEST AND EVALUATION}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA448065}, author = {Dorr, Bonnie J and Monz,Christof and Oard, Douglas and President,Stacy and Zajic, David and Schwartz,Richard} } @article {16344, title = {Finding bugs is easy}, journal = {ACM SIGPLAN Notices}, volume = {39}, year = {2004}, month = {2004/12//}, pages = {92 - 92}, isbn = {03621340}, doi = {10.1145/1052883.1052895}, url = {http://dl.acm.org/citation.cfm?id=1052895}, author = {Hovemeyer, David and Pugh, William} } @conference {16361, title = {Finding concurrency bugs in java}, booktitle = {Proceedings of the PODC Workshop on Concurrency and Synchronization in Java Programs, St. John{\textquoteright}s, Newfoundland, Canada}, year = {2004}, month = {2004///}, author = {Hovemeyer,D. and Pugh, William} } @article {18843, title = {Geometric algorithms for automated design of multi-piece permanent molds}, journal = {Computer-Aided Design}, volume = {36}, year = {2004}, month = {2004/03//}, pages = {241 - 260}, abstract = {Multi-piece molds, which consist of more than two mold pieces, are capable of producing very complex parts{\textemdash}parts that cannot be produced by traditional molds. The tooling cost is also low for multi-piece molds, which makes them candidates for pre-production prototyping and bridge tooling. However, designing multi-piece molds is a time-consuming task. This article describes geometric algorithms for automated design of multi-piece molds. A multi-piece mold design algorithm has been developed to automate several important mold-design steps: finding parting directions, locating parting lines, creating parting surfaces, and constructing mold pieces. This algorithm constructs mold pieces based on global accessibility analysis results of the part and therefore guarantees the disassembly of the mold pieces. A software system has been developed, which has been successfully tested on several complex industrial parts.}, keywords = {mold design, Multi-piece molds, spatial partitioning}, isbn = {0010-4485}, doi = {10.1016/S0010-4485(03)00107-6}, url = {http://www.sciencedirect.com/science/article/pii/S0010448503001076}, author = {Priyadarshi,Alok K. and Gupta, Satyandra K.} } @article {13783, title = {Headline evaluation experiment results}, year = {2004}, month = {2004///}, institution = {University of Maryland, College Park, MD. UMIACS-TR-2004-18}, abstract = {This technical report describes an experiment intended to show that different summarization techniques have an effect on human performance of an extrinsic task. The task is document selection in the context of information retrieval.
We conclude that the task was too difficult and ambiguous for subjects to perform at the level required in order to make statistically significant claims about the relationship between summarization techniques and human performance. An alternate interpretation of the experimental results is described and plans for future experiments are discussed.}, author = {Zajic, David and Dorr, Bonnie J and Schwartz,R. and President,S.} } @article {16239, title = {Hierarchical Scaffolding With Bambus}, journal = {Genome Research}, volume = {14}, year = {2004}, month = {2004/01/01/}, pages = {149 - 159}, abstract = {The output of a genome assembler generally comprises a collection of contiguous DNA sequences (contigs) whose relative placement along the genome is not defined. A procedure called scaffolding is commonly used to order and orient these contigs using paired read information. This ordering of contigs is an essential step when finishing and analyzing the data from a whole-genome shotgun project. Most recent assemblers include a scaffolding module; however, users have little control over the scaffolding algorithm or the information produced. We thus developed a general-purpose scaffolder, called Bambus, which affords users significant flexibility in controlling the scaffolding parameters. Bambus was used recently to scaffold the low-coverage draft dog genome data. Most significantly, Bambus enables the use of linking data other than that inferred from mate-pair information. For example, the sequence of a completed genome can be used to guide the scaffolding of a related organism. We present several applications of Bambus: support for finishing, comparative genomics, analysis of the haplotype structure of genomes, and scaffolding of a mammalian genome at low coverage. Bambus is available as an open-source package from our Web site.}, doi = {10.1101/gr.1536204}, url = {http://genome.cshlp.org/content/14/1/149.abstract}, author = {Pop, Mihai and Kosack,Daniel S. and Salzberg,Steven L.} } @article {18905, title = {HTN planning for Web Service composition using SHOP2}, journal = {Web Semantics: Science, Services and Agents on the World Wide Web}, volume = {1}, year = {2004}, month = {2004/10//}, pages = {377 - 396}, abstract = {Automated composition of Web Services can be achieved by using AI planning techniques. Hierarchical Task Network (HTN) planning is especially well-suited for this task. In this paper, we describe how the HTN planning system SHOP2 can be used with OWL-S Web Service descriptions. We provide a sound and complete algorithm to translate OWL-S service descriptions to a SHOP2 domain. We prove the correctness of the algorithm by showing the correspondence to the situation calculus semantics of OWL-S. We implemented a system that plans over sets of OWL-S descriptions using SHOP2 and then executes the resulting plans over the Web. The system is also capable of executing information-providing Web Services during the planning process.
We discuss the challenges and difficulties of using planning in the information-rich and human-oriented context of Web Services.}, keywords = {HTN planning, OWL-S, SHOP2, Web Service composition, Web services}, isbn = {1570-8268}, doi = {10.1016/j.websem.2004.06.005}, url = {http://www.sciencedirect.com/science/article/pii/S1570826804000113}, author = {Sirin,Evren and Parsia,Bijan and Wu,Dan and Hendler,James and Nau, Dana S.} } @inbook {13787, title = {iCLEF 2003 at Maryland: Translation Selection and Document Selection}, booktitle = {Comparative Evaluation of Multilingual Information Access Systems}, series = {Lecture Notes in Computer Science}, volume = {3237}, year = {2004}, month = {2004///}, pages = {231 - 265}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Maryland performed two sets of experiments for the 2003 Cross-Language Evaluation Forum{\textquoteright}s interactive track, one focused on interactive selection of appropriate translations for query terms, the second focused on interactive selection of relevant documents. Translation selection was supported using possible synonyms discovered through back translation and two techniques for generating KeyWord In Context (KWIC) examples of usage. The results indicate that searchers typically achieved a similar search effectiveness using fewer query iterations when interactive translation selection was available. For document selection, a complete extract of the first 40 words of each news story was compared to a compressed extract generated using an automated parse-and-trim approach that approximates one way in which people can produce headlines. The results indicate that compressed {\textquotedblleft}headlines{\textquotedblright} result in faster assessment, but with a 20\% relative reduction in the $F_{\alpha = 0.8}$ search effectiveness measure.}, isbn = {978-3-540-24017-4}, url = {http://dx.doi.org/10.1007/978-3-540-30222-3_42}, author = {Dorr, Bonnie J and He,Daqing and Luo,Jun and Oard, Douglas and Schwartz,Richard and Wang,Jianqiang and Zajic, David}, editor = {Peters,Carol and Gonzalo,Julio and Braschler,Martin and Kluck,Michael} } @article {16115, title = {Immediate usability: a case study of public access design for a community photo library}, journal = {Interacting with Computers}, volume = {16}, year = {2004}, month = {2004/12//}, pages = {1171 - 1193}, abstract = {This paper describes a novel instantiation of a digital photo library in a public access system. It demonstrates how designers can utilize characteristics of a target user community (social constraints, trust, and a lack of anonymity) to provide capabilities, such as unrestricted annotation and uploading of photos, which would be impractical in other types of public access systems. It also presents a compact set of design principles and guidelines for ensuring the immediate usability of public access information systems. These principles and guidelines were derived from our experience developing PhotoFinder Kiosk, a community photo library. Attendees of a major HCI conference (CHI 2001 Conference on Human Factors in Computing Systems) successfully used the tool to browse and annotate collections of photographs spanning 20 years of HCI-related conferences, producing a richly annotated photo history of the field of human{\textendash}computer interaction. Observations and usage log data were used to evaluate the tool and develop the guidelines.
They provide specific guidance for practitioners, as well as a useful framework for additional research in public access interfaces.}, keywords = {Casual use, Community photo library, direct annotation, direct manipulation, Drag-and-drop, Group annotation, Immediate usability, Photo collection, Public access system, Walk-up-and-use, Zero-trial learning}, isbn = {0953-5438}, doi = {10.1016/j.intcom.2004.07.005}, url = {http://www.sciencedirect.com/science/article/pii/S0953543804000840}, author = {Kules,Bill and Kang,Hyunmo and Plaisant, Catherine and Rose,Anne and Shneiderman, Ben} } @conference {16308, title = {Improving the Quality of Performance-intensive Software via Model-integrated Distributed Continuous Quality Assurance}, booktitle = {Proceedings of the 8th International Conference on Software Reuse}, year = {2004}, month = {2004///}, author = {Krishna,A. S and Schmidt,D. C and Porter, Adam and Memon, Atif M. and Sevilla-Ruiz,D.} } @inbook {18909, title = {Information Gathering During Planning for Web Service Composition}, booktitle = {The Semantic Web {\textendash} ISWC 2004}, series = {Lecture Notes in Computer Science}, volume = {3298}, year = {2004}, month = {2004///}, pages = {335 - 349}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Hierarchical Task-Network (HTN) based planning techniques have been applied to the problem of composing Web Services, especially when described using the OWL-S service ontologies. Many of the existing Web Services are either exclusively information providing or crucially depend on information-providing services. Thus, many interesting service compositions involve collecting information either during execution or during the composition process itself. In this paper, we focus on the latter issue. In particular, we present ENQUIRER, an HTN-planning algorithm designed for planning domains in which the information about the initial state of the world may not be complete, but it is discoverable through plan-time information-gathering queries. We have shown that ENQUIRER is sound and complete, and derived several mathematical relationships among the amount of available information, the likelihood of the planner finding a plan, and the quality of the plan found. We have performed experimental tests that confirmed our theoretical results and that demonstrated how ENQUIRER can be used in Web Service composition.}, keywords = {Computer science}, isbn = {978-3-540-23798-3}, url = {http://www.springerlink.com/content/v829m5080fc0bpng/abstract/}, author = {Kuter,Ugur and Sirin,Evren and Nau, Dana S. and Parsia,Bijan and Hendler,James}, editor = {McIlraith,Sheila and Plexousakis,Dimitris and van Harmelen,Frank} } @conference {16121, title = {Les le{\c c}ons tir{\'e}es des deux comp{\'e}titions de visualisation d{\textquoteright}information}, booktitle = {Proceedings of the 16th conference on Association Francophone d{\textquoteright}Interaction Homme-Machine}, series = {IHM 2004}, year = {2004}, month = {2004///}, pages = {7 - 12}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Information visualization needs benchmarks to move forward. A benchmark is aimed at comparing information visualization techniques or systems. A benchmark consists of a dataset, a list of tasks mostly based on finding facts about the dataset, and a list of interesting or important findings about the dataset (the nuggets to find).
For the second year, we are organizing the InfoVis Contest, aimed at collecting results for benchmarks. We describe here the main lessons we learned.}, keywords = {benchmark, contest, Evaluation, Information Visualization}, isbn = {1-58113-926-8}, doi = {10.1145/1148613.1148616}, url = {http://doi.acm.org/10.1145/1148613.1148616}, author = {Fekete,Jean-Daniel and Plaisant, Catherine} } @inbook {19651, title = {An MCMC-Based Particle Filter for Tracking Multiple Interacting Targets}, booktitle = {Computer Vision - ECCV 2004}, series = {Lecture Notes in Computer Science}, year = {2004}, month = {2004/01/01/}, pages = {279 - 290}, publisher = {Springer Berlin Heidelberg}, organization = {Springer Berlin Heidelberg}, abstract = {We describe a Markov chain Monte Carlo based particle filter that effectively deals with interacting targets, i.e., targets that are influenced by the proximity and/or behavior of other targets. Such interactions cause problems for traditional approaches to the data association problem. In response, we developed a joint tracker that includes a more sophisticated motion model to maintain the identity of targets throughout an interaction, drastically reducing tracker failures. The paper presents two main contributions: (1) we show how a Markov random field (MRF) motion prior, built on the fly at each time step, can substantially improve tracking when targets interact, and (2) we show how this can be done efficiently using Markov chain Monte Carlo (MCMC) sampling. We prove that incorporating an MRF to model interactions is equivalent to adding an additional interaction factor to the importance weights in a joint particle filter. Since a joint particle filter suffers from exponential complexity in the number of tracked targets, we replace the traditional importance sampling step in the particle filter with an MCMC sampling step. The resulting filter deals efficiently and effectively with complicated interactions when targets approach each other. We present both qualitative and quantitative results to substantiate the claims made in the paper, including a large-scale experiment on a video sequence over 10,000 frames in length.}, keywords = {Artificial Intelligence (incl. Robotics), Computer Graphics, Image Processing and Computer Vision, pattern recognition}, isbn = {978-3-540-21981-1, 978-3-540-24673-2}, url = {http://link.springer.com/chapter/10.1007/978-3-540-24673-2_23}, author = {Zia Khan and Balch, Tucker and Dellaert, Frank}, editor = {Pajdla, Tom{\'a}s and Matas, Ji{\v r}{\'\i}} } @article {16334, title = {Measuring HPC productivity}, journal = {International Journal of High Performance Computing Applications}, volume = {18}, year = {2004}, month = {2004///}, pages = {459 - 473}, author = {Faulk,S. and Gustafson,J. and Johnson,P. and Porter, Adam and Tichy,W. and Votta,L.} } @article {15446, title = {Model-Based Approaches-Validating Quality of Service for Reusable Software Via Model-Integrated Distributed Continuous Quality Assurance}, journal = {Lecture Notes in Computer Science}, volume = {3107}, year = {2004}, month = {2004///}, pages = {286 - 295}, author = {Krishna,A. S and Schmidt,D. C and Memon, Atif M.
and Porter, Adam and Sevilla,D.} } @article {15454, title = {A Model-based Distributed Continuous Quality Assurance Process to Enhance the Quality of Service of Evolving Performance-intensive Software Systems}, journal = {Proceedings of the 2nd ICSE Workshop on Remote Analysis and Measurement of Software Systems (RAMSS), Edinburgh, Scotland, UK}, year = {2004}, month = {2004///}, abstract = {Performance-intensive software, such as that found in high-performance computing systems and distributed real-time and embedded systems, increasingly executes on a multitude of platforms and user contexts. To ensure that performance-intensive software meets its quality of service (QoS) requirements, it must often be fine-tuned to specific platforms/contexts by adjusting many (in some cases hundreds of) configuration options. Developers who write these types of systems must therefore try to ensure that their additions and modifications work across this large configuration space. In practice, however, time and resource constraints often force developers to assess performance on very few configurations and to extrapolate from these to the entire configuration space, which allows many performance bottlenecks and sources of QoS degradation to escape detection until systems are fielded. To improve the assessment of performance across large configuration spaces, we present a model-based approach to developing and deploying a new distributed continuous quality assurance (DCQA) process. Our approach builds upon and extends the Skoll environment, which is developing and validating novel software QA processes and tools that leverage the extensive computing resources of worldwide user communities in a distributed, continuous manner to significantly and rapidly improve software quality. This paper describes how our new DCQA performance assessment process enables developers to run formally-designed screening experiments that isolate the most significant options. After that, exhaustive experiments (on the now much smaller configuration space) are conducted. We implemented this process using model-based software tools and executed it in the Skoll environment to demonstrate its effectiveness via two experiments on widely used QoS-enabled middleware. Our results show that model-based DCQA processes improve developer insight into the effect of system changes on performance at an acceptable cost.}, author = {Yilmaz,C. and Krishna,A. S and Memon, Atif M. and Porter, Adam and Schmidt,D. C and Gokhale,A. and Natarajan,B.} } @article {16369, title = {MPJava: High-performance message passing in java using java.nio}, journal = {Languages and Compilers for Parallel Computing}, year = {2004}, month = {2004///}, pages = {323 - 339}, author = {Pugh, William and Spacco,J.} } @article {13226, title = {Multi-cue exemplar-based nonparametric model for gesture recognition}, journal = {Indian Conference on Computer Vision, Graphics and Image Processing}, year = {2004}, month = {2004///}, pages = {16 - 18}, abstract = {This paper presents an approach for multi-cue, view-based recognition of gestures. We describe an exemplar-based technique that combines two different forms of exemplars - shape exemplars and motion exemplars - in a unified probabilistic framework. Each gesture is represented as a sequence of learned body poses as well as a sequence of learned motion parameters.
The shape exemplars consist of pose contours, and the motion exemplars are represented as affine motion parameters extracted using a robust estimation approach. The probabilistic framework learns by employing a nonparametric estimation technique to model the exemplar distributions. It imposes temporal constraints between different exemplars through a learned Hidden Markov Model (HMM) for each gesture. We use the proposed multi-cue approach to recognize a set of fourteen gestures and contrast it against a shape-only, single-cue based system.}, author = {Shet,V. D and Prasad,V. S.N and Elgammal,A. and Yacoob,Yaser and Davis, Larry S.} } @article {13308, title = {Multi-resolution modeling, visualization and streaming of volume meshes}, journal = {Eurographics 2004, Tutorials 2: Multi-resolution Modeling, Visualization and Streaming of Volume Meshes}, year = {2004}, month = {2004///}, author = {Cignoni,P. and De Floriani, Leila and Lindstrom,P. and Pascucci,V. and Rossignac,J. and Silva,C.} } @article {13267, title = {A multi-resolution topological representation for non-manifold meshes}, journal = {Computer-Aided Design}, volume = {36}, year = {2004}, month = {2004/02//}, pages = {141 - 159}, abstract = {We address the problem of representing and processing 3D objects, described through simplicial meshes, which consist of parts of mixed dimensions, and with a non-manifold topology, at different levels of detail. First, we describe a multi-resolution model, that we call a non-manifold multi-tessellation (NMT), and we consider the selective refinement query, which is at the heart of several analysis operations on multi-resolution meshes. Next, we focus on a specific instance of an NMT, generated by simplifying simplicial meshes based on vertex-pair contraction, and we describe a compact data structure for encoding such a model. We also propose a new data structure for two-dimensional simplicial meshes, capable of representing both connectivity and adjacency information with a small memory overhead, which is used to describe the mesh extracted from an NMT through selective refinement. Finally, we present algorithms to efficiently perform updates on such a data structure.}, keywords = {Data structures, Multi-resolution, Non-manifold modeling}, isbn = {0010-4485}, doi = {10.1016/S0010-4485(03)00058-7}, url = {http://www.sciencedirect.com/science/article/pii/S0010448503000587}, author = {De Floriani, Leila and Magillo,Paola and Puppo,Enrico and Sobrero,Davide} } @article {14990, title = {Multiscale advanced raster map analysis system: definition, design and development}, journal = {Environmental and Ecological Statistics}, volume = {11}, year = {2004}, month = {2004///}, pages = {113 - 138}, abstract = {This paper brings together a multidisciplinary initiative to develop advanced statistical and computational techniques for analyzing, assessing, and extracting information from raster maps. This information will provide a rigorous foundation to address a wide range of applications including disease mapping, emerging infectious diseases, landscape ecological assessment, land cover trends and change detection, watershed assessment, and map accuracy assessment. It will develop an advanced map analysis system that integrates these techniques with an advanced visualization toolbox, and use the system to conduct large case studies using rich sets of raster data, primarily from remotely sensed imagery.
As a result, it will be possible to study and evaluate raster maps of societal, ecological, and environmental variables to facilitate quantitative characterization and comparative analysis of geospatial trends, patterns, and phenomena. In addition to environmental and ecological studies, these techniques and tools can be used for policy decisions at national, state, and local levels, crisis management, and protection of infrastructure. Geospatial data form the foundation of an information-based society. Remote sensing has been a vastly under-utilized resource involving a multi-million dollar investment at the national level. Even when utilized, its credibility has been at stake, largely because of a lack of tools that can assess, visualize, and communicate accuracy and reliability in a timely manner and at desired confidence levels. Consider an imminent 21st century scenario: What message does a multi-categorical map have about the large landscape it represents? And at what scale, and at what level of detail? Does the spatial pattern of the map reveal any societal, ecological, environmental condition of the landscape? And therefore can it be an indicator of change? How do you automate the assessment of the spatial structure and behavior of change to discover critical areas, hot spots, and their corridors? Is the map accurate? How accurate is it? How do you assess the accuracy of the map? How do we evaluate a temporal change map for change detection? What are the implications of the kind and amount of change and accuracy on what matters, whether climate change, carbon emission, water resources, urban sprawl, biodiversity, indicator species, human health, or early warning? And with what confidence? The proposed research initiative is expected to find answers to these questions and a few more that involve multi-categorical raster maps based on remote sensing and other geospatial data. It includes the development of techniques for map modeling and analysis using Markov Random Fields, geospatial statistics, accuracy assessment and change detection, upper echelons of surfaces, advanced computational techniques for geospatial data mining, and advanced visualization techniques.}, doi = {10.1023/B:EEST.0000027205.77490.8c}, author = {Patil,G. P. and Balbus,J. and Biging,G. and JaJa, Joseph F. and Myers,W. L. and Taillie,C.} } @article {12149, title = {New year{\textquoteright}s resolutions for software quality}, journal = {IEEE Software}, volume = {21}, year = {2004}, month = {2004///}, pages = {12 - 13}, abstract = {In the spirit of making resolutions for betterment in the New Year, ten distinguished individuals in the software quality field offer their recommendations on how organizations can improve software quality.}, author = {Basili, Victor R. and Boehm,B. and Davis,A. and Humphrey,W. S and Leveson,N. and Mead,N. R and Musa,J. D and Parnas,D. L and Pfleeger,S. L and Weyuker,E.} } @conference {17805, title = {Oasys: An opinion analysis system}, booktitle = {AAAI Spring symposium on Computational Approaches to Analyzing Weblogs}, year = {2004}, month = {2004///}, abstract = {There are numerous applications in which we would like to assess what opinions are being expressed in text documents.
For example, Martha Stewart{\textquoteright}s company may have wished to assess the degree of harshness of news articles about her in the recent past. Likewise, a World Bank official may wish to assess the degree of criticism of a proposed dam in Bangladesh. The ability to gauge opinion on a given topic is therefore of critical interest. In this paper, we develop a suite of algorithms which take as input a set D of documents as well as a topic t, and gauge the degree of opinion expressed about topic t in the set D of documents. Our algorithms can return both a number (the larger the number, the more positive the opinion) as well as a qualitative opinion (e.g. harsh, complimentary). We assess the accuracy of these algorithms via human experiments and show that the best of these algorithms can accurately reflect human opinions. We have also conducted performance experiments showing that our algorithms are computationally fast.}, author = {Cesarano,C. and Dorr, Bonnie J and Picariello, A. and Reforgiato,D. and Sagoff,A. and V.S. Subrahmanian} } @article {18709, title = {Polyubiquitin chains: polymeric protein signals}, journal = {Current Opinion in Chemical Biology}, volume = {8}, year = {2004}, month = {2004/12//}, pages = {610 - 616}, abstract = {The 76-residue protein ubiquitin exists within eukaryotic cells both as a monomer and in the form of isopeptide-linked polymers called polyubiquitin chains. In two well-described cases, structurally distinct polyubiquitin chains represent functionally distinct intracellular signals. Recently, additional polymeric structures have been detected in vivo and in vitro, and several large families of proteins with polyubiquitin chain-binding activity have been discovered. Although the molecular mechanisms governing specificity in chain synthesis and recognition are still incompletely understood, the scope of signaling by polyubiquitin chains is likely to be broader than originally envisioned.}, isbn = {1367-5931}, doi = {10.1016/j.cbpa.2004.09.009}, url = {http://www.sciencedirect.com/science/article/pii/S1367593104001413}, author = {Pickart,Cecile M. and Fushman, David} } @article {15498, title = {Preserving distributed systems critical properties: a model-driven approach}, journal = {Software, IEEE}, volume = {21}, year = {2004}, month = {2004///}, pages = {32 - 40}, abstract = {The need for creating predictability in distributed systems is most often specified in terms of quality-of-service (QoS) requirements, which help define the acceptable levels of dependability with which capabilities such as processing capacity, data throughput, or service availability reach users. For longer-term properties such as scalability, maintainability, adaptability, and system security, we can similarly use persistent software attributes (PSAs) to specify how and to what degree such properties must remain intact as a network expands and evolves over time. The Skoll distributed continuous software quality assurance process helps to identify viable system and software configurations for meeting stringent QoS and PSA requirements by coordinating the use of distributed computing resources.
The authors tested their process using the large, rapidly evolving ACE+TAO middleware suite.}, keywords = {configuration management, formal verification, Middleware, middleware suite, model-driven approach, persistent software attributes, QoS requirements, Quality assurance, quality of service, quality-of-service, Skoll distributed computing resources, software configuration, Software maintenance, Software quality, software quality assurance process, system dependability}, isbn = {0740-7459}, doi = {10.1109/MS.2004.50}, author = {Yilmaz,C. and Memon, Atif M. and Porter, Adam and Krishna,A. S and Schmidt,D. C and Gokhale,A. and Natarajan,B.} } @conference {17802, title = {The priority curve algorithm for video summarization}, booktitle = {Proceedings of the 2nd ACM international workshop on Multimedia databases}, series = {MMDB {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {28 - 35}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In this paper, we introduce the concept of a priority curve associated with a video. We then provide an algorithm that can use the priority curve to create a summary (of a desired length) of any video. The summary thus created exhibits nice continuity properties and also avoids repetition. We have implemented the priority curve algorithm (PCA) and compared it with other summarization algorithms in the literature. We show that PCA is faster than existing algorithms and also produces better quality summaries. The quality of summaries was evaluated by a group of 200 students in Naples, Italy, who watched soccer videos. We also briefly describe a soccer video summarization system we have built using the PCA architecture and various (classical) image processing algorithms.}, keywords = {probabilistic, Summarization, system, video}, isbn = {1-58113-975-6}, doi = {10.1145/1032604.1032611}, url = {http://doi.acm.org/10.1145/1032604.1032611}, author = {Fayzullin,M. and V.S. Subrahmanian and Albanese, M. and Picariello, A.} } @article {12080, title = {Proactive key distribution using neighbor graphs}, journal = {IEEE Wireless Communications}, volume = {11}, year = {2004}, month = {2004/02//}, pages = {26 - 36}, abstract = {User mobility in wireless data networks is increasing because of technological advances and the desire for voice and multimedia applications. These applications, however, require that handoffs between base stations (or access points) be fast to maintain the quality of the connections. In this article we introduce a novel data structure, the neighbor graph, that dynamically captures the mobility topology of a wireless network. We show how neighbor graphs can be utilized to obtain a 99 percent reduction in the authentication time of an IEEE 802.11 handoff (full EAP-TLS) by proactively distributing necessary key material one hop ahead of the mobile user.
We also present a reactive method for fast authentication that requires only firmware changes to access points and hence can easily be deployed on existing wireless networks.}, keywords = {access points, Authentication, authentication time, Base stations, Communication system security, Delay, graph theory, GSM, IEEE 802.11 handoff, Land mobile radio cellular systems, Message authentication, mobile radio, Multiaccess communication, neighbor graph, Network topology, Roaming, telecommunication security, Telephone sets, user mobility, Wi-Fi networks, wireless data networks, Wireless LAN, Wireless networks}, isbn = {1536-1284}, doi = {10.1109/MWC.2004.1269714}, author = {Mishra,A. and Min Ho Shin and Petroni,N. L. and Clancy,T. C and Arbaugh, William A.} } @conference {16120, title = {Project highlight: toward the statistical knowledge network}, booktitle = {Proceedings of the 2004 annual national conference on Digital government research}, series = {dg.o {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {125:1{\textendash}125:2}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {This project aims to help people find and understand government statistical information. To achieve this goal, we envision a statistical knowledge network that brings stakeholders from government at all levels together with citizens who provide or seek statistical information. The linchpin of this network is a series of human-computer interfaces that facilitate information seeking, understanding, and use. In turn, these interfaces depend on high-quality metadata and intra-agency cooperation. In this briefing, we summarize our accomplishments in the second year of the project.}, url = {http://dl.acm.org/citation.cfm?id=1124191.1124316}, author = {Marchionini,Gary and Haas,Stephanie and Shneiderman, Ben and Plaisant, Catherine and Hert,Carol A.} } @article {16122, title = {Project portfolio earned value management using treemaps}, journal = {Proceedings of the Project Management Institute Research Conference}, year = {2004}, month = {2004///}, abstract = {Project portfolio management deals with organizing and managing a set of projects in an organization. Each organization has its own way of managing the portfolio that meets its business goals. One of the main challenges is to track project performance across the entire portfolio in a timely and effective manner. Doing so allows managers to diagnose performance trends and identify projects in need of attention, giving them the opportunity to take management action in a timely fashion. In this paper, we investigate the use of Earned Value Management (EVM) for tracking project performance across the portfolio, and explore the benefits of an interactive visualization technique called Treemaps to display project performance metrics for the entire portfolio on a single screen.}, author = {Cable,J. H and Ordonez,J. F and Chintalapani,G. and Plaisant, Catherine} } @article {12075, title = {Security issues in IEEE 802.11 wireless local area networks: a survey}, journal = {Wireless Communications and Mobile Computing}, volume = {4}, year = {2004}, month = {2004///}, pages = {821 - 833}, author = {Mishra,A. and Petroni Jr,N. L and Arbaugh, William A.
and Fraser,T.} } @article {13325, title = {Selective refinement queries for volume visualization of unstructured tetrahedral meshes}, journal = {Visualization and Computer Graphics, IEEE Transactions on}, volume = {10}, year = {2004}, month = {2004///}, pages = {29 - 45}, abstract = {We address the problem of the efficient visualization of large irregular volume data sets by exploiting a multiresolution model based on tetrahedral meshes. Multiresolution models, also called Level-Of-Detail (LOD) models, allow encoding the whole data set at a virtually continuous range of different resolutions. We have identified a set of queries for extracting meshes at variable resolution from a multiresolution model, based on field values, domain location, or opacity of the transfer function. Such queries allow trading off between resolution and speed in visualization. We define a new compact data structure for encoding a multiresolution tetrahedral mesh built through edge collapses to support selective refinement efficiently and show that such a structure has a storage cost from 3 to 5.5 times lower than standard data structures used for tetrahedral meshes. The data structures and variable resolution queries have been implemented together with state-of-the-art visualization techniques in a system for the interactive visualization of three-dimensional scalar fields defined on tetrahedral meshes. Experimental results show that selective refinement queries can support interactive visualization of large data sets.}, keywords = {data structure; geometric modeling; interactive visualization; large data sets; multiresolution model; selective refinement queries; unstructured tetrahedral meshes; variable resolution queries; volume data visualization; data visualisation; mesh generation; query processing; Imaging, Three-Dimensional; Pattern Recognition, Automated; Signal Processing, Computer-Assisted; Online Systems; User-Computer Interface}, isbn = {1077-2626}, doi = {10.1109/TVCG.2004.1260756}, author = {Cignoni,P. and De Floriani, Leila and Magillo,P. and Puppo,E. and Scopigno,R.} } @article {17657, title = {SESSION 2: STREAMING}, journal = {Proceedings of the 14th International Workshop on Network and Operating Systems Support for Digital Audio and Video: NOSSDAV 2003: June 16-18, 2004, Cork, Ireland}, year = {2004}, month = {2004///}, pages = {3 - 3}, author = {Banerjee,S. and Lee,S. and Braud,R. and Bhattacharjee, Bobby and Srinivasan, Aravind and Chu,Y. and Zhang,H. and Sinha,R. and Papadopoulos,C. and Boustead,P. and others} } @inbook {16286, title = {Shotgun Sequence Assembly}, booktitle = {Advances in Computers}, volume = {Volume 60}, year = {2004}, month = {2004///}, pages = {193 - 248}, publisher = {Elsevier}, organization = {Elsevier}, abstract = {Shotgun sequencing is the most widely used technique for determining the DNA sequence of organisms. It involves breaking up the DNA into many small pieces that can be read by automated sequencing machines, then piecing together the original genome using specialized software programs called assemblers. Due to the large amounts of data being generated and to the complex structure of most organisms{\textquoteright} genomes, successful assembly programs rely on sophisticated algorithms based on knowledge from such diverse fields as statistics, graph theory, computer science, and computer engineering.
Throughout this chapter we will describe the main computational challenges imposed by the shotgun sequencing method, and survey the most widely used assembly algorithms.}, isbn = {0065-2458}, url = {http://www.sciencedirect.com/science/article/pii/S0065245803600069}, author = {Pop, Mihai} } @conference {15468, title = {Skoll: distributed continuous quality assurance}, booktitle = {Software Engineering, 2004. ICSE 2004. Proceedings. 26th International Conference on}, year = {2004}, month = {2004/05//}, pages = {459 - 468}, abstract = {Quality assurance (QA) tasks, such as testing, profiling, and performance evaluation, have historically been done in-house on developer-generated workloads and regression suites. Since this approach is inadequate for many systems, tools and processes are being developed to improve software quality by increasing user participation in the QA process. A limitation of these approaches is that they focus on isolated mechanisms, not on the coordination and control policies and tools needed to make the global QA process efficient, effective, and scalable. To address these issues, we have initiated the Skoll project, which is developing and validating novel software QA processes and tools that leverage the extensive computing resources of worldwide user communities in a distributed, continuous manner to significantly and rapidly improve software quality. This paper provides several contributions to the study of distributed continuous QA. First, it illustrates the structure and functionality of a generic around-the-world, around-the-clock QA process and describes several sophisticated tools that support this process. Second, it describes several QA scenarios built using these tools and processes. Finally, it presents a feasibility study applying these scenarios to a 1MLOC+ software package called ACE+TAO. While much work remains to be done, the study suggests that the Skoll process and tools effectively manage and control distributed, continuous QA processes. Using Skoll we rapidly identified problems that had taken the ACE+TAO developers substantially longer to find, several of which had not previously been found. Moreover, automatic analysis of QA task results often provided developers with information that quickly led them to the root cause of the problems.}, keywords = {1MLOC+ software package, ACE+TAO, around-the-clock QA process, around-the-world QA process, distributed continuous QA, distributed continuous quality assurance, distributed programming, program verification, Quality assurance, Skoll, software performance evaluation, software profiling, Software quality, Software testing}, doi = {10.1109/ICSE.2004.1317468}, author = {Memon, Atif M. and Porter, Adam and Yilmaz,C. and Nagarajan,A. and Schmidt,D. and Natarajan,B.} } @article {18713, title = {Solution Conformation of Lys63-linked Di-ubiquitin Chain Provides Clues to Functional Diversity of Polyubiquitin Signaling}, journal = {Journal of Biological Chemistry}, volume = {279}, year = {2004}, month = {2004/02/20/}, pages = {7055 - 7063}, abstract = {Diverse cellular events are regulated by post-translational modification of substrate proteins via covalent attachment of one or a chain of ubiquitin molecules. The outcome of (poly)ubiquitination depends upon the specific lysine residues involved in the formation of polyubiquitin chains. Lys48-linked chains act as a universal signal for proteasomal degradation, whereas Lys63-linked chains act as a specific signal in several non-degradative processes.
Although it has been anticipated that functional diversity between alternatively linked polyubiquitin chains relies on linkage-dependent differences in chain conformation/topology, direct structural evidence in support of this model has been lacking. Here we use NMR methods to determine the structure of a Lys63-linked di-ubiquitin chain. The structure is characterized by an extended conformation, with no direct contact between the hydrophobic residues Leu8, Ile44, and Val70 on the ubiquitin units. This structure contrasts with the closed conformation observed for Lys48-linked di-ubiquitin wherein these residues form the interdomain interface (Cook, W. J., Jeffrey, L. C., Carson, M., Zhijian, C., and Pickart, C. M. (1992) J. Biol. Chem. 267, 16467-16471; Varadan, R., Walker, O., Pickart, C., and Fushman, D. (2002) J. Mol. Biol. 324, 637-647). Consistent with the open conformation of the Lys63-linked di-ubiquitin, our binding studies show that both ubiquitin domains in this chain can bind a ubiquitin-associated domain from HHR23A independently and in a mode similar to that for mono-ubiquitin. In contrast, Lys48-linked di-ubiquitin binds in a different, higher affinity mode that has yet to be determined. This is the first experimental evidence that alternatively linked polyubiquitin chains adopt distinct conformations.}, isbn = {0021-9258, 1083-351X}, doi = {10.1074/jbc.M309184200}, url = {http://www.jbc.org/content/279/8/7055}, author = {Varadan,Ranjani and Assfalg,Michael and Haririnia,Aydin and Raasi,Shahri and Pickart,Cecile and Fushman, David} } @article {16117, title = {Sonification of geo-referenced data for auditory information seeking: Design principle and pilot study}, journal = {Proceedings of ICAD}, year = {2004}, month = {2004///}, abstract = {We present an Auditory Information Seeking Principle (AISP) (gist, navigate, filter, and details-on-demand) modeled after the visual information seeking mantra [1]. We propose that data sonification designs should conform to this principle. We also present some design challenges imposed by human auditory perception characteristics. To improve blind access to geo-referenced statistical data, we developed two preliminary sonifications adhering to the above AISP, an enhanced table and a spatial choropleth map. Our pilot study shows people can recognize geographic data distribution patterns on a real map with 51 geographic regions, in both designs. The study also shows evidence that AISP conforms to people{\textquoteright}s information seeking strategies. Future work is discussed, including the improvement of the choropleth map design. }, author = {Zhao,H. and Plaisant, Catherine and Shneiderman, Ben and Duraiswami, Ramani} } @conference {19046, title = {The sybil attack in sensor networks: analysis \& defenses}, series = {IPSN {\textquoteright}04}, year = {2004}, month = {2004}, pages = {259 - 268}, publisher = {ACM}, organization = {ACM}, abstract = {Security is important for many sensor network applications. A particularly harmful attack against sensor and ad hoc networks is known as the Sybil attack [6], where a node illegitimately claims multiple identities. This paper systematically analyzes the threat posed by the Sybil attack to wireless sensor networks. We demonstrate that the attack can be exceedingly detrimental to many important functions of the sensor network such as routing, resource allocation, misbehavior detection, etc.
We establish a classification of different types of the Sybil attack, which enables us to better understand the threats posed by each type, and better design countermeasures against each type. We then propose several novel techniques to defend against the Sybil attack, and analyze their effectiveness quantitatively.}, keywords = {Security, sensor networks, sybil attack}, isbn = {1-58113-846-6}, url = {http://doi.acm.org/10.1145/984622.984660}, author = {Newsome, James and Elaine Shi and Song, Dawn and Perrig, Adrian} } @conference {14749, title = {Transparent proxies for java futures}, booktitle = {Proceedings of the 19th annual ACM SIGPLAN conference on Object-oriented programming, systems, languages, and applications}, series = {OOPSLA {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {206 - 223}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {A proxy object is a surrogate or placeholder that controls access to another target object. Proxies can be used to support distributed programming, lazy or parallel evaluation, access control, and other simple forms of behavioral reflection. However, wrapper proxies (like futures or suspensions for yet-to-be-computed results) can require significant code changes to be used in statically-typed languages, while proxies more generally can inadvertently violate assumptions of transparency, resulting in subtle bugs. To solve these problems, we have designed and implemented a simple framework for proxy programming that employs a static analysis based on qualifier inference, but with additional novelties. Code for using wrapper proxies is automatically introduced via a classfile-to-classfile transformation, and potential violations of transparency are signaled to the programmer. We have formalized our analysis and proven it sound. Our framework has a variety of applications, including support for asynchronous method calls returning futures. Experimental results demonstrate the benefits of our framework: programmers are relieved of managing and/or checking proxy usage, analysis times are reasonably fast, overheads introduced by added dynamic checks are negligible, and performance improvements can be significant. For example, changing two lines in a simple RMI-based peer-to-peer application and then using our framework resulted in a large performance gain.}, keywords = {future, java, proxy, Type inference, type qualifier}, isbn = {1-58113-831-8}, doi = {10.1145/1028976.1028994}, url = {http://doi.acm.org/10.1145/1028976.1028994}, author = {Pratikakis,Polyvios and Spacco,Jaime and Hicks, Michael W.} } @article {18725, title = {Ubistatins Inhibit Proteasome-Dependent Degradation by Binding the Ubiquitin Chain}, journal = {Science}, volume = {306}, year = {2004}, month = {2004/10/01/}, pages = {117 - 120}, abstract = {To identify previously unknown small molecules that inhibit cell cycle machinery, we performed a chemical genetic screen in Xenopus extracts. One class of inhibitors, termed ubistatins, blocked cell cycle progression by inhibiting cyclin B proteolysis and inhibited degradation of ubiquitinated Sic1 by purified proteasomes. Ubistatins blocked the binding of ubiquitinated substrates to the proteasome by targeting the ubiquitin-ubiquitin interface of Lys48-linked chains.
The same interface is recognized by ubiquitin-chain receptors of the proteasome, indicating that ubistatins act by disrupting a critical protein-protein interaction in the ubiquitin-proteasome system.}, doi = {10.1126/science.1100946}, url = {http://www.sciencemag.org/cgi/content/abstract/sci;306/5693/117}, author = {Verma,Rati and Peters,Noel R. and D{\textquoteright}Onofrio,Mariapina and Tochtrop,Gregory P. and Sakamoto,Kathleen M. and Varadan,Ranjani and Zhang,Mingsheng and Coffino,Philip and Fushman, David and Deshaies,Raymond J. and King,Randall W.} } @conference {13210, title = {Uncalibrated stereo rectification for automatic 3D surveillance}, booktitle = {Image Processing, 2004. ICIP {\textquoteright}04. 2004 International Conference on}, volume = {2}, year = {2004}, month = {2004/10//}, pages = {1357 - 1360 Vol.2}, abstract = {We describe a stereo rectification method suitable for automatic 3D surveillance. We take advantage of the fact that in a typical urban scene, there is ordinarily a small number of dominant planes. Given two views of the scene, we align a dominant plane in one view with the other. Conjugate epipolar lines between the reference view and plane-aligned image become geometrically identical and can be added to the rectified image pair line by line. Selecting conjugate epipolar lines to cover the whole image is simplified since they are geometrically identical. In addition, the polarities of conjugate epipolar lines are automatically preserved by plane alignment, which simplifies stereo matching.}, keywords = {3D surveillance, automatic rectification method, conjugate epipolar lines, image matching, image processing, stereo matching, uncalibrated, urban scene}, doi = {10.1109/ICIP.2004.1419753}, author = {Lim,S.-N. and Mittal,A. and Davis, Larry S. and Paragios,N.} } @inbook {16293, title = {Using the TIGR Assembler in Shotgun Sequencing Projects}, booktitle = {Bacterial Artificial Chromosomes}, series = {Methods in Molecular Biology}, volume = {255}, year = {2004}, month = {2004///}, pages = {279 - 294}, publisher = {Humana Press}, organization = {Humana Press}, abstract = {The TIGR Assembler (TA) ( 1 ) is the sequence assembly program used in sequencing projects at The Institute for Genomic Research (TIGR). Development of the TA was based on the experience obtained in more than 20 sequencing projects completed at TIGR ( see http://www.tigr.org ). This extensive experience led to a sequence assembler that produces few misassemblies ( 2 , 3 ) and has been used successfully in whole-genome shotgun sequencing of prokaryotic and eukaryotic organisms, bacterial artificial chromosome-based sequencing of eukaryotic organisms, and expressed sequence tag assembly.}, isbn = {978-1-59259-752-9}, url = {http://dx.doi.org/10.1385/1-59259-752-1:279}, author = {Pop, Mihai and Kosack,Dan}, editor = {Zhao,Shaying and Stodolsky,Marvin} } @inbook {12294, title = {Using Trust in Recommender Systems: An Experimental Analysis}, booktitle = {Trust Management}, series = {Lecture Notes in Computer Science}, volume = {2995}, year = {2004}, month = {2004///}, pages = {221 - 235}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Recommender systems (RS) have been used for suggesting items (movies, books, songs, etc.) that users might like. RSs compute a user similarity between users and use it as a weight for the users{\textquoteright} ratings.
However, they have many weaknesses, such as sparseness, cold start and vulnerability to attacks. We assert that these weaknesses can be alleviated using a Trust-aware system that takes into account the {\textquotedblleft}web of trust{\textquotedblright} provided by every user. Specifically, we analyze data from the popular Internet web site epinions.com. The dataset consists of 49290 users who expressed reviews (with rating) on items and explicitly specified their web of trust, i.e. users whose reviews they have consistently found to be valuable. We show that any two users usually have few items rated in common. For this reason, the classic RS technique is often ineffective and is not able to compute a user similarity weight for many of the users. Instead, exploiting the webs of trust, it is possible to propagate trust and infer an additional weight for other users. We show how this quantity can be computed against a larger number of users.}, keywords = {Computer Science}, isbn = {978-3-540-21312-3}, url = {http://dx.doi.org/10.1007/978-3-540-24747-0_17}, author = {Massa,Paolo and Bhattacharjee, Bobby}, editor = {Jensen,Christian and Poslad,Stefan and Dimitrakos,Theo} } @article {15490, title = {Validating quality of service for reusable software via model-integrated distributed continuous quality assurance}, journal = {Software Reuse: Methods, Techniques, and Tools}, year = {2004}, month = {2004///}, pages = {286 - 295}, abstract = {Quality assurance (QA) tasks, such as testing, profiling, and performance evaluation, have historically been done in-house on developer-generated workloads and regression suites. Performance-intensive systems software, such as that found in the scientific computing grid and distributed real-time and embedded (DRE) domains, increasingly run on heterogeneous combinations of OS, compiler, and hardware platforms. Such software has stringent quality of service (QoS) requirements and often provides a variety of configuration options to optimize QoS. As a result, QA performed solely in-house is inadequate since it is hard to manage software variability, i.e., ensuring software quality on all supported target platforms across all desired configuration options. This paper describes how the Skoll project is addressing these issues by developing advanced QA processes and tools that leverage the extensive computing resources of user communities in a distributed, continuous manner to improve key software quality attributes.}, doi = {10.1007/978-3-540-27799-6_24}, author = {Krishna,A. and Schmidt,D. and Memon, Atif M. and Porter, Adam and Sevilla,D.} } @article {12936, title = {Viable but Nonculturable Vibrio Cholerae O1 in the Aquatic Environment of Argentina}, journal = {Applied and Environmental Microbiology}, volume = {70}, year = {2004}, month = {2004/12/01/}, pages = {7481 - 7486}, abstract = {In Argentina, as in other countries of Latin America, cholera has occurred in an epidemic pattern. Vibrio cholerae O1 is native to the aquatic environment, and it occurs in both culturable and viable but nonculturable (VNC) forms, the latter during interepidemic periods. This is the first report of the presence of VNC V. cholerae O1 in the estuarine and marine waters of the R{\'\i}o de la Plata and the Argentine shelf of the Atlantic Ocean, respectively. Employing immunofluorescence and PCR methods, we were able to detect reservoirs of V. cholerae O1 carrying the virulence-associated genes ctxA and tcpA. The VNC forms of V.
cholerae O1 were identified in samples of water, phytoplankton, and zooplankton; the latter organisms were mainly the copepods Acartia tonsa, Diaptomus sp., Paracalanus crassirostris, and Paracalanus parvus. We found that under favorable conditions, the VNC form of V. cholerae can revert to the pathogenic, transmissible state. We concluded that V. cholerae O1 is a resident of Argentinean waters, as has been shown to be the case in other geographic regions of the world.}, isbn = {0099-2240, 1098-5336}, doi = {10.1128/AEM.70.12.7481-7486.2004}, url = {http://aem.asm.org/content/70/12/7481}, author = {Binsztein,Norma and Costagliola,Marcela C. and Pichel,Mariana and Jurquiza,Ver{\'o}nica and Ram{\'\i}rez,Fernando C. and Akselman,Rut and Vacchino,Marta and Huq,Anwarul and Rita R Colwell} } @conference {12697, title = {View independent human body pose estimation from a single perspective image}, booktitle = {Computer Vision and Pattern Recognition, 2004. CVPR 2004. Proceedings of the 2004 IEEE Computer Society Conference on}, volume = {2}, year = {2004}, month = {2004/07/02/june}, pages = {II-16 - II-22 Vol.2}, abstract = {Recovering the 3D coordinates of various joints of the human body from an image is a critical first step for several model-based human tracking and optical motion capture systems. Unlike previous approaches that have used a restrictive camera model or assumed a calibrated camera, our work deals with the general case of a perspective uncalibrated camera and is thus well suited for archived video. The input to the system is an image of the human body and correspondences of several body landmarks, while the output is the set of 3D coordinates of the landmarks in a body-centric coordinate system. Using ideas from 3D model based invariants, we set up a polynomial system of equations in the unknown head pitch, yaw and roll angles. If we are able to make the often-valid assumption that the torso twist is small, there are finite numbers of solutions to the head-orientation that can be computed readily. Once the head orientation is computed, the epipolar geometry of the camera is recovered, leading to solutions to the 3D joint positions. Results are presented on synthetic and real images.}, keywords = {3D coordinates, biomechanics, body-centric coordinate system, epipolar geometry, human body pose estimation, image motion analysis, model-based human tracking, object detection, optical motion capture, perspective uncalibrated camera, physiological models, polynomial system of equations, single perspective image, synthetic and real images, torso twist}, doi = {10.1109/CVPR.2004.1315139}, author = {Parameswaran, V. and Chellapa, Rama} } @article {12189, title = {Visualizations for taxonomic and phylogenetic trees}, journal = {Bioinformatics}, volume = {20}, year = {2004}, month = {2004///}, pages = {2997 - 2997}, author = {Parr,C.S. and Lee,B. and Campbell,D. and Bederson, Benjamin B.} } @article {18730, title = {Algorithms for computing global accessibility cones}, journal = {Journal of Computing and Information Science in Engineering}, volume = {3}, year = {2003}, month = {2003///}, pages = {200 - 200}, author = {Dhaliwal,S. and Gupta,S.K. and Huang,J.
and Priyadarshini,A.} } @inbook {18896, title = {Automating DAML-S Web Services Composition Using SHOP2}, booktitle = {The Semantic Web - ISWC 2003}, series = {Lecture Notes in Computer Science}, volume = {2870}, year = {2003}, month = {2003///}, pages = {195 - 210}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The DAML-S Process Model is designed to support the application of AI planning techniques to the automated composition of Web services. SHOP2 is a Hierarchical Task Network (HTN) planner well-suited for working with the Process Model. We have proven the correspondence between the semantics of SHOP2 and the situation calculus semantics of the Process Model. We have also implemented a system which soundly and completely plans over sets of DAML-S descriptions using a SHOP2 planner, and then executes the resulting plans over the Web. We discuss the challenges and difficulties of using SHOP2 in the information-rich and human-oriented context of Web services.}, keywords = {Computer science}, isbn = {978-3-540-20362-9}, url = {http://www.springerlink.com/content/rm5ejwlmbw0mdv97/abstract/}, author = {Wu,Dan and Parsia,Bijan and Sirin,Evren and Hendler,James and Nau, Dana S.}, editor = {Fensel,Dieter and Sycara,Katia and Mylopoulos,John} } @article {15721, title = {Blind Deconvolution Using a Regularized Structured Total Least Norm Approach}, journal = {SIAM J. on Matrix Analysis and Applications}, volume = {24}, year = {2003}, month = {2003///}, pages = {1018 - 1037}, url = {http://epubs.siam.org/sam-bin/dbq/article/39544}, author = {Pruessner,Armin and O{\textquoteright}Leary, Dianne P.} } @inbook {16128, title = {Broadening Access to Large Online Databases by Generalizing Query Previews}, booktitle = {The Craft of Information Visualization}, year = {2003}, month = {2003///}, pages = {31 - 37}, publisher = {Morgan Kaufmann}, organization = {Morgan Kaufmann}, address = {San Francisco}, abstract = {Companies, government agencies, and other types of organizations are making their large databases available to the world over the Internet. Current database front-ends do not give users information about the distribution of data. This leads many users to waste time and network resources posing queries that have either zero-hit or mega-hit result sets. Query previews form a novel visual approach for browsing large databases. Query previews supply data distribution information about the database that is being searched and give continuous feedback about the size of the result set for the query as it is being formed. On the other hand, query previews use only a few pre-selected attributes of the database. The distribution information is displayed only on these attributes. Unfortunately, many databases are formed of numerous relations and attributes. This paper introduces a generalization of query previews. We allow users to browse all of the relations and attributes of a database using a hierarchical browser. Any of the attributes can be used to display the distribution information, making query previews applicable to many public online databases.}, isbn = {978-1-55860-915-0}, url = {http://www.sciencedirect.com/science/article/pii/B978155860915050007X}, author = {Tanin,Egemen and Plaisant, Catherine and Shneiderman, Ben}, editor = {Bederson, Benjamin B.
and Shneiderman, Ben} } @conference {16144, title = {The Challenge of Missing and Uncertain Data}, booktitle = {Proceedings of the 14th IEEE Visualization 2003 (VIS{\textquoteright}03)}, series = {VIS {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {100}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, isbn = {0-7695-2030-8}, doi = {10.1109/VIS.2003.10029}, url = {http://dx.doi.org/10.1109/VIS.2003.10029}, author = {Eaton,Cyntrica and Plaisant, Catherine and Drizd,Terence} } @inbook {17639, title = {On the Covering Steiner Problem}, booktitle = {FST TCS 2003: Foundations of Software Technology and Theoretical Computer Science}, series = {Lecture Notes in Computer Science}, volume = {2914}, year = {2003}, month = {2003///}, pages = {244 - 251}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The Covering Steiner problem is a common generalization of the k-MST and Group Steiner problems. An instance of the Covering Steiner problem consists of an undirected graph with edge-costs, and some subsets of vertices called groups, with each group being equipped with a non-negative integer value (called its requirement); the problem is to find a minimum-cost tree which spans at least the required number of vertices from every group. When all requirements are equal to 1, this is the Group Steiner problem. While many covering problems (e.g., the covering integer programs such as set cover) become easier to approximate as the requirements increase, the Covering Steiner problem remains at least as hard to approximate as the Group Steiner problem; in fact, the best guarantees previously known for the Covering Steiner problem were worse than those for Group Steiner as the requirements became large. In this work, we present an improved approximation algorithm whose guarantee equals the best known guarantee for the Group Steiner problem.}, isbn = {978-3-540-20680-4}, url = {http://dx.doi.org/10.1007/978-3-540-24597-1_21}, author = {Gupta,Anupam and Srinivasan, Aravind}, editor = {Pandya,Paritosh and Radhakrishnan,Jaikumar} } @conference {17812, title = {The CPR model for summarizing video}, booktitle = {Proceedings of the 1st ACM international workshop on Multimedia databases}, series = {MMDB {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {2 - 9}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Most past work on video summarization has been based on selecting key frames from videos. We propose a model of video summarization based on three important parameters: Priority (of frames), Continuity (of the summary), and non-Repetition (of the summary). In short, a summary must include high priority frames, must be continuous and non-repetitive. An optimal summary is one that maximizes an objective function based on these three parameters. We develop formal definitions of all these concepts and provide algorithms to find optimal summaries. We briefly report on the performance of these algorithms.}, keywords = {multimedia, Summarization, video}, isbn = {1-58113-726-5}, doi = {10.1145/951676.951679}, url = {http://doi.acm.org/10.1145/951676.951679}, author = {Fayzullin,M. and V.S. Subrahmanian and Picariello, A. and Sapino,M.
L} } @article {12088, title = {The dangers of mitigating security design flaws: a wireless case study}, journal = {IEEE Security \& Privacy}, volume = {1}, year = {2003}, month = {2003/02//}, pages = {28 - 36}, abstract = {Mitigating design flaws often provides the only means to protect legacy equipment, particularly in wireless local area networks. A synchronous active attack against the wired equivalent privacy protocol demonstrates how mitigating one flaw or attack can facilitate another.}, keywords = {Communication system security, computer security, cryptography, design flaw mitigation, Dictionaries, legacy equipment, privacy, Protection, Protocols, security design flaws, security of data, synchronous active attack, telecommunication security, Telecommunication traffic, wired equivalent privacy protocol, Wireless LAN, wireless local area networks, Wireless networks}, isbn = {1540-7993}, doi = {10.1109/MSECP.2003.1176993}, author = {Petroni,N. L. and Arbaugh, William A.} } @conference {16136, title = {Data exploration with paired hierarchical visualizations: initial designs of PairTrees}, booktitle = {Proceedings of the 2003 annual national conference on Digital government research}, series = {dg.o {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {1 - 6}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {Paired hierarchical visualizations (PairTrees) integrate treemaps, node-link diagrams, choropleth maps and other information visualization techniques to support exploration of hierarchical data sets at multiple levels of abstraction. This paper describes several novel applications of PairTrees in the econometric and health statistics domains, as well as some challenges and trade-offs inherent in the technique.}, url = {http://dl.acm.org/citation.cfm?id=1123196.1123233}, author = {Kules,Bill and Shneiderman, Ben and Plaisant, Catherine} } @inbook {13283, title = {Data structures for 3D Multi-Tessellations: an overview}, booktitle = {Data Visualization: The State of the Art}, series = {KLUWER INTERNATIONAL SERIES IN ENGINEERING AND COMPUTER SCIENCE}, year = {2003}, month = {2003///}, pages = {239 - 256}, publisher = {Springer}, organization = {Springer}, isbn = {9781402072598}, author = {Danovaro,E. and De Floriani, Leila and Magillo,P. and Puppo,E.} } @inbook {16142, title = {Database Discovery with Dynamic Queries}, booktitle = {The craft of information visualization: readings and reflections}, year = {2003}, month = {2003///}, pages = {1 - 1}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {978-1-55860-915-0}, author = {Fredrikson,A. and North,C. and Plaisant, Catherine and Shneiderman, Ben and Tanin,E.} } @article {13285, title = {Decomposing non-manifold objects in arbitrary dimensions}, journal = {Graphical Models}, volume = {65}, year = {2003}, month = {2003/05//}, pages = {2 - 22}, abstract = {We address the problem of building valid representations of non-manifold d-dimensional objects through an approach based on decomposing a non-manifold d-dimensional object into an assembly of more regular components. We first define a standard decomposition of d-dimensional non-manifold objects described by abstract simplicial complexes.
This decomposition splits a non-manifold object into components that belong to a well-understood class of objects, that we call initial quasi-manifolds. Initial quasi-manifolds cannot be decomposed without cutting them along manifold faces. They form a decidable superset of d-manifolds for d⩾3, and coincide with manifolds for d⩽2. We then present an algorithm that computes the standard decomposition of a general non-manifold complex. This decomposition is unique, and removes all singularities which can be removed without cutting the complex along its manifold faces.}, isbn = {1524-0703}, doi = {10.1016/S1524-0703(03)00006-7}, url = {http://www.sciencedirect.com/science/article/pii/S1524070303000067}, author = {De Floriani, Leila and Mesmoudi,Mostefa M. and Morando,Franco and Puppo,Enrico} } @article {12194, title = {Designing a digital library for young children: An intergenerational partnership}, journal = {The craft of information visualization: readings and reflections}, volume = {178}, year = {2003}, month = {2003///}, author = {Druin, Allison and Bederson, Benjamin B. and Hourcade,J. P and Sherman,L. and Revelle,G. and Platner,M. and Weng,S.} } @article {18023, title = {Deterministic Resource Discovery in Distributed Networks}, journal = {Theory of Computing Systems}, volume = {36}, year = {2003}, month = {2003///}, pages = {479 - 495}, abstract = {The resource discovery problem was introduced by Harchol-Balter, Leighton, and Lewin. They developed a number of algorithms for the problem in the weakly connected directed graph model. This model is a directed logical graph that represents the vertices{\textquoteright} knowledge about the topology of the underlying communication network. The current paper proposes a deterministic algorithm for the problem in the same model, with improved time, message, and communication complexities. Each previous algorithm had a complexity that was higher at least in one of the measures. Specifically, previous deterministic solutions required either time linear in the diameter of the initial network, or communication complexity $O(n^3)$ (with message complexity $O(n^2)$), or message complexity $O(|E_0| \log n)$ (where $E_0$ is the arc set of the initial graph $G_0$). Compared with the main randomized algorithm of Harchol-Balter, Leighton, and Lewin, the time complexity is reduced from $O(\log^2 n)$ to $O(\log n)$, the message complexity from $O(n \log^2 n)$ to $O(n \log n)$, and the communication complexity from $O(n^2 \log^3 n)$ to $O(|E_0| \log^2 n)$. Our work significantly extends the connectivity algorithm of Shiloach and Vishkin which was originally given for a parallel model of computation. Our result also confirms a conjecture of Harchol-Balter, Leighton, and Lewin, and addresses an open question due to Lipton.}, keywords = {Computer Science}, isbn = {1432-4350}, url = {http://dx.doi.org/10.1007/s00224-003-1084-8}, author = {Kutten,Shay and Peleg,David and Vishkin, Uzi} } @conference {16294, title = {Distributed continuous quality assurance: The Skoll project}, booktitle = {Workshop on Remote Analysis and Measurement of Software Systems (RAMSS)}, year = {2003}, month = {2003///}, author = {Yilmaz,C. and Porter, Adam and Schmidt,D. C} }
@article {16289, title = {The Dog Genome: Survey Sequencing and Comparative Analysis}, journal = {Science}, volume = {301}, year = {2003}, month = {2003/09/26/}, pages = {1898 - 1903}, abstract = {A survey of the dog genome sequence (6.22 million sequence reads; 1.5{\texttimes} coverage) demonstrates the power of sample sequencing for comparative analysis of mammalian genomes and the generation of species-specific resources. More than 650 million base pairs (>25\%) of dog sequence align uniquely to the human genome, including fragments of putative orthologs for 18,473 of 24,567 annotated human genes. Mutation rates, conserved synteny, repeat content, and phylogeny can be compared among human, mouse, and dog. A variety of polymorphic elements are identified that will be valuable for mapping the genetic basis of diseases and traits in the dog.}, isbn = {0036-8075, 1095-9203}, doi = {10.1126/science.1086432}, url = {http://www.sciencemag.org/content/301/5641/1898}, author = {Kirkness,Ewen F. and Bafna,Vineet and Halpern,Aaron L. and Levy,Samuel and Remington,Karin and Rusch,Douglas B and Delcher,Arthur L. and Pop, Mihai and Wang,Wei and Fraser,Claire M. and Venter,J. Craig} } @article {17844, title = {Driving scientific applications by data in distributed environments}, journal = {Computational Science{\textemdash}ICCS 2003}, year = {2003}, month = {2003///}, pages = {713 - 713}, author = {Saltz, J. and Catalyurek,U. and Kurc, T. and Gray,M. and Hastings,S. and Langella,S. and Narayanan,S. and Martino,R. and Bryant,S. and Peszynska,M. and others} } @article {12950, title = {Effect of treatment on the dynamics of circulating hypodermin C in cattle naturally infested with Hypoderma lineatum (Diptera: Oestridae)}, journal = {Veterinary Parasitology}, volume = {113}, year = {2003}, month = {2003/05/01/}, pages = {263 - 272}, abstract = {An antigen capture ELISA, using a murine monoclonal antibody recognising recombinant hypodermin C (rHyC), was used to evaluate the influence of early treatment with eprinomectin (Eprinex{\textregistered}) or fenthion (Spotton{\textregistered}) on the kinetics of circulating hypodermin C in calves naturally infested with Hypoderma lineatum. No viable larvae were collected from treated animals, whereas a variable number of warbles were found in control animals. Treatment provoked a decrease in circulating HyC levels that was significant 9 days post-treatment (p.t.). Circulating antigen levels in the treated cattle remained detectable for approximately 99 days p.t. In contrast, control animals had no detectable antigen at 64 days p.t., 42 days earlier than in the treated animals. These results suggest that larvae were either gradually killed, resulting in slow release of antigen, or they were encapsulated, leading to the slow liberation of antigen. Kinetics of circulating HyC did not differ between the two insecticide treatments. Antibodies persisted, in all groups, throughout the 120-day study.
These results suggest that the antigen capture ELISA will be useful as a technique for detecting successful treatment of cattle grub infestations and for the detection of new infestations in previously infested cattle.}, keywords = {Capture ELISA, Cattle-arthropoda, Control methods-arthropoda, Eprinomectin, Fenthion, Hypoderma spp.}, isbn = {0304-4017}, doi = {10.1016/S0304-4017(03)00084-0}, url = {http://www.sciencedirect.com/science/article/pii/S0304401703000840}, author = {Douglas D Colwell and Panadero-Fontan,Rosario and L{\'o}pez-Sandez,Ceferino and Parra-Fernandez,Francisco and Paz-Silva,A and S{\'a}nchez-Andrade,Rita and D{\'\i}ez-Ba{\~n}os,Pablo} } @article {12717, title = {Face recognition: A literature survey}, journal = {ACM Comput. Surv.}, volume = {35}, year = {2003}, month = {2003/12//}, pages = {399 - 458}, abstract = {As one of the most successful applications of image analysis and understanding, face recognition has recently received significant attention, especially during the past several years. At least two reasons account for this trend: the first is the wide range of commercial and law enforcement applications, and the second is the availability of feasible technologies after 30 years of research. Even though current machine recognition systems have reached a certain level of maturity, their success is limited by the conditions imposed by many real applications. For example, recognition of face images acquired in an outdoor environment with changes in illumination and/or pose remains a largely unsolved problem. In other words, current systems are still far away from the capability of the human perception system. This paper provides an up-to-date critical survey of still- and video-based face recognition research. There are two underlying motivations for us to write this survey paper: the first is to provide an up-to-date review of the existing literature, and the second is to offer some insights into the studies of machine recognition of faces. To provide a comprehensive survey, we not only categorize existing recognition techniques but also present detailed descriptions of representative methods within each category. In addition, relevant topics such as psychophysical studies, system evaluation, and issues of illumination and pose variation are covered.}, keywords = {face recognition, person identification}, isbn = {0360-0300}, doi = {10.1145/954339.954342}, url = {http://doi.acm.org/10.1145/954339.954342}, author = {Zhao, W. and Chellapa, Rama and Phillips,P.J. and Rosenfeld, A.} } @article {16126, title = {Family Calendar Survey}, journal = {Technical Reports from UMIACS}, year = {2003}, month = {2003/01/21/}, abstract = {Beginning in late July 2002, we conducted a survey about people{\textquoteright}s personal and family calendaring habits. By the end of September, we had over 400 responses, which are summarized below. The survey was conducted to help inform our work in designing new technologies for families, motivated in part by our work on the interLiving project. InterLiving is a 3 year, European Union-funded project where we work with distributed, multi-generational families as design partners to create new technologies (see http://www.cs.umd.edu/hcil/interliving for details). The survey was administered from a web page (https://www.cs.umd.edu/users/hilary/survey/survey.htm), and participants were solicited via a "chain-mail" email approach. We began by sending a request to fill out a survey to our friends, families, and colleagues.
We asked that they forward the request on to their friends, family and colleagues as well. While we realize that this was an imperfect approach, we believed that the respondents would be representative of the users we are initially targeting in our research on family calendaring and coordination - individuals who are already making relatively heavy use of computers at home and/or work. The results seem to validate this assumption. Many of our respondents likely come from the HCI community as the mailing went to our large lab mailing list. We may have some pollution in the data as a result of people in the same household (e.g. husband and wife) both filling out the survey. Despite these issues, the results we got were helpful in eliciting a number of important findings, namely that people rely on multiple calendars, many of which are still paper. (UMIACS-TR-2002-92) (HCIL-TR-2002-21) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1236}, author = {Hutchinson,Hilary and Bederson, Benjamin B. and Plaisant, Catherine and Druin, Allison} } @conference {18340, title = {Footloose: a case for physical eventual consistency and selective conflict resolution}, booktitle = {Mobile Computing Systems and Applications, 2003. Proceedings. Fifth IEEE Workshop on}, year = {2003}, month = {2003///}, pages = {170 - 179}, publisher = {IEEE}, organization = {IEEE}, abstract = {Users are increasingly inundated with small devices with communication and storage capabilities. Unfortunately, the user is still responsible for reconciling all of the devices whenever a change is made. We present Footloose, a user-centered data store that can share data and reconcile conflicts across diverse devices. Footloose is an optimistic system based on physical eventual consistency: consistency based on the movement of devices, and selective conflict resolution, which allows conflicts to flow through devices that cannot resolve the conflict to devices that can. Using these techniques, Footloose can present consistent views of data on the devices closest to the user without user interaction.}, isbn = {0-7695-1995-4}, doi = {10.1109/MCSA.2003.1240778}, url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=1240778}, author = {Paluska and Saff and Yeh and Chen} } @article {16290, title = {The genome sequence of Bacillus anthracis Ames and comparison to closely related bacteria}, journal = {Nature}, volume = {423}, year = {2003}, month = {2003/05/01/}, pages = {81 - 86}, abstract = {Bacillus anthracis is an endospore-forming bacterium that causes inhalational anthrax (ref. 1). Key virulence genes are found on plasmids (extra-chromosomal, circular, double-stranded DNA molecules) pXO1 (ref. 2) and pXO2 (ref. 3). To identify additional genes that might contribute to virulence, we analysed the complete sequence of the chromosome of B. anthracis Ames (about 5.23 megabases). We found several chromosomally encoded proteins that may contribute to pathogenicity{\textemdash}including haemolysins, phospholipases and iron acquisition functions{\textemdash}and identified numerous surface proteins that might be important targets for vaccines and drugs. Almost all these putative chromosomal virulence and surface proteins have homologues in Bacillus cereus, highlighting the similarity of B. anthracis to near-neighbours that are not associated with anthrax (ref. 4). By performing a comparative genome hybridization of 19 B. cereus and Bacillus thuringiensis strains against a B.
anthracis DNA microarray, we confirmed the general similarity of chromosomal genes among this group of close relatives. However, we found that the gene sequences of pXO1 and pXO2 were more variable between strains, suggesting plasmid mobility in the group. The complete sequence of B. anthracis is a step towards a better understanding of anthrax pathogenesis.}, isbn = {0028-0836}, doi = {10.1038/nature01586}, url = {http://www.nature.com/nature/journal/v423/n6935/full/nature01586.html}, author = {Read,Timothy D. and Peterson,Scott N. and Tourasse,Nicolas and Baillie,Les W. and Paulsen,Ian T. and Nelson,Karen E. and Tettelin,Herv{\'e} and Fouts,Derrick E. and Eisen,Jonathan A. and Gill,Steven R. and Holtzapple,Erik K. and {\O}kstad,Ole Andreas and Helgason,Erlendur and Rilstone,Jennifer and Wu,Martin and Kolonay,James F. and Beanan,Maureen J. and Dodson,Robert J. and Brinkac,Lauren M. and Gwinn,Michelle and DeBoy,Robert T. and Madpu,Ramana and Daugherty,Sean C. and Durkin,A. Scott and Haft,Daniel H. and Nelson,William C. and Peterson,Jeremy D. and Pop, Mihai and Khouri,Hoda M. and Radune,Diana and Benton,Jonathan L. and Mahamoud,Yasmin and Jiang,Lingxia and Hance,Ioana R. and Weidman,Janice F. and Berry,Kristi J. and Plaut,Roger D. and Wolf,Alex M. and Watkins,Kisha L. and Nierman,William C. and Hazen,Alyson and Cline,Robin and Redmond,Caroline and Thwaite,Joanne E. and White,Owen and Salzberg,Steven L. and Thomason,Brendan and Friedlander,Arthur M. and Koehler,Theresa M. and Hanna,Philip C. and Kolst{\o},Anne-Brit and Fraser,Claire M.} } @article {13234, title = {Guest editors{\textquoteright} Introduction to the special section on energy minimization methods in computer vision and pattern recognition}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {25}, year = {2003}, month = {2003/11//}, pages = {1361 - 1363}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2003.1240110}, author = {Figueiredo,M.A.T. and Hancock,E.R. and Pelillo,M. and Zerubia, J.} } @conference {16138, title = {Helping users get started with visual interfaces: multi-layered interfaces, integrated initial guidance and video demonstrations}, booktitle = {Proceedings of the 2003 annual national conference on Digital government research}, series = {dg.o {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {1 - 1}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {We are investigating new ways to help users learn to use public access interactive tools, in particular for the visual exploration of government statistics. Our work led to a series of interfaces using multi-layered design, a new help method called Integrated Initial Guidance, and video demonstrations. Multi-layer designs structure an interface so that a simpler interface is available for users to get started and more complex features are accessed as users move through the more advanced layers. Integrated Initial Guidance provides help within the working interface, right at the start of the application.
Using the metaphor of "sticky notes" overlaid on top of the functional interface locates the main widgets, demonstrates their manipulation, and explains the resulting actions using preset activations of the interface.}, url = {http://dl.acm.org/citation.cfm?id=1123196.1123257}, author = {Kang,Hyunmo and Plaisant, Catherine and Shneiderman, Ben} } @article {16301, title = {ICSE workshop on remote analysis and measurement of software systems (RAMSS)}, journal = {ACM SIGSOFT Software Engineering Notes}, volume = {28}, year = {2003}, month = {2003/11//}, pages = {10 - 10}, isbn = {0163-5948}, doi = {10.1145/966221.966232}, url = {http://dl.acm.org/citation.cfm?id=966232}, author = {Orso,Alessandro and Porter, Adam} } @article {16125, title = {Immediate Usability: Kiosk design principles from the CHI 2001 Photo Library}, journal = {Technical Reports from UMIACS}, year = {2003}, month = {2003/01/21/}, abstract = {This paper describes a novel set of design principles and guidelines for ensuring the immediate usability of public access systems. These principles and guidelines were formulated while developing PhotoFinder Kiosk, a community photo library. Attendees of CHI 2001 successfully used the tool to browse and annotate collections of photographs spanning 20 years of CHI and related conferences, producing a richly annotated photo history of the field of human-computer interaction. We used observations and log data to evaluate the tool and refine the guidelines. They provide specific guidance for practitioners, as well as a useful framework for additional research in public access interfaces. Keywords: Photo collection, community photo library, group annotation, public access system, direct annotation, direct manipulation, drag-and-drop, immediate usability, zero-trial learning, walk-up-and-use, casual use. (UMIACS-TR-2001-71) (HCIL-TR-2001-23) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1155}, author = {Kules,Bill and Kang,Hyunmo and Plaisant, Catherine and Rose,Anne and Shneiderman, Ben} } @article {16124, title = {Improving Accessibility and Usability of Geo-referenced Statistical Data}, year = {2003}, month = {2003/06/04/}, abstract = {Several technology breakthroughs are needed to achieve the goals of universal accessibility and usability. These goals are especially challenging in the case of geo-referenced statistical data that many U.S. government agencies supply. We present technical and user-interface design challenges in accommodating users with low-end technology (slow network connection and low-end machine) and users who are blind or vision-impaired. Our solutions are presented and future work is discussed. (UMIACS-TR-2003-37) (HCIL-2003-11) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1277}, author = {Zhao,Haixia and Plaisant, Catherine and Shneiderman, Ben} } @inbook {17237, title = {Innovating the Interaction}, booktitle = {The craft of information visualization: readings and reflections}, year = {2003}, month = {2003///}, pages = {295 - 295}, publisher = {Morgan Kaufmann}, organization = {Morgan Kaufmann}, address = {San Francisco}, isbn = {978-1-55860-915-0}, author = {Bederson, Benjamin B. and Plaisant, Catherine and Mushlin,R. and Snyder,A. and Li,J. and Heller,D. and Shneiderman, Ben and Hochheiser,H. and Fekete,J.
D and Czerwinski,M.} } @article {16418, title = {The interaction between zoning regulations and residential preferences as a driver of urban form}, journal = {Proceedings of the 2003 UTEP Distinguished Faculty and Student Symposium}, year = {2003}, month = {2003///}, author = {Zellner,M.L. and Riolo,R and Rand, William and Page,S.E. and Brown,D.G. and Fernandez,L.E.} } @article {13299, title = {Interactive Visualization of Large Tetrahedral Meshes through Selective Refinement}, journal = {IEEE Transactions on Visualization and Computer Graphics}, year = {2003}, month = {2003///}, abstract = {In this paper, we address the problem of the efficient visualization of very large irregular volume datasets. To this aim, we exploit a multiresolution representation based on a domain decomposition into tetrahedral cells. A new compact data structure is described which encodes the whole dataset at a virtually continuous range of different resolutions, with a storage cost six times lower than a standard data structure for tetrahedral meshes. Such structure supports on-line selective refinement to focus resolution on areas that the user considers more critical, based on either field values, or domain location, or opacity of the transfer function. Selective refinement is used to trade-off between resolution and speed in visualization, according to user needs and hardware constraints. These features have been implemented in a new system, called TAn2 (Tetrahedra Analyzer), for the interactive visualization of three-dimensional scalar fields defined on very large tetrahedral meshes. Multiresolution representation and selective refinement make the system fully scalable with respect to the size of the dataset and to hardware requirements. }, author = {Cignoni,P. and De Floriani, Leila and Magillo,P. and Puppo,E. and Scopigno,R.} } @inbook {16129, title = {LifeLines: Using Visualization to Enhance Navigation and Analysis of Patient Records}, booktitle = {The Craft of Information Visualization}, year = {2003}, month = {2003///}, pages = {308 - 312}, publisher = {Morgan Kaufmann}, organization = {Morgan Kaufmann}, address = {San Francisco}, isbn = {978-1-55860-915-0}, url = {http://www.sciencedirect.com/science/article/pii/B978155860915050038X}, author = {Plaisant, Catherine and Mushlin,Richard and Snyder,Aaron and Li,Jia and Heller,Dan and Shneiderman, Ben}, editor = {Bederson, Benjamin B. and Shneiderman, Ben} } @conference {13306, title = {Morphology-driven simplification and multiresolution modeling of terrains}, booktitle = {Proceedings of the 11th ACM international symposium on Advances in geographic information systems}, series = {GIS {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {63 - 70}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We propose a technique for simplification and multiresolution modeling of a terrain represented as a TIN. Our goal is to maintain the morphological structure of the terrain in the resulting multiresolution model. To this aim, we extend Morse theory, developed for continuous and differentiable functions, to the case of piecewise linear functions. We decompose a TIN into areas with uniform morphological properties (such as valleys, basins, etc.) separated by a network of critical lines and points. We describe an algorithm to compute the above decomposition and the critical net, and a TIN simplification algorithm that preserves them.
On this basis, we build a multiresolution terrain model, which provides a representation of critical features at any level of detail.}, keywords = {morphological structure, Multi-resolution, terrain models}, isbn = {1-58113-730-3}, doi = {10.1145/956676.956685}, url = {http://doi.acm.org/10.1145/956676.956685}, author = {Danovaro,Emanuele and De Floriani, Leila and Magillo,Paola and Mesmoudi,Mohammed Mostefa and Puppo,Enrico} } @inbook {16147, title = {Navigation Patterns and Usability of Zoomable User Interfaces With and Without an Overview}, booktitle = {The craft of information visualization: readings and reflections}, year = {2003}, month = {2003///}, pages = {120 - 120}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {978-1-55860-915-0}, author = {Bederson, Benjamin B. and Plaisant, Catherine} } @article {16127, title = {Navigation patterns \& usability of zoomable user interfaces: with and without an overview}, journal = {interactions}, volume = {10}, year = {2003}, month = {2003/01//}, pages = {11 - 12}, abstract = {The following abstracts are from recent issues and the forthcoming issue of ACM{\textquoteright}s Transactions of Computer Human Interaction (ToCHI). They are included here to alert interactions{\textquoteright} readers to what research is being done in the field of Computer Human Interaction. The complete papers, when published, can be found in ACM{\textquoteright}s Digital Library at www.acm.org/pubs/contents/journals/tochi/}, isbn = {1072-5520}, doi = {10.1145/604575.604582}, url = {http://doi.acm.org/10.1145/604575.604582}, author = {Hornbaek,Kasper and Bederson, Benjamin B. and Plaisant, Catherine} } @conference {16137, title = {New approaches to help users get started with visual interfaces: multi-layered interfaces and integrated initial guidance}, booktitle = {Proceedings of the 2003 annual national conference on Digital government research}, series = {dg.o {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {1 - 6}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {We are investigating new ways to help users learn to use public access interactive tools, in particular for the visual exploration of government statistics. Our work led to a series of interfaces using multi-layered design and a new help method called Integrated Initial Guidance. Multi-layer designs structure an interface so that a simpler interface is available for users to get started and more complex features are accessed as users move through the more advanced layers. Integrated Initial Guidance provides help within the working interface, right at the start of the application. Using the metaphor of "sticky notes" overlaid on top of the functional interface locates the main widgets, demonstrates their manipulation, and explains the resulting actions using preset animation of the interface. Usability testing with 12 participants led to refined designs and guidelines for the design of Integrated Initial Guidance interfaces.}, url = {http://dl.acm.org/citation.cfm?id=1123196.1123269}, author = {Kang,Hyunmo and Plaisant, Catherine and Shneiderman, Ben} } @conference {18931, title = {On-line computation of two types of structural relations in Japanese}, year = {2003}, month = {2003///}, author = {Aoshima,S. and Phillips,C. 
and Weinberg, Amy} } @article {16143, title = {Overlaying graph links on treemaps}, journal = {IEEE Symposium on Information Visualization Conference Compendium (demonstration)}, year = {2003}, month = {2003///}, abstract = {Every graph can be decomposed into a tree structure plus a set of remaining edges. We describe a visualization technique that displays the tree structure as a Treemap and the remaining edges as curved links overlaid on the Treemap. Link curves are designed to show where the link starts and where it ends without requiring an explicit arrow that would clutter the already dense visualization. This technique is effective for visualizing structures where the underlying tree has some meaning, such as Web sites or XML documents with cross-references. Graphic attributes of the links {\textendash} such as color or thickness {\textendash} can be used to represent attributes of the edges. Users can choose to see all links at once or only the links to and from the node or branch under the cursor. }, author = {Fekete,J. D and Wang,D. and Dang,N. and Aris,A. and Plaisant, Catherine} } @inbook {16134, title = {The people in digital libraries: Multifaceted approaches to assessing needs and impact}, booktitle = {Digital library use: Social practice in design and evaluation}, year = {2003}, month = {2003///}, pages = {119 - 160}, publisher = {MIT Press}, organization = {MIT Press}, isbn = {9780262025447}, author = {Marchionini,G. and Plaisant, Catherine and Komlodi,A.} } @article {12945, title = {Persistence of adhesive properties in Vibrio cholerae after long-term exposure to sea water}, journal = {Environmental Microbiology}, volume = {5}, year = {2003}, month = {2003/10/01/}, pages = {850 - 858}, abstract = {The effect of exposure to artificial sea water (ASW) on the ability of classical Vibrio cholerae O1 cells to interact with chitin-containing substrates and human intestinal cells was studied. Incubation of vibrios in ASW at 5{\textdegree}C and 18{\textdegree}C resulted in two kinds of cell responses: the viable but non-culturable (VBNC) state (i.e.~<0.1 colony forming unit ml$^{-1}$) at 5{\textdegree}C, and starvation (i.e. maintenance of culturability of the population) at 18{\textdegree}C. The latter remained rod shaped and, after 40~days{\textquoteright} incubation, presented a 47{\textendash}58\% reduction in the number of cells attached to chitin, a 48{\textendash}53\% reduction in the number of bacteria adhering to copepods, and a 48{\textendash}54\% reduction in the number of bacteria adhering to human cultured intestinal cells, compared to control cells not suspended in ASW. Bacteria suspended in ASW at 5{\textdegree}C became coccoid and, after 40~days, showed 34{\textendash}42\% fewer cells attached to chitin, 52{\textendash}55\% fewer adhering to copepods, and 45{\textendash}48\% fewer cells adhering to intestinal cell monolayers, compared to controls. Sarkosyl-insoluble membrane proteins that bind chitin particles were isolated and analysed by SDS-PAGE. After 40~days incubation in ASW at both 5{\textdegree}C and 18{\textdegree}C vibrios expressed chitin-binding ligands similar to bacteria harvested in the stationary growth phase.
It is concluded that as vibrios do not lose adhesive properties after long-term exposure to ASW, it is important to include methods for VBNC bacteria when testing environmental and clinical samples for purposes of public health safety.}, isbn = {1462-2920}, doi = {10.1046/j.1462-2920.2003.00498.x}, url = {http://onlinelibrary.wiley.com/doi/10.1046/j.1462-2920.2003.00498.x/abstract?userIsAuthenticated=false\&deniedAccessCustomisedMessage=}, author = {Pruzzo,Carla and Tarsi,Renato and Del Mar Lle{\`o},Maria and Signoretto,Caterina and Zampini,Massimiliano and Pane,Luigi and Rita R Colwell and Canepari,Pietro} } @article {13010, title = {Phylogenetic analysis reveals five independent transfers of the chloroplast gene {\it rbcL} to the mitochondrial genome in angiosperms}, journal = {Curr Genet}, volume = {43}, year = {2003}, month = {2003/05//}, pages = {131 - 138}, abstract = {We used the chloroplast gene rbcL as a model to study the frequency and relative timing of transfer of chloroplast sequences to the mitochondrial genome. Southern blot survey of 20 mitochondrial DNAs confirmed three previously reported groups of plants containing rbcL in their mitochondrion, while PCR studies identified a new mitochondrial rbcL. Published and newly determined mitochondrial and chloroplast rbcL sequences were used to reconstruct rbcL phylogeny. The results imply five or six separate interorganellar transfers of rbcL among the angiosperms examined, and hundreds of successful transfers across all flowering plants. By taxonomic criteria, the crucifer transfer is the most ancient, two separate transfers within the grass family are of intermediate ancestry, and the morning-glory transfer is most recent. All five mitochondrial copies of rbcL examined exhibit insertion and/or deletion events that disrupt the reading frame (three are grossly truncated); and all are elevated in the proportion of nonsynonymous substitutions, providing clear evidence that these sequences are pseudogenes.}, doi = {10.1007/s00294-003-0378-3}, author = {Cummings, Michael P. and Nugent,J. M and Olmstead,R. G and Palmer,J. D} } @article {12949, title = {Predictability of Vibrio Cholerae in Chesapeake Bay}, journal = {Applied and Environmental Microbiology}, volume = {69}, year = {2003}, month = {2003/05/01/}, pages = {2773 - 2785}, abstract = {Vibrio cholerae is autochthonous to natural waters and can pose a health risk when it is consumed via untreated water or contaminated shellfish. The correlation between the occurrence of V. cholerae in Chesapeake Bay and environmental factors was investigated over a 3-year period. Water and plankton samples were collected monthly from five shore sampling sites in northern Chesapeake Bay (January 1998 to February 2000) and from research cruise stations on a north-south transect (summers of 1999 and 2000). Enrichment was used to detect culturable V. cholerae, and 21.1\% (n = 427) of the samples were positive. As determined by serology tests, the isolates did not belong to serogroup O1 or O139 associated with cholera epidemics. A direct fluorescent-antibody assay was used to detect V. cholerae O1, and 23.8\% (n = 412) of the samples were positive. V. cholerae was more frequently detected during the warmer months and in northern Chesapeake Bay, where the salinity is lower. Statistical models successfully predicted the presence of V. cholerae as a function of water temperature and salinity.
Temperatures above 19{\textdegree}C and salinities between 2 and 14 ppt yielded at least a fourfold increase in the number of detectable V. cholerae. The results suggest that salinity variation in Chesapeake Bay or other parameters associated with Susquehanna River inflow contribute to the variability in the occurrence of V. cholerae and that salinity is a useful indicator. Under scenarios of global climate change, increased climate variability, accompanied by higher stream flow rates and warmer temperatures, could favor conditions that increase the occurrence of V. cholerae in Chesapeake Bay.}, isbn = {0099-2240, 1098-5336}, doi = {10.1128/AEM.69.5.2773-2785.2003}, url = {http://aem.asm.org/content/69/5/2773}, author = {Louis,Val{\'e}rie R. and Russek-Cohen,Estelle and Choopun,Nipa and Rivera,Irma N. G. and Gangle,Brian and Jiang,Sunny C. and Rubin,Andrea and Patz,Jonathan A. and Huq,Anwar and Rita R Colwell} } @inbook {16140, title = {Preserving Context with Zoomable User Interfaces}, booktitle = {The craft of information visualization: readings and reflections}, year = {2003}, month = {2003///}, pages = {83 - 83}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {978-1-55860-915-0}, author = {Hornbaek,K. and Bederson, Benjamin B. and Plaisant, Catherine} } @conference {13268, title = {A representation for abstract simplicial complexes: an analysis and a comparison}, booktitle = {Discrete Geometry for Computer Imagery}, year = {2003}, month = {2003///}, pages = {454 - 464}, abstract = {Abstract simplicial complexes are used in many application contexts to represent multi-dimensional, possibly non-manifold and non-uniformly dimensional, geometric objects. In this paper we introduce a new general yet compact data structure for representing simplicial complexes, which is based on a decomposition approach that we have presented in our previous work [3]. We compare our data structure with the existing ones and we discuss in which respects it performs better than others.}, doi = {10.1007/978-3-540-39966-7_43}, author = {De Floriani, Leila and Morando,F. and Puppo,E.} } @conference {13322, title = {Representation of non-manifold objects}, booktitle = {Proceedings of the eighth ACM symposium on Solid modeling and applications}, series = {SM {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {304 - 309}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In our previous work [2], we have shown that a non-manifold, mixed-dimensional object described by simplicial complexes can be decomposed in a unique way into regular components, all belonging to a well-understood class. Based on such decomposition, we define here a two-level topological data structure for representing non-manifold objects in any dimension: the first level represents components; while the second level represents the connectivity relation among them.
The resulting data structure is compact and scalable, allowing for the efficient treatment of singularities without burdening well-behaved parts of a complex with excessive space overheads.}, keywords = {Data structures, Non-manifold modeling, simplicial complexes}, isbn = {1-58113-706-0}, doi = {10.1145/781606.781656}, url = {http://doi.acm.org/10.1145/781606.781656}, author = {De Floriani, Leila and Morando,Franco and Puppo,Enrico} } @conference {15963, title = {RGL study in a hybrid real-time system}, booktitle = {Proceedings of the IASTED NCI}, year = {2003}, month = {2003///}, author = {Hennacy,K. and Swamy,N. and Perlis, Don} } @inbook {16132, title = {Seeing the World Through Image Libraries}, booktitle = {The craft of information visualization: readings and reflections}, year = {2003}, month = {2003///}, pages = {47 - 47}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {978-1-55860-915-0}, author = {North,C. and Shneiderman, Ben and Plaisant, Catherine and Kang,H. and Bederson, Benjamin B.} } @article {14104, title = {The sequence and analysis of Trypanosoma brucei chromosome II}, journal = {Nucleic Acids Research}, volume = {31}, year = {2003}, month = {2003///}, pages = {4856 - 4856}, author = {El-Sayed, Najib M. and Ghedin,E. and Song,J. and MacLeod,A. and Bringaud,F. and Larkin,C. and Wanless,D. and Peterson,J. and Hou,L. and Taylor,S. and others} } @article {15953, title = {Seven days in the life of a robotic agent}, journal = {Innovative Concepts for Agent-Based Systems}, year = {2003}, month = {2003///}, pages = {243 - 256}, author = {Chong,W. and O{\textquoteright}Donovan-Anderson,M. and Okamoto,Y. and Perlis, Don} } @article {16410, title = {Statistical validation of spatial patterns in agent-based models}, journal = {Proceedings of Agent Based Simulation}, volume = {4}, year = {2003}, month = {2003///}, abstract = {We present and evaluate an agent-based model (ABM) of land use change at the rural-urban fringe. This paper is part of a project that links the ABM to surveys of residential preferences and historical patterns of development. Validation is an important issue for such models and we discuss the use of distributional phenomena as a method of validation. We then highlight the ability of our ABM to generate two phenomena evident in empirical analysis of urban development patterns: a power law relationship between frequency and cluster size and a negative exponential relationship between density and distance from city center. We discuss these results in the light of validation of ABMs.}, author = {Rand, William and Brown,D.G. and Page,S.E. and Riolo,R and Fernandez,L.E. and Zellner,M} } @conference {16139, title = {Technology probes: inspiring design for and with families}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems}, series = {CHI {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {17 - 24}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We describe a new method for use in the process of co-designing technologies with users called technology probes.
Technology probes are simple, flexible, adaptable technologies with three interdisciplinary goals: the social science goal of understanding the needs and desires of users in a real-world setting, the engineering goal of field-testing the technology, and the design goal of inspiring users and researchers to think about new technologies. We present the results of designing and deploying two technology probes, the messageProbe and the videoProbe, with diverse families in France, Sweden, and the U.S. We conclude with our plans for creating new technologies for and with families based on our experiences.}, keywords = {computer mediated communication, ethnography, Home, participatory design and cooperative design}, isbn = {1-58113-630-7}, doi = {10.1145/642611.642616}, url = {http://doi.acm.org/10.1145/642611.642616}, author = {Hutchinson,Hilary and Mackay,Wendy and Westerlund,Bo and Bederson, Benjamin B. and Druin, Allison and Plaisant, Catherine and Beaudouin-Lafon,Michel and Conversy,St{\'e}phane and Evans,Helen and Hansen,Heiko and Roussel,Nicolas and Eiderb{\"a}ck,Bj{\"o}rn} } @conference {16133, title = {Toward a statistical knowledge network}, booktitle = {Proceedings of the 2003 annual national conference on Digital government research}, series = {dg.o {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {1 - 6}, publisher = {Digital Government Society of North America}, organization = {Digital Government Society of North America}, abstract = {This paper describes an architecture for a statistical knowledge network that depends on user interfaces as the glue among the components. These interfaces aim to serve non-expert users with diverse needs and statistical and computing experiences. Such interfaces are crucially dependent on different indexing schemes and good metadata.}, url = {http://dl.acm.org/citation.cfm?id=1123196.1123312}, author = {Marchionini,Gary and Haas,Stephanie and Plaisant, Catherine and Shneiderman, Ben and Hert,Carol} } @conference {15930, title = {Towards domain-independent, task-oriented, conversational adequacy}, booktitle = {INTERNATIONAL JOINT CONFERENCE ON ARTIFICIAL INTELLIGENCE}, volume = {18}, year = {2003}, month = {2003///}, pages = {1637 - 1638}, author = {Josyula,D. P and Anderson,M. L and Perlis, Don} } @conference {14059, title = {Tracking a moving speaker using excitation source information}, booktitle = {Eighth European Conference on Speech Communication and Technology}, year = {2003}, month = {2003///}, abstract = {Microphone arrays are widely used to detect, locate, and track a stationary or moving speaker. The first step is to estimate the time delay between the speech signals received by a pair of microphones. Conventional methods like generalized cross-correlation are based on the spectral content of the vocal tract system in the speech signal. The spectral content of the speech signal is affected by degradations caused by noise and reverberation. However, features corresponding to the excitation source of speech are less affected by such degradations. This paper proposes a novel method to estimate the time delays using the excitation source information in speech. The estimated delays are used to get the position of the moving speaker. The proposed method is compared with the spectrum-based approach using real data from a microphone array setup.}, author = {Raykar,V.C. and Duraiswami, Ramani and Yegnanarayana,B. and Prasanna,S.
R. M.} } @conference {12299, title = {On the use of flow migration for handling short-term overloads}, booktitle = {Global Telecommunications Conference, 2003. GLOBECOM {\textquoteright}03. IEEE}, volume = {6}, year = {2003}, month = {2003/12//}, pages = {3108 - 3112 vol.6}, abstract = {In this work, we investigate flow migration as a mechanism to sustain QoS to network users during short-term overloads in the context of an MPLS IP network. We experiment with three different control techniques: static long-term optimal mapping of flows to LSPs; on-line locally optimal mapping of flows to LSPs at flow set-up time; and dynamic flow migration in response to transient congestion. These techniques are applicable over different timescales, have different run-time overheads, and require different levels of monitoring and control software inside the network. We present results both from detailed simulations and a complete implementation using software IP routers. We use voice-over-IP as our test application, and show that if end-to-end quality is to be maintained during short unpredictable bursts of high load, then a fast-timescale control such as migration is required.}, keywords = {flow migration, MPLS, multiprotocol label switching, IP networks, Internet, quality of service, QoS, telecommunication congestion control, telephony, voice-over-IP, software routers}, doi = {10.1109/GLOCOM.2003.1258807}, author = {Kuo,Kuo-Tung and Phuvoravan,S. and Bhattacharjee, Bobby and Jun La,R. and Shayman,M. and Chang,Hyeong Soo} } @conference {16123, title = {Using Visualization Tools to Gain Insight Into Your Data}, booktitle = {SPE Annual Technical Conference and Exhibition}, year = {2003}, month = {2003/10//}, publisher = {Society of Petroleum Engineers}, organization = {Society of Petroleum Engineers}, abstract = {When the term {\textquoteleft}visualization{\textquoteright} is used in the oil and gas industry, it is usually referring to the viewing of complex geologic structures in three-dimensional space. This paper illustrates insights gained by applying interactive visual environments to petroleum industry data that has traditionally been presented in spreadsheets, line graphs, and bar charts. Two information visualization tools, Treemap and SpaceTree, will be shown applied to a variety of oilfield related data.}, isbn = {9781555631529}, doi = {10.2118/84439-MS}, url = {http://www.onepetro.org/mslib/servlet/onepetropreview?id=00084439}, author = {Plaisant, Catherine and Chintalapani,G. and Lukehart,C. and Schiro,D. and Ryan,J.} } @inbook {16135, title = {Visual Information Management for Network Configuration}, booktitle = {The Craft of Information Visualization}, year = {2003}, month = {2003///}, pages = {239 - 256}, publisher = {Morgan Kaufmann}, organization = {Morgan Kaufmann}, address = {San Francisco}, abstract = {Current network management systems rely heavily on forms in their user interfaces. The interfaces reflect the intricacies of the network hardware components but provide little support for guiding users through tasks.
There is a scarcity of useful graphical visualizations and decision-support tools.}, isbn = {978-1-55860-915-0}, url = {http://www.sciencedirect.com/science/article/pii/B9781558609150500329}, author = {Kumar,Harsha and Plaisant, Catherine and Teittinen,Marko and Shneiderman, Ben}, editor = {Bederson, Benjamin B. and Shneiderman, Ben} } @conference {19625, title = {A Visualization of the Primal Simplex Algorithm for the Assignment Problem}, booktitle = {ITiCSE {\textquoteright}03 Proceedings of the 8th Annual Conference on Innovation and Technology in Computer Science Education}, series = {ITiCSE {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {267 - 267}, publisher = {ACM}, organization = {ACM}, abstract = {An educationally oriented visualization software system used to assist the teaching of the Primal Simplex Algorithm for the Assignment Problem is presented.}, keywords = {Assignment problem, Visualization}, isbn = {1-58113-672-2}, url = {http://doi.acm.org/10.1145/961511.961631}, author = {Charalampos Papamanthou and Paparrizos, Konstantinos} } @inbook {16130, title = {Visualizing websites using a hierarchical table of contents browser: WebTOC}, booktitle = {The craft of information visualization: readings and reflections}, year = {2003}, month = {2003///}, pages = {438 - 438}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {9781558609150}, author = {Plaisant, Catherine and Marchionini,G. and Komlodi,A.} } @conference {16141, title = {Which comes first, usability or utility?}, booktitle = {Proceedings of the 14th IEEE Visualization 2003 (VIS{\textquoteright}03)}, series = {VIS {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {112 - 112}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, isbn = {0-7695-2030-8}, doi = {10.1109/VISUAL.2003.1250426}, url = {http://dx.doi.org/10.1109/VISUAL.2003.1250426}, author = {Grinstein,Georges and Kobsa,Alfred and Plaisant, Catherine and Shneiderman, Ben and Stasko,John T} } @inbook {16131, title = {The World{\textquoteright}s Information in Digital Libraries}, booktitle = {The craft of information visualization: readings and reflections}, year = {2003}, month = {2003///}, pages = {149 - 149}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {978-1-55860-915-0}, author = {Plaisant, Catherine and Marchionini,G. and Bruns,T. and Komlodi,A. and Campbell,L. and Rose,A. and Ding,G.M. and Beale Jr,J. and Nolet,V.
and Shneiderman, Ben and others} } @article {13414, title = {AAAI 2002 Workshops}, journal = {AI Magazine}, volume = {23}, year = {2002}, month = {2002/12/15/}, pages = {113 - 113}, isbn = {0738-4602}, doi = {10.1609/aimag.v23i4.1678}, url = {http://www.aaai.org/ojs/index.php/aimagazine/article/viewArticle/1678}, author = {Blake,Brian and Haigh,Karen and Hexmoor,Henry and Falcone,Rino and Soh,Leen-Kiat and Baral,Chitta and McIlraith,Sheila and Gmytrasiewicz,Piotr and Parsons,Simon and Malaka,Rainer and Krueger,Antonio and Bouquet,Paolo and Smart,Bill and Kurumantani,Koichi and Pease,Adam and Brenner,Michael and desJardins, Marie and Junker,Ulrich and Delgrande,Jim and Doyle,Jon and Rossi,Francesca and Schaub,Torsten and Gomes,Carla and Walsh,Toby and Guo,Haipeng and Horvitz,Eric J and Ide,Nancy and Welty,Chris and Anger,Frank D and Guesgen,Hans W and Ligozat,Gerald} } @article {15541, title = {Algorithmic issues in modeling motion}, journal = {ACM Comput. Surv.}, volume = {34}, year = {2002}, month = {2002/12//}, pages = {550 - 572}, abstract = {This article is a survey of research areas in which motion plays a pivotal role. The aim of the article is to review current approaches to modeling motion together with related data structures and algorithms, and to summarize the challenges that lie ahead in producing a more unified theory of motion representation that would be useful across several disciplines.}, keywords = {computational geometry, Computer vision, mobile networks, modeling, molecular biology, motion modeling, physical simulation, robotics, spatio-temporal databases}, isbn = {0360-0300}, doi = {10.1145/592642.592647}, url = {http://doi.acm.org/10.1145/592642.592647}, author = {Agarwal,Pankaj K. and Guibas,Leonidas J. and Edelsbrunner,Herbert and Erickson,Jeff and Isard,Michael and Har-Peled,Sariel and Hershberger,John and Jensen,Christian and Kavraki,Lydia and Koehl,Patrice and Lin,Ming and Manocha,Dinesh and Metaxas,Dimitris and Mirtich,Brian and Mount, Dave and Muthukrishnan,S. and Pai,Dinesh and Sacks,Elisha and Snoeyink,Jack and Suri,Subhash and Wolfson,Ouri} } @conference {16002, title = {ALLI: An Information Integration System Based on Active Logic Framework}, booktitle = {Proceedings of the Third International Conference on Management Information Systems, Greece}, year = {2002}, month = {2002///}, pages = {339 - 348}, author = {Barfourosh,A. A. and Nezhad,H. R.M and Anderson,M. and Perlis, Don} } @conference {13729, title = {Automatic transcription of Czech language oral history in the MALACH project: resources and initial experiments}, booktitle = {Proceedings of the 5th International Conference on Text, Speech and Dialogue}, year = {2002}, month = {2002///}, pages = {57 - 64}, author = {Oard, Douglas and Demner-Fushman,D. and Hajic,J. and Ramabhadran,B. and Gustman,S. and Byrne,WJ and Soergel,D. and Dorr, Bonnie J and Resnik, Philip and Picheny,M.} } @article {16151, title = {Case study: a message board as a technology probe for family communication and coordination}, journal = {Proceedings of the 2002 Conference on Human Factors in Computing Systems}, year = {2002}, month = {2002///}, abstract = {In this paper, we describe the design of an electronic family message board and the initial results of its deployment in three households. The message board was used as a {\textquotedblleft}technology probe{\textquotedblright} to help understand the communication and coordination needs of a distributed, multigenerational family.
Using this information, we are working with the family as design partners to design new technologies.}, author = {Hutchinson,H. and Plaisant, Catherine and Druin, Allison} } @inbook {14598, title = {Combinatorial Algorithms for Design of DNA Arrays}, booktitle = {Chip Technology}, series = {Advances in Biochemical Engineering/Biotechnology}, volume = {77}, year = {2002}, month = {2002///}, pages = {1 - 19}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Optimal design of DNA arrays requires the development of algorithms with two-fold goals: reducing the effects caused by unintended illumination (border length minimization problem) and reducing the complexity of masks (mask decomposition problem). We describe algorithms that reduce the number of rectangles in mask decomposition by 20{\textendash}30\% as compared to a standard array design under the assumption that the arrangement of oligonucleotides on the array is fixed. This algorithm produces a provably optimal solution for all studied real instances of array design. We also address the difficult problem of finding an arrangement which minimizes the border length and come up with a new idea of threading that significantly reduces the border length as compared to standard designs.}, isbn = {978-3-540-43215-9}, url = {http://dx.doi.org/10.1007/3-540-45713-5_1}, author = {Hannenhalli, Sridhar and Hubbell,Earl and Lipshutz,Robert and Pevzner,Pavel}, editor = {Hoheisel,J{\"o}rg and Brazma,A. and B{\"u}ssow,K. and Cantor,C. and Christians,F. and Chui,G. and Diaz,R. and Drmanac,R. and Drmanac,S. and Eickhoff,H. and Fellenberg,K. and Hannenhalli, Sridhar and Hoheisel,J. and Hou,A. and Hubbell,E. and Jin,H. and Jin,P. and Jurinke,C. and Konthur,Z. and K{\"o}ster,H. and Kwon,S. and Lacy,S. and Lehrach,H. and Lipshutz,R. and Little,D. and Lueking,A. and McGall,G. and Moeur,B. and Nordhoff,E. and Nyarsik,L. and Pevzner,P. and Robinson,A. and Sarkans,U. and Shafto,J. and Sohail,M. and Southern,E. and Swanson,D. and Ukrainczyk,T. and van den Boom,D. and Vilo,J. and Vingron,M. and Walter,G. and Xu,C.} } @article {16250, title = {Comparative Genome Sequencing for Discovery of Novel Polymorphisms in Bacillus Anthracis}, journal = {Science}, volume = {296}, year = {2002}, month = {2002/06/14/}, pages = {2028 - 2033}, abstract = {Comparison of the whole-genome sequence of Bacillus anthracis isolated from a victim of a recent bioterrorist anthrax attack with a reference reveals 60 new markers that include single nucleotide polymorphisms (SNPs), inserted or deleted sequences, and tandem repeats. Genome comparison detected four high-quality SNPs between the two sequenced B. anthracis chromosomes and seven differences among different preparations of the reference genome. These markers have been tested on a collection of anthrax isolates and were found to divide these samples into distinct families. These results demonstrate that genome-based analysis of microbial pathogens will provide a powerful new tool for investigation of infectious disease outbreaks.}, isbn = {0036-8075, 1095-9203}, doi = {10.1126/science.1071837}, url = {http://www.sciencemag.org/content/296/5575/2028}, author = {Read,Timothy D. and Salzberg,Steven L.
and Pop, Mihai and Shumway,Martin and Umayam,Lowell and Jiang,Lingxia and Holtzapple,Erik and Busch,Joseph D and Smith,Kimothy L and Schupp,James M and Solomon,Daniel and Keim,Paul and Fraser,Claire M.} } @article {19075, title = {Complete genome sequence and comparative analysis of the metabolically versatile Pseudomonas putida KT2440}, journal = {Environmental Microbiology}, volume = {4}, year = {2002}, month = {2002}, pages = {799 - 808}, abstract = {Pseudomonas putida is a metabolically versatile saprophytic soil bacterium that has been certified as a biosafety host for the cloning of foreign genes. The bacterium also has considerable potential for biotechnological applications. Sequence analysis of the 6.18 Mb genome of strain KT2440 reveals diverse transport and metabolic systems. Although there is a high level of genome conservation with the pathogenic Pseudomonad Pseudomonas aeruginosa (85\% of the predicted coding regions are shared), key virulence factors including exotoxin A and type III secretion systems are absent. Analysis of the genome gives insight into the non-pathogenic nature of P. putida and points to potential new applications in agriculture, biocatalysis, bioremediation and bioplastic production.}, author = {Nelson,K. E and Weinel,C. and Paulsen,IT and Dodson,RJ and Hilbert,H. and Martins dos Santos,VAP and Fouts,DE and Gill,S. R and Pop, Mihai and Holmes,M. and others} } @article {16411, title = {The complex interaction of agents and environments: An example in urban sprawl}, journal = {Proceedings of Agent Based Simulation}, year = {2002}, month = {2002///}, pages = {149 - 161}, abstract = {We present and evaluate a foundational agent-based model of land use change at the rural-urban fringe within the context of a larger project that will link to surveys of the environmental and community preferences of residents with historical data on patterns of development. In this paper, we focus on the dynamics arising from a model of residential location resulting from preferences for services, density, and aesthetics focusing on the relationship between micro level preferences and policy relevant macro phenomena such as scattered development, largest city size, and the number of residential clusters. We consider two representations of agents{\textquoteright} utility functions {\textendash} one additively separable and one multiplicative {\textendash} to see if functional form has any impact on the dynamics of the system, and find that they produce similar results. Our analysis includes both representative agent runs, in which all agents have identical preferences, as well as runs in which the agents have diverse preferences. We find that diversity can increase sprawl through feedbacks associated with the spatial locations of services and agents. In addition, we examine cases in which the agents{\textquoteright} location decisions affect the aesthetic quality of neighboring sites and find that these feedbacks further exacerbate the sprawl effect.}, author = {Rand, William and Zellner,M and Page,S.E. and Riolo,R and Brown,D.G.
and Fernandez,L.E.} } @inbook {18914, title = {On the Complexity of Plan Adaptation by Derivational Analogy in a Universal Classical Planning Framework}, booktitle = {Advances in Case-Based Reasoning}, series = {Lecture Notes in Computer Science}, volume = {2416}, year = {2002}, month = {2002///}, pages = {199 - 206}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {In this paper we present an algorithm called DerUCP, which can be regarded as a general model for plan adaptation using Derivational Analogy. Using DerUCP, we show that previous results on the complexity of plan adaptation do not apply to Derivational Analogy. We also show that Derivational Analogy can potentially produce exponential reductions in the size of the search space generated by a planning system.}, keywords = {Computer science}, isbn = {978-3-540-44109-0}, url = {http://www.springerlink.com/content/db2mr13j90mfrcaw/abstract/}, author = {Au,Tsz-Chiu and Mu{\~n}oz-Avila,H{\'e}ctor and Nau, Dana S.}, editor = {Craw,Susan and Preece,Alun} } @article {12125, title = {COTS-based software development: Processes and open issues}, journal = {Journal of Systems and Software}, volume = {61}, year = {2002}, month = {2002/04/01/}, pages = {189 - 199}, abstract = {The work described in this paper is an investigation of COTS-based software development within a particular NASA environment, with an emphasis on the processes used. Fifteen projects using a COTS-based approach were studied and their actual process was documented. This process is evaluated to identify essential differences in comparison to traditional software development. The main differences, and the activities for which projects require more guidance, are requirements definition and COTS selection, high level design, integration and testing. Starting from these empirical observations, a new process and set of guidelines for COTS-based development are developed and briefly presented.}, keywords = {Commercial off-the-shelf, Component-based, COTS, empirical study, Software development process}, isbn = {0164-1212}, doi = {10.1016/S0164-1212(01)00147-9}, url = {http://www.sciencedirect.com/science/article/pii/S0164121201001479}, author = {Morisio,M and Seaman,C.B and Basili, Victor R. and Parra,A.T and Kraft,S.E and Condon,S.E} } @conference {17583, title = {Dependent rounding in bipartite graphs}, booktitle = {The 43rd Annual IEEE Symposium on Foundations of Computer Science, 2002. Proceedings}, year = {2002}, month = {2002///}, pages = {323 - 332}, publisher = {IEEE}, organization = {IEEE}, abstract = {We combine the pipage rounding technique of Ageev \& Sviridenko with a recent rounding method developed by Srinivasan (2001) to develop a new randomized rounding approach for fractional vectors defined on the edge-sets of bipartite graphs. We show various ways of combining this technique with other ideas, leading to the following applications: richer random-graph models for graphs with a given degree-sequence; improved approximation algorithms for: (i) throughput-maximization in broadcast scheduling, (ii) delay-minimization in broadcast scheduling, and (iii) capacitated vertex cover; fair scheduling of jobs on unrelated parallel machines.
A useful feature of our method is that it lets us prove certain (probabilistic) per-user fairness properties.}, keywords = {Application software, Approximation algorithms, bipartite graph, bipartite graphs, broadcast channels, broadcast scheduling, Broadcasting, capacitated vertex cover, Character generation, computational complexity, Computer science, Delay, edge-sets, Educational institutions, fair scheduling, fractional vectors, graph theory, per-user fairness properties, pipage rounding technique, Processor scheduling, Random variables, random-graph models, randomized rounding approach, rounding method, scheduling, Scheduling algorithm, telecommunication computing, unrelated parallel machines}, isbn = {0-7695-1822-2}, doi = {10.1109/SFCS.2002.1181955}, author = {Gandhi,R. and Khuller, Samir and Parthasarathy,S. and Srinivasan, Aravind} } @article {12958, title = {Detection of Cytotoxin-Hemolysin mRNA in Nonculturable Populations of Environmental and Clinical Vibrio Vulnificus Strains in Artificial Seawater}, journal = {Applied and Environmental Microbiology}, volume = {68}, year = {2002}, month = {2002/11/01/}, pages = {5641 - 5646}, abstract = {The objective of this study was to develop a molecular detection method that better estimates the potential risk associated with the presence of Vibrio vulnificus. For that purpose, we applied seminested reverse transcription-PCR (RT-PCR) to viable but nonculturable (VBNC) populations of V. vulnificus and targeted the cytotoxin-hemolysin virulence gene vvhA. Three strains, two environmental, IF Vv10 and IF Vv18, and one clinical, C7184, were used in this study. Artificial seawater, inoculated with mid-log-phase cells, was maintained at 4{\textdegree}C. VBNC cells resulted after 3, 6, and 14 days for C7184, IF Vv18, and IF Vv10, respectively. Our data indicate that seminested RT-PCR is sensitive for the detection of vvhA mRNA in artificial seawater when exclusively nonculturable bacteria are present. This is the first report of the expression of a toxin gene in VBNC V. vulnificus. Moreover, vvhA transcripts were shown to persist in nonculturable populations over a 4.5-month period, with a progressive decline of the signal over time. This result indicates that special attention should be given to the presence of potentially pathogenic VBNC cells in environmental samples when assessing public health risk.}, isbn = {0099-2240, 1098-5336}, doi = {10.1128/AEM.68.11.5641-5646.2002}, url = {http://aem.asm.org/content/68/11/5641}, author = {Fischer-Le Saux,Marion and Hervio-Heath,Dominique and Loaec,Solen and Rita R Colwell and Pommepuy,Monique} } @inbook {13760, title = {DUSTer: A Method for Unraveling Cross-Language Divergences for Statistical Word-Level Alignment}, booktitle = {Machine Translation: From Research to Real Users}, series = {Lecture Notes in Computer Science}, volume = {2499}, year = {2002}, month = {2002///}, pages = {31 - 43}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The frequent occurrence of divergences {\textemdash}structural differences between languages{\textemdash} presents a great challenge for statistical word-level alignment. In this paper, we introduce DUSTer, a method for systematically identifying common divergence types and transforming an English sentence structure to bear a closer resemblance to that of another language.
Our ultimate goal is to enable more accurate alignment and projection of dependency trees in another language without requiring any training on dependency-tree data in that language. We present an empirical analysis comparing the complexities of performing word-level alignments with and without divergence handling. Our results suggest that our approach facilitates word-level alignment, particularly for sentence pairs containing divergences.}, isbn = {978-3-540-44282-0}, url = {http://dx.doi.org/10.1007/3-540-45820-4_4}, author = {Dorr, Bonnie J and Pearl,Lisa and Hwa,Rebecca and Habash,Nizar}, editor = {Richardson,Stephen} } @article {15613, title = {An efficient k-means clustering algorithm: analysis and implementation}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {24}, year = {2002}, month = {2002/07//}, pages = {881 - 892}, abstract = {In k-means clustering, we are given a set of n data points in d-dimensional space Rd and an integer k and the problem is to determine a set of k points in Rd, called centers, so as to minimize the mean squared distance from each data point to its nearest center. A popular heuristic for k-means clustering is Lloyd{\textquoteright}s (1982) algorithm. We present a simple and efficient implementation of Lloyd{\textquoteright}s k-means clustering algorithm, which we call the filtering algorithm. This algorithm is easy to implement, requiring a kd-tree as the only major data structure. We establish the practical efficiency of the filtering algorithm in two ways. First, we present a data-sensitive analysis of the algorithm{\textquoteright}s running time, which shows that the algorithm runs faster as the separation between clusters increases. Second, we present a number of empirical studies both on synthetically generated data and on real data sets from applications in color quantization, data compression, and image segmentation.}, keywords = {k-means clustering, Lloyd algorithm, filtering algorithm, kd-tree, mean squared distance, covariance matrices, data-sensitive analysis, data structures, color quantization, data compression, image segmentation, pattern clustering}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2002.1017616}, author = {Kanungo,T. and Mount, Dave and Netanyahu,N. S and Piatko,C. D and Silverman,R. and Wu,A. Y} } @inbook {14692, title = {Evolution in Action: Using Active Networking to Evolve Network Support for Mobility}, booktitle = {Active Networks}, series = {Lecture Notes in Computer Science}, volume = {2546}, year = {2002}, month = {2002///}, pages = {146 - 161}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {A key early objective of Active Networking (AN) was to support on-the-fly network evolution. Although AN has been used relatively extensively to build application-customized protocols and even whole networking systems, demonstrations of evolution have been limited. This paper examines three AN mechanisms and how they enable evolution: active packets and plug-in extensions, well-known to the AN community, and update extensions, which are novel to AN. We devote our presentation to a series of demonstrations of how each type of evolution can be applied to the problem of adding support for mobility to a network. This represents the most large-scale demonstration of AN evolution to date.
These demonstrations show what previous AN research has not: that AN technology can, in fact, support very significant changes to the network, even while the network is operational.}, isbn = {978-3-540-00223-9}, url = {http://dx.doi.org/10.1007/3-540-36199-5_12}, author = {Seong-Kyu,Song and Shannon,Stephen and Hicks, Michael W. and Nettles,Scott}, editor = {Sterbenz,James and Takada,Osamu and Tschudin,Christian and Plattner,Bernhard} } @conference {15268, title = {Experimental Construction of Very Large Scale DNA Databases with Associative Search}, booktitle = {DNA computing: 7th International Workshop on DNA-Based Computers, DNA 7, Tampa, FL, USA, June 10-13, 2001: revised papers}, volume = {7}, year = {2002}, month = {2002///}, pages = {231 - 231}, author = {Reif,J. H and LaBean,T. H and Pirrung,M. and Rana,V. S and Guo,B. and Kingsford, Carl and Wickham,G. S} } @conference {12763, title = {An experimental evaluation of linear and kernel-based methods for face recognition}, booktitle = {Applications of Computer Vision, 2002. (WACV 2002). Proceedings. Sixth IEEE Workshop on}, year = {2002}, month = {2002///}, pages = {13 - 18}, abstract = {In this paper we present the results of a comparative study of linear and kernel-based methods for face recognition. The methods used for dimensionality reduction are Principal Component Analysis (PCA), Kernel Principal Component Analysis (KPCA), Linear Discriminant Analysis (LDA) and Kernel Discriminant Analysis (KDA). The methods used for classification are Nearest Neighbor (NN) and Support Vector Machine (SVM). In addition, these classification methods are applied on raw images to gauge the performance of these dimensionality reduction techniques. All experiments have been performed on images from UMIST Face Database.}, keywords = {face recognition, principal component analysis, kernel principal component analysis, linear discriminant analysis, kernel discriminant analysis, nearest neighbor, support vector machine, image classification}, doi = {10.1109/ACV.2002.1182137}, author = {Gupta, H. and Agrawala, Ashok K. and Pruthi, T. and Shekhar, C. and Chellappa, Rama} } @article {18762, title = {Extracting articulation models from CAD models of parts with curved surfaces}, journal = {Journal of Mechanical Design (Transactions of the ASME)}, volume = {124}, year = {2002}, month = {2002///}, pages = {106 - 114}, abstract = {Degrees of freedom in an assembly are realized by creating mating features that permit relative motion between parts. In complex assemblies, interactions between individual degrees of freedom may result in a behavior different from the intended behavior. In addition, current methods perform assembly reasoning by approximating curved surfaces as piecewise linear surfaces. Therefore, it is important to be able to: reason about assemblies using exact representations of curved surfaces; verify global motion behavior of parts in the assembly; and create motion simulations of the assembly by examination of the geometry. In this paper, we present a linear algebraic constraint method to automatically construct the space of allowed instantaneous motions of an assembly from the geometry of its constituent parts. Our work builds on previous work on linear contact mechanics and on our previous work on curved surface contact mechanics. We enumerate the conditions under which general curved surfaces can be represented using a finite number of constraints linear in the instantaneous velocities.
We compose such constraints to build a space of allowed instantaneous velocities for the assembly. The space is then described as a set-theoretic sum of contact-preserving and contact-breaking motion sub-spaces. Analysis of each subspace provides feedback to the designer, which we demonstrate through the use of an example assembly {\textendash} a 4-part arm. Finally, the results of the analysis of a 4-bar linkage are compared to those from mechanism theory.}, url = {http://www-2.cs.cmu.edu/afs/cs.cmu.edu/user/cjp/www/pubs/MechDes02.pdf}, author = {Sinha,R. and Gupta,S.K. and Paredis,C. J. J. and Khosla,P. K.} } @inbook {12965, title = {Fulfilling the promise of marine biotechnology}, booktitle = {Marine biotechnology in the twenty-first century: problems, promise, and products}, year = {2002}, month = {2002///}, pages = {39 - 39}, publisher = {National Academies Press}, organization = {National Academies Press}, isbn = {9780309083423}, author = {Rita R Colwell}, editor = {National Research Council (U S. ) Committee on Marine Biotechnology: Biomedical Applications of Marine Natural Products} } @article {18837, title = {Generating 3D Models of MEMS Devices by Process Emulation}, volume = {ISR; TR 2002-57}, year = {2002}, month = {2002///}, institution = {Institute for Systems Research, University of Maryland, College Park}, abstract = {MEMS designers often use numerical simulation for detecting errors in the mask layout. Numerical simulation involves generating 3D models of MEMS devices from the mask layout and process description. The generated models can be meshed and simulated over different domains. This report describes an efficient algorithm that can generate 3D geometric models of MEMS devices. Specifically, the algorithm emulates the manufacturing of single functional polysilicon layer MEMS devices using the MUMPS process.}, keywords = {Next-Generation Product Realization Systems}, url = {http://drum.lib.umd.edu//handle/1903/6285}, author = {Bellam,S. and Gupta, Satyandra K. and Priyadarshi,A. K.} } @article {16270, title = {Genome sequence and comparative analysis of the model rodent malaria parasite Plasmodium yoelii yoelii}, journal = {Nature}, volume = {419}, year = {2002}, month = {2002/10/03/}, pages = {512 - 519}, abstract = {Species of malaria parasite that infect rodents have long been used as models for malaria disease research. Here we report the whole-genome shotgun sequence of one species, Plasmodium yoelii yoelii, and comparative studies with the genome of the human malaria parasite Plasmodium falciparum clone 3D7. A synteny map of 2,212 P. y. yoelii contiguous DNA sequences (contigs) aligned to 14 P. falciparum chromosomes reveals marked conservation of gene synteny within the body of each chromosome. Of about 5,300 P. falciparum genes, more than 3,300 P. y. yoelii orthologues of predominantly metabolic function were identified. Over 800 copies of a variant antigen gene located in subtelomeric regions were found. This is the first genome sequence of a model eukaryotic parasite, and it provides insight into the use of such systems in the modelling of Plasmodium biology and disease.}, isbn = {0028-0836}, doi = {10.1038/nature01099}, url = {http://www.nature.com/nature/journal/v419/n6906/full/nature01099.html}, author = {Carlton,Jane M. and Angiuoli,Samuel V and Suh,Bernard B. and Kooij,Taco W. and Pertea,Mihaela and Silva,Joana C. and Ermolaeva,Maria D.
and Allen,Jonathan E and Jeremy D Selengut and Koo,Hean L. and Peterson,Jeremy D. and Pop, Mihai and Kosack,Daniel S. and Shumway,Martin F. and Bidwell,Shelby L. and Shallom,Shamira J. and Aken,Susan E. van and Riedmuller,Steven B. and Feldblyum,Tamara V. and Cho,Jennifer K. and Quackenbush,John and Sedegah,Martha and Shoaibi,Azadeh and Cummings,Leda M. and Florens,Laurence and Yates,John R. and Raine,J. Dale and Sinden,Robert E. and Harris,Michael A. and Cunningham,Deirdre A. and Preiser,Peter R. and Bergman,Lawrence W. and Vaidya,Akhil B. and Lin,Leo H. van and Janse,Chris J. and Waters,Andrew P. and Smith,Hamilton O. and White,Owen R. and Salzberg,Steven L. and Venter,J. Craig and Fraser,Claire M. and Hoffman,Stephen L. and Gardner,Malcolm J. and Carucci,Daniel J.} } @article {16237, title = {Genome Sequence Assembly: Algorithms and Issues}, journal = {Computer}, volume = {35}, year = {2002}, month = {2002///}, pages = {47 - 54}, abstract = {Ultimately, genome sequencing seeks to provide an organism{\textquoteright}s complete DNA sequence. Automation of DNA sequencing allowed scientists to decode entire genomes and gave birth to genomics, the analytic and comparative study of genomes. Although genomes can include billions of nucleotides, the chemical reactions researchers use to decode the DNA are accurate for only about 600 to 700 nucleotides at a time. The DNA reads that sequencing produces must then be assembled into a complete picture of the genome. Errors and certain DNA characteristics complicate assembly. Resolving these problems entails an additional and costly finishing phase that involves extensive human intervention. Assembly programs can dramatically reduce this cost by taking into account additional information obtained during finishing. Algorithms that can assemble millions of DNA fragments into gene sequences underlie the current revolution in biotechnology, helping researchers build the growing database of complete genomes.}, isbn = {0018-9162}, author = {Pop, Mihai and Salzberg,Steven L. and Shumway,Martin} } @conference {16331, title = {A history-based test prioritization technique for regression testing in resource constrained environments}, booktitle = {Proceedings of the 24th international conference on Software engineering - ICSE {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {119 - 119}, address = {Orlando, Florida}, doi = {10.1145/581339.581357}, url = {http://dl.acm.org/citation.cfm?id=581357}, author = {Kim,Jung-Min and Porter, Adam} } @article {13790, title = {Improved Word-Level Alignment: Injecting Knowledge about MT Divergences}, year = {2002}, month = {2002/02/14/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Word-level alignments of bilingual text (bitexts) are not only an integral part of statistical machine translation models, but are also useful for lexical acquisition, treebank construction, and part-of-speech tagging. The frequent occurrence of divergences, structural differences between languages, presents a great challenge to the alignment task. We resolve some of the most prevalent divergence cases by using syntactic parse information to transform the sentence structure of one language to bear a closer resemblance to that of the other language. In this paper, we show that common divergence types can be found in multiple language pairs (in particular, we focus on English-Spanish and English-Arabic) and systematically identified.
We describe our techniques for modifying English parse trees to form resulting sentences that share more similarity with the sentences in the other languages; finally, we present an empirical analysis comparing the complexities of performing word-level alignments with and without divergence handling. Our results suggest that divergence-handling can improve word-level alignment.}, keywords = {*LEXICOGRAPHY, *MACHINE TRANSLATION, *STATISTICAL ANALYSIS, *WORDS(LANGUAGE), ACQUISITION, ALIGNMENT, EXPERIMENTAL DATA, LANGUAGE, linguistics, MATHEMATICAL MODELS, STATISTICS AND PROBABILITY, TREES}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA458774}, author = {Dorr, Bonnie J and Pearl,Lisa and Hwa,Rebecca and Habash,Nizar} } @article {12963, title = {In vitro adhesion to human cells by viable but nonculturable Enterococcus faecalis}, journal = {Current Microbiology}, volume = {45}, year = {2002}, month = {2002///}, pages = {105 - 110}, abstract = {The ability of viable but nonculturable (VBNC) Enterococcus faecalis to adhere to Caco-2 and Girardi heart cultured cells and to urinary tract epithelial cells (ECs) was studied. Enterococci were harvested during the vegetative growth phase (early exponential and stationary), in the VBNC state, and after recovery of the ability to divide. VBNC bacteria maintained their adherence capability but the efficiency of attachment was reduced by about 50 to 70\%, depending on the target cell employed. The decrease was transient, since enterococci that regained their culturability showed adherence values similar to those observed for actively growing cells. Analysis of the invasive properties of E. faecalis revealed that the VBNC state caused a decrease in the number of bacteria that entered the cultured HEK cells as a result of the reduction in the number of adhering bacteria. These results highlight the importance of studies of the VBNC phenomenon, with respect to both microbial survival in the environment and the impact on human health.}, doi = {10.1007/s00284-001-0089-2}, author = {Pruzzo,C. and Tarsi,R. and Lle{\`o},M. M. and Signoretto,C. and Zampini,M. and Rita R Colwell and Canepari,P.} } @conference {17243, title = {Interacting with identification technology: can it make us more secure?}, booktitle = {CHI {\textquoteright}02 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {564 - 565}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {Biometrics, civil liberties, face recognition, national id card, privacy, Security}, isbn = {1-58113-454-1}, doi = {10.1145/506443.506484}, url = {http://doi.acm.org/10.1145/506443.506484}, author = {Scholtz,Jean and Johnson,Jeff and Shneiderman, Ben and Hope-Tindall,Peter and Gosling,Marcus and Phillips,Jonathon and Wexelblat,Alan} } @conference {16153, title = {Interactive information visualization of a million items}, booktitle = {Information Visualization, 2002. INFOVIS 2002. IEEE Symposium on}, year = {2002}, month = {2002///}, pages = {117 - 124}, abstract = {Existing information visualization techniques are usually limited to the display of a few thousand items. This article describes new interactive techniques capable of handling a million items (effectively visible and manageable on screen).
We evaluate the use of hardware-based techniques available with newer graphics cards, as well as new animation techniques and non-standard graphical features such as stereovision and overlap count. These techniques have been applied to two popular information visualizations: treemaps and scatter plot diagrams; but are generic enough to be applied to other 2D representations as well.}, keywords = {information visualization, interactive techniques, animation, graphics cards, hardware-based techniques, stereovision, overlap count, treemaps, scatter plot diagrams, interpolation}, doi = {10.1109/INFVIS.2002.1173156}, author = {Fekete,J.-D. and Plaisant, Catherine} } @conference {12143, title = {Lessons learned from 25 years of process improvement: the rise and fall of the NASA software engineering laboratory}, booktitle = {Proceedings of the 24th International Conference on Software Engineering}, series = {ICSE {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {69 - 79}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {For 25 years the NASA/GSFC Software Engineering Laboratory (SEL) has been a major resource in software process improvement activities. But due to a changing climate at NASA, agency reorganization, and budget cuts, the SEL has lost much of its impact. In this paper we describe the history of the SEL and give some lessons learned on what we did right, what we did wrong, and what others can learn from our experiences. We briefly describe the research that was conducted by the SEL, describe how we evolved our understanding of software process improvement, and provide a set of lessons learned and hypotheses that should enable future groups to learn from and improve on our quarter century of experiences.}, isbn = {1-58113-472-X}, doi = {10.1145/581339.581351}, url = {http://doi.acm.org/10.1145/581339.581351}, author = {Basili, Victor R. and McGarry,Frank E. and Pajerski,Rose and Zelkowitz, Marvin V} } @conference {15624, title = {A local search approximation algorithm for k-means clustering}, booktitle = {Proceedings of the eighteenth annual symposium on Computational geometry}, series = {SCG {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {10 - 18}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In k-means clustering we are given a set of n data points in d-dimensional space Rd and an integer k, and the problem is to determine a set of k points in Rd, called centers, to minimize the mean squared distance from each data point to its nearest center. No exact polynomial-time algorithms are known for this problem. Although asymptotically efficient approximation algorithms exist, these algorithms are not practical due to the extremely high constant factors involved. There are many heuristics that are used in practice, but we know of no bounds on their performance. We consider the question of whether there exists a simple and practical approximation algorithm for k-means clustering. We present a local improvement heuristic based on swapping centers in and out. We prove that this yields a (9+$\epsilon$)-approximation algorithm. We show that the approximation factor is almost tight, by giving an example for which the algorithm achieves an approximation factor of (9-$\epsilon$).
To establish the practical value of the heuristic, we present an empirical study that shows that, when combined with Lloyd{\textquoteright}s algorithm, this heuristic performs quite well in practice.}, keywords = {Approximation algorithms, clustering, computational geometry, k-means, local search}, isbn = {1-58113-504-1}, doi = {10.1145/513400.513402}, url = {http://doi.acm.org/10.1145/513400.513402}, author = {Kanungo,Tapas and Mount, Dave and Netanyahu,Nathan S. and Piatko,Christine D. and Silverman,Ruth and Wu,Angela Y.} } @article {16028, title = {Metareasoning for More Effective Human-Computer Dialogue}, year = {2002}, month = {2002/12/31/}, institution = {University of Maryland, College Park}, abstract = {The research project explores specific meta-dialogue behaviors in terms of both how a system could be made to perform them, and to what extent they can increase overall system performance. We focus on two types of meta-dialogue capabilities: ability to detect and recover from anomalous dialogue patterns in simple exchanges, and on-line extensions or changes to working vocabulary. Our main method involves detailed representation of the dialogue context, separating domain, language, and dialogue specific aspects, and significant amounts of meta-reasoning about the system{\textquoteright}s processing of these representations. An existing logical inference system, ALMA/CARNE, developed as part of a pilot study, is being used in an implementation phase of this work. We are also engaged in a study of existing dialogue corpora to investigate the range and frequency of meta-dialogue expressions in different task domains.}, keywords = {*LEARNING MACHINES, COMPUTER LOGIC, COMPUTER PROGRAMMING AND SOFTWARE, CYBERNETICS, MAN COMPUTER INTERFACE, METAREASONING, NATURAL LANGUAGE PROCESSING}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA414209}, author = {Perlis, Don and Anderson,Mike} } @article {14507, title = {Method and apparatus for learning probabilistic relational models having attribute and link uncertainty and for performing selectivity estimation using probabilistic relational models}, volume = {09/922,324}, year = {2002}, month = {2002/08/01/}, abstract = {The invention comprises a method and apparatus for learning probabilistic relational models (PRMs) with attribute uncertainty. A PRM with attribute uncertainty defines a probability distribution over instantiations of a database. A learned PRM is useful for discovering interesting patterns and dependencies in the data. Unlike many existing techniques, the process is data-driven rather than hypothesis driven. This makes the technique particularly well-suited for exploratory data analysis. In addition, the invention comprises a method and apparatus for handling link uncertainty in PRMs. Link uncertainty is uncertainty over which entities are related in our domain. The invention comprises two mechanisms for modeling link uncertainty: reference uncertainty and existence uncertainty. The invention includes learning algorithms for each form of link uncertainty.
The third component of the invention is a technique for performing database selectivity estimation using probabilistic relational...}, url = {http://www.google.com/patents?id=FVqDAAAAEBAJ}, author = {Koller,Daphne and Getoor, Lise and Pfeffer,Avi and Friedman,Nir and Taskar,Ben} } @article {16413, title = {Modeling the Effects of Greenbelts at the Urban-Rural Fringe}, journal = {Proceedings of IEMSS 2002}, year = {2002}, month = {2002///}, pages = {190 - 195}, abstract = {We present and evaluate an agent based model (ABM) of land use change at the rural-urban fringe, comparing its performance to a mathematical model of the same process. Our simplified model was developed in Swarm using agents with heterogeneous preferences and a landscape with heterogeneous properties. The context of this work is a larger project that includes surveys of the preferences of residents and data on historical patterns of development. Our broader goal is to use the model to evaluate the ecological effects of alternative policies and designs. We begin by evaluating the influence of a greenbelt, which is located next to a developing area and in which no development is permitted. We present results of a mathematical model that illustrates the necessary trade-off between greenbelt placement and greenbelt width on its effectiveness at delaying development beyond. Experiments run with the ABM are validated by the mathematical model and illustrate analyses that can be performed by extending to two dimensions, variable agent preferences, and multiple, and ultimately realistic, patterns of landscape variability. }, author = {Brown,D.G. and Page,S.E. and Riolo,R and Rand, William} } @article {16152, title = {Navigation patterns and usability of zoomable user interfaces with and without an overview}, journal = {ACM Transactions on Computer-Human Interaction (TOCHI)}, volume = {9}, year = {2002}, month = {2002///}, pages = {362 - 389}, author = {Hornbaek,K. and Bederson, Benjamin B. and Plaisant, Catherine} } @conference {13319, title = {Non-manifold decomposition in arbitrary dimensions}, booktitle = {Discrete Geometry for Computer Imagery}, year = {2002}, month = {2002///}, pages = {59 - 115}, abstract = {In this paper we consider the problem of decomposing a nonmanifold n-dimensional object described by an abstract simplicial complex into an assembly of {\textquoteleft}more-regular{\textquoteright} components. Manifolds, which would be natural candidates for components, cannot be used to this aim in high dimensions because they are not decidable sets. Therefore, we define d-quasi-manifolds, a decidable superset of the class of combinatorial d-manifolds that coincides with d-manifolds in dimension less than or equal to two. We first introduce the notion of d-quasi-manifold complexes, then we sketch an algorithm to decompose an arbitrary complex into an assembly of quasi-manifold components abutting at non-manifold joints. This result provides a rigorous starting point for our future work, which includes designing efficient data structures for non-manifold modeling, as well as defining a notion of measure of shape complexity of such models.}, doi = {10.1007/3-540-45986-3_6}, author = {De Floriani, Leila and Mesmoudi,M. and Morando,F. and Puppo,E.} } @conference {13623, title = {Performance evaluation of object detection algorithms}, booktitle = {Pattern Recognition, 2002. Proceedings.
16th International Conference on}, volume = {3}, year = {2002}, month = {2002///}, pages = {965 - 969 vol.3}, abstract = {The continuous development of object detection algorithms is ushering in the need for evaluation tools to quantify algorithm performance. In this paper a set of seven metrics is proposed for quantifying different aspects of a detection algorithm{\textquoteright}s performance. The strengths and weaknesses of these metrics are described. They are implemented in the Video Performance Evaluation Resource (ViPER) system and will be used to evaluate algorithms for detecting text, faces, moving people and vehicles. Results for running two previous text-detection algorithms on a common data set are presented.}, keywords = {common data set; image recognition; object detection algorithms; performance evaluation; text-detection; video performance evaluation resource system}, doi = {10.1109/ICPR.2002.1048198}, author = {Mariano,V.Y. and Min,Junghye and Park,Jin-Hyeong and Kasturi,R. and Mihalcik,D. and Huiping Li and David Doermann and Drayer,T.} } @article {16148, title = {A photo history of SIGCHI: evolution of design from personal to public}, journal = {interactions}, volume = {9}, year = {2002}, month = {2002/05//}, pages = {17 - 23}, abstract = {For 20 years I have been photographing personalities and events in the emerging discipline of human--computer interaction. Until now, only a few of these photos were published in newsletters or were shown to visitors who sought them out. Now this photo history is going from a personal record to a public archive. This archive should be interesting for professional members of this community who want to reminisce, as well as for historians and journalists who want to understand what happened. Students and Web surfers may also want to look at the people who created better interfaces and more satisfying user experiences.}, isbn = {1072-5520}, doi = {10.1145/506671.506682}, url = {http://doi.acm.org/10.1145/506671.506682}, author = {Shneiderman, Ben and Kang,Hyunmo and Kules,Bill and Plaisant, Catherine and Rose,Anne and Rucheir,Richesh} } @article {13009, title = {Phylogenetic analysis based on 18S ribosomal RNA gene sequences supports the existence of class Polyacanthocephala (Acanthocephala)}, journal = {Mol Phylogenet Evol}, volume = {23}, year = {2002}, month = {2002/05//}, pages = {288 - 292}, abstract = {Members of phylum Acanthocephala are parasites of vertebrates and arthropods and are distributed worldwide. The phylum has traditionally been divided into three classes, Archiacanthocephala, Palaeacanthocephala, and Eoacanthocephala; a fourth class, Polyacanthocephala, has been recently proposed. However, erection of this new class, based on morphological characters, has been controversial. We sequenced the near complete 18S rRNA gene of Polyacanthorhynchus caballeroi (Polyacanthocephala) and Rhadinorhynchus sp. (Palaeacanthocephala); these sequences were aligned with another 21 sequences of acanthocephalans representing the three widely recognized classes of the phylum and with 16 sequences from outgroup taxa. Phylogenetic relationships inferred by maximum-likelihood and maximum-parsimony analyses showed Archiacanthocephala as the most basal group within the phylum, whereas classes Polyacanthocephala + Eoacanthocephala formed a monophyletic clade, with Palaeacanthocephala as its sister group.
These results are consistent with the view of Polyacanthocephala representing an independent class within Acanthocephala.}, doi = {10.1016/S1055-7903(02)00020-9}, author = {Garc{\'\i}a-Varela,M and Cummings, Michael P. and P{\'e}rez-Ponce de Le{\'o}n,G. and Gardner,S. L and Laclette,J. P} } @article {17815, title = {Presentation planning for distributed VoD systems}, journal = {Knowledge and Data Engineering, IEEE Transactions on}, volume = {14}, year = {2002}, month = {2002/10//sep}, pages = {1059 - 1077}, abstract = {A distributed video-on-demand (VoD) system is one where a collection of video data is located at dispersed sites across a computer network. In a single site environment, a local video server retrieves video data from its local storage device. However, in distributed VoD systems, when a customer requests a movie from the local server, the server may need to interact with other servers located across the network. In this paper, we present different types of presentation plans that a local server can construct in order to satisfy a customer request. Informally speaking, a presentation plan is a temporally synchronized sequence of steps that the local server must perform in order to present the requested movie to the customer. This involves obtaining commitments from other video servers, obtaining commitments from the network service provider, as well as making commitments of local resources, while keeping within the limitations of available bandwidth, available buffer, and customer data consumption rates. Furthermore, in order to evaluate the quality of a presentation plan, we introduce two measures of optimality for presentation plans: minimizing wait time for a customer and minimizing access bandwidth which, informally speaking, specifies how much network/disk bandwidth is used. We develop algorithms to compute three different optimal presentation plans that work at a block level, or at a segment level, or with a hybrid mix of the two, and compare their performance through simulation experiments. We have also mathematically proven effects of increased buffer or bandwidth and data replications for presentation plans which had previously been verified experimentally in the literature.}, keywords = {computer network; distributed computing; local server; multimedia databases; optimal presentation plans; presentation plan; video on demand; video servers; video-on-demand; VoD}, isbn = {1041-4347}, doi = {10.1109/TKDE.2002.1033774}, author = {Hwang,Eenjun and Prabhakaran,B. and V.S. Subrahmanian} } @conference {18635, title = {Probabilistic validation of intrusion tolerance}, year = {2002}, month = {2002///}, pages = {78 - 79}, url = {https://www.perform.csl.illinois.edu/Papers/USAN_papers/02SAN02.pdf}, author = {Sanders,W. H. and Michel Cukier and Webber,F. and Pal,P. and Watro,R.} } @conference {18690, title = {Quantifying the cost of providing intrusion tolerance in group communication systems}, year = {2002}, month = {2002///}, pages = {229 - 238}, abstract = {Group communication systems that provide consistent group membership and reliable, ordered multicast properties in the presence of faults resulting from malicious intrusions have not been analyzed extensively to quantify the cost of tolerating these intrusions. This paper attempts to quantify this cost by presenting results from an experimental evaluation of three new intrusion-tolerant microprotocols that have been added to an existing crash-fault-tolerant group communication system.
The results are analyzed to identify the parts that contribute the most overhead during provision of intrusion tolerance at the group communication system level.}, keywords = {consistent group membership, crash-fault-tolerant group communication system, cryptography, finite state machines, groupware, intrusion-tolerant microprotocols, malicious intrusions, multicast communication, reliable ordered multicast properties, security of data, Transport protocols}, doi = {10.1109/DSN.2002.1028904}, author = {Ramasamy,H. V. and Pandey,P. and Lyons,J. and Michel Cukier and Sanders,W. H.} } @conference {12761, title = {Quasi-invariants for human action representation and recognition}, booktitle = {Pattern Recognition, 2002. Proceedings. 16th International Conference on}, volume = {1}, year = {2002}, month = {2002///}, pages = {307 - 310 vol.1}, abstract = {Although human action recognition has been the subject of much research in the past, the issue of viewpoint invariance has received scarce attention. In this paper, we present an approach to detect human action with a high tolerance to viewpoint change. Canonical body poses are modeled in a view invariant manner to enable detection from a general viewpoint. While there exist no invariants for 3D to 2D projection, there exists a wealth of techniques in 2D invariance that can be used to advantage in 3D to 2D projection. We employ 2D invariants to recognize canonical poses of the human body leading to an effective way to represent and recognize human action which we evaluate theoretically and experimentally on 2D projections of publicly available human motion capture data.}, keywords = {2D invariance; canonical body poses; human action recognition; human action representation; image motion analysis; quasi-invariants; viewpoint change tolerance}, doi = {10.1109/ICPR.2002.1044699}, author = {Parameswaran, V. and Chellapa, Rama} } @book {14636, title = {RECOMB {\textquoteright}02: Proceedings of the sixth annual international conference on Computational biology}, year = {2002}, month = {2002///}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The papers in this volume were presented at the Sixth Annual International Conference on Computational Biology RECOMB 2002, held in Washington, D.C. on April 18-21, 2002. The RECOMB series was started in 1997 by Sorin Istrail, Pavel Pevzner and Michael Waterman. RECOMB {\textquoteright}99 took place in Lyon, France, RECOMB 2000 was held in Tokyo, Japan, and RECOMB 2001 was held in Montreal, Quebec, Canada. This year{\textquoteright}s call for papers gave rise to 118 submissions, out of which the program committee selected 35 papers to be presented at the conference and included in the proceedings. Each submission was refereed by at least three members of the program committee. After the completion of the referees{\textquoteright} reports, an extensive web-based discussion took place. RECOMB 2002 had 8 invited speakers: Ruben Abagyan (The Scripps Research Institute), Ali H. Brivanlou (Rockefeller University), Evan Eichler (Case Western Reserve University), Harold "Skip" Garner (University of Texas Southwestern Medical Center at Dallas), David Ho (Rockefeller University), Gerry Rubin (Howard Hughes Medical Institute), J. Craig Venter (Celera) and Marc Vidal (Dana-Farber Cancer Institute). The Stanislaw Ulam Memorial Lecture was given by J. Craig Venter. The Distinguished Biology Lecture was given by David Ho.
The Distinguished New Technologies Lecture was given by Harold Garner. Complete final versions of many of the papers presented in the conference will appear in a special issue of the Journal of Computational Biology, which is closely affiliated with the conference.}, isbn = {1-58113-498-3}, editor = {Myers,Gene and Hannenhalli, Sridhar and Sankoff,David and Istrail,Sorin and Pevzner,Pavel and Waterman,Michael} } @conference {12209, title = {SpaceTree: Design evolution of a node link tree browser}, booktitle = {Proc. InfoVis}, year = {2002}, month = {2002///}, pages = {57 - 64}, author = {Plaisant, Catherine and Grosjean,J. and Bederson, Benjamin B.} } @conference {16150, title = {SpaceTree: supporting exploration in large node link tree, design evolution and empirical evaluation}, booktitle = {Information Visualization, 2002. INFOVIS 2002. IEEE Symposium on}, year = {2002}, month = {2002///}, pages = {57 - 64}, abstract = {We present a novel tree browser that builds on the conventional node link tree diagrams. It adds dynamic rescaling of branches of the tree to best fit the available screen space, optimized camera movement, and the use of preview icons summarizing the topology of the branches that cannot be expanded. In addition, it includes integrated search and filter functions. This paper reflects on the evolution of the design and highlights the principles that emerged from it. A controlled experiment showed benefits for navigation to already previously visited nodes and estimation of overall tree topology.}, keywords = {data visualisation; design evolution; dynamic rescaling; experiment; exploration; filter functions; graphical user interfaces; icons; integrated search; large node link tree; node link tree diagrams; novel tree browser; optimized camera movement; SpaceTree; tree data structures; tree topology; visualization}, doi = {10.1109/INFVIS.2002.1173148}, author = {Plaisant, Catherine and Grosjean,J. and Bederson, Benjamin B.} } @article {18719, title = {Structural Properties of Polyubiquitin Chains in Solution}, journal = {Journal of Molecular Biology}, volume = {324}, year = {2002}, month = {2002/12/06/}, pages = {637 - 647}, abstract = {Because polyubiquitin chain structure modulates Ub-mediated signaling, knowledge of the physiological conformations of chain signals should provide insights into specific recognition. Here, we characterized the solution conformations of K48-linked Ub2 and Ub4 using a combination of NMR techniques, including chemical shift mapping of the interdomain interface, domain orientation measurements on the basis of 15N relaxation and residual dipolar couplings, and the solvent accessibility studies. Our data indicate a switch in the conformation of Ub2, from open to closed, with increasing pH. The closed conformation features a well-defined interface that is related to, but distinguishable from, that observed in the Ub2 crystal structure. This interface is dynamic in solution, such that important hydrophobic residues (L8, I44, V70) that are sequestered at the interface in the closed conformation may be accessible for direct interactions with recognition factors.
Our results suggest that the distal two units of Ub4, which is the minimum signal for efficient proteasomal degradation, may adopt the closed Ub2 conformation.}, keywords = {chemical shift mapping, domain orientation measurements, polyubiquitin chains, spin relaxation, ubiquitin}, isbn = {0022-2836}, doi = {10.1016/S0022-2836(02)01198-1}, url = {http://www.sciencedirect.com/science/article/pii/S0022283602011981}, author = {Varadan,Ranjani and Walker,Olivier and Pickart,Cecile and Fushman, David} } @conference {15897, title = {Supporting access to large digital oral history archives}, booktitle = {Proceedings of the 2nd ACM/IEEE-CS joint conference on Digital libraries}, series = {JCDL {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {18 - 27}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {This paper describes our experience with the creation, indexing, and provision of access to a very large archive of videotaped oral histories - 116,000 hours of digitized interviews in 32 languages from 52,000 survivors, liberators, rescuers, and witnesses of the Nazi Holocaust. It goes on to identify a set of critical research issues that must be addressed if we are to provide full and detailed access to collections of this size: issues in user requirement studies, automatic speech recognition, automatic classification, segmentation, summarization, retrieval, and user interfaces. The paper ends by inviting others to discuss use of these materials in their own research.}, keywords = {cataloging, oral history, research agenda}, isbn = {1-58113-513-0}, doi = {10.1145/544220.544224}, url = {http://doi.acm.org/10.1145/544220.544224}, author = {Gustman,Samuel and Soergel,Dagobert and Oard, Douglas and Byrne,William and Picheny,Michael and Ramabhadran,Bhuvana and Greenberg,Douglas} } @article {15935, title = {Symbol systems}, journal = {Encyclopedia of Cognitive Science}, year = {2002}, month = {2002///}, author = {Anderson,M. L and Perlis, Don} } @conference {16149, title = {Technologies for families}, booktitle = {CHI {\textquoteright}02 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {938 - 939}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In this workshop, we propose to bring together researchers from industry and academia to discuss the design of new technologies for families. We will focus on both design techniques and the technologies themselves. Through discussions and brainstorming we hope to discover new ideas, which can be disseminated more broadly.}, keywords = {cooperative design, disappearing computer, family, Home, participatory design, probe, user interface}, isbn = {1-58113-454-1}, doi = {10.1145/506443.506669}, url = {http://doi.acm.org/10.1145/506443.506669}, author = {Plaisant, Catherine and Druin, Allison and Hutchinson,Hilary} } @conference {15972, title = {Time-situated agency: Active logic and intention formation}, booktitle = {Workshop on Cognitive Agents, 25th German Conference on Artificial Intelligence}, year = {2002}, month = {2002///}, author = {Anderson,M. L and Josyula,D. P and Okamoto,Y. A and Perlis, Don} } @conference {15955, title = {The use-mention distinction and its importance to HCI}, booktitle = {Proceedings of the Sixth Workshop on the Semantics and Pragmatics of Dialog}, year = {2002}, month = {2002///}, pages = {21 - 28}, author = {Anderson,M. L and Okamoto,Y. and Josyula,D.
and Perlis, Don} } @conference {13689, title = {Video analysis applications for pervasive environments}, booktitle = {1st International Conference on Mobile and Ubiquitous Multimedia}, year = {2002}, month = {2002///}, pages = {48 - 55}, abstract = {Network capabilities are expanding at a rate that will soon allow a much wider range of image and video content to be delivered to and transmitted from mobile wireless devices. Current pervasive applications provide image and video content primarily in high performance networks or by streaming video over low-bandwidth networks. For video to be truly integrated into ubiquitous applications, we must carefully consider the role of automated video analysis to move beyond simple content delivery. Compression and content adaptation have been significant areas of research for some time and deal with the problem of moving media to devices with limited network, software and hardware capabilities. In the near future, mobile wireless devices that have been primarily consumers of image and video will be able to produce such content as well. In this paper, we will review some of the significant issues associated with providing video capabilities in pervasive environments. We consider not only streaming video applications, but point-to-point video and video messaging capabilities as well. We will show how our previous work on content adaptation is being applied to these types of environments, show what role more general video analysis can play and describe a general architecture for video capture, storage, analysis and transmission. }, author = {Karunanidhi,A. and David Doermann and Parekh,N. and Rautio,V.} } @article {12175, title = {A visual search tool for early elementary science students}, journal = {Journal of Science Education and Technology}, volume = {11}, year = {2002}, month = {2002///}, pages = {49 - 57}, author = {Revelle,G. and Druin, Allison and Platner,M. and Bederson, Benjamin B. and Hourcade,J. P and Sherman,L.} } @article {17670, title = {Wavelength rerouting in optical networks, or the Venetian Routing problem}, journal = {Journal of Algorithms}, volume = {45}, year = {2002}, month = {2002/11//}, pages = {93 - 125}, abstract = {Wavelength rerouting has been suggested as a viable and cost-effective method to improve the blocking performance of wavelength-routed wavelength-division multiplexing (WDM) networks. This method leads to the following combinatorial optimization problem, dubbed Venetian Routing. Given a directed multigraph G along with two vertices s and t and a collection of pairwise arc-disjoint paths, we wish to find an st-path which arc-intersects the smallest possible number of the given paths. In this paper we prove the computational hardness of this problem even in various special cases, and present several approximation algorithms for its solution. In particular we show a non-trivial connection between Venetian Routing and Label Cover.}, keywords = {Approximation algorithms, Label Cover, Optical networks, Shortest paths, Wavelength rerouting, Wavelength-division multiplexing}, isbn = {0196-6774}, doi = {10.1016/S0196-6774(02)00214-6}, url = {http://www.sciencedirect.com/science/article/pii/S0196677402002146}, author = {Caprara,Alberto and Italiano,Giuseppe F. and Mohan,G. and Panconesi,Alessandro and Srinivasan, Aravind} } @conference {18453, title = {What we have learned about fighting defects}, booktitle = {Software Metrics, 2002. Proceedings.
Eighth IEEE Symposium on}, year = {2002}, month = {2002///}, pages = {249 - 258}, abstract = {The Center for Empirically Based Software Engineering helps improve software development by providing guidelines for selecting development techniques, recommending areas for further research, and supporting software engineering education. A central activity toward achieving this goal has been the running of "e-Workshops" that capture expert knowledge with a minimum of overhead effort to formulate heuristics on a particular topic. The resulting heuristics are a useful summary of the current state of knowledge in an area based on expert opinion. This paper discusses the results to date of a series of e-Workshops on software defect reduction. The original discussion items are presented along with an encapsulated summary of the expert discussion. The reformulated heuristics can be useful both to researchers (for pointing out gaps in the current state of the knowledge requiring further investigation) and to practitioners (for benchmarking or setting expectations about development practices).}, keywords = {Center for Empirically Based Software Engineering; computer science education; defect reduction; electronic workshops; eWorkshops; heuristics; software development; software engineering}, doi = {10.1109/METRIC.2002.1011343}, author = {Shull, F. and Basili, Victor R. and Boehm,B. and Brown,A. W and Costa,P. and Lindvall,M. and Port,D. and Rus,I. and Tesoriero,R. and Zelkowitz, Marvin V} } @article {15596, title = {Approximating large convolutions in digital images}, journal = {Image Processing, IEEE Transactions on}, volume = {10}, year = {2001}, month = {2001/12//}, pages = {1826 - 1835}, abstract = {Computing discrete two-dimensional (2-D) convolutions is an important problem in image processing. In mathematical morphology, an important variant is that of computing binary convolutions, where the kernel of the convolution is a 0-1 valued function. This operation can be quite costly, especially when large kernels are involved. We present an algorithm for computing convolutions of this form, where the kernel of the binary convolution is derived from a convex polygon. Because the kernel is a geometric object, we allow the algorithm some flexibility in how it elects to digitize the convex kernel at each placement, as long as the digitization satisfies certain reasonable requirements. We say that such a convolution is valid. Given this flexibility we show that it is possible to compute binary convolutions more efficiently than would normally be possible for large kernels. Our main result is an algorithm which, given an m {\texttimes} n image and a k-sided convex polygonal kernel K, computes a valid convolution in O(kmn) time. Unlike standard algorithms for computing correlations and convolutions, the running time is independent of the area or perimeter of K, and our techniques do not rely on computing fast Fourier transforms.
Our algorithm is based on a novel use of Bresenham{\textquoteright}s (1965) line-drawing algorithm and prefix-sums to update the convolution incrementally as the kernel is moved from one position to another across the image.}, keywords = {2D convolution; approximation theory; binary convolutions; Bresenham{\textquoteright}s line-drawing algorithm; convex polygonal kernel; convolution; digital images; discrete two-dimensional convolutions; geometric object; image processing; large kernel; mathematical morphology}, isbn = {1057-7149}, doi = {10.1109/83.974567}, author = {Mount, Dave and Kanungo,T. and Netanyahu,N. S and Piatko,C. and Silverman,R. and Wu,A. Y} } @article {17568, title = {Better Approximation Guarantees for Job-Shop Scheduling}, journal = {SIAM Journal on Discrete Mathematics}, volume = {14}, year = {2001}, month = {2001///}, pages = {67 - 67}, abstract = {Job-shop scheduling is a classical NP-hard problem. Shmoys, Stein, and Wein presented the first polynomial-time approximation algorithm for this problem that has a good (polylogarithmic) approximation guarantee. We improve the approximation guarantee of their work and present further improvements for some important NP-hard special cases of this problem (e.g., in the preemptive case where machines can suspend work on operations and later resume). We also present NC algorithms with improved approximation guarantees for some NP-hard special cases.}, isbn = {08954801}, doi = {10.1137/S0895480199326104}, url = {http://link.aip.org/link/SJDMEC/v14/i1/p67/s1\&Agg=doi}, author = {Goldberg,Leslie Ann and Paterson,Mike and Srinivasan, Aravind and Sweedyk,Elizabeth} } @conference {16346, title = {Core semantics of multithreaded Java}, booktitle = {Proceedings of the 2001 joint ACM-ISCOPE conference on Java Grande - JGI {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {29 - 38}, address = {Palo Alto, California, United States}, doi = {10.1145/376656.376806}, url = {http://dl.acm.org/citation.cfm?id=376806}, author = {Manson,Jeremy and Pugh, William} } @conference {15455, title = {Coverage criteria for GUI testing}, booktitle = {Proceedings of the 8th European software engineering conference held jointly with 9th ACM SIGSOFT international symposium on Foundations of software engineering}, series = {ESEC/FSE-9}, year = {2001}, month = {2001///}, pages = {256 - 267}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {A widespread recognition of the usefulness of graphical user interfaces (GUIs) has established their importance as critical components of today{\textquoteright}s software. GUIs have characteristics different from traditional software, and conventional testing techniques do not directly apply to GUIs. This paper{\textquoteright}s focus is on coverage criteria for GUIs, important rules that provide an objective measure of test quality. We present new coverage criteria to help determine whether a GUI has been adequately tested. These coverage criteria use events and event sequences to specify a measure of test adequacy. Since the total number of permutations of event sequences in any non-trivial GUI is extremely large, the GUI{\textquoteright}s hierarchical structure is exploited to identify the important event sequences to be tested. A GUI is decomposed into GUI components, each of which is used as a basic unit of testing.
A representation of a GUI component, called an event-flow graph, identifies the interaction of events within a component and intra-component criteria are used to evaluate the adequacy of tests on these events. The hierarchical relationship among components is represented by an integration tree, and inter-component coverage criteria are used to evaluate the adequacy of test sequences that cross components. Algorithms are given to construct event-flow graphs and an integration tree for a given GUI, and to evaluate the coverage of a given test suite with respect to the new coverage criteria. A case study illustrates the usefulness of the coverage report to guide further testing and an important correlation between event-based coverage of a GUI and statement coverage of its software{\textquoteright}s underlying code.}, keywords = {component testing, event-based coverage, event-flow graph, GUI test coverage, GUI testing, integration tree}, isbn = {1-58113-390-1}, doi = {10.1145/503209.503244}, url = {http://doi.acm.org/10.1145/503209.503244}, author = {Memon, Atif M. and Soffa,Mary Lou and Pollack,Martha E.} } @article {16154, title = {Designing an Interactive Message Board as a Technology Probe for Family Communication}, journal = {Technical Reports of the Computer Science Department}, year = {2001}, month = {2001/10/10/}, abstract = {In this paper, we describe the design issues and technical implementation of an interactive Family Message Board. The Family Message Board enables members of a distributed family to communicate with one another both synchronously and asynchronously via simple, pen-based, digital notes. Each household running this Java-based software can view, create, and manipulate notes in a zoomable space. The Family Message Board will be used as a technology probe to help us understand the communication needs of distributed families, and to help us design new devices to meet those needs. }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/526}, author = {Browne,Hilary and Bederson, Benjamin B. and Plaisant, Catherine and Druin, Allison} } @inbook {15072, title = {Efficient and Non-interactive Non-malleable Commitment}, booktitle = {Advances in Cryptology {\textemdash} EUROCRYPT 2001}, series = {Lecture Notes in Computer Science}, volume = {2045}, year = {2001}, month = {2001///}, pages = {40 - 59}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We present new constructions of non-malleable commitment schemes, in the public parameter model (where a trusted party makes parameters available to all parties), based on the discrete logarithm or RSA assumptions. The main features of our schemes are: they achieve near-optimal communication for arbitrarily-large messages and are non-interactive. Previous schemes either required (several rounds of) interaction or focused on achieving non-malleable commitment based on general assumptions and were thus efficient only when committing to a single bit.
Although our main constructions are for the case of perfectly-hiding commitment, we also present a communication-efficient, non-interactive commitment scheme (based on general assumptions) that is perfectly binding.}, keywords = {Computer science}, isbn = {978-3-540-42070-5}, url = {http://www.springerlink.com/content/nhbj60a9da101w0r/abstract/}, author = {Di Crescenzo,Giovanni and Katz, Jonathan and Ostrovsky,Rafail and Smith,Adam}, editor = {Pfitzmann,Birgit} } @conference {16260, title = {Efficient perspective-accurate silhouette computation and applications}, booktitle = {Proceedings of the seventeenth annual symposium on Computational geometry}, series = {SCG {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {60 - 68}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Silhouettes are perceptually and geometrically salient features of geometric models. Hence a number of graphics and visualization applications need to find them to aid further processing. The efficient computation of silhouettes, especially in the context of perspective projection, is known to be difficult. This paper presents a novel efficient and practical algorithm to compute silhouettes from a sequence of viewpoints under perspective projection. Parallel projection is a special case of this algorithm. Our approach is based on a point-plane duality in three dimensions, which allows an efficient computation of the \emph{changes} in the silhouette of a polygonal model between consecutive frames. In addition, we present several applications of our technique to problems from computer graphics and medical visualization. We also provide experimental data that show the efficiency of our approach.}, keywords = {rendering, silhouette, simplification}, isbn = {1-58113-357-X}, doi = {10.1145/378583.378618}, url = {http://doi.acm.org/10.1145/378583.378618}, author = {Pop, Mihai and Duncan,Christian and Barequet,Gill and Goodrich,Michael and Huang,Wenjing and Kumar,Subodh} } @article {16324, title = {An empirical study of regression test selection techniques}, journal = {ACM Transactions on Software Engineering and Methodology}, volume = {10}, year = {2001}, month = {2001/04//}, pages = {184 - 208}, isbn = {1049331X}, doi = {10.1145/367008.367020}, url = {http://dl.acm.org/citation.cfm?id=367020}, author = {Graves,Todd L. and Harrold,Mary Jean and Kim,Jung-Min and Porter, Adam and Rothermel,Gregg} } @article {16157, title = {Enabling Commuters to Find the Best Route: An Interface for Analyzing Driving History Logs}, journal = {Proceedings of the IFIP Conference on Human-Computer Interaction (Interact2001)}, year = {2001}, month = {2001///}, pages = {799 - 800}, abstract = {This paper describes a prototype interface design for an automobile driving history log. It allows drivers to choose the best route among several alternatives for their common trips. Recorded data includes time to complete the travel, fuel consumption, and number of stops.}, author = {Konishi,M.
and Plaisant, Catherine and Shneiderman, Ben} } @inbook {14217, title = {Eyes from Eyes}, booktitle = {3D Structure from Images {\textemdash} SMILE 20003D Structure from Images {\textemdash} SMILE 2000}, series = {Lecture Notes in Computer Science}, volume = {2018}, year = {2001}, month = {2001///}, pages = {204 - 217}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We describe a family of new imaging systems, called Argus eyes, that consist of common video cameras arranged in some network. The system we built consists of six cameras arranged so that they sample different parts of the visual sphere. This system has the capability of very accurately estimating its own 3D motion and consequently estimating shape models from the individual videos. The reason is that inherent ambiguities of confusion between translation and rotation disappear in this case. We provide an algorithm and several experiments using real outdoor or indoor images demonstrating the superiority of the new sensor with regard to 3D motion estimation.}, isbn = {978-3-540-41845-0}, url = {http://dx.doi.org/10.1007/3-540-45296-6_14}, author = {Baker,Patrick and Pless,Robert and Ferm{\"u}ller, Cornelia and Aloimonos, J.}, editor = {Pollefeys,Marc and Van Gool,Luc and Zisserman,Andrew and Fitzgibbon,Andrew} } @conference {15949, title = {Handling uncertainty with active logic}, booktitle = {In Proceedings, AAAI Fall Symposium on Uncertainty in Computation}, year = {2001}, month = {2001///}, author = {Bhatia,M. and Chi,P. and Chong,W. and Josyula,D. P and Okamoto,Y. and Perlis, Don and Purang,K.} } @article {15473, title = {Hierarchical GUI test case generation using automated planning}, journal = {Software Engineering, IEEE Transactions on}, volume = {27}, year = {2001}, month = {2001/02//}, pages = {144 - 155}, abstract = {The widespread use of GUIs for interacting with software is leading to the construction of more and more complex GUIs. With the growing complexity come challenges in testing the correctness of a GUI and its underlying software. We present a new technique to automatically generate test cases for GUIs that exploits planning, a well-developed and used technique in artificial intelligence. Given a set of operators, an initial state, and a goal state, a planner produces a sequence of the operators that will transform the initial state to the goal state. Our test case generation technique enables efficient application of planning by first creating a hierarchical model of a GUI based on its structure. The GUI model consists of hierarchical planning operators representing the possible events in the GUI. The test designer defines the preconditions and effects of the hierarchical operators, which are input into a plan-generation system. The test designer also creates scenarios that represent typical initial and goal states for a GUI user. The planner then generates plans representing sequences of GUI interactions that a user might employ to reach the goal state from the initial state. We implemented our test case generation system, called Planning Assisted Tester for Graphical User Interface Systems (PATHS) and experimentally evaluated its practicality and effectiveness. 
We describe a prototype implementation of PATHS and report on the results of controlled experiments to generate test cases for Microsoft{\textquoteright}s WordPad}, keywords = {Artificial intelligence, automated planning, automatic test case generation, Automatic testing, correctness testing, goal state, Graphical user interfaces, hierarchical GUI test case generation, initial state, Microsoft WordPad, operators, plan-generation system, planning (artificial intelligence), Planning Assisted Tester for Graphical User Interface Systems, program testing, software}, isbn = {0098-5589}, doi = {10.1109/32.908959}, author = {Memon, Atif M. and Pollack,M. E and Soffa,M. L} } @article {18731, title = {Intelligent assembly modeling and simulation}, journal = {Assembly Automation}, volume = {21}, year = {2001}, month = {2001///}, pages = {215 - 235}, abstract = {Because of the intense competition in the current global economy, a company must conceive, design, and manufacture new products quickly and inexpensively. The design cycle can be shortened through simulation. Rapid technical advances in many different areas of scientific computing provide the enabling technologies for creating a comprehensive simulation and visualization environment for assembly design and planning. An intelligent environment has been built in which simple simulation tools can be composed into complex simulations for detecting potential assembly problems. The goal in this research is to develop high fidelity assembly simulation and visualization tools that can detect assembly related problems without going through physical mock-ups. In addition, these tools can be used to create easy-to-visualize instructions for performing assembly and service operations.}, url = {http://www.ingentaconnect.com/content/mcb/033/2001/00000021/00000003/art00004}, author = {Gupta,S.K. and Paredis,C. J. J. and Sinha,R.} } @conference {18634, title = {Intrusion tolerance approaches in ITUA}, volume = {64}, year = {2001}, month = {2001///}, url = {http://www.dist-systems.bbn.com/papers/2001/ICDSN/01CUK01.pdf}, author = {Michel Cukier and Lyons,J. and Pandey,P. and Ramasamy,H. V. and Sanders,W. H. and Pal,P. and Webber,F. and Schantz,R. and Loyall,J. and Watro,R.} } @conference {16307, title = {Leveraging open-source communities to improve the quality \& performance of open-source software}, booktitle = {Proceedings of the 1st Workshop on Open Source Software Engineering}, year = {2001}, month = {2001///}, author = {Schmidt,D. C and Porter, Adam} } @conference {13826, title = {Mapping lexical entries in a verbs database to WordNet senses}, booktitle = {Proceedings of the 39th Annual Meeting on Association for Computational Linguistics}, series = {ACL {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {244 - 251}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {This paper describes automatic techniques for mapping 9611 entries in a database of English verbs to WordNet senses. The verbs were initially grouped into 491 classes based on syntactic features. Mapping these verbs into WordNet senses provides a resource that supports disambiguation in multilingual applications such as machine translation and cross-language information retrieval. 
Our techniques make use of (1) a training set of 1791 disambiguated entries, representing 1442 verb entries from 167 classes; (2) word sense probabilities, from frequency counts in a tagged corpus; (3) semantic similarity of WordNet senses for verbs within the same class; (4) probabilistic correlations between WordNet data and attributes of the verb classes. The best results achieved 72\% precision and 58\% recall, versus a lower bound of 62\% precision and 38\% recall for assigning the most frequently occurring WordNet sense, and an upper bound of 87\% precision and 75\% recall for human judgment.}, doi = {10.3115/1073012.1073044}, url = {http://dx.doi.org/10.3115/1073012.1073044}, author = {Green,Rebecca and Pearl,Lisa and Dorr, Bonnie J and Resnik, Philip} } @article {13827, title = {Mapping WordNet Senses to a Lexical Database of Verbs}, year = {2001}, month = {2001/01//}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {This paper describes automatic techniques for mapping 9611 semantically classified English verbs to WordNet senses. The verbs were initially grouped into 491 semantic classes based on syntactic categories; they were then mapped into WordNet senses according to three pieces of information: (1) prior probability of WordNet senses; (2) semantic similarity of WordNet senses for verbs within the same category; and (3) probabilistic correlations between WordNet relationship and verb frame data. Our techniques make use of a training set of 1791 disambiguated entries representing 1442 verbs occurring in 167 of the categories. The best results achieved .58 recall and .72 precision, versus a lower bound of .38 recall and .62 precision for assigning the most frequently occurring WordNet sense, and an upper bound of .75 recall and .87 precision for human judgment.}, keywords = {*DATA BASES, *ENGLISH LANGUAGE, *LEXICAL DATABASES, *LEXICOGRAPHY, *MAPPING, *VERBS, *WORD MAPPING, *WORDNET, AMBIGUITY, correlation, FRAMES, Frequency, INFORMATION SCIENCE, JUDGEMENT(PSYCHOLOGY), linguistics, PRECISION, probability, RECALL, semantics, syntax, WORDS(LANGUAGE), WSD(WORD SENSE DISAMBIGUATION)}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA458846}, author = {Green,Rebecca and Pearl,Lisa and Dorr, Bonnie J} } @conference {16347, title = {More efficient network class loading through bundling}, booktitle = {Proceedings of the 2001 Symposium on Java TM Virtual Machine Research and Technology Symposium-Volume 1}, year = {2001}, month = {2001///}, pages = {17 - 17}, author = {Hovemeyer,D. and Pugh, William} } @conference {12074, title = {Personal secure booting}, booktitle = {Information Security and Privacy}, year = {2001}, month = {2001///}, pages = {130 - 144}, author = {Itoi,N. and Arbaugh, William A. and Pollack,S. and Reeves,D.} } @conference {14188, title = {A spherical eye from multiple cameras (makes better models of the world)}, booktitle = {Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2001. CVPR 2001}, volume = {1}, year = {2001}, month = {2001///}, pages = {I-576 - I-583 vol.1}, publisher = {IEEE}, organization = {IEEE}, abstract = {The paper describes an imaging system that has been designed specifically for the purpose of recovering egomotion and structure from video. The system consists of six cameras in a network arranged so that they sample different parts of the visual sphere.
This geometric configuration has provable advantages compared to small field of view cameras for the estimation of the system{\textquoteright}s own motion and consequently the estimation of shape models from the individual cameras. The reason is that inherent ambiguities of confusion between translation and rotation disappear. We provide algorithms for the calibration of the system and 3D motion estimation. The calibration is based on a new geometric constraint that relates the images of lines parallel in space to the rotation between the cameras. The 3D motion estimation uses a constraint relating structure directly to image gradients.}, keywords = {3D motion estimation, Calibration, camera network, CAMERAS, Computer vision, egomotion recovery, geometric configuration, geometric constraint, image gradients, image sampling, imaging system, Laboratories, Layout, Motion estimation, multiple cameras, Pixel, Robot vision systems, SHAPE, shape models, Space technology, spherical eye, system calibration, video, video cameras, video signal processing, visual sphere sampling}, isbn = {0-7695-1272-0}, doi = {10.1109/CVPR.2001.990525}, author = {Baker, P. and Ferm{\"u}ller, Cornelia and Aloimonos, J. and Pless, R.} } @conference {16156, title = {Therapeutic play with a storytelling robot}, booktitle = {CHI {\textquoteright}01 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}01}, year = {2001}, month = {2001///}, pages = {27 - 28}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We are developing a prototype storytelling robot for use with children in rehabilitation. Children can remotely control a furry robot by using a variety of body sensors adapted to their disability or rehabilitation goal. We believe this robot can motivate children and help them reach their therapy goals through therapeutic play, either by exercising muscles or joints (e.g. for physically challenged children) or by reflecting on the stories (e.g. for children with developmental disabilities). To develop this technology we use an innovative design methodology involving children as design partners.}, keywords = {children, design process, rehabilitation, robot, therapeutic play, user interface}, isbn = {1-58113-340-5}, doi = {10.1145/634067.634088}, url = {http://doi.acm.org/10.1145/634067.634088}, author = {Lathan,Corinna and Vice,Jack Maxwell and Tracey,Michael and Plaisant, Catherine and Druin, Allison and Edward,Kris and Montemayor,Jaime} } @article {16158, title = {Understanding Manufacturing Systems with a Learning Historian for User-Directed Experimentation}, volume = {CS-TR-4243}, year = {2001}, month = {2001///}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {This paper describes a learning historian to improve user-directed experimentation with discrete event simulation models of manufacturing systems. In user-directed experimentation, an analyst conducts simulation runs to estimate system performance. Then the analyst modifies the simulation model to evaluate other possibilities. An important characteristic is the ad hoc nature of the experimentation, as the analyst forms and runs new trials based on the results from previous trials. Through user-directed experimentation designers compare alternatives and students learn the relationships between input parameters and performance measures.
Recording and reviewing previous trials while using simulation models enhances their benefits, transforming trial-and-error into learning. The learning historian combines a graphical user interface, a discrete event simulation model, and dynamic data visualization. Usability studies indicate that the learning historian is a usable and useful tool because it allows users to concentrate more on understanding system behavior than on operating simulation software. }, author = {Chipman,G. and Plaisant, Catherine and Gahagan,S. and Herrmann,J.W. and Hewitt,S. and Reaves,L.} } @conference {16155, title = {VEHICLE SPEED INFORMATION DISPLAYS FOR PUBLIC WEBSITES: A STUDY OF USER PREFERENCES}, booktitle = {ITS 2001: Conference Proceedings}, year = {2001}, month = {2001///}, author = {Plaisant, Catherine and Bhamidipati,P. and Tarnoff,P.} } @article {18291, title = {Watermarking scheme for image authentication}, year = {2001}, month = {2001/09/04/}, abstract = {A digital watermarking process whereby an invisible watermark inserted into a host image is utilized to determine whether or not the image has been altered and, if so, where in the image such alteration occurred. The watermarking method includes the steps of providing a look-up table containing a plurality of coefficients and corresponding values; transforming the image into a plurality of blocks, wherein each block contains coefficients matching coefficients in the look-up table; and embedding the watermark in the image by performing the following substeps for at least some of the blocks: First, a coefficient is selected for insertion of a marking value representative of a corresponding portion of the watermark. Next, the value of the selected coefficient is used to identify a corresponding value in the look-up table. Finally, the identified coefficient is left unchanged if the corresponding value is the same as the marking value, and is changed if the corresponding value is...}, url = {http://www.google.com/patents?id=BmoIAAAAEBAJ}, author = {M. Wu and Liu,Bede}, editor = {The Trustees of the University of Princeton} } @conference {15639, title = {The analysis of a simple k-means clustering algorithm}, booktitle = {Proceedings of the sixteenth annual symposium on Computational geometry}, series = {SCG {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {100 - 109}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {1-58113-224-7}, doi = {10.1145/336154.336189}, url = {http://doi.acm.org/10.1145/336154.336189}, author = {Kanungo,Tapas and Mount, Dave and Netanyahu,Nathan S. and Piatko,Christine and Silverman,Ruth and Wu,Angela Y.} } @conference {18479, title = {Attacking the bottlenecks of backfilling schedulers}, booktitle = {Cluster Computing}, year = {2000}, month = {2000///}, author = {Zotkin,Dmitry N and Keleher,P. J and Perkovic,D.} } @article {15464, title = {Automated test oracles for GUIs}, journal = {SIGSOFT Softw. Eng. Notes}, volume = {25}, year = {2000}, month = {2000/11//}, pages = {30 - 39}, abstract = {Graphical User Interfaces (GUIs) are critical components of today{\textquoteright}s software. Because GUIs have different characteristics than traditional software, conventional testing techniques do not apply to GUI software. In previous work, we presented an approach to generate GUI test cases, which take the form of sequences of actions. In this paper we develop a test oracle technique to determine if a GUI behaves as expected for a given test case.
Our oracle uses a formal model of a GUI, expressed as sets of objects, object properties, and actions. Given the formal model and a test case, our oracle automatically derives the expected state for every action in the test case. We represent the actual state of an executing GUI in terms of objects and their properties derived from the GUI{\textquoteright}s execution. Using the actual state acquired from an execution monitor, our oracle automatically compares the expected and actual states after each action to verify the correctness of the GUI for the test case. We implemented the oracle as a component in our GUI testing system, called Planning Assisted Tester for grapHical user interface Systems (PATHS), which is based on AI planning. We experimentally evaluated the practicality and effectiveness of our oracle technique and report on the results of experiments to test and verify the behavior of our version of the Microsoft WordPad{\textquoteright}s GUI.}, keywords = {automated oracles, GUI test oracles, GUI testing}, isbn = {0163-5948}, doi = {10.1145/357474.355050}, url = {http://doi.acm.org/10.1145/357474.355050}, author = {Memon, Atif M. and Pollack,Martha E. and Soffa,Mary Lou} } @article {12974, title = {A Case for Evolutionary Genomics and the Comprehensive Examination of Sequence Biodiversity}, journal = {Molecular Biology and Evolution}, volume = {17}, year = {2000}, month = {2000/12/01/}, pages = {1776 - 1788}, abstract = {Comparative analysis is one of the most powerful methods available for understanding the diverse and complex systems found in biology, but it is often limited by a lack of comprehensive taxonomic sampling. Despite the recent development of powerful genome technologies capable of producing sequence data in large quantities (witness the recently completed first draft of the human genome), there has been relatively little change in how evolutionary studies are conducted. The application of genomic methods to evolutionary biology is a challenge, in part because gene segments from different organisms are manipulated separately, requiring individual purification, cloning, and sequencing. We suggest that a feasible approach to collecting genome-scale data sets for evolutionary biology (i.e., evolutionary genomics) may consist of a combination of DNA samples prior to cloning and sequencing, followed by computational reconstruction of the original sequences. This approach will allow the full benefit of automated protocols developed by genome projects to be realized; taxon sampling levels can easily increase to thousands for targeted genomes and genomic regions. Sequence diversity at this level will dramatically improve the quality and accuracy of phylogenetic inference, as well as the accuracy and resolution of comparative evolutionary studies. In particular, it will be possible to make accurate estimates of normal evolution in the context of constant structural and functional constraints (i.e., site-specific substitution probabilities), along with accurate estimates of changes in evolutionary patterns, including pairwise coevolution between sites, adaptive bursts, and changes in selective constraints. These estimates can then be used to understand and predict the effects of protein structure and function on sequence evolution and to predict unknown details of protein structure, function, and functional divergence.
In order to demonstrate the practicality of these ideas and the potential benefit for functional genomic analysis, we describe a pilot project we are conducting to simultaneously sequence large numbers of vertebrate mitochondrial genomes.}, isbn = {0737-4038, 1537-1719}, url = {http://mbe.oxfordjournals.org/content/17/12/1776}, author = {Pollock,David D and Eisen,Jonathan A. and Doggett,Norman A and Cummings, Michael P.} } @conference {18032, title = {Communication complexity of document exchange}, booktitle = {Proceedings of the eleventh annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {197 - 206}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-453-2}, url = {http://dl.acm.org/citation.cfm?id=338219.338252}, author = {Cormode,Graham and Paterson,Mike and Sahinalp,S{\"u}leyman Cenk and Vishkin, Uzi} } @article {17579, title = {Contention resolution with constant expected delay}, journal = {J. ACM}, volume = {47}, year = {2000}, month = {2000/11//}, pages = {1048 - 1096}, abstract = {We study contention resolution in a multiple-access channel such as the Ethernet channel. In the model that we consider, n users generate messages for the channel according to a probability distribution. Raghavan and Upfal have given a protocol in which the expected delay (time to get serviced) of every message is O(log n) when messages are generated according to a Bernoulli distribution with generation rate up to about 1/10. Our main results are the following protocols: (a) one in which the expected average message delay is O(1) when messages are generated according to a Bernoulli distribution with a generation rate smaller than 1/e, and (b) one in which the expected delay of any message is O(1) for an analogous model in which users are synchronized (i.e., they agree about the time), there are potentially an infinite number of users, and messages are generated according to a Poisson distribution with generation rate up to 1/e. (Each message constitutes a new user.) To achieve (a), we first show how to simulate (b) using n synchronized users, and then show how to build the synchronization into the protocol.}, keywords = {contention resolution, Ethernet, Markov chains, multiple-access channel}, isbn = {0004-5411}, doi = {10.1145/355541.355567}, url = {http://doi.acm.org/10.1145/355541.355567}, author = {Goldberg,Leslie Ann and Mackenzie,Philip D. and Paterson,Mike and Srinivasan, Aravind} } @conference {13893, title = {Designing StoryRooms: interactive storytelling spaces for children}, booktitle = {Proceedings of the 3rd conference on Designing interactive systems: processes, practices, methods, and techniques}, year = {2000}, month = {2000///}, pages = {95 - 104}, author = {Alborzi,H. and Druin, Allison and Montemayor,J. and Platner,M. and Porteous,J. and Sherman,L. and Boltman,A. and Tax{\'e}n,G. and Best,J. and Hammer,J. and others} } @article {12027, title = {Detecting independent motion: The statistics of temporal continuity}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {22}, year = {2000}, month = {2000///}, pages = {768 - 773}, author = {Pless, R. and Brodsky, T.
and Aloimonos, J.} } @conference {16339, title = {Empirical studies of software engineering: a roadmap}, booktitle = {Proceedings of the conference on The future of Software engineering}, year = {2000}, month = {2000///}, pages = {345 - 355}, author = {Perry,D. E. and Porter, Adam and Votta,L. G.} } @conference {16341, title = {An empirical study of regression test application frequency}, booktitle = {Proceedings of the 22nd international conference on Software engineering - ICSE {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {126 - 135}, address = {Limerick, Ireland}, doi = {10.1145/337180.337196}, url = {http://dl.acm.org/citation.cfm?id=337196}, author = {Kim,Jung-Min and Porter, Adam and Rothermel,Gregg} } @conference {16164, title = {Evaluation challenges for a Federation of heterogeneous information providers: the case of NASA{\textquoteright}s Earth Science Information Partnerships}, booktitle = {Enabling Technologies: Infrastructure for Collaborative Enterprises, 2000. (WET ICE 2000). Proceedings. IEEE 9th International Workshops on}, year = {2000}, month = {2000///}, pages = {130 - 135}, abstract = {NASA{\textquoteright}s Earth Science Information Partnership Federation is an experiment funded to assess the ability of a group of widely heterogeneous earth science data or service providers to self-organize and provide improved and affordable access to an expanding earth science user community. As it is self-organizing, the Federation is mandated to set in place an evaluation methodology and collect metrics reflecting the outcomes and benefits of the Federation. This paper describes the challenges of organizing such a federated partnership self-evaluation and discusses the issues encountered during the metrics definition phase of the early data collection. Our experience indicates that a large number of metrics will be needed to fully represent the activities and strengths of all partners, but because of the heterogeneity of the ESIPs the qualitative data (comments accompanying the metric data and success stories) becomes the most useful information. Other lessons learned included the absolute need for online browsing tools to accompany data collection tools. Finally, our experience confirms the effect of evaluation as an agent of change, the best example being the high level of collaboration among the ESIPs, which can be in part attributed to the initial identification of collaboration as one of the important evaluation factors of the Federation}, keywords = {browsing, data collection, heterogeneous information systems, groupware, geographic information tools, geophysics computing, online data, NASA{\textquoteright}s Earth Science Information Partnership Federation}, doi = {10.1109/ENABL.2000.883717}, author = {Plaisant, Catherine and Komlodi,A. and Lindsay,F.} } @article {16161, title = {Facilitating data exploration with query previews: A study of user performance and preference}, journal = {Behaviour \& Information Technology}, volume = {19}, year = {2000}, month = {2000///}, pages = {393 - 403}, abstract = {Networked and local data exploration systems that use command languages, menus, or form fill-in interfaces rarely give users an indication of the distribution of data. This often leads users to waste time, posing queries that have zero-hit or mega-hit results. Query previews are a novel visual approach for browsing databases.
Query previews supply users with data distribution information for selected attributes of the database, and give continuous feedback about the size of the result set as the query is being formed. Subsequent refinements might be necessary to narrow the search. As there is a risk that query previews are an additional step, leading to a more complex and slow search process, a within-subjects empirical study was run with 12 subjects who used interfaces with and without query previews and with minimized network delays. Even with 12 subjects and minimized network delays, statistically significant differences were found, showing that query previews could speed up performance 1.6 to 2.1 times and lead to higher user satisfaction.}, isbn = {0144-929X}, doi = {10.1080/014492900750052651}, url = {http://www.tandfonline.com/doi/abs/10.1080/014492900750052651}, author = {Tanin,Egemen and Lotem,Amnon and Haddadin,Ihab and Shneiderman, Ben and Plaisant, Catherine and Slaughter,Laura} } @article {15234, title = {Fault tolerant K-center problems}, journal = {Theoretical Computer Science}, volume = {242}, year = {2000}, month = {2000/07/06/}, pages = {237 - 245}, abstract = {The basic K-center problem is a fundamental facility location problem, where we are asked to locate K facilities in a graph, and to assign vertices to facilities, so as to minimize the maximum distance from a vertex to the facility to which it is assigned. This problem is known to be NP-hard, and several optimal approximation algorithms that achieve an approximation factor of 2 have been developed for it. We focus our attention on a generalization of this problem, where each vertex is required to have a set of α (α⩽K) centers close to it. In particular, we study two different versions of this problem. In the first version, each vertex is required to have at least α centers close to it. In the second version, each vertex that does not have a center placed on it is required to have at least α centers close to it. For both these versions we are able to provide polynomial time approximation algorithms that achieve constant approximation factors for any α. For the first version we give an algorithm that achieves an approximation factor of 3 for any α, and achieves an approximation factor of 2 for α<4. For the second version, we provide algorithms with approximation factors of 2 for any α. The best possible approximation factor for even the basic K-center problem is 2, assuming P≠NP.
In addition, we give a polynomial time approximation algorithm for a generalization of the K-supplier problem where a subset of at most K supplier nodes must be selected as centers so that every demand node has at least α centers close to it. For this version our approximation factor is 3. The best possible approximation factor for even the basic K-supplier problem is 3, assuming P≠NP.}, keywords = {Approximation algorithms, Facility location, Fault-tolerance, K-center}, isbn = {0304-3975}, doi = {10.1016/S0304-3975(98)00222-9}, url = {http://www.sciencedirect.com/science/article/pii/S0304397598002229}, author = {Khuller, Samir and Pless,Robert and Sussmann,Yoram J.} } @article {16430, title = {GP + echo + subsumption = improved problem solving}, journal = {Proceedings of the Genetic and Evolutionary Computation Conference (GECCO 2000)}, year = {2000}, month = {2000///}, pages = {411 - 418}, abstract = {Real-time, adaptive control is a difficult problem that can be addressed by EC architectures. We are interested in incorporating into an EC architecture some of the features that Holland{\textquoteright}s Echo architecture presents. Echo has been used to model everything from cultures to financial markets. However, the typical application of Echo is a simulation to observe the dynamics of the modeled elements such as found in control problems. We show in this paper that some aspects of Echo can be incorporated into Genetic Programming to solve control problems. The paper discusses EAGP (Echo Augmented Genetic Programming), a modified GP architecture that uses aspects of Echo, and subsumption. We demonstrate the usefulness of EAGP on a robot navigation problem.}, author = {Punch,W.F. and Rand, William} } @article {16371, title = {The Java memory model is fatally flawed}, journal = {Concurrency - Practice and Experience}, volume = {12}, year = {2000}, month = {2000///}, pages = {445 - 455}, author = {Pugh, William} } @conference {13583, title = {Live multimedia adaptation through wireless hybrid networks}, booktitle = {Multimedia and Expo, 2000. ICME 2000. 2000 IEEE International Conference on}, volume = {3}, year = {2000}, month = {2000///}, pages = {1697 - 1700 vol.3}, abstract = {We present new techniques to intelligently adapt and combine multimedia presentations in real-time, mobile service environments. We present the techniques necessary to perform mobile multimedia processing for multiple video streams with {\textquotedblleft}value-adding{\textquotedblright} information. The adaptation uses video analysis, content-recognition and automated annotation to provide scalable and interactive presentations over hybrid networks to mobile terminals. As a concrete case, we present a mobile surveillance service called Princess, which has several video sources and supports control information, terminal adaptation and end-to-end service}, keywords = {live multimedia adaptation, mobile multimedia processing, mobile terminals, multiple video streams, value-adding information, video analysis, automated annotation, content-recognition, end-to-end service, mobile communication, multimedia communication, real-time systems, surveillance, terminal adaptation, mobile service, wireless hybrid networks, wireless LAN, Princess}, doi = {10.1109/ICME.2000.871098}, author = {Koivisto,A. and Pietkainen,P. and Sauvola,J.
and David Doermann} } @conference {15232, title = {On local search and placement of meters in networks}, booktitle = {Proceedings of the eleventh annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {319 - 328}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-453-2}, url = {http://dl.acm.org/citation.cfm?id=338219.338268}, author = {Khuller, Samir and Bhatia,Randeep and Pless,Robert} } @article {16000, title = {A logic for characterizing multiple bounded agents}, journal = {Autonomous Agents and Multi-Agent Systems}, volume = {3}, year = {2000}, month = {2000///}, pages = {351 - 387}, author = {Grant,J. and Kraus,S. and Perlis, Don} } @article {15942, title = {Meta-reasoning for intelligent dialog repair}, year = {2000}, month = {2000///}, institution = {Technical report, University of Maryland}, abstract = {We present a logical approach and implementation of a real-time rational agent for dialogue management, which includes an ability to represent, reason about, and repair (both implicitly and explicitly) incoherence in dialogue. We exemplify a target problem of negative feedback in action directive subdialogues, with an ability to make reference resolution sensitive to dialogue context and like intentions. The resulting system is able to behave more sensibly in several situations than earlier dialogue management systems in these same conditions.}, author = {Purang,K. and Traum,D. and Purushothaman,D. and Chong,W. and Okamato,Y. and Perlis, Don} } @conference {14232, title = {Multi-camera networks: eyes from eyes}, booktitle = {IEEE Workshop on Omnidirectional Vision, 2000. Proceedings}, year = {2000}, month = {2000///}, pages = {11 - 18}, publisher = {IEEE}, organization = {IEEE}, abstract = {Autonomous or semi-autonomous intelligent systems, in order to function appropriately, need to create models of their environment, i.e., models of space time. These are descriptions of objects and scenes and descriptions of changes of space over time, that is, events and actions. Despite the large amount of research on this problem, as a community we are still far from developing robust descriptions of a system{\textquoteright}s spatiotemporal environment using video input (image sequences). Undoubtedly, some progress has been made regarding the understanding of estimating the structure of visual space, but it has not led to solutions to specific applications. There is, however, an alternative approach which is in line with today{\textquoteright}s {\textquotedblleft}zeitgeist.{\textquotedblright} The vision of artificial systems can be enhanced by providing them with new eyes.
If conventional video cameras are put together in various configurations, new sensors can be constructed that have much more power, and the way they {\textquotedblleft}see{\textquotedblright} the world makes it much easier to solve problems of vision. This research is motivated by examining the wide variety of eye design in the biological world and obtaining inspiration for an ensemble of computational studies that relate how a system sees to what that system does (i.e., relating perception to action). This, coupled with the geometry of multiple views that has flourished in terms of theoretical results in the past few years, points to new ways of constructing powerful imaging devices which suit particular tasks in robotics, visualization, video processing, virtual reality and various computer vision applications better than conventional cameras. This paper presents a number of new sensors that we built using common video cameras and shows their superiority with regard to developing models of space and motion}, keywords = {Biosensors, CAMERAS, Computer vision, Eyes, Image sequences, intelligent systems, Layout, Machine vision, Robot vision systems, Robustness, Spatiotemporal phenomena, video cameras, Virtual reality}, isbn = {0-7695-0704-2}, doi = {10.1109/OMNVIS.2000.853797}, author = {Ferm{\"u}ller, Cornelia and Aloimonos, J. and Baker, P. and Pless, R. and Neumann, J. and Stuart, B.} } @inbook {14236, title = {New Eyes for Shape and Motion Estimation}, booktitle = {Biologically Motivated Computer Vision}, series = {Lecture Notes in Computer Science}, volume = {1811}, year = {2000}, month = {2000///}, pages = {23 - 47}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Motivated by the full field of view of insect eyes and their fast and accurate estimation of egomotion, we constructed a system of cameras to take advantage of the full field of view (FOV) constraints that insects use. In this paper, we develop a new ego-motion algorithm for a rigidly mounted set of cameras undergoing arbitrary rigid motion. This egomotion algorithm combines the unambiguous components of the motion computed by each separate camera. We prove that the cyclotorsion is resistant to errors and show this empirically. We show how to calibrate the system with two novel algorithms, one using secondary cameras and one using self calibration. Given this system calibration, the new 3D motion algorithm first computes the rotation and then the 3D translation. We apply this algorithm to a camera system constructed with four rigidly mounted synchronized cameras pointing in various directions and present motion estimation results at www.cfar.umd.edu/~pbaker/argus.html.}, isbn = {978-3-540-67560-0}, url = {http://dx.doi.org/10.1007/3-540-45482-9_12}, author = {Baker,Patrick and Pless,Robert and Ferm{\"u}ller, Cornelia and Aloimonos, J.}, editor = {Lee,Seong-Whan and B{\"u}lthoff,Heinrich and Poggio,Tomaso} } @article {14166, title = {The Ouchi illusion as an artifact of biased flow estimation}, journal = {Vision Research}, volume = {40}, year = {2000}, month = {2000/01//}, pages = {77 - 95}, abstract = {A pattern by Ouchi has the surprising property that small motions can cause illusory relative motion between the inset and background regions. The effect can be attained with small retinal motions or a slight jiggling of the paper and is robust over large changes in the patterns, frequencies and boundary shapes.
In this paper, we explain that the cause of the illusion lies in the statistical difficulty of integrating local one-dimensional motion signals into two-dimensional image velocity measurements. The estimation of image velocity generally is biased, and for the particular spatial gradient distributions of the Ouchi pattern the bias is highly pronounced, giving rise to a large difference in the velocity estimates in the two regions. The computational model introduced to describe the statistical estimation of image velocity also accounts for the findings of psychophysical studies with variations of the Ouchi pattern and for various findings on the perception of moving plaids. The insight gained from this computational study challenges the current models used to explain biological vision systems and to construct robotic vision systems. Considering the statistical difficulties in image velocity estimation in conjunction with the problem of discontinuity detection in motion fields suggests that theoretically the process of optical flow computations should not be carried out in isolation but in conjunction with the higher level processes of 3D motion estimation, segmentation and shape computation.}, keywords = {Bias, MOTION, optical flow, Plaid, Statistics}, isbn = {0042-6989}, doi = {10.1016/S0042-6989(99)00162-5}, url = {http://www.sciencedirect.com/science/article/pii/S0042698999001625}, author = {Ferm{\"u}ller, Cornelia and Pless,Robert and Aloimonos, J.} } @article {13011, title = {Phylogenetic relationships of Acanthocephala based on analysis of 18S ribosomal RNA gene sequences}, journal = {Journal of Molecular Evolution}, volume = {50}, year = {2000}, month = {2000/06//}, pages = {532 - 540}, abstract = {Acanthocephala (thorny-headed worms) is a phylum of endoparasites of vertebrates and arthropods, included among the most phylogenetically basal triploblastic pseudocoelomates. The phylum is divided into three classes: Archiacanthocephala, Palaeacanthocephala, and Eoacanthocephala. These classes are distinguished by morphological characters such as location of lacunar canals, persistence of ligament sacs in females, number and type of cement glands in males, number and size of proboscis hooks, host taxonomy, and ecology. To understand better the phylogenetic relationships within Acanthocephala, and between Acanthocephala and Rotifera, we sequenced the nearly complete 18S rRNA genes of nine species from the three classes of Acanthocephala and four species of Rotifera from the classes Bdelloidea and Monogononta. Phylogenetic relationships were inferred by maximum-likelihood analyses of these new sequences and others previously determined. The analyses showed that Archiacanthocephala is the sister group to a clade including Eoacanthocephala and Palaeacanthocephala. Archiacanthocephala exhibited a slower rate of evolution at the nucleotide level, as evidenced by shorter branch lengths for the group. We found statistically significant support for the monophyly of Rotifera, represented in our analysis by species from the clade Eurotatoria, which includes the classes Bdelloidea and Monogononta. Eurotatoria also appears as the sister group to Acanthocephala.}, author = {Garc{\'\i}a-Varela,M and P{\'e}rez-Ponce de Le{\'o}n,G. and de la Torre,P and Cummings, Michael P. and Sarma,SS and Laclette,J. P} } @article {15449, title = {A planning-based approach to GUI testing}, journal = {Proceedings of The 13th International Software/Internet Quality Week}, year = {2000}, month = {2000///}, author = {Memon, Atif M. and Pollack,M.
E and Soffa,M. L} } @article {17327, title = {Previews and overviews in digital libraries: Designing surrogates to support visual information seeking}, journal = {Journal of the American Society for Information Science}, volume = {51}, year = {2000}, month = {2000/01/01/}, pages = {380 - 393}, abstract = {To aid designers of digital library interfaces, we present a framework for the design of information representations in terms of previews and overviews. Previews and overviews are graphic or textual representations of information abstracted from primary information objects. Previews act as surrogates for one or a few objects and overviews represent collections of objects. A design framework is elaborated in terms of the following three dimensions: (1) what information objects are available to users, (2) how information objects are related and displayed, and (3) how users can manipulate information objects. When utilized properly, previews and overviews allow users to rapidly discriminate objects of interest from those not of interest, and to more fully understand the scope and nature of digital libraries. This article presents a definition of previews and overviews in context, provides design guidelines, and describes four example applications.}, isbn = {1097-4571}, doi = {10.1002/(SICI)1097-4571(2000)51:4<380::AID-ASI7>3.0.CO;2-5}, url = {http://onlinelibrary.wiley.com/doi/10.1002/(SICI)1097-4571(2000)51:4\%3C380::AID-ASI7\%3E3.0.CO;2-5/abstract;jsessionid=E15C609DE95671E0E91A862B8AFD1CC6.d03t01}, author = {Greene,Stephan and Marchionini,Gary and Plaisant, Catherine and Shneiderman, Ben} } @article {15607, title = {Quantile approximation for robust statistical estimation and k-enclosing problems}, journal = {International Journal of Computational Geometry and Applications}, volume = {10}, year = {2000}, month = {2000///}, pages = {593 - 608}, abstract = {Given a set P of n points in R^d, a fundamental problem in computational geometry is concerned with finding the smallest shape of some type that encloses all the points of P. Well-known instances of this problem include finding the smallest enclosing box, minimum volume ball, and minimum volume annulus. In this paper we consider the following variant: Given a set of n points in R^d, find the smallest shape in question that contains at least k points or a certain quantile of the data. This type of problem is known as a k-enclosing problem. We present a simple algorithmic framework for computing quantile approximations for the minimum strip, ellipsoid, and annulus containing a given quantile of the points. The algorithms run in O(n log n) time.}, author = {Mount, Dave and Netanyahu,N. S and Piatko,C. D and Silverman,R. and Wu,A. Y} } @article {17651, title = {Retrieval scheduling for collaborative multimedia presentations}, journal = {Multimedia Systems}, volume = {8}, year = {2000}, month = {2000///}, pages = {146 - 155}, abstract = {The single-system approach is no longer sufficient to handle the load on popular Internet servers, especially for those offering extensive multimedia content. Such services have to be replicated to enhance their availability, performance, and reliability. In a highly replicated and available environment, server selection is an important issue. In this paper, we propose an application-layer broker (ALB) for this purpose. ALB employs a content-based, client-centric approach to negotiate with the servers and to identify the best server for the requested objects.
ALB aims to maximize client buffer utilization in order to efficiently handle dynamic user interactions such as skip, reverse presentation, and go back in time. We also present details of a collaborative multimedia presentation platform that we have developed based on ALB.}, isbn = {0942-4962}, url = {http://dx.doi.org/10.1007/s005300050157}, author = {Bai,Ping and Prabhakaran,B. and Srinivasan, Aravind} } @conference {16162, title = {Simulation based learning environments and the use of learning histories}, booktitle = {CHI {\textquoteright}00 extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {2 - 3}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We have developed an application framework for constructing simulation-based learning environments using dynamic simulations and visualizations to represent realistic time-dependent behavior. The development environment is described and many examples are given. In particular we will focus on the learning historian, which provides users and learners with a manipulatable recording of their actions that facilitates the exchange of annotated history records among peers and mentors.}, keywords = {education, engineering, History, learning, simulation}, isbn = {1-58113-248-4}, doi = {10.1145/633292.633294}, url = {http://doi.acm.org/10.1145/633292.633294}, author = {Rose,A. and Salter,R. and Keswani,S. and Kositsyna,N. and Plaisant, Catherine and Rubloff,G. and Shneiderman, Ben} } @conference {18464, title = {Smart videoconferencing}, booktitle = {2000 IEEE International Conference on Multimedia and Expo, 2000. ICME 2000}, volume = {3}, year = {2000}, month = {2000///}, pages = {1597 - 1600 vol.3}, publisher = {IEEE}, organization = {IEEE}, abstract = {The combination of acoustical and video processing to achieve a smart audio and video feed from a set of N microphones and M cameras is a task that might conventionally be accomplished by camera persons and control room staff. However, in the context of videoconferencing, this process needs to be performed by control software. We discuss the use of a multi-camera multi-microphone set-up for unattended videoconferencing, and present details of a prototype implementation being developed}, keywords = {acoustical processing, Automatic control, CAMERAS, computerised control, control software, Control systems, Intelligent sensors, Layout, Microphones, multi-camera multi-microphone set-up, multimedia systems, Protocols, prototype implementation, Prototypes, sensor fusion, smart videoconferencing, Switches, Teleconferencing, unattended videoconferencing, video processing}, isbn = {0-7803-6536-4}, doi = {10.1109/ICME.2000.871075}, author = {Zotkin,Dmitry N and Duraiswami, Ramani and Philomin,V.
and Davis, Larry S.} } @conference {16163, title = {A storytelling robot for pediatric rehabilitation}, booktitle = {Proceedings of the fourth international ACM conference on Assistive technologies}, series = {Assets {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {50 - 55}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {children, design process, rehabilitation, robot, therapeutic play, user interface}, isbn = {1-58113-313-8}, doi = {10.1145/354324.354338}, url = {http://doi.acm.org/10.1145/354324.354338}, author = {Plaisant, Catherine and Druin, Allison and Lathan,Corinna and Dakhane,Kapil and Edwards,Kris and Vice,Jack Maxwell and Montemayor,Jaime} } @article {15337, title = {Subgrid-scale models for compressible large-eddy simulations}, journal = {Theoretical and Computational Fluid Dynamics}, volume = {13}, year = {2000}, month = {2000///}, pages = {361 - 376}, author = {Martin, M.P and Piomelli,U. and Candler,G. V} } @article {15351, title = {Toward the large-eddy simulation over a hypersonic elliptical cross-section cone}, journal = {AIAA Paper No. 00-2311}, year = {2000}, month = {2000///}, author = {Martin, M.P and Weirs,G. and Candler,G. V and Piomelli,U. and Johnson,H. and Nompelis,I.} } @article {16159, title = {Vehicle Speed Information Displays for Public Websites: A Survey of User Preferences}, journal = {Technical Reports from UMIACS}, year = {2000}, month = {2000/10/09/}, abstract = {The paper reports on a study comparing alternative presentations of freeway speed data on maps. The goal of the study was to inform the design of displays of real time speed data over the Internet to the general public. Subjects were presented with a series of displays and asked to rate their preferences. We looked at different choices of color (3 colors, 6 colors or a continuous range), and proposed line, sensor, and segment representations of the speed data. We also collected feedback on more complex displays such as comparison between current and "normal" speeds, and a chart of speed variation over a period of time at given locations. (Also cross-referenced as HCIL-TR-2000-23) (Also cross-referenced as UMIACS-TR-2000-73) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1109}, author = {Plaisant, Catherine and Bhamidipati,Phanikumar} } @inbook {17671, title = {Wavelength Rerouting in Optical Networks, or the Venetian Routing Problem}, booktitle = {Approximation Algorithms for Combinatorial Optimization}, series = {Lecture Notes in Computer Science}, volume = {1913}, year = {2000}, month = {2000///}, pages = {71 - 84}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Wavelength rerouting has been suggested as a viable and cost-effective method to improve the blocking performance of wavelength-routed Wavelength-Division Multiplexing (WDM) networks. This method leads to the following combinatorial optimization problem, dubbed Venetian Routing. Given a directed multigraph G along with two vertices s and t and a collection of pairwise arc-disjoint paths, we wish to find an st-path which arc-intersects the smallest possible number of such paths. In this paper we prove the computational hardness of this problem even in various special cases, and present several approximation algorithms for its solution.
In particular we show a non-trivial connection between Venetian Routing and Label Cover.}, isbn = {978-3-540-67996-7}, url = {http://dx.doi.org/10.1007/3-540-44436-X_9}, author = {Caprara,Alberto and Italiano,Giuseppe and Mohan,G. and Panconesi,Alessandro and Srinivasan, Aravind}, editor = {Jansen,Klaus and Khuller, Samir} } @article {16018, title = {What does it take to refer}, journal = {Journal of Consciousness Studies}, volume = {7}, year = {2000}, month = {2000///}, pages = {67 - 9}, author = {Perlis, Don} } @article {16014, title = {What does it take to refer? a reply to Bojadziev}, journal = {Journal of Consciousness Studies}, volume = {7}, year = {2000}, month = {2000///}, pages = {67 - 69}, author = {Perlis, Don} } @conference {17553, title = {Application-layer broker for scalable Internet services with resource reservation}, booktitle = {Proceedings of the seventh ACM international conference on Multimedia (Part 2)}, series = {MULTIMEDIA {\textquoteright}99}, year = {1999}, month = {1999///}, pages = {103 - 106}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {1-58113-239-5}, doi = {10.1145/319878.319906}, url = {http://doi.acm.org/10.1145/319878.319906}, author = {Bai,Ping and Prabhakaran,B. and Srinivasan, Aravind} } @article {15546, title = {Binary space partitions in Pl{\"u}cker space}, journal = {Algorithm Engineering and Experimentation}, year = {1999}, month = {1999///}, pages = {663 - 663}, abstract = {One of the important potential applications of computational geometry is in the field of computer graphics. One challenging computational problem in computer graphics is that of rendering scenes with nearly photographic realism. A major distinction in lighting and shading models in computer graphics is between local illumination models and global illumination models. Local illumination models are available with most commercial graphics software. In such a model the color of a point on an object is modeled as a function of the local surface properties of the object and its relation to a typically small number of point light sources. The other objects of the scene have no effect. In contrast, in global illumination models, the color of a point is determined by considering illumination both from direct light sources as well as indirect lighting from other surfaces in the environment. In some sense, there is no longer a distinction between objects and light sources, since every surface is a potential emitter of (indirect) light. Two physically based methods dominate the field of global illumination. They are ray tracing [8] and radiosity [3].}, doi = {10.1007/3-540-48518-X_6}, author = {Mount, Dave and Pu,F. T} } @inbook {16170, title = {Browsing hierarchical data with multi-level dynamic queries}, booktitle = {Readings in information visualization}, year = {1999}, month = {1999///}, pages = {295 - 305}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {9781558605336}, author = {Kumar,H. P and Plaisant, Catherine and Shneiderman, Ben} } @article {15502, title = {Comparing causal-link and propositional planners: Tradeoffs between plan length and domain size}, year = {1999}, month = {1999///}, institution = {Technical Report 99-06, University of Pittsburgh, Pittsburgh}, abstract = {Recent studies have shown that propositional planners, which derive from Graphplan and SATPLAN, can generate significantly longer plans than causal-link planners.
We present experimental evidence demonstrating that while this may be true, propositional planners also have important limitations relative to the causal-link planners: specifically, they can generate plans only for smaller domains, where the size of a domain is defined by the number of distinguishable objects it contains. Our experiments were conducted in the domain of code optimization, in which the states of the world represent states of the computer program code and the planning operators are the optimization operators. This domain is well-suited to studying the trade-offs between plan length and domain size, because it is straightforward to manipulate both these factors. On the basis of our experiments, we conclude that causal-link and propositional planners have complementary strengths.}, author = {Memon, Atif M. and Pollack,M. and Soffa,M. L} } @conference {16355, title = {Compressing Java class files}, booktitle = {ACM SIGPLAN Notices}, volume = {34}, year = {1999}, month = {1999///}, pages = {247 - 258}, author = {Pugh, William} } @conference {15640, title = {Computing nearest neighbors for moving points and applications to clustering}, booktitle = {Proceedings of the tenth annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}99}, year = {1999}, month = {1999///}, pages = {931 - 932}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-434-6}, url = {http://dl.acm.org/citation.cfm?id=314500.315095}, author = {Kanungo,Tapas and Mount, Dave and Netanyahu,Nathan S. and Piatko,Christine and Silverman,Ruth and Wu,Angela Y.} } @book {13528, title = {Content-Based Access to Multimedia Information: From Technology Trends to State of the Art}, year = {1999}, month = {1999///}, publisher = {Kluwer}, organization = {Kluwer}, author = {Perry,B. and Chang,S-K. and Dinsmore,J. and David Doermann and Rosenfeld, A. and Stevens,S.} } @article {18668, title = {Coverage estimation methods for stratified fault-injection}, journal = {Computers, IEEE Transactions on}, volume = {48}, year = {1999}, month = {1999/07//}, pages = {707 - 723}, abstract = {This paper addresses the problem of estimating fault tolerance coverage through statistical processing of observations collected in fault-injection experiments. In an earlier paper, various estimators based on simple sampling in the complete fault/activity input space and stratified sampling in a partitioned space were studied; frequentist confidence limits were derived based on a normal approximation. In this paper, the validity of this approximation is analyzed. The theory of confidence regions is introduced to estimate coverage without approximation when stratification is used. Three statistics are considered for defining confidence regions. It is shown that one, a vectorial statistic, is often more conservative than the other two. However, only the vectorial statistic is computationally tractable. We then consider Bayesian estimation methods for stratified sampling. Two methods are presented to obtain an approximation of the posterior distribution of the coverage by calculating its moments. The moments are then used to identify the type of the distribution in the Pearson distribution system, to estimate its parameters, and to obtain the coverage confidence limit.
Three hypothetical example systems are used to compare the validity and the conservatism of the frequentist and Bayesian estimations}, keywords = {Bayes methods, Bayesian estimations, confidence regions, coverage estimation methods, fault tolerance coverage, fault tolerant computing, frequentist confidence limits, parameter estimation, parameters estimation, Pearson distribution system, statistical processing, stratified fault-injection, stratified sampling, vectorial statistic}, isbn = {0018-9340}, doi = {10.1109/12.780878}, author = {Michel Cukier and Powell,D. and Ariat,J.} } @conference {16166, title = {The design of history mechanisms and their use in collaborative educational simulations}, booktitle = {Proceedings of the 1999 conference on Computer support for collaborative learning}, series = {CSCL {\textquoteright}99}, year = {1999}, month = {1999///}, publisher = {International Society of the Learning Sciences}, organization = {International Society of the Learning Sciences}, abstract = {Reviewing past events has been useful in many domains. Videotapes and flight data recorders provide invaluable technological help to sports coaches or aviation engineers. Similarly, providing learners with a readable recording of their actions may help them monitor their behavior, reflect on their progress, and experiment with revisions of their experiences. It may also facilitate active collaboration among dispersed learning communities. Learning histories can help students and professionals make more effective use of digital library searching, word processing tasks, computer-assisted design tools, electronic performance support systems, and web navigation. This paper describes the design space and discusses the challenges of implementing learning histories. It presents guidelines for creating effective implementations, and the design tradeoffs between sparse and dense history records. The paper also presents a first implementation of learning histories for a simulation-based engineering learning environment called SimPLE (Simulated Processes in a Learning Environment) for the case of a semiconductor fabrication module, and reports on early user evaluation of learning histories implemented within SimPLE.}, keywords = {hci, scaffolding, simulation}, url = {http://dl.acm.org/citation.cfm?id=1150240.1150284}, author = {Plaisant, Catherine and Rose,Anne and Rubloff,Gary and Salter,Richard and Shneiderman, Ben} } @conference {13938, title = {Designing PETS: a personal electronic teller of stories}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems: the CHI is the limit}, series = {CHI {\textquoteright}99}, year = {1999}, month = {1999///}, pages = {326 - 329}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We have begun the development of a new robotic pet that can support children in the storytelling process. Children can build their own pet by snapping together the modular animal parts of the PETS robot. After their pet is built, children can tell stories using the My Pets software. These stories can then be acted out by their robotic pet. This video paper describes the motivation for this research and the design process of our intergenerational design team in building the first PETS prototypes. We will discuss our progress to date and our focus for the future.
}, keywords = {children, cooperative inquiry, design techniques, educational applications, intergenerational design team, PETS, ROBOTICS}, isbn = {0-201-48559-1}, doi = {10.1145/302979.303103}, url = {http://doi.acm.org/10.1145/302979.303103}, author = {Druin, Allison and Montemayor,Jamie and Hendler,Jim and McAlister,Britt and Boltman,Angela and Fiterman,Eric and Plaisant,Aurelie and Kruskal,Alex and Olsen,Hanne and Revett,Isabella and Schwenn,Thomas Plaisant and Sumida,Lauren and Wagner,Rebecca} } @conference {17080, title = {Developing the next generation of Earth science data systems: the Global Land Cover Facility}, booktitle = {Geoscience and Remote Sensing Symposium, 1999. IGARSS {\textquoteright}99 Proceedings. IEEE 1999 International}, volume = {1}, year = {1999}, month = {1999///}, pages = {616 - 618 vol.1}, publisher = {IEEE}, organization = {IEEE}, abstract = {A recent initiative by NASA has resulted in the formation of a federation of Earth science data partners. These Earth Science Information Partners (ESIPs) have been tasked with creating novel Earth science data products and services as well as distributing new and existing data sets to the Earth science community and the general public. The University of Maryland established its ESIP activities with the creation of the Global Land Cover Facility (GLCF). This joint effort of the Institute for Advanced Computer Studies (UMIACS) and the Department of Geography has developed an operational data archiving and distribution system aimed at advancing current land cover research efforts. The success of the GLCF is tied closely to assessing user needs as well as the timely delivery of data products to the research community. This paper discusses the development and implementation of a web-based interface that allows users to query the authors{\textquoteright} data holdings and perform user requested processing tasks on demand. The GLCF takes advantage of a scalable, high performance computing architecture for the manipulation of very large remote sensing data sets and the rapid spatial indexing of multiple format data types. The user interface has been developed with the cooperation of the Human-Computer Interaction Laboratory (HCIL) and demonstrates advances in spatial and temporal querying tools as well as the ability to overlay multiple raster and vector data sets. Their work provides one perspective concerning how critical earth science data may be handled in the near future by a coalition of distributed data centers}, keywords = {Computer architecture, data archiving, data distribution system, Data systems, Distributed computing, Earth science data products, Earth science data system, ESIP, geographic information system, geographic information systems, Geography, geophysical measurement technique, geophysical signal processing, geophysical techniques, Geoscience, GIS, GLCF, Global Land Cover Facility, High performance computing, Indexing, information service, Information services, Institute for Advanced Computer Studies, land cover, NASA, next generation, PACS, Remote sensing, terrain mapping, UMIACS, University of Maryland, User interfaces, web-based interface}, isbn = {0-7803-5207-6}, doi = {10.1109/IGARSS.1999.773583}, author = {Lindsay,F.E. and Townshend,J.R.G. and JaJa, Joseph F. and Humphries,J.
and Plaisant, Catherine and Shneiderman, Ben} } @conference {16259, title = {Efficient perspective-accurate silhouette computation}, booktitle = {Proceedings of the fifteenth annual symposium on Computational geometry}, series = {SCG {\textquoteright}99}, year = {1999}, month = {1999///}, pages = {417 - 418}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {1-58113-068-6}, doi = {10.1145/304893.304999}, url = {http://doi.acm.org/10.1145/304893.304999}, author = {Barequet,G. and Duncan,C. A and Goodrich,M. T and Kumar,S. and Pop, Mihai} } @article {16172, title = {The end of zero-hit queries: query previews for NASA{\textquoteright}s Global Change Master Directory}, journal = {International Journal on Digital Libraries}, volume = {2}, year = {1999}, month = {1999///}, pages = {79 - 90}, abstract = {The Human-Computer Interaction Laboratory (HCIL) of the University of Maryland and NASA have collaborated over three years to refine and apply user interface research concepts developed at HCIL in order to improve the usability of NASA data services. The research focused on dynamic query user interfaces, visualization, and overview + preview designs. An operational prototype, using query previews, was implemented with NASA{\textquoteright}s Global Change Master Directory (GCMD), a directory service for earth science datasets. Users can see the histogram of the data distribution over several attributes and choose among attribute values. A result bar shows the cardinality of the result set, thereby preventing users from submitting queries that would have zero hits. Our experience confirmed the importance of metadata accuracy and completeness. The query preview interfaces make visible the problems or gaps in the metadata that are undetectable with classic form fill-in interfaces. This could be seen as a problem, but we think that it will have a long-term beneficial effect on the quality of the metadata as data providers will be compelled to produce more complete and accurate metadata. The adaptation of the research prototype to the NASA data required revised data structures and algorithms.}, isbn = {1432-5012}, url = {http://dx.doi.org/10.1007/s007990050039}, author = {Greene,Stephan and Tanin,Egemen and Plaisant, Catherine and Shneiderman, Ben and Olsen,Lola and Major,Gene and Johns,Steve} } @conference {16174, title = {Excentric labeling: dynamic neighborhood labeling for data visualization}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems: the CHI is the limit}, series = {CHI {\textquoteright}99}, year = {1999}, month = {1999///}, pages = {512 - 519}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The widespread use of information visualization is hampered by the lack of effective labeling techniques. An informal taxonomy of labeling methods is proposed. We then describe excentric labeling, a new dynamic technique to label a neighborhood of objects located around the cursor. This technique does not intrude into the existing interaction, is not computationally intensive, and was easily applied to several visualization applications. A pilot study with eight subjects indicates a strong speed benefit over a zoom interface for tasks that involve the exploration of large numbers of objects. Observations and comments from users are presented.
}, keywords = {dynamic labeling, Evaluation, label, Visualization}, isbn = {0-201-48559-1}, doi = {10.1145/302979.303148}, url = {http://doi.acm.org/10.1145/302979.303148}, author = {Fekete,Jean-Daniel and Plaisant, Catherine} } @conference {14778, title = {Exposing Application Alternatives}, booktitle = {Distributed Computing Systems, International Conference on}, year = {1999}, month = {1999///}, pages = {0384 - 0384}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {We present the design of an interface to allow applications to export tuning alternatives to a higher-level system. By exposing different parameters that can be changed at runtime, applications can be made to adapt to changes in their execution environment due to other programs, or the addition or deletion of nodes, communication links, etc. An integral part of this interface is that an application exposes not only its options, but also the resource utilization of each option and the effect that the option will have on the application{\textquoteright}s performance. We discuss how these options can be evaluated to tune the overall performance of a collection of applications in the system. Finally, we show preliminary results from a database application that is automatically reconfigured by the system from query shipping to data shipping based on the number of active clients.}, keywords = {computational grids, metacomputing, scheduling}, isbn = {0-7695-0222-9}, doi = {http://doi.ieeecomputersociety.org/10.1109/ICDCS.1999.776540}, author = {Keleher, Peter J. and Hollingsworth, Jeffrey K and Perkovic, Dejan} } @conference {18672, title = {Fault injection based on a partial view of the global state of a distributed system}, year = {1999}, month = {1999///}, pages = {168 - 177}, abstract = {This paper describes the basis for and preliminary implementation of a new fault injector, called Loki, developed specifically for distributed systems. Loki addresses issues related to injecting correlated faults in distributed systems. In Loki, fault injection is performed based on a partial view of the global state of an application. In particular, facilities are provided to pass user-specified state information between nodes to provide a partial view of the global state in order to try to inject complex faults successfully. A post-runtime analysis, using an off-line clock synchronization and a bounding technique, is used to place events and injections on a single global time-line and determine whether the intended faults were properly injected. Finally, observations containing successful fault injections are used to estimate specified dependability measures. In addition to describing the details of our new approach, we present experimental results obtained from a preliminary implementation in order to illustrate Loki{\textquoteright}s ability to inject complex faults predictably}, keywords = {bounding technique, clock synchronization, distributed programming, distributed software systems, fault injection, Loki, post-runtime analysis, program testing, program verification, software reliability, Synchronisation}, doi = {10.1109/RELDIS.1999.805093}, author = {Michel Cukier and Chandra,R. and Henke,D. and Pistole,J. and Sanders,W.
H.} } @conference {16356, title = {Fixing the Java memory model}, booktitle = {Proceedings of the ACM 1999 conference on Java Grande}, year = {1999}, month = {1999///}, pages = {89 - 98}, author = {Pugh, William} } @conference {15923, title = {Practical reasoning and plan execution with active logic}, booktitle = {Proceedings of the IJCAI{\textquoteright}99 Workshop on Practical Reasoning and Rationality}, year = {1999}, month = {1999///}, author = {Purang,K. and Purushothaman,D. and Traum,D. and Andersen,C. and Perlis, Don} } @article {17209, title = {Human-centered computing, online communities, and virtual environments}, journal = {IEEE Computer Graphics and Applications}, volume = {19}, year = {1999}, month = {1999/12//Nov}, pages = {70 - 74}, abstract = {This report summarizes results of the first EC/NSF joint Advanced Research Workshop, which identified key research challenges and opportunities in information technology. The group agreed that the first joint research workshop should concentrate on the themes of human-centered computing and VEs. Human-centered computing is perceived as an area of strategic importance because of the move towards greater decentralization and decomposition in the location and provision of computation. The area of VEs is one where increased collaboration should speed progress in solving some of the more intractable problems in building effective applications}, keywords = {Books, Collaboration, Collaborative work, Conferences, EC/NSF joint Advanced Research Workshop, Feeds, Human computer interaction, human-centered computing, Internet, Joining materials, Laboratories, Online communities, Research initiatives, USA Councils, User interfaces, Virtual environment, virtual environments, Virtual reality}, isbn = {0272-1716}, doi = {10.1109/38.799742}, author = {Brown,J. R and van Dam,A. and Earnshaw,R. and Encarnacao,J. and Guedj,R. and Preece,J. and Shneiderman, Ben and Vince,J.} } @article {18704, title = {Impact of Cl- and Na+ ions on simulated structure and dynamics of βARK1 PH domain}, journal = {Proteins: Structure, Function, and Bioinformatics}, volume = {35}, year = {1999}, month = {1999///}, pages = {206 - 217}, abstract = {A nonzero net charge of proteins at pH 7 is usually compensated by the addition of charge-balancing counter ions during molecular dynamics simulation, which reduces electrostatic interactions. For highly charged proteins, like the βARK1 PH domain used here, it seems reasonable to also add explicit salt ions. To assess the impact of explicit salt ions, two molecular dynamics simulations of solvated βARK1 PH domain have been carried out with different numbers of Cl- and Na+ ions, based on the Cornell et al. force field and the Ewald summation, which was used in the treatment of long-range electrostatic interactions. Initial positions of ions were obtained from the AMBER CION program. Increasing the number of ions alters the average structure in loop regions, as well as the fluctuation amplitudes of dihedral angles. We found unnaturally strong interactions between side chains in the absence of salt ions. The presence of salt ions reduces these electrostatic interactions. The time needed for the equilibration of the ionic environment around the protein, after initial placement of ions close to oppositely charged side chains, is in the nanosecond time range, which can be shortened by using a higher ionic strength.
Our results also suggest selecting those methods that do not place the ions initially close to the protein surface.}, keywords = {counter ions, electrostatic interaction, equilibration, GRK2 PH domain, hydrogen bonds, ionic solvent, ionic strength, molecular dynamics simulation, Proteins}, isbn = {1097-0134}, doi = {10.1002/(SICI)1097-0134(19990501)35:2<206::AID-PROT7>3.0.CO;2-A}, url = {http://onlinelibrary.wiley.com/doi/10.1002/(SICI)1097-0134(19990501)35:2<206::AID-PROT7>3.0.CO;2-A/abstract}, author = {Pfeiffer,Stefania and Fushman, David and Cowburn,David} } @conference {11986, title = {Independent motion: the importance of history}, booktitle = {Computer Vision and Pattern Recognition, 1999. IEEE Computer Society Conference on.}, volume = {2}, year = {1999}, month = {1999///}, pages = {97 Vol. 2 - 97 Vol. 2}, publisher = {IEEE}, organization = {IEEE}, abstract = {We consider a problem central in aerial visual surveillance applications: detection and tracking of small, independently moving objects in long and noisy video sequences. We directly use spatiotemporal image intensity gradient measurements to compute an exact model of background motion. This allows the creation of accurate mosaics over many frames and the definition of a constraint violation function which acts as an indication of independent motion. A novel temporal integration method maintains confidence measures over long subsequences without computing the optic flow, requiring object models, or using a Kalman filter. The mosaic acts as a stable feature frame, allowing precise localization of the independently moving objects. We present a statistical analysis of the effects of image noise on the constraint violation measure and find a good match between the predicted probability distribution function and the measured sample frequencies in a test sequence}, keywords = {aerial visual surveillance, background image, Fluid flow measurement, Frequency measurement, History, Motion detection, Motion estimation, Motion measurement, Noise measurement, Optical computing, Optical noise, spatiotemporal image intensity gradient measurements, Spatiotemporal phenomena, Surveillance, Video sequences}, isbn = {0-7695-0149-4}, doi = {10.1109/CVPR.1999.784614}, author = {Pless, R. and Brodsky, T. and Aloimonos, J.} } @article {16165, title = {Interface and data architecture for query preview in networked information systems}, journal = {ACM Trans. Inf. Syst.}, volume = {17}, year = {1999}, month = {1999/07//}, pages = {320 - 341}, abstract = {There are numerous problems associated with formulating queries on networked information systems. These include increased data volume and complexity, accompanied by slow network access. This article proposes a new approach to network query user interfaces that consists of two phases: query preview and query refinement. This new approach is based on the concepts of dynamic queries and query previews, which guide users in rapidly and dynamically eliminating undesired records, reducing the data volume to a manageable size, and refining queries locally before submission over a network. Examples of two applications are given: a Restaurant Finder and a prototype for NASA{\textquoteright}s Earth Observing Systems Data Information Systems (EOSDIS). Data architecture is discussed, and user feedback is presented.
}, keywords = {direct manipulation, dynamic query, EOSDIS, graphical user interface, query preview, query refinement, science data}, isbn = {1046-8188}, doi = {10.1145/314516.314522}, url = {http://doi.acm.org/10.1145/314516.314522}, author = {Plaisant, Catherine and Shneiderman, Ben and Doan,Khoa and Bruns,Tom} } @article {14408, title = {Learning probabilistic relational models}, journal = {International Joint Conference on Artificial Intelligence}, volume = {16}, year = {1999}, month = {1999///}, pages = {1300 - 1309}, abstract = {A large portion of real-world data is stored in commercial relational database systems. In contrast, most statistical learning methods work only with {\textquotedblleft}flat{\textquotedblright} data representations. Thus, to apply these methods, we are forced to convert our data into a flat form, thereby losing much of the relational structure present in our database. This paper builds on the recent work on probabilistic relational models (PRMs), and describes how to learn them from databases. PRMs allow the properties of an object to depend probabilistically both on other properties of that object and on properties of related objects. Although PRMs are significantly more expressive than standard models, such as Bayesian networks, we show how to extend well-known statistical methods for learning Bayesian networks to learn these models. We describe both parameter estimation and structure learning {\textemdash} the automatic induction of the dependency structure in a model. Moreover, we show how the learning procedure can exploit standard database retrieval techniques for efficient learning from large datasets. We present experimental results on both real and synthetic relational databases. }, author = {Friedman,N. and Getoor, Lise and Koller,D. and Pfeffer,A.} } @inbook {16169, title = {LifeLines: Visualizing Personal Histories}, booktitle = {Readings in Information Visualization: Using Vision to Think}, year = {1999}, month = {1999///}, pages = {285 - 285}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {9781558605336}, author = {Kumar,H. P and Plaisant, Catherine and Shneiderman, Ben} } @conference {15984, title = {Mixed initiative dialogue and intelligence via active logic}, booktitle = {proceedings of the AAAI99 Workshop on Mixed-Initiative Intelligence}, year = {1999}, month = {1999///}, pages = {60 - 67}, author = {Andersen,C. and Traum,D. and Purang,K. and Purushothaman,D. and Perlis, Don} } @article {16354, title = {Model-checking concurrent systems with unbounded integer variables: symbolic representations, approximations, and experimental results}, journal = {ACM Transactions on Programming Languages and Systems}, volume = {21}, year = {1999}, month = {1999/07//}, pages = {747 - 789}, isbn = {01640925}, doi = {10.1145/325478.325480}, url = {http://dl.acm.org/citation.cfm?id=325480}, author = {Bultan,Tevfik and Gerber,Richard and Pugh, William} } @article {15853, title = {Multilingual Information Discovery and AccesS (MIDAS): A Joint ACM DL{\textquoteright}99/ ACM SIGIR{\textquoteright}99 Workshop.}, journal = {D-Lib Magazine}, volume = {5}, year = {1999}, month = {1999///}, pages = {1 - 12}, abstract = {Discusses a multidisciplinary workshop that addressed issues concerning internationally distributed information networks.
Highlights include multilingual information access in media other than character-coded text; cross-language information retrieval and multilingual metadata; and evaluation of multilingual systems.}, keywords = {Distributed computing, Electronic Media}, isbn = {ISSN-1082-9873}, url = {http://www.eric.ed.gov/ERICWebPortal/detail?accno=EJ601937}, author = {Oard, Douglas and Peters,Carol and Ruiz,Miguel and Frederking,Robert and Klavans,Judith and Sheridan,Paraic} } @article {13620, title = {Page Segmentation and Zone Classification: The State of the Art}, volume = {LAMP-TR-036,CAR-TR-927,CS-TR-4079}, year = {1999}, month = {1999/11//}, institution = {University of Maryland, College Park}, abstract = {Page segmentation and zone classification are key areas of research in document image processing, because they occupy an intermediate position between document preprocessing and higher-level document understanding such as logical page analysis and OCR. Such analysis of the page relies heavily on an appropriate document model and results in a representation of the physical structure of the document. The purpose of this review is to analyze progress made in page segmentation and zone classification and suggest what needs to be done to advance the field.}, author = {Okun,O. and David Doermann and Pietikainen,M.} } @article {17324, title = {Pixel Data Access for End-User Programming and Graphical Macros}, journal = {Technical Reports from UMIACS}, year = {1999}, month = {1999/05/25/}, abstract = {Pixel Data Access is an interprocess communication technique that enables users of graphical user interfaces to automate certain tasks. By accessing the contents of the display buffer, users can search for pixel representations of interface elements, and then initiate actions such as mouse clicks and keyboard entries. While this technique has limitations, it offers users of current systems some unusually powerful features that are especially appealing in the area of end-user programming. Also cross-referenced as UMIACS-TR-99-27 }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/1009}, author = {Potter,Richard and Shneiderman, Ben} } @conference {16023, title = {Practical reasoning and plan execution with active logic}, booktitle = {Proceedings of the IJCAI-99 Workshop on Practical Reasoning and Rationality}, year = {1999}, month = {1999///}, pages = {30 - 38}, author = {Purang,K. and Purushothaman,D. and Traum,D. and Andersen,C. and Perlis, Don} } @book {15334, title = {A Priori Test of SGS Models in Compressible Turbulence}, year = {1999}, month = {1999///}, publisher = {Army High Performance Computing Research Center}, organization = {Army High Performance Computing Research Center}, author = {Martin, M.P and Piomelli,U. and Candler,G. V} } @conference {16167, title = {Refining query previews techniques for data with multivalued attributes: the case of NASA EOSDIS}, booktitle = {Research and Technology Advances in Digital Libraries, 1999. ADL {\textquoteright}99. Proceedings. IEEE Forum on}, year = {1999}, month = {1999///}, pages = {50 - 59}, abstract = {Query Previews allow users to rapidly gain an understanding of the content and scope of a digital data collection. These previews present overviews of abstracted metadata, enabling users to rapidly and dynamically avoid undesired data. We present our recent work on developing query previews for a variety of NASA EOSDIS situations.
We focus on approaches that successfully address the challenge of multi-valued attribute data. Memory requirements and processing time associated with running these new solutions remain independent of the number of records in the dataset. We describe two techniques and their respective prototypes used to preview NASA Earth science data}, keywords = {abstracted metadata, dataset, digital data collection, digital libraries, Earth science data, geophysics computing, memory requirements, meta data, multi-valued attributes, multivalued attribute data, NASA EOSDIS, processing time, query previews techniques, query processing, undesired data}, doi = {10.1109/ADL.1999.777690}, author = {Plaisant, Catherine and Venkatraman,M. and Ngamkajorwiwat,K. and Barth,R. and Harberts,B. and Feng,Wenlan} } @article {16368, title = {SIPR: A new framework for generating efficient code for sparse matrix computations}, journal = {Languages and Compilers for Parallel Computing}, year = {1999}, month = {1999///}, pages = {213 - 229}, author = {Pugh, William and Shpeisman,T.} } @conference {11965, title = {Statistical biases in optic flow}, booktitle = {Computer Vision and Pattern Recognition, 1999. IEEE Computer Society Conference on.}, volume = {1}, year = {1999}, month = {1999///}, pages = {566 Vol. 1 - 566 Vol. 1}, publisher = {IEEE}, organization = {IEEE}, abstract = {The computation of optical flow from image derivatives is biased in regions of non-uniform gradient distributions. A least-squares or total least squares approach to computing optic flow from image derivatives even in regions of consistent flow can lead to a systematic bias dependent upon the direction of the optic flow, the distribution of the gradient directions, and the distribution of the image noise. The bias is a consistent underestimation of length and a directional error. Similar results hold for various methods of computing optical flow in the spatiotemporal frequency domain. The predicted bias in the optical flow is consistent with psychophysical evidence of human judgment of the velocity of moving plaids, and provides an explanation of the Ouchi illusion. Correction of the bias requires accurate estimates of the noise distribution; the failure of the human visual system to make these corrections illustrates both the difficulty of the task and the feasibility of using this distorted optic flow or undistorted normal flow in tasks requiring higher level processing}, keywords = {Distributed computing, Frequency domain analysis, HUMANS, image derivatives, Image motion analysis, Image sequences, Least squares methods, Motion estimation, Optical computing, Optical distortion, optical flow, Optical noise, Ouchi illusion, perception of motion, Psychology, Spatiotemporal phenomena, statistical analysis, systematic bias, total least squares}, isbn = {0-7695-0149-4}, doi = {10.1109/CVPR.1999.786994}, author = {Ferm{\"u}ller, Cornelia and Pless, R.
and Aloimonos, J.} } @conference {16173, title = {Temporal, geographical and categorical aggregations viewed through coordinated displays: a case study with highway incident data}, booktitle = {Proceedings of the 1999 workshop on new paradigms in information visualization and manipulation in conjunction with the eighth ACM international conference on Information and knowledge management}, series = {NPIVM {\textquoteright}99}, year = {1999}, month = {1999///}, pages = {26 - 34}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Information visualization displays can hold a limited number of data points, typically a few thousand, before they get crowded. One way to solve this problem with larger data sets is to create aggregates. Aggregations were used together with the Snap-Together Visualization system to coordinate the visual displays of aggregates and their content. If two displays each hold one thousand items then rapid access and visibility can be maintained for a million points. This paper presents examples based on a database of highway incident data.}, isbn = {1-58113-254-9}, doi = {10.1145/331770.331780}, url = {http://doi.acm.org/10.1145/331770.331780}, author = {Fredrikson,Anna and North,Chris and Plaisant, Catherine and Shneiderman, Ben} } @article {16171, title = {Temporal visualization for legal case histories}, journal = {PROCEEDINGS OF THE ANNUAL MEETING-AMERICAN SOCIETY FOR INFORMATION SCIENCE}, volume = {36}, year = {1999}, month = {1999///}, pages = {271 - 279}, abstract = {This paper discusses visualization of legal information using a tool for temporal information called LifeLines. The direct and indirect histories of cases can become very complex. We explored ways that LifeLines could aid in viewing the links between the original case and the direct and indirect histories. The Apple Computer, Inc. v. Microsoft Corporation and Hewlett Packard Company case is used to illustrate the prototype. For example, if users want to find out how the rulings or statutes changed throughout this case, they could retrieve this information within a single display. Using the timeline, users could also choose at which point in time they would like to begin viewing the case. LifeLines support various views of a case{\textquoteright}s history. For instance, users can view the trial history of a case, the references involved in a case, and citations made to a case. The paper describes improvements to LifeLines that could help in providing a more useful visualization of case history.}, author = {Harris,C. and Allen,R.B. and Plaisant, Catherine and Shneiderman, Ben} } @article {14648, title = {Transforming cabbage into turnip: polynomial algorithm for sorting signed permutations by reversals}, journal = {J. ACM}, volume = {46}, year = {1999}, month = {1999/01//}, pages = {1 - 27}, abstract = {Genomes frequently evolve by reversals $\rho(i,j)$ that transform a gene order $\pi_1 \ldots \pi_i \pi_{i+1} \ldots \pi_{j-1} \pi_j \ldots \pi_n$ into $\pi_1 \ldots \pi_i \pi_{j-1} \ldots \pi_{i+1} \pi_j \ldots \pi_n$. Reversal distance between permutations $\pi$ and $\sigma$ is the minimum number of reversals to transform $\pi$ into $\sigma$. Analysis of genome rearrangements in molecular biology started in the late 1930{\textquoteright}s, when Dobzhansky and Sturtevant published a milestone paper presenting a rearrangement scenario with 17 inversions between the species of Drosophila.
Analysis of genomes evolving by inversions leads to a combinatorial problem of sorting by reversals studied in detail recently. We study sorting of signed permutations by reversals, a problem that adequately models rearrangements in small genomes like chloroplast or mitochondrial DNA. The previously suggested approximation algorithms for sorting signed permutations by reversals compute the reversal distance between permutations with an astonishing accuracy for both simulated and biological data. We prove a duality theorem explaining this intriguing performance and show that there exists a {\textquotedblleft}hidden{\textquotedblright} parameter that allows one to compute the reversal distance between signed permutations in polynomial time.}, keywords = {Computational Biology, Genetics}, isbn = {0004-5411}, doi = {10.1145/300515.300516}, url = {http://doi.acm.org/10.1145/300515.300516}, author = {Hannenhalli, Sridhar and Pevzner,Pavel A.} } @conference {15467, title = {Using a goal-driven approach to generate test cases for GUIs}, booktitle = {Software Engineering, 1999. Proceedings of the 1999 International Conference on}, year = {1999}, month = {1999/05//}, pages = {257 - 266}, abstract = {The widespread use of GUIs for interacting with software is leading to the construction of more and more complex GUIs. With the growing complexity come challenges in testing the correctness of a GUI and the underlying software. We present a new technique to automatically generate test cases for GUIs that exploits planning, a well developed and used technique in artificial intelligence. Given a set of operators, an initial state and a goal state, a planner produces a sequence of the operators that will change the initial state to the goal state. Our test case generation technique first analyzes a GUI and derives hierarchical planning operators from the actions in the GUI. The test designer determines the preconditions and effects of the hierarchical operators, which are then input into a planning system. With the knowledge of the GUI and the way in which the user will interact with the GUI, the test designer creates sets of initial and goal states. Given these initial and final states of the GUI, a hierarchical planner produces plans, or a set of test cases, that enable the goal state to be reached. Our technique has the additional benefit of putting verification commands into the test cases automatically. We implemented our technique by developing the GUI analyzer and extending a planner. We generated test cases for Microsoft{\textquoteright}s WordPad to demonstrate the viability and practicality of the approach.}, keywords = {Artificial intelligence, automatic test case generation, goal state, goal-driven approach, Graphical user interfaces, GUIs, hierarchical planning operators, initial state, Microsoft WordPad, operators, planning (artificial intelligence), program testing, software, verification commands}, author = {Memon, Atif M. and Pollack,M. E and Soffa,M. L} } @conference {18754, title = {3D spatial layouts using a-teams}, year = {1998}, month = {1998///}, abstract = {Spatial layout is the problem of arranging a set of components in an enclosure such that a set of objectives and constraints is satisfied. The constraints may include non-interference of objects, accessibility requirements and connection cost limits.
Spatial layout problems are found primarily in the domains of electrical engineering and mechanical engineering in the design of integrated circuits and mechanical or electromechanical artifacts. Traditional approaches include ad-hoc (or specialized) heuristics, Genetic Algorithms and Simulated Annealing. The A-Teams approach provides a way of synergistically combining these approaches in a modular agent based fashion. A-Teams are also open to the addition of new agents. Modifications in the task requirements translate to modifications in the agent mix. In this paper we describe how modular A-Team based optimization can be used to solve 3-dimensional spatial layout problems. }, url = {http://www-2.cs.cmu.edu/afs/cs.cmu.edu/user/cjp/www/pubs/DETC98.pdf}, author = {Sachdev,S. and Paredis,C. J. J. and Gupta,S.K. and Talukdar,S. N.} } @article {15651, title = {Approximating large convolutions in digital images}, journal = {PROCEEDINGS-SPIE THE INTERNATIONAL SOCIETY FOR OPTICAL ENGINEERING}, year = {1998}, month = {1998///}, pages = {216 - 227}, abstract = {Computing discrete two-dimensional convolutions is an important problem in image processing. In mathematical morphology, an important variant is that of computing binary convolutions, where the kernel of the convolution is a 0{\textendash}1 valued function. This operation can be quite costly, especially when large kernels are involved. In this paper, we present an algorithm for computing convolutions of this form, where the kernel of the binary convolution is derived from a convex polygon. Because the kernel is a geometric object, we allow the algorithm some flexibility in how it elects to digitize the convex kernel at each placement, as long as the digitization satisfies certain reasonable requirements. We say that such a convolution is valid. Given this flexibility we show that it is possible to compute binary convolutions more efficiently than would normally be possible for large kernels. Our main result is an algorithm, which given an $m \times n$ image and a $k$-sided convex polygonal kernel, computes a valid convolution in $O(kmn)$ time. Unlike standard algorithms for computing correlations and convolutions, the running time is independent of the area or perimeter of $K$, and our techniques do not rely on computing fast Fourier transforms. Our algorithm is based on a novel use of Bresenham{\textquoteright}s line-drawing algorithm and prefix sums to update the convolution efficiently as the kernel is moved from one position to another across the image. }, author = {Kanungo,T. and Mount, Dave and Netanyahu,N. S and Piatko,C. and Silverman,R. and Wu,A. Y} } @conference {18662, title = {AQuA: an adaptive architecture that provides dependable distributed objects}, booktitle = {Seventeenth IEEE Symposium on Reliable Distributed Systems}, year = {1998}, month = {1998/10//}, pages = {245 - 253}, abstract = {Dependable distributed systems are difficult to build. This is particularly true if they have dependability requirements that change during the execution of an application, and are built with commercial off-the-shelf hardware. In that case, fault tolerance must be achieved using middleware software, and mechanisms must be provided to communicate the dependability requirements of a distributed application to the system and to adapt the system{\textquoteright}s configuration to try to achieve the desired dependability.
The AQuA architecture allows distributed applications to request a desired level of availability using the Quality Objects (QuO) framework and includes a dependability manager that attempts to meet requested availability levels by configuring the system in response to outside requests and changes in system resources due to faults. The AQuA architecture uses the QuO runtime to process and invoke availability requests, the Proteus dependability manager to configure the system in response to faults and availability requests, and the Ensemble protocol stack to provide group communication services. Furthermore, a CORBA interface is provided to application objects using the AQuA gateway. The gateway provides a mechanism to translate between process-level communication, as supported by Ensemble, and IIOP messages, understood by Object Request Brokers. Both active and passive replication are supported, and the replication type to use is chosen based on the performance and dependability requirements of particular distributed applications}, keywords = {adaptive architecture, AQuA, availability requests, client-server systems, commercial off-the-shelf hardware, CORBA, dependability manager, dependability requirements, dependable distributed objects, distributed object management, Ensemble protocol stack, Fault tolerance, group communication services, middleware software, Object Request Brokers, process-level communication, proteus, Quality Objects, replication, software fault tolerance, Software quality}, doi = {10.1109/RELDIS.1998.740506}, author = {Cukier, Michel and Ren,J. and Sabnis,C. and Henke,D. and Pistole,J. and Sanders,W. H. and Bakken,D. E. and Berman,M.E. and Karr,D. A. and Schantz,R.E.} } @conference {16188, title = {Bringing treasures to the surface: previews and overviews in a prototype for the Library of Congress National Digital Library}, booktitle = {CHI 98 conference summary on Human factors in computing systems}, series = {CHI {\textquoteright}98}, year = {1998}, month = {1998///}, pages = {187 - 188}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {Browsing, digital libraries, metadata, multimedia, overviews, previews, World Wide Web}, isbn = {1-58113-028-7}, doi = {10.1145/286498.286665}, url = {http://doi.acm.org/10.1145/286498.286665}, author = {Plaisant, Catherine and Marchionini,Gary and Komlodi,Anita} } @conference {18740, title = {Capturing articulation in assemblies from component geometry}, year = {1998}, month = {1998///}, abstract = {This paper presents a method to extract instantaneous articulation from part geometry, based on surface mating constraints as well as constraints imposed by other incidental contacts. Many assemblies contain joints, each of which has degrees of freedom associated with them. These joints allow the relative positions of parts in the mechanism to change as the joints are articulated. Being able to represent these joints and their behavior is important from the designer{\textquoteright}s perspective because it enables him or her to verify whether kinematic requirements have been met. Therefore, it is useful to be able to obtain such joint information directly from part geometry and contact physics. The method presented here handles all lower pairs of kinematic joints. Surface mating contacts are classified into one of three types: planar, spherical and cylindrical. The contacts are represented by algebraic inequalities describing the translational and angular velocities at the contact.
Non-penetration conditions are written for a finite set of points on the boundary of each contact face, and it is shown that the finite set of conditions is representative of the entire boundary and the region enclosed by the boundary. Simultaneous satisfaction of the non-penetration conditions at all the contact surfaces between a pair of bodies is represented by a 6-dimensional simplex, which can be solved using linear programming. }, url = {http://www-cgi.cs.cmu.edu/afs/cs.cmu.edu/user/cjp/www/pubs/DAC98.pdf}, author = {Sinha,R. and Paredis,C. J. J. and Gupta,S.K. and Khosla,P. K.} } @article {16343, title = {Comparing detection methods for software requirements inspections: A replication using professional subjects}, journal = {Empirical Software Engineering}, volume = {3}, year = {1998}, month = {1998///}, pages = {355 - 379}, author = {Porter, Adam and Votta,L.} } @article {16574, title = {Computational Models for the Formation of Protocell Structures}, journal = {Artificial Life}, volume = {4}, year = {1998}, month = {1998///}, pages = {61 - 77}, author = {Peng,L. E.Y and Reggia, James A.} } @article {16351, title = {Constraint-based array dependence analysis}, journal = {ACM Transactions on Programming Languages and Systems}, volume = {20}, year = {1998}, month = {1998/05//}, pages = {635 - 678}, isbn = {01640925}, doi = {10.1145/291889.291900}, url = {http://dl.acm.org/citation.cfm?id=291889.291900}, author = {Pugh, William and Wonnacott,David} } @article {16029, title = {Conversational Adequacy: Mistakes are the Essence}, volume = {UMIACS-TR-96-41}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {We argue that meta-dialog and meta-reasoning, far from being of only occasional use, are the very essence of conversation and communication between agents. We give four paradigm examples of massive use of meta-dialog where only limited base dialog may be present, and use these to bolster our claim of centrality for meta-dialog. We further illustrate this with related work in active logics. We argue moreover that there may be a core set of meta-dialog principles that is in some sense complete. If we are right, then implementing such a set would be of considerable interest. We give examples of existing computer programs that converse inadequately according to our guidelines.
(Also cross-referenced as UMIACS-TR-96-41) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/824}, author = {Perlis, Don and Purang,Khemdut} } @conference {16191, title = {Data object and label placement for information abundant visualizations}, booktitle = {Proceedings of the 1998 workshop on New paradigms in information visualization and manipulation}, series = {NPIV {\textquoteright}98}, year = {1998}, month = {1998///}, pages = {41 - 48}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {control panel, data object placement, Information Visualization, label placement, metrics, timelines, visual feedback}, isbn = {1-58113-179-8}, doi = {10.1145/324332.324341}, url = {http://doi.acm.org/10.1145/324332.324341}, author = {Li,Jia and Plaisant, Catherine and Shneiderman, Ben} } @inbook {16255, title = {De-amortization of Algorithms}, booktitle = {Computing and Combinatorics}, series = {Lecture Notes in Computer Science}, volume = {1449}, year = {1998}, month = {1998///}, pages = {4 - 14}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {De-amortization aims to convert algorithms with excellent overall speed, $f(n)$ for performing $n$ operations, into algorithms that take no more than $O(f(n)/n)$ steps for each operation. The paper reviews several existing techniques for de-amortization of algorithms.}, isbn = {978-3-540-64824-6}, url = {http://dx.doi.org/10.1007/3-540-68535-9_4}, author = {Rao Kosaraju,S. and Pop, Mihai}, editor = {Hsu,Wen-Lian and Kao,Ming-Yang} } @article {16026, title = {Defaults Denied}, volume = {UMIACS-TR-96-61}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {We take a tour of various themes in default reasoning, examining new ideas as well as those of Brachman, Delgrande, Poole, and Schlechta. An underlying issue is that of stating that a potential default principle is not appropriate. We see this arise most dramatically as a problem in an attempt to formalize what are often loosely called "prototypes", although it also arises in other formal approaches to default reasoning. Some formalisms in the literature provide solutions but not without costs. We propose a formalism that appears to avoid these costs; it can be seen as a step toward a population-based set-theoretic modification of these approaches, that may ultimately provide a closer tie to recent work on statistical (quantitative) foundations of (qualitative) defaults ([1]). Our analysis in particular indicates the need to resolve a conflation between use and mention in many default formalisms. Our treatment proposes such a resolution, and also explores the use of sets toward a more population-based notion of default. (Also cross-referenced as UMIACS-TR-96-61) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/841}, author = {Miller,Michael and Perlis, Don and Purang,Khemdut} } @article {16183, title = {The Design of a Telepathology Workstation: Exploring Remote Images}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {Dynamic telepathology uses a tele-operated microscope to allow pathologists to view samples at a remote location. However, time delays introduced by remote operation have made use of a commercial dynamic telepathology system difficult and frustrating.
This paper describes experiments to evaluate and redesign the user interface. We also make recommendations for further automation to support the pathology process and increase the usefulness of the system. Copyright, 1994, by David Carr, Catherine Plaisant, and Hiroaki Hasegawa. All rights reserved. (Also cross-referenced as CAR-TR-708) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/408}, author = {Carr,David and Plaisant, Catherine and Hasegawa,Hiroaki} } @article {16176, title = {Designing a real-time telepathology workstation to mitigate communication delays}, journal = {Interacting with Computers}, volume = {11}, year = {1998}, month = {1998/12/01/}, pages = {33 - 52}, abstract = {Dynamic telepathology uses a teleoperated microscope to allow pathologists to view samples at a remote location. However, time delays introduced by remote operation have made use of a commercial dynamic telepathology system difficult and frustrating. This paper describes the iterative redesign of the user interface. We redesigned the interface, conducted experiments to evaluate the improvements, and then redesigned the user interface based on the results. Our work shows that predictive displays and local maps improve user control of the microscope and increase user comfort with the system. It also indicates that some degree of automation is necessary to support the navigation process and increase the overall usefulness of the system. Therefore, we also make recommendations for further automation to support the telepathology process and increase the usefulness of the system. While performed on a specific device using a dedicated communications system, the same problems would be encountered in other environments. For example, Internet-based systems that enable remote control or require browsing of large images will need to compensate for time delays and can benefit from our experience with the telepathology application. }, keywords = {Iterative design, Predictive displays, Remote control, Supervisory control, Telepathology, Time delays}, isbn = {0953-5438}, doi = {10.1016/S0953-5438(98)00032-0}, url = {http://www.sciencedirect.com/science/article/pii/S0953543898000320}, author = {Carr,David and Plaisant, Catherine and Hasegawa,Hiroaki} } @article {17076, title = {Designing to facilitate browsing: A look back at the Hyperties workstation browser}, year = {1998}, month = {1998/10/15/}, abstract = {Since browsing hypertext can present a formidable cognitive challenge, user interface design plays a major role in determining acceptability. In the Unix workstation version of Hyperties, a research-oriented prototype, we focussed on design features that facilitate browsing. We first give a general overview of Hyperties and its markup language. Customizable documents can be generated by the conditional text feature that enables dynamic and selective display of text and graphics. In addition we present: - an innovative solution to link identification: pop-out graphical buttons of arbitrary shape. - application of pie menus to permit low cognitive load actions that reduce the distraction of common actions, such as page turning or window selection. - multiple window selection strategies that reduce clutter and housekeeping effort. We preferred piles-of-tiles, in which standard-sized windows were arranged in a consistent pattern on the display and actions could be done rapidly, allowing users to concentrate on the contents.
(Also cross-referenced as CAR-TR-494) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/362}, author = {Shneiderman, Ben and Plaisant, Catherine and Botafogo,Rodrigo and Hopkins,Don and Weiland,William} } @conference {17094, title = {Distance learning: is it the end of education as most of us know it?}, booktitle = {CHI 98 conference summary on Human factors in computing systems}, series = {CHI {\textquoteright}98}, year = {1998}, month = {1998///}, pages = {86 - 87}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {distance learning, education, Internet, Web}, isbn = {1-58113-028-7}, doi = {10.1145/286498.286542}, url = {http://doi.acm.org/10.1145/286498.286542}, author = {Laurillard,Diana and Preece,Jenny and Shneiderman, Ben and Neal,Lisa and W{\ae}rn,Yvonne} } @inbook {16258, title = {Drawing of Two-Dimensional Irregular Meshes}, booktitle = {Graph Drawing}, series = {Lecture Notes in Computer Science}, volume = {1547}, year = {1998}, month = {1998///}, pages = {1 - 14}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We present a method for transforming two-dimensional irregular meshes into square meshes with only a constant blow up in area. We also explore context invariant transformations of irregular meshes into square meshes and provide a lower bound for the transformation of down-staircases.}, isbn = {978-3-540-65473-5}, url = {http://dx.doi.org/10.1007/3-540-37623-2_1}, author = {Aggarwal,Alok and Rao Kosaraju,S. and Pop, Mihai}, editor = {Whitesides,Sue} } @article {16181, title = {Exploring LifeLines to Visualize Patient Records}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {LifeLines provide a general visualization environment for personal histories. We explored its use for medical patient records. A one screen overview of the record using timelines provides direct access to the data. Problems, hospitalization and medications can be represented as horizontal lines, while icons represent discrete events such as physician consultations (and progress notes) or tests. Line color and thickness can illustrate relationships or significance. Techniques are described to display large records. Rescaling tools and filters allow users to focus on part of the information, revealing more details. Computerized medical records pose tremendous problems to system developers. Infrastructure and privacy issues need to be resolved before physicians can even start using the records. Non-intrusive hardware is required for physicians to do their work (i.e. interview patients) away from their desk and cumbersome workstations. But all the efforts to solve those problems will only succeed if appropriate attention is also given to the user interface design [1][8]. Long lists to scroll, clumsy search, endless menus and lengthy dialogs will lead to user rejection. But techniques are being developed to summarize, filter and present large amounts of information, leading us to believe that rapid access to needed data is possible with careful design. While more attention is now put on developing standards for gathering medical records we found that very little effort had been made to design appropriate visualization and navigation techniques to present and explore personal history records. An intuitive approach to visualizing histories is to use graphical time series.
The consistent, linear time scale allows comparisons and relations between the quantities displayed. Data can be graphed on the timeline to show time series of quantitative data. Highly interactive interfaces turn the display into a meaningfully structured menu with direct access to the data needed to review a problem or conduct the diagnosis. Also cross-referenced as CAR-TR-819 }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/455}, author = {Plaisant, Catherine and Rose,Anne} } @article {16178, title = {Facilitating Network Data Exploration with Query Previews: A Study of User Performance and Preference}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {Current network data exploration systems which use command languages (e.g. SQL) or form fill-in interfaces fail to give users an indication of the distribution of data items. This leads many users to waste time posing queries which have zero-hit or mega-hit result sets. Query previewing is a novel visual approach for browsing huge networked information warehouses. Query previews supply data distribution information about the database that is being searched and give continuous feedback about the size of the result set for the query as it is being formed. Our within-subjects empirical comparison studied 12 subjects using a form fill-in interface with and without query previews. We found statistically significant differences showing that query previews sped up performance 1.6 to 2.1 times and led to higher subjective satisfaction. (Also cross-referenced as UMIACS-98-14) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/488}, author = {Tanin,Egemen and Lotem,Amnon and Haddadin,Ihab and Shneiderman, Ben and Plaisant, Catherine and Slaughter,Laura} } @article {18640, title = {Frequentist and Bayesian Coverage Estimations for Stratified Fault-Injection}, journal = {DEPENDABLE COMPUTING AND FAULT TOLERANT SYSTEMS}, volume = {11}, year = {1998}, month = {1998///}, pages = {43 - 62}, abstract = {This paper addresses the problem of estimating the coverage of fault tolerance through statistical processing of observations collected in fault-injection experiments. In an earlier paper, we have studied various frequentist estimation methods based on simple sampling in the whole fault/activity input space and stratified sampling in a partitioned space. In this paper, Bayesian estimation methods are introduced for stratified sampling. Two methods are presented to obtain an approximation of the posterior distribution of the coverage by calculating its moments. The moments are then used to identify the type of the distribution in the Pearson distribution system, to estimate its parameters and to obtain the coverage confidence limit. Two hypothetical example systems are used to compare the validity and the conservatism of the Bayesian and frequentist estimations. It is shown that one Bayesian estimation method is valid for both examples and that its estimations are much less conservative than the frequentist ones. However, the Bayesian estimations for stratified sampling are still conservative compared to estimations for simple sampling. }, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.29.6784\&rep=rep1\&type=pdf}, author = {Michel Cukier and Arlat,J.
and Powell,D.} } @article {16182, title = {Incorporating String Search in a Hypertext System: User Interface and Signature File Design Issues}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {Hypertext systems provide an appealing mechanism for informally browsing databases by traversing selectable links. However, in many fact finding situations string search is an effective complement to browsing. This paper describes the application of the signature file method to achieve rapid and convenient string search in small personal computer hypertext environments. The method has been implemented in a prototype, as well as in a commercial product. Performance data for search times and storage space are presented from a commercial hypertext database. User interface issues are then discussed. Experience with the string search interface indicates that it was used successfully by novice users. (Also cross-referenced as CAR-TR-448) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/359}, author = {Faloutsos,Christos and Lee,Raymond and Plaisant, Catherine and Shneiderman, Ben} } @article {16184, title = {An information architecture to support the visualization of personal histories}, journal = {Information Processing \& Management}, volume = {34}, year = {1998}, month = {1998/09//}, pages = {581 - 597}, abstract = {This paper proposes an information architecture for personal history data and describes how the data model can be extended to a runtime model for a compact visualization using graphical timelines. Our information architecture was developed for juvenile justice and medical patient records, but is usable in other application domains such as personal resumes, financial histories, or customer support. Our model groups personal history events into aggregates that are contained in facets (e.g., doctor visits, hospitalizations, or lab tests). Crosslinks enable representation of arbitrary relationships across events and aggregates. Data attributes, such as severity, can be mapped by data administrators to visual attributes such as color and line thickness. End-users have powerful controls over the display contents, and they can modify the mapping to fit their tasks.}, keywords = {Graphical user interfaces, Information Visualization, LifeLines, medical patient record, personal histories, temporal data, timelines}, isbn = {0306-4573}, doi = {10.1016/S0306-4573(98)00024-7}, url = {http://www.sciencedirect.com/science/article/pii/S0306457398000247}, author = {Plaisant, Catherine and Shneiderman, Ben and Mushlin,Rich} } @conference {16186, title = {Information visualization advanced interface and Web design}, booktitle = {CHI 98 conference summary on Human factors in computing systems}, series = {CHI {\textquoteright}98}, year = {1998}, month = {1998///}, pages = {145 - 146}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {1-58113-028-7}, doi = {10.1145/286498.286625}, url = {http://doi.acm.org/10.1145/286498.286625}, author = {Shneiderman, Ben and Plaisant, Catherine} } @conference {18728, title = {Integrated design and rapid manufacturing over the Internet}, year = {1998}, month = {1998///}, abstract = {An Internet-based infrastructure is being developed in order to provide designers with access to multiple layered-manufacturing services.
The design domain being addressed is that of small mechanisms or electro-mechanical assemblies that would be used in robots or other mechatronic devices. The approach presented relies on the formalization of the data exchange interface between designers and manufacturers. The primary operatives in this system are Design Clients, Manufacturing Services and Process Brokers. The Design Client allows designers to submit completed designs for algorithmic decomposition, or alternately, to compose a design from primitives and library components that have been primed with some process-related information. During this early phase, the Manufacturing Service consists of a highly automated machine that can be used to build ceramic parts, and the associated software components for design decomposition, process planning and machine control. In later phases, multiple service providers will be made accessible. The Process Broker implements a number of supporting services including process selection and optimal part orientation. Future broker services will include manufacturability analysis, directory services and accreditation etc. Currently, this interface is being built and evaluated internally at Stanford and CMU. It will be made available for use by other selected universities in the near future. }, url = {http://cdr.stanford.edu/interface/publications/DETC98CIE-5519.pdf}, author = {Rajagopalan,S. and Pinilla,J. M. and Losleben,P. and Tian,Q. and Gupta,S.K.} } @conference {18739, title = {An intelligent environment for simulating mechanical assembly operations}, year = {1998}, month = {1998///}, pages = {13 - 16}, abstract = {Rapid technical advances in many different areas of scientific computing provide the enabling technologies for creating a comprehensive simulation and visualization environment for assembly design and planning. We have built an intelligent environment in which simple simulations can be composed together to create complex simulations for detecting potential assembly problems. Our goal in this project is to develop high fidelity assembly simulation and visualization tools that can detect assembly related problems without going through physical mock-ups. In addition, these tools can be used to create easy-to-visualize instructions for performing assembly and service operations. }, url = {http://www.cs.cmu.edu/afs/cs.cmu.edu/Web/People/paredis/pubs/DFM98.pdf}, author = {Gupta,S.K. and Paredis,C. J. J. and Sinha,R. and Wang,C. H. and Brown,P. F.} } @article {16185, title = {Interfaces and tools for the library of congress national digital library program}, journal = {Information Processing \& Management}, volume = {34}, year = {1998}, month = {1998/09//}, pages = {535 - 555}, abstract = {This paper describes a collaborative effort to explore user needs in a digital library, develop interface prototypes for a digital library and suggest and prototype tools for digital librarians and users at the Library of Congress (LC). Interfaces were guided by an assessment of user needs and aimed to maximize interaction with primary resources and support both browsing and analytical search strategies. Tools to aid users and librarians in overviewing collections, previewing objects and gathering results were created and serve as the beginnings of a digital librarian toolkit.
The design process and results are described and suggestions for future work are offered.}, isbn = {0306-4573}, doi = {10.1016/S0306-4573(98)00020-X}, url = {http://www.sciencedirect.com/science/article/pii/S030645739800020X}, author = {Marchionini,Gary and Plaisant, Catherine and Komlodi,Anita} } @article {16180, title = {Life cycle of user interface techniques: The DJJ information system design Process}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {To take advantage of today{\textquoteright}s technology, many organizations are migrating from their legacy systems. With help from the Human-Computer Interaction Laboratory (HCIL) and Cognetics Corporation, the Maryland Department of Juvenile Justice (DJJ) is currently undergoing an effort to redesign their information system to take advantage of graphical user interfaces. As a research lab, HCIL identifies interesting research problems and then prototypes solutions. As a project matures, the exploratory prototypes are adapted to suit the end product requirements. This case study describes the life cycle of three DJJ prototypes: (1) LifeLines, which uses time lines to display an overview of a youth in one screen, (2) the DJJ Navigator, which helps manage individual workloads by displaying different user views, and (3) the ProgramFinder, a tool for selecting the best program for a youth. (Also cross-referenced as CAR-TR-826) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/458}, author = {Rose,Anne and Ellis,Jason and Plaisant, Catherine and Greene,Stephan} } @article {17281, title = {LifeLines: using visualization to enhance navigation and analysis of patient records.}, journal = {Proceedings of the AMIA Symposium}, year = {1998}, month = {1998///}, pages = {76 - 80}, abstract = {LifeLines provide a general visualization environment for personal histories. We explore its use for clinical patient records. A Java user interface is described, which presents a one-screen overview of a computerized patient record using timelines. Problems, diagnoses, test results or medications can be represented as dots or horizontal lines. Zooming provides more details; line color and thickness illustrate relationships or significance. The visual display acts as a giant menu, giving direct access to the data.}, isbn = {1531-605X}, author = {Plaisant, Catherine and Mushlin,R. and Snyder,A. and Li,J. and Heller,D.
and Shneiderman, Ben} } @article {12778, title = {Probabilistic resource failure in real-time process algebra}, journal = {CONCUR{\textquoteright}98 Concurrency Theory}, year = {1998}, month = {1998///}, pages = {465 - 472}, author = {Philippou,A. and Cleaveland, Rance and Lee,I. and Smolka,S. and Sokolsky,O.} } @article {17951, title = {Salient Frame Detection for Molecular Dynamics Simulations}, journal = {Scientific Visualization: Interactions, Features, Metaphors}, volume = {2}, year = {1998}, month = {1998///}, pages = {160 - 175}, abstract = {Saliency-based analysis can be applied to time-varying 3D datasets for the purpose of summarization, abstraction, and motion analysis. As the sizes of time-varying datasets continue to grow, it becomes more and more difficult to comprehend vast amounts of data and information in a short period of time. Automatically generated thumbnail images and previewing of time-varying datasets can help viewers explore and understand the datasets significantly faster as well as provide new insights. In this paper, we introduce a novel method for detecting salient frames for molecular dynamics simulations. Our method effectively detects crucial transitions in the simulated mechanosensitive ion channel (MscS), in agreement with experimental data. }, author = {Kim,Y. and Patro,R. and Ip,C. Y and O{\textquoteright}Leary,D. P and Anishkin,A. and Sukharev,S. and Varshney, Amitabh} } @article {16337, title = {Specification-based Testing of Reactive Software: A Case Study in Technology Transfer}, journal = {Journal of Systems and Software}, volume = {40}, year = {1998}, month = {1998///}, pages = {249 - 262}, author = {Jategaonkar Jagadeesan,L. and Porter, Adam and Puchol,C. and Ramming,J. C and Votta,L. G.} } @article {15673, title = {Stabbing Orthogonal Objects in 3-Space}, volume = {UMIACS-TR-96-71}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {We consider a problem that arises in the design of data structures for answering {\em visibility range queries}, that is, given a $3$-dimensional scene defined by a set of polygonal patches, we wish to preprocess the scene to answer queries involving the set of patches of the scene that are visible from a given range of points over a given range of viewing directions. These data structures recursively subdivide space into cells until some criterion is satisfied. One of the important problems that arise in the construction of such data structures is that of determining whether a cell represents a nonempty region of space, and more generally computing the size of a cell. In this paper we introduce a measure of the {\em size} of the subset of lines in 3-space that stab a given set of $n$ polygonal patches, based on the maximum angle and distance between any two lines in the set. Although the best known algorithm for computing this size measure runs in $O(n^2)$ time, we show that if the polygonal patches are orthogonal rectangles, then this measure can be approximated to within a constant factor in $O(n)$ time.
(Also cross-referenced as UMIACS-TR-96-71) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/850}, author = {Mount, Dave and Pu,Fan-Tao} } @article {16175, title = {Understanding Transportation Management Systems Performance with a Simulation-Based Learning Environment}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/12/05/}, abstract = {We have developed a simulation-based learning environment to provide system designers and operators with an appreciation of the impact of incidents on traffic delay. We used an application framework developed at the University of Maryland for constructing simulation-based learning environments called SimPLE (Simulated Processes in a Learning Environment). Environments developed with SimPLE use dynamic simulations and visualizations to represent realistic time-dependent behavior and are coupled with guidance material and other software aids that facilitate learning. The simulation allows learners to close freeway lanes and divert traffic to an arterial road. Users can see the effect of the detour on freeway and arterial delay. Users can then adjust signal timing interactively on a time space diagram and watch the effect of their adjustment on green band changes and on arterial delays and total delays. }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/500}, author = {Plaisant, Catherine and Tarnoff,Phil and Saraf,Aditya and Rose,Anne} } @article {16024, title = {Updating Discourse Context with Active Logic}, volume = {UMIACS-TR-96-62}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {In this paper we present our implementation of a system of active logic that processes natural language discourses. We focus on problems that involve presupposition and the associated well-known problems of the projection of presupposition. We discuss Heim{\textquoteright}s largely successful theory of presupposition and point out certain limitations. We then use these observations to build our discourse processor based on active logic. Our main contributions are the handling of problems that go beyond the scope of Heim{\textquoteright}s theory, especially discourses that involve cancellation of presupposition. Ongoing work suggests that conversational implicature and the cancellation of implicature can also be treated by our methods. Key words: presupposition, discourse, context, accommodation, active logic, implicature. (Also cross-referenced as UMIACS-TR-96-62) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/842}, author = {Gurney,John and Purang,Khemdut and Perlis, Don} } @article {16177, title = {User Interface Reengineering: A Diagnostic Approach}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {User interface technology has advanced rapidly in recent years. Incorporating new developments in existing systems could result in substantial improvements in usability, thereby improving performance and user satisfaction, while shortening training and reducing error rates. Our focus is on low-effort high-payoff improvements to aspects such as data display and entry, consistency, messages, documentation, and system access.
This paper provides guidelines for managers and designers responsible for user interface reengineering, based on the experience we gained from six projects, and compiles our observations, recommendations and outcomes. (Also cross-referenced as CAR-TR-767) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/430}, author = {Vanniamparampil,Ajit J and Shneiderman, Ben and Plaisant, Catherine and Rose,Anne} } @article {16192, title = {Viewing personal history records: A comparison of tabular format and graphical presentation using LifeLines}, journal = {Behaviour \& Information Technology}, volume = {17}, year = {1998}, month = {1998///}, pages = {249 - 262}, abstract = {Thirty-six participants used a static version of either LifeLines, a graphical interface, or a tabular representation to answer questions about a database of temporal personal history information. Results suggest that overall the LifeLines representation led to much faster response times, primarily for questions which involved interval comparisons and making intercategorical connections. A {\textquoteright}first impression{\textquoteright} test showed that LifeLines can reduce some of the biases of the tabular record summary. A post-experimental memory test led to significantly (p < 0.004) higher recall for LifeLines. Finally, simple interaction techniques are proposed to compensate for the problems of the static LifeLines display{\textquoteright}s ability to deal with precise dates, attribute coding and overlaps. }, isbn = {0144-929X}, doi = {10.1080/014492998119328}, url = {http://www.tandfonline.com/doi/abs/10.1080/014492998119328}, author = {Alonso,Diane Lindwarm and Rose,Anne and Plaisant, Catherine and Norman,Kent L} } @conference {16189, title = {Visualizing medical records with LifeLines}, booktitle = {CHI 98 conference summary on Human factors in computing systems}, series = {CHI {\textquoteright}98}, year = {1998}, month = {1998///}, pages = {28 - 29}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {healthcare, History, medical record, overview, timeline, Visualization}, isbn = {1-58113-028-7}, doi = {10.1145/286498.286513}, url = {http://doi.acm.org/10.1145/286498.286513}, author = {Plaisant, Catherine and Heller,Daniel and Li,Jia and Shneiderman, Ben and Mushlin,Rich and Karat,John} } @conference {13492, title = {A Distributed Management System for Testing Document Image Database Analysis Algorithms}, booktitle = {ICDAR}, year = {1997}, month = {1997///}, pages = {989 - 995}, author = {David Doermann and Sauvola,J. and Haapakoski,S. and Kauniskangas,H. and Seppanen,T.
and Pietikainen,M.} } @conference {16296, title = {Anywhere, Anytime Code Inspections: Using the Web to Remove Inspection Bottlenecks in Large-Scale Software Development}, booktitle = {Software Engineering, International Conference on}, year = {1997}, month = {1997///}, pages = {14 - 14}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {The dissemination of critical information and the synchronization of coordinated activities are critical problems in geographically separated, large-scale, software development. While these problems are not insurmountable, their solutions have varying trade-offs in terms of time, cost and effectiveness. Our previous studies have shown that the inspection interval is typically lengthened because of schedule conflicts among inspectors which delay the (usually) required inspection collection meeting. We present and justify a solution using an intranet web that is both timely in its dissemination of information and effective in its coordination of distributed inspectors. First, exploiting a naturally occurring experiment (reported here), we conclude that the asynchronous collection of inspection results is at least as effective as the synchronous collection of those results. Second, exploiting the information dissemination qualities and the on-demand nature of information retrieval of the web, and the platform independence of browsers, we built an inexpensive tool that integrates seamlessly into the current development process. By seamless we mean an identical paper flow that results in an almost identical inspection process. The acceptance of the inspection tool has been excellent. The cost savings just from the reduction in paper work and the time savings from the reduction in distribution interval of the inspection package (sometimes involving international mailings) have been substantial. These savings together with the seamless integration into the existing environment are the major factors for this acceptance. From our viewpoint as experimentalists, the acceptance came too readily. Therefore we lost our opportunity to explore this tool using a series of controlled experiments to isolate the underlying factors of its effectiveness. Nevertheless, by using historical data we can show that the new process is less expensive in terms of cost and at least as effective in terms of quality (defect detection effectiveness).}, keywords = {asynchronous, naturally occurring inspection experiment, automated support for inspections, code inspections: web-based, meetingless}, doi = {http://doi.ieeecomputersociety.org/10.1109/ICSE.1997.610188}, author = {Perpich,J. M. and Perry,D. E. and Porter, Adam and Votta,L. G. and Wade,M. W.} } @article {16305, title = {Assessing software review meetings: results of a comparative analysis of two experimental studies}, journal = {IEEE Transactions on Software Engineering}, volume = {23}, year = {1997}, month = {1997/03//}, pages = {129 - 145}, abstract = {Software review is a fundamental tool for software quality assurance. Nevertheless, there are significant controversies as to the most efficient and effective review method. One of the most important questions currently being debated is the utility of meetings. Although almost all industrial review methods are centered around the inspection meeting, recent findings call their value into question.
In prior research the authors separately and independently conducted controlled experimental studies to explore this issue. The paper presents new research to understand the broader implications of these two studies. To do this, they designed and carried out a process of {\textquotedblleft}reconciliation{\textquotedblright} in which they established a common framework for the comparison of the two experimental studies, reanalyzed the experimental data with respect to this common framework, and compared the results. Through this process they found many striking similarities between the results of the two studies, strengthening their individual conclusions. It also revealed interesting differences between the two experiments, suggesting important avenues for future research}, keywords = {Aggregates, Collaborative work, Computer Society, Costs, Helium, Inspection, inspection meeting, Job shop scheduling, Programming, reconciliation, Software development management, Software quality, software quality assurance, software review meeting assessment, Software reviews}, isbn = {0098-5589}, doi = {10.1109/32.585501}, author = {Porter, Adam and Johnson,P. M} } @conference {17567, title = {Better approximation guarantees for job-shop scheduling}, booktitle = {Proceedings of the eighth annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}97}, year = {1997}, month = {1997///}, pages = {599 - 608}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-390-0}, url = {http://dl.acm.org/citation.cfm?id=314161.314395}, author = {Goldberg,Leslie Ann and Paterson,Mike and Srinivasan, Aravind and Sweedyk,Elizabeth} } @conference {16195, title = {Bringing treasures to the surface: iterative design for the Library of Congress National Digital Library Program}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems}, series = {CHI {\textquoteright}97}, year = {1997}, month = {1997///}, pages = {518 - 525}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {browse, design process, digital libraries, dynamic query, java, preview, search, web design}, isbn = {0-89791-802-9}, doi = {10.1145/258549.259009}, url = {http://doi.acm.org/10.1145/258549.259009}, author = {Plaisant, Catherine and Marchionini,Gary and Bruns,Tom and Komlodi,Anita and Campbell,Laura} } @article {16194, title = {Browsing hierarchical data with multi-level dynamic queries and pruning}, journal = {International Journal of Human-Computer Studies}, volume = {46}, year = {1997}, month = {1997/01//}, pages = {103 - 124}, abstract = {Users often must browse hierarchies with thousands of nodes in search of those that best match their information needs. The PDQ Tree-browser (Pruning with Dynamic Queries) visualization tool was specified, designed and developed for this purpose. This tool presents trees in two tightly-coupled views, one a detailed view and the other an overview. Users can use dynamic queries, a method for rapidly filtering data, to filter nodes at each level of the tree. The dynamic query panels are user-customizable. Sub-trees of unselected nodes are pruned out, leading to compact views of relevant nodes. Usability testing of the PDQ Tree-browser, done with eight subjects, helped assess strengths and identify possible improvements.
The PDQ Tree-browser was used in Network Management (600 nodes) and UniversityFinder (1100 nodes) applications. A controlled experiment, with 24 subjects, showed that pruning significantly improved performance speed and subjective user satisfaction. Future research directions are suggested.}, isbn = {1071-5819}, doi = {10.1006/ijhc.1996.0085}, url = {http://www.sciencedirect.com/science/article/pii/S1071581996900853}, author = {Kumar,Harsha P. and Plaisant, Catherine and Shneiderman, Ben} } @inbook {17027, title = {Comprehension and object recognition capabilities for presentations of simultaneous video key frame surrogates}, booktitle = {Research and Advanced Technology for Digital Libraries}, series = {Lecture Notes in Computer Science}, volume = {1324}, year = {1997}, month = {1997///}, pages = {41 - 54}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {The demand for more efficient browsing of video data is expected to increase as greater access to this type of data becomes available. This experiment looked at one technique for displaying video data using key frame surrogates that are presented as a {\textquotedblleft}slide show{\textquotedblright}. Subjects viewed key frames for between one and four video clips simultaneously. Following this presentation, the subjects performed object recognition and gist comprehension tasks in order to determine human thresholds for divided attention between these multiple displays. It was our belief that subject performance would degrade as the number of slide shows shown simultaneously increased. For object recognition and gist comprehension tasks, a decrease in performance between the one slide show display and the two, three or four slide show displays was found. In the case of two or three video presentations, performance is about the same, and there remains adequate object recognition abilities and comprehension of the video clips. Performance drops off to unacceptable levels when four slide shows are displayed at once.}, isbn = {978-3-540-63554-3}, url = {http://dx.doi.org/10.1007/BFb0026720}, author = {Slaughters,Laura and Shneiderman, Ben and Marchionini,Gary}, editor = {Peters,Carol and Thanos,Costantino} } @article {15937, title = {Consciousness as self-function}, journal = {Journal of Consciousness Studies}, volume = {4}, number = {5}, year = {1997}, month = {1997///}, pages = {509 - 525}, author = {Perlis, Don} } @conference {13529, title = {Content-based Image Retrieval Using Composite Features}, booktitle = {Proceedings of the 1997 Scandinavian Conference on Image Analysis}, year = {1997}, month = {1997///}, pages = {35 - 42}, abstract = {In this paper, we demonstrate methods for content-based image retrieval problems utilizing a specially designed retrieval architecture implemented within the {\textquoteright}Intelligent Image Retrieval{\textquoteright} system (IIR). The method consists of new image features in the retrieval context, including segmentation methods and use of image frames. They are combined in a unique way with color, texture, shape and localization information with a special data ion construction in a graphical user interface. The IIR system provides an efficient retrieval architecture utilizing a tailored query language, retrieval mechanisms and an object-oriented database enabling the use of complex data structures and relations needed for successful query processing.
Functionality and performance of methods and architecture are illustrated with a series of tests using a database that consists of several hundred {\textquoteright}scene{\textquoteright} images.}, author = {Kauniskangas,H. and Sauvola,J. and Pietikainen,M. and David Doermann} } @article {18382, title = {Editorial: Evaluation and assessment in software engineering}, journal = {Information and Software Technology}, volume = {39}, year = {1997}, month = {1997///}, author = {Kitchenham,B. and Brereton,P. and Budgen,D. and Linkman,S. and Almstrum,V. L and Pfleeger,S. L and Zelkowitz, Marvin V and Wallace,D.} } @article {16297, title = {An experiment to assess the cost-benefits of code inspections in large scale software development}, journal = {IEEE Transactions on Software Engineering}, volume = {23}, year = {1997}, month = {1997/06//}, pages = {329 - 346}, abstract = {We conducted a long term experiment to compare the costs and benefits of several different software inspection methods. These methods were applied by professional developers to a commercial software product they were creating. Because the laboratory for this experiment was a live development effort, we took special care to minimize cost and risk to the project, while maximizing our ability to gather useful data. The article has several goals: (1) to describe the experiment{\textquoteright}s design and show how we used simulation techniques to optimize it; (2) to present our results and discuss their implications for both software practitioners and researchers; and (3) to discuss several new questions raised by our findings. For each inspection, we randomly assigned three independent variables: (1) the number of reviewers on each inspection team (1, 2, or 4); (2) the number of teams inspecting the code unit (1 or 2); and (3) the requirement that defects be repaired between the first and second team{\textquoteright}s inspections. The reviewers for each inspection were randomly selected without replacement from a pool of 11 experienced software developers. The dependent variables for each inspection included inspection interval (elapsed time), total effort, and the defect detection rate. Our results showed that these treatments did not significantly influence the defect detection effectiveness, but that certain combinations of changes dramatically increased the inspection interval}, keywords = {Analysis of variance, code inspection cost benefits, code unit, commercial software product, Computer Society, cost-benefit analysis, Costs, defect detection effectiveness, defect detection rate, Design optimization, experienced software developers, experiment design, independent variables, Inspection, inspection interval, inspection team, Laboratories, large scale software development, Large-scale systems, live development effort, long term experiment, professional aspects, professional developers, Programming, reviewers, simulation techniques, software cost estimation, software inspection methods, software practitioners, Software quality, Switches}, isbn = {0098-5589}, doi = {10.1109/32.601071}, author = {Porter, Adam and Siy,H. P and Toman,C. A and Votta,L. G.} } @article {14220, title = {Families of Stationary Patterns Producing Illusory Movement: Insights into the Visual System}, journal = {Proceedings of the Royal Society of London. Series B: Biological Sciences},
volume = {264}, year = {1997}, month = {1997/06/22/}, pages = {795 - 806}, abstract = {A computational explanation of the illusory movement experienced upon extended viewing of Enigma, a static figure painted by Leviant, is presented. The explanation relies on a model for the interpretation of three{\textendash}dimensional motion information contained in retinal motion measurements. This model shows that the Enigma figure is a special case of a larger class of figures exhibiting the same illusory movement and these figures are introduced here. Our explanation suggests that eye movements and/or accommodation changes cause weak retinal motion signals, which are interpreted by higher{\textendash}level processes in a way that gives rise to these illusions, and proposes a number of new experiments to unravel the functional structure of the motion pathway.}, isbn = {0962-8452, 1471-2954}, doi = {10.1098/rspb.1997.0112}, url = {http://rspb.royalsocietypublishing.org/content/264/1383/795}, author = {Ferm{\"u}ller, Cornelia and Pless,Robert and Aloimonos, J.} } @article {16321, title = {Fundamental laws and assumptions of software maintenance}, journal = {Empirical Software Engineering}, volume = {2}, year = {1997}, month = {1997///}, pages = {119 - 131}, author = {Porter, Adam} } @conference {13561, title = {Graphical Tools and Techniques for Querying Document Databases}, booktitle = {BSDIA}, year = {1997}, month = {1997///}, pages = {213 - 224}, author = {Sauvola,J. and David Doermann and Kauniskangas,H. and Shin,C. and Koivusaari,M. and Pietikainen,M.} } @article {15952, title = {How to (plan to) meet a deadline between now and then}, journal = {Journal of Logic and Computation}, volume = {7}, year = {1997}, month = {1997///}, pages = {109 - 109}, author = {Nirkhe,M. and Kraus,S. and Miller,M. and Perlis, Don} } @conference {16328, title = {If your version control system could talk}, booktitle = {ICSE Workshop on Process Modelling and Empirical Studies of Software Engineering}, year = {1997}, month = {1997///}, author = {Ball,T. and Kim,J. M and Porter, Adam and Siy,H. P} } @conference {17543, title = {Implementing a performance forecasting system for metacomputing: the Network Weather Service}, booktitle = {Proceedings of the 1997 ACM/IEEE conference on Supercomputing (CDROM)}, series = {Supercomputing {\textquoteright}97}, year = {1997}, month = {1997///}, pages = {1 - 19}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In this paper we describe the design and implementation of a system called the Network Weather Service (NWS) that takes periodic measurements of deliverable resource performance from distributed networked resources, and uses numerical models to dynamically generate forecasts of future performance levels. These performance forecasts, along with measures of performance fluctuation (e.g. the mean square prediction error) and forecast lifetime that the NWS generates, are made available to schedulers and other resource management mechanisms at runtime so that they may determine the quality-of-service that will be available from each resource. We describe the architecture of the NWS and implementations that we have developed and are currently deploying for the Legion [13] and Globus/Nexus [7] metacomputing infrastructures. We also detail NWS forecasts of resource performance using both the Legion and Globus/Nexus implementations. Our results show that simple forecasting techniques substantially outperform measurements of current conditions (commonly used to gauge resource availability and load) in terms of prediction accuracy. In addition, the techniques we have employed are almost as accurate as substantially more complex modeling methods. We compare our techniques to a sophisticated time-series analysis system in terms of forecasting accuracy and computational complexity.}, isbn = {0-89791-985-8}, doi = {10.1145/509593.509600}, url = {http://doi.acm.org/10.1145/509593.509600}, author = {Wolski,Rich and Spring, Neil and Peterson,Chris} }
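The simple forecasting techniques this abstract credits can be approximated by a mix-of-experts loop: keep several cheap predictors over a sliding window of measurements and answer each request with the predictor whose accumulated squared prediction error is currently lowest. The Python sketch below is a simplified reconstruction under that reading, not the NWS code; the predictor set and window size are invented:

```python
# Simplified reconstruction of NWS-style forecasting (not the NWS code):
# run several cheap predictors over a sliding window and always forecast
# with the one whose accumulated squared prediction error is lowest.

from collections import deque

PREDICTORS = {
    "last":   lambda h: h[-1],                   # most recent measurement
    "mean":   lambda h: sum(h) / len(h),         # window mean
    "median": lambda h: sorted(h)[len(h) // 2],  # window median
}

def forecast_stream(measurements, window=10):
    history = deque(maxlen=window)
    sq_err = dict.fromkeys(PREDICTORS, 0.0)
    for x in measurements:
        if history:
            h = list(history)
            preds = {name: f(h) for name, f in PREDICTORS.items()}
            yield preds[min(sq_err, key=sq_err.get)]  # current best expert
            for name, p in preds.items():             # rescore every expert
                sq_err[name] += (p - x) ** 2
        history.append(x)

print(list(forecast_stream([10, 12, 11, 30, 28, 29], window=3)))
```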
@article {15987, title = {Interpreting presuppositions using active logic: From contexts to utterances}, journal = {Computational Intelligence}, volume = {13}, year = {1997}, month = {1997///}, pages = {391 - 413}, author = {Gurney,J. and Perlis, Don and Purang,K.} } @conference {16348, title = {Iteration space slicing and its application to communication optimization}, booktitle = {Proceedings of the 11th international conference on Supercomputing}, year = {1997}, month = {1997///}, pages = {221 - 228}, author = {Pugh, William and Rosser,E.} } @conference {12217, title = {KidPad: a design collaboration between children, technologists, and educators}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems}, year = {1997}, month = {1997///}, pages = {463 - 470}, author = {Druin, Allison and Stewart,J. and Proft,D. and Bederson, Benjamin B. and Hollan,J.} } @article {13586, title = {Locally adaptive document skew detection}, journal = {Proceedings of SPIE}, volume = {3027}, year = {1997}, month = {1997/04/03/}, pages = {96 - 108}, abstract = {This paper proposes a new approach to the detection of local orientation and skew in document images. It is based on the observation that there are many documents where a single global estimate of the page skew is not sufficient. These documents require local adaptation to deal robustly with today{\textquoteright}s complex configurations of components on the page. The approach attempts to identify regions in the image which exhibit locally consistent physical properties and consistent orientation. To do this, we rapidly compute a coarse segmentation and delineate regions which differ with respect to layout and/or physical content. Each region is classified as text, graphics, mixed text/graphics, image or background using local features and additional features are extracted to estimate orientation. The local orientation decisions are propagated where appropriate to resolve ambiguity and to produce a global estimate of the skew for the page. The implementation of our algorithms is demonstrated on a set of images which have multiple regions with different orientations.}, isbn = {0277786X}, doi = {doi:10.1117/12.270063}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/3027/1/96_1?isAuthorized=no}, author = {Sauvola,Jaakko J and David Doermann and Pietikaeinen,Matti} } @article {17283, title = {Low-effort, high-payoff user interface reengineering}, journal = {IEEE Software}, volume = {14}, year = {1997}, month = {1997/08//Jul}, pages = {66 - 72}, abstract = {Although increasingly sophisticated design methodologies for developing new user interfaces exist, low-effort, high-payoff user interface reengineering represents a new direction{\textemdash}and opportunity.
Yet reengineering a working system is complex and risky because of the potential disruption to users and managers, their justifiable fear of change, and the lack of guarantees that such changes will be for the better. Our largely positive experiences with the projects described here lead us to believe that user interface reengineering is a viable and important process. Low effort, high-payoff improvement recommendations can probably be made for most existing systems. Nevertheless, a narrowly focused user interface reengineering plan may be inappropriate when the major problems lie outside the scope of the user interface, such as inadequate functionalities, frequent crashes, and network problems. Attempts at improving less severe problems while ignoring deeper ones may be perceived as insensitive by the users. In such cases it is important to consider either making similar short-term improvements for other parts of the systems or postponing short-term user interface reengineering in favour of a more complete system reengineering. Similarly, the need for interface stability might outweigh the benefits of the short-term improvements if a complete reengineering is planned for the near future. But most likely these proposed diagnostic strategies and opportunities for improvement are only a prelude to the much larger task of business reengineering, which implies extensive user interface reengineering}, keywords = {Business process re-engineering, complete system reengineering, Design methodology, Error analysis, Hardware, inadequate functionalities, interface stability, iterative methods, low-effort high-payoff user interface reengineering, short-term improvements, short-term user interface reengineering, software engineering, Software testing, System analysis and design, System testing, systems re-engineering, User centered design, user centred design, User interfaces}, isbn = {0740-7459}, doi = {10.1109/52.595958}, author = {Plaisant, Catherine and Rose,A. and Shneiderman, Ben and Vanniamparampil,A. J} } @article {15608, title = {Parallelizing an algorithm for visibility on polyhedral terrain}, journal = {International Journal of Computational Geometry and Applications}, volume = {7}, year = {1997}, month = {1997///}, pages = {75 - 84}, abstract = {The best known output-sensitive sequential algorithm for computing the viewshed on a polyhedral terrain from a given viewpoint was proposed by Katz, Overmars, and Sharir [10], and achieves time complexity $O((k + n\alpha(n)) \log n)$ where $n$ and $k$ are the input and output sizes respectively, and $\alpha(\cdot)$ is the inverse Ackermann function. In this paper, we present a parallel algorithm that is based on the work mentioned above, and achieves $O(\log^2 n)$ time complexity, with work complexity $O((k + n\alpha(n)) \log n)$ in a CREW PRAM model. This improves on previous parallel complexity while maintaining work efficiency with respect to the best sequential complexity known. }, author = {Teng,Y. A and Mount, Dave and Puppo,E. and Davis, Larry S.} } @article {17729, title = {Perturbation analysis for the QR decomposition}, journal = {SIAM Journal on Matrix Analysis and Applications}, volume = {18}, year = {1997}, month = {1997///}, pages = {775 - 791}, author = {Chang,X. C. and Paige,C. C. and Stewart, G.W.} }
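The QR entry above carries no abstract, but the topic invites a quick generic illustration (first-order intuition, not the authors' analysis): the change in the computed factors under a small perturbation $E$ of $A$ scales roughly with $\kappa_2(A)\|E\|/\|A\|$. A numpy check of that scaling:

```python
# Generic numerical illustration (not the paper's analysis): the change
# in R under a small perturbation E of A scales roughly with the
# condition number kappa_2(A) times the relative size of E.

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
E = 1e-8 * rng.standard_normal((5, 5))

_, R1 = np.linalg.qr(A)
_, R2 = np.linalg.qr(A + E)

rel_change = np.linalg.norm(R2 - R1) / np.linalg.norm(R1)
scale = np.linalg.cond(A) * np.linalg.norm(E) / np.linalg.norm(A)
print(f"relative change in R: {rel_change:.2e}  first-order scale: {scale:.2e}")
```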
@article {16199, title = {Query previews for networked information systems: a case study with NASA environmental data}, journal = {SIGMOD Record}, volume = {26}, year = {1997}, month = {1997///}, pages = {75 - 81}, abstract = {Formulating queries on networked information systems is laden with problems: data diversity, data complexity, network growth, varied user base, and slow network access. This paper proposes a new approach to a network query user interface which consists of two phases: query preview and query refinement. This new approach is based on dynamic queries and tight coupling, guiding users to rapidly and dynamically eliminate undesired items, reduce the data volume to a manageable size, and refine queries locally before submission over a network. A two-phase dynamic query system for NASA{\textquoteright}s Earth Observing Systems--Data Information Systems (EOSDIS) is presented. The prototype was well received by the team of scientists who evaluated the interface. }, author = {Doan,K. and Plaisant, Catherine and Shneiderman, Ben and Bruns,T.} } @conference {16196, title = {Query previews in networked information systems: the case of EOSDIS}, booktitle = {CHI {\textquoteright}97 extended abstracts on Human factors in computing systems: looking to the future}, series = {CHI EA {\textquoteright}97}, year = {1997}, month = {1997///}, pages = {202 - 203}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Dynamic queries have been shown to be an effective technique to browse information, and to find patterns and exceptions. Dynamic queries involve the interactive control by a user of visual query parameters that generate rapid (100 ms update), animated, and visual displays of database search results. The data of early implementations was stored in local memory to guarantee optimal speed. Problems arise when the data is very large and distributed over a network. To overcome the problems of slow networks and data volume we propose a two-phase approach to query formulation using query previews and query refinements [1]. Preview mechanisms have been used in the past [2] and we believe that their use will be a major component of successful networked information systems interfaces (e.g. [3]).}, keywords = {direct manipulation, dynamic query, earth science, network information system, query preview, Visualization}, isbn = {0-89791-926-2}, doi = {10.1145/1120212.1120343}, url = {http://doi.acm.org/10.1145/1120212.1120343}, author = {Plaisant, Catherine and Bruns,Tom and Shneiderman, Ben and Doan,Khoa} } @article {17647, title = {Randomized Distributed Edge Coloring via an Extension of the Chernoff--Hoeffding Bounds}, journal = {SIAM Journal on Computing}, volume = {26}, year = {1997}, month = {1997///}, pages = {350 - 350}, abstract = {Certain types of routing, scheduling, and resource-allocation problems in a distributed setting can be modeled as edge-coloring problems. We present fast and simple randomized algorithms for edge coloring a graph in the synchronous distributed point-to-point model of computation. Our algorithms compute an edge coloring of a graph $G$ with $n$ nodes and maximum degree $\Delta$ with at most $1.6 \Delta + O(\log^{1+ \delta} n)$ colors with high probability (arbitrarily close to 1) for any fixed $\delta > 0$; they run in polylogarithmic time. The upper bound on the number of colors improves upon the $(2 \Delta - 1)$-coloring achievable by a simple reduction to vertex coloring. To analyze the performance of our algorithms, we introduce new techniques for proving upper bounds on the tail probabilities of certain random variables. The Chernoff--Hoeffding bounds are fundamental tools that are used very frequently in estimating tail probabilities. However, they assume stochastic independence among certain random variables, which may not always hold. Our results extend the Chernoff--Hoeffding bounds to certain types of random variables which are not stochastically independent. We believe that these results are of independent interest and merit further study. }, isbn = {00975397}, doi = {10.1137/S0097539793250767}, url = {http://link.aip.org/link/SMJCAT/v26/i2/p350/s1\&Agg=doi}, author = {Panconesi,Alessandro and Srinivasan, Aravind} }
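For contrast with the $1.6 \Delta + O(\log^{1+\delta} n)$ bound above, the baseline the abstract mentions is worth seeing in code. This Python sketch implements a simple $(2\Delta - 1)$-palette randomized round (an assumed textbook baseline, not the paper's algorithm): every uncolored edge proposes a color unused by adjacent colored edges and keeps it only if no adjacent edge proposed the same color that round:

```python
# Textbook baseline, not the paper's algorithm: randomized edge coloring
# with a (2*Delta - 1)-color palette in the synchronous model.

import random

def edge_color(edges, delta):
    palette = range(2 * delta - 1)
    color = {e: None for e in edges}
    adj = {e: [f for f in edges if f != e and set(e) & set(f)] for e in edges}
    while any(c is None for c in color.values()):
        proposal = {}
        for e in edges:               # each uncolored edge proposes a color
            if color[e] is None:
                taken = {color[f] for f in adj[e] if color[f] is not None}
                proposal[e] = random.choice([c for c in palette if c not in taken])
        for e, c in proposal.items():
            if all(proposal.get(f) != c and color[f] != c for f in adj[e]):
                color[e] = c          # kept: no adjacent conflict this round
    return color

# A triangle (delta = 2) receives a proper 3-edge-coloring, e.g.
# {(0, 1): 2, (1, 2): 0, (0, 2): 1}.
print(edge_color([(0, 1), (1, 2), (0, 2)], 2))
```

An edge has at most $2\Delta - 2$ neighbors, so the palette always offers a free color, and two adjacent edges can never keep the same color in one round; termination holds with probability 1.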
@article {15976, title = {Sources of, and exploiting, inconsistency: preliminary report}, journal = {JOURNAL OF APPLIED NONCLASSICAL LOGICS}, volume = {7}, year = {1997}, month = {1997///}, pages = {13 - 24}, author = {Perlis, Don} } @conference {16336, title = {Specification-based Testing of Reactive Software: Tools and Experiments}, booktitle = {Software Engineering, International Conference on}, year = {1997}, month = {1997///}, pages = {525 - 525}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {Testing commercial software is expensive and time consuming. Automated testing methods promise to save a great deal of time and money throughout the software industry. One approach that is well-suited for the reactive systems found in telephone switching systems is specification-based testing. We have built a set of tools to automatically test software applications for violations of safety properties expressed in temporal logic. Our testing system automatically constructs finite state machine oracles corresponding to safety properties, builds test harnesses, and integrates them with the application. The test harness then generates inputs automatically to test the application. We describe a study examining the feasibility of this approach for testing industrial applications. To conduct this study we formally modeled an Automatic Protection Switching system (APS), which is an application common to many telephony systems. We then asked a number of computer science graduate students to develop several versions of the APS and use our tools to test them. We found that the tools are very effective, save significant amounts of human effort (at the expense of machine resources), and are easy to use. We also discuss improvements that are needed before we can use the tools with professional developers building commercial products.}, keywords = {empirical studies, reactive systems, specification-based testing, temporal logic}, doi = {http://doi.ieeecomputersociety.org/10.1109/ICSE.1997.610373}, author = {Jagadeesan,Lalita Jategaonkar and Porter, Adam and Puchol,Carlos and Ramming,J. Christopher and Votta,Lawrence G.} } @conference {16359, title = {Symbolic model checking of infinite state systems using Presburger arithmetic}, booktitle = {Computer Aided Verification}, year = {1997}, month = {1997///}, pages = {400 - 411}, author = {Bultan,T. and Gerber,R.
and Pugh, William} } @conference {13658, title = {Techniques for Automated Testing of Document Analysis Algorithms}, booktitle = {Proceedings of the First Brazilian Symposium on Document Image Analysis}, year = {1997}, month = {1997///}, pages = {201 - 212}, author = {Sauvola,J. and Kauniskangas,H. and David Doermann and Pietikainen,M.} } @inbook {14269, title = {Toward motion picture grammars}, booktitle = {Computer Vision {\textemdash} ACCV{\textquoteright}98}, series = {Lecture Notes in Computer Science}, volume = {1352}, year = {1997}, month = {1997///}, pages = {283 - 290}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We are interested in processing video data for the purpose of solving a variety of problems in video search, analysis, indexing, browsing and compression. Instead of concentrating on a particular problem, in this paper we present a framework for developing video applications. Our basic thesis is that video data can be represented at a higher level of abstraction as a string generated by a grammar, termed motion picture grammar. The rules of that grammar relate different spatiotemporal representations of the video content and, in particular, representations of action.}, isbn = {978-3-540-63931-2}, url = {http://dx.doi.org/10.1007/3-540-63931-4_228}, author = {Bolle,Ruud and Aloimonos, J. and Ferm{\"u}ller, Cornelia}, editor = {Chin,Roland and Pong,Ting-Chuen} } @article {16197, title = {User Interface Reengineering: Low-Effort, High-Payoff Strategies}, journal = {IEEE Software}, volume = {14}, year = {1997}, month = {1997///}, pages = {66 - 72}, abstract = {User interface technology has advanced rapidly in recent years. Incorporating new developments in existing systems could result in substantial improvements in usability, thereby improving performance and user satisfaction, while shortening training and reducing error rates. We describe low-effort, high-payoff strategies that focus attention on improvements to data display and entry, consistency, messages, documentation, system access and additional functionality. We report on experience from six projects, describing observations, recommendations and outcomes. We close with guidance for managers and designers who are responsible for user interface reengineering.}, author = {Plaisant, Catherine and Rose,A. and Shneiderman, Ben and Vanniamparampil,A. J} } @conference {15040, title = {The vegetation canopy lidar mission}, booktitle = {Proceedings of Land Satellite Information in the Next Decade, II: Sources and Applications. Bethesda (MD): American Society of Photogrammetry and Remote Sensing}, year = {1997}, month = {1997///}, pages = {100 - 112}, abstract = {The Vegetation Canopy Lidar (VCL) is the first selected mission of NASA{\textquoteright}s new Earth System Science Pathfinder program. The principal goal of VCL is the characterization of the three-dimensional structure of the earth; in particular, canopy vertical and horizontal structure and land surface topography. Its primary science objectives are: landcover characterization for terrestrial ecosystem modeling, monitoring and prediction; landcover characterization for climate modeling and prediction; and, production of a global reference data set of topographic spot heights and transects.
VCL will provide unique data sets for understanding important environmental issues including climatic change and variability, biotic erosion and sustainable landuse, and will dramatically improve our estimation of global biomass and carbon stocks, fractional forest cover, forest extent and condition. It will also provide canopy data critical for biodiversity, natural hazard, and climate studies. Scheduled for launch in early 2000, VCL is an active lidar remote sensing system consisting of a five-beam instrument with 25 m contiguous along track resolution. The five beams are in a circular configuration 8 km across and each beam traces a separate ground track spaced 2 km apart, eventually producing 2 km coverage between 65{\textdegree} N and S. VCL{\textquoteright}s core measurement objectives are: (1) canopy top heights; (2) vertical distribution of intercepted surfaces (e.g. leaves and branches); and, (3) ground surface topographic elevations. These measurements are used to derive a variety of science data products including canopy heights, canopy vertical distribution, and ground elevations gridded monthly at 1{\textdegree} resolution and every 6 months at 2 km resolution, as well as a 2 km fractional forest cover product. }, author = {Dubayah, R. and Blair,J. B. and Bufton,J. L. and Clark,D. B. and JaJa, Joseph F. and Knox,R. and Luthcke,S. B. and Prince,S. and Weishampel,J.} } @article {16198, title = {Visual Information Seeking in Digital Image Libraries: The Visible Human Explorer}, journal = {Information in Images}, year = {1997}, month = {1997///}, abstract = {This chapter presents the Visible Human Explorer user interface, developed at the Human-Computer Interaction Lab of the University of Maryland at College Park, for remotely accessing the National Library of Medicine{\textquoteright}s Visible Human digital image library. With the interface, users can visualize the library, browse contents, locate data of interest, and retrieve and zoom on desired image details. The interface presents a pair of tightly coupled views of library data: an overview of the overall search space, and a preview of high-resolution images available for retrieval. To explore, the user sweeps the views through the search space and receives smooth, rapid, visual feedback of contents. Desired details are automatically downloaded over the internet from the library. The interface software is completely functional (runs on Sun Workstations) and freely available for public use, at: http://www.nlm.nih.gov/. We also present several human-computer interaction design principles used to create the Visible Human Explorer interface, describe how these were applied to the design, and discuss issues in employing these techniques in user interfaces for other information collections. These principles are direct manipulation, visual information seeking, query previews, and multiple tightly coupled views. We illustrate these concepts with a plethora of pictures of user interface screens. Please also check the included CD-ROM for additional illustration media. }, author = {North,C. and Shneiderman, Ben and Plaisant, Catherine} } @article {15938, title = {Automated inference in active logics}, journal = {JOURNAL OF APPLIED NONCLASSICAL LOGICS}, volume = {6}, year = {1996}, month = {1996///}, pages = {9 - 28}, author = {Miller,M.
and Perlis, Don} } @article {17638, title = {On the Complexity of Distributed Network Decomposition}, journal = {Journal of Algorithms}, volume = {20}, year = {1996}, month = {1996/03//}, pages = {356 - 374}, abstract = {In this paper, we improve the bounds for computing a network decomposition distributively and deterministically. Our algorithm computes an $(n^{\epsilon(n)}, n^{\epsilon(n)})$-decomposition in $n^{O(\epsilon(n))}$ time, where $\epsilon(n) = 1/\sqrt{\log n}$. As a corollary we obtain improved deterministic bounds for distributively computing several graph structures such as maximal independent sets and $\Delta$-vertex colorings. We also show that the class of graphs G whose maximum degree is $n^{O(\delta(n))}$, where $\delta(n) = 1/\log \log n$, is complete for the task of computing a near-optimal decomposition, i.e., a $(\log n, \log n)$-decomposition, in polylog(n) time. This is a corollary of a more general characterization, which pinpoints the weak points of existing network decomposition algorithms. Completeness is to be intended in the following sense: if we have an algorithm A that computes a near-optimal decomposition in polylog(n) time for graphs in G, then we can compute a near-optimal decomposition in polylog(n) time for all graphs.}, isbn = {0196-6774}, doi = {10.1006/jagm.1996.0017}, url = {http://www.sciencedirect.com/science/article/pii/S0196677496900176}, author = {Panconesi,Alessandro and Srinivasan, Aravind} } @conference {13671, title = {The Development of a General Framework for Intelligent Document Image Retrieval}, booktitle = {Proceedings in the International Workshop on Document Analysis Systems}, year = {1996}, month = {1996///}, pages = {605 - 632}, author = {David Doermann and Sauvola,J. and Kauniskangas,H. and Shin,C. and Pietikainen,M. and Rosenfeld, A.} } @article {14105, title = {Differential expression of the expression site-associated gene I family in African trypanosomes}, journal = {Journal of Biological Chemistry}, volume = {271}, year = {1996}, month = {1996///}, pages = {9771 - 9771}, author = {Morgan,R. W and El-Sayed, Najib M. and Kepa,J. K and Pedram,M. and Donelson,J. E} } @article {16379, title = {Efficient distribution analysis via graph contraction}, journal = {Languages and Compilers for Parallel Computing}, year = {1996}, month = {1996///}, pages = {377 - 391}, author = {Sheffler,T. and Schreiber,R. and Pugh, William and Gilbert,J. and Chatterjee,S.} } @conference {16319, title = {An empirical exploration of code evolution}, booktitle = {International Workshop on Empirical Studies of Software Maintenance}, year = {1996}, month = {1996///}, author = {Karr,A. and Porter, Adam and Votta,L.} } @article {16310, title = {Evaluating workflow and process automation in wide-area software development}, journal = {Software Process Technology}, year = {1996}, month = {1996///}, pages = {188 - 193}, author = {Perry,D. and Porter, Adam and Votta,L. and Wade,M.} } @article {16206, title = {Hybrid network management (communication systems)}, journal = {16th AIAA International Communications Satellite Systems Conference}, year = {1996}, month = {1996///}, abstract = {We describe our collaborative efforts towards the design and implementation of a next-generation integrated network management system for hybrid networks (INMS/HN). We describe the overall software architecture of the system at its current stage of development. This NMS is specifically designed to address issues relevant to complex heterogeneous networks consisting of seamlessly interoperable terrestrial and satellite networks.
NMSs are a key element for interoperability in such networks. We describe the integration of configuration management and performance management. The next step in this integration is fault management. In particular, we describe the object model, issues concerning the graphical user interface, browsing tools, performance data graphical widget displays, and management information database organization issues.}, author = {Baras,J. S and Ball,M. and Karne,R. K and Kelley,S. and Jang,K.D. and Plaisant, Catherine and Roussopoulos, Nick and Stathatos,K. and Vakhutinsky,A. and Valluri,J.} } @article {16202, title = {Integrated network management of hybrid networks}, journal = {AIP Conference Proceedings}, volume = {361}, year = {1996}, month = {1996/03/01/}, pages = {345 - 350}, abstract = {We describe our collaborative efforts towards the design and implementation of a next generation integrated network management system for hybrid networks (INMS/HN). We describe the overall software architecture of the system at its current stage of development. This network management system is specifically designed to address issues relevant for complex heterogeneous networks consisting of seamlessly interoperable terrestrial and satellite networks. Network management systems are a key element for interoperability in such networks. We describe the integration of configuration management and performance management. The next step in this integration is fault management. In particular we describe the object model, issues of the Graphical User Interface (GUI), browsing tools and performance data graphical widget displays, management information database (MIB) organization issues. Several components of the system are being commercialized by Hughes Network Systems. {\textcopyright} 1996 American Institute of Physics.}, isbn = {0094243X}, doi = {doi:10.1063/1.50028}, url = {http://proceedings.aip.org/resource/2/apcpcs/361/1/345_1?isAuthorized=no}, author = {Baras,John S and Ball,Mike and Karne,Ramesh K and Kelley,Steve and Jang,Kap D and Plaisant, Catherine and Roussopoulos, Nick and Stathatos,Kostas and Vakhutinsky,Andrew and Jaibharat,Valluri and Whitefield,David} } @conference {16203, title = {LifeLines: visualizing personal histories}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems: common ground}, series = {CHI {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {221 - 227}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {History, justice, medical record, overview, personal record, screen design, screen management, timeline, Visualization}, isbn = {0-89791-777-4}, doi = {10.1145/238386.238493}, url = {http://doi.acm.org/10.1145/238386.238493}, author = {Plaisant, Catherine and Milash,Brett and Rose,Anne and Widoff,Seth and Shneiderman, Ben} } @conference {13924, title = {Local tools: an alternative to tool palettes}, booktitle = {Proceedings of the 9th annual ACM symposium on User interface software and technology}, year = {1996}, month = {1996///}, pages = {169 - 170}, author = {Bederson, Benjamin B. and Hollan,J.D. and Druin, Allison and Stewart,J. and Rogers,D. and Proft,D.} } @article {14568, title = {Positional sequencing by hybridization}, journal = {Computer applications in the biosciences : CABIOS}, volume = {12}, year = {1996}, month = {1996/02/01/}, pages = {19 - 24}, abstract = {Sequencing by hybridization (SBH) is a promising alternative to the classical DNA sequencing approaches. 
However, the resolving power of SBH is rather low: with 64kb sequencing chips, unknown DNA fragments only as long as 200 bp can be reconstructed in a single SBH experiment. To improve the resolving power of SBH, positional SBH (PSBH) has recently been suggested; this allows (with additional experimental work) approximate positions of every l-tuple in a target DNA fragment to be measured. We study the positional Eulerian path problem motivated by PSBH. The input to the positional Eulerian path problem is an Eulerian graph G(V, E) in which every edge has an associated range of integers and the problem is to find an Eulerian path $e_1, \ldots, e_{|E|}$ in G such that the range of $e_i$ contains $i$. We show that the positional Eulerian path problem is NP-complete even when the maximum out-degree (in-degree) of any vertex in the graph is 2. On a positive note we present polynomial algorithms to solve a special case of PSBH (bounded PSBH), where the range of the allowed positions for any edge is bounded by a constant (it corresponds to accurate experimental measurements of positions in PSBH). Moreover, if the positions of every l-tuple in an unknown DNA fragment of length n are measured with O(log n) error, then our algorithm runs in polynomial time. We also present an estimate of the resolving power of PSBH for a more realistic case when positions are measured with Θ(n) error.}, doi = {10.1093/bioinformatics/12.1.19}, url = {http://bioinformatics.oxfordjournals.org/content/12/1/19.abstract}, author = {Hannenhalli, Sridhar and Feldman,William and Lewis,Herbert F. and Skiena,Steven S. and Pevzner,Pavel A.} }
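The positional Eulerian path problem defined in this abstract is concrete enough to code directly. A brute-force backtracking sketch follows (illustrative only: its exponential worst case is consistent with the NP-completeness result, and it is not the paper's polynomial algorithm for bounded ranges):

```python
# Brute-force backtracking for the positional Eulerian path problem:
# find an Eulerian path e_1..e_m such that position i lies in the
# allowed range of e_i.  Exponential in general; the paper's polynomial
# bounded-range algorithm is not reproduced here.

def positional_euler(edges, ranges, start):
    """edges: directed edges (u, v); ranges[j] = (lo, hi) allowed
    1-based positions for edge index j in the Eulerian path."""
    m, used, path = len(edges), [False] * len(edges), []

    def extend(v, i):
        if i == m:
            return True
        for j, (a, b) in enumerate(edges):
            lo, hi = ranges[j]
            if not used[j] and a == v and lo <= i + 1 <= hi:
                used[j] = True
                path.append((a, b))
                if extend(b, i + 1):
                    return True
                used[j] = False
                path.pop()
        return False

    return path if extend(start, 0) else None

edges = [(0, 1), (1, 0), (0, 1)]
ranges = {0: (1, 1), 1: (2, 2), 2: (3, 3)}    # ranges pin a unique order
print(positional_euler(edges, ranges, 0))     # [(0, 1), (1, 0), (0, 1)]
```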
@conference {17335, title = {Query previews in networked information systems}, booktitle = {Proceedings of the Third Forum on Research and Technology Advances in Digital Libraries, 1996. ADL {\textquoteright}96}, year = {1996}, month = {1996/05/13/15}, pages = {120 - 129}, publisher = {IEEE}, organization = {IEEE}, abstract = {In a networked information system (such as the NASA Earth Observing System-Data Information System (EOS-DIS)), there are three major obstacles facing users in a querying process: network performance, data volume and data complexity. In order to overcome these obstacles, we propose a two phase approach to query formulation. The two phases are the Query Preview and the Query Refinement. In the Query Preview phase, users formulate an initial query by selecting rough attribute values. The estimated number of matching data sets is shown, graphically on preview bars which allows users to rapidly focus on a manageable number of relevant data sets. Query previews also prevent wasted steps by eliminating zero hit queries. When the estimated number of data sets is low enough, the initial query is submitted to the network which returns the metadata of the data sets for further refinement in the Query Refinement phase. The two phase approach to query formulation overcomes slow network performance, and reduces the data volume and data complexity problems. This approach is especially appropriate for users who do not have extensive knowledge about the data and who prefer an exploratory method to discover data patterns and exceptions. Using this approach, we have developed dynamic query user interfaces to allow users to formulate their queries across a networked environment}, keywords = {Computer networks, data complexity, data mining, data patterns, data volume, dynamic query user interfaces, Educational institutions, EOS-DIS, exploratory method, Information retrieval, Information services, Information systems, Intelligent networks, interactive systems, Laboratories, Manipulator dynamics, matching data sets, NASA Earth Observing System-Data Information System, Network performance, networked environment, networked information systems, query formulation, query preview, query refinement, querying process, rough attribute values, User interfaces, visual databases, zero hit queries}, isbn = {0-8186-7403-2}, doi = {10.1109/ADL.1996.502522}, author = {Doan,K. and Plaisant, Catherine and Shneiderman, Ben} }
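At its core, the preview-bar mechanism in this abstract is counting matches against locally cached attribute summaries before any query crosses the network. A toy Python sketch (the records and attribute names are invented for illustration):

```python
# Toy sketch of the query-preview idea: locally held hit counts let the
# interface show result-set sizes and rule out zero-hit queries before
# anything is sent over the network.  Records are invented examples.

records = [
    {"topic": "ocean", "year": 1994, "region": "africa"},
    {"topic": "ocean", "year": 1995, "region": "asia"},
    {"topic": "land",  "year": 1994, "region": "asia"},
]

def preview_count(selection):
    """selection maps attribute -> set of accepted rough values."""
    return sum(all(r[a] in vals for a, vals in selection.items())
               for r in records)

# Preview bars update as the user widens or narrows rough values:
print(preview_count({"topic": {"ocean"}}))                      # 2
print(preview_count({"topic": {"ocean"}, "year": {1994}}))      # 1 -> submit
print(preview_count({"topic": {"land"}, "region": {"africa"}})) # 0 -> avoided
```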
@article {18990, title = {Ribosomal RNA: Small nucleolar RNAs make their mark}, journal = {Current Biology}, volume = {6}, year = {1996}, month = {1996/11//}, pages = {1413 - 1415}, abstract = {Small nucleolar RNAs direct the location of certain methylations in ribosomal RNA by direct base pairing; although evolutionarily conserved, the physiological significance of these modifications remains unclear.}, isbn = {0960-9822}, doi = {10.1016/S0960-9822(96)00745-2}, url = {http://www.sciencedirect.com/science/article/pii/S0960982296007452}, author = {Peculis,Brenda A. and Mount, Stephen M.} } @inbook {18680, title = {On stratified sampling for high coverage estimations}, booktitle = {Dependable Computing {\textemdash} EDCC-2}, series = {Lecture Notes in Computer Science}, volume = {1150}, year = {1996}, month = {1996///}, pages = {35 - 54}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper addresses the problem of estimating the coverage of a fault tolerance mechanism through statistical processing of observations collected in fault injection experiments. In an earlier paper, several techniques for sampling the fault/activity input space of a fault tolerance mechanism were presented. Various estimators based on simple sampling in the whole space and stratified sampling in a partitioned space were studied; confidence limits were derived based on a normal approximation. In this paper, the validity of this approximation is analyzed, especially for high coverage systems. The theory of confidence regions is then introduced to estimate the coverage without approximation when, for practical reasons, stratification is used. Three statistics are considered for defining confidence regions. It is shown that one of these statistics {\textemdash} a vectorial statistic {\textemdash} is often more conservative than the other two. However, only the vectorial statistic is computationally tractable. The results obtained are compared with those based on approximation by means of three hypothetical example systems.}, keywords = {Computer science}, isbn = {978-3-540-61772-3}, url = {http://www.springerlink.com/content/7t2w2u472601h730/abstract/}, author = {Powell,David and Michel Cukier and Arlat,Jean}, editor = {Hlawiczka,Andrzej and Silva,Jo{\~a}o and Simoncini,Luca} } @conference {14643, title = {To cut{\textellipsis} or not to cut (applications of comparative physical maps in molecular evolution)}, booktitle = {Proceedings of the seventh annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {304 - 313}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-366-8}, url = {http://dl.acm.org/citation.cfm?id=313852.314077}, author = {Hannenhalli, Sridhar and Pevzner,Pavel} } @article {16349, title = {Transitive closure of infinite graphs and its applications}, journal = {Languages and Compilers for Parallel Computing}, year = {1996}, month = {1996///}, pages = {126 - 140}, author = {Kelly,W. and Pugh, William and Rosser,E.
@conference {14643, title = {To cut{\textellipsis} or not to cut (applications of comparative physical maps in molecular evolution)}, booktitle = {Proceedings of the seventh annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {304 - 313}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-366-8}, url = {http://dl.acm.org/citation.cfm?id=313852.314077}, author = {Hannenhalli, Sridhar and Pevzner,Pavel} } @article {16349, title = {Transitive closure of infinite graphs and its applications}, journal = {Languages and Compilers for Parallel Computing}, year = {1996}, month = {1996///}, pages = {126 - 140}, author = {Kelly,W. and Pugh, William and Rosser,E. and Shpeisman,T.} } @conference {16204, title = {User controlled overviews of an image library: a case study of the visible human}, booktitle = {Proceedings of the first ACM international conference on Digital libraries}, series = {DL {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {74 - 82}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {Browsing, digital library, image database, information exploration, Information retrieval, Internet, medical image, remote access, user interface, Visualization, World-wide web}, isbn = {0-89791-830-4}, doi = {10.1145/226931.226946}, url = {http://doi.acm.org/10.1145/226931.226946}, author = {North,Chris and Shneiderman, Ben and Plaisant, Catherine} } @article {15529, title = {Using priorities to combine knowledge bases}, journal = {International Journal of Cooperative Information Systems}, volume = {5}, year = {1996}, month = {1996///}, pages = {333 - 333}, abstract = {Two or more companies, each with its own knowledge base, may merge. In that case one option is to merge the knowledge bases into one knowledge base. It can happen that some of the information contained in one or more knowledge bases may be in conflict with information in the other knowledge bases. There may be several such points of conflict and any information may be involved in several different such points of conflict. In that case, the integrator of the knowledge bases may prefer a certain claim to another in one conflict-point without necessarily preferring that claim in another conflict-point. Our work constructs a framework within which the consequences of a set of such preferences (expressed as priorities among sets of statements) can be computed. We give three types of semantics for priorities, two of which are shown to be equivalent to one another. The third type of semantics for priorities is shown to be more cautious than the other two. In terms of these semantics for priorities, we give a function for combining knowledge from different sources such that the combined knowledge is conflict-free and satisfies all the priorities. }, doi = {10.1142/S0218843096000130}, author = {Pradhan,S.
and Minker, Jack} } @conference {16205, title = {Where is information visualization technology going?}, booktitle = {Proceedings of the 9th annual ACM symposium on User interface software and technology}, series = {UIST {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {75 - 77}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-798-7}, doi = {10.1145/237091.237101}, url = {http://doi.acm.org/10.1145/237091.237101}, author = {Hasco{\"e}t-Zizi,Mountaz and Ahlberg,Chris and Korfhage,Robert and Plaisant, Catherine and Chalmers,Matthew and Rao,Ramana} } @conference {16211, title = {An applied ethnographic method for redesigning user interfaces}, booktitle = {Proceedings of the 1st conference on Designing interactive systems: processes, practices, methods, \& techniques}, series = {DIS {\textquoteright}95}, year = {1995}, month = {1995///}, pages = {115 - 122}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-673-5}, doi = {10.1145/225434.225447}, url = {http://doi.acm.org/10.1145/225434.225447}, author = {Rose,Anne and Shneiderman, Ben and Plaisant, Catherine} } @article {13733, title = {Building a LCS-Based Lexicon in TAGs}, journal = {Proceedings of the AAAI-95 Spring Symposium Series, Representation and Acquisition of Lexical Knowledge: Polysemy, Ambiguity, and Generativity, Stanford, CA, March}, year = {1995}, month = {1995///}, pages = {27 - 29}, author = {Dorr, Bonnie J and Palmer,M.} } @conference {16363, title = {Code generation for multiple mappings}, booktitle = {Frontiers of Massively Parallel Computation, 1995. Proceedings. Frontiers {\textquoteright}95., Fifth Symposium on the}, year = {1995}, month = {1995/02/06/9}, pages = {332 - 341}, publisher = {IEEE}, organization = {IEEE}, abstract = {There has been a great amount of recent work toward unifying iteration reordering transformations. Many of these approaches represent transformations as affine mappings from the original iteration space to a new iteration space. These approaches show a great deal of promise, but they all rely on the ability to generate code that iterates over the points in these new iteration spaces in the appropriate order. This problem has been fairly well-studied in the case where all statements use the same mapping. We have developed an algorithm for the less well-studied case where each statement uses a potentially different mapping. Unlike many other approaches, our algorithm can also generate code from mappings corresponding to loop blocking. We address the important trade-off between reducing control overhead and duplicating code}, keywords = {code generation, Computer science, Concurrent computing, control overhead, Educational institutions, iteration reordering transformations, Law, Legal factors, loop blocking, multiple mappings, optimisation, optimising compilers, Optimizing compilers, PARALLEL PROCESSING, Performance analysis, program compilers}, isbn = {0-8186-6965-9}, doi = {10.1109/FMPC.1995.380437}, author = {Kelly,W. and Pugh, William and Rosser,E.} } @article {15538, title = {Combining databases with prioritized information}, journal = {Journal of Intelligent Information Systems}, volume = {4}, year = {1995}, month = {1995///}, pages = {231 - 260}, abstract = {To solve a problem one may need to combine the knowledge of several different experts. It can happen that some of the claims of one or more experts may be in conflict with the claims of other experts. 
There may be several such points of conflict and any claim may be involved in several different such points of conflict. In that case, the user of the knowledge of experts may prefer a certain claim to another in one conflict-point without necessarily preferring that statement in another conflict-point. Our work constructs a framework within which the consequences of a set of such preferences (expressed as priorities among sets of statements) can be computed. We give four types of semantics for priorities, three of which are shown to be equivalent to one another. The fourth type of semantics for priorities is shown to be more cautious than the other three. In terms of these semantics for priorities, we give a function for combining knowledge from different sources such that the combined knowledge is conflict-free and satisfies all the priorities. }, doi = {10.1007/BF00961654}, author = {Pradhan,S. and Minker, Jack and V.S. Subrahmanian} } @article {15537, title = {Combining Datalog databases using priorities}, year = {1995}, month = {1995///}, institution = {University of Maryland at College Park}, address = {College Park, MD, USA}, author = {Pradhan,Shekhar and Minker, Jack} }
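The conflict-point picture in the two priority-combination entries above can be illustrated with a deliberately tiny toy: keep, at each conflict point, whichever claim the integrator prefers there. The facts, conflict pairs, and preferences below are hypothetical, and the paper's semantics (priorities over sets of statements, with several model-theoretic readings) are far richer than this sketch:

```python
# Toy conflict resolution under per-conflict-point priorities.
kb1 = {"open_sundays", "ships_overseas"}
kb2 = {"closed_sundays", "ships_overseas"}

conflicts = [("open_sundays", "closed_sundays")]   # mutually exclusive pairs
prefer    = {("open_sundays", "closed_sundays"): "closed_sundays"}

merged = kb1 | kb2
for a, b in conflicts:
    if a in merged and b in merged:                # conflict point is live
        merged.discard(a if prefer[(a, b)] == b else b)

print(sorted(merged))   # ['closed_sundays', 'ships_overseas'] -- conflict-free
```

Note that the preference is attached to the conflict point, not globally to a source, which is exactly the flexibility the abstracts emphasize.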
@article {16314, title = {Comparing detection methods for software requirements inspections: a replicated experiment}, journal = {IEEE Transactions on Software Engineering}, volume = {21}, year = {1995}, month = {1995/06//}, pages = {563 - 575}, abstract = {Software requirements specifications (SRS) are often validated manually. One such process is inspection, in which several reviewers independently analyze all or part of the specification and search for faults. These faults are then collected at a meeting of the reviewers and author(s). Usually, reviewers use Ad Hoc or Checklist methods to uncover faults. These methods force all reviewers to rely on nonsystematic techniques to search for a wide variety of faults. We hypothesize that a Scenario-based method, in which each reviewer uses different, systematic techniques to search for different, specific classes of faults, will have a significantly higher success rate. We evaluated this hypothesis using a 3{\texttimes}2$^{4}$ partial factorial, randomized experimental design. Forty-eight graduate students in computer science participated in the experiment. They were assembled into sixteen, three-person teams. Each team inspected two SRS using some combination of Ad Hoc, Checklist or Scenario methods. For each inspection we performed four measurements: (1) individual fault detection rate, (2) team fault detection rate, (3) percentage of faults first identified at the collection meeting (meeting gain rate), and (4) percentage of faults first identified by an individual, but never reported at the collection meeting (meeting loss rate). The experimental results are that (1) the Scenario method had a higher fault detection rate than either Ad Hoc or Checklist methods, (2) Scenario reviewers were more effective at detecting the faults their scenarios are designed to uncover, and were no less effective at detecting other faults than both Ad Hoc or Checklist reviewers, (3) Checklist reviewers were no more effective than Ad Hoc reviewers, and (4) Collection meetings produced no net improvement in the fault detection rate; meeting gains were offset by meeting losses}, keywords = {Assembly, Computer science, Design for experiments, detection methods, Fault detection, fault detection rate, Fault diagnosis, formal specification, formal verification, Gain measurement, individual fault detection rate, Inspection, Loss measurement, nonsystematic techniques, performance evaluation, Performance gain, replicated experiment, scenario-based method, Software development management, software requirements inspections, software requirements specifications, team fault detection rate}, isbn = {0098-5589}, doi = {10.1109/32.391380}, author = {Porter, Adam and Votta,L. G. and Basili, Victor R.} } @article {15974, title = {Consciousness and complexity: the cognitive quest}, journal = {Annals of Mathematics and Artificial Intelligence}, volume = {14}, year = {1995}, month = {1995///}, pages = {309 - 321}, author = {Perlis, Don} } @article {14094, title = {Crystallization and preliminary X-ray investigation of the recombinant Trypanosoma brucei rhodesiense calmodulin}, journal = {Proteins: Structure, Function, and Bioinformatics}, volume = {21}, year = {1995}, month = {1995///}, pages = {354 - 357}, author = {El-Sayed, Najib M. and Patton,C. L and Harkins,P. C and Fox,R. O and Anderson,K.} } @conference {16318, title = {Experimental software engineering: A report on the state of the art}, booktitle = {INTERNATIONAL CONFERENCE ON SOFTWARE ENGINEERING}, volume = {17}, year = {1995}, month = {1995///}, pages = {277 - 277}, author = {Votta,L. G. and Porter, Adam and Perry,D.} } @article {15926, title = {Explicitly Biased Generalization}, journal = {Goal-Driven Learning}, year = {1995}, month = {1995///}, pages = {321 - 354}, author = {Gordon,D. and Perlis, Don} }
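The next entry (and the related Kelly-Pugh entries elsewhere in this list) treats iteration reordering transformations as mappings from the original iteration space to a new one. A toy sketch of that viewpoint: loop interchange is the mapping (i, j) -> (j, i), and executing iterations in lexicographic order of their images yields the transformed nest. The example data here is hypothetical, not from the papers:

```python
from itertools import product

def run_order(mapping, n=3, m=2):
    """Iterations of a 2-deep nest, executed in lexicographic order
    of their images under the given iteration-space mapping."""
    return sorted(product(range(n), range(m)),
                  key=lambda ij: mapping(*ij))

identity    = lambda i, j: (i, j)      # original loop nest
interchange = lambda i, j: (j, i)      # loop interchange
skew        = lambda i, j: (i, i + j)  # loop skewing, another affine mapping

print(run_order(identity))      # row-major: (0,0) (0,1) (1,0) ...
print(run_order(interchange))   # column-major: (0,0) (1,0) (2,0) (0,1) ...
```

The framework's legality question is whether the chosen mapping preserves all data dependences; this sketch only shows the reordering itself.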
@conference {16375, title = {Finding Legal Reordering Transformations using Mappings}, booktitle = {Languages and compilers for parallel computing: 7th International Workshop, Ithaca, NY, USA, August 8-10, 1994: proceedings}, volume = {7}, year = {1995}, month = {1995///}, pages = {107 - 107}, abstract = {We present a unified framework for applying iteration reordering transformations. This framework is able to represent traditional transformations such as loop interchange, loop skewing and loop distribution as well as compositions of these transformations. Using a unified framework rather than a sequence of ad hoc transformations makes it easier to analyze and predict the effects of these transformations. Our framework is based on the idea that all reordering transformations can be represented as a mapping from the original iteration space to a new iteration space. An optimizing compiler would use our framework by finding a mapping that both corresponds to a legal transformation and produces efficient code. We present the mapping selection problem as a search problem by decomposing it into a sequence of smaller choices. We then characterize the set of all legal mappings by defining a search tree.}, doi = {10.1007/BFb0025874}, author = {Pugh, William and Kelly, Wayne} } @article {18978, title = {Genetic enhancement of RNA-processing defects by a dominant mutation in B52, the Drosophila gene for an SR protein splicing factor.}, journal = {Molecular and Cellular Biology}, volume = {15}, year = {1995}, month = {1995/11/01/}, pages = {6273 - 6282}, abstract = {SR proteins are essential for pre-mRNA splicing in vitro, act early in the splicing pathway, and can influence alternative splice site choice. Here we describe the isolation of both dominant and loss-of-function alleles of B52, the gene for a Drosophila SR protein. The allele B52ED was identified as a dominant second-site enhancer of white-apricot (wa), a retrotransposon insertion in the second intron of the eye pigmentation gene white with a complex RNA-processing defect. B52ED also exaggerates the mutant phenotype of a distinct white allele carrying a 5{\textquoteright} splice site mutation (wDR18), and alters the pattern of sex-specific splicing at doublesex under sensitized conditions, so that the male-specific splice is favored. In addition to being a dominant enhancer of these RNA-processing defects, B52ED is a recessive lethal allele that fails to complement other lethal alleles of B52. Comparison of B52ED with the B52+ allele from which it was derived revealed a single change in a conserved amino acid in the beta 4 strand of the first RNA-binding domain of B52, which suggests that altered RNA binding is responsible for the dominant phenotype. Reversion of the B52ED dominant allele with X rays led to the isolation of a B52 null allele. Together, these results indicate a critical role for the SR protein B52 in pre-mRNA splicing in vivo.}, isbn = {0270-7306, 1098-5549}, url = {http://mcb.asm.org/content/15/11/6273}, author = {Peng,X. and Mount, Stephen M.} } @article {14615, title = {Genome Sequence Comparison and Scenarios for Gene Rearrangements: A Test Case}, journal = {Genomics}, volume = {30}, year = {1995}, month = {1995/11//}, pages = {299 - 311}, abstract = {As large portions of related genomes are being sequenced, methods for comparing complete or nearly complete genomes, as opposed to comparing individual genes, are becoming progressively more important. A major, widespread phenomenon in genome evolution is the rearrangement of genes and gene blocks. There is, however, no consistent method for genome sequence comparison combined with the reconstruction of the evolutionary history of highly rearranged genomes. We developed a schema for genome sequence comparison that includes three successive steps: (i) comparison of all proteins encoded in different genomes and generation of genomic similarity plots; (ii) construction of an alphabet of conserved genes and gene blocks; and (iii) generation of most parsimonious genome rearrangement scenarios. The approach is illustrated by a comparison of the herpesvirus genomes that constitute the largest set of relatively long, complete genome sequences available to date. Herpesviruses have from 70 to about 200 genes; comparison of the amino acid sequences encoded in these genes results in an alphabet of about 30 conserved genes comprising 7 conserved blocks that are rearranged in the genomes of different herpesviruses.
Algorithms to analyze rearrangements of multiple genomes were developed and applied to the derivation of most parsimonious scenarios of herpesvirus evolution under different evolutionary models. The developed approaches to genome comparison will be applicable to the comparative analysis of bacterial and eukaryotic genomes as soon as their sequences become available.}, isbn = {0888-7543}, doi = {10.1006/geno.1995.9873}, url = {http://www.sciencedirect.com/science/article/pii/S0888754385798734}, author = {Hannenhalli, Sridhar and Chappey,Colombe and Koonin,Eugene V. and Pevzner,Pavel A.} } @article {16378, title = {Going beyond integer programming with the Omega test to eliminate false data dependences}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {6}, year = {1995}, month = {1995/02//}, pages = {204 - 211}, abstract = {Array data dependence analysis methods currently in use generate false dependences that can prevent useful program transformations. These false dependences arise because the questions asked are conservative approximations to the questions we really should be asking. Unfortunately, the questions we really should be asking go beyond integer programming and require decision procedures for a subclass of Presburger formulas. In this paper, we describe how to extend the Omega test so that it can answer these queries and allow us to eliminate these false data dependences. We have implemented the techniques described here and believe they are suitable for use in production compilers}, keywords = {Algorithm design and analysis, Arithmetic, Computer science, Data analysis, false data dependences, integer programming, Linear programming, Omega test, Privatization, Production, production compilers, program compilers, Program processors, program testing, program transformations, Testing}, isbn = {1045-9219}, doi = {10.1109/71.342135}, author = {Pugh, William and Wonnacott,D.} } @book {17211, title = {Human-Computer Interaction Laboratory 1995 Video Reports}, year = {1995}, month = {1995///}, publisher = {University of Maryland at College Park, Human/Computer Interaction Laboratory}, organization = {University of Maryland at College Park, Human/Computer Interaction Laboratory}, author = {Plaisant, Catherine and Morrison,S. and Skokowski,C. and Reesch,J. and Shneiderman, Ben and Laboratory,University of Maryland at College Park. Human/Computer Interaction and Channel,F.} } @article {17222, title = {Image-browser taxonomy and guidelines for designers}, journal = {IEEE Software}, volume = {12}, year = {1995}, month = {1995/03//}, pages = {21 - 32}, abstract = {In many applications users must browse large images. Most designers merely use two one-dimensional scroll bars or ad hoc designs for two-dimensional scroll bars. However, the complexity of two-dimensional browsing suggests that more careful analysis, design, and evaluation might lead to significant improvements. Our exploration of existing 2D browsers has led us to identify many features and a wide variety of tasks performed with the browsers. We introduce an informal specification technique to describe 2D browsers and a task taxonomy, suggest design features and guidelines, and assess existing strategies. 
We focus on the tools to explore a selected image and so do not cover techniques to browse a series of images or to browse large-image databases}, keywords = {analysis, Computer Graphics, design, designer guidelines, Equations, Europe, Evaluation, Formal specifications, Graphical user interfaces, Guidelines, IMAGE PROCESSING, image-browser taxonomy, informal specification technique, Laboratories, large image browsing, Layout, Road transportation, selected image exploration, SHAPE, Software design, task taxonomy, Taxonomy, tools, two-dimensional browsing, user interface management systems, visual databases}, isbn = {0740-7459}, doi = {10.1109/52.368260}, author = {Plaisant, Catherine and Carr,D. and Shneiderman, Ben} } @article {17666, title = {The local nature of Δ-coloring and its algorithmic applications}, journal = {Combinatorica}, volume = {15}, year = {1995}, month = {1995///}, pages = {255 - 280}, abstract = {Given a connected graph $G=(V,E)$ with $|V|=n$ and maximum degree $\Delta$ such that $G$ is neither a complete graph nor an odd cycle, Brooks{\textquoteright} theorem states that $G$ can be colored with $\Delta$ colors. We generalize this as follows: let $G-v$ be $\Delta$-colored; then, $v$ can be colored by considering the vertices in an $O(\log_\Delta n)$ radius around $v$ and by recoloring an $O(\log_\Delta n)$ length {\textquotedblleft}augmenting path{\textquotedblright} inside it. Using this, we show that $\Delta$-coloring $G$ is reducible in $O(\log^3 n/\log \Delta)$ time to $(\Delta+1)$-vertex coloring $G$ in a distributed model of computation. This leads to fast distributed algorithms and a linear-processor NC algorithm for $\Delta$-coloring.}, isbn = {0209-9683}, url = {http://dx.doi.org/10.1007/BF01200759}, author = {Panconesi,Alessandro and Srinivasan, Aravind} }
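The (Δ+1)-coloring target of the reduction above is the easy baseline: a greedy sweep colors any graph with at most Δ+1 colors, since a vertex of degree at most Δ always has a free color. A sequential sketch of that baseline (the paper's contribution, getting down to Δ colors with only local recoloring, and doing so distributively, is not shown):

```python
def greedy_coloring(adj):
    """adj: dict vertex -> set of neighbours. Returns vertex -> colour,
    using at most max-degree + 1 colours in any vertex order."""
    color = {}
    for v in adj:
        used = {color[u] for u in adj[v] if u in color}
        color[v] = next(c for c in range(len(adj)) if c not in used)
    return color

# Odd cycle C5: Delta = 2, and greedy needs the full Delta+1 = 3 colours,
# matching the exclusion of odd cycles from Brooks' theorem.
cycle5 = {i: {(i - 1) % 5, (i + 1) % 5} for i in range(5)}
print(greedy_coloring(cycle5))
```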
@article {16210, title = {Next generation network management technology}, journal = {AIP Conference Proceedings}, volume = {325}, year = {1995}, month = {1995/01/25/}, pages = {75 - 82}, abstract = {Today{\textquoteright}s telecommunications networks are becoming increasingly large, complex, mission critical and heterogeneous in several dimensions. For example, the underlying physical transmission facilities of a given network may be {\textquoteleft}{\textquoteleft}mixed media{\textquoteright}{\textquoteright} (copper, fiber-optic, radio, and satellite); the subnetworks may be acquired from different vendors due to economic, performance, or general availability reasons; the information being transmitted over the network may be {\textquoteleft}{\textquoteleft}multimedia{\textquoteright}{\textquoteright} (video, data, voice, and images) and, finally, varying performance criteria may be imposed e.g., data transfer may require high throughput while the others, whose concern is voice communications, may require low call blocking probability. For these reasons, future telecommunications networks are expected to be highly complex in their services and operations. Due to this growing complexity and the disparity among management systems for individual sub-networks, efficient network management systems have become critical to the current and future success of telecommunications companies. This paper addresses a research and development effort which focuses on prototyping configuration management, since that is the central process of network management and all other network management functions must be built upon it. Our prototype incorporates ergonomically designed graphical user interfaces tailored to the network configuration management subsystem and to the proposed advanced object-oriented database structure. The resulting design concept follows open standards such as Open Systems Interconnection (OSI) and incorporates object oriented programming methodology to associate data with functions, permit customization, and provide an open architecture environment. {\textcopyright} 1995 American Institute of Physics}, isbn = {0094243X}, doi = {doi:10.1063/1.47255}, url = {http://proceedings.aip.org/resource/2/apcpcs/325/1/75_1?isAuthorized=no}, author = {Baras,John S and Atallah,George C and Ball,Mike and Goli,Shravan and Karne,Ramesh K and Kelley,Steve and Kumar,Harsha and Plaisant, Catherine and Roussopoulos, Nick and Shneiderman, Ben and Srinivasarao,Mulugu and Stathatos,Kosta and Teittinen,Marko and Whitefield,David} } @conference {17316, title = {Organization overviews and role management: inspiration for future desktop environments}, booktitle = {Proceedings of the Fourth Workshop on Enabling Technologies: Infrastructure for Collaborative Enterprises, 1995}, year = {1995}, month = {1995/04/20/22}, pages = {14 - 22}, publisher = {IEEE}, organization = {IEEE}, abstract = {In our exploration of future work environments for the World Bank we proposed two concepts. First, organization overviews provide a consistent support to present the results of a variety of manual or semi-automated searches. Second, this view can be adapted or expanded for each class of users to finally map the multiple personal roles an individual has in an organization. After command line interfaces, graphical user interfaces, and the current {\textquotedblleft}docu-centric{\textquotedblright} designs, a natural direction is towards a role-centered approach where we believe the emphasis is on the management of those multiple roles. Large visual overviews of the organization can be rapidly manipulated and zoomed in on to reveal the multiple roles each individual plays. Each role involves coordination with groups of people and accomplishment of tasks within a schedule}, keywords = {Asia, bank data processing, Databases, Environmental economics, Environmental management, future desktop environments, Graphical user interfaces, human resource management, management information systems, Management training, multiple personal roles, office automation, organization overviews, personnel, Project management, Prototypes, role management, role-centered approach, scheduling, semi-automated searches, User interfaces, Utility programs, World Bank}, isbn = {0-8186-7019-3}, doi = {10.1109/ENABL.1995.484544}, author = {Plaisant, Catherine and Shneiderman, Ben} } @article {16012, title = {Papers on Context: Theory and Practice}, journal = {Fundamenta Informaticae}, volume = {23}, year = {1995}, month = {1995///}, pages = {145 - 148}, author = {Perlis, Don} } @article {16377, title = {Parametric dispatching of hard real-time tasks}, journal = {IEEE Transactions on Computers}, volume = {44}, year = {1995}, month = {1995/03//}, pages = {471 - 479}, isbn = {00189340}, doi = {10.1109/12.372041}, url = {http://dl.acm.org/citation.cfm?id=626999}, author = {Gerber,R.
and Pugh, William and Saksena,M.} } @article {16207, title = {Query Previews in Networked Information Systems}, journal = {Institute for Systems Research Technical Reports}, year = {1995}, month = {1995/10//}, abstract = {In a networked information system, there are three major obstacles facing users in a querying process: network performance, data volume and data complexity. In order to overcome these obstacles, we propose a two-phase approach to dynamic query formulation by volume preview. The two phases are the Query Preview and Query Refinement. In the Query Preview phase, users formulate an initial query by selecting desired attribute values. The volume of matching data sets is shown graphically on preview bars which aid users to rapidly eliminate undesired data sets, and focus on a manageable number of relevant data sets. Query previews also prevent wasted steps by eliminating zero-hit queries. When the estimated number of data sets is low enough, the initial query is submitted to the network, which returns the metadata of the data sets for further refinement in the Query Refinement phase. The two-phase approach to query formulation overcomes slow network performance, and reduces the data volume and data complexity problems. This approach is especially appropriate for users who prefer the exploratory method to discover data patterns and exceptions during the query formulation process. Using this approach, we have developed dynamic query user interfaces to allow users to formulate their queries across a networked environment.}, author = {Doan,K. and Plaisant, Catherine and Shneiderman, Ben} } @article {13679, title = {The representation of document structure: A generic object-process approach}, volume = {CAR-TR-785}, year = {1995}, month = {1995///}, institution = {University of Maryland, College Park}, author = {Dori,D. and David Doermann and Shin,C. and Haralick,R. and Phillips,I. and Buchman,M. and Ross,D.} } @article {18426, title = {SEL{\textquoteright}s software process improvement program}, journal = {Software, IEEE}, volume = {12}, year = {1995}, month = {1995/11//}, pages = {83 - 87}, abstract = {We select candidates for process change on the basis of quantified Software Engineering Laboratory (SEL) experiences and clearly defined goals for the software. After we select the changes, we provide training and formulate experiment plans. We then apply the new process to one or more production projects and take detailed measurements. We assess process success by comparing these measures with the continually evolving baseline. Based upon the results of the analysis, we adopt, discard, or revise the process}, keywords = {SEL;Software Engineering Laboratory;software engineering;software process improvement program;continually evolving baseline;detailed measurements;experiment plans;production projects;training}, isbn = {0740-7459}, doi = {10.1109/52.469763}, author = {Basili, Victor R. and Zelkowitz, Marvin V and McGarry,F. and Page,J. and Waligora,S. and Pajerski,R.} } @article {17388, title = {Survival of the fittest: the evolution of multimedia user interfaces}, journal = {ACM Computing Surveys}, volume = {27}, year = {1995}, month = {1995/12//}, pages = {557 - 559}, isbn = {0360-0300}, doi = {10.1145/234782.234789}, url = {http://doi.acm.org/10.1145/234782.234789}, author = {Preece,Jenny and Shneiderman, Ben} } @article {15985, title = {Thinking takes time: a modal active-logic for reasoning in time}, journal = {Proc.
of BISFAI-95}, year = {1995}, month = {1995///}, author = {Nirkhe,M. and Kraus,S. and Perlis, Don} } @inbook {14644, title = {Towards a computational theory of genome rearrangements}, booktitle = {Computer Science Today}, series = {Lecture Notes in Computer Science}, volume = {1000}, year = {1995}, month = {1995///}, pages = {184 - 202}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Analysis of genome rearrangements in molecular biology started in the late 1930{\textquoteright}s, when Dobzhansky and Sturtevant published a milestone paper presenting a rearrangement scenario with 17 inversions for the species of Drosophila. However, until recently there were no computer science results allowing a biologist to analyze genome rearrangements. The paper describes combinatorial problems motivated by genome rearrangements, surveys recently developed algorithms for genomic sequence comparison and presents applications of these algorithms to analyze rearrangements in herpes viruses, plant organelles, and mammalian chromosomes.}, isbn = {978-3-540-60105-0}, url = {http://dx.doi.org/10.1007/BFb0015244}, author = {Hannenhalli, Sridhar and Pevzner,Pavel}, editor = {van Leeuwen,Jan} } @conference {14647, title = {Transforming cabbage into turnip: polynomial algorithm for sorting signed permutations by reversals}, booktitle = {Proceedings of the twenty-seventh annual ACM symposium on Theory of computing}, series = {STOC {\textquoteright}95}, year = {1995}, month = {1995///}, pages = {178 - 189}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-718-9}, doi = {10.1145/225058.225112}, url = {http://doi.acm.org/10.1145/225058.225112}, author = {Hannenhalli, Sridhar and Pevzner,Pavel} }
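The elementary move in the reversal-sorting entry above (and in the genomic-distance entry that follows) is the signed reversal: flip a segment of a signed permutation and negate every sign inside it. A sketch of the operation only; the polynomial-time sorting algorithm itself rests on the Hannenhalli-Pevzner duality theorem over the breakpoint graph, which is not shown:

```python
def reversal(perm, i, j):
    """Apply the signed reversal rho(i, j): reverse perm[i:j] in place
    and negate the sign of every element in the flipped block."""
    perm[i:j] = [-x for x in reversed(perm[i:j])]
    return perm

p = [+3, +1, -2, +4]
print(reversal(p, 0, 3))   # [+2, -1, -3, +4]
print(reversal(p, 0, 2))   # [+1, -2, -3, +4]: two reversals toward identity
```

Sorting means finding a shortest sequence of such reversals transforming one signed permutation into another, which these papers show is computable in polynomial time.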
@conference {14565, title = {Transforming men into mice (polynomial algorithm for genomic distance problem)}, booktitle = {Foundations of Computer Science, Annual IEEE Symposium on}, year = {1995}, month = {1995///}, pages = {581 - 581}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {Many people believe that transformations of humans into mice happen only in fairy tales. However, despite some differences in appearance and habits, men and mice are genetically very similar. In the pioneering paper, J.H. Nadeau and B.A. Taylor (1984) estimated that surprisingly few genomic rearrangements (178 {\textpm} 39) happened since the divergence of human and mouse 80 million years ago. However, their analysis is nonconstructive and no rearrangement scenario for human-mouse evolution has been suggested yet. The problem is complicated by the fact that rearrangements in multichromosomal genomes include inversions, translocations, fusions and fissions of chromosomes, a rather complex set of operations. As a result, at first glance, a polynomial algorithm for the genomic distance problem with all these operations looks almost as improbable as the transformation of a (real) man into a (real) mouse. We prove a duality theorem which expresses the genomic distance in terms of easily computable parameters reflecting different combinatorial properties of sets of strings. This theorem leads to a polynomial time algorithm for computing most parsimonious rearrangement scenarios. Based on this result and the latest comparative physical mapping data we have constructed a scenario of human-mouse evolution with 131 reversals/translocations/fusions/fissions. A combination of the genome rearrangement algorithm with the recently proposed experimental technique called ZOO FISH suggests a new constructive approach to the 100 year old problem of reconstructing mammalian evolution.}, keywords = {biology computing, combinatorial properties, comparative physical mapping data, computable parameters, duality (mathematics), duality theorem, evolution (biological), Genetics, genome rearrangement algorithm, genomic distance problem, genomic rearrangements, human-mouse evolution, mammalian evolution, multichromosomal genomes, parsimonious rearrangement scenarios, pattern matching, polynomial algorithm, polynomial time algorithm, set theory, sorting, string matching, strings, zoo fish}, doi = {http://doi.ieeecomputersociety.org/10.1109/SFCS.1995.492588}, author = {Hannenhalli, Sridhar and Pevzner,P.A.} } @conference {16376, title = {A unifying framework for iteration reordering transformations}, booktitle = {Algorithms and Architectures for Parallel Processing, 1995. ICAPP 95. IEEE First ICA$^{3}$PP, IEEE First International Conference on}, volume = {1}, year = {1995}, month = {1995///}, pages = {153 - 162}, author = {Kelly,W. and Pugh, William} } @article {15992, title = {Calibrating, Counting, Grounding, Grouping}, volume = {FS-94-03}, year = {1994}, month = {1994///}, institution = {Association for the Advancement of Artificial Intelligence}, author = {Elgot-drapkin,J. and Gordon,D. and Kraus,S. and Miller,M. and Nirkhe,M. and Perlis, Don} } @article {13742, title = {Concept-based lexical selection}, journal = {Proceedings of the AAAI-94 Fall Symposium on Knowledge Representation for Natural Language Processing in Implemented Systems}, year = {1994}, month = {1994///}, author = {Dorr, Bonnie J and Voss,C. and Peterson,E. and Kiker,M.} } @article {16350, title = {Counting solutions to Presburger formulas: how and why}, journal = {ACM SIGPLAN Notices}, volume = {29}, year = {1994}, month = {1994///}, pages = {121 - 134}, author = {Pugh, William} } @conference {16214, title = {Dynamaps: dynamic queries on a health statistics atlas}, booktitle = {Conference companion on Human factors in computing systems}, series = {CHI {\textquoteright}94}, year = {1994}, month = {1994///}, pages = {439 - 440}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-651-4}, doi = {10.1145/259963.260438}, url = {http://doi.acm.org/10.1145/259963.260438}, author = {Plaisant, Catherine and Jain,Vinit} } @conference {15941, title = {An Error-Theory of Consciousness}, booktitle = {MARYLAND COMPUTER SCIENCE}, year = {1994}, month = {1994///}, author = {Perlis, Don} } @conference {16335, title = {An experiment to assess different defect detection methods for software requirements inspections}, booktitle = {Proceedings of the 16th international conference on Software engineering}, year = {1994}, month = {1994///}, pages = {103 - 112}, author = {Porter, Adam and Votta,L. G.} } @inbook {16213, title = {The future of graphic user interfaces: Personal role managers}, booktitle = {People and Computers}, year = {1994}, month = {1994///}, pages = {444 - 444}, publisher = {Cambridge University Press}, organization = {Cambridge University Press}, isbn = {9780521485579}, author = {Shneiderman, Ben and Plaisant, Catherine} } @conference {17876, title = {High performance computing for land cover dynamics}, booktitle = {Pattern Recognition, 1994. Vol.
3-Conference C: Signal Processing, Proceedings of the 12th IAPR International Conference on}, year = {1994}, month = {1994///}, pages = {234 - 238}, author = {Parulekar,R. and Davis, Larry S. and Chellapa, Rama and Saltz, J. and Sussman, Alan and Townshend,J.} } @article {16623, title = {High-specificity neurological localization using a connectionist model}, journal = {Artificial Intelligence in Medicine}, volume = {6}, year = {1994}, month = {1994///}, pages = {521 - 532}, author = {Tuhrim,S. and Reggia, James A. and Peng,Y.} } @article {16215, title = {Image Browsers: Taxonomy, Guidelines, and Informal Specifications}, journal = {Institute for Systems Research Technical Reports}, year = {1994}, month = {1994///}, abstract = {Image browsing is necessary in numerous applications. Designers have merely used two one-dimensional scroll bars or they have made ad hoc designs for a two-dimensional scroll bar. However, the complexity of two-dimensional browsing suggests that more careful analysis, design, and evaluation might lead to significant improvements. We present a task taxonomy for image browsing, suggest design features and guidelines, assess existing strategies, and introduce an informal specification}, keywords = {Graphics, Systems Integration, user interface}, url = {http://drum.lib.umd.edu/handle/1903/5591}, author = {Plaisant, Catherine and Carr,David A and Shneiderman, Ben} } @conference {15948, title = {Meta-languages, reflection principles and self-reference}, booktitle = {Handbook of logic in artificial intelligence and logic programming}, year = {1994}, month = {1994///}, pages = {323 - 358}, author = {Perlis, Don and V.S. Subrahmanian} } @article {16217, title = {Next Generation Network Management Technology}, year = {1994}, month = {1994///}, abstract = {Today{\textquoteright}s telecommunications networks are becoming increasingly large, complex, mission critical and heterogeneous in several dimensions. For example, the underlying physical transmission facilities of a given network may be {\textquotedblleft}mixed media{\textquotedblright} (copper, fiber-optic, radio, and satellite); the sub networks may be acquired from different vendors due to economic, performance, or general availability reasons; the information being transmitted over the network may be {\textquotedblleft}multimedia{\textquotedblright} (video, data, voice, and images) and, finally, varying performance criteria may be imposed e.g. data transfer may require high throughput while the others, whose concern is voice communications, may require low call blocking probability. For these reasons, future telecommunications networks are expected to be highly complex in their services and operations. Due to this growing complexity and the disparity among management systems for individual sub networks, efficient network management systems have become critical to the current and future success of telecommunications companies. This paper addresses a research and development effort which focuses on prototyping configuration management, since that is the central process of network management and all other network management functions must be built upon it. Our prototype incorporates ergonomically designed graphical user interfaces tailored to the network configuration management subsystem and to the proposed advanced object-oriented database structure.
The resulting design concept follows open standards such as Open Systems Interconnection (OSI) and incorporates object oriented programming methodology to associate data with functions, permit customization, and provide an open architecture environment. }, keywords = {Constraints for Network Management., Network Configuration Management, network management, Object Oriented Data Base Model for Network Management, Rules, Systems Integration, Visual Information Management for Network Configuration Management}, url = {http://drum.lib.umd.edu/handle/1903/5519}, author = {Atallah,George C and Ball,Michael O and Baras,John S and Goli,Shravan K and Karne,Ramesh K and Kelley,Stephen and Kumar,Harsha P. and Plaisant, Catherine and Roussopoulos, Nick and Shneiderman, Ben and Srinivasarao,Mulugu and Stathatos,Kostas and Teittinen,Marko and Whitefield,David} } @inbook {18049, title = {On a parallel-algorithms method for string matching problems (overview)}, booktitle = {Algorithms and Complexity}, series = {Lecture Notes in Computer Science}, volume = {778}, year = {1994}, month = {1994///}, pages = {22 - 32}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, keywords = {Computer science}, isbn = {978-3-540-57811-6}, url = {http://dx.doi.org/10.1007/3-540-57811-0_3}, author = {Sahinalp,Suleyman and Vishkin, Uzi}, editor = {Bonuccelli,M. and Crescenzi,P. and Petreschi,R.} } @article {16365, title = {Simplifying polynomial constraints over integers to make dependence analysis more precise}, journal = {Parallel Processing: CONPAR 94{\textemdash}VAPP VI}, year = {1994}, month = {1994///}, pages = {737 - 748}, author = {Maslov,V. and Pugh, William} } @article {18432, title = {Software Process Improvement in the NASA Software Engineering Laboratory.}, year = {1994}, month = {1994/12//}, institution = {CARNEGIE-MELLON UNIV PITTSBURGH PA SOFTWARE ENGINEERING INSTITUTE}, abstract = {The Software Engineering Laboratory (SEL) was established in 1976 for the purpose of studying and measuring software processes with the intent of identifying improvements that could be applied to the production of ground support software within the Flight Dynamics Division (FDD) at the National Aeronautics and Space Administration (NASA)/Goddard Space Flight Center (GSFC). The SEL has three member organizations: NASA/GSFC, the University of Maryland, and Computer Sciences Corporation (CSC).
The concept of process improvement within the SEL focuses on the continual understanding of both process and product as well as goal-driven experimentation and analysis of process change within a production environment.}, keywords = {*AWARDS, *SOFTWARE ENGINEERING, *SYSTEMS ANALYSIS, *WORK MEASUREMENT, ADMINISTRATION AND MANAGEMENT, COMPUTER PROGRAMMING AND SOFTWARE, COMPUTER PROGRAMS, COMPUTERS, data acquisition, ENVIRONMENTS, EXPERIMENTAL DATA, GROUND SUPPORT., measurement, Organizations, PE63756E, Production, SOFTWARE PROCESS IMPROVEMENT, SPI(SOFTWARE PROCESS IMPROVEMENT), SPN-19950120014}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA289912}, author = {McGarry,Frank and Pajerski,Rose and Page,Gerald and Waligora,Sharon and Basili, Victor R.} } @inbook {18691, title = {Software reliability analysis of three successive generations of a Switching System}, booktitle = {Dependable Computing {\textemdash} EDCC-1}, series = {Lecture Notes in Computer Science}, volume = {852}, year = {1994}, month = {1994///}, pages = {471 - 490}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Most current approaches to software reliability evaluation are based on data collected on a single generation of products. However, many applications are developed through improvements of the existing software: to the families of products are added various generations as the need for new functionalities arises. Experimental studies dealing with the analysis of data collected on families of products are seldom reported. In this paper, we analyze the data (failure and correction reports) collected on the software of three successive generations of the Brazilian Switching System {\textemdash} TROPICO-R, during validation and operation. A comparative analysis of the three products is done and the main results are outlined. Emphasis is placed on the evolution of the software and the corresponding failures and corrected faults. The analysis addresses: i) the modifications introduced on system components, ii) the distribution of failures and corrected faults in the components and the functions fulfilled by the system, and iii) the evolution of the failure intensity functions.}, keywords = {Computer science}, isbn = {978-3-540-58426-1}, url = {http://www.springerlink.com/content/f71g30v177521471/abstract/}, author = {Ka{\^a}niche,M. and Kanoun,K. and Michel Cukier and Martini,M.}, editor = {Echtle,Klaus and Hammer,Dieter and Powell,David} } @book {18433, title = {Software Specification: A Comparison of Formal Methods}, year = {1994}, month = {1994///}, publisher = {Intellect Books}, organization = {Intellect Books}, keywords = {Computer software, Computer software/ Specifications, Computers / General, Computers / Programming / General, Formal methods (Computer science), software engineering, specifications}, isbn = {9781567500332}, author = {Gannon,John D. and Purtilo,James and Zelkowitz, Marvin V} } @article {16358, title = {Static analysis of upper and lower bounds on dependences and parallelism}, journal = {ACM Transactions on Programming Languages and Systems},
volume = {16}, year = {1994}, month = {1994/07//}, pages = {1248 - 1278}, isbn = {01640925}, doi = {10.1145/183432.183525}, url = {http://dl.acm.org/citation.cfm?id=183525}, author = {Pugh, William and Wonnacott,David} } @conference {16316, title = {Tool support for tailored software prototyping}, booktitle = {Assessment of Quality Software Development Tools, 1994, Proceedings., Third Symposium on}, year = {1994}, month = {1994///}, pages = {171 - 181}, author = {Chen,C. and Porter, Adam and Purtilo,J.} } @article {16216, title = {Usability Experiments for the Redesign of a Telepathology Workstation}, volume = {CS-TR-3270}, year = {1994}, month = {1994///}, institution = {Technical Report CS-TR-3270, University of Maryland}, abstract = {Dynamic telepathology uses a remotely controlled microscope to allow a pathologist to view samples at a remote location. However, time delays introduced by remote operation have made use of a commercial dynamic telepathology system difficult and frustrating. This paper describes experiments to evaluate and redesign the user interface. We also make recommendations for further automation to support the pathology process and increase the usefulness of the system. }, author = {Carr,D. and Plaisant, Catherine and Hasegawa,H.} } @conference {15967, title = {What Experts Deny, Novices Must Understand}, booktitle = {In 3rd International Workshop on Human and Machine Cognition}, year = {1994}, month = {1994///}, author = {Miller,M. and Perlis, Don} } @article {12780, title = {The concurrency workbench: a semantics-based tool for the verification of concurrent systems}, journal = {ACM Transactions on Programming Languages and Systems}, volume = {15}, year = {1993}, month = {1993/01/01/}, pages = {36 - 72}, isbn = {01640925}, doi = {10.1145/151646.151648}, url = {http://dl.acm.org/citation.cfm?id=151648}, author = {Cleaveland, Rance and Parrow,Joachim and Steffen,Bernhard} } @article {16608, title = {A connectionist approach to diagnostic problem solving using causal networks}, journal = {Information sciences}, volume = {70}, year = {1993}, month = {1993///}, pages = {27 - 48}, author = {Reggia, James A. and Peng,Y. and Tuhrim,S.} }
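The next entry presents a distributed ear-decomposition algorithm. As background: a connected undirected graph admits an ear decomposition exactly when it is 2-edge-connected, i.e., has no bridges, so a sequential DFS bridge test is a simple centralized stand-in for the paper's O(m)-message existence check. A sketch for small simple graphs (recursive, so not suitable for deep graphs as written):

```python
def has_ear_decomposition(adj):
    """adj: dict vertex -> set of neighbours. True iff the graph is
    connected and bridgeless (equivalently, has an ear decomposition)."""
    disc, low, bridges = {}, {}, []
    def dfs(v, parent, t=[0]):
        disc[v] = low[v] = t[0]; t[0] += 1
        for u in adj[v]:
            if u not in disc:
                dfs(u, v)
                low[v] = min(low[v], low[u])
                if low[u] > disc[v]:       # no back edge over (v, u): a bridge
                    bridges.append((v, u))
            elif u != parent:
                low[v] = min(low[v], disc[u])
    dfs(next(iter(adj)), None)
    return len(disc) == len(adj) and not bridges

triangle = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
path     = {0: {1}, 1: {0, 2}, 2: {1}}
print(has_ear_decomposition(triangle), has_ear_decomposition(path))  # True False
```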
@conference {14576, title = {A distributed algorithm for ear decomposition}, booktitle = {Fifth International Conference on Computing and Information, 1993. Proceedings ICCI {\textquoteright}93}, year = {1993}, month = {1993/05/27/29}, pages = {180 - 184}, publisher = {IEEE}, organization = {IEEE}, abstract = {A distributed algorithm for finding an ear decomposition of an asynchronous communication network with n nodes and m links is presented. At the completion of the algorithm either the ears are correctly labeled or the nodes are informed that there exists no ear decomposition. First we present a novel algorithm to check the existence of an ear decomposition which uses O(m) messages. We also present two other algorithms, one which is time-optimal and the other which is message-optimal to determine the actual ears and their corresponding numbers after determining the existence of an ear decomposition}, keywords = {Asynchronous communication, asynchronous communication network, Automata, Communication networks, computational complexity, Computer networks, Computer science, decomposition graph, distributed algorithm, distributed algorithms, Distributed computing, Ear, ear decomposition, graph theory, message-optimal, network decomposition, sorting, Testing, time-optimal}, isbn = {0-8186-4212-2}, doi = {10.1109/ICCI.1993.315382}, author = {Hannenhalli, Sridhar and Perumalla,K. and Chandrasekharan,N. and Sridhar,R.} } @inbook {16218, title = {Evaluating three museum installations of a hypertext system}, booktitle = {Sparks of innovation in human-computer interaction}, volume = {40}, year = {1993}, month = {1993///}, pages = {404 - 404}, publisher = {Intellect Books}, organization = {Intellect Books}, isbn = {9781567500783}, author = {Shneiderman, Ben and Brethauer,D. and Plaisant, Catherine and Potter,R.} } @conference {16220, title = {Exploring remote images: a telepathology workstation}, booktitle = {Proceedings of the INTERACT {\textquoteright}93 and CHI {\textquoteright}93 conference on Human factors in computing systems}, series = {CHI {\textquoteright}93}, year = {1993}, month = {1993///}, pages = {518}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-575-5}, doi = {10.1145/169059.169488}, url = {http://doi.acm.org/10.1145/169059.169488}, author = {Plaisant, Catherine and Carr,David A and Hasegawa,Hiroaki} } @conference {16219, title = {Facilitating Data Exploration: Dynamic Queries On A Health Statistics Map}, booktitle = {Proc. of the Government Statistics Section, Annual Meeting of the American Statistical Assoc. Conf. Proc, pg}, year = {1993}, month = {1993///}, pages = {18 - 23}, abstract = {: Users with no specialized computer training are often discouraged by the complex syntax of query languages and the output of long tables of alphanumerical values. The Human-Computer Interaction Laboratory has recently developed the concept of dynamic queries which allows user control of animated visual displays of information. Experiments with our first applications have shown that dynamic queries can help reveal trends or global properties as well as assist users in answering specific questions. We present a new application developed with the National Center for Health Statistics and running on a simple PC. A thematic map of the United States is animated by adjusting sliders displayed on the side of the map. A time slider illustrates time trends. The other sliders control the filtering out of areas of the map according to parameters such as demographics. Detailed data about a particular area is obtained by clicking directly on its location on the map. We have received encouraging feedback...}, author = {Plaisant, Catherine and Laboratory,Human-computer Interaction} } @conference {15659, title = {Fast search algorithms with applications to split and multi-stage vector quantization of speech LSP parameters}, booktitle = {Speech Coding for Telecommunications, 1993. Proceedings., IEEE Workshop on}, year = {1993}, month = {1993///}, pages = {65 - 66}, doi = {10.1109/SCFT.1993.762341}, author = {Arya,S. and Phamdo,N. and Farvardin,N.
and Mount, Dave} } @inbook {16222, title = {Guide to Opportunities in Volunteer Archaeology: case study on the use of a hypertext system in a museum exhibit}, booktitle = {Sparks of innovation in human-computer interaction}, year = {1993}, month = {1993///}, pages = {223 - 223}, publisher = {Intellect Books}, organization = {Intellect Books}, isbn = {9781567500783}, author = {Plaisant, Catherine} } @article {17225, title = {Improving the accuracy of touchscreens: an experimental evaluation of three strategies}, journal = {Sparks of innovation in human-computer interaction}, year = {1993}, month = {1993///}, pages = {161 - 161}, author = {Potter,R.L. and Weldon,L.J. and Shneiderman, Ben} } @inbook {15764, title = {Iterative Methods for Finding the Stationary Vector for Markov Chains}, booktitle = {Linear Algebra, Markov Chains, and Queuing Models}, volume = {48}, year = {1993}, month = {1993///}, pages = {125 - 136}, publisher = {Springer-Verlag IMA Volumes in Math. and Its Applics.}, organization = {Springer-Verlag IMA Volumes in Math. and Its Applics.}, address = {New York}, author = {O{\textquoteright}Leary, Dianne P.}, editor = {Meyer,Carl and Plemmons,Robert} }
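The stationary vector pi of the entry just above satisfies pi P = pi for a row-stochastic transition matrix P. Plain power iteration is the bluntest member of the family of iterative methods such a survey covers; a self-contained sketch with a made-up two-state chain:

```python
# Power iteration for the stationary distribution of an ergodic chain.
P = [[0.9, 0.1],
     [0.5, 0.5]]          # row-stochastic transition matrix (made up)

pi = [1.0, 0.0]           # any probability vector works as a start
for _ in range(100):      # pi <- pi P until convergence
    pi = [sum(pi[i] * P[i][j] for i in range(2)) for j in range(2)]

print(pi)                 # ~[0.8333, 0.1667]; indeed pi P = pi here
```

For this chain the exact answer is pi = (5/6, 1/6), which the iteration approaches geometrically at the rate of the second eigenvalue (0.4 here); the more sophisticated methods exist precisely for chains where that rate is close to 1.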
@article {16556, title = {Local conditions for phase transitions in neural networks with variable connection strengths}, journal = {Neural networks}, volume = {6}, year = {1993}, month = {1993///}, pages = {667 - 676}, author = {McFadden,F. E and Peng,Y. and Reggia, James A.} } @conference {15964, title = {Logic and Artificial Intelligence: A New Synthesis?}, booktitle = {ANNALES-SOCIETATIS MATHEMATICAE POLONAE SERIES 4}, volume = {18}, year = {1993}, month = {1993///}, pages = {297 - 297}, author = {Perlis, Don} } @article {16366, title = {A partial evaluator for the Maruti hard real-time system}, journal = {Real-Time Systems}, volume = {5}, year = {1993}, month = {1993///}, pages = {13 - 30}, author = {Nirkhe,V. and Pugh, William} } @article {15554, title = {Point probe decision trees for geometric concept classes}, journal = {Algorithms and Data Structures}, year = {1993}, month = {1993///}, pages = {95 - 106}, abstract = {A fundamental problem in model-based computer vision is that of identifying to which of a given set of concept classes of geometric models an observed model belongs. Considering a {\textquotedblleft}probe{\textquotedblright} to be an oracle that tells whether or not the observed model is present at a given point in an image, we study the problem of computing efficient strategies ({\textquotedblleft}decision trees{\textquotedblright}) for probing an image, with the goal to minimize the number of probes necessary (in the worst case) to determine in which class the observed model belongs. We prove a hardness result and give strategies that obtain decision trees whose height is within a log factor of optimal. These results grew out of discussions that began in a series of workshops on Geometric Probing in Computer Vision, sponsored by the Center for Night Vision and Electro-Optics, Fort Belvoir, Virginia, and monitored by the U.S. Army Research Office. The views, opinions, and/or findings contained in this report are those of the authors and should not be construed as an official Department of the Army position, policy, or decision, unless so designated by other documentation. }, doi = {10.1007/3-540-57155-8_239}, author = {Arkin,E. and Goodrich,M. and Mitchell,J. and Mount, Dave and Piatko,C. and Skiena,S.} } @conference {16007, title = {Presentations and this and that: logic in action}, booktitle = {Proceedings of the 15th Annual Conference of the Cognitive Science Society, Boulder, Colorado}, volume = {251}, year = {1993}, month = {1993///}, author = {Miller,M. and Perlis, Don} } @inbook {16223, title = {Remote direct manipulation: A case study of a telemedicine workstation}, booktitle = {Sparks of innovation in human-computer interaction}, year = {1993}, month = {1993///}, pages = {51 - 51}, publisher = {Intellect Books}, organization = {Intellect Books}, isbn = {9781567500783}, author = {Keil-Slawik,R. and Plaisant, Catherine and Shneiderman, Ben} } @inbook {16221, title = {Scheduling home control devices: a case study of the transition from the research project to a product}, booktitle = {Sparks of innovation in human-computer interaction}, year = {1993}, month = {1993///}, pages = {205 - 205}, publisher = {Intellect Books}, organization = {Intellect Books}, isbn = {9781567500783}, author = {Plaisant, Catherine and Shneiderman, Ben and Battaglia,J.} } @article {16513, title = {Simple systems that exhibit template-directed replication}, journal = {Science}, volume = {259}, year = {1993}, month = {1993///}, pages = {1282 - 1287}, author = {Reggia, James A. and Armentrout,S. L and Chou,H. H. and Peng,Y.} } @conference {16020, title = {Situated reasoning within tight deadlines and realistic space and computation bounds}, booktitle = {Proc. Common Sense}, volume = {93}, year = {1993}, month = {1993///}, author = {Nirkhe,M. and Kraus,S. and Perlis, Don} } @article {17377, title = {Speech versus mouse commands for word processing: an empirical evaluation}, journal = {International Journal of Man-Machine Studies}, volume = {39}, year = {1993}, month = {1993/10//}, pages = {667 - 687}, abstract = {Despite advances in speech technology, human factors research since the late 1970s has provided only weak evidence that automatic speech recognition devices are superior to conventional input devices such as keyboards and mice. However, recent studies indicate that there may be advantages to providing an additional input channel based on speech input to supplement the more common input modes. Recently the authors conducted an experiment to demonstrate the advantages of using speech-activated commands over mouse-activated commands for word processing applications when, in both cases, the keyboard is used for text entry and the mouse for direct manipulation. Sixteen experimental subjects, all professionals and all but one novice users of speech input, performed four simple word processing tasks using both input groups in this counterbalanced experiment. Performance times for all tasks were significantly faster when using speech to activate commands as opposed to using the mouse. On average, the reduction in task time due to using speech was 18{\textperiodcentered}7\%. The error rates due to subject mistakes were roughly the same for both input groups, and recognition errors, averaged over all the tasks, occurred for 6{\textperiodcentered}3\% of the speech-activated commands. Subjects made significantly more memorization errors when using speech as compared with the mouse for command activation.
Overall, the subjects reacted positively to using speech input and preferred it over the mouse for command activation; however, they also voiced concerns about recognition accuracy, the interference of background noise, inadequate feedback and slow response time. The authors believe that the results of the experiment provide evidence for the utility of speech input for command activation in application programs.}, isbn = {0020-7373}, doi = {10.1006/imms.1993.1078}, url = {http://www.sciencedirect.com/science/article/pii/S0020737383710783}, author = {Karl,Lewis R. and Pettey,Michael and Shneiderman, Ben} } @article {16300, title = {Using measurement-driven modeling to provide empirical feedback to software developers}, journal = {Journal of Systems and Software}, volume = {20}, year = {1993}, month = {1993/03//}, pages = {237 - 243}, abstract = {Several authors have explored the application of classification methods to software development. These studies have concentrated on identifying modules that are difficult to develop or that have high fault density. While this information is important, it provides little help in determining appropriate corrective action. This article extends previous work by applying one classification method, classification tree analysis (CTA), to a more fine-grained problem routinely encountered by developers. In this article, we use CTA to identify software modules that have specific types of faults (e.g., logic, interface, etc.). We evaluate this approach using data collected from six actual software projects. Overall, CTA was able to correctly differentiate faulty modules from fault-free modules in 72\% of cases. Furthermore, 82\% of the faulty modules were correctly identified. We also show that CTA outperformed two simpler classification strategies.}, isbn = {0164-1212}, doi = {10.1016/0164-1212(93)90067-8}, url = {http://www.sciencedirect.com/science/article/pii/0164121293900678}, author = {Porter, Adam} } @conference {15977, title = {Vacuum-logic}, booktitle = {Proceedings of AAAI 1993 Fall Symposium Series: Instantiating Real-World Agents}, year = {1993}, month = {1993///}, pages = {51 - 54}, author = {Elgot-drapkin,J. and Kraus,S. and Miller,M. and Nirkhe,M. and Perlis, Don} } @article {16596, title = {A connectionist approach to vertex covering problems}, journal = {Int{\textquoteright}l J. Neural Systems}, volume = {3}, year = {1992}, month = {1992///}, pages = {43 - 56}, author = {Peng,Y. and Reggia, James A. and Li,T.} } @article {16380, title = {Definitions of dependence distance}, journal = {ACM Letters on Programming Languages and Systems}, volume = {1}, year = {1992}, month = {1992/09//}, pages = {261 - 265}, isbn = {10574514}, doi = {10.1145/151640.151645}, url = {http://dl.acm.org/citation.cfm?id=151645}, author = {Pugh, William} } @article {15623, title = {Delaunay triangulation and computational fluid dynamics meshes}, journal = {Proceedings of the 4th Canadian Conference on Computational Geometry}, year = {1992}, month = {1992///}, pages = {316 - 321}, abstract = {In aerospace computational fluid dynamics (CFD) calculations, the Delaunay triangulation of suitable quadrilateral meshes can lead to unsuitable triangulated meshes. Here, we present case studies which illustrate the limitations of using structured grid generation methods which produce points in a curvilinear coordinate system for subsequent triangulations for CFD applications.
We discuss conditions under which meshes of quadrilateral elements may not produce a Delaunay triangulation suitable for CFD calculations, particularly with regard to high aspect ratio, skewed quadrilateral elements.}, author = {Posenau,Mary-Anne K and Mount, Dave} } @article {16231, title = {The effects of time delays on a telepathology user interface.}, journal = {Proceedings of the Annual Symposium on Computer Application in Medical Care}, year = {1992}, month = {1992///}, pages = {256 - 260}, abstract = {Telepathology enables a pathologist to examine physically distant tissue samples by microscope operation over a communication link. Communication links can impose time delays which cause difficulties in controlling the remote device. Such difficulties were found in a microscope teleoperation system. Since the user interface is critical to the pathologist{\textquoteright}s acceptance of telepathology, we redesigned the user interface for this system and built two different versions (a keypad, whose movement commands were operated by specifying a start command followed by a stop command, and a trackball interface, whose movement commands were incremental and directly proportional to the rotation of the trackball). We then conducted a pilot study to determine the effect of time delays on the new user interfaces. In our experiment, the keypad was the faster interface when the time delay was short. There was no evidence to favor either the keypad or trackball when the time delay was longer. Inexperienced participants benefited from being able to move long distances over the microscope slide by dragging the field-of-view indicator on the touchscreen control panel. The experiment suggests that changes could be made to the trackball interface which would improve its performance.}, isbn = {0195-4210}, author = {Carr,D. and Hasegawa,H. and Lemmon,D. and Plaisant, Catherine} } @conference {16360, title = {Eliminating false data dependences using the Omega test}, booktitle = {Proceedings of the ACM SIGPLAN 1992 conference on Programming language design and implementation}, year = {1992}, month = {1992///}, pages = {140 - 151}, author = {Pugh, William and Wonnacott,D.} } @article {17956, title = {An environment projection approach to radiosity for mesh-connected computers}, journal = {Third Eurographics Workshop on Rendering}, year = {1992}, month = {1992///}, pages = {271 - 281}, author = {Varshney, Amitabh and Prins,J. F} } @conference {17597, title = {Fast randomized algorithms for distributed edge coloring}, booktitle = {Proceedings of the eleventh annual ACM symposium on Principles of distributed computing}, series = {PODC {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {251 - 262}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-495-3}, doi = {10.1145/135419.135465}, url = {http://doi.acm.org/10.1145/135419.135465}, author = {Panconesi,Alessandro and Srinivasan, Aravind} } @conference {18373, title = {An improved classification tree analysis of high cost modules based upon an axiomatic definition of complexity}, booktitle = {Third International Symposium on Software Reliability Engineering, 1992, Proceedings}, year = {1992}, month = {1992/10//}, pages = {164 - 172}, abstract = {Identification of high cost modules has been viewed as one mechanism to improve overall system reliability, since such modules tend to produce more than their fair share of problems.
A decision tree model has previously been used to identify such modules. In this paper, a previously developed axiomatic model of program complexity is merged with the previously developed decision tree process for an improvement in the ability to identify such modules. This improvement has been tested using data from the NASA Software Engineering Laboratory.}, keywords = {axiomatic definition; classification tree analysis; computational complexity; cost metrics; decision tree model; high cost modules; overall system reliability; program complexity; software reliability; subroutines; trees (mathematics)}, doi = {10.1109/ISSRE.1992.285848}, author = {Tian,Jianhui and Porter, Adam and Zelkowitz, Marvin V} } @conference {17611, title = {Improved distributed algorithms for coloring and network decomposition problems}, booktitle = {Proceedings of the twenty-fourth annual ACM symposium on Theory of computing}, series = {STOC {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {581 - 592}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-511-9}, doi = {10.1145/129712.129769}, url = {http://doi.acm.org/10.1145/129712.129769}, author = {Panconesi,Alessandro and Srinivasan, Aravind} } @article {16225, title = {A new era for high precision touchscreens}, journal = {Advances in human-computer interaction}, volume = {3}, year = {1992}, month = {1992///}, pages = {1 - 33}, abstract = {While many input devices allow interfaces to be customized, increased directness distinguishes touchscreens. Touchscreens are easy to learn to use, fast, and result in low error rates when interfaces are designed carefully. Many actions which are difficult with a mouse, joystick, or keyboard are simple when using a touchscreen. Making rapid selections at widely separated locations on the screen, signing your name, dragging the hands of a clock in a circular motion are all simple when using a touchscreen, but may be awkward using other devices. This paper presents recent empirical research which can provide a basis for theories of touchscreen usage. We believe recent improvements warrant increased use of touchscreens.}, author = {Sears,A. and Plaisant, Catherine and Shneiderman, Ben} } @article {16226, title = {A new era for touchscreen applications: High precision, dragging icons, and refined feedback}, journal = {Advances in Human-Computer Interaction}, volume = {3}, year = {1992}, month = {1992///}, author = {Sears,A. and Plaisant, Catherine and Shneiderman, Ben} } @article {16364, title = {The Omega test: a fast and practical integer programming algorithm for dependence analysis}, journal = {Communications of the ACM}, volume = {8}, year = {1992}, month = {1992///}, pages = {2 - 6}, author = {Pugh, William} } @article {17830, title = {PARTI primitives for unstructured and block structured problems}, journal = {Computing Systems in Engineering}, volume = {3}, year = {1992}, month = {1992///}, pages = {73 - 86}, abstract = {This paper describes a set of primitives (PARTI) developed to efficiently execute unstructured and block structured problems on distributed memory parallel machines. We present experimental data from a three-dimensional unstructured Euler solver run on the Intel Touchstone Delta to demonstrate the usefulness of our methods.}, isbn = {0956-0521}, doi = {10.1016/0956-0521(92)90096-2}, url = {http://www.sciencedirect.com/science/article/pii/0956052192900962}, author = {Sussman, Alan and Saltz, J. and Das,R. and Gupta,S. and Mavriplis,D.
and Ponnusamy,R. and Crowley,K.} } @conference {16383, title = {Partial evaluation of high-level imperative programming languages with applications in hard real-time systems}, booktitle = {Proceedings of the 19th ACM SIGPLAN-SIGACT symposium on Principles of programming languages}, year = {1992}, month = {1992///}, pages = {269 - 280}, author = {Nirkhe,V. and Pugh, William} } @conference {16228, title = {Remote manipulation interfaces: the case of a telepathology workstation}, booktitle = {Posters and short talks of the 1992 SIGCHI conference on Human factors in computing systems}, series = {CHI {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {65 - 65}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Telemedicine is the practice of medicine over communication links. The physician being consulted and the patient are in two different locations. A first telepathology system has been developed by Corabi Telemetrics. It allows a pathologist to render a diagnosis by examining tissue samples or body fluids under a remotely located microscope.}, doi = {10.1145/1125021.1125082}, url = {http://doi.acm.org/10.1145/1125021.1125082}, author = {Plaisant, Catherine and Carr,David A} } @article {16224, title = {Scheduling home control devices: design issues and usability evaluation of four touchscreen interfaces}, journal = {International Journal of Man-Machine Studies}, volume = {36}, year = {1992}, month = {1992/03//}, pages = {375 - 393}, abstract = {This article describes four different user interfaces supporting the scheduling of two-state (ON/OFF) devices over time periods ranging from minutes to days. The touchscreen-based user interfaces, including digital 12-h clock, 24-h linear and 24-h dial prototypes, are described and compared on a feature-by-feature basis. A formative usability test with 14 subjects, feedback from more than 30 reviewers, and the flexibility to add functions favour the 24-h linear version.}, isbn = {0020-7373}, doi = {10.1016/0020-7373(92)90040-R}, url = {http://www.sciencedirect.com/science/article/pii/002073739290040R}, author = {Plaisant, Catherine and Shneiderman, Ben} } @article {16650, title = {Simple systems exhibiting self-directed replication: annex of transition functions and software documentation}, year = {1992}, month = {1992///}, institution = {University of Maryland at College Park}, address = {College Park, MD, USA}, author = {Reggia, James A. and Chou,Hui-Hsien and Armentrout,Steven L.
and Peng,Yun} } @article {16230, title = {Touchscreen interfaces for alphanumeric data entry}, journal = {Human Factors and Ergonomics Society Annual Meeting Proceedings}, volume = {36}, year = {1992}, month = {1992///}, pages = {293 - 297}, author = {Plaisant, Catherine and Sears,A.} } @conference {16227, title = {Touchscreen toggle design}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems}, series = {CHI {\textquoteright}92}, year = {1992}, month = {1992///}, pages = {667 - 668}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-513-5}, doi = {10.1145/142750.143079}, url = {http://doi.acm.org/10.1145/142750.143079}, author = {Plaisant, Catherine and Wallace,Daniel} } @inbook {13719, title = {A two-level knowledge representation for machine translation: Lexical semantics and tense/aspect}, booktitle = {Lexical Semantics and Knowledge Representation}, series = {Lecture Notes in Computer Science}, volume = {627}, year = {1992}, month = {1992///}, pages = {269 - 287}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper proposes a two-level model that integrates contemporary theories of tense and aspect with lexical semantics. The model is intended to be extensible to realms outside of the temporal domain (e.g., the spatial domain). The integration of tense and aspect with lexical-semantics is especially critical in machine translation because of the lexical selection process during generation: there are often a number of lexical connective and tense/aspect possibilities that may be produced from a lexical semantic representation, which, as defined in the model presented here, is largely underspecified. Temporal/aspectual information from the source-language sentence constrains the choice of target-language terms. In turn, the target-language terms limit the possibilities for generation of tense and aspect. Thus, there is a two-way communication channel between the two processes.}, isbn = {978-3-540-55801-9}, url = {http://dx.doi.org/10.1007/3-540-55801-2_41}, author = {Dorr, Bonnie J}, editor = {Pustejovsky,James and Bergler,Sabine} } @article {16229, title = {When an Intermediate View Matters: a 2D-browser Experiment}, journal = {Institute for Systems Research Technical Reports}, year = {1992}, month = {1992///}, abstract = {The browsing of two dimensional images can be found in a large number of applications. When the image to be viewed is much larger than the screen available, a two dimensional browser has to be provided to allow users to access all parts of the image. We show the diversity of tasks and systems available and the need for 2D browser design guidelines. In the context of a microscope image browser, we investigate one common technique consisting of a global view of the whole image, coupled to a detailed, magnified view of part of the image. In particular we look at the benefits of providing an intermediate view when the detail-to-overview ratio is over 20:1.
Our experience is also a good example of a real world application for which added features and added hardware need to be justified.}, keywords = {Human computer interaction, Supervisory control, Systems Integration}, url = {http://drum.lib.umd.edu/handle/1903/5300}, author = {Plaisant, Catherine and Carr,David A and Hasegawa,Hiroaki} } @conference {15979, title = {Deadline-coupled real-time planning}, booktitle = {Innovative approaches to planning, scheduling and control: proceedings of a Workshop [on Innovative Approaches to Planning, Scheduling, and Control], held at San Diego, California, November 5-8, 1991}, year = {1991}, month = {1991///}, pages = {100 - 100}, author = {Kraus,S. and Nirkhe,M. and Perlis, Don} } @article {17589, title = {Efficient algorithms for the minimum weighted dominating clique problem on permutation graphs}, journal = {Theoretical Computer Science}, volume = {91}, year = {1991}, month = {1991/12/09/}, pages = {1 - 21}, abstract = {Given a graph G=(V, E) with real weights assigned to its vertices, a clique of G that also dominates its vertex set V is called a dominating clique (DC) of G. Given a permutation graph G with all its vertices having nonnegative weights, and its permutation representation, the problem addressed in this paper is that of finding any minimum weight DC of G. We improve the existing O(|V|^2) algorithm for this problem to O(|V| log |V|). The space complexity of our algorithm is O(|V|). We also present a |V| processor, O(log |V|) time, O(|V| log |V|) space parallel EREW PRAM algorithm for this problem.}, isbn = {0304-3975}, doi = {10.1016/0304-3975(91)90265-4}, url = {http://www.sciencedirect.com/science/article/pii/0304397591902654}, author = {Srinivasan, Aravind and Pandu Rangan,C.} } @article {15950, title = {Fully deadline-coupled planning: One step at a time}, journal = {Methodologies for Intelligent Systems}, year = {1991}, month = {1991///}, pages = {589 - 599}, author = {Nirkhe,M. and Kraus,S. and Perlis, Don} } @article {15971, title = {Memory, reason and time: the Step-Logic approach}, journal = {Philosophy and AI: Essays at the Interface}, year = {1991}, month = {1991///}, pages = {79 - 103}, author = {Elgot-drapkin,J. and Miller,M. and Perlis, Don} } @conference {16332, title = {Metric-driven analysis and feedback systems for enabling empirically guided software development}, booktitle = {Proceedings of the 13th international conference on Software engineering}, year = {1991}, month = {1991///}, pages = {288 - 298}, author = {Selby,R. W and Porter, Adam and Schmidt,D. C and Berney,J.} } @article {15925, title = {Nonmonotonicity and the scope of reasoning}, journal = {Artificial Intelligence}, volume = {52}, year = {1991}, month = {1991///}, pages = {221 - 261}, author = {Etherington,D. W and Kraus,S. and Perlis, Don} } @article {16233, title = {An overview of Hyperties, its user interface and data model}, journal = {Hypermedia/Hypertext And Object-Oriented Databases. Chapman \& Hall}, year = {1991}, month = {1991///}, pages = {17 - 31}, author = {Plaisant, Catherine} } @article {18986, title = {Polyadenylylation in copia requires unusually distant upstream sequences}, journal = {Proceedings of the National Academy of Sciences}, volume = {88}, year = {1991}, month = {1991/04/15/}, pages = {3038 - 3042}, abstract = {Retroviruses and related genetic elements generate terminally redundant RNA products by differential polyadenylylation within a long terminal repeat.
Expression of the white-apricot (wa) allele of Drosophila melanogaster, which carries an insertion of the 5.1-kilobase retrovirus-like transposable element copia in a small intron, is influenced by signals within copia. By using this indicator, we have isolated a 518-base-pair deletion, 312 base pairs upstream of the copia polyadenylylation site, that is phenotypically like much larger deletions and eliminates RNA species polyadenylylated in copia. This requirement of distant upstream sequences for copia polyadenylylation has implications for the expression of many genetic elements bearing long terminal repeats.}, isbn = {0027-8424, 1091-6490}, url = {http://www.pnas.org/content/88/8/3038}, author = {Kurkulos,M. and Weinberg,J. M. and Pepling,M. E. and Mount, Stephen M.} } @article {15966, title = {Putting One{\textquoteright}s Foot in One{\textquoteright}s Head{\textendash}Part I: Why}, journal = {No{\^u}s}, year = {1991}, month = {1991///}, pages = {435 - 455}, author = {Perlis, Don} } @article {15944, title = {Reasoning about ignorance: A note on the Bush-Gorbachev problem}, journal = {Fundam. Inform.}, volume = {15}, year = {1991}, month = {1991///}, pages = {325 - 332}, author = {Kraus,S. and Perlis, Don and Horty,J. F} } @article {16514, title = {Recent applications of competitive activation mechanisms}, journal = {Neural Networks: Advances and Applications}, year = {1991}, month = {1991///}, pages = {33 - 62}, author = {Reggia, James A. and Peng,Y. and Bourret,P.} } @conference {16232, title = {Scheduling on-off home control devices}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems: Reaching through technology}, series = {CHI {\textquoteright}91}, year = {1991}, month = {1991///}, pages = {459 - 460}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-383-3}, doi = {10.1145/108844.109002}, url = {http://doi.acm.org/10.1145/108844.109002}, author = {Plaisant, Catherine and Shneiderman, Ben} } @article {15980, title = {Stop the world{\textemdash}I want to think}, journal = {International Journal of Intelligent Systems}, volume = {6}, year = {1991}, month = {1991///}, pages = {443 - 456}, author = {Perlis, Don and Elgot-Drapkin,J. J and Miller,M.} } @article {15936, title = {Typicality constants and range defaults: Some pros and cons of a cognitive model of default reasoning}, journal = {Methodologies for Intelligent Systems}, year = {1991}, month = {1991///}, pages = {560 - 569}, author = {Miller,M. and Perlis, Don} } @conference {16373, title = {Uniform techniques for loop optimization}, booktitle = {Proceedings of the 5th international conference on Supercomputing}, year = {1991}, month = {1991///}, pages = {341 - 352}, author = {Pugh, William} } @article {18969, title = {Characterization of enhancer-of-white-apricot in Drosophila melanogaster.}, journal = {Genetics}, volume = {126}, year = {1990}, month = {1990/12/01/}, pages = {1061 - 1069}, abstract = {The white-apricot (w(a)) allele differs from the wild-type white gene by the presence of the retrovirus-like transposable element copia within the transcription unit. Most RNAs derived from w(a) have 3{\textquoteright} termini within this insertion, and only small amounts of structurally normal RNA are produced. The activity of w(a) is reduced in trans by a semidominant mutation in the gene Enhancer-of-white-apricot (E(w(a))).
Flies that are w(a) and heterozygous for the enhancer have eyes which are much lighter than the orange-yellow of w(a) alone while E(w(a)) homozygotes have white eyes. This semidominant effect on pigmentation is correlated with a corresponding decrease in white RNA having wild type structure, and flies homozygous for E(w(a)) have increased levels of aberrant RNAs. Three revertant alleles of E(w(a)) generated by reversion of the dominant enhancer phenotype with gamma radiation are noncomplementing recessive lethals, with death occurring during the larval stage. The effects on w(a) eye pigmentation of varying doses of the original E(w(a)) allele, the wild type allele, and the revertant alleles suggest that the original E(w(a)) allele produces a product that interferes with the activity of the wild type gene and that the revertants are null alleles. We propose that the E(w(a)) gene product influences the activity of the downstream copia long terminal repeat in 3{\textquoteright} end formation.}, isbn = {0016-6731, 1943-2631}, url = {http://www.genetics.org/content/126/4/1061}, author = {Peng,X. B. and Mount, Stephen M.} } @conference {12772, title = {The concurrency workbench}, booktitle = {Automatic Verification Methods for Finite State Systems}, year = {1990}, month = {1990///}, pages = {24 - 37}, author = {Cleaveland, Rance and Parrow,J. and Steffen,B.} } @article {16330, title = {Empirically guided software development using metric-based classification trees}, journal = {IEEE Software}, volume = {7}, year = {1990}, month = {1990/03//}, pages = {46 - 54}, abstract = {The identification of high-risk components early in the life cycle is addressed. A solution that casts this as a classification problem is examined. The proposed approach derives models of problematic components, based on their measurable attributes and those of their development processes. The models provide a basis for forecasting which components are likely to share the same high-risk properties, such as being error-prone or having a high development cost. Developers can use these classification techniques to localize the troublesome 20\% of the system. The method for generating the models, called automatic generation of metric-based classification trees, uses metrics from previous releases or projects to identify components that are historically high-risk.}, keywords = {Application software, Area measurement, automatic generation, classification problem, Classification tree analysis, Costs, empirically guided software development, Error correction, life cycle, measurable attributes, metric-based classification trees, Predictive models, Programming, software engineering, Software measurement, software metrics, Software systems}, isbn = {0740-7459}, doi = {10.1109/52.50773}, author = {Porter, Adam and Selby,R. W} } @article {16342, title = {Evaluating techniques for generating metric-based classification trees}, journal = {Journal of Systems and Software}, volume = {12}, year = {1990}, month = {1990/07//}, pages = {209 - 218}, abstract = {Metric-based classification trees provide an approach for identifying user-specified classes of high-risk software components throughout the software lifecycle. Based on measurable attributes of software components and processors, this empirically guided approach derives models of problematic software components. These models, which are represented as classification trees, are used on future systems to identify components likely to share the same high-risk properties. 
Example high-risk component properties include being fault-prone, change-prone, or effort-prone, or containing certain types of faults. Identifying these components allows developers to focus the application of specialized techniques and tools for analyzing, testing, and constructing software. A validation study using metric data from 16 NASA systems showed that the trees had an average classification accuracy of 79.3\% for fault-prone and effort-prone components in that environment. One fundamental feature of the classification tree generation algorithm is the method used for partitioning the metric data values into mutually exclusive and exhaustive ranges. This study compares the accuracy and the complexity of trees resulting from five techniques for partitioning metric data values. The techniques are quartiles, octiles, and three methods based on least weight subsequence (LWS-$\chi$) analysis, where $\chi$ is the upper bound on the number of partitions. The LWS-3 and LWS-5 partition techniques resulted in trees with higher accuracy (in terms of completeness and consistency) than did quartiles and octiles. LWS-3 and LWS-5 trees were not statistically different in terms of accuracy, but LWS-3 trees had lower complexity than all other methods in terms of the number of unique metrics required. The trees from the three LWS methods (LWS-3, LWS-5, and LWS-8) had lower complexity than did the trees from quartiles and octiles. In general, the results indicate that distribution-sensitive partition techniques that use only relatively few partitions, such as the least weight subsequence techniques LWS-3 and LWS-5, can increase accuracy and decrease complexity in classification trees. Classification analysis techniques, along with other empirically based analysis techniques for large-scale software, will be supported in the Amadeus measurement and empirical analysis system. }, isbn = {0164-1212}, doi = {10.1016/0164-1212(90)90041-J}, url = {http://www.sciencedirect.com/science/article/pii/016412129090041J}, author = {Porter, Adam and Selby,Richard W.} } @book {16563, title = {Inductive inference model for diagnostic problem-solving}, year = {1990}, month = {1990///}, publisher = {Springer-Verlag}, organization = {Springer-Verlag}, author = {Peng,Y. and Reggia, James A.} } @article {16021, title = {Intentionality and defaults}, journal = {International J. of Expert Systems}, volume = {3}, year = {1990}, month = {1990///}, pages = {345 - 354}, author = {Perlis, Don} } @article {15951, title = {Limited scope and circumscriptive reasoning}, journal = {International Journal of Expert Systems}, volume = {3}, year = {1990}, month = {1990///}, pages = {207 - 217}, author = {Etherington,D. W and Kraus,S. and Perlis, Don} } @article {16374, title = {Probabilistic analysis of set operations with constant-time set equality test}, journal = {Advances in Computing and Information{\textemdash}ICCI{\textquoteright}90}, year = {1990}, month = {1990///}, pages = {62 - 71}, author = {Pugh, William} } @article {15991, title = {Reasoning situated in time I: Basic concepts}, journal = {Journal of Experimental and Theoretical Artificial Intelligence}, volume = {2}, year = {1990}, month = {1990///}, pages = {75 - 98}, author = {Elgot-Drapkin,J.
J and Perlis, Don} } @article {18992, title = {Sequence of a cDNA from the Drosophila melanogaster white gene.}, journal = {Nucleic Acids Research}, volume = {18}, year = {1990}, month = {1990/03/25/}, pages = {1633 - 1633}, isbn = {0305-1048}, url = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC330539/}, author = {Pepling,M and Mount, Stephen M.} } @article {16362, title = {Skip lists: a probabilistic alternative to balanced trees}, journal = {Communications of the ACM}, volume = {33}, year = {1990}, month = {1990/06//}, pages = {668 - 676}, isbn = {00010782}, doi = {10.1145/78973.78977}, url = {http://dl.acm.org/citation.cfm?id=78977}, author = {Pugh, William} } @article {15996, title = {Thing and Thought}, journal = {Knowledge Representation and Defeasible Reasoning. Kluwer}, year = {1990}, month = {1990///}, author = {Perlis, Don} } @conference {16367, title = {Two-directional record layout for multiple inheritance}, booktitle = {Proceedings of the ACM SIGPLAN 1990 conference on Programming language design and implementation}, year = {1990}, month = {1990///}, pages = {85 - 91}, author = {Pugh, William and Weddell,G.} } @conference {15957, title = {Assessing others{\textquoteright} knowledge and ignorance}, booktitle = {Proceedings of the 4th International Symposium on Methodologies for Intelligent Systems}, year = {1989}, month = {1989///}, author = {Kraus,S. and Perlis, Don} } @article {16234, title = {Automatically transforming regularly structured linear documents into hypertext.}, journal = {Electronic Publishing}, volume = {2}, year = {1989}, month = {1989///}, pages = {211 - 229}, abstract = {Fully automatic conversion of a paper-based document into hypertext can be achieved in many cases if the original document is naturally partitioned into a collection of small-sized pieces that are unambiguously and consistently structured. We describe the methodology that we have used successfully to design and implement several straightforward conversions from the original document{\textquoteright}s machine-readable markup. }, author = {Furuta,R. and Plaisant, Catherine and Shneiderman, Ben} } @article {16550, title = {A comfort measure for diagnostic problem solving}, journal = {Information Sciences}, volume = {47}, year = {1989}, month = {1989/03//}, pages = {149 - 184}, abstract = {In order to apply Bayes{\textquoteright} theorem for diagnostic problem solving when multiple disorders can occur simultaneously, several previous proposals have suggested using the ratio of posterior probabilities to rank diagnostic hypotheses. Approaches using such relative likelihoods lose the measure of absolute strengths of hypotheses, and thus are incapable of evaluating the {\textquotedblleft}quality{\textquotedblright} of a problem solution. In this paper, we propose to impose a quantity called a {\textquotedblleft}comfort measure{\textquotedblright} on the solution: a solution of a diagnostic problem is a minimal-size set of hypotheses such that the sum of their posterior probabilities exceeds a given comfort measure. Based on a probabilistic causal model developed previously, a problem-solving strategy is presented which does not require the manifestation independence assumption required with direct Bayesian classification, and which is applicable to multimembership classification problems. This strategy selectively generates diagnostic hypotheses and calculates both their relative likelihood and the lower and upper bounds of their posterior probabilities.
These bounds are successively refined as more hypotheses are generated. Using these bounds, not the real posterior probabilities, the problem-solving strategy identifies a solution satisfying the given comfort measure, usually after only a small portion of all possible hypotheses have been generated.}, isbn = {0020-0255}, doi = {10.1016/0020-0255(89)90011-X}, url = {http://www.sciencedirect.com/science/article/pii/002002558990011X}, author = {Peng,Yun and Reggia, James A.} } @article {16529, title = {A connectionist model for diagnostic problem solving}, journal = {Systems, Man and Cybernetics, IEEE Transactions on}, volume = {19}, year = {1989}, month = {1989///}, pages = {285 - 298}, author = {Peng,Y. and Reggia, James A.} } @article {16969, title = {An experimental evaluation of three touch screen strategies within a hypertext database}, journal = {International Journal of Human-Computer Interaction}, volume = {1}, year = {1989}, month = {1989///}, pages = {41 - 52}, abstract = {High resolution touch screens and novel usage strategies have overcome earlier problems with parallax and inaccurate pointing. A study testing the utility of three touch screen strategies within the Hyperties hypertext environment was performed. This provided a replication and extension of an earlier touch screen strategy comparison that focused on small closely-spaced targets. The experiment compared three touch screen strategies in three experimental tasks that reflect hypertext usage. The results showed that a strategy that only uses the initial impact with the touch screen causes the user to miss the target more than other touch strategies. A statistically significant difference in errors was found. Our results should encourage system implementers and touch screen hardware designers to support {\textquoteleft}touch mouse{\textquoteright} strategies that enable cursor dragging on the touch screen surface. }, isbn = {1044-7318}, doi = {10.1080/10447318909525956}, url = {http://www.tandfonline.com/doi/abs/10.1080/10447318909525956}, author = {Potter,Richard and Berman,Mitchell and Shneiderman, Ben} } @article {16016, title = {Explicitly biased generalization}, journal = {Computational Intelligence}, volume = {5}, year = {1989}, month = {1989///}, pages = {67 - 81}, author = {Gordon,D.
and Perlis, Don} } @conference {16382, title = {Incremental computation via function caching}, booktitle = {Proceedings of the 16th ACM SIGPLAN-SIGACT symposium on Principles of programming languages}, year = {1989}, month = {1989///}, pages = {315 - 328}, author = {Pugh, William and Teitelbaum,T.} } @conference {12793, title = {A semantics based verification tool for finite state systems}, booktitle = {Proceedings of the IFIP WG6}, volume = {1}, year = {1989}, month = {1989///}, pages = {287 - 302}, author = {Cleaveland, Rance and Parrow,J. and Steffen,B.} } @conference {16320, title = {Software metric classification trees help guide the maintenance of large-scale systems}, booktitle = {Conference on Software Maintenance, 1989, Proceedings}, year = {1989}, month = {1989/10/16/19}, pages = {116 - 123}, publisher = {IEEE}, organization = {IEEE}, abstract = {The 80:20 rule states that approximately 20\% of a software system is responsible for 80\% of its errors. The authors propose an automated method for generating empirically-based models of error-prone software objects. These models are intended to help localize the troublesome 20\%. The method uses a recursive algorithm to automatically generate classification trees whose nodes are multivalued functions based on software metrics. The purpose of the classification trees is to identify components that are likely to be error prone or costly, so that developers can focus their resources accordingly. A feasibility study was conducted using 16 NASA projects. On average, the classification trees correctly identified 79.3\% of the software modules that had high development effort or faults.}, keywords = {automated method, automatic programming, classification, Classification tree analysis, classification trees, Computer errors, empirically-based models, error-prone software objects, Fault diagnosis, feasibility study, high development effort, Large-scale systems, multivalued functions, NASA, NASA projects, recursive algorithm, Software algorithms, software engineering, Software maintenance, Software measurement, software metrics, software modules, Software systems, trees (mathematics)}, isbn = {0-8186-1965-1}, doi = {10.1109/ICSM.1989.65202}, author = {Selby,R. W and Porter, Adam} } @article {16027, title = {Some Brief Essays on Mind}, year = {1989}, month = {1989/07//}, institution = {ROCHESTER UNIV NY DEPT OF COMPUTER SCIENCE}, abstract = {The author tries to explain his view of artificial intelligence, and more broadly how it fits into science as a whole. In doing so, he will not hesitate to indulge in sheer speculation when it seems to fit the topic. He will begin with a negative thought (one that he does not agree with). Consider the statement that, while robots and AI (artificial intelligence) may make great strides in the future, still they never will be able to produce music with the sensitivity of certain humans with great musical talent. (kr)}, keywords = {*ARTIFICIAL INTELLIGENCE, CYBERNETICS, HUMANS, Music, Robots}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA213887}, author = {Perlis, Don} } @article {16235, title = {A spectrum of automatic hypertext constructions}, journal = {Hypermedia}, volume = {1}, year = {1989}, month = {1989///}, pages = {179 - 195}, abstract = {We describe our experiences with four separate conversions from paper documents into hypertext and discuss the lessons we have learned.
The paper document{\textquoteright}s organization affects the ease with which it can be converted and the appropriateness of the resulting hypertext. The form of the paper document{\textquoteright}s machine-readable {\textquoteleft}markup{\textquoteright} description affects the ability to transform the structure automatically. Designing the link structures that tie together the parts of the hypertext takes special care in automating, as badly-designed and incorrectly-formed links destroy the integrity of the hypertext. Overall, each of the conversions followed the same basic methodology, providing the handle for the development of {\textquoteleft}power tools{\textquoteright} that can be applied to simplify subsequent conversions.}, author = {Furuta,R. and Plaisant, Catherine and Shneiderman, Ben} } @article {15983, title = {Truth and meaning (research note)}, journal = {Artificial intelligence}, volume = {39}, year = {1989}, month = {1989///}, pages = {245 - 250}, author = {Perlis, Don} } @article {16236, title = {The American Voice and Robotics {\textquotedblleft}Guardian{\textquotedblright} System: A Case Study in User Interface Usability Evaluation}, year = {1988}, month = {1988///}, institution = {University of Maryland}, abstract = {American Voice and Robotics (AVR), in conjunction with the Maryland Industrial Partnerships (MIPS) program, contracted the Human-Computer Interaction Laboratory (HCIL) to evaluate the user interface of AVR{\textquoteright}s {\textquotedblleft}Guardian{\textquotedblright} home automation system. Among their goals for the system were: Ease of use and learning of the system (intuitiveness), Aesthetic appeal, Unintimidating, Impressive, High Functionality, Fast and accurate input of user action, Clear feedback, Forgiving to errors, and Fun to use. Researchers found that users were initially impressed with the system, but that they were not fully satisfied with it. Specific initial recommendations, theoretical and methodological concerns, procedures, results, and direction of future research will be discussed. }, author = {Wallace,D.F. and Norman,K. L and Plaisant, Catherine} } @article {15978, title = {Autocircumscription}, journal = {Artificial Intelligence}, volume = {36}, year = {1988}, month = {1988/09//}, pages = {223 - 236}, abstract = {Reasoning can be used to select among various possible interpretations of events. But how are these possibilities determined? We isolate two key technical features of circumscription (consistency and minimization), and use the first as the basis for a reformulation of the circumscription principle in a way related to possibility, self-knowledge, and negative introspection. The second (minimization) then can be separately expressed on its own.
Conceptual clarity and a kind of validity are results of this separation, as well as a computational means to determine (sometimes) when a wff is not among a reasoner{\textquoteright}s conclusions.}, isbn = {0004-3702}, doi = {10.1016/0004-3702(88)90003-3}, url = {http://www.sciencedirect.com/science/article/pii/0004370288900033}, author = {Perlis, Don} } @conference {15988, title = {Commonsense set theory}, booktitle = {Meta-Level Architectures and Reflection}, year = {1988}, month = {1988///}, pages = {87 - 98}, author = {Perlis, Don} } @conference {16345, title = {An improved replacement strategy for function caching}, booktitle = {Proceedings of the 1988 ACM conference on LISP and functional programming - LFP {\textquoteright}88}, year = {1988}, month = {1988///}, pages = {269 - 276}, address = {Snowbird, Utah, United States}, doi = {10.1145/62678.62719}, url = {http://dl.acm.org/citation.cfm?id=62678.62719}, author = {Pugh, William} } @article {15969, title = {Languages with self-reference II: Knowledge, belief, and modality}, journal = {Artificial Intelligence}, volume = {34}, year = {1988}, month = {1988/03//}, pages = {179 - 212}, abstract = {Negative results of Montague and Thomason have diverted research in propositional attitudes away from syntactic ({\textquotedblleft}first-order{\textquotedblright}) approaches, encouraging modal formalisms instead, especially in representing epistemic notions. We show that modal logics are on no firmer ground than first-order ones when equally endowed with substitutive self-reference. Nonetheless, there may still be remedies, hinging in part upon a distinction between {\textquotedblleft}dynamic{\textquotedblright} and {\textquotedblleft}static{\textquotedblright} notions of provability and belief (an earlier version of this paper emphasized a somewhat different distinction).}, isbn = {0004-3702}, doi = {10.1016/0004-3702(88)90038-0}, url = {http://www.sciencedirect.com/science/article/pii/0004370288900380}, author = {Perlis, Don} } @article {16323, title = {Learning from examples: generation and evaluation of decision trees for software resource analysis}, journal = {IEEE Transactions on Software Engineering}, volume = {14}, year = {1988}, month = {1988/12//}, pages = {1743 - 1757}, abstract = {A general solution method for the automatic generation of decision (or classification) trees is investigated. The approach is to provide insights through in-depth empirical characterization and evaluation of decision trees for one problem domain, specifically, that of software resource data analysis. The purpose of the decision trees is to identify classes of objects (software modules) that had high development effort, i.e. in the uppermost quartile relative to past data. Sixteen software systems ranging from 3000 to 112000 source lines have been selected for analysis from a NASA production environment. The collection and analysis of 74 attributes (or metrics), for over 4700 objects, capture a multitude of information about the objects: development effort, faults, changes, design style, and implementation style. A total of 9600 decision trees are automatically generated and evaluated. The analysis focuses on the characterization and evaluation of decision tree accuracy, complexity, and composition. The decision trees correctly identified 79.3\% of the software modules that had high development effort or faults, on the average across all 9600 trees. The decision trees generated from the best parameter combinations correctly identified 88.4\% of the modules on the average.
Visualization of the results is emphasized, and sample decision trees are included.}, keywords = {Analysis of variance, Artificial intelligence, Classification tree analysis, Data analysis, decision theory, Decision trees, Fault diagnosis, Information analysis, machine learning, metrics, NASA, production environment, software engineering, software modules, software resource analysis, Software systems, Termination of employment, trees (mathematics)}, isbn = {0098-5589}, doi = {10.1109/32.9061}, author = {Selby,R. W and Porter, Adam} } @article {12789, title = {Type theory and concurrency}, journal = {International Journal of Parallel Programming}, volume = {17}, year = {1988}, month = {1988///}, pages = {153 - 206}, author = {Cleaveland, Rance and Panangaden,P.} } @article {15970, title = {Uniform accountability for multiple modes of reasoning}, journal = {International Journal of Approximate Reasoning}, volume = {2}, year = {1988}, month = {1988/07//}, pages = {233 - 246}, abstract = {This article discusses various issues surrounding the general debate on knowledge representation methods and argues in favor of the idea that while many methods are necessary, there also must be a kind of unified nature to the enterprise if it is to serve the needs of intelligence. Specific points include the misleading distinction between probabilistic and logical reasoning regarding the notion of truth and also some matters of nonmonotonicity. An effort is made to sample the broad range of approaches in the literature, with an eye toward such a unification.}, keywords = {Artificial intelligence, commonsense reasoning, probabilistic logic, probability theory, uncertainty representation}, isbn = {0888-613X}, doi = {10.1016/0888-613X(88)90119-3}, url = {http://www.sciencedirect.com/science/article/pii/0888613X88901193}, author = {Kanal,Laveen and Perlis, Don} } @article {16370, title = {ALEX - an Alexical Programming Language}, volume = {TR87-835}, year = {1987}, month = {1987///}, institution = {Cornell University}, abstract = {ALEX is an experimental language for high-level parallel programming. It is a testbed for exploring various non-traditional ways of expressing algorithmic ideas, making extensive use of high-resolution color graphics. The language itself is not a programming language in the traditional sense, since there is no lexical syntax. This paper discusses the basic design of the ALEX user interface.}, url = {http://hdl.handle.net/1813/6675}, author = {Kozen,D. and Teitelbaum,T. and Chen,W. Z and Field,J. H and Pugh, William and Vander Zanden,B. T} } @article {15929, title = {Circumscribing with sets}, journal = {Artificial Intelligence}, volume = {31}, year = {1987}, month = {1987/02//}, pages = {201 - 211}, abstract = {Sets can play an important role in circumscription{\textquoteright}s ability to deal in a general way with certain aspects of commonsense reasoning. A result of Kueker indicates that sentences that intuitively one would want circumscription to prove are nonetheless not so provable in a formal setting devoid of sets. Furthermore, when sets are introduced, first-order circumscription handles these cases very easily, obviating the need for second-order circumscription.
The {\textquotedblleft}Aussonderungs{\textquotedblright} axiom of ZF set theory plays an intuitive role in this shift back to a first-order language.}, isbn = {0004-3702}, doi = {10.1016/0004-3702(87)90020-8}, url = {http://www.sciencedirect.com/science/article/pii/0004370287900208}, author = {Perlis, Don} } @conference {15943, title = {Circumscription as introspection}, booktitle = {Proceedings of the Second International Symposium on Methodologies for intelligent systems}, year = {1987}, month = {1987///}, pages = {440 - 444}, author = {Perlis, Don} } @article {16581, title = {Diagnostic problem-solving with causal chaining}, journal = {International Journal of Intelligent Systems}, volume = {2}, year = {1987}, month = {1987///}, pages = {265 - 302}, author = {Peng,Y. and Reggia, James A.} } @article {15655, title = {The Discrete Geodesic Problem}, journal = {SIAM Journal on Computing}, volume = {16}, year = {1987}, month = {1987///}, abstract = {We present an algorithm for determining the shortest path between a source and a destination on an arbitrary (possibly nonconvex) polyhedral surface. The path is constrained to lie on the surface, and distances are measured according to the Euclidean metric. Our algorithm runs in time O(n^2 log n) and requires O(n^2) space, where n is the number of edges of the surface. After we run our algorithm, the distance from the source to any other destination may be determined using standard techniques in time O(log n) by locating the destination in the subdivision created by the algorithm. The actual shortest path from the source to a destination can be reported in time O(k + log n), where k is the number of faces crossed by the path. The algorithm generalizes to the case of multiple source points to build the Voronoi diagram on the surface, where n is now the maximum of the number of vertices and the number of sources. }, author = {Mitchell,Joseph S. B. and Mount, Dave and Papadimitriou,C. H} } @conference {15934, title = {How can a program mean}, booktitle = {Proceedings of the 10th Int{\textquoteright}l Joint Conference on Artificial Intelligence}, year = {1987}, month = {1987///}, pages = {163 - 166}, author = {Perlis, Don} } @conference {15940, title = {Life on a desert island}, booktitle = {Proc. Workshop on The Frame Problem in Artificial Intelligence}, year = {1987}, month = {1987///}, pages = {349 - 357}, author = {Drapkin,J. and Miller,M. and Perlis, Don} } @article {16632, title = {Modeling diagnostic reasoning: a summary of parsimonious covering theory}, journal = {Computer methods and programs in biomedicine}, volume = {25}, year = {1987}, month = {1987///}, pages = {125 - 134}, author = {Reggia, James A. and Peng,Y.} } @article {16611, title = {A probabilistic causal model for diagnostic problem solving (parts 1 and 2)}, journal = {IEEE Transactions on Systems, Man and Cybernetics}, year = {1987}, month = {1987///}, pages = {146 - 162}, author = {Peng,Y. and Reggia, James A.} } @article {15999, title = {Proving self-utterances}, journal = {Journal of Automated Reasoning}, volume = {3}, year = {1987}, month = {1987///}, pages = {329 - 338}, author = {Miller,M. and Perlis, Don} } @article {16015, title = {Completeness results for circumscription}, journal = {Artificial Intelligence}, volume = {28}, year = {1986}, month = {1986/02//}, pages = {29 - 42}, abstract = {We investigate the model theory of the notion of circumscription, and find completeness theorems that provide a partial converse to a result of McCarthy.
We show that the circumscriptive theorems are precisely the truths of the minimal models, in the case of various classes of theories, and for various versions of circumscription. We also present an example of commonsense reasoning in which first-order circumscription does not achieve the intuitive and desired minimization.}, isbn = {0004-3702}, doi = {10.1016/0004-3702(86)90029-9}, url = {http://www.sciencedirect.com/science/article/pii/0004370286900299}, author = {Perlis, Don and Minker, Jack} } @article {15995, title = {On the consistency of commonsense reasoning}, journal = {Computational Intelligence}, volume = {2}, year = {1986}, month = {1986///}, pages = {180 - 190}, author = {Perlis, Don} } @article {15961, title = {Intentionality as internality}, journal = {Behavioral and Brain Sciences}, volume = {9}, year = {1986}, month = {1986///}, pages = {151 - 152}, author = {Perlis, Don and Hall,R.} } @conference {16004, title = {A Parallel Self-Modifying Default Reasoning System}, booktitle = {AAAI}, year = {1986}, month = {1986///}, pages = {923 - 927}, author = {Minker, Jack and Perlis, Don and Subramanian,K.} } @conference {15928, title = {A preliminary excursion into step-logics}, booktitle = {Proceedings of the ACM SIGART international symposium on Methodologies for intelligent systems}, year = {1986}, month = {1986///}, pages = {262 - 269}, address = {Knoxville, Tennessee, United States}, doi = {10.1145/12808.12837}, url = {http://dl.acm.org/citation.cfm?id=12837}, author = {Drapkin,J. and Perlis, Don} } @conference {15933, title = {Self-reference, knowledge, belief, and modality}, booktitle = {Proc 5th National Conference on AI}, year = {1986}, month = {1986///}, pages = {416 - 420}, author = {Perlis, Don} } @conference {15945, title = {Step-logics: An alternative approach to limited reasoning}, booktitle = {Proceedings of the European Conf. on Artificial Intelligence}, year = {1986}, month = {1986///}, pages = {160 - 163}, author = {Drapkin,J. and Perlis, Don} } @article {16580, title = {Answer justification in abductive expert systems for diagnostic problem solving}, journal = {IEEE Transactions on Biomedical Engineering}, year = {1985}, month = {1985///}, author = {Reggia, James A. and Perricone,B. and Nau, Dana S. and Peng,Y.} } @article {16624, title = {Answer Justification in Diagnostic Expert Systems-Part II: Supporting Plausible Justifications}, journal = {Biomedical Engineering, IEEE Transactions on}, year = {1985}, month = {1985///}, pages = {268 - 272}, author = {Reggia, James A. and Perricone,B. T. and Nau, Dana S. and Peng,Y.} } @article {16011, title = {Computing protected circumscription}, journal = {The Journal of Logic Programming}, volume = {2}, year = {1985}, month = {1985///}, pages = {235 - 249}, author = {Minker, Jack and Perlis, Don} } @article {18120, title = {Efficient implementation of a shifting algorithm}, journal = {Discrete applied mathematics}, volume = {12}, year = {1985}, month = {1985///}, pages = {71 - 80}, author = {Perl,Y. and Vishkin, Uzi} } @article {16636, title = {A formal model of abductive inference}, journal = {Information Sciences}, volume = {37}, year = {1985}, month = {1985///}, pages = {227 - 285}, author = {Reggia, James A. and Nau, Dana S. and Wang,P. and Peng,Y.} } @article {16612, title = {A formal model of diagnostic inference, II. Algorithmic solution and application}, journal = {Information Sciences}, volume = {37}, year = {1985}, month = {1985///}, pages = {257 - 285}, author = {Reggia, James A. and Nau, Dana S. and Wang,P.
Y and Peng,Y.} } @article {15962, title = {Languages with self-reference I: Foundations}, journal = {Artificial Intelligence}, volume = {25}, year = {1985}, month = {1985/03//}, pages = {301 - 322}, abstract = {It is argued that a proper treatment of cognitive notions such as beliefs and concepts should allow broad and consistent expression of syntax and semantics, and that this in turn depends on self-reference. A theory of quotation and unquotation is presented to this end that appears to make unnecessary the usual hierarchical and non-first-order constructions for these notions. In the current paper (Part I) the underlying theory is presented; a sequel will treat in more detail the applications to cognition.}, isbn = {0004-3702}, doi = {10.1016/0004-3702(85)90075-X}, url = {http://www.sciencedirect.com/science/article/pii/000437028590075X}, author = {Perlis, Don} } @article {17273, title = {Learning a menu selection tree: training methods compared}, journal = {Behaviour \& Information Technology}, volume = {4}, year = {1985}, month = {1985///}, pages = {81 - 91}, abstract = {Menu selection systems sometimes present learning problems for novice users. This comparison of four training methods for novice users found that the global tree diagram of the menu system was superior to command sequence and frame presentation methods, and somewhat better than trial and error. Methods were evaluated on the basis of (1) number of target nodes found, (2) mean number of selections to a target node, (3) recall of the menu structure, and (4) subjective rating of ease of learning. }, isbn = {0144-929X}, doi = {10.1080/01449298508901790}, url = {http://www.tandfonline.com/doi/abs/10.1080/01449298508901790}, author = {Parton,Diana and Huffman,Keith and Pridgen,Patty and Norman,Kent and Shneiderman, Ben} } @book {16535, title = {A theoretical foundation for abductive expert systems}, year = {1985}, month = {1985///}, publisher = {North-Holland, New York}, organization = {North-Holland, New York}, author = {Reggia, James A. and Nau, Dana S. and Peng,Y. and Perricone,B.} } @conference {16001, title = {Applications of Protected Circumscription}, booktitle = {7th International Conference on Automated Deduction, Napa, California, USA, May 14-16, 1984: proceedings}, volume = {170}, year = {1984}, month = {1984///}, pages = {414 - 414}, author = {Minker, Jack and Perlis, Don} } @article {16610, title = {Computer-aided assessment of transient ischemic attacks. A clinical evaluation.}, journal = {Archives of neurology}, volume = {41}, year = {1984}, month = {1984///}, pages = {1248 - 1248}, author = {Reggia, James A. and Tabb,D. R. and Price,T. R. and Banko,M.
and Hebel,R.} } @article {16968, title = {An experimental comparison of tabular and graphic data presentation}, journal = {International Journal of Man-Machine Studies}, volume = {20}, year = {1984}, month = {1984/06//}, pages = {545 - 566}, abstract = {We present the results of our experiment designed to test the hypothesis that more usable information can be conveyed using a combination of graphical and tabular data than by using either form alone. Our independent variables were memory (recall and non-recall) and form (tables, graphs, or both). Comprehension was measured with a multiple choice exam consisting of three types of questions (retrieve, compare, or compare/calculate answers). Both non-recall and tabular treatments significantly increased comprehension. Combinations of graphs and tables produced slower but more accurate performance. An executive should use the form with which he/she is most familiar and comfortable.}, isbn = {0020-7373}, doi = {10.1016/S0020-7373(84)80029-2}, url = {http://www.sciencedirect.com/science/article/pii/S0020737384800292}, author = {Powers,Matthew and Lashley,Conda and Sanchez,Pamela and Shneiderman, Ben} } @article {14972, title = {Information Transfer in Distributed Computing with Applications to VLSI}, journal = {Journal of the ACM}, volume = {31}, year = {1984}, month = {1984/01//}, pages = {150 - 162}, isbn = {0004-5411}, doi = {10.1145/2422.322421}, url = {http://doi.acm.org/10.1145/2422.322421}, author = {JaJa, Joseph F. and Prasanna Kumar,V. K.} } @article {18405, title = {Monitoring an Ada software development}, journal = {ACM SIG Ada Letters}, volume = {IV}, year = {1984}, month = {1984/07//}, pages = {32 - 39}, isbn = {1094-3641}, doi = {10.1145/998401.998402}, url = {http://doi.acm.org/10.1145/998401.998402}, author = {Basili, Victor R. and Chang,Shih and Gannon,John and Katz,Elizabeth and Panlilio-Yap,N. Monina and Ramsey,Connie Loggia and Zelkowitz, Marvin V and Bailey,John and Kruesi,Elizabeth and Sheppard,Sylvia} } @conference {15958, title = {Non-monotonicity and real-time reasoning}, booktitle = {AAAI Workshop on Nonmonotonic Reasoning}, year = {1984}, month = {1984///}, author = {Perlis, Don} } @book {18416, title = {Programming languages: design and implementation}, year = {1984}, month = {1984///}, publisher = {Prentice-Hall}, organization = {Prentice-Hall}, author = {Pratt,T. W. and Zelkowitz, Marvin V} } @conference {15927, title = {Protected circumscription}, booktitle = {Proc. Workshop on Non-Monotonic Reasoning}, year = {1984}, month = {1984///}, pages = {337 - 343}, author = {Minker, Jack and Perlis, Don} } @book {19123, title = {Achievements and Prospects with Regard to Energy Recycling in Textile Finishing Shops}, year = {1983}, month = {1983}, publisher = {UN}, organization = {UN}, author = {Pop, Mihai and Romas, D. and Romania} } @article {15715, title = {Analysis of Relaxation Processes: the Two Node, Two Label Case}, journal = {IEEE Transactions on Systems, Man, and Cybernetics}, volume = {SMC-13}, year = {1983}, month = {1983///}, pages = {618 - 623}, author = {O{\textquoteright}Leary, Dianne P. and Peleg,Shmuel} } @article {15742, title = {Digital Image Compression by Outer Product Expansion}, journal = {IEEE Transactions on Communications}, volume = {31}, year = {1983}, month = {1983/03//}, pages = {441 - 444}, abstract = {We approximate a digital image as a sum of outer products $dxy^{T}$, where $d$ is a real number but the vectors $x$ and $y$ have elements +1, -1, or 0 only. The expansion gives a least squares approximation.
Work is proportional to the number of pixels; reconstruction involves only additions.}, keywords = {approximation; coding; image; Least-squares; Transform}, isbn = {0090-6778}, doi = {10.1109/TCOM.1983.1095823}, author = {O{\textquoteright}Leary, Dianne P. and Peleg,S.} } @article {16528, title = {Expert systems based on set covering model}, journal = {International Journal of Man-Machine Studies}, volume = {19}, year = {1983}, month = {1983///}, pages = {443 - 460}, author = {Reggia, James A. and Nau, Dana S. and Wang,P. Y.} } @article {18138, title = {Parallel computation on 2-3-trees}, journal = {RAIRO Informatique th{\'e}orique}, volume = {17}, year = {1983}, month = {1983///}, pages = {397 - 404}, author = {Paul,W. and Vishkin, Uzi and Wagener,H.} } @article {18137, title = {Parallel dictionaries on 2{\textendash}3 trees}, journal = {Automata, Languages and Programming}, year = {1983}, month = {1983///}, pages = {597 - 609}, author = {Paul,W. and Vishkin, Uzi and Wagener,H.} } @article {18996, title = {Small Ribonucleoproteins from Eukaryotes: Structures and Roles in RNA Biogenesis}, journal = {Cold Spring Harbor Symposia on Quantitative Biology}, volume = {47}, year = {1983}, month = {1983/01/01/}, pages = {893 - 900}, isbn = {0091-7451, 1943-4456}, doi = {10.1101/SQB.1983.047.01.103}, url = {http://symposium.cshlp.org/content/47/893}, author = {Steitz,J. A. and Wolin,S. L. and Rinke,J. and Pettersson,I. and Mount, Stephen M. and Lerner,E. A. and Hinterberger,M. and Gottlieb,E.} } @article {18999, title = {Splicing of messenger RNA precursors is inhibited by antisera to small nuclear ribonucleoprotein}, journal = {Cell}, volume = {35}, year = {1983}, month = {1983/11//}, pages = {101 - 107}, isbn = {00928674}, doi = {10.1016/0092-8674(83)90212-X}, url = {http://ukpmc.ac.uk/abstract/MED/6194895}, author = {Padgett,Richard A. and Mount, Stephen M. and Steitz,Joan A. and Sharp,Phillip A.} } @article {19006, title = {The U1 small nuclear RNA-protein complex selectively binds a 5' splice site in vitro}, journal = {Cell}, volume = {33}, year = {1983}, month = {1983/06//}, pages = {509 - 518}, abstract = {The ability of purified U1 small nuclear RNA-protein complexes (U1 snRNPs) to bind in vitro to two RNAs transcribed from recombinant DNA clones by bacteriophage T7 RNA polymerase has been studied. A transcript which contains sequences corresponding to the small intron and flanking exons of the major mouse beta-globin gene is bound in marked preference to an RNA devoid of splice site sequences. The site of U1 snRNP binding to the globin RNA has been defined by T1 ribonuclease digestion of the RNA-U1 snRNP complex. A 15-17-nucleotide region, including the 5{\textquoteright} splice site, remains undigested and complexed with the snRNP such that it can be co-precipitated by antibodies directed against the U1 snRNP. Partial proteinase K digestion of the U1 snRNP abolishes interaction with the globin RNA, indicating that the snRNP proteins contribute significantly to RNA binding. No RNA cleavage, splicing, or recognition of the 3{\textquoteright} splice site by U1 snRNPs has been detected.
Our results are discussed in terms of the probable role of U1 snRNPs in the messenger RNA splicing of eucaryotic cell nuclei.}, isbn = {00928674}, doi = {10.1016/0092-8674(83)90432-4}, url = {http://ukpmc.ac.uk/abstract/MED/6190573}, author = {Mount, Stephen M. and Pettersson,Ingvar and Hinterberger,Monique and Karmas,Aavo and Steitz,Joan A.} } @article {16554, title = {KMS reference manual}, journal = {Dep. Comput. Sci., Univ. Maryland, College Park, Tech. Rep. TR-1136}, year = {1982}, month = {1982///}, author = {Reggia, James A. and Perricone,B.} } @conference {16635, title = {Bayesian classification in medicine: The transferability question}, booktitle = {Proceedings of the Fifth Annual Symposium on Computer Applications in Medical Care}, year = {1981}, month = {1981///}, pages = {250 - 252}, author = {Zagoria,R. and Reggia, James A. and Price,T. and Banko,M.} } @conference {16618, title = {Knowledge-based decision support systems: Development through high-level languages}, booktitle = {Proc. 20th Ann. Tech. Meeting of Wash. DC ACM}, year = {1981}, month = {1981///}, author = {Reggia, James A. and Perricone,B. T.} } @article {15975, title = {A re-evaluation of story grammars}, journal = {Cognitive Science}, volume = {5}, year = {1981}, month = {1981///}, pages = {79 - 86}, author = {Frisch,A. M. and Perlis, Don} } @article {19007, title = {Transcription of cloned tRNA and 5S RNA genes in a Drosophila cell free extract}, journal = {Nucleic Acids Research}, volume = {9}, year = {1981}, month = {1981/08/25/}, pages = {3907 - 3918}, abstract = {We describe the preparation of a cell-free extract from Drosophila Kc cells which allows transcription of a variety of cloned eukaryotic RNA polymerase III genes. The extract has low RNA-processing nuclease activity and thus the major products obtained are primary transcripts.}, isbn = {0305-1048, 1362-4962}, doi = {10.1093/nar/9.16.3907}, url = {http://nar.oxfordjournals.org/content/9/16/3907}, author = {Dingermann,Theodor and Sharp,Stephen and Appel,Bernd and DeFranco,Donald and Mount, Stephen M. and Heiermann,Reinhard and Pongs,Olaf and S{\"o}ll,Dieter} } @article {15981, title = {Utility Functions, Public Goods, and Income Redistribution}, journal = {Public Finance Review}, volume = {5}, year = {1977}, month = {1977/01/01/}, pages = {9 - 22}, abstract = {The results of fiscal incidence studies depend upon the underlying utility functions pertinent to the distribution of public good benefits among income classes. Generally, the use of such functions is not explicitly recognized. By taking, in the first instance, an extended approach to utility based on an original Aaron-McGuire formulation and, in the second instance, a new approach, it is concluded that the income-equalizing tendencies of fiscal activities may be overstated when more conventional analysis is employed.}, doi = {10.1177/109114217700500102}, url = {http://pfr.sagepub.com/content/5/1/9.abstract}, author = {Mann,Arthur J.
and Perlis, Don} } @article {15973, title = {An application of compiler simulation at the source language level}, journal = {The Computer Journal}, volume = {19}, year = {1976}, month = {1976///}, pages = {90 - 90}, author = {Perlis, Don} } @article {16005, title = {Group algebras and model theory}, journal = {Illinois journal of mathematics}, volume = {20}, year = {1976}, month = {1976///}, pages = {298 - 305}, author = {Perlis, Don} } @article {15931, title = {An extension of Ackermann{\textquoteright}s set theory}, journal = {Journal of Symbolic Logic}, volume = {37}, year = {1972}, month = {1972///}, pages = {703 - 704}, author = {Perlis, Don} }