@conference {19695, title = {Large-Scale Signature Matching Using Multi-stage Hashing}, booktitle = {Document Analysis and Recognition (ICDAR), 2013 12th International Conference on}, year = {2013}, month = {2013/00/01}, pages = {976 - 980}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we propose a fast large-scale signature matching method based on locality sensitive hashing (LSH). Shape Context features are used to describe the structure of signatures. Two stages of hashing are performed to find the nearest neighbours for query signatures. In the first stage, we use M randomly generated hyperplanes to separate shape context feature points into different bins, and compute a term-frequency histogram to represent the feature point distribution as a feature vector. In the second stage we again use LSH to categorize the high-level features into different classes. The experiments are carried out on two datasets - DS-I, a small dataset containing 189 signatures, and DS-II, a large dataset created by our group which contains 26,000 signatures. We show that our algorithm can achieve a high accuracy even when few signatures are collected from the same person and perform fast matching when dealing with a large dataset.}, isbn = {978-0-7695-4999-6}, doi = {10.1109/ICDAR.2013.197}, url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6628762}, author = {Du, Xianzhi and Abdalmageed, W and David Doermann} } @article {19687, title = {Lexical and Hierarchical Topic Regression}, year = {2013}, month = {2013/00/01}, pages = {1106 - 1114}, abstract = {Inspired by a two-level theory that unifies agenda setting and ideological framing, we propose supervised hierarchical latent Dirichlet allocation (SHLDA) which jointly captures documents{\textquoteright} multi-level topic structure and their polar response variables. Our ...}, url = {http://papers.nips.cc/paper/5163-lexical-and-hierarchical-topic-regression}, author = {Nguyen, Viet-An and Jordan Boyd-Graber and Resnik, Philip} } @conference {19316, title = {Learning Document Structure for Retrieval and Classification}, booktitle = {International Conference on Pattern Recognition (ICPR 2012)}, year = {2012}, pages = {1558-1561}, abstract = {In this paper, we present a method for the retrieval of document images with chosen layout characteristics. The proposed method is based on statistics of patch codewords over different regions of the image. We begin with a set of wanted and a random set of unwanted images representative of a large heterogeneous collection. We then use raw-image patches extracted from the unlabeled images to learn a codebook. To model the spatial relationships between patches, the image is recursively partitioned horizontally and vertically, and a histogram of patch-codewords is computed in each partition. The resulting set of features gives high precision and recall for the retrieval of hand-drawn and machine-print table-documents, and unconstrained mixed form-type documents, when trained using a random forest classifier.
We compare our method to the spatial-pyramid method, and show that the proposed approach for learning layout characteristics is competitive for document images.}, author = {Kumar,Jayant and Ye,Peng and David Doermann} } @conference {19313, title = {Learning features for predicting OCR accuracy}, booktitle = {International Conference on Pattern Recognition (ICPR)}, year = {2012}, pages = {3204-3207}, abstract = {In this paper, we present a new method for assessing the quality of degraded document images using unsupervised feature learning. The goal is to build a computational model to automatically predict OCR accuracy of a degraded document image without a reference image. Current approaches for this problem typically rely on hand-crafted features whose design is based on heuristic rules that may not be generalizable. In contrast, we explore an unsupervised feature learning framework to learn effective and efficient features for predicting OCR accuracy. Our experimental results, on a set of historic newspaper images, show that the proposed method outperforms a baseline method which combines features from previous works.}, author = {Ye,Peng and David Doermann} } @conference {19319, title = {Learning Text-line Segmentation using Codebooks and Graph Partitioning}, booktitle = {International Conference on Frontiers in Handwriting Recognition (ICFHR)}, year = {2012}, pages = {63-68}, abstract = {In this paper, we present a codebook based method for handwritten text-line segmentation which uses image patches in the training data to learn a graph-based similarity for clustering. We first construct a codebook of image-patches using K-medoids, and obtain exemplars which encode local evidence. We then obtain the corresponding codewords for all patches extracted from a given image and construct a similarity graph using the learned evidence and partitioned to obtain textlines. Our learning based approach performs well on a field dataset containing degraded and un-constrained handwritten Arabic document images. Results on ICDAR 2009 segmentation contest dataset show that the method is competitive with previous approaches.}, author = {Kang,Le and Kumar,Jayant and Ye,Peng and David Doermann} } @conference {19317, title = {Linguistic Resources for Handwriting Recognition and Translation Evaluation}, booktitle = {Eight International Conference on Language Resources and Evaluation (LREC{\textquoteright}12)}, year = {2012}, author = {Song, Zhiyi and Ismael, Safa and Grimes, Stephen and David Doermann and Strassel,Stephanie} } @article {18540, title = {Lithium: Event-Driven Network Control}, volume = {GT-CS-12-03}, year = {2012}, month = {2012///}, institution = {Georgia Institute of Technology}, abstract = {This paper introduces event-driven network control, a network control framework that makes networks easier to manage by automating many tasks that must currently be performed by manually modifying low-level, distributed, and complex device configuration. We identify four policy domains that inherently capture many events: time, user, history, and traffic flow. We then present Lithium, an event-driven network control framework that can implement policies expressed using these domains. Lithium can support policies that automatically react to a wide range of events, from fluctuations in traffic volumes to changes in the time of day. 
Lithium allows network operators to specify network-wide policies in terms of a high-level, event-driven policy model, as opposed to configuring individual network devices with low-level commands. To show that Lithium is practical, general, and applicable in different types of network scenarios, we have deployed Lithium in both a campus network and a home network and used it to implement more flexible and dynamic network policies. We also perform evaluations to show that Lithium introduces negligible overhead beyond a conventional OpenFlow-based control framework.}, url = {http://hdl.handle.net/1853/43377}, author = {Kim,H. and Voellmy,A. and Burnett,S. and Feamster, Nick and Clark,R.} } @conference {13585, title = {Local Segmentation of Touching Characters using Contour based Shape Decomposition}, booktitle = {Document Analysis Systems}, year = {2012}, month = {2012///}, abstract = {We propose a contour based shape decomposition approach that provides local segmentation of touching characters. The shape contour is linearized into edgelets and edgelets are merged into boundary fragments. Connection cost between boundary fragments is obtained by considering local smoothness, connection length and a stroke-level property Similar Stroke Rate. Samples of connections among boundary fragments are randomly generated and the one with the minimum global cost is selected to produce optimal segmentation of the shape. To obtain a binary segmentation using this approach, we make an iterative search for the parameters that yield two components on a shape. Experimental results on a number of synthetic shape images and the LTP dataset showed that this contour based shape decomposition technique is promising and it is effective in providing local segmentation of touching characters.}, author = {Kang,Le and David Doermann and Cao,Huaigu and Prasad,Rohit and Natarajan,Prem} } @conference {13591, title = {Logo Retrieval in Document Images}, booktitle = {Document Analysis Systems}, year = {2012}, month = {2012///}, abstract = {This paper presents a scalable algorithm for segmentation free logo retrieval in document images. The contributions of this paper include the use of the SURF feature for logo retrieval, a novel indexing algorithm for efficient retrieval of SURF features and a method to filter results using the orientation of local features and geometric constraints. Results demonstrate that logo retrieval can be performed with high accuracy and efficiently scaled to large datasets.}, author = {Jain,Rajiv and David Doermann} } @article {20381, title = {Long-term effects of ocean warming on the prokaryotic community: evidence from the vibrios}, journal = {The ISME Journal}, volume = {6}, year = {2012}, month = {Jan-01-2012}, pages = {21 - 30}, abstract = {The long-term effects of ocean warming on prokaryotic communities are unknown because of lack of historical data. We overcame this gap by applying a retrospective molecular analysis to the bacterial community on formalin-fixed samples from the historical Continuous Plankton Recorder archive, which is one of the longest and most geographically extensive collections of marine biological samples in the world. We showed that during the last half century, ubiquitous marine bacteria of the Vibrio genus, including Vibrio cholerae, increased in dominance within the plankton-associated bacterial community of the North Sea, where an unprecedented increase in bathing infections related to these bacteria was recently reported.
Among environmental variables, increased sea surface temperature explained 45\% of the variance in Vibrio data, supporting the view that ocean warming is favouring the spread of vibrios and may be the cause of the globally increasing trend in their associated diseases.}, issn = {1751-7362}, doi = {10.1038/ismej.2011.89}, url = {http://www.nature.com/articles/ismej201189}, author = {Vezzulli, Luigi and Brettar, Ingrid and Pezzati, Elisabetta and Reid, Philip C and Rita R Colwell and H{\"o}fle, Manfred G and Pruzzo, Carla} } @conference {14226, title = {Language Models for Semantic Extraction and Filtering in Video Action Recognition}, booktitle = {Workshops at the Twenty-Fifth AAAI Conference on Artificial Intelligence}, year = {2011}, month = {2011/08/24/}, abstract = {The paper addresses the following issues: (a) how to represent semantic information from natural language so that a vision model can utilize it? (b) how to extract the salient textual information relevant to vision? For a given domain, we present a new model of semantic extraction that takes into account word relatedness as well as word disambiguation in order to apply to a vision model. We automatically process the text transcripts and perform syntactic analysis to extract dependency relations. We then perform semantic extraction on the output to filter semantic entities related to actions. The resulting data are used to populate a matrix of co-occurrences utilized by the vision processing modules. Results show that explicitly modeling the co-occurrence of actions and tools significantly improved performance.}, url = {https://www.aaai.org/ocs/index.php/WS/AAAIW11/paper/viewPaper/3919}, author = {Tzoukermann,Evelyne and Neumann, Jan and Kosecka,Jana and Ferm{\"u}ller, Cornelia and Perera,Ian and Ferraro,Frank and Sapp,Ben and Chaudhry,Rizwan and Singh,Gautam} } @conference {13074, title = {A large-scale benchmark dataset for event recognition in surveillance video}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2011 IEEE Conference on}, year = {2011}, month = {2011/06//}, pages = {3153 - 3160}, abstract = {We introduce a new large-scale video dataset designed to assess the performance of diverse visual event recognition algorithms with a focus on continuous visual event recognition (CVER) in outdoor areas with wide coverage. Previous datasets for action recognition are unrealistic for real-world surveillance because they consist of short clips showing one action by one individual [15, 8]. Datasets have been developed for movies [11] and sports [12], but, these actions and scene conditions do not apply effectively to surveillance videos. Our dataset consists of many outdoor scenes with actions occurring naturally by non-actors in continuously captured videos of the real world. The dataset includes large numbers of instances for 23 event types distributed throughout 29 hours of video. This data is accompanied by detailed annotations which include both moving object tracks and event examples, which will provide solid basis for large-scale evaluation. Additionally, we propose different types of evaluation modes for visual recognition tasks and evaluation metrics along with our preliminary experimental results. 
We believe that this dataset will stimulate diverse aspects of computer vision research and help us to advance the CVER tasks in the years ahead.}, keywords = {algorithm;evaluation, CVER, databases;, databases;video, dataset;moving, event, metrics;large-scale, object, recognition, recognition;diverse, recognition;video, scenes;surveillance, surveillance;visual, tasks;computer, tracks;outdoor, video, video;computer, vision;continuous, vision;image, visual}, doi = {10.1109/CVPR.2011.5995586}, author = {Oh,Sangmin and Hoogs, A. and Perera,A. and Cuntoor, N. and Chen,Chia-Chih and Lee,Jong Taek and Mukherjee,S. and Aggarwal, JK and Lee,Hyungtae and Davis, Larry S. and Swears,E. and Wang,Xioyang and Ji,Qiang and Reddy,K. and Shah,M. and Vondrick,C. and Pirsiavash,H. and Ramanan,D. and Yuen,J. and Torralba,A. and Song,Bi and Fong,A. and Roy-Chowdhury, A. and Desai,M.} } @conference {15066, title = {Learning a discriminative dictionary for sparse coding via label consistent K-SVD}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2011 IEEE Conference on}, year = {2011}, month = {2011/06//}, pages = {1697 - 1704}, abstract = {A label consistent K-SVD (LC-KSVD) algorithm to learn a discriminative dictionary for sparse coding is presented. In addition to using class labels of training data, we also associate label information with each dictionary item (columns of the dictionary matrix) to enforce discriminability in sparse codes during the dictionary learning process. More specifically, we introduce a new label consistent constraint called {\textquoteleft}discriminative sparse-code error{\textquoteright} and combine it with the reconstruction error and the classification error to form a unified objective function. The optimal solution is efficiently obtained using the K-SVD algorithm. Our algorithm learns a single over-complete dictionary and an optimal linear classifier jointly. It yields dictionaries so that feature points with the same class labels have similar sparse codes. Experimental results demonstrate that our algorithm outperforms many recently proposed sparse coding techniques for face and object category recognition under the same learning conditions.}, keywords = {classification error, Dictionaries, dictionary learning process, discriminative sparse code error, face recognition, image classification, Image coding, K-SVD, label consistent, learning (artificial intelligence), object category recognition, Object recognition, optimal linear classifier, reconstruction error, singular value decomposition, Training data}, doi = {10.1109/CVPR.2011.5995354}, author = {Zhuolin Jiang and Zhe Lin and Davis, Larry S.} } @conference {14457, title = {Learning statistical models from relational data}, booktitle = {Proceedings of the 2011 international conference on Management of data}, series = {SIGMOD {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {1195 - 1198}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Statistical Relational Learning (SRL) is a subarea of machine learning which combines elements from statistical and probabilistic modeling with languages which support structured data representations. 
In this survey, we will: 1) provide an introduction to SRL, 2) describe some of the distinguishing characteristics of SRL systems, including relational feature construction and collective classification, 3) describe three SRL systems in detail, 4) discuss applications of SRL techniques to important data management problems such as entity resolution, selectivity estimation, and information integration, and 5) discuss connections between SRL methods and existing database research such as probabilistic databases.}, keywords = {machine learning, probabilistic graphical models, statistical relational learning}, isbn = {978-1-4503-0661-4}, doi = {10.1145/1989323.1989451}, url = {http://doi.acm.org/10.1145/1989323.1989451}, author = {Getoor, Lise and Mihalkova,Lilyana} } @article {14501, title = {Learning to predict web collaborations}, journal = {Workshop on User Modeling for Web Applications (UMWA-11)}, year = {2011}, month = {2011///}, abstract = {Much of the knowledge available on the web today comes as a result of fruitful collaborations among large groups of people. One of the most striking examples of successful web collaboration is the online encyclopedia Wikipedia. The web is used as a collaboration platform by highly specialized blogging communities and by the scientific community. An important reason for the richness of content generated through web collaborations is that the participants in such collaborations are not constrained by geographic location. Thus, like-minded individuals from across the world can join their efforts. This also means, however, that web collaborators often do not know each other, and, thus, finding collaborators on the web is more difficult than it is with more traditional forms of collaboration that are initiated based on acquaintance. This difficulty is further exacerbated by the fact that web collaborations tend to be more dynamic as participants join and abandon communities. We consider the task of recommending project-specific potential collaborators to web users and propose an approach that is based on statistical relational learning. Our proposed model thus has the advantages that it can include complex features composed of multiple properties and relationships of the entities, it can handle the high levels of noise and uncertainty inherent in user actions, and it allows for joint decision-making, which leads to more accurate predictions. To ensure scalability, our model is trained in an online fashion. We demonstrate the effectiveness of our approach on a data set collected from Wikipedia.}, author = {Mihalkova,L. and Moustafa,W. and Getoor, Lise} } @conference {18922, title = {The Life Game: Cognitive Strategies for Repeated Stochastic Games}, year = {2011}, month = {2011/10//}, pages = {95 - 102}, abstract = {Standard models in bio-evolutionary game theory involve repetitions of a single stage game (e.g., the Prisoner{\textquoteright}s Dilemma or the Stag Hunt); but it is clear that repeatedly playing the same stage game is not an accurate model of most individuals{\textquoteright} lives. Rather, individuals{\textquoteright} interactions with others correspond to many different kinds of stage games. In this work, we concentrate on discovering behavioral strategies that are successful for the life game, in which the stage game is chosen stochastically at each iteration. We present a cognitive agent model based on Social Value Orientation (SVO) theory.
We provide extensive evaluations of our model{\textquoteright}s performance, both against standard agents from the game theory literature and against a large set of life-game agents written by students in two different countries. Our empirical results suggest that for life-game strategies to be successful in environments with such agents, it is important (i) to be unforgiving with respect to trust behavior and (ii) to use adaptive, fine-grained opponent models of the other agents.}, keywords = {adaptive model, behavioral strategy, bioevolutionary game theory, cognitive agent model, fine-grained opponent model, game theory, iteration, iterative methods, life game, prisoner{\textquoteright}s dilemma, repeated stochastic games, social value orientation theory, stag hunt, stage game, Stochastic processes, trust behavior}, doi = {10.1109/PASSAT/SocialCom.2011.62}, author = {Cheng,Kan-Leung and Zuckerman,I. and Nau, Dana S. and Golbeck,J.} } @conference {16036, title = {LifeFlow: visualizing an overview of event sequences}, booktitle = {Proceedings of the 2011 annual conference on Human factors in computing systems}, series = {CHI {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {1747 - 1756}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Event sequence analysis is an important task in many domains: medical researchers may study the patterns of transfers within the hospital for quality control; transportation experts may study accident response logs to identify best practices. In many cases they deal with thousands of records. While previous research has focused on searching and browsing, overview tasks are often overlooked. We introduce a novel interactive visual overview of event sequences called \emph{LifeFlow}. LifeFlow is scalable, can summarize all possible sequences, and represents the temporal spacing of the events within sequences. Two case studies with healthcare and transportation domain experts are presented to illustrate the usefulness of LifeFlow. A user study with ten participants confirmed that after 15 minutes of training novice users were able to rapidly answer questions about the prevalence and temporal characteristics of sequences, find anomalies, and gain significant insight from the data.}, keywords = {Information Visualization, overview visualization, temporal categorical data, timestamped event sequences}, isbn = {978-1-4503-0228-9}, doi = {10.1145/1978942.1979196}, url = {http://doi.acm.org/10.1145/1978942.1979196}, author = {Wongsuphasawat,Krist and Guerra G{\'o}mez,John Alexis and Plaisant, Catherine and Wang,Taowei David and Taieb-Maimon,Meirav and Shneiderman, Ben} } @conference {17280, title = {LifeFlow: visualizing an overview of event sequences (video preview)}, booktitle = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems}, series = {CHI EA {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {507 - 510}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Event sequence analysis is an important task in many domains: medical researchers may study the patterns of transfers within the hospital for quality control; transportation experts may study accident response logs to identify best practices. In many cases they deal with thousands of records. While previous research has focused on searching and browsing, overview tasks are often overlooked. 
We introduce a novel interactive visual overview of event sequences called LifeFlow. LifeFlow is scalable, can summarize all possible sequences, and represents the temporal spacing of the events within sequences. In this video, we show an example of patient transfer data and briefly demonstrate how to analyze them with LifeFlow. Please see [11] or visit http://www.cs.umd.edu/hcil/lifeflow for more detail.}, keywords = {emergency room, healthcare, Information Visualization, overview visualization, temporal categorical data, timestamped event sequences}, isbn = {978-1-4503-0268-5}, doi = {10.1145/1979742.1979557}, url = {http://doi.acm.org/10.1145/1979742.1979557}, author = {Wongsuphasawat,Krist and Guerra G{\'o}mez,John Alexis and Plaisant, Catherine and Wang,Taowei and Taieb-Maimon,Meirav and Shneiderman, Ben} } @article {13380, title = {Lightweight Graphical Models for Selectivity Estimation Without Independence Assumptions}, journal = {Proceedings of the VLDB Endowment}, volume = {4}, year = {2011}, month = {2011///}, abstract = {As a result of decades of research and industrial development, modern query optimizers are complex software artifacts. However, the quality of the query plan chosen by an optimizer is largely determined by the quality of the underlying statistical summaries. Small selectivity estimation errors, propagated exponentially, can lead to severely sub-optimal plans. Modern optimizers typically maintain one-dimensional statistical summaries and make the attribute value independence and join uniformity assumptions for efficiently estimating selectivities. Therefore, selectivity estimation errors in today{\textquoteright}s optimizers are frequently caused by missed correlations between attributes. We present a selectivity estimation approach that does not make the independence assumptions. By carefully using concepts from the field of graphical models, we are able to factor the joint probability distribution of all the attributes in the database into small, usually two-dimensional distributions. We describe several optimizations that can make selectivity estimation highly efficient, and we present a complete implementation inside PostgreSQL{\textquoteright}s query optimizer. Experimental results indicate an order of magnitude better selectivity estimates, while keeping optimization time in the range of tens of milliseconds.}, author = {Tzoumas,K. and Deshpande, Amol and Jensen,C. S} } @conference {14702, title = {Lightweight monadic programming in ML}, booktitle = {Proceedings of the 16th ACM SIGPLAN international conference on Functional programming}, series = {ICFP {\textquoteright}11}, year = {2011}, month = {2011///}, pages = {15 - 27}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Many useful programming constructions can be expressed as monads. Examples include probabilistic modeling, functional reactive programming, parsing, and information flow tracking, not to mention effectful functionality like state and I/O. In this paper, we present a type-based rewriting algorithm to make programming with arbitrary monads as easy as using ML{\textquoteright}s built-in support for state and I/O. Developers write programs using monadic values of type m τ as if they were of type τ, and our algorithm inserts the necessary binds, units, and monad-to-monad morphisms so that the program type checks. Our algorithm, based on Jones{\textquoteright} qualified types, produces principal types.
But principal types are sometimes problematic: the program{\textquoteright}s semantics could depend on the choice of instantiation when more than one instantiation is valid. In such situations we are able to simplify the types to remove any ambiguity but without adversely affecting typability; thus we can accept strictly more programs. Moreover, we have proved that this simplification is efficient (linear in the number of constraints) and coherent: while our algorithm induces a particular rewriting, all related rewritings will have the same semantics. We have implemented our approach for a core functional language and applied it successfully to simple examples from the domains listed above, which are used as illustrations throughout the paper.}, keywords = {coercion, coherence, monad, rewriting, type}, isbn = {978-1-4503-0865-6}, doi = {10.1145/2034773.2034778}, url = {http://doi.acm.org/10.1145/2034773.2034778}, author = {Swamy,Nikhil and Guts,Nataliya and Leijen,Daan and Hicks, Michael W.} } @article {15174, title = {Limits of computational differential privacy in the client/server setting}, journal = {Theory of Cryptography}, year = {2011}, month = {2011///}, pages = {417 - 431}, abstract = {Differential privacy is a well established definition guaranteeing that queries to a database do not reveal {\textquotedblleft}too much{\textquotedblright} information about specific individuals who have contributed to the database. The standard definition of differential privacy is information theoretic in nature, but it is natural to consider computational relaxations and to explore what can be achieved with respect to such notions. Mironov et al. (Crypto 2009) and McGregor et al. (FOCS 2010) recently introduced and studied several variants of computational differential privacy, and show that in the two-party setting (where data is split between two parties) these relaxations can offer significant advantages.Left open by prior work was the extent, if any, to which computational differential privacy can help in the usual client/server setting where the entire database resides at the server, and the client poses queries on this data. We show, for queries with output in ℝ n (for constant n) and with respect to a large class of utilities, that any computationally private mechanism can be converted to a statistically private mechanism that is equally efficient and achieves roughly the same utility. }, doi = {10.1007/978-3-642-19571-6_25}, author = {Groce,A. and Katz, Jonathan and Yerukhimovich,A.} } @article {15167, title = {Limits on the power of zero-knowledge proofs in cryptographic constructions}, journal = {Theory of Cryptography}, year = {2011}, month = {2011///}, pages = {559 - 578}, abstract = {For over 20 years, black-box impossibility results have been used to argue the infeasibility of constructing certain cryptographic primitives (e.g., key agreement) from others (e.g., one-way functions). A widely recognized limitation of such impossibility results, however, is that they say nothing about the usefulness of (known) nonblack-box techniques. This is unsatisfying, as we would at least like to rule out constructions using the set of techniques we have at our disposal.With this motivation in mind, we suggest a new framework for black-box constructions that encompasses constructions with a nonblack-box flavor: specifically, those that rely on zero-knowledge proofs relative to some oracle. 
We show that our framework is powerful enough to capture the Naor-Yung/Sahai paradigm for building a (shielding) CCA-secure public-key encryption scheme from a CPA-secure one, something ruled out by prior black-box separation results. On the other hand, we show that several black-box impossibility results still hold even in a setting that allows for zero-knowledge proofs. }, doi = {10.1007/978-3-642-19571-6_34}, author = {Brakerski,Z. and Katz, Jonathan and Segev,G. and Yerukhimovich,A.} } @article {13945, title = {Linear versus Mel Frequency Cepstral Coefficients for Speaker Recognition}, journal = {IEEE Automatic Speech Recognition and Understanding Workshop}, year = {2011}, month = {2011///}, abstract = {Mel-frequency cepstral coefficients (MFCC) havebeen dominantly used in speaker recognition as well as in speech recognition. However, based on theories in speech production, some speaker characteristics associated with the structure of the vocal tract, particularly the vocal tract length, are reflected more in the high frequency range of speech. This insight suggests that a linear scale in frequency may provide some advantages in speaker recognition over the mel scale. Based on two state-of-the- art speaker recognition back-end systems (one Joint Factor Analysis system and one Probabilistic Linear Discriminant Analysis system), this study compares the performances between MFCC and LFCC (Linear frequency cepstral coefficients) in the NIST SRE (Speaker Recognition Evaluation) 2010 extended-core task. Our results in SRE10 show that, while they are complementary to each other, LFCC consistently outperforms MFCC, mainly due to its better performance in the female trials. This can be explained by the relatively shorter vocal tract in females and the resulting higher formant frequencies in speech. LFCC benefits more in female speech by better capturing the spectral characteristics in the high frequency region. In addition, our results show some advantage of LFCC over MFCC in reverberant speech. LFCC is as robust as MFCC in the babble noise, but not in the white noise. It is concluded that LFCC should be more widely used, at least for the female trials, by the mainstream of the speaker recognition community. }, author = {Zhou,X. and Garcia-Romero,D. and Duraiswami, Ramani and Espy-Wilson,C. and Shamma,S.} } @conference {19035, title = {Link prediction by de-anonymization: How We Won the Kaggle Social Network Challenge}, year = {2011}, month = {2011}, pages = {1825 - 1834}, abstract = {This paper describes the winning entry to the IJCNN 2011 Social Network Challenge run by Kaggle.com. The goal of the contest was to promote research on real-world link prediction, and the dataset was a graph obtained by crawling the popular Flickr social photo sharing website, with user identities scrubbed. By de-anonymizing much of the competition test set using our own Flickr crawl, we were able to effectively game the competition. Our attack represents a new application of de-anonymization to gaming machine learning contests, suggesting changes in how future competitions should be run. We introduce a new simulated annealing-based weighted graph matching algorithm for the seeding step of de-anonymization. 
We also show how to combine de-anonymization with link prediction-the latter is required to achieve good performance on the portion of the test set not de-anonymized-for example by training the predictor on the de-anonymized portion of the test set, and combining probabilistic predictions from de-anonymization and link prediction.}, keywords = {deanonymization, Flickr social photo sharing Website, graph theory, IJCNN 2011 social network challenge, Kaggle social network challenge, learning (artificial intelligence), machine learning, realworld link prediction, Simulated annealing, simulated annealing-based weighted graph matching algorithm, social networking (online)}, author = {Narayanan, A. and Elaine Shi and Rubinstein, B.I.P.} } @article {17616, title = {Local Balancing Influences Global Structure in Social Networks}, journal = {Proceedings of the National Academy of SciencesPNAS}, volume = {108}, year = {2011}, month = {2011/02/01/}, pages = {1751 - 1752}, abstract = {Although social networks can often be viewed as graphs in their most basic form, many networks of interest include explicit sentiments (positive or negative views) of the nodes (constituent entities, also known as vertices) toward each other. This is particularly the case in geopolitics, settings where networks of firms compete in the creation of standards, and situations where polarization is frequent, such as in national elections, etc. The classical theory of structural balance developed by Heider (1) suggests how nodes may modify their relationships locally to maintain a type of balance within triads of nodes; this theory has been enriched in terms of explicit temporal dynamics by Kulakowski et al. (2). The work of Marvel et al. (3) in PNAS shows a rigorous analysis of these dynamics and brings out interesting properties and applications thereof.}, isbn = {0027-8424, 1091-6490}, doi = {10.1073/pnas.1018901108}, url = {http://www.pnas.org/content/108/5/1751}, author = {Srinivasan, Aravind} } @article {13082, title = {Local Response Context Applied to Pedestrian Detection}, journal = {Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications}, year = {2011}, month = {2011///}, pages = {181 - 188}, abstract = {Appearing as an important task in computer vision, pedestrian detection has been widely investigated in the recent years. To design a robust detector, we propose a feature descriptor called Local Response Context (LRC). This descriptor captures discriminative information regarding the surrounding of the person{\textquoteright}s location by sampling the response map obtained by a generic sliding window detector. A partial least squares regression model using LRC descriptors is learned and employed as a second classification stage (after the execution of the generic detector to obtain the response map). Experiments based on the ETHZ pedestrian dataset show that the proposed approach improves significantly the results achieved by the generic detector alone and is comparable to the state-of-the-art methods.}, author = {Schwartz,W. and Davis, Larry S. and Pedrini,H.} } @conference {14830, title = {Localizing parts of faces using a consensus of exemplars}, booktitle = {Computer Vision and Pattern Recognition (CVPR), 2011 IEEE Conference on}, year = {2011}, month = {2011/06//}, pages = {545 - 552}, abstract = {We present a novel approach to localizing parts in images of human faces. 
The approach combines the output of local detectors with a non-parametric set of global models for the part locations based on over one thousand hand-labeled exemplar images. By assuming that the global models generate the part locations as hidden variables, we derive a Bayesian objective function. This function is optimized using a consensus of models for these hidden variables. The resulting localizer handles a much wider range of expression, pose, lighting and occlusion than prior ones. We show excellent performance on a new dataset gathered from the internet and show that our localizer achieves state-of-the-art performance on the less challenging BioID dataset.}, keywords = {Bayesian, faces;lighting;occlusion;pose;Bayes, function;exemplar, images;expression;face, localization;human, methods;face, objective, part, recognition;}, doi = {10.1109/CVPR.2011.5995602}, author = {Belhumeur,P. N. and Jacobs, David W. and Kriegman, D.J. and Kumar,N.} } @article {14705, title = {LOCKSMITH: Practical static race detection for C}, journal = {ACM Trans. Program. Lang. Syst.}, volume = {33}, year = {2011}, month = {2011/01//}, pages = {3:1{\textendash}3:55 - 3:1{\textendash}3:55}, abstract = {Locksmith is a static analysis tool for automatically detecting data races in C programs. In this article, we describe each of Locksmith{\textquoteright}s component analyses precisely, and present systematic measurements that isolate interesting trade-offs between precision and efficiency in each analysis. Using a benchmark suite comprising stand-alone applications and Linux device drivers totaling more than 200,000 lines of code, we found that a simple no-worklist strategy yielded the most efficient interprocedural dataflow analysis; that our sharing analysis was able to determine that most locations are thread-local, and therefore need not be protected by locks; that modeling C structs and void pointers precisely is key to both precision and efficiency; and that context sensitivity yields a much more precise analysis, though with decreased scalability. Put together, our results illuminate some of the key engineering challenges in building Locksmith and data race detection analyses in particular, and constraint-based program analyses in general.}, keywords = {context sensitivity, contextual effects, correlation inference, Data race, locksmith, race detection, sharing analysis, static analysis}, isbn = {0164-0925}, doi = {10.1145/1889997.1890000}, url = {http://doi.acm.org/10.1145/1889997.1890000}, author = {Pratikakis,Polyvios and Foster, Jeffrey S. and Hicks, Michael W.} } @inbook {19186, title = {A Longitudinal Study of Pressure Sensing to Infer Real-World Water Usage Events in the Home}, booktitle = {Pervasive Computing}, series = {Lecture Notes in Computer Science}, volume = {6696}, year = {2011}, month = {2011}, pages = {50 - 69}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {We present the first longitudinal study of pressure sensing to infer real-world water usage events in the home (e.g., dishwasher, upstairs bathroom sink, downstairs toilet). In order to study the pressure-based approach out in the wild , we deployed a ground truth sensor network for five weeks in three homes and two apartments that directly monitored valve-level water usage by fixtures and appliances . 
We use this data to, first, demonstrate the practical challenges in constructing water usage activity inference algorithms and, second, to inform the design of a new probabilistic-based classification approach. Inspired by algorithms in speech recognition, our novel Bayesian approach incorporates template matching, a language model, grammar, and prior probabilities. We show that with a single pressure sensor, our probabilistic algorithm can classify real-world water usage at the fixture level with 90\% accuracy and at the fixturecategory level with 96\% accuracy. With two pressure sensors, these accuracies increase to 94\% and 98\%. Finally, we show how our new approach can be trained with fewer examples than a strict template-matching approach alone.}, isbn = {978-3-642-21725-8}, url = {http://dx.doi.org/10.1007/978-3-642-21726-5_4}, author = {Jon Froehlich and Larson,Eric and Saba,Elliot and Campbell,Tim and Atlas,Les and Fogarty,James and Patel,Shwetak}, editor = {Lyons,Kent and Hightower,Jeffrey and Huang,Elaine} } @article {12864, title = {Long-term effects of ocean warming on the prokaryotic community: evidence from the vibrios}, journal = {The ISME Journal}, volume = {6}, year = {2011}, month = {2011/07/14/}, pages = {21 - 30}, abstract = {The long-term effects of ocean warming on prokaryotic communities are unknown because of lack of historical data. We overcame this gap by applying a retrospective molecular analysis to the bacterial community on formalin-fixed samples from the historical Continuous Plankton Recorder archive, which is one of the longest and most geographically extensive collections of marine biological samples in the world.
We showed that during the last half century, ubiquitous marine bacteria of the Vibrio genus, including Vibrio cholerae, increased in dominance within the plankton-associated bacterial community of the North Sea, where an unprecedented increase in bathing infections related to these bacteria was recently reported. Among environmental variables, increased sea surface temperature explained 45\% of the variance in Vibrio data, supporting the view that ocean warming is favouring the spread of vibrios and may be the cause of the globally increasing trend in their associated diseases.}, keywords = {ecophysiology, ecosystems, environmental biotechnology, geomicrobiology, ISME J, microbe interactions, microbial communities, microbial ecology, microbial engineering, microbial epidemiology, microbial genomics, microorganisms}, isbn = {1751-7362}, doi = {10.1038/ismej.2011.89}, url = {http://www.nature.com/ismej/journal/v6/n1/full/ismej201189a.html?WT.ec_id=ISMEJ-201201}, author = {Vezzulli,Luigi and Brettar,Ingrid and Pezzati,Elisabetta and Reid,Philip C. and Rita R Colwell and H{\"o}fle,Manfred G. and Pruzzo,Carla} } @article {17980, title = {A Low-Overhead Asynchronous Interconnection Network for GALS Chip Multiprocessors}, journal = {Computer-Aided Design of Integrated Circuits and Systems, IEEE Transactions on}, volume = {30}, year = {2011}, month = {2011/04//}, pages = {494 - 507}, abstract = {A new asynchronous interconnection network is introduced for globally-asynchronous locally-synchronous (GALS) chip multiprocessors. The network eliminates the need for global clock distribution, and can interface multiple synchronous timing domains operating at unrelated clock rates. In particular, two new highly-concurrent asynchronous components are introduced which provide simple routing and arbitration/merge functions. Post-layout simulations in identical commercial 90 nm technology indicate that comparable recent synchronous router nodes have 5.6-10.7 more energy per packet and 2.8-6.4 greater area than the new asynchronous nodes. Under random traffic, the network provides significantly lower latency and identical throughput over the entire operating range of the 800 MHz network and through mid-range traffic rates for the 1.36 GHz network, but with degradation at higher traffic rates. Preliminary evaluations are also presented for a mixed-timing (GALS) network in a shared-memory parallel architecture, running both random traffic and parallel benchmark kernels, as well as directions for further improvement.}, keywords = {1.36, 800, 90, architecture;size, architectures;shared, asynchronous, benchmark, chip, chips;multiprocessor, distribution, distribution;frequency, GALS, GHz;frequency, interconnection, kernel;post-layout, layout;clock, locally-synchronous, memory, MHz;globally-asynchronous, multiple, multiprocessor;clock, multiprocessor;interface, network;mixed-timing, network;network, networks;microprocessor, networks;network, nm;circuit, Parallel, routing;network-on-chip;parallel, routing;parallel, simulation;random, synchronous, systems;, timing;low-overhead, traffic;shared-memory}, isbn = {0278-0070}, doi = {10.1109/TCAD.2011.2114970}, author = {Horak,M.N. and Nowick,S.M. and Carlberg,M. 
and Vishkin, Uzi} } @article {12502, title = {Large-scale matrix factorization with missing data under additional constraints}, journal = {Advances in Neural Information Processing Systems}, volume = {23}, year = {2010}, month = {2010///}, pages = {1642 - 1650}, abstract = {Matrix factorization in the presence of missing data is at the core of many computer vision problems such as structure from motion (SfM), non-rigid SfM and photometric stereo. We formulate the problem of matrix factorization with missing data as a low-rank semidefinite program (LRSDP) with the advantage that: 1) an efficient quasi-Newton implementation of the LRSDP enables us to solve large-scale factorization problems, and 2) additional constraints such as orthonormality, required in orthographic SfM, can be directly incorporated in the new formulation. Our empirical evaluations suggest that, under the conditions of matrix completion theory, the proposed algorithm finds the optimal solution, and also requires fewer observations compared to the current state-of-the-art algorithms. We further demonstrate the effectiveness of the proposed algorithm in solving the affine SfM problem, non-rigid SfM and photometric stereo problems.}, author = {Mitra, K. and Sheorey,S. and Chellapa, Rama} } @conference {17986, title = {Lazy binary-splitting: a run-time adaptive work-stealing scheduler}, booktitle = {Proceedings of the 15th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming}, series = {PPoPP {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {179 - 190}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {We present Lazy Binary Splitting (LBS), a user-level scheduler of nested parallelism for shared-memory multiprocessors that builds on existing Eager Binary Splitting work-stealing (EBS) implemented in Intel{\textquoteright}s Threading Building Blocks (TBB), but improves performance and ease-of-programming. In its simplest form (SP), EBS requires manual tuning by repeatedly running the application under carefully controlled conditions to determine a stop-splitting-threshold (sst) for every do-all loop in the code. This threshold limits the parallelism and prevents excessive overheads for fine-grain parallelism. Besides being tedious, this tuning also over-fits the code to some particular dataset, platform and calling context of the do-all loop, resulting in poor performance portability for the code. LBS overcomes both the performance portability and ease-of-programming pitfalls of a manually fixed threshold by adapting dynamically to run-time conditions without requiring tuning. We compare LBS to Auto-Partitioner (AP), the latest default scheduler of TBB, which does not require manual tuning either but lacks context portability, and outperform it by 38.9\% using TBB{\textquoteright}s default AP configuration, and by 16.2\% after we tuned AP to our experimental platform. We also compare LBS to SP by manually finding SP{\textquoteright}s sst using a training dataset and then running both on a different execution dataset. LBS outperforms SP by 19.5\% on average, while allowing for improved performance portability without requiring tedious manual tuning. LBS also outperforms SP with sst=1, its default value when undefined, by 56.7\%, and serializing work-stealing (SWS), another work-stealer by 54.7\%.
Finally, compared to serializing inner parallelism (SI) which has been used by OpenMP, LBS is 54.2\% faster.}, keywords = {Dynamic scheduling, load balancing, nested parallelism, thread scheduling, work stealing}, isbn = {978-1-60558-877-3}, doi = {10.1145/1693453.1693479}, url = {http://doi.acm.org/10.1145/1693453.1693479}, author = {Tzannes,Alexandros and Caragea,George C. and Barua,Rajeev and Vishkin, Uzi} } @article {14421, title = {Learning algorithms for link prediction based on chance constraints}, journal = {Machine Learning and Knowledge Discovery in Databases}, year = {2010}, month = {2010///}, pages = {344 - 360}, abstract = {In this paper, we consider the link prediction problem, where we are given a partial snapshot of a network at some time and the goal is to predict the additional links formed at a later time. The accuracy of current prediction methods is quite low due to the extreme class skew and the large number of potential links. Here, we describe learning algorithms based on chance constrained programs and show that they exhibit all the properties needed for a good link predictor, namely, they allow preferential bias to positive or negative class; handle skewness in the data; and scale to large networks. Our experimental results on three real-world domains{\textemdash}co-authorship networks, biological networks and citation networks{\textemdash}show significant performance improvement over baseline algorithms. We conclude by briefly describing some promising future directions based on this work.}, doi = {10.1007/978-3-642-15880-3_28}, author = {Doppa,J. and Yu,J. and Tadepalli,P. and Getoor, Lise} } @article {12493, title = {A Learning Approach Towards Detection and Tracking of Lane Markings}, journal = {IEEE Transactions on Intelligent Transportation Systems}, volume = {PP}, year = {2010}, month = {2010/02/17/}, pages = {1 - 12}, abstract = {Road scene analysis is a challenging problem that has applications in autonomous navigation of vehicles. An integral component of this system is the robust detection and tracking of lane markings. It is a hard problem primarily due to large appearance variations in lane markings caused by factors such as occlusion (traffic on the road), shadows (from objects like trees), and changing lighting conditions of the scene (transition from day to night). In this paper, we address these issues through a learning-based approach using visual inputs from a camera mounted in front of a vehicle. We propose the following: 1) a pixel-hierarchy feature descriptor to model the contextual information shared by lane markings with the surrounding road region; 2) a robust boosting algorithm to select relevant contextual features for detecting lane markings; and 3) particle filters to track the lane markings, without knowledge of vehicle speed, by assuming the lane markings to be static through the video sequence and then learning the possible road scene variations from the statistics of tracked model parameters. We investigate the effectiveness of our algorithm on challenging daylight and night-time road video sequences.}, keywords = {Boosting, Context, Context modeling, Feature extraction, lane marking detection, outlier robustness, Roads, tracking and learning, Training, Vehicles}, isbn = {1524-9050}, doi = {10.1109/TITS.2012.2184756}, author = {Gopalan,R. and Hong, T. and Shneier, M. 
and Chellapa, Rama} } @conference {14227, title = {Learning shift-invariant sparse representation of actions}, booktitle = {2010 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2010}, month = {2010/06/13/18}, pages = {2630 - 2637}, publisher = {IEEE}, organization = {IEEE}, abstract = {A central problem in the analysis of motion capture (MoCap) data is how to decompose motion sequences into primitives. Ideally, a description in terms of primitives should facilitate the recognition, synthesis, and characterization of actions. We propose an unsupervised learning algorithm for automatically decomposing joint movements in human motion capture (MoCap) sequences into shift-invariant basis functions. Our formulation models the time series data of joint movements in actions as a sparse linear combination of short basis functions (snippets), which are executed (or {\textquotedblleft}activated{\textquotedblright}) at different positions in time. Given a set of MoCap sequences of different actions, our algorithm finds the decomposition of MoCap sequences in terms of basis functions and their activations in time. Using the tools of L1 minimization, the procedure alternately solves two large convex minimizations: Given the basis functions, a variant of Orthogonal Matching Pursuit solves for the activations, and given the activations, the Split Bregman Algorithm solves for the basis functions. Experiments demonstrate the power of the decomposition in a number of applications, including action recognition, retrieval, MoCap data compression, and as a tool for classification in the diagnosis of Parkinson (a motion disorder disease).}, keywords = {action characterization, Action recognition, action retrieval, action synthesis, Character recognition, data compression, human motion capture, HUMANS, Image matching, Image motion analysis, image representation, Image sequences, Information retrieval, joint movements, large convex minimizations, learning (artificial intelligence), learning shift-invariant sparse representation, Matching pursuit algorithms, minimisation, Minimization methods, MoCap data compression, Motion analysis, motion capture analysis, motion disorder disease, motion sequences, orthogonal matching pursuit, Parkinson diagnosis, Parkinson{\textquoteright}s disease, Pursuit algorithms, shift-invariant basis functions, short basis functions, snippets, sparse linear combination, split Bregman algorithm, time series, time series data, Unsupervised learning, unsupervised learning algorithm}, isbn = {978-1-4244-6984-0}, doi = {10.1109/CVPR.2010.5539977}, author = {Li,Yi and Ferm{\"u}ller, Cornelia and Aloimonos, J. 
and Hui Ji} } @inbook {12142, title = {Learning Through Application: The Maturing ofthe QIP in the SEL}, booktitle = {Making Software: What Really Works, and Why We Believe ItMaking Software: What Really Works, and Why We Believe It}, year = {2010}, month = {2010///}, pages = {65 - 65}, publisher = {O{\textquoteright}Reilly Media, Inc.}, organization = {O{\textquoteright}Reilly Media, Inc.}, isbn = {9780596808327}, author = {Basili, Victor R.} } @conference {15310, title = {Learning to efficiently rank}, booktitle = {Proceedings of the 33rd international ACM SIGIR conference on Research and development in information retrieval}, series = {SIGIR {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {138 - 145}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {It has been shown that learning to rank approaches are capable of learning highly effective ranking functions. However, these approaches have mostly ignored the important issue of efficiency. Given that both efficiency and effectiveness are important for real search engines, models that are optimized for effectiveness may not meet the strict efficiency requirements necessary to deploy in a production environment. In this work, we present a unified framework for jointly optimizing effectiveness and efficiency. We propose new metrics that capture the tradeoff between these two competing forces and devise a strategy for automatically learning models that directly optimize the tradeoff metrics. Experiments indicate that models learned in this way provide a good balance between retrieval effectiveness and efficiency. With specific loss functions, learned models converge to familiar existing ones, which demonstrates the generality of our framework. Finally, we show that our approach naturally leads to a reduction in the variance of query execution times, which is important for query load balancing and user satisfaction.}, keywords = {effectiveness and efficiency tradeoff, Learning to rank, Linear Models}, isbn = {978-1-4503-0153-4}, doi = {10.1145/1835449.1835475}, url = {http://doi.acm.org/10.1145/1835449.1835475}, author = {Wang,Lidan and Jimmy Lin and Metzler,Donald} } @article {13092, title = {Learning what and how of contextual models for scene labeling}, journal = {Computer Vision{\textendash}ECCV 2010}, year = {2010}, month = {2010///}, pages = {199 - 212}, abstract = {We present a data-driven approach to predict the importance of edges and construct a Markov network for image analysis based on statistical models of global and local image features. We also address the coupled problem of predicting the feature weights associated with each edge of a Markov network for evaluation of context. Experimental results indicate that this scene dependent structure construction model eliminates spurious edges and improves performance over fully-connected and neighborhood connected Markov network.}, author = {Jain, A. and Gupta,A. 
and Davis, Larry S.} } @conference {19282, title = {A Lightweight Dataflow Approach for Design and Implementation of SDR Systems}, booktitle = {Wireless Innovation Conference and Product Exposition, Washington DC, USA}, year = {2010}, month = {2010}, abstract = {Model-based design methods based on dataflow models of computation are attractive for design and implementation of wireless communication systems because of their intuitive correspondence to communication system block diagrams, and the formal structure that is exposed through formal dataflow representations (e.g., see [2]). In this paper, we introduce a novel lightweight dataflow (LWDF) programming model for model-based design and implementation of wireless communication and software-defined radio systems. The approach is suitable for improving the productivity of the design process; the agility with which designs can be retargeted across different platforms; and the quality of derived implementations. By {\textquotedblleft}lightweight{\textquotedblright}, we mean that the programming model is designed to be minimally intrusive on existing design processes, and to require minimal dependence on specialized tools or libraries. This allows designers to integrate and experiment with dataflow modeling approaches relatively quickly and flexibly into existing design methodologies and processes. }, url = {http://www.researchgate.net/publication/228788399_A_lightweight_dataflow_approach_for_design_and_implementation_of_SDR_systems/file/d912f511472fa25833.pdf}, author = {Chung-Ching Shen and Plishker,William and Wu, Hsiang-Huang and Bhattacharyya, Shuvra S.} } @conference {13381, title = {Lineage processing over correlated probabilistic databases}, booktitle = {Proceedings of the 2010 international conference on Management of data}, series = {SIGMOD {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {675 - 686}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In this paper, we address the problem of scalably evaluating conjunctive queries over correlated probabilistic databases containing tuple or attribute uncertainties. Like previous work, we adopt a two-phase approach where we first compute lineages of the output tuples, and then compute the probabilities of the lineage formulas. However, unlike previous work, we allow for arbitrary and complex correlations to be present in the data, captured via a forest of junction trees. We observe that evaluating even read-once (tree structured) lineages (e.g., those generated by hierarchical conjunctive queries), polynomially computable over tuple independent probabilistic databases, is $\#$P-complete for lightly correlated probabilistic databases like Markov sequences. We characterize the complexity of exact computation of the probability of the lineage formula on a correlated database using a parameter called lwidth (analogous to the notion of treewidth). For lineages that result in low lwidth, we compute exact probabilities using a novel message passing algorithm, and for lineages that induce large lwidths, we develop approximate Monte Carlo algorithms to estimate the result probabilities. We scale our algorithms to very large correlated probabilistic databases using the previously proposed INDSEP data structure. To mitigate the complexity of lineage evaluation, we develop optimization techniques to process a batch of lineages by sharing computation across formulas, and to exploit any independence relationships that may exist in the data.
Our experimental study illustrates the benefits of using our algorithms for processing lineage formulas over correlated probabilistic databases.}, keywords = {conjunctive queries, Indexing, junction trees, lineage, Probabilistic databases}, isbn = {978-1-4503-0032-2}, doi = {10.1145/1807167.1807241}, url = {http://doi.acm.org/10.1145/1807167.1807241}, author = {Kanagal,Bhargav and Deshpande, Amol} } @mastersthesis {12404, title = {Linguistic Extensions of Topic Models}, year = {2010}, month = {2010///}, school = {Princeton University}, abstract = {Topic models like latent Dirichlet allocation (LDA) provide a framework for analyzing large datasets where observations are collected into groups. Although topic modeling has been fruitfully applied to problems in social science, biology, and computer vision, it has been most widely used to model datasets where documents are modeled as exchangeable groups of words. In this context, topic models discover topics, distributions over words that express a coherent theme like {\textquotedblleft}business{\textquotedblright} or {\textquotedblleft}politics.{\textquotedblright} While one of the strengths of topic models is that they make few assumptions about the underlying data, such a general approach sometimes limits the type of problems topic models can solve. When we restrict our focus to natural language datasets, we can use insights from linguistics to create models that understand and discover richer language patterns. In this thesis, we extend LDA in three different ways: adding knowledge of word meaning, modeling multiple languages, and incorporating local syntactic context. These extensions apply topic models to new problems, such as discovering the meaning of ambiguous words, extend topic models for new datasets, such as unaligned multilingual corpora, and combine topic models with other sources of information about documents{\textquoteright} context. }, author = {Jordan Boyd-Graber} } @article {12144, title = {Linking Software Development and Business Strategy Through Measurement}, journal = {Computer}, volume = {43}, year = {2010}, month = {2010/04//}, pages = {57 - 65}, abstract = {The GQM+Strategies approach extends the goal/question/metric paradigm for measuring the success or failure of goals and strategies, adding enterprise-wide support for determining action on the basis of measurement results. An organization can thus integrate its measurement program across all levels.}, keywords = {GQM+Strategies approach, business strategy, enterprise wide support, measurement program, software development, organisational aspects, software engineering}, isbn = {0018-9162}, doi = {10.1109/MC.2010.108}, author = {Basili, Victor R. and Lindvall,M. and Regardie,M. and Seaman,C. and Heidrich,J. and Munch,J. and Rombach,D. and Trendowicz,A.} } @conference {19290, title = {Loop transformations for interface-based hierarchies in SDF graphs}, booktitle = {2010 21st IEEE International Conference on Application-specific Systems Architectures and Processors (ASAP)}, year = {2010}, month = {2010}, pages = {341 - 344}, abstract = {Data-flow has proven to be an attractive computation model for programming digital signal processing (DSP) applications. A restricted version of data-flow, termed synchronous data-flow (SDF), offers strong compile-time predictability properties, but has limited expressive power. A new type of hierarchy (Interface-based SDF) has been proposed allowing more expressivity while maintaining its predictability.
One of the main problems with this hierarchical SDF model is the lack of trade-off between parallelism and network clustering. This paper presents a systematic method for applying an important class of loop transformation techniques in the context of interface-based SDF semantics. The resulting approach provides novel capabilities for integrating parallelism extraction properties of the targeted loop transformations with the useful modeling, analysis, and code reuse properties provided by SDF.}, keywords = {Application software, code generation, Computer architecture, Computer interfaces, Data-Flow programming, Digital signal processing, Loop parallelization, PARALLEL PROCESSING, Power engineering computing, Power system modeling, Processor scheduling, Programming profession, scheduling, SDF graph, system recovery}, author = {Piat, J. and Bhattacharyya, Shuvra S. and Raulet, M.} } @article {13954, title = {Loudspeaker and Microphone Array Signal Processing-Plane-Wave Decomposition of Acoustical Scenes Via Spherical and Cylindrical Microphone Arrays}, journal = {IEEE transactions on audio, speech, and language processing}, volume = {20}, year = {2010}, month = {2010///}, pages = {2 - 2}, author = {Zotkin,Dmitry N and Duraiswami, Ramani and Gumerov, Nail A.} } @article {19287, title = {A Low-overhead Scheduling Methodology for Fine-grained Acceleration of Signal Processing Systems}, journal = {Journal of Signal Processing Systems}, volume = {60}, year = {2010}, month = {2010}, pages = {333 - 343}, abstract = {Fine-grained accelerators have the potential to deliver significant benefits in various platforms for embedded signal processing. Due to the moderate complexity of their targeted operations, these accelerators must be managed with minimal run-time overhead. In this paper, we present a methodology for applying flow-shop scheduling techniques to make effective, low-overhead use of fine-grained DSP accelerators. We formulate the underlying scheduling approach in terms of general flow-shop scheduling concepts, and demonstrate our methodology concretely by applying it to MPEG-4 video decoding. We present quantitative experiments on a soft processor that runs on a field-programmable gate array, and provide insight on trends and trade-offs among different flow-shop scheduling approaches when applied to run-time management of fine-grained acceleration.}, url = {http://link.springer.com/article/10.1007/s11265-009-0366-z}, author = {Boutellier, Jani and Bhattacharyya, Shuvra S. and Silv{\'e}n, Olli} } @article {17621, title = {LP-rounding algorithms for facility-location problems}, journal = {arXiv:1007.3611}, year = {2010}, month = {2010/07/21/}, abstract = {We study LP-rounding approximation algorithms for metric uncapacitated facility-location problems. We first give a new analysis for the algorithm of Chudak and Shmoys, which differs from the analysis of Byrka and Aardal in that now we do not need any bound based on the solution to the dual LP program. Besides obtaining the optimal bifactor approximation as do Byrka and Aardal, we can now also show that the algorithm with scaling parameter equaling 1.58 is, in fact, an 1.58-approximation algorithm. More importantly, we suggest an approach based on additional randomization and analyses such as ours, which could achieve or approach the conjectured optimal 1.46...--approximation for this basic problem. 
Next, using essentially the same techniques, we obtain improved approximation algorithms in the 2-stage stochastic variant of the problem, where we must open a subset of facilities having only stochastic information about the future demand from the clients. For this problem we obtain a 2.2975-approximation algorithm in the standard setting, and a 2.4957-approximation in the more restricted, per-scenario setting. We then study robust fault-tolerant facility location, introduced by Chechik and Peleg: solutions here are designed to provide low connection cost in case of failure of up to $k$ facilities. Chechik and Peleg gave a 6.5-approximation algorithm for $k=1$ and a ($7.5k + 1.5$)-approximation algorithm for general $k$. We improve this to an LP-rounding $(k+5+4/k)$-approximation algorithm. We also observe that in case of oblivious failures the expected approximation ratio can be reduced to $k + 1.5$, and that the integrality gap of the natural LP-relaxation of the problem is at least $k + 1$.}, keywords = {Computer Science - Data Structures and Algorithms}, url = {http://arxiv.org/abs/1007.3611}, author = {Byrka,Jaroslaw and Ghodsi,Mohammadreza and Srinivasan, Aravind} } @article {13579, title = {Language Identification for Handwritten Document Images Using AShape Codebook}, journal = {Pattern Recognition}, volume = {42}, year = {2009}, month = {2009/12//}, pages = {3184 - 3191}, abstract = {Language identification for handwritten document images is an open document analysis problem. In this paper, we propose a novel approach to language identification for documents containing mixture of handwritten and machine printed text using image descriptors constructed from a codebook of shape features. We encode local text structures using scale and rotation invariant codewords, each representing a segmentation-free shape feature that is generic enough to be detected repeatably. We learn a concise, structurally indexed shape codebook from training by clustering and partitioning similar feature types through graph cuts. Our approach is easily extensible and does not require skew correction, scale normalization, or segmentation. We quantitatively evaluate our approach using a large real-world document image collection, which is composed of 1,512 documents in eight languages (Arabic, Chinese, English, Hindi, Japanese, Korean, Russian, and Thai) and contains a complex mixture of handwritten and machine printed content. Experiments demonstrate the robustness and flexibility of our approach, and show exceptional language identification performance that exceeds the state of the art.}, author = {Zhu,Guangyu and Yu,Xiaodong and Li,Yi and David Doermann} } @conference {13102, title = {Learning Discriminative Appearance-Based Models Using Partial Least Squares}, booktitle = {Computer Graphics and Image Processing (SIBGRAPI), 2009 XXII Brazilian Symposium on}, year = {2009}, month = {2009/10//}, pages = {322 - 329}, abstract = {Appearance information is essential for applications such as tracking and people recognition. One of the main problems of using appearance-based discriminative models is the ambiguities among classes when the number of persons being considered increases. To reduce the amount of ambiguity, we propose the use of a rich set of feature descriptors based on color, textures and edges. Another issue regarding appearance modeling is the limited number of training samples available for each appearance. 
The discriminative models are created using a powerful statistical tool called partial least squares (PLS), responsible for weighting the features according to their discriminative power for each different appearance. The experimental results, based on appearance-based person recognition, demonstrate that the use of an enriched feature set analyzed by PLS reduces the ambiguity among different appearances and provides higher recognition rates when compared to other machine learning techniques.}, keywords = {(artificial, analysis;learning, appearance, approximations;object, based, colour, descriptors;learning, discriminative, intelligence);least, learning, least, models;machine, person, recognition;, recognition;feature, squares, squares;image, techniques;partial}, doi = {10.1109/SIBGRAPI.2009.42}, author = {Schwartz, W.R. and Davis, Larry S.} } @article {12534, title = {Learning Facial Aging Models: A Face Recognition Perspective}, journal = {Biometrics: theory, methods, and applications}, volume = {9}, year = {2009}, month = {2009///}, pages = {271 - 271}, author = {Ramanathan,N. and Chellapa, Rama} } @conference {12519, title = {Learning multi-modal densities on Discriminative Temporal Interaction Manifold for group activity recognition}, booktitle = {Computer Vision and Pattern Recognition, 2009. CVPR 2009. IEEE Conference on}, year = {2009}, month = {2009/06//}, pages = {2450 - 2457}, abstract = {While video-based activity analysis and recognition has received much attention, existing body of work mostly deals with single object/person case. Coordinated multi-object activities, or group activities, present in a variety of applications such as surveillance, sports, and biological monitoring records, etc., are the main focus of this paper. Unlike earlier attempts which model the complex spatial temporal constraints among multiple objects with a parametric Bayesian network, we propose a Discriminative Temporal Interaction Manifold (DTIM) framework as a data-driven strategy to characterize the group motion pattern without employing specific domain knowledge. In particular, we establish probability densities on the DTIM, whose element, the discriminative temporal interaction matrix, compactly describes the coordination and interaction among multiple objects in a group activity. For each class of group activity we learn a multi-modal density function on the DTIM. A Maximum a Posteriori (MAP) classifier on the manifold is then designed for recognizing new activities. Experiments on football play recognition demonstrate the effectiveness of the approach.}, keywords = {a, activities;parametric, activity, analysis;belief, analysis;pattern, Bayesian, classification;image, classifier;multimodal, complex, constraints;data-driven, density, equipment;video, function;multiobject, interaction, manifold;discriminative, matrix;football, MOTION, network;video-based, networks;image, play, posteriori, processing;, recognition, recognition;group, recognition;maximum, signal, spatial, strategy;discriminative, temporal}, doi = {10.1109/CVPR.2009.5206676}, author = {Ruonan Li and Chellapa, Rama and Zhou,S. K} } @article {13450, title = {Learning to trust in the competence and commitment of agents}, journal = {Autonomous Agents and Multi-Agent Systems}, volume = {18}, year = {2009}, month = {2009///}, pages = {36 - 82}, abstract = {For agents to collaborate in open multi-agent systems, each agent must trust in the other agents{\textquoteright} ability to complete tasks and willingness to cooperate. 
Agents need to decide between cooperative and opportunistic behavior based on their assessment of another agent{\textquoteright}s trustworthiness. In particular, an agent can have two beliefs about a potential partner that tend to indicate trustworthiness: that the partner is competent and that the partner expects to engage in future interactions. This paper explores an approach that models competence as an agent{\textquoteright}s probability of successfully performing an action, and models belief in future interactions as a discount factor. We evaluate the underlying decision framework{\textquoteright}s performance given accurate knowledge of the model{\textquoteright}s parameters in an evolutionary game setting. We then introduce a game-theoretic framework in which an agent can learn a model of another agent online, using the Harsanyi transformation. The learning agents evaluate a set of competing hypotheses about another agent during the simulated play of an indefinitely repeated game. The Harsanyi strategy is shown to demonstrate robust and successful online play against a variety of static, classic, and learning strategies in a variable-payoff Iterated Prisoner{\textquoteright}s Dilemma setting.}, isbn = {1387-2532}, url = {http://dx.doi.org/10.1007/s10458-008-9055-8}, author = {Smith,Michael and desJardins, Marie} } @article {14427, title = {Link-based active learning}, journal = {NIPS Workshop on Analyzing Networks and Learning with Graphs}, year = {2009}, month = {2009///}, abstract = {Supervised and semi-supervised data mining techniques require labeled data. However, labeling examples is costly for many real-world applications. To address this problem, active learning techniques have been developed to guide the labeling process in an effort to minimize the amount of labeled data without sacrificing much from the quality of the learned models. Yet, most of the active learning methods to date have remained relatively agnostic to the rich structure offered by network data, often ignoring the relationships between the nodes of a network. On the other hand, the relational learning community has shown that the relationships can be very informative for various prediction tasks. In this paper, we propose different ways of adapting existing active learning work to network data while utilizing links to select better examples to label. }, author = {Bilgic,M. and Getoor, Lise} } @article {15768, title = {Locality bounds on Hamiltonians for stabilizer codes}, journal = {Quantum Info. Comput.}, volume = {9}, year = {2009}, month = {2009/05//}, pages = {487 - 499}, abstract = {In this paper, we study the complexity of Hamiltonians whose groundstate is a stabilizer code. We introduce various notions of k-locality of a stabilizer code, inherited from the associated stabilizer group. A choice of generators leads to a Hamiltonian with the code in its groundspace. We establish bounds on the locality of any other Hamiltonian whose groundspace contains such a code, whether or not its Pauli tensor summands commute. Our results provide insight into the cost of creating an energy gap for passive error correction and for adiabatic quantum computing. The results simplify in the cases of XZ-split codes such as Calderbank-Shor-Steane stabilizer codes and topologically-ordered stabilizer codes arising from surface cellulations.}, isbn = {1533-7146}, url = {http://dl.acm.org/citation.cfm?id=2011791.2011800}, author = {Bullock,Stephen S.
and O{\textquoteright}Leary, Dianne P.} } @conference {12516, title = {Locally time-invariant models of human activities using trajectories on the grassmannian}, booktitle = {Computer Vision and Pattern Recognition, 2009. CVPR 2009. IEEE Conference on}, year = {2009}, month = {2009/06//}, pages = {2435 - 2441}, abstract = {Human activity analysis is an important problem in computer vision with applications in surveillance and summarization and indexing of consumer content. Complex human activities are characterized by non-linear dynamics that make learning, inference and recognition hard. In this paper, we consider the problem of modeling and recognizing complex activities which exhibit time-varying dynamics. To this end, we describe activities as outputs of linear dynamic systems (LDS) whose parameters vary with time, or a time-varying linear dynamic system (TV-LDS). We discuss parameter estimation methods for this class of models by assuming that the parameters are locally time-invariant. Then, we represent the space of LDS models as a Grassmann manifold. Then, the TV-LDS model is defined as a trajectory on the Grassmann manifold. We show how trajectories on the Grassmannian can be characterized using appropriate distance metrics and statistical methods that reflect the underlying geometry of the manifold. This results in more expressive and powerful models for complex human activities. We demonstrate the strength of the framework for activity-based summarization of long videos and recognition of complex human actions on two datasets.}, keywords = {action, activity, analysis;local, analysis;video, content, dynamic, Estimation, estimation;statistical, Grassmann, indexing;distance, linear, manifold;activity-based, method;statistical, method;surveillance;time-varying, metrics;human, model;parameter, recognition;human, recognition;indexing;parameter, summarization;computer, surveillance;, system;computer, time-invariant, vision;consumer, vision;image}, doi = {10.1109/CVPR.2009.5206710}, author = {Turaga,P. and Chellapa, Rama} } @conference {13588, title = {Logo Matching for Document Image Retrieval}, booktitle = {International Conference on Document Analysis and Recognition (ICDAR 2009)}, year = {2009}, month = {2009///}, pages = {606 - 610}, abstract = {Graphics detection and recognition are fundamental research problems in document image analysis and retrieval. As one of the most pervasive graphical elements in business and government documents, logos may enable immediate identification of organizational entities and serve extensively as a declaration of a document{\textquoteright}s source and ownership. In this work, we developed an automatic logo-based document image retrieval system that handles: 1) Logo detection and segmentation by boosting a cascade of classifiers across multiple image scales; and 2) Logo matching using translation, scale, and rotation invariant shape descriptors and matching algorithms. Our approach is segmentation free and layout independent and we address logo retrieval in an unconstrained setting of 2-D feature point matching. 
Finally, we quantitatively evaluate the effectiveness of our approach using large collections of real-world complex document images.}, author = {Zhu,Guangyu and David Doermann} } @conference {13807, title = {Language and translation model adaptation using comparable corpora}, booktitle = {Proceedings of the Conference on Empirical Methods in Natural Language Processing}, series = {EMNLP {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {857 - 866}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {Traditionally, statistical machine translation systems have relied on parallel bi-lingual data to train a translation model. While bi-lingual parallel data are expensive to generate, monolingual data are relatively common. Yet monolingual data have been under-utilized, having been used primarily for training a language model in the target language. This paper describes a novel method for utilizing monolingual target data to improve the performance of a statistical machine translation system on news stories. The method exploits the existence of comparable text---multiple texts in the target language that discuss the same or similar stories as found in the source language document. For every source document that is to be translated, a large monolingual data set in the target language is searched for documents that might be comparable to the source documents. These documents are then used to adapt the MT system to increase the probability of generating texts that resemble the comparable document. Experimental results obtained by adapting both the language and translation models show substantial gains over the baseline system.}, url = {http://dl.acm.org/citation.cfm?id=1613715.1613825}, author = {Snover,Matthew and Dorr, Bonnie J and Schwartz,Richard} } @inbook {13024, title = {The Lattice Project: a Grid research and production environment combining multiple Grid computing models}, booktitle = {Distributed \& Grid Computing {\textemdash} Science Made Transparent for Everyone. Principles, Applications and Supporting Communities}, year = {2008}, month = {2008///}, pages = {2 - 13}, publisher = {Rechenkraft.net}, organization = {Rechenkraft.net}, address = {Marburg}, author = {Bazinet,A. L and Cummings, Michael P.}, editor = {Weber,MHW} } @article {13447, title = {Learning Abstract Properties of Swarm Systems}, journal = {Proceedings of the 8th International Conference on Autonomous Agents and Multiagent Systems}, year = {2008}, month = {2008///}, author = {Miner,D. and desJardins, Marie} } @conference {12543, title = {Learning action dictionaries from video}, booktitle = {Image Processing, 2008. ICIP 2008. 15th IEEE International Conference on}, year = {2008}, month = {2008/10//}, pages = {1704 - 1707}, abstract = {Summarizing the contents of a video containing human activities is an important problem in computer vision and has important applications in automated surveillance systems. Summarizing a video requires one to identify and learn a {\textquoteright}vocabulary{\textquoteright} of action-phrases corresponding to specific events and actions occurring in the video.
We propose a generative model for dynamic scenes containing human activities as a composition of independent action-phrases - each of which is derived from an underlying vocabulary. Given a long video sequence, we propose a completely unsupervised approach to learn the vocabulary. Once the vocabulary is learnt, a video segment can be decomposed into a collection of phrases for summarization. We then describe methods to learn the correlations between activities and sequentiality of events. We also propose a novel method for building invariances to spatial transforms in the summarization scheme.}, keywords = {(artificial, action, action-phrases;learning, automated, decomposition;video, dictionaries;spatial, intelligence);video, segment, segmentation;learning, sequence;computer, Surveillance, surveillance;, systems;computer, transforms;video, vision;image, vision;independent}, doi = {10.1109/ICIP.2008.4712102}, author = {Turaga,P. and Chellapa, Rama} } @article {13131, title = {Learning pairwise dissimilarity profiles for appearance recognition in visual surveillance}, journal = {Advances in Visual Computing}, year = {2008}, month = {2008///}, pages = {23 - 34}, abstract = {Training discriminative classifiers for a large number of classes is a challenging problem due to increased ambiguities between classes. In order to better handle the ambiguities and to improve the scalability of classifiers to larger number of categories, we learn pairwise dissimilarity profiles (functions of spatial location) between categories and adapt them into nearest neighbor classification. We introduce a dissimilarity distance measure and linearly or nonlinearly combine it with direct distances. We illustrate and demonstrate the approach mainly in the context of appearance-based person recognition.}, author = {Lin,Z. and Davis, Larry S.} } @article {13449, title = {LEARNING STRUCTURED BAYESIAN NETWORKS: COMBINING ABSTRACTION HIERARCHIES AND TREE-STRUCTURED CONDITIONAL PROBABILITY TABLES}, journal = {Computational Intelligence}, volume = {24}, year = {2008}, month = {2008/02/01/}, pages = {1 - 22}, abstract = {Context-specific independence representations, such as tree-structured conditional probability distributions, capture local independence relationships among the random variables in a Bayesian network (BN). Local independence relationships among the random variables can also be captured by using attribute-value hierarchies to find an appropriate abstraction level for the values used to describe the conditional probability distributions. Capturing this local structure is important because it reduces the number of parameters required to represent the distribution. This can lead to more robust parameter estimation and structure selection, more efficient inference algorithms, and more interpretable models. In this paper, we introduce Tree-Abstraction-Based Search (TABS), an approach for learning a data distribution by inducing the graph structure and parameters of a BN from training data. TABS combines tree structure and attribute-value hierarchies to compactly represent conditional probability tables. To construct the attribute-value hierarchies, we investigate two data-driven techniques: a global clustering method, which uses all of the training data to build the attribute-value hierarchies, and can be performed as a preprocessing step; and a local clustering method, which uses only the local network structure to learn attribute-value hierarchies. 
We present empirical results for three real-world domains, finding that (1) combining tree structure and attribute-value hierarchies improves the accuracy of generalization, while providing a significant reduction in the number of parameters in the learned networks, and (2) data-derived hierarchies perform as well or better than expert-provided hierarchies.}, keywords = {abstraction hierarchies, background knowledge, Bayesian networks, clustering, machine learning, MDL}, isbn = {1467-8640}, doi = {10.1111/j.1467-8640.2007.00320.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1467-8640.2007.00320.x/abstract}, author = {desJardins, Marie and Rathod,Priyang and Getoor, Lise} } @article {18532, title = {Learning To Predict Bad Behavior}, journal = {NIPS 2007 Workshop on Machine Learning in Adversarial Environments for Computer Security}, year = {2008}, month = {2008///}, author = {Ahmed Syed,N. and Feamster, Nick and Gray,A.} } @conference {13581, title = {Learning Visual Shape Lexicon for Document Image Content Recognition}, booktitle = {The 10th European Conference on Computer Vision (ECCV 2008)}, year = {2008}, month = {2008///}, pages = {745 - 758}, address = {Marseille, France}, abstract = {Developing effective content recognition methods for diverse imagery continues to challenge computer vision researchers. We present a new approach for document image content categorization using a lexicon of shape features. Each lexical word corresponds to a scale and rotation invariant shape feature that is generic enough to be detected repeatably and segmentation free. We learn a concise, structurally indexed shape lexicon from training by clustering and partitioning feature types through graph cuts. We demonstrate our approach on two challenging document image content recognition problems: 1) The classification of 4,500 Web images crawled from Google Image Search into three content categories {\textemdash} pure image, image with text, and document image, and 2) Language identification of 8 languages (Arabic, Chinese, English, Hindi, Japanese, Korean, Russian, and Thai) on a 1,512 complex document image database composed of mixed machine printed text and handwriting. Our approach is capable of handling high intra-class variability and shows results that exceed other state-of-the-art approaches, allowing it to be used as a content recognizer in image indexing and retrieval systems.}, author = {Zhu,Guangyu and Yu,Xiaodong and Li,Yi and David Doermann} } @conference {14454, title = {Leveraging social context for searching social media}, booktitle = {Proceedings of the 2008 ACM workshop on Search in social media}, series = {SSM {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {91 - 94}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The ability to utilize and benefit from today{\textquoteright}s explosion of social media sites depends on providing tools that allow users to productively participate. In order to participate, users must be able to find resources (both people and information) that they find valuable. Here, we argue that in order to do this effectively, we should make use of a user{\textquoteright}s "social context".
A user{\textquoteright}s social context includes both their personal social context (their friends and the communities to which they belong) and their community social context (their role and identity in different communities).}, keywords = {social context, social media, social search}, isbn = {978-1-60558-258-0}, doi = {10.1145/1458583.1458602}, url = {http://doi.acm.org/10.1145/1458583.1458602}, author = {Smith,Marc and Barash,Vladimir and Getoor, Lise and Lauw,Hady W.} } @conference {13118, title = {A Logic Framework for Sports Video Summarization Using Text-Based Semantic Annotation}, booktitle = {Semantic Media Adaptation and Personalization, 2008. SMAP {\textquoteright}08. Third International Workshop on}, year = {2008}, month = {2008/12//}, pages = {69 - 75}, abstract = {Detection of semantic events in sports videos is an essential step towards video summarization. A large volume of research has been conducted for automatic semantic event detection and summarization of sports videos. In this paper we present a novel sports video summarization framework using a combination of text, video and logic analysis. Parse trees are used to analyze structured and free-style text webcasting of sports games and extract the game{\textquoteright}s semantic events, such as goals and penalties in soccer games. Semantic events are then hierarchically arranged before being passed to a logic processing engine. The logic engine receives the summary preferences from the user and subsequently parses the event hierarchy to generate the game{\textquoteright}s summary according to the user{\textquoteright}s preferences. The proposed framework was applied to both soccer and basketball videos. We achieved an average accuracy of 98.6\% and 100\% on soccer and basketball videos, respectively.}, keywords = {AUTOMATIC semantic event detection, broadcasting, Internet, logic PROCESSING engine, parse trees, sport, sports video summarization, text analysis, text Webcasting, text-based annotation, trees (mathematics), video signal processing}, doi = {10.1109/SMAP.2008.25}, author = {Refaey,M.A. and Abd-Almageed, Wael and Davis, Larry S.} } @article {12036, title = {A Language for Human Action}, journal = {Computer}, volume = {40}, year = {2007}, month = {2007/05//}, pages = {42 - 51}, abstract = {Human-centered computing (HCC) involves conforming computer technology to humans while naturally achieving human-machine interaction. In a human-centered system, the interaction focuses on human requirements, capabilities, and limitations. These anthropocentric systems also focus on the consideration of human sensory-motor skills in a wide range of activities. This ensures that the interface between artificial agents and human users accounts for perception and action in a novel interaction paradigm. In turn, this leads to behavior understanding through cognitive models that allow content description and, ultimately, the integration of real and virtual worlds. Our work focuses on building a language that maps to the lower-level sensory and motor languages and to the higher-level natural language. An empirically demonstrated human activity language provides sensory-motor-grounded representations for understanding human actions.
A linguistic framework allows the analysis and synthesis of these actions.}, keywords = {anthropocentric system, artificial agent, Artificial intelligence, cognitive model, Concrete, Databases, human action, human activity language, Human computer interaction, human factors, human sensory-motor skill, human-centered computing, human-machine interaction, HUMANS, Intelligent sensors, linguistic framework, linguistics, Mirrors, Morphology, natural language, natural languages, Neurons, Power system modeling, user interface, User interfaces}, isbn = {0018-9162}, doi = {10.1109/MC.2007.154}, author = {Guerra-Filho,G. and Aloimonos, J.} } @conference {12254, title = {Large-scale byzantine fault tolerance: safe but not always live}, booktitle = {Proceedings of the 3rd workshop on on Hot Topics in System Dependability}, series = {HotDep{\textquoteright}07}, year = {2007}, month = {2007///}, publisher = {USENIX Association}, organization = {USENIX Association}, address = {Berkeley, CA, USA}, abstract = {The overall correctness of large-scale systems composed of many groups of replicas executing BFT protocols scales poorly with the number of groups. This is because the probability of at least one group being compromised (more than 1/3 faulty replicas) increases rapidly as the number of groups increases. In this paper we address this problem with a simple modification to Castro and Liskov{\textquoteright}s BFT replication that allows for arbitrary choice of n (number of replicas) and f (failure threshold). The price to pay is a more restrictive liveness requirement, and we present the design of a large-scale BFT replicated system that obviates this problem.}, url = {http://dl.acm.org/citation.cfm?id=1323140.1323157}, author = {Rodrigues,Rodrigo and Kouznetsov,Petr and Bhattacharjee, Bobby} } @conference {18000, title = {Layout-Accurate Design and Implementation of a High-Throughput Interconnection Network for Single-Chip Parallel Processing}, booktitle = {High-Performance Interconnects, 2007. HOTI 2007. 15th Annual IEEE Symposium on}, year = {2007}, month = {2007/08//}, pages = {21 - 28}, abstract = {A mesh of trees (MoT) on-chip interconnection network has been proposed recently to provide high throughput between memory units and processors for single-chip parallel processing (Balkan et al., 2006). In this paper, we report our findings in bringing this concept to silicon. Specifically, we conduct cycle-accurate Verilog simulations to verify the analytical results claimed in (Balkan et al., 2006). We synthesize and obtain the layout of the MoT interconnection networks of various sizes. To further improve throughput, we investigate different arbitration primitives to handle load and store, the two most common memory operations. We also study the use of pipeline registers in large networks when there are long wires. Simulation based on full network layout demonstrates that significant throughput improvement can be achieved over the original proposed MoT interconnection network. The importance of this work lies in its validation of performance features of the MoT interconnection network, as they were previously shown to be competitive with traditional network solutions. The MoT network is currently used in an eXplicit multi-threading (XMT) on-chip parallel processor, which is engineered to support parallel programming. In that context, a 32-terminal MoT network could support up to 512 on-chip XMT processors. 
Our 8-terminal network, which could serve 8 processor clusters (or 128 total processors), was also recently accepted for fabrication.}, keywords = {description, design;mesh, interconnection, languages;multi-threading;multiprocessor, MoT, multi-threading;layout-accurate, network;on-chip, network;Verilog, networks;parallel, of, on-chip, Parallel, processing;, processing;hardware, processor;parallel, processors;on-chip, programming;pipeline, registers;single-chip, simulations;eXplicit, TREES, XMT}, doi = {10.1109/HOTI.2007.11}, author = {Balkan,A.O. and Horak,M.N. and Gang Qu and Vishkin, Uzi} } @article {13448, title = {Learning and visualizing user preferences over sets}, journal = {American Association for Artificial Intelligence (AAAI)}, year = {2007}, month = {2007///}, abstract = {In previous work, we introduced a representation language, DD-PREF, that balances preferences for particular items with preferences about the properties of the set. Specifically, DD-PREF permits the expression of preferences for depth (i.e., preferences for specific attribute values over others) and diversity of sets (i.e., preferences for broad vs. narrow distributions of attribute values). We have also shown how preferences represented in DD-PREF can be learned from training data. In this paper, we present a new experimental methodology, and give results in a Mars rover image domain. We also provide new visualizations of the learned preferences in this domain. Finally, we describe a Chinese restaurant menu domain for which we are currently gathering user data. }, author = {Wagstaff,K. and desJardins, Marie and Eaton,E. and Montminy,J.} } @conference {13142, title = {Learning Higher-order Transition Models in Medium-scale Camera Networks}, booktitle = {Computer Vision, 2007. ICCV 2007. IEEE 11th International Conference on}, year = {2007}, month = {2007/10//}, pages = {1 - 8}, abstract = {We present a Bayesian framework for learning higher-order transition models in video surveillance networks. Such higher-order models describe object movement between cameras in the network and have a greater predictive power for multi-camera tracking than camera adjacency alone. These models also provide inherent resilience to camera failure, filling in gaps left by single or even multiple non-adjacent camera failures. Our approach to estimating higher-order transition models relies on the accurate assignment of camera observations to the underlying trajectories of objects moving through the network. We address this data association problem by gathering the observations and evaluating alternative partitions of the observation set into individual object trajectories. Searching the complete partition space is intractable, so an incremental approach is taken, iteratively adding observations and pruning unlikely partitions. Partition likelihood is determined by the evaluation of a probabilistic graphical model. When the algorithm has considered all observations, the most likely (MAP) partition is taken as the true object trajectories. From these recovered trajectories, the higher-order statistics we seek can be derived and employed for tracking.
The partitioning algorithm we present is parallel in nature and can be readily extended to distributed computation in medium-scale smart camera networks.}, keywords = {(artificial, approach;medium-scale, association, Bayesian, camera, cameras;video, framework;data, fusion;iterative, graphical, intelligence);optical, learning;incremental, likelihood;multicamera, likely, methods;higher, methods;learning, model, model;video, movement;probabilistic, network;Bayes, network;most, order, partition, problem;higher-order, statistics;higher-order, statistics;image, Surveillance, surveillance;, tracking;object, tracking;probability;video, transition}, doi = {10.1109/ICCV.2007.4409203}, author = {Farrell,R. and David Doermann and Davis, Larry S.} } @article {14129, title = {Least squares preconditioners for stabilized discretizations of the Navier-Stokes equations}, journal = {SIAM J. Sci. Comput}, volume = {30}, year = {2007}, month = {2007///}, pages = {290 - 311}, author = {Elman, Howard and Howle, V. E and Shadid, J. and Silvester, D. and Tuminaro, R.} } @conference {14453, title = {Leveraging data and structure in ontology integration}, booktitle = {Proceedings of the 2007 ACM SIGMOD international conference on Management of data}, series = {SIGMOD {\textquoteright}07}, year = {2007}, month = {2007///}, pages = {449 - 460}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {There is a great deal of research on ontology integration which makes use of rich logical constraints to reason about the structural and logical alignment of ontologies. There is also considerable work on matching data instances from heterogeneous schema or ontologies. However, little work exploits the fact that ontologies include both data and structure. We aim to close this gap by presenting a new algorithm (ILIADS) that tightly integrates both data matching and logical reasoning to achieve better matching of ontologies. We evaluate our algorithm on a set of 30 pairs of OWL Lite ontologies with the schema and data matchings found by human reviewers. We compare against two systems - the ontology matching tool FCA-merge [28] and the schema matching tool COMA++ [1]. ILIADS shows an average improvement of 25\% in quality over FCA-merge and a 11\% improvement in recall over COMA++.}, keywords = {Data integration, logical inference, ontology alignment, schema mapping, statistical inference}, isbn = {978-1-59593-686-8}, doi = {10.1145/1247480.1247531}, url = {http://doi.acm.org/10.1145/1247480.1247531}, author = {Udrea,Octavian and Getoor, Lise and Miller,Ren{\'e}e J.} } @article {14512, title = {Link-based Classification}, volume = {UMIACS-TR-2007-11}, year = {2007}, month = {2007/02/19/}, institution = {Instititue for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Over the past few years, a number of approximate inference algorithms fornetworked data have been put forth. We empirically compare the performance of three of the popular algorithms: loopy belief propagation, mean field relaxation labeling and iterative classification. We rate each algorithm in terms of its robustness to noise, both in attribute values and correlations across links. We also compare them across varying types of correlations across links. 
}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/4298}, author = {Sen,Prithviraj and Getoor, Lise} } @article {13452, title = {Local strategy learning in networked multi-agent team formation}, journal = {Autonomous Agents and Multi-Agent Systems}, volume = {15}, year = {2007}, month = {2007///}, pages = {29 - 45}, abstract = {Networked multi-agent systems are comprised of many autonomous yet interdependent agents situated in a virtual social network. Two examples of such systems are supply chain networks and sensor networks. A common challenge in many networked multi-agent systems is decentralized team formation among the spatially and logically extended agents. Even in cooperative multi-agent systems, efficient team formation is made difficult by the limited local information available to the individual agents. We present a model of distributed multi-agent team formation in networked multi-agent systems, describe a policy learning framework for joining teams based on local information, and give empirical results on improving team formation performance. In particular, we show that local policy learning from limited information leads to a significant increase in organizational team formation performance compared to a random policy.}, isbn = {1387-2532}, url = {http://dx.doi.org/10.1007/s10458-006-0007-x}, author = {Bulka,Blazej and Gaston,Matthew and desJardins, Marie} } @conference {12343, title = {Low-overhead run-time scheduling for fine-grained acceleration of signal processing systems}, booktitle = {Signal Processing Systems, 2007 IEEE Workshop on}, year = {2007}, month = {2007///}, pages = {457 - 462}, author = {Boutellier,J. and Bhattacharyya, Shuvra S. and Silv{\'e}n,O.} } @article {14354, title = {A latent Dirichlet allocation model for entity resolution}, journal = {6th SIAM Conference on Data Mining (SDM), Bethesda, USA}, year = {2006}, month = {2006///}, abstract = {In this paper, we address the problem of entity resolution, where given many references to underlying objects, the task is to predict which references correspond to the same object. We propose a probabilistic model for collective entity resolution. Our approach differs from other recently proposed entity resolution approaches in that it is a) unsupervised, b) generative and c) introduces a hidden {\textquoteleft}group{\textquoteright} variable to capture collections of entities which are commonly observed together. The entity resolution decisions are not considered on an independent pairwise basis, but instead decisions are made collectively. We focus on how the use of relational links among the references can be exploited. We show how we can use Gibbs Sampling to infer the collaboration groups and the entities jointly from the observed co-author relationships among entity references and how this improves entity resolution performance. We demonstrate the utility of our approach on two real-world bibliographic datasets. In addition, we present preliminary results on characterizing conditions under which collaborative information is useful. }, author = {Bhattacharya,I. and Getoor, Lise} } @article {14436, title = {Learning through failure}, journal = {Probabilistic, Logical and Relational Learning-Towards a Synthesis}, year = {2006}, month = {2006///}, abstract = {PRISM, a symbolic-statistical modeling language we have been developing since {\textquoteright}97, recently incorporated a program transformation technique to handle failure in generative modeling.
I{\textquoteright}ll show this feature opens a way to new breeds of symbolic models, including EM learning from negative observations, constrained HMMs and finite PCFGs.}, author = {Sato,T. and Kameya,Y. and De Raedt,L. and Dietterich,T. and Getoor, Lise and Muggleton,S. H} } @conference {18877, title = {Learning to do HTN planning}, year = {2006}, month = {2006///}, pages = {390 - 393}, abstract = {We describe HDL, an algorithm that learns HTN domain descriptions by examining plan traces produced by an expert problem-solver. Prior work on learning HTN methods requires that all the methods{\textquoteright} information except for their preconditions be given in advance so that the learner can learn the preconditions. In contrast, HDL has no prior information about the methods. In our experiments, in most cases HDL converged fully with no more than about 200 plan traces. Furthermore, even when HDL was given only half the plan traces it required to fully converge, it usually was able to produce HTN methods that were sufficient to solve more than 3/4 of the planning problems in the test set.}, url = {https://www.aaai.org/Papers/ICAPS/2006/ICAPS06-048.pdf}, author = {Ilghami,O. and Nau, Dana S. and Munoz-Avila,H.} } @conference {13451, title = {Learning user preferences for sets of objects}, booktitle = {Proceedings of the 23rd international conference on Machine learning}, series = {ICML {\textquoteright}06}, year = {2006}, month = {2006///}, pages = {273 - 280}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Most work on preference learning has focused on pairwise preferences or rankings over individual items. In this paper, we present a method for learning preferences over sets of items. Our learning method takes as input a collection of positive examples---that is, one or more sets that have been identified by a user as desirable. Kernel density estimation is used to estimate the value function for individual items, and the desired set diversity is estimated from the average set diversity observed in the collection. Since this is a new learning problem, we introduce a new evaluation methodology and evaluate the learning method on two data collections: synthetic blocks-world data and a new real-world music data collection that we have gathered.}, isbn = {1-59593-383-2}, doi = {10.1145/1143844.1143879}, url = {http://doi.acm.org/10.1145/1143844.1143879}, author = {desJardins, Marie and Eaton,Eric and Wagstaff,Kiri L} } @article {15581, title = {On the Least Median Square Problem}, journal = {Discrete \& Computational Geometry}, volume = {36}, year = {2006}, month = {2006///}, pages = {593 - 607}, abstract = {We consider the exact and approximate computational complexity of the multivariate least median-of-squares (LMS) linear regression estimator. The LMS estimator is among the most widely used robust linear statistical estimators. Given a set of n points in $\mathbb{R}^d$ and a parameter k, the problem is equivalent to computing the narrowest slab bounded by two parallel hyperplanes that contains k of the points. We present algorithms for the exact and approximate versions of the multivariate LMS problem. We also provide nearly matching lower bounds for these problems.
These lower bounds hold under the assumptions that k is Ω(n) and that deciding whether n given points in $\mathbb{R}^d$ are affinely non-degenerate requires Ω(n^d) time.}, keywords = {Mathematics and Statistics}, isbn = {0179-5376}, doi = {10.1007/s00454-006-1267-6}, url = {http://www.springerlink.com/content/j44818j11173017k/abstract/}, author = {Erickson,Jeff and Har-Peled,Sariel and Mount, Dave} } @article {13812, title = {Leveraging recurrent phrase structure in large-scale ontology translation}, journal = {Proceedings of the 11th Annual Conference of the European Association for Machine Translation}, year = {2006}, month = {2006///}, abstract = {This paper presents a process for leveraging structural relationships and reusable phrases when translating large-scale ontologies. Digital libraries are becoming more and more prevalent. An important step in providing universal access to such material is to provide multi-lingual access to the underlying principles of organization via ontologies, thesauri, and controlled vocabularies. Machine translation of these resources requires high accuracy and a deep vocabulary. Human input is often required, but full manual translation can be slow and expensive. We report on a cost-effective approach to ontology translation. We describe our technique of prioritization, our process of collecting aligned translations and generating a new lexicon, and the resulting improvement to translation system output. Our preliminary evaluation indicates that this technique provides significant cost savings for human-assisted translation. The process we developed can be applied to ontologies in other domains and is easily incorporated into other translation systems. }, author = {Murray,G. C and Dorr, Bonnie J and Jimmy Lin and Haji{\v c},J. and Pecina,P.} } @conference {13813, title = {Leveraging reusability: cost-effective lexical acquisition for large-scale ontology translation}, booktitle = {Proceedings of the 21st International Conference on Computational Linguistics and the 44th annual meeting of the Association for Computational Linguistics}, series = {ACL-44}, year = {2006}, month = {2006///}, pages = {945 - 952}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {Thesauri and ontologies provide important value in facilitating access to digital archives by representing underlying principles of organization. Translation of such resources into multiple languages is an important component for providing multilingual access. However, the specificity of vocabulary terms in most ontologies precludes fully-automated machine translation using general-domain lexical resources. In this paper, we present an efficient process for leveraging human translations when constructing domain-specific lexical resources. We evaluate the effectiveness of this process by producing a probabilistic phrase dictionary and translating a thesaurus of 56,000 concepts used to catalogue a large archive of oral histories. Our experiments demonstrate a cost-effective technique for accurate machine translation of large ontologies.}, doi = {10.3115/1220175.1220294}, url = {http://dx.doi.org/10.3115/1220175.1220294}, author = {Murray,G.
Craig and Dorr, Bonnie J and Jimmy Lin and Haji{\v c},Jan and Pecina,Pavel} } @conference {14703, title = {Lock inference for atomic sections}, booktitle = {Proceedings of the First ACM SIGPLAN Workshop on Languages, Compilers, and Hardware Support for Transactional Computing}, year = {2006}, month = {2006///}, abstract = {To prevent unwanted interactions in multithreaded programs, programmers have traditionally employed pessimistic, blocking concurrency primitives. Using such primitives correctly and efficiently is notoriously difficult. To simplify the problem, recent research proposes that programmers specify atomic sections of code whose executions should be atomic with respect to one another, without dictating exactly how atomicity is enforced. Much work has explored using optimistic concurrency, or software transactions, as a means to implement atomic sections. This paper proposes to implement atomic sections using a static whole-program analysis to insert necessary uses of pessimistic concurrency primitives. Given a program that contains programmer-specified atomic sections and thread creations, our mutex inference algorithm efficiently infers a set of locks for each atomic section that should be acquired (released) upon entering (exiting) the atomic section. The key part of this algorithm is determining which memory locations in the program could be shared between threads, and using this information to generate the necessary locks. To determine sharing, our analysis uses the notion of continuation effects to track the locations accessed after each program point. As continuation effects are flow sensitive, a memory location may be thread-local before a thread creation and thread-shared afterward. We prove that our algorithm is correct, and provides parallelism according to the precision of the points-to analysis. While our algorithm also attempts to reduce the number of locks while preserving parallelism, we show that minimizing the number of locks is NP-hard. }, author = {Hicks, Michael W. and Foster, Jeffrey S. and Pratikakis,P.} } @article {14704, title = {LOCKSMITH: context-sensitive correlation analysis for race detection}, journal = {SIGPLAN Not.}, volume = {41}, year = {2006}, month = {2006/06//}, pages = {320 - 331}, abstract = {One common technique for preventing data races in multi-threaded programs is to ensure that all accesses to shared locations are consistently protected by a lock. We present a tool called LOCKSMITH for detecting data races in C programs by looking for violations of this pattern. We call the relationship between locks and the locations they protect consistent correlation, and the core of our technique is a novel constraint-based analysis that infers consistent correlation context-sensitively, using the results to check that locations are properly guarded by locks. We present the core of our algorithm for a simple formal language λ> which we have proven sound, and discuss how we scale it up to an algorithm that aims to be sound for all of C. We develop several techniques to improve the precision and performance of the analysis, including a sharing analysis for inferring thread locality; existential quantification for modeling locks in data structures; and heuristics for modeling unsafe features of C such as type casts. 
When applied to several benchmarks, including multi-threaded servers and Linux device drivers, LOCKSMITH found several races while producing a modest number of false alarms.}, keywords = {context-sensitivity, correlation, locksmith, multi-threaded programming, race detection, Type inference}, isbn = {0362-1340}, doi = {10.1145/1133255.1134019}, url = {http://doi.acm.org/10.1145/1133255.1134019}, author = {Pratikakis,Polyvios and Foster, Jeffrey S. and Hicks, Michael W.} } @inbook {17620, title = {Lower Bounds on the Deterministic and Quantum Communication Complexities of Hamming-Distance Problems}, booktitle = {Algorithms and Computation}, series = {Lecture Notes in Computer Science}, volume = {4288}, year = {2006}, month = {2006///}, pages = {628 - 637}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {A sub-area of discrepancy theory that has received much attention in computer science recently is that of explicit constructions of low-discrepancy point sets for various types of rectangle families in high dimension. This research has led to interesting applications in error-control coding, distributed protocols, Web document filtering, derandomization, and other areas. We give a short survey of this area here. }, isbn = {978-3-540-49694-6}, url = {http://dx.doi.org/10.1007/11940128_63}, author = {Ambainis,Andris and Gasarch,William and Srinivasan, Aravind and Utis,Andrey}, editor = {Asano,Tetsuo} } @article {14918, title = {Lambertian reflectance and linear subspaces}, volume = {:~09/705,507}, year = {2005}, month = {2005/02/08/}, abstract = {A method for choosing an image from a plurality of three-dimensional models which is most similar to an input image is provided. The method includes the steps of: (a) providing a database of the plurality of three-dimensional models; (b) providing an input image; (c) positioning each three-dimensional model relative to the input image; (d) for each three-dimensional model, determining a rendered image that is most similar to the input image by: (d)(i) computing a linear subspace that describes an approximation to the set of all possible rendered images that each three-dimensional model can produce under all possible lighting conditions where each point in the linear subspace represents a possible image; and one of (d)(ii) finding the point on the linear subspace that is closest to the input image or finding a rendered image in a subset of the linear subspace obtained by projecting the set of images that are generated by positive lights onto the linear subspace; (e) computing a...}, url = {http://www.google.com/patents?id=6hEWAAAAEBAJ}, author = {Jacobs, David W. 
and Basri,Ronen}, editor = {NEC Laboratories America, Inc.} } @conference {13033, title = {A large-scale exploration of effective global features for a joint entity detection and tracking model}, booktitle = {Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing}, year = {2005}, month = {2005///}, pages = {97 - 104}, author = {Daum{\'e}, Hal and Marcu,D.} } @conference {18911, title = {Learning approximate preconditions for methods in hierarchical plans}, series = {ICML {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {337 - 344}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {A significant challenge in developing planning systems for practical applications is the difficulty of acquiring the domain knowledge needed by such systems. One method for acquiring this knowledge is to learn it from plan traces, but this method typically requires a huge number of plan traces to converge. In this paper, we show that the problem with slow convergence can be circumvented by having the learner generate solution plans even before the planning domain is completely learned. Our empirical results show that these improvements reduce the size of the training set that is needed to find correct answers to a large percentage of planning problems in the test set.}, isbn = {1-59593-180-5}, doi = {10.1145/1102351.1102394}, url = {http://doi.acm.org/10.1145/1102351.1102394}, author = {Ilghami,Okhtay and Mu{\~n}oz-Avila,H{\'e}ctor and Nau, Dana S. and Aha,David W.} } @conference {13062, title = {Learning as search optimization: approximate large margin methods for structured prediction}, booktitle = {Proceedings of the 22nd international conference on Machine learning}, series = {ICML {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {169 - 176}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Mappings to structured output spaces (strings, trees, partitions, etc.) are typically learned using extensions of classification algorithms to simple graphical structures (e.g., linear chains) in which search and parameter estimation can be performed exactly. Unfortunately, in many complex problems, it is rare that exact search or parameter estimation is tractable. Instead of learning exact models and searching via heuristic means, we embrace this difficulty and treat the structured output problem in terms of approximate search. We present a framework for learning as search optimization, and two parameter updates with convergence theorems and bounds. Empirical evidence shows that our integrated approach to learning and decoding can outperform exact models at smaller computational cost.}, isbn = {1-59593-180-5}, doi = {10.1145/1102351.1102373}, url = {http://doi.acm.org/10.1145/1102351.1102373}, author = {Daum{\'e}, Hal and Marcu,Daniel} } @conference {11900, title = {A learning automata based power management for ad-hoc networks}, booktitle = {2005 IEEE International Conference on Systems, Man and Cybernetics}, volume = {4}, year = {2005}, month = {2005/10//}, pages = {3569 - 3573 Vol. 4}, publisher = {IEEE}, organization = {IEEE}, abstract = {Power management is a very important aspect of ad-hoc networks. It directly impacts the network throughput among other network metrics. On the other hand, transmission power management may result in disconnected networks and an increased level of collisions. 
In this paper, we introduce a transmission power control based on stochastic learning automata (SLA) to modify the transmission power. Based on the level of successful transmissions and the level of packet retransmissions, the SLA will modify the transmission power level either by increasing it or decreasing it. The probabilistic nature of SLA makes it a useful choice for ad-hoc networks. Using the network simulator NS, we show that using SLA for transmission power will result in an increased system bandwidth and a decrease in the collision levels.}, keywords = {ad hoc networks, ad-hoc networks, Computer network management, Computer networks, Energy management, Engineering management, learning automata, network metrics, network simulator, packet retransmissions, power control, power system management, power transmission control, stochastic learning automata, Stochastic processes, system bandwidth, Technology management, Throughput, transmission power control, transmission power level, transmission power management}, isbn = {0-7803-9298-1}, doi = {10.1109/ICSMC.2005.1571701}, author = {El-Osery,A. I and Baird,D. and Abd-Almageed, Wael} } @article {18912, title = {Learning preconditions for planning from plan traces and HTN structure}, journal = {Computational Intelligence}, volume = {21}, year = {2005}, month = {2005///}, pages = {388 - 413}, abstract = {A great challenge in developing planning systems for practical applications is the difficulty of acquiring the domain information needed to guide such systems. This paper describes a way to learn some of that knowledge. More specifically, the following points are discussed. (1) We introduce a theoretical basis for formally defining algorithms that learn preconditions for Hierarchical Task Network (HTN) methods. (2) We describe Candidate Elimination Method Learner (CaMeL), a supervised, eager, and incremental learning process for preconditions of HTN methods. We state and prove theorems about CaMeL{\textquoteright}s soundness, completeness, and convergence properties. (3) We present empirical results about CaMeL{\textquoteright}s convergence under various conditions. Among other things, CaMeL converges the fastest on the preconditions of the HTN methods that are needed the most often. Thus CaMeL{\textquoteright}s output can be useful even before it has fully converged.}, keywords = {candidate elimination, HTN planning, learning, version spaces}, isbn = {1467-8640}, doi = {10.1111/j.1467-8640.2005.00279.x}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1467-8640.2005.00279.x/abstract}, author = {Ilghami,Okhtay and Nau, Dana S. and Mu{\~n}oz-Avila,H{\'e}ctor and Aha,David W.} } @conference {17276, title = {Leonardo{\textquoteright}s laptop: human needs and the new computing technologies}, booktitle = {Proceedings of the 14th ACM international conference on Information and knowledge management}, series = {CIKM {\textquoteright}05}, year = {2005}, month = {2005///}, pages = {1 - 1}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {The old computing was about what computers could do; the new computing is about what people can do. To accelerate the shift from the old to the new computing, designers need to: (1) reduce computer user frustration. Recent studies show 46\% of time is lost to crashes, confusing instructions, navigation problems, etc. 
Public pressure for change could promote design improvements and increase reliability, thereby dramatically enhancing user experiences. (2) promote universal usability. Interfaces must be tailorable to a wide range of hardware, software, networks, and users. When broad services such as voting, healthcare, and education are envisioned, the challenge to designers is substantial. (3) envision a future in which human needs more directly shape technology evolution. Four circles of human relationships and four human activities map out the human needs for mobility, ubiquity, creativity, and community. The World Wide Med and million-person communities will be accessible through desktop, palmtop and fingertip devices to support e-learning, e-business, e-healthcare, and e-government. Leonardo da Vinci could help as an inspirational muse for the new computing. His example could push designers to improve quality through scientific study and more elegant visual design. Leonardo{\textquoteright}s example can guide us to the new computing, which emphasizes empowerment, creativity, and collaboration. Information visualization and personal photo interfaces will be shown: PhotoMesa (www.cs.umd.edu/hcil/photomesa) and PhotoFinder (www.cs.umd.edu/hcil/photolib). For more: http://mitpress.mit.edu/leonardoslaptop and http://www.cs.umd.edu/hcil/newcomputing.}, isbn = {1-59593-140-6}, doi = {10.1145/1099554.1099555}, url = {http://doi.acm.org/10.1145/1099554.1099555}, author = {Shneiderman, Ben} } @article {17418, title = {The limits of speech recognition: Understanding acoustic memory and appreciating prosody (2000)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {Human-human relationships are rarely a good model for the design of effective user interfaces. Spoken language is effective for human-human interaction (HHI), but it often has severe limitations when applied to human-computer interaction (HCI). Speech is slow for presenting information, it is difficult to review or edit, and it interferes with other cognitive tasks. However, speech has proven to be useful for store-and-forward messages, alerts in busy environments, and input-output for blind or motor-impaired users. Speech recognition for control is helpful for hands-busy, eyes-busy, mobility-required, or hostile environments, and it shows promise for use in telephone-based services. Dictation input is increasingly accurate, but adoption outside the disabled users community has been slow compared to visual interfaces. Obvious physical problems include fatigue from speaking continuously and the disruption in an office filled with people speaking.

By understanding the cognitive processes surrounding human acoustic memory and processing, interface designers may be able to integrate speech more effectively and guide users more successfully. Then, by appreciating the differences between HHI and HCI, designers may be able to choose appropriate applications for human use of speech with computers. The key distinction may be the rich emotional content conveyed by prosody -- the pacing, intonation, and amplitude in spoken language. Prosody is potent for HHI, but may be disruptive for HCI.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6532}, author = {Shneiderman, Ben} } @conference {16681, title = {The linguist{\textquoteright}s search engine: an overview}, booktitle = {Proceedings of the ACL 2005 on Interactive poster and demonstration sessions}, year = {2005}, month = {2005///}, pages = {33 - 36}, author = {Resnik, Philip and Elkiss,A.} } @article {14360, title = {Link mining: a survey}, journal = {ACM SIGKDD Explorations Newsletter}, volume = {7}, year = {2005}, month = {2005/12//}, pages = {3 - 12}, abstract = {Many datasets of interest today are best described as a linked collection of interrelated objects. These may represent homogeneous networks, in which there is a single object type and link type, or richer, heterogeneous networks, in which there may be multiple object and link types (and possibly other semantic information). Examples of homogeneous networks include single mode social networks, such as people connected by friendship links, or the WWW, a collection of linked web pages. Examples of heterogeneous networks include those in medical domains describing patients, diseases, treatments and contacts, or in bibliographic domains describing publications, authors, and venues. Link mining refers to data mining techniques that explicitly consider these links when building predictive or descriptive models of the linked data. Commonly addressed link mining tasks include object ranking, group detection, collective classification, link prediction and subgraph discovery. While network analysis has been studied in depth in particular areas such as social network analysis, hypertext mining, and web analysis, only recently has there been a cross-fertilization of ideas among these different communities. This is an exciting, rapidly expanding area. In this article, we review some of the common emerging themes.}, isbn = {1931-0145}, doi = {10.1145/1117454.1117456}, url = {http://doi.acm.org/10.1145/1117454.1117456}, author = {Getoor, Lise and Diehl,Christopher P.} } @article {14467, title = {Link Mining for the Semantic Web, Position Statement}, journal = {Proceedings of the Dagstuhl Seminar in Machine Learning for the Semantic Web}, year = {2005}, month = {2005///}, abstract = {A key challenge for data mining is tackling the problem of mining richly structured datasets, where the objects are linked in some way and we have some ontological information describing the domain. Links among the objects demonstrate certain patterns, which can be helpful for many data mining tasks and are usually hard to capture with traditional statistical models. Our goals are to develop machine learning algorithms which can a) make use of the ontological theory during learning, b) learn how to refine or update the ontological theory based on the learned model, and c) do a mix of both. 
}, author = {Getoor, Lise and Licamele,L.} } @article {14369, title = {Link-based classification}, journal = {Advanced methods for knowledge discovery from complex data}, year = {2005}, month = {2005///}, pages = {189 - 207}, abstract = {A key challenge for machine learning is the problem of mining richly structured data sets, where the objects are linked in some way due to either an explicit or implicit relationship that exists between the objects. Links among the objects demonstrate certain patterns, which can be helpful for many machine learning tasks and are usually hard to capture with traditional statistical models. Recently there has been a surge of interest in this area, fuelled largely by interest in web and hypertext mining, but also by interest in mining social networks, bibliographic citation data, epidemiological data and other domains best described using a linked or graph structure. In this chapter we propose a framework for modeling link distributions, a link-based model that supports discriminative models describing both the link distributions and the attributes of linked objects. We use a structured logistic regression model, capturing both content and links. We systematically evaluate several variants of our link-based model on a range of data sets including both web and citation collections. In all cases, the use of the link distribution improves classification performance.}, doi = {10.1007/1-84628-284-5_7}, author = {Getoor, Lise} } @article {17282, title = {Listening to Maps: User Evaluation of Interactive Sonifications of Geo-Referenced Data (2004)}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {In this paper, we summarize the Auditory Information Seeking Principle (AISP) (gist, navigate, filter, and details-on-demand). To improve blind access to geo-referenced statistical data, we developed several interactive sonifications, adhering to the above AISP. Two user studies are presented. In the first user study with nine sighted subjects, a preliminary map design is compared with an enhanced table design. The study shows subjects can recognize geographic data distribution patterns on a real map with 51 geographic regions, in both designs. The map-based design was strongly preferred. The study also shows evidence that AISP conforms to people{\textquoteright}s information seeking strategies. Based on the observations from the first user study, a second user study was conducted with forty-eight sighted subjects comparing four map designs. The effects of using sound to encode vertical geographic positions and two map navigation methods were compared. The result is presented and future work is discussed.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6518}, author = {Zhao,Haixia and Smith,Benjamin K and Norman,Kent L and Plaisant, Catherine and Shneiderman, Ben} } @article {16096, title = {Listening to Maps: User Evaluation of Interactive Sonifications of Geo-Referenced Data}, journal = {Institute for Systems Research Technical Reports}, year = {2005}, month = {2005///}, abstract = {In this paper, we summarize the Auditory Information Seeking Principle (AISP) (gist, navigate, filter, and details-on-demand). To improve blind access to geo-referenced statistical data, we developed several interactive sonifications, adhering to the above AISP. Two user studies are presented. In the first user study with nine sighted subjects, a preliminary map design is compared with an enhanced table design. 
The study shows subjects can recognize geographic data distribution patterns on a real map with 51 geographic regions, in both designs. The map-based design was strongly preferred. The study also shows evidence that AISP conforms to people{\textquoteright}s information seeking strategies. Based on the observations from the first user study, a second user study was conducted with forty-eight sighted subjects comparing four map designs. The effects of using sound to encode vertical geographic positions and two map navigation methods were compared. The result is presented and future work is discussed.}, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/6518}, author = {Zhao,Haixia and Smith,Benjamin K and Norman,Kent L and Plaisant, Catherine and Shneiderman, Ben} } @article {15990, title = {Logic, self-awareness and self-improvement: The metacognitive loop and the problem of brittleness}, journal = {Journal of Logic and Computation}, volume = {15}, year = {2005}, month = {2005///}, pages = {21 - 21}, author = {Anderson,M. L and Perlis, Don} } @article {15956, title = {A logic-based model of intention formation and action for multi-agent subcontracting}, journal = {Artificial Intelligence}, volume = {163}, year = {2005}, month = {2005/04//}, pages = {163 - 201}, abstract = {We present a formalism for representing the formation of intentions by agents engaged in cooperative activity. We use a syntactic approach presenting a formal logical calculus that can be regarded as a meta-logic that describes the reasoning and activities of the agents. Our central focus is on the evolving intentions of agents over time, and the conditions under which an agent can adopt and maintain an intention. In particular, the reasoning time and the time taken to subcontract are modeled explicitly in the logic. We axiomatize the concept of agent interactions in the meta-language, show that the meta-theory is consistent and describe the unique intended model of the meta-theory. In this context we deal both with subcontracting between agents and the presence of multiple recipes, that is, multiple ways of accomplishing tasks. We show that under various initial conditions and known facts about agent beliefs and abilities, the meta-theory representation yields good results.}, keywords = {Cooperative agents, intentions, Minimal model semantics, Subcontracting, Syntactic logic}, isbn = {0004-3702}, doi = {10.1016/j.artint.2004.11.003}, url = {http://www.sciencedirect.com/science/article/pii/S0004370204001924}, author = {Grant,John and Kraus,Sarit and Perlis, Don} } @inbook {15766, title = {Left-Brain/Right-Brain Multi-Document Summarization}, booktitle = {DUC 04 Conference Proceedings}, year = {2004}, month = {2004///}, publisher = {U.S. National Inst. of Standards and Technology}, organization = {U.S. National Inst. of Standards and Technology}, url = {http://duc.nist.gov/}, author = {Conroy,John M. and Schlesinger,Judith D. and Goldstein,Jade and O{\textquoteright}Leary, Dianne P.} } @conference {16121, title = {Les le{\c c}ons tir{\'e}es des deux comp{\'e}titions de visualisation d{\textquoteright}information}, booktitle = {Proceedings of the 16th conference on Association Francophone d{\textquoteright}Interaction Homme-Machine}, series = {IHM 2004}, year = {2004}, month = {2004///}, pages = {7 - 12}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Information visualization needs benchmarks to carry on. 
A benchmark is aimed at comparing information visualization techniques or systems. A benchmark is made of a dataset, a list of tasks mostly based on finding facts about the dataset, and a list of interesting or important findings about the datasets (the nuggets to find). For the second year, we are organizing the InfoVis Contest aimed at collecting results for benchmarks. We describe here the main lessons we learned.}, keywords = {benchmark, contest, Evaluation, Information Visualization}, isbn = {1-58113-926-8}, doi = {10.1145/1148613.1148616}, url = {http://doi.acm.org/10.1145/1148613.1148616}, author = {Fekete,Jean-Daniel and Plaisant, Catherine} } @conference {13705, title = {A lexically-driven algorithm for disfluency detection}, booktitle = {Proceedings of HLT-NAACL 2004: Short Papers}, series = {HLT-NAACL-Short {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {157 - 160}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {This paper describes a transformation-based learning approach to disfluency detection in speech transcripts using primarily lexical features. Our method produces comparable results to two other systems that make heavy use of prosodic features, thus demonstrating that reasonable performance can be achieved without extensive prosodic cues. In addition, we show that it is possible to facilitate the identification of less frequently disfluent discourse markers by taking speaker style into account.}, isbn = {1-932432-24-8}, url = {http://dl.acm.org/citation.cfm?id=1613984.1614024}, author = {Snover,Matthew and Dorr, Bonnie J and Schwartz,Richard} } @conference {13379, title = {Lifting the burden of history from adaptive query processing}, booktitle = {Proceedings of the Thirtieth international conference on Very large data bases - Volume 30}, series = {VLDB {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {948 - 959}, publisher = {VLDB Endowment}, organization = {VLDB Endowment}, abstract = {Adaptive query processing schemes attempt to re-optimize query plans during the course of query execution. A variety of techniques for adaptive query processing have been proposed, varying in the granularity at which they can make decisions [8]. The eddy [1] is the most aggressive of these techniques, with the flexibility to choose tuple-by-tuple how to order the application of operators. In this paper we identify and address a fundamental limitation of the original eddies proposal: the burden of history in routing. We observe that routing decisions have long-term effects on the state of operators in the query, and can severely constrain the ability of the eddy to adapt over time. We then propose a mechanism we call STAIRs that allows the query engine to manipulate the state stored inside the operators and undo the effects of past routing decisions. 
We demonstrate that eddies with STAIRs achieve both high adaptivity and good performance in the face of uncertainty, outperforming prior eddy proposals by orders of magnitude.}, isbn = {0-12-088469-0}, url = {http://dl.acm.org/citation.cfm?id=1316689.1316771}, author = {Deshpande, Amol and Hellerstein,Joseph M.} } @conference {17947, title = {Light Collages: Lighting Design for Effective Visualization}, booktitle = {Proceedings of the conference on Visualization {\textquoteright}04}, series = {VIS {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {281 - 288}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Washington, DC, USA}, abstract = {We introduce Light Collages {\textemdash} a lighting design system for effective visualization based on principles of human perception. Artists and illustrators enhance perception of features with lighting that is locally consistent and globally inconsistent. Inspired by these techniques, we design the placement of light sources to convey a greater sense of realism and better perception of shape with globally inconsistent lighting. Our algorithm segments the objects into local surface patches and uses a number of perceptual heuristics, such as highlights, shadows, and silhouettes, to enhance the perception of shape. We show our results on scientific and sculptured datasets.}, keywords = {inconsistent lighting, light placement, lighting design, proximity shadows, scientific illustration, silhouette enhancement}, isbn = {0-7803-8788-0}, doi = {10.1109/VISUAL.2004.62}, url = {http://dx.doi.org/10.1109/VISUAL.2004.62}, author = {Lee,Chang Ha and Hao,Xuejun and Varshney, Amitabh} } @conference {16487, title = {Links and paths through life sciences data sources}, booktitle = {Data Integration in the Life Sciences}, year = {2004}, month = {2004///}, pages = {203 - 211}, author = {Lacroix,Z. and Murthy,H. and Naumann,F. and Raschid, Louiqa} } @conference {15347, title = {Local Adaption and Dissipation Properties of a Weighted Essentially Non-Oscillatory Scheme}, booktitle = {34th AIAA Fluid Dynamics Conference and Exhibit}, year = {2004}, month = {2004///}, address = {Portland, OR}, abstract = {We perform several direct numerical simulations (DNS) of decaying isotropic turbulence with various initial turbulent Mach numbers Mt using a modified weighted essentially non-oscillatory (WENO) method. We then present a procedure for the identification of shock-containing and smooth regions where the WENO scheme should respectively engage its adaption mechanism and revert to its linear optimal stencil. Weirs has previously proposed the centrality index CI and nonlinearity index NI as two quantitative measures of stencil adaption, and we analyze these values within the regions of interest in the DNS flow fields. Our preliminary results indicate that these indices are suitable for assessments of the local adaption and dissipation properties of the WENO method; however, further studies must be conducted over a wider range of compressible flow conditions. }, author = {Taylor,E. M and Martin, M.P and Weirs,V. 
G} } @conference {18580, title = {Location diversity in anonymity networks}, booktitle = {Proceedings of the 2004 ACM workshop on Privacy in the electronic society}, series = {WPES {\textquoteright}04}, year = {2004}, month = {2004///}, pages = {66 - 76}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Anonymity networks have long relied on diversity of node location for protection against attacks---typically an adversary who can observe a larger fraction of the network can launch a more effective attack. We investigate the diversity of two deployed anonymity networks, Mixmaster and Tor, with respect to an adversary who controls a single Internet administrative domain. Specifically, we implement a variant of a recently proposed technique that passively estimates the set of administrative domains (also known as autonomous systems, or ASes) between two arbitrary end-hosts without having access to either end of the path. Using this technique, we analyze the AS-level paths that are likely to be used in these anonymity networks. We find several cases in each network where multiple nodes are in the same administrative domain. Further, many paths between nodes, and between nodes and popular endpoints, traverse the same domain.}, keywords = {anonymity, interdomain routing, mix networks}, isbn = {1-58113-968-3}, doi = {10.1145/1029179.1029199}, url = {http://doi.acm.org/10.1145/1029179.1029199}, author = {Feamster, Nick and Dingledine,Roger} } @conference {18881, title = {A logic of motion}, year = {2004}, month = {2004///}, pages = {85 - 94}, abstract = {There are numerous applications such as air traffic management, cellular phone location tracking, and vehicle protection systems where there is a critical need to reason about moving objects. In this paper, we propose a formal logic of motion (LOM for short). We provide a formal syntax for LOM, as well as a model theory for LOM. In addition, we develop algorithms to check consistency of LOM theories, as well as to answer certain kinds of queries posed to LOM theories. We have implemented these algorithms in a prototype LOM system -- we describe experiments showing that such queries can be efficiently executed in practice.}, url = {http://www.aaai.org/Papers/KR/2004/KR04-011.pdf}, author = {Yaman,F. and Nau, Dana S. and V.S. Subrahmanian} } @conference {14552, title = {Low-dimensional embedding with extra information}, booktitle = {Proceedings of the twentieth annual symposium on Computational geometry}, year = {2004}, month = {2004///}, pages = {320 - 329}, author = {Bǎdoiu,M. and Demaine,E. D and Hajiaghayi, Mohammad T. and Indyk,P.} } @article {14886, title = {Lambertian reflectance and linear subspaces}, journal = {Pattern Analysis and Machine Intelligence, IEEE Transactions on}, volume = {25}, year = {2003}, month = {2003/02//}, pages = {218 - 233}, abstract = {We prove that the set of all Lambertian reflectance functions (the mapping from surface normals to intensities) obtained with arbitrary distant light sources lies close to a 9D linear subspace. This implies that, in general, the set of images of a convex Lambertian object obtained under a wide variety of lighting conditions can be approximated accurately by a low-dimensional linear subspace, explaining prior empirical results. We also provide a simple analytic characterization of this linear space. 
We obtain these results by representing lighting using spherical harmonics and describing the effects of Lambertian materials as the analog of a convolution. These results allow us to construct algorithms for object recognition based on linear methods as well as algorithms that use convex optimization to enforce nonnegative lighting functions. We also show a simple way to enforce nonnegative lighting when the images of an object lie near a 4D linear space. We apply these algorithms to perform face recognition by finding the 3D model that best matches a 2D query image.}, keywords = {2D, 4D, 9D, analog;, analytic, characterization;, convex, convolution, distant, functions;, harmonics;, image, image;, intensities;, Lambertian, light, lighting, linear, methods;, nonnegative, normals;, object, optimization;, programming;, query, recognition;, reflectance;, reflectivity;, set;, sources;, space;, spherical, subspace;, subspaces;, surface}, isbn = {0162-8828}, doi = {10.1109/TPAMI.2003.1177153}, author = {Basri,R. and Jacobs, David W.} } @conference {16473, title = {Latency Profiles: Performance Monitoring for Wide Area Applications}, booktitle = {Internet Applications, IEEE Workshop on}, year = {2003}, month = {2003///}, pages = {74 - 74}, publisher = {IEEE Computer Society}, organization = {IEEE Computer Society}, address = {Los Alamitos, CA, USA}, abstract = {Recent technological advances have enabled the deployment of wide area applications against Internet accessible sources. A performance challenge to applications in such a setting is the unpredictable end-to-end latency of accessing these sources. We use passive information gathering mechanisms to learn end-to-end latency distributions and construct Latency Profiles (LPs). We hypothesize that a group of clients, within an autonomous system (AS), that are accessing a content server, in another AS, may be represented by (one or more) LPs. Related networking research on IDMaps, points of congestion, and BGP routes supports this hypothesis. We develop aggregate LPs to provide coverage of groups (clusters) of client-server pairs. Using data gathered from a (limited) experiment we demonstrate the feasibility of constructing LPs.}, doi = {http://doi.ieeecomputersociety.org/10.1109/WIAPP.2003.1210290}, author = {Raschid, Louiqa and Wen,Hui-Fang and Gal,Avigdor and Zadorozhny,Vladimir} } @conference {13247, title = {Learning dynamics for exemplar-based gesture recognition}, booktitle = {Computer Vision and Pattern Recognition, 2003. Proceedings. 2003 IEEE Computer Society Conference on}, volume = {1}, year = {2003}, month = {2003/06//}, pages = {I-571 - I-578 vol.1}, abstract = {This paper addresses the problem of capturing the dynamics for exemplar-based recognition systems. Traditional HMM provides a probabilistic tool to capture system dynamics and in the exemplar paradigm, HMM states are typically coupled with the exemplars. Alternatively, we propose a non-parametric HMM approach that uses a discrete HMM with arbitrary states (decoupled from exemplars) to capture the dynamics over a large exemplar space where a nonparametric estimation approach is used to model the exemplar distribution. This reduces the need for lengthy and non-optimal training of the HMM observation model. We used the proposed approach for view-based recognition of gestures. The approach is based on representing each gesture as a sequence of learned body poses (exemplars). 
The gestures are recognized through a probabilistic framework for matching these body poses and for imposing temporal constraints between different poses using the proposed non-parametric HMM.}, keywords = {arbitrary, body, by, Computer, constraint;, detection;, discrete, distribution, dynamics;, edge, estimation;, example;, exemplar, exemplar-based, extraction;, feature, framework;, gesture, gesture;, hidden, HMM;, human, image, learning, Markov, matching;, model;, models;, motion;, nonparametric, pose, probabilistic, recognition;, sequence;, space;, state;, statistics;, system, temporal, tool;, view-based, vision;}, doi = {10.1109/CVPR.2003.1211405}, author = {Elgammal,A. and Shet,V. and Yacoob,Yaser and Davis, Larry S.} } @article {14397, title = {Learning Structure From Statistical Models}, journal = {IEEE Data Engineering Bulletin}, volume = {26}, year = {2003}, month = {2003///}, pages = {11 - 18}, abstract = {Statistical relational learning is a newly emerging area of machine learning that combines statistical modeling with relational representations. Here we argue that it provides a unified framework for the discovery of structural information that can be exploited by a data management system. The categories of structure that can be discovered include: instance-level dependencies and correlations, for example intra-table column dependencies and inter-table join dependencies; record linkages and duplicates; and schema matching and schema discovery from unstructured and semi-structured data. }, author = {Getoor, Lise} } @book {17277, title = {Leonardo{\textquoteright}s Laptop: Human Needs and the New Computing Technologies}, year = {2003}, month = {2003/09/01/}, publisher = {MIT Press}, organization = {MIT Press}, abstract = {2003 IEEE-USAB Award for Distinguished Literary Contributions Furthering Public Understanding of the Profession, and selected as a Finalist in the category of Computer/Internet in the 2002 Independent Publisher Book Awards (IPPYs) presented by Independent Publisher Magazine. Ben Shneiderman{\textquoteright}s book dramatically raises computer users{\textquoteright} expectations of what they should get from technology. He opens their eyes to new possibilities and invites them to think freshly about future technology. He challenges developers to build products that better support human needs and that are usable at any bandwidth. Shneiderman proposes Leonardo da Vinci as an inspirational muse for the "new computing." He wonders how Leonardo would use a laptop and what applications he would create. Shneiderman shifts the focus from what computers can do to what users can do. A key transformation is to what he calls "universal usability," enabling participation by young and old, novice and expert, able and disabled. This transformation would empower those yearning for literacy or coping with their limitations. Shneiderman proposes new computing applications in education, medicine, business, and government. He envisions a World Wide Med that delivers secure patient histories in local languages at any emergency room and thriving million-person communities for e-commerce and e-government. Raising larger questions about human relationships and society, he explores the computer{\textquoteright}s potential to support creativity, consensus-seeking, and conflict resolution. 
Each chapter ends with a Skeptic{\textquoteright}s Corner that challenges assumptions about trust, privacy, and digital divides.}, keywords = {Computers / Data Processing, Computers / Data Processing / General, Computers / Data Processing / Storage \& Retrieval, Computers / Social Aspects / Human-Computer Interaction, Computers / System Administration / Storage \& Retrieval, Electronic data processing, Human-computer interaction, Technological forecasting, Technology \& Engineering / Social Aspects}, isbn = {9780262692991}, author = {Shneiderman, Ben} } @inbook {16129, title = {LifeLines: Using Visualization to Enhance Navigation and Analysis of Patient Records}, booktitle = {The Craft of Information VisualizationThe Craft of Information Visualization}, year = {2003}, month = {2003///}, pages = {308 - 312}, publisher = {Morgan Kaufmann}, organization = {Morgan Kaufmann}, address = {San Francisco}, isbn = {978-1-55860-915-0}, url = {http://www.sciencedirect.com/science/article/pii/B978155860915050038X}, author = {Plaisant, Catherine and Mushlin,Richard and Snyder,Aaron and Li,Jia and Heller,Dan and Shneiderman, Ben}, editor = {Bederson, Benjamin B. and Shneiderman, Ben} } @article {14358, title = {Link mining: a new data mining challenge}, journal = {ACM SIGKDD Explorations Newsletter}, volume = {5}, year = {2003}, month = {2003/07//}, pages = {84 - 89}, abstract = {A key challenge for data mining is tackling the problem of mining richly structured datasets, where the objects are linked in some way. Links among the objects may demonstrate certain patterns, which can be helpful for many data mining tasks and are usually hard to capture with traditional statistical models. Recently there has been a surge of interest in this area, fueled largely by interest in web and hypertext mining, but also by interest in mining social networks, security and law enforcement data, bibliographic citations and epidemiological records.}, isbn = {1931-0145}, doi = {10.1145/959242.959253}, url = {http://doi.acm.org/10.1145/959242.959253}, author = {Getoor, Lise} } @article {12392, title = {Logic foundry: rapid prototyping for FPGA-based DSP systems}, journal = {EURASIP J. Appl. Signal Process.}, volume = {2003}, year = {2003}, month = {2003/01//}, pages = {565 - 579}, abstract = {We introduce the Logic Foundry, a system for the rapid creation and integration of FPGA-based digital signal processing systems. Recognizing that some of the greatest challenges in creating FPGA-based systems occur in the integration of the various components, we have proposed a system that targets the following four areas of integration: design flow integration, component integration, platform integration, and software integration. Using the Logic Foundry, a system can be easily specified, and then automatically constructed and integrated with system level software.}, keywords = {CAD tools, Design methodology, DSP, FPGA, integration, rapid prototyping}, isbn = {1110-8657}, doi = {10.1155/S1110865703301039}, url = {http://dx.doi.org/10.1155/S1110865703301039}, author = {Spivey,Gary and Bhattacharyya, Shuvra S. 
and Nakajima,Kazuo} } @conference {15146, title = {Lower bounds on the efficiency of encryption and digital signature schemes}, booktitle = {Proceedings of the thirty-fifth annual ACM symposium on Theory of computing}, series = {STOC {\textquoteright}03}, year = {2003}, month = {2003///}, pages = {417 - 425}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {A central focus of modern cryptography is to investigate the weakest possible assumptions under which various cryptographic algorithms exist. Typically, a proof that a "weak" primitive (e.g., a one-way function) implies the existence of a "strong" algorithm (e.g., a private-key encryption scheme) proceeds by giving an explicit construction of the latter from the former. In addition to showing the existence of such a construction, an equally important research direction is to explore the efficiency of such constructions. Among the most fundamental cryptographic algorithms are digital signature schemes and schemes for public- or private-key encryption. Here, we show the first lower bounds on the efficiency of any encryption or signature construction based on black-box access to one-way or trapdoor one-way permutations. If S is the assumed security of the permutation π (i.e., no adversary of size S can invert π on a fraction larger than 1/S of its inputs), our results show that: Any public-key encryption scheme for m-bit messages must query π at least Ω(m/log S) times. Any private-key encryption scheme for m-bit messages (with k-bit keys) must query π at least Ω((m-k)/log S) times. Any signature verification algorithm for m-bit messages must query π at least Ω(m/log S) times. Our bounds match known upper bounds for the case of encryption. We prove our results in an extension of the Impagliazzo-Rudich model. That is, we show that any black-box construction beating our lower bounds would imply the unconditional existence of a one-way function.}, keywords = {black-box, digital signatures, encryption, lower bounds}, isbn = {1-58113-674-9}, doi = {10.1145/780542.780604}, url = {http://doi.acm.org/10.1145/780542.780604}, author = {Gennaro,Rosario and Gertner,Yael and Katz, Jonathan} } @article {14418, title = {Learning structured statistical models from relational data}, journal = {Link{\"o}ping Electronic Articles in Computer and Information Science}, volume = {7}, year = {2002}, month = {2002///}, pages = {13 - 13}, abstract = {Here we describe tools for constructing statistical models from relational data. Our goal is to learn structured probabilistic models that represent statistical correlations both between the properties of an entity and between the properties of related entities. These statistical models can then be used for a variety of tasks including knowledge discovery, exploratory data analysis, data summarization and anomaly detection. Unfortunately, most statistical learning methods work only with "flat" data representations. Thus, to apply these methods, we are forced to convert the data into a flat form, thereby not only losing its compact representation and structure but also potentially introducing statistical skew. These drawbacks severely limit the ability of current statistical methods to model relational databases. Here we describe two complementary approaches: one approach suited to making probabilistic statements about individuals and the second approach suited to making statements about frequencies in relational data. 
We describe algorithms for both learning and making inferences in these models, and give experimental results.}, author = {Getoor, Lise and Friedman,N. and Koller,D.} } @conference {12143, title = {Lessons learned from 25 years of process improvement: the rise and fall of the NASA software engineering laboratory}, booktitle = {Proceedings of the 24th International Conference on Software Engineering}, series = {ICSE {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {69 - 79}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {For 25 years the NASA/GSFC Software Engineering Laboratory (SEL) has been a major resource in software process improvement activities. But due to a changing climate at NASA, agency reorganization, and budget cuts, the SEL has lost much of its impact. In this paper we describe the history of the SEL and give some lessons learned on what we did right, what we did wrong, and what others can learn from our experiences. We briefly describe the research that was conducted by the SEL, describe how we evolved our understanding of software process improvement, and provide a set of lessons learned and hypotheses that should enable future groups to learn from and improve on our quarter century of experiences.}, isbn = {1-58113-472-X}, doi = {10.1145/581339.581351}, url = {http://doi.acm.org/10.1145/581339.581351}, author = {Basili, Victor R. and McGarry,Frank E. and Pajerski,Rose and Zelkowitz, Marvin V} } @book {19352, title = {Level of Detail for 3D Graphics}, year = {2002}, month = {2002}, publisher = {Elsevier Science Inc.}, organization = {Elsevier Science Inc.}, address = {New York, NY, USA}, abstract = {Level of detail (LOD) techniques are increasingly used by professional real-time developers to strike the balance between breathtaking virtual worlds and smooth, flowing animation. Level of Detail for 3D Graphics brings together, for the first time, the mechanisms, principles, practices, and theory needed by every graphics developer seeking to apply LOD methods. Continuing advances in level of detail management have brought this powerful technology to the forefront of 3D graphics optimization research. This book, written by the very researchers and developers who have built LOD technology, is both a state-of-the-art chronicle of LOD advances and a practical sourcebook, which will enable graphics developers from all disciplines to apply these formidable techniques to their own work. }, isbn = {1558608389}, author = {Luebke, David and Watson, Benjamin and Cohen, Jonathan D. and Reddy, Martin and Varshney, Amitabh} } @conference {13582, title = {Lexicon Acquisition from Bilingual Dictionaries}, booktitle = {SPIE Photonic West Electronic Imaging Conference}, year = {2002}, month = {2002///}, pages = {37 - 48}, author = {David Doermann and Ma,H. and Karagol-Ayan,B. and Oard, Douglas} } @conference {12107, title = {A light-weight process for capturing and evolving defect reduction experience}, booktitle = {Engineering of Complex Computer Systems, 2002. Proceedings. Eighth IEEE International Conference on}, year = {2002}, month = {2002/12//}, pages = {129 - 132}, abstract = {Selecting technologies for developing software is a crucial activity in software projects. Defect reduction is an example of an area in which software developers have to decide what technologies to use. CeBASE is an NSF-funded project that has the role of improving software development by providing decision support on the selection of techniques and tools. 
The decision support is based on empirical data organized in experience bases and refined into high-level models. Empirical data is collected through various activities, for example through eWorkshops in which experts discuss important issues, and formalized using the lightweight knowledge dust to knowledge pearl process.}, keywords = {bases;experience, CeBASE;NSF, defect, development;software, experience;software, funded, knowledge;software, management;lightweight, management;software, project;decision, projects;software, reduction, reusability;software, reuse;software, support;eWorkshops;experience, tools;, tools;knowledge}, doi = {10.1109/ICECCS.2002.1181505}, author = {Basili, Victor R. and Lindvall,M. and Shull, F.} } @article {16911, title = {A Linear Iterative Approach for Hierarchical Shortest Path Finding}, journal = {Technical Reports from UMIACS, UMIACS-TR-2002-97}, year = {2002}, month = {2002///}, abstract = {We present a hierarchical approach that subdivides a network with $n$ vertices into $r$ regions with the same number $m$ of vertices ($n = r m$) and creates higher levels by merging a constant number $c$ of adjacent regions. We propose linear iterative algorithms to find a shortest path and to expand this path into the lowest level. Since our approach is non-recursive, the complexity constants are small and the algorithms are more efficient in practice than other recursive optimal approaches. A hybrid shortest path algorithm to perform intra-regional queries in the lowest level is introduced. This strategy uses a subsequence of vertices that belong to the shortest path while actually computing the whole shortest path. The hybrid algorithm requires $O(m)$ time and space assuming a uniform distribution of vertices. This represents a further improvement concerning space, since a path view approach requires $O(m^{1.5})$ space in the lowest level. For higher $k$-levels, a path view approach spends $O(1)$ time and requires $O(c^k m)$ space.}, author = {Samet, Hanan and Filho,Gutemberg Guerra} } @conference {15624, title = {A local search approximation algorithm for k-means clustering}, booktitle = {Proceedings of the eighteenth annual symposium on Computational geometry}, series = {SCG {\textquoteright}02}, year = {2002}, month = {2002///}, pages = {10 - 18}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {In k-means clustering we are given a set of n data points in d-dimensional space Rd and an integer k, and the problem is to determine a set of k points in Rd, called centers, to minimize the mean squared distance from each data point to its nearest center. No exact polynomial-time algorithms are known for this problem. Although asymptotically efficient approximation algorithms exist, these algorithms are not practical due to the extremely high constant factors involved. There are many heuristics that are used in practice, but we know of no bounds on their performance. We consider the question of whether there exists a simple and practical approximation algorithm for k-means clustering. We present a local improvement heuristic based on swapping centers in and out. We prove that this yields a (9+ε)-approximation algorithm. We show that the approximation factor is almost tight, by giving an example for which the algorithm achieves an approximation factor of (9-ε). 
To establish the practical value of the heuristic, we present an empirical study that shows that, when combined with Lloyd{\textquoteright}s algorithm, this heuristic performs quite well in practice.}, keywords = {Approximation algorithms, clustering, computational geometry, k-means, local search}, isbn = {1-58113-504-1}, doi = {10.1145/513400.513402}, url = {http://doi.acm.org/10.1145/513400.513402}, author = {Kanungo,Tapas and Mount, Dave and Netanyahu,Nathan S. and Piatko,Christine D. and Silverman,Ruth and Wu,Angela Y.} } @article {16477, title = {Locating and accessing data repositories with WebSemantics}, journal = {The VLDB journal}, volume = {11}, year = {2002}, month = {2002///}, pages = {47 - 57}, author = {Mihaila,G. A and Raschid, Louiqa and Tomasic,A.} } @conference {13587, title = {Logical Labeling of Document Images Using Layout Graph Matching with Adaptive Learning}, booktitle = {IAPR Conference on Document Analysis System}, year = {2002}, month = {2002///}, pages = {212 - 223}, abstract = {Logical structure analysis of document images is an important problem in document image understanding. In this paper, we propose a graph matching approach to label logical components on a document page. Our system is able to learn a model for a document class, use this model to label document images through graph matching, and adaptively improve the model with error feedback. We tested our method on journal/proceeding article title pages. The experimental results show promising accuracy, and confirm the ability of adaptive learning.}, author = {Liang,J. and David Doermann} } @article {15511, title = {A logic-based approach to data integration}, journal = {Theory Pract. Log. Program.}, volume = {2}, year = {2002}, month = {2002/05//}, pages = {323 - 368}, abstract = {An important aspect of data integration involves answering queries using various resources rather than by accessing database relations. The process of transforming a query from the database relations to the resources is often referred to as query folding or answering queries using views, where the views are the resources. We present a uniform approach that includes as special cases much of the previous work on this subject. Our approach is logic-based, using resolution. We deal with integrity constraints, negation, and recursion also within this framework.}, keywords = {Clark completion, Data integration, materialized views, query folding, resolution method, stratified databases}, isbn = {1471-0684}, doi = {10.1017/S1471068401001375}, url = {http://dx.doi.org/10.1017/S1471068401001375}, author = {Grant,John and Minker, Jack} } @conference {14857, title = {Lambertian reflectance and linear subspaces}, booktitle = {Computer Vision, 2001. ICCV 2001. Proceedings. Eighth IEEE International Conference on}, volume = {2}, year = {2001}, month = {2001///}, pages = {383 - 390 vol.2}, abstract = {We prove that the set of all reflectance functions (the mapping from surface normals to intensities) produced by Lambertian objects under distant, isotropic lighting lies close to a 9D linear subspace. This implies that the images of a convex Lambertian object obtained under a wide variety of lighting conditions can be approximated accurately with a low-dimensional linear subspace, explaining prior empirical results. We also provide a simple analytic characterization of this linear space. 
We obtain these results by representing lighting using spherical harmonics and describing the effects of Lambertian materials as the analog of a convolution. These results allow us to construct algorithms for object recognition based on linear methods as well as algorithms that use convex optimization to enforce non-negative lighting functions.}, keywords = {functions;spherical, harmonics;convex, Lambertian, lighting;object, object;convex, objects;convex, optimization;isotropic, programming;object, recognition;, recognition;reflectance}, doi = {10.1109/ICCV.2001.937651}, author = {Basri,R. and Jacobs, David W.} } @article {14422, title = {Learning probabilistic models of relational structure}, journal = {MACHINE LEARNING-INTERNATIONAL WORKSHOP}, year = {2001}, month = {2001///}, pages = {170 - 177}, abstract = {Most real-world data is stored in relational form. In contrast, most statistical learning methods work with {\textquotedblleft}flat{\textquotedblright} data representations, forcing us to convert our data into a form that loses much of the relational structure. The recently introduced framework of probabilistic relational models (PRMs) allows us to represent probabilistic models over multiple entities that utilize the relations between them. In this paper, we propose the use of probabilistic models not only for the attributes in a relational model, but for the relational structure itself. We propose two mechanisms for modeling structural uncertainty: reference uncertainty and existence uncertainty. We describe the appropriate conditions for using each model and present learning algorithms for each. We present experimental results showing that the learned models can be used to predict relational structure and, moreover, the observed relational structure can be used to provide better predictions for the attributes in the model.}, author = {Getoor, Lise and Friedman,N. and Koller,D. and Taskar,B.} } @conference {16597, title = {Learning word pronunciations using a recurrent neural network}, booktitle = {Neural Networks, 2001. Proceedings. IJCNN{\textquoteright}01. International Joint Conference on}, volume = {1}, year = {2001}, month = {2001///}, pages = {11 - 15}, author = {Radio,M. J and Reggia, James A. and Berndt,R. S} } @conference {16307, title = {Leveraging open-source communities to improve the quality \& performance of open-source software}, booktitle = {Proceedings of the 1st Workshop on Open Source Software Engineering}, year = {2001}, month = {2001///}, author = {Schmidt,D. C and Porter, Adam} } @article {14828, title = {Linear Fitting with Missing Data for Structure-from-Motion}, journal = {Computer Vision and Image Understanding}, volume = {82}, year = {2001}, month = {2001/04//}, pages = {57 - 81}, abstract = {Several vision problems can be reduced to the problem of fitting a linear surface of low dimension to data. These include determining affine structure from motion or from intensity images. These methods must deal with missing data; for example, in structure from motion, missing data will occur if some point features are not visible in the image throughout the motion sequence. Once data is missing, linear fitting becomes a nonlinear optimization problem. Techniques such as gradient descent require a good initial estimate of the solution to ensure convergence to the correct answer. We propose a novel method for fitting a low rank matrix to a matrix with missing elements.
This method produces a good starting point for descent-type algorithms and can produce an accurate solution without further refinement. We then focus on applying this method to the problem of structure-from-motion. We show that our method has desirable theoretical properties compared to previously proposed methods, because it can find solutions when there is less data present. We also show experimentally that our method provides good results compared to previously proposed methods.}, isbn = {1077-3142}, doi = {10.1006/cviu.2001.0906}, url = {http://www.sciencedirect.com/science/article/pii/S1077314201909063}, author = {Jacobs, David W.} } @article {13809, title = {Large-Scale Construction of a Chinese-English Semantic Hierarchy}, year = {2000}, month = {2000/04//}, institution = {Instititue for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {This paper addresses the problem of building conceptual resources for multilingual applications. We describe new techniques for large-scale construction of a semantic hierarchy for Chinese verbs, using thematic-role information to create links between Chinese concepts and English classes. We then present an approach to compensating for gaps in the existing resources. The resulting hierarchy is used for a multilingual lexicon for Chinese-English machine translation and cross-language information retrieval applications.}, keywords = {*CHINESE VERBS, *HIERARCHIES, *SEMANTICS, CHINA, CROSS CORRELATION, Information retrieval, INFORMATION SCIENCE, LANGUAGE, LEXICONS, linguistics, Machine translation, MULTILINGUAL, operations research, SYSTEMS APPROACH}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA458682}, author = {Dorr, Bonnie J and Levow,Gina-Anne and Lin,Dekang} } @article {18295, title = {Learned models for estimation of rigid and articulated human motion from stationary or moving camera}, journal = {International Journal of Computer Vision}, volume = {36}, year = {2000}, month = {2000///}, pages = {5 - 30}, author = {Yacoob,Yaser and Davis, Larry S.} } @article {14357, title = {Learning probabilistic relational models}, journal = {Abstraction, Reformulation, and Approximation}, year = {2000}, month = {2000///}, pages = {322 - 323}, abstract = {My work is on learning Probabilistic Relational Models (PRMs) from structured data (e.g., data in a relational database, an object-oriented database or a frame-based system). This work has as a starting point the framework of Probabilistic Relational Models, introduced in [5, 7]. We adapt and extend the machinery that has been developed over the years for learning Bayesian networks from data [1, 4, 6] to the task of learning PRMs from structured data. At the heart of this work is a search algorithm that explores the space of legal models using search operators that abstract or refine the model.}, doi = {10.1007/3-540-44914-0_25}, author = {Getoor, Lise} } @article {16494, title = {Learning response time for websources using query feedback and application in query optimization}, journal = {The VLDB Journal{\textemdash}The International Journal on Very Large Data Bases}, volume = {9}, year = {2000}, month = {2000///}, pages = {18 - 37}, author = {Gruser,J. R and Raschid, Louiqa and Zadorozhny,V. 
and Zhan,T.} } @article {14626, title = {Ligand-Receptor Pairing Via Tree Comparison}, journal = {Journal of Computational Biology}, volume = {7}, year = {2000}, month = {2000/02//}, pages = {59 - 70}, abstract = {This paper introduces a novel class of tree comparison problems strongly motivated by an important and cost-intensive step in the drug discovery pipeline, viz., mapping cell bound receptors to the ligands they bind to and vice versa. Tree comparison studies motivated by problems such as virus-host tree comparison, gene-species tree comparison and consensus tree problem have been reported. None of these studies are applicable in our context because in all these problems, there is a well-defined mapping of the nodes the trees are built on across the set of trees being compared. A new class of tree comparison problems arises in cases where finding the correspondence among the nodes of the trees being compared is itself the problem. The problem arises while trying to find the interclass correspondence between the members of a pair of coevolving classes, e.g., cell bound receptors and their ligands. Given the evolution of the two classes, the combinatorial problem is to find a mapping among the leaves of the two trees that optimizes a given cost function. In this work we formulate various combinatorial optimization problems motivated by the aforementioned biological problem for the first time. We present hardness results, give an efficient algorithm for a restriction of the problem and demonstrate its applicability.}, isbn = {1066-5277, 1557-8666}, doi = {10.1089/10665270050081388}, url = {http://www.liebertonline.com/doi/abs/10.1089/10665270050081388}, author = {Bafna,Vineet and Hannenhalli, Sridhar and Rice,Ken and Vawter,Lisa} } @article {17417, title = {The limits of speech recognition}, journal = {Communications of the ACM}, volume = {43}, year = {2000}, month = {2000/09//}, pages = {63 - 65}, isbn = {0001-0782}, doi = {10.1145/348941.348990}, url = {http://doi.acm.org/10.1145/348941.348990}, author = {Shneiderman, Ben} } @conference {13583, title = {Live multimedia adaptation through wireless hybrid networks}, booktitle = {Multimedia and Expo, 2000. ICME 2000. 2000 IEEE International Conference on}, volume = {3}, year = {2000}, month = {2000///}, pages = {1697 - 1700 vol.3}, abstract = {We present new techniques to intelligently adapt and combine multimedia presentations in real-time, mobile service environments. We present the techniques necessary to perform mobile multimedia processing for multiple video streams with {\textquotedblleft}value-adding{\textquotedblright} information. The adaptation uses video analysis, content-recognition and automated annotation to provide scalable and interactive presentations over hybrid networks to mobile terminals. As a concrete case, we present a mobile surveillance service called Princess, which has several video sources and supports control information, terminal adaptation and end-to-end service.}, keywords = {adaptation;mobile, adaptation;value-adding, analysis;wireless, annotation;content-recognition;end-to-end, communication;multimedia, communication;real-time, hybrid, information;video, LAN;, multimedia, networks;mobile, Princess;automated, processing;mobile, service;live, service;mobile, streams;real-time, Surveillance, system;terminal, systems;surveillance;wireless, terminals;multiple, video}, doi = {10.1109/ICME.2000.871098}, author = {Koivisto,A. and Pietkainen,P. and Sauvola,J.
and David Doermann} } @article {15233, title = {The Loading Time Scheduling Problem}, journal = {Journal of Algorithms}, volume = {36}, year = {2000}, month = {2000/07//}, pages = {1 - 33}, abstract = {In this paper we study precedence constrained scheduling problems, where the tasks can only be executed on a specified subset of the set of machines. Each machine has a loading time that is incurred only for the first task that is scheduled on the machine in a particular run. This basic scheduling problem arises in the context of machining on numerically controlled machines and query optimization in databases and in other artificial intelligence applications. We give the first nontrivial approximation algorithm for this problem. We also prove nontrivial lower bounds on best possible approximation ratios for these problems. These improve on the nonapproximability results that are implied by the nonapproximability results for the shortest common supersequence problem. We use the same algorithm technique to obtain approximation algorithms for a problem arising in the context of code generation for parallel machines and for the weighted shortest common supersequence problem.}, isbn = {0196-6774}, doi = {10.1006/jagm.2000.1076}, url = {http://www.sciencedirect.com/science/article/pii/S0196677400910769}, author = {Bhatia,Randeep and Khuller, Samir and Naor,Joseph (Seffi)} } @conference {15232, title = {On local search and placement of meters in networks}, booktitle = {Proceedings of the eleventh annual ACM-SIAM symposium on Discrete algorithms}, series = {SODA {\textquoteright}00}, year = {2000}, month = {2000///}, pages = {319 - 328}, publisher = {Society for Industrial and Applied Mathematics}, organization = {Society for Industrial and Applied Mathematics}, address = {Philadelphia, PA, USA}, isbn = {0-89871-453-2}, url = {http://dl.acm.org/citation.cfm?id=338219.338268}, author = {Khuller, Samir and Bhatia,Randeep and Pless,Robert} } @article {16000, title = {A logic for characterizing multiple bounded agents}, journal = {Autonomous Agents and Multi-Agent Systems}, volume = {3}, year = {2000}, month = {2000///}, pages = {351 - 387}, author = {Grant,J. and Kraus,S. and Perlis, Don} } @book {15513, title = {Logic-Based Artificial Intelligence}, year = {2000}, month = {2000/12/01/}, publisher = {Springer}, organization = {Springer}, abstract = {This landmark volume represents the culmination of over 40 years of research in the use of logic as a basis for representing and manipulating problems in the field of artificial intelligence. The use of logic as a basis for commonsense reasoning was started by John McCarthy in 1959. The collection consists of both original research and surveys of almost every subject that uses logic in AI, contributed by leading scientists, and grew out of preliminary work presented at the Workshop on Logic-Based Artificial Intelligence held in Washington, DC, June 1999. All papers have been extensively refereed and revised. The introductory article presents background on research that has transpired since 1959 and discusses the significance of each chapter in this context. 
The topics covered in the book are commonsense reasoning, knowledge representation, nonmonotonic reasoning, logic for causation and actions, planning and problem solving, cognitive robotics, logic for agents and actions, inductive reasoning, possibilistic logic, logic and beliefs, logic and language, computational logic, knowledge base system implementations, and applications of theorem proving and logic programming. Logic-Based Artificial Intelligence is invaluable to graduate students and researchers in artificial intelligence, and advanced methods for database and knowledge base systems. Logic-Based Artificial Intelligence will also be of interest to those applying theorem proving methods to problems in program and hardware verification, to those who deal with large knowledge base systems, those developing cognitive robotics, and for those interested in the solution of McCarthy{\textquoteright}s 1959 "oldest planning problem in AI: getting from home to the airport".}, keywords = {Artificial intelligence, COMPUTER LOGIC, Computers / Computer Science, Computers / Intelligence (AI) \& Semantics, Logic, Symbolic and mathematical, Mathematics / Logic, Medical / General}, isbn = {9780792372240}, author = {Minker, Jack} } @article {16479, title = {Logic-based query optimization for object databases}, journal = {IEEE Transactions on Knowledge and Data Engineering}, volume = {12}, year = {2000}, month = {2000/08//Jul}, pages = {529 - 547}, abstract = {We present a technique for transferring query optimization techniques, developed for relational databases, into object databases. We demonstrate this technique for ODMG database schemas defined in ODL and object queries expressed in OQL. The object schema is represented using a logical representation (Datalog). Semantic knowledge about the object data model, e.g., class hierarchy information, relationship between objects, etc., as well as semantic knowledge about a particular schema and application domain are expressed as integrity constraints. An OQL object query is represented as a logic query and query optimization is performed in the Datalog representation. We obtain equivalent (optimized) logic queries, and subsequently obtain equivalent (optimized) OQL queries for each equivalent logic query. We present one optimization technique for semantic query optimization (SQO) based on the residue technique of U. Charavarthy et al. (1990; 1986; 1988). We show that our technique generalizes previous research on SQO for object databases. We handle a large class of OQL queries, including queries with constructors and methods. We demonstrate how SQO can be used to eliminate queries which contain contradictions and simplify queries, e.g., by eliminating joins, or by reducing the access scope for evaluating a query to some specific subclass(es). 
We also demonstrate how the definition of a method, or integrity constraints describing the method, can be used in optimizing a query with a method.}, keywords = {access scope, application domain, class hierarchy information, Constraint optimization, data integrity, Data models, Datalog, Datalog representation, deductive databases, equivalent logic query, integrity constraints, Lifting equipment, Logic, logic based query optimization, logic programming, logic queries, logic query, logical representation, object data model, object databases, object queries, object schema, object-oriented databases, ODL, ODMG database schemas, optimization technique, optimized OQL queries, OQL object query, query languages, Query optimization, query optimization techniques, Query processing, Relational databases, residue technique, semantic knowledge, semantic query optimization}, isbn = {1041-4347}, doi = {10.1109/69.868906}, author = {Grant,J. and Gryz,J. and Minker, Jack and Raschid, Louiqa} } @conference {18678, title = {Loki: a state-driven fault injector for distributed systems}, year = {2000}, month = {2000///}, pages = {237 - 242}, abstract = {Distributed applications can fail in subtle ways that depend on the state of multiple parts of a system. This complicates the validation of such systems via fault injection, since it suggests that faults should be injected based on the global state of the system. In Loki, fault injection is performed based on a partial view of the global state of a distributed system, i.e. faults injected in one node of the system can depend on the state of other nodes. Once faults are injected, a post-runtime analysis, using off-line clock synchronization, is used to place events and injections on a single global timeline and to determine whether the intended faults were properly injected. Finally, experiments containing successful fault injections are used to estimate the specified measures. In addition to briefly reviewing the concepts behind Loki and its organization, we detail Loki{\textquoteright}s user interface. In particular, we describe the graphical user interfaces for specifying state machines and faults, for executing a campaign and for verifying whether the faults were properly injected.}, keywords = {campaign execution, computer testing, distributed applications failure, distributed processing, distributed system validation, Fault diagnosis, fault event placement, fault specification, global system state, global timeline, Graphical user interfaces, Loki, off-line clock synchronization, post-runtime analysis, specified measures estimation, state machine specification, state-driven fault injection, Synchronisation}, doi = {10.1109/ICDSN.2000.857544}, author = {Chandra,R. and Lefever,R.M. and Michel Cukier and Sanders,W. H.} } @article {17617, title = {Low discrepancy sets yield approximate min-wise independent permutation families}, journal = {Information Processing Letters}, volume = {73}, year = {2000}, month = {2000/01/31/}, pages = {29 - 32}, abstract = {Motivated by a problem of filtering near-duplicate Web documents, Broder, Charikar, Frieze and Mitzenmacher defined the following notion of ϵ-approximate min-wise independent permutation families. A multiset F of permutations of {0,1,{\textellipsis},n-1} is such a family if for all K⫅{0,1,{\textellipsis},n-1} and any x∈K, a permutation π chosen uniformly at random from F satisfies |Pr[min{π(K)}=π(x)]-1/|K|| <= ϵ/|K|.
We show connections of such families with low discrepancy sets for geometric rectangles, and give explicit constructions of such families F of size n^{O(log n)} for ϵ=1/n^{Θ(1)}, improving upon the previously best-known bound of Indyk. We also present polynomial-size constructions when the min-wise condition is required only for |K|<=2^{O(log^{2/3} n)}, with ϵ>=2^{-O(log^{2/3} n)}.}, keywords = {combinatorial problems, Document filtering, explicit constructions, Information retrieval, Min-wise independent permutations, Pseudorandom permutations}, isbn = {0020-0190}, doi = {10.1016/S0020-0190(99)00163-5}, url = {http://www.sciencedirect.com/science/article/pii/S0020019099001635}, author = {Saks,Michael and Srinivasan, Aravind and Zhou,Shiyu and Zuckerman,David} } @article {17619, title = {Low-discrepancy sets for high-dimensional rectangles: a survey}, journal = {Bulletin of the EATCS}, volume = {70}, year = {2000}, month = {2000///}, pages = {67 - 76}, abstract = {A sub-area of discrepancy theory that has received much attention in computer science recently is that of explicit constructions of low-discrepancy point sets for various types of rectangle families in high dimension. This research has led to interesting applications in error-control coding, distributed protocols, Web document filtering, derandomization, and other areas. We give a short survey of this area here.}, author = {Srinivasan, Aravind} } @inbook {15765, title = {Latent Semantic Indexing via a Semi-Discrete Matrix Decomposition}, booktitle = {The Mathematics of Information Coding, Extraction and Distribution}, year = {1999}, month = {1999///}, pages = {73 - 80}, publisher = {Springer-Verlag IMA Volumes in Math. and Its Applics.}, organization = {Springer-Verlag IMA Volumes in Math. and Its Applics.}, address = {New York}, abstract = {With the electronic storage of documents comes the possibility of building search engines that can automatically choose documents relevant to a given set of topics. In information retrieval, we wish to match queries with relevant documents. Documents can be represented by the terms that appear within them, but literal matching of terms does not necessarily retrieve all relevant documents. There are a number of information retrieval systems based on inexact matches. Latent Semantic Indexing represents documents by approximations and tends to cluster documents on similar topics even if their term profiles are somewhat different. This approximate representation is usually accomplished using a low-rank singular value decomposition (SVD) approximation. In this paper, we use an alternate decomposition, the semi-discrete decomposition (SDD). For equal query times, the SDD does as well as the SVD and uses less than one-tenth the storage for the MEDLINE test set.}, author = {Kolda,Tamara G. and O{\textquoteright}Leary, Dianne P.}, editor = {Cybenko,George and O{\textquoteright}Leary, Dianne P. and Rissanen,Jorma} } @article {14408, title = {Learning probabilistic relational models}, journal = {International Joint Conference on Artificial Intelligence}, volume = {16}, year = {1999}, month = {1999///}, pages = {1300 - 1309}, abstract = {A large portion of real-world data is stored in commercial relational database systems. In contrast, most statistical learning methods work only with {\textquotedblleft}flat{\textquotedblright} data representations.
Thus, to apply these methods, we are forced to convert our data into a flat form, thereby losing much of the relational structure present in our database. This paper builds on the recent work on probabilistic relational models (PRMs), and describes how to learn them from databases. PRMs allow the properties of an object to depend probabilistically both on other properties of that object and on properties of related objects. Although PRMs are significantly more expressive than standard models, such as Bayesian networks, we show how to extend well-known statistical methods for learning Bayesian networks to learn these models. We describe both parameter estimation and structure learning {\textemdash} the automatic induction of the dependency structure in a model. Moreover, we show how the learning procedure can exploit standard database retrieval techniques for efficient learning from large datasets. We present experimental results on both real and synthetic relational databases.}, author = {Friedman,N. and Getoor, Lise and Koller,D. and Pfeffer,A.} } @conference {16520, title = {Lesion effects in a bihemispheric letter-identification model}, booktitle = {Neural Networks, 1999. IJCNN{\textquoteright}99. International Joint Conference on}, volume = {1}, year = {1999}, month = {1999///}, pages = {215 - 218}, author = {Shevtsova,N. and Reggia, James A.} } @inbook {16169, title = {LifeLines: Visualizing Personal Histories}, booktitle = {Readings in Information Visualization: Using Vision to Think}, year = {1999}, month = {1999///}, pages = {285 - 285}, publisher = {Morgan Kaufmann Publishers Inc.}, organization = {Morgan Kaufmann Publishers Inc.}, isbn = {9781558605336}, author = {Kumar,H. P and Plaisant, Catherine and Shneiderman, Ben} } @article {14917, title = {Linear fitting with missing data: applications to structure-from-motion and to characterizing intensity images}, year = {1999}, month = {1999/12/28/}, abstract = {A method for generating a complete scene structure from a video sequence that provides incomplete data. The method has a first step of building a first matrix consisting of point locations from a motion sequence by acquiring a sequence of images of a fixed scene using a moving camera; identifying and tracking point features through the sequence; and using the coordinates of the features to build the first matrix with some missing elements where some features are not present in some images. In a second step an approximate solution is built by selecting triples of columns from the first matrix; forming their nullspaces into a second matrix; and taking the three smallest components of the second matrix. In a third step, an iterative algorithm is applied to the three smallest components to build a third matrix and to improve the estimate. Lastly, in a fourth step the third matrix is decomposed to determine the complete scene structure. Another aspect of the present invention are...}, url = {http://www.google.com/patents?id=7LkYAAAAEBAJ}, author = {Jacobs, David W.}, editor = {NEC Research Institute, Inc.} } @article {12779, title = {Local model checking and protocol analysis}, journal = {International Journal on Software Tools for Technology Transfer (STTT)}, volume = {2}, year = {1999}, month = {1999///}, pages = {219 - 241}, author = {Du,X. and Smolka,S.
A and Cleaveland, Rance} } @article {15517, title = {Logic and databases: a 20 year retrospective-updated in honor of Ray Reiter}, journal = {Logical Foundations for Cognitive Agents: Contributions in Honor of Ray Reiter}, year = {1999}, month = {1999///}, pages = {234 - 299}, author = {Minker, Jack} } @conference {14784, title = {LBF: a performance metric for program reorganization}, booktitle = {18th International Conference on Distributed Computing Systems, 1998. Proceedings}, year = {1998}, month = {1998/05/26/29}, pages = {222 - 229}, publisher = {IEEE}, organization = {IEEE}, abstract = {We introduce a new performance metric, called Load Balancing Factor (LBF), to assist programmers with evaluating different tuning alternatives. The LBF metric differs from traditional performance metrics since it is intended to measure the performance implications of a specific tuning alternative rather than quantifying where time is spent in the current version of the program. A second unique aspect of the metric is that it provides guidance about moving work within a distributed or parallel program rather than reducing it. A variation of the LBF metric can also be used to predict the performance impact of changing the underlying network. The LBF metric can be computed incrementally and online during the execution of the program to be tuned. We also present a case study that shows that our metric can predict the actual performance gains accurately for a test suite of six programs}, keywords = {case study, Computational modeling, computer network, Computer science, Debugging, distributed processing, distributed program, Educational institutions, Integrated circuit testing, LBF metric, load balancing factor, Load management, measurement, NIST, parallel program, parallel programming, performance metric, program reorganization, program tuning, Programming profession, resource allocation, software metrics, software performance evaluation, US Department of Energy}, isbn = {0-8186-8292-2}, doi = {10.1109/ICDCS.1998.679505}, author = {Eom, H. and Hollingsworth, Jeffrey K} } @inbook {13815, title = {Lexical Allocation in Interlingua-Based Machine Translation of Spatial Expressions}, booktitle = {Representation and processing of spatial expressions}, year = {1998}, month = {1998///}, pages = {125 - 125}, author = {Dorr, Bonnie J and Voss,C.R. and Sencan,M.U.} } @inbook {13817, title = {Lexical Selection for Cross-Language Applications: Combining LCS with WordNet}, booktitle = {Machine Translation and the Information SoupMachine Translation and the Information Soup}, series = {Lecture Notes in Computer Science}, volume = {1529}, year = {1998}, month = {1998///}, pages = {438 - 447}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {This paper describes experiments for testing the power of large-scale resources for lexical selection in machine translation (MT) and cross-language information retrieval (CLIR). We adopt the view that verbs with similar argument structure share certain meaning components, but that those meaning components are more relevant to argument realization than to idiosyncratic verb meaning. We verify this by demonstrating that verbs with similar argument structure as encoded in Lexical Conceptual Structure (LCS) are rarely synonymous in WordNet. 
We then use the results of this work to guide our implementation of an algorithm for cross-language selection of lexical items, exploiting the strengths of each resource: LCS for semantic structure and WordNet for semantic content. We use the Parka Knowledge-Based System to encode LCS representations and WordNet synonym sets and we implement our lexical-selection algorithm as Parka-based queries into a knowledge base containing both information types.}, isbn = {978-3-540-65259-5}, url = {http://dx.doi.org/10.1007/3-540-49478-2_39}, author = {Dorr, Bonnie J and Katsova,Maria}, editor = {Farwell,David and Gerber,Laurie and Hovy,Eduard} } @article {16180, title = {Life cycle of user interface techniques: The DJJ information system design Process}, journal = {Technical Reports of the Computer Science Department}, year = {1998}, month = {1998/10/15/}, abstract = {To take advantage of today{\textquoteright}s technology, many organizations are migrating from their legacy systems. With help from the Human-Computer Interaction Laboratory (HCIL) and Cognetics Corporation, the Maryland Department of Juvenile Justice (DJJ) is currently undergoing an effort to redesign their information system to take advantage of graphical user interfaces. As a research lab, HCIL identifies interesting research problems and then prototypes solutions. As a project matures, the exploratory prototypes are adapted to suit the end product requirements. This case study describes the life cycle of three DJJ prototypes: (1) LifeLines, which uses time lines to display an overview of a youth in one screen, (2) the DJJ Navigator, which helps manage individual workloads by displaying different user views, and (3) the ProgramFinder, a tool for selecting the best program for a youth. (Also cross-referenced as CAR-TR-826) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu/handle/1903/458}, author = {Rose,Anne and Ellis,Jason and Plaisant, Catherine and Greene,Stephan} } @article {17281, title = {LifeLines: using visualization to enhance navigation and analysis of patient records.}, journal = {Proceedings of the AMIA Symposium}, year = {1998}, month = {1998///}, pages = {76 - 80}, abstract = {LifeLines provide a general visualization environment for personal histories. We explore its use for clinical patient records. A Java user interface is described, which presents a one-screen overview of a computerized patient record using timelines. Problems, diagnoses, test results or medications can be represented as dots or horizontal lines. Zooming provides more details; line color and thickness illustrate relationships or significance. The visual display acts as a giant menu, giving direct access to the data.}, isbn = {1531-605X}, author = {Plaisant, Catherine and Mushlin,R. and Snyder,A. and Li,J. and Heller,D. and Shneiderman, Ben} } @article {16190, title = {LifeLines: using visualization to enhance navigation and analysis of patient records}, journal = {Proceedings of the 1998 American Medical Informatics Association Annual Fall Symposium}, year = {1998}, month = {1998///}, pages = {76 - 80}, abstract = {LifeLines provide a general visualization environment for personal histories. We explore its use for clinical patient records. A Java user interface is described, which presents a one-screen overview of a computerized patient record using timelines. Problems, diagnoses, test results or medications can be represented as dots or horizontal lines.
Zooming provides more details; line color and thickness illustrate relationships or significance. The visual display acts as a giant menu, giving direct access to the data.}, isbn = {1531-605X}, author = {Plaisant, Catherine and Mushlin,R. and Snyder,A. and Li,J. and Heller,D. and Shneiderman, Ben} } @conference {14815, title = {Linger longer: Fine-grain cycle stealing for networks of workstations}, booktitle = {Proceedings of the 1998 ACM/IEEE Conference on Supercomputing (CDROM)}, year = {1998}, month = {1998///}, pages = {1 - 12}, author = {Ryu, K. D and Hollingsworth, Jeffrey K} } @article {15519, title = {Logic knowledge bases with two default rules}, journal = {Annals of Mathematics and Artificial Intelligence}, volume = {22}, year = {1998}, month = {1998///}, pages = {333 - 361}, author = {Ruiz,C. and Minker, Jack} } @article {18147, title = {Looking to Parallel Algorithms for ILP and Decentralization}, volume = {CS-TR-3921}, year = {1998}, month = {1998/10/15/}, institution = {Department of Computer Science, University of Maryland, College Park}, abstract = {We introduce explicit multi-threading (XMT), a decentralized architecturethat exploits fine-grained SPMD-style programming; a SPMD program can translate directly to MIPS assembly language using three additional instruction primitives. The motivation for XMT is: (i) to define an inherently decentralizable architecture, taking into account that the performance of future integrated circuits will be dominated by wire costs, (ii) to increase available instruction-level parallelism (ILP) by leveraging expertise in the world of parallel algorithms, and (iii) to reduce hardware complexity by alleviating the need to detect ILP at run-time: if parallel algorithms can give us an overabundance of work to do in the form of thread-level parallelism, one can extract instruction-level parallelism with greatly simplified dependence-checking. We show that implementations of such an architecture tend towards decentralization and that, when global communication is necessary, overall performance is relatively insensitive to large on-chip delays. We compare the performance of the design to more traditional parallel architectures and to a high-performance superscalar implementation, but the intent is merely to illustrate the performance behavior of the organization and to stimulate debate on the viability of introducing SPMD to the single-chip processor domain. We cannot offer at this stage hard comparisons with well-researched models of execution. When programming for the SPMD model, the total number of operations that the processor has to perform is often slightly higher. To counter this, we have observed that the length of the critical path through the dynamic execution graph is smaller than in the serial domain, and the amount of ILP is correspondingly larger. Fine-grained SPMD programming connects with a broad knowledge base in parallel algorithms and scales down to provide good performance relative to high-performance superscalar designs even with small input sizes and small numbers of functional units. Keywords: Fine-grained SPMD, parallel algorithms. spawn-join, prefix-sum, instruction-level parallelism, decentralized architecture. 
(Also cross-referenced as UMIACS-TR-98-40) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/496}, author = {Berkovich,Efraim and Jacob,Bruce and Nuzman,Joseph and Vishkin, Uzi} } @article {15227, title = {Low Degree Spanning Trees of Small Weight}, volume = {UMIACS-TR-94-1}, year = {1998}, month = {1998/10/15/}, institution = {Institute for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {Given n points in the plane, the degree-K spanning tree problem asks for a spanning tree of minimum weight in which the degree of each vertex is at most K. This paper addresses the problem of computing low-weight degree-K spanning trees for K>2. It is shown that for an arbitrary collection of n points in the plane, there exists a spanning tree of degree three whose weight is at most 1.5 times the weight of a minimum spanning tree. It is shown that there exists a spanning tree of degree four whose weight is at most 1.25 times the weight of a minimum spanning tree. These results solve open problems posed by Papadimitriou and Vazirani. Moreover, if a minimum spanning tree is given as part of the input, the trees can be computed in O(n) time. The results are generalized to points in higher dimensions. It is shown that for any d >= 3, an arbitrary collection of points in R^d contains a spanning tree of degree three, whose weight is at most 5/3 times the weight of a minimum spanning tree. This is the first paper that achieves factors better than two for these problems. (Also cross-referenced as UMIACS-TR-94-1) }, keywords = {Technical Report}, url = {http://drum.lib.umd.edu//handle/1903/611}, author = {Khuller, Samir and Raghavachari,Balaji and Young,Neal} } @inbook {17618, title = {Low-bandwidth routing and electrical power networks}, booktitle = {Automata, Languages and Programming}, series = {Lecture Notes in Computer Science}, volume = {1443}, year = {1998}, month = {1998///}, pages = {604 - 615}, publisher = {Springer Berlin / Heidelberg}, organization = {Springer Berlin / Heidelberg}, abstract = {Given a graph G and a (multi-)set of pairs of vertices in it, the classical NP-hard maximum edge-disjoint-paths problem (MDP) is to connect as many of the given pairs as possible using pairwise edge-disjoint paths in G. We study a relative of this problem: we have a network with fixed link capacities that may have to service large demands when necessary. In particular, individual demands are allowed to exceed capacities, and thus flows for some request pairs necessarily have to be split into different flow-paths. This is the framework for computational problems arising from: (i) electrical power networks due to the proposed deregulation of the electric utility industry in the USA, and (ii) applications such as real-time Internet services (e.g., telephone, fax, video). We show that these problems come in a few variants, some efficiently solvable and many NP-hard; we also present approximation algorithms for many of the NP-hard variants presented.
Some of our approximation algorithms benefit from certain improved tail estimates that we derive; the latter also yield improved approximations for a family of packing integer programs.}, isbn = {978-3-540-64781-2}, url = {http://dx.doi.org/10.1007/BFb0055088}, author = {Cook,Doug and Faber,Vance and Marathe,Madhav and Srinivasan, Aravind and Sussmann,Yoram}, editor = {Larsen,Kim and Skyum,Sven and Winskel,Glynn} } @article {16698, title = {A Language Identification Application Built on the Java Client/Server Platform}, journal = {From Research to Commercial Applications: Making NLP Work in Practice}, year = {1997}, month = {1997///}, pages = {43 - 47}, author = {Adams,G. and Resnik, Philip} } @conference {13808, title = {Large-scale acquisition of LCS-based lexicons for foreign language tutoring}, booktitle = {Proceedings of the fifth conference on Applied natural language processing}, series = {ANLC {\textquoteright}97}, year = {1997}, month = {1997///}, pages = {139 - 146}, publisher = {Association for Computational Linguistics}, organization = {Association for Computational Linguistics}, address = {Stroudsburg, PA, USA}, abstract = {We focus on the problem of building large repositories of lexical conceptual structure (LCS) representations for verbs in multiple languages. One of the main results of this work is the definition of a relation between broad semantic classes and LCS meaning components. Our acquisition program---LEXICALL---takes, as input, the result of previous work on verb classification and thematic grid tagging, and outputs LCS representations for different languages. These representations have been ported into English, Arabic and Spanish lexicons, each containing approximately 9000 verbs. We are currently using these lexicons in an operational foreign language tutoring and machine translation system.}, doi = {10.3115/974557.974578}, url = {http://dx.doi.org/10.3115/974557.974578}, author = {Dorr, Bonnie J} } @article {13810, title = {Large-Scale Dictionary Construction for Foreign Language Tutoring and Interlingual Machine Translation}, journal = {Machine Translation}, volume = {12}, year = {1997}, month = {1997///}, pages = {271 - 322}, abstract = {This paper describes techniques for automatic construction of dictionaries for use in large-scale foreign language tutoring (FLT) and interlingual machine translation (MT) systems. The dictionaries are based on a language-independent representation called {\textquotedblleft}lexical conceptual structure{\textquotedblright} (LCS). A primary goal of the LCS research is to demonstrate that synonymous verb senses share distributional patterns. We show how the syntax{\textendash}semantics relation can be used to develop a lexical acquisition approach that contributes both toward the enrichment of existing online resources and toward the development of lexicons containing more complete information than is provided in any of these resources alone. We start by describing the structure of the LCS and showing how this representation is used in FLT and MT. We then focus on the problem of building LCS dictionaries for large-scale FLT and MT. First, we describe authoring tools for manual and semi-automatic construction of LCS dictionaries; we then present a more sophisticated approach that uses linguistic techniques for building word definitions automatically.
These techniques have been implemented as part of a set of lexicon-development tools used in the milt FLT project.}, isbn = {0922-6567}, url = {http://dx.doi.org/10.1023/A:1007965530302}, author = {Dorr, Bonnie J} } @article {13811, title = {LCS-based Korean Parsing and Translation}, journal = {Ms. Institute for Advanced Computer Studies and Department of Computer Science, University of Maryland}, year = {1997}, month = {1997///}, abstract = {This document reports on research conducted at the University ofMaryland for the Korean/English Machine Translation (MT) project. Our primary objective was to develop an interlingual representation based on lexical conceptual structure (LCS) and to examine the relation between this representation and a set of linguistically motivated semantic classes. We have focused on several different areas, including syntactic and morphological analysis, development of a semantic lexicon, semantic analysis of military messages, and regularization of an existing Korean military message corpus. }, author = {Dorr, Bonnie J} } @conference {18321, title = {Learning parameterized models of image motion}, booktitle = {Computer Vision and Pattern Recognition, 1997. Proceedings., 1997 IEEE Computer Society Conference on}, year = {1997}, month = {1997/06//}, pages = {561 - 567}, abstract = {A framework for learning parameterized models of optical flow from image sequences is presented. A class of motions is represented by a set of orthogonal basis flow fields that are computed from a training set using principal component analysis. Many complex image motions can be represented by a linear combination of a small number of these basis flows. The learned motion models may be used for optical flow estimation and for model-based recognition. For optical flow estimation we describe a robust, multi-resolution scheme for directly computing the parameters of the learned flow models from image derivatives. As examples we consider learning motion discontinuities, non-rigid motion of human mouths, and articulated human motion}, keywords = {image motion, Image sequences, learning, learning (artificial intelligence), model-based recognition, Motion estimation, multi-resolution scheme, non-rigid motion, optical flow, optical flow estimation, parameterized models, Principal component analysis, training set}, doi = {10.1109/CVPR.1997.609381}, author = {Black,M. J and Yacoob,Yaser and Jepson,A. D and Fleet,D. J} } @article {13819, title = {LEXICALL: Lexicon Construction for Foreign Language Tutoring}, year = {1997}, month = {1997///}, institution = {Instititue for Advanced Computer Studies, Univ of Maryland, College Park}, abstract = {We focus on the problem of building large repositories of lexical conceptual structure (LCS) representations for verbs in multiple languages. One of the main results of this work is the definition of a relation between broad semantic classes and LCS meaning components. Our acquisition program LEXICALL takes, as input, the result of previous work on verb classification and thematic grid tagging, and outputs LCS representations for different languages. These representations have been ported into English, Arabic and Spanish lexicons, each containing approximately 9000 verbs. 
We are currently using these lexicons in an operational foreign language tutoring and machine translation.}, keywords = {*DATA ACQUISITION, *FOREIGN LANGUAGES, *MACHINE TRANSLATION, GRIDS, INFORMATION SCIENCE, LEXICOGRAPHY, LEXICONS, linguistics, Vocabulary}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA458641}, author = {Dorr, Bonnie J} } @conference {14840, title = {Linear fitting with missing data: applications to structure-from-motion and to characterizing intensity images}, booktitle = {Computer Vision and Pattern Recognition, 1997. Proceedings., 1997 IEEE Computer Society Conference on}, year = {1997}, month = {1997/06//}, pages = {206 - 212}, abstract = {Several vision problems can be reduced to the problem of fitting a linear surface of low dimension to data, including the problems of structure-from-affine-motion, and of characterizing the intensity images of a Lambertian scene by constructing the intensity manifold. For these problems, one must deal with a data matrix with some missing elements. In structure-from-motion, missing elements will occur if some point features are not visible in some frames. To construct the intensity manifold missing matrix elements will arise when the surface normals of some scene points do not face the light source in some images. We propose a novel method for fitting a low rank matrix to a matrix with missing elements. We show experimentally that our method produces good results in the presence of noise. These results can be either used directly, or can serve as an excellent starting point for an iterative method}, keywords = {data;structure-from-affine-motion;structure-from-motion;vision, estimation;, fitting;linear, images;iterative, Lambertian, matrix;intensity, matrix;missing, method;linear, methods;motion, problems;computer, rank, scene;data, surface;low, vision;iterative}, doi = {10.1109/CVPR.1997.609321}, author = {Jacobs, David W.} } @conference {13584, title = {Local correspondence for detecting random forgeries}, booktitle = {Document Analysis and Recognition, 1997., Proceedings of the Fourth International Conference on}, volume = {1}, year = {1997}, month = {1997/08//}, pages = {319 -323 vol.1 - 319 -323 vol.1}, abstract = {Progress on the problem of signature verification has advanced more rapidly in online applications than offline applications, in part because information which can easily be recorded in online environments, such as pen position and velocity, is lost in static offline data. In offline applications, valuable information which can be used to discriminate between genuine and forged signatures is embedded at the stroke level. We present an approach to segmenting strokes into stylistically meaningful segments and establish a local correspondence between a questioned signature and a reference signature to enable the analysis and comparison of stroke features. Questioned signatures which do not conform to the reference signature are identified as random forgeries. Most simple forgeries can also be identified, as they do not conform to the reference signature{\textquoteright}s invariant properties such as connections between letters. 
Since we have access to both local and global information, our approach also shows promise for extension to the identification of skilled forgeries.}, keywords = {applications;online, applications;questioned, correspondence;offline, detection;reference, extraction;handwriting, features;stroke, forged, forgeries;random, forgeries;stroke, Forgery, level;stroke, meaningful, processing;, properties;local, recognition;image, segmentation;stylistically, segmentation;word, segments;feature, signature;random, signature;signature, signatures;invariant, verification;skilled}, doi = {10.1109/ICDAR.1997.619864}, author = {Guo,J. K and David Doermann and Rosenfeld, A.} } @article {14883, title = {Local Parallel Computation of Stochastic Completion Fields}, journal = {Neural Computation}, volume = {9}, year = {1997}, month = {1997///}, pages = {859 - 881}, abstract = {We describe a local parallel method for computing the stochastic completion field introduced in the previous article (Williams and Jacobs, 1997). The stochastic completion field represents the likelihood that a completion joining two contour fragments passes through any given position and orientation in the image plane. It is based on the assumption that the prior probability distribution of completion shape can be modeled as a random walk in a lattice of discrete positions and orientations. The local parallel method can be interpreted as a stable finite difference scheme for solving the underlying Fokker-Planck equation identified by Mumford (1994). The resulting algorithm is significantly faster than the previously employed method, which relied on convolution with large-kernel filters computed by Monte Carlo simulation. The complexity of the new method is O(n^3 m), while that of the previous algorithm was O(n^4 m^2) (for an n {\texttimes} n image with m discrete orientations). Perhaps most significant, the use of a local method allows us to model the probability distribution of completion shape using stochastic processes that are neither homogeneous nor isotropic. For example, it is possible to modulate particle decay rate by a directional function of local image brightnesses (i.e., anisotropic decay). The effect is that illusory contours can be made to respect the local image brightness structure. Finally, we note that the new method is more plausible as a neural model since (1) unlike the previous method, it can be computed in a sparse, locally connected network, and (2) the network dynamics are consistent with psychophysical measurements of the time course of illusory contour formation.}, isbn = {0899-7667}, doi = {10.1162/neco.1997.9.4.859}, url = {http://dx.doi.org/10.1162/neco.1997.9.4.859}, author = {Williams,Lance R. and Jacobs, David W.} } @article {14627, title = {Local Rules for Protein Folding on a Triangular Lattice and Generalized Hydrophobicity in the HP Model}, journal = {Journal of Computational Biology}, volume = {4}, year = {1997}, month = {1997/01//}, pages = {275 - 296}, abstract = {We consider the problem of determining the three-dimensional folding of a protein given its one-dimensional amino acid sequence. We use the HP model for protein folding proposed by Dill (1985), which models protein as a chain of amino acid residues that are either hydrophobic or polar, and hydrophobic interactions are the dominant initial driving force for the protein folding. Hart and Istrail (1996a) gave approximation algorithms for folding proteins on the cubic lattice under the HP model.
In this paper, we examine the choice of a lattice by considering its algorithmic and geometric implications and argue that the triangular lattice is a more reasonable choice. We present a set of folding rules for a triangular lattice and analyze the approximation ratio they achieve. In addition, we introduce a generalization of the HP model to account for residues having different levels of hydrophobicity. After describing the biological foundation for this generalization, we show that in the new model we are able to achieve similar constant factor approximation guarantees on the triangular lattice as were achieved in the standard HP model. While the structures derived from our folding rules are probably still far from biological reality, we hope that having a set of folding rules with different properties will yield more interesting folds when combined.}, isbn = {1066-5277, 1557-8666}, doi = {10.1089/cmb.1997.4.275}, url = {http://www.liebertonline.com/doi/abs/10.1089/cmb.1997.4.275}, author = {Agarwala,Richa and Batzoglou,Serafim and Dan{\v c}{\'\i}k,Vlado and Decatur,Scott E. and Hannenhalli, Sridhar and Farach,Martin and Muthukrishnan,S. and Skiena,Steven} } @article {13586, title = {Locally adaptive document skew detection}, journal = {Proceedings of SPIE}, volume = {3027}, year = {1997}, month = {1997/04/03/}, pages = {96 - 108}, abstract = {This paper proposes a new approach to the detection of local orientation and skew in document images. It is based on the observation that there are many documents where a single global estimate of the page skew is not sufficient. These documents require local adaptation to deal robustly with today{\textquoteright}s complex configurations of components on the page. The approach attempts to identify regions in the image which exhibit locally consistent physical properties and consistent orientation. To do this, we rapidly compute a coarse segmentation and delineate regions which differ with respect to layout and/or physical content. Each region is classified as text, graphics, mixed text/graphics, image or background using local features and additional features are extracted to estimate orientation. The local orientation decisions are propagated where appropriate to resolve ambiguity and to produce a global estimate of the skew for the page. The implementation of our algorithms is demonstrated on a set of images which have multiple regions with different orientations.}, isbn = {0277786X}, doi = {10.1117/12.270063}, url = {http://spiedigitallibrary.org/proceedings/resource/2/psisdg/3027/1/96_1?isAuthorized=no}, author = {Sauvola,Jaakko J and David Doermann and Pietikaeinen,Matti} } @article {15527, title = {Logic and Databases Past, Present, and Future}, journal = {AI Magazine}, volume = {18}, year = {1997}, month = {1997///}, pages = {21 - 21}, author = {Minker, Jack} } @article {17283, title = {Low-effort, high-payoff user interface reengineering}, journal = {IEEE Software}, volume = {14}, year = {1997}, month = {1997/08//Jul}, pages = {66 - 72}, abstract = {Although increasingly sophisticated design methodologies for developing new user interfaces exist, low-effort, high-payoff user interface reengineering represents a new direction-and opportunity. Yet reengineering a working system is complex and risky because of the potential disruption to users and managers, their justifiable fear of change, and the lack of guarantees that such changes will be for the better.
Our largely positive experiences with the projects described here lead us to believe that user interface reengineering is a viable and important process. Low effort, high-payoff improvement recommendations can probably be made for most existing systems. Nevertheless, a narrowly focused user interface reengineering plan may be inappropriate when the major problems lie outside the scope of the user interface, such as inadequate functionalities, frequent crashes, and network problems. Attempts at improving less severe problems while ignoring deeper ones may be perceived as insensitive by the users. In such cases it is important to consider either making similar short-term improvements for other parts of the systems or postponing short-term user interface reengineering in favour of a more complete system reengineering. Similarly, the need for interface stability might outweigh the benefits of the short-term improvements if a complete reengineering is planned for the near future. But most likely these proposed diagnostic strategies and opportunities for improvement are only a prelude to the much larger task of business reengineering, which implies extensive user interface reengineering}, keywords = {Business process re-engineering, complete system reengineering, Design methodology, Error analysis, Hardware, inadequate functionalities, interface stability, iterative methods, low-effort high-payoff user interface reengineering, short-term improvements, short-term user interface reengineering, software engineering, Software testing, System analysis and design, System testing, systems re-engineering, User centered design, user centred design, User interfaces}, isbn = {0740-7459}, doi = {10.1109/52.595958}, author = {Plaisant, Catherine and Rose,A. and Shneiderman, Ben and Vanniamparampil,A. J} } @article {15222, title = {Landmarks in graphs}, journal = {Discrete Applied Mathematics}, volume = {70}, year = {1996}, month = {1996/10/08/}, pages = {217 - 229}, abstract = {Navigation can be studied in a graph-structured framework in which the navigating agent (which we shall assume to be a point robot) moves from node to node of a {\textquotedblleft}graph space{\textquotedblright}. The robot can locate itself by the presence of distinctively labeled {\textquotedblleft}landmark{\textquotedblright} nodes in the graph space. For a robot navigating in Euclidean space, visual detection of a distinctive landmark provides information about the direction to the landmark, and allows the robot to determine its position by triangulation. On a graph, however, there is neither the concept of direction nor that of visibility. Instead, we shall assume that a robot navigating on a graph can sense the distances to a set of landmarks.Evidently, if the robot knows its distances to a sufficiently large set of landmarks, its position on the graph is uniquely determined. This suggests the following problem: given a graph, what are the fewest number of landmarks needed, and where should they be located, so that the distances to the landmarks uniquely determine the robot{\textquoteright}s position on the graph? This is actually a classical problem about metric spaces. A minimum set of landmarks which uniquely determine the robot{\textquoteright}s position is called a {\textquotedblleft}metric basis{\textquotedblright}, and the minimum number of landmarks is called the {\textquotedblleft}metric dimension{\textquotedblright} of the graph. In this paper we present some results about this problem. 
Our main new results are that the metric dimension of a graph with n nodes can be approximated in polynomial time within a factor of O(log n); we also establish some properties of graphs with metric dimension two.}, isbn = {0166-218X}, doi = {10.1016/0166-218X(95)00106-2}, url = {http://www.sciencedirect.com/science/article/pii/0166218X95001062}, author = {Khuller, Samir and Raghavachari,Balaji and Rosenfeld,Azriel} } @conference {16551, title = {Learning activation rules for associative networks}, booktitle = {Neural Networks, 1996., IEEE International Conference on}, volume = {1}, year = {1996}, month = {1996///}, pages = {365 - 370}, author = {Reggia, James A. and Grundstrom,E. and Berndt,R. S} } @article {16625, title = {Learning Activation Rules Rather Than Connection Weights}, journal = {International journal of neural systems}, volume = {7}, year = {1996}, month = {1996///}, pages = {129 - 148}, author = {Grundstrom,E. L and Reggia, James A.} } @conference {16203, title = {LifeLines: visualizing personal histories}, booktitle = {Proceedings of the SIGCHI conference on Human factors in computing systems: common ground}, series = {CHI {\textquoteright}96}, year = {1996}, month = {1996///}, pages = {221 - 227}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, keywords = {History, justice, medical record, overview, personal record, screen design, screen management, timeline, Visualization}, isbn = {0-89791-777-4}, doi = {10.1145/238386.238493}, url = {http://doi.acm.org/10.1145/238386.238493}, author = {Plaisant, Catherine and Milash,Brett and Rose,Anne and Widoff,Seth and Shneiderman, Ben} } @conference {13924, title = {Local tools: an alternative to tool palettes}, booktitle = {Proceedings of the 9th annual ACM symposium on User interface software and technology}, year = {1996}, month = {1996///}, pages = {169 - 170}, author = {Bederson, Benjamin B. and Hollan,J.D. and Druin, Allison and Stewart,J. and Rogers,D. and Proft,D.} } @article {15531, title = {Logic and databases: A 20 year retrospective}, journal = {Logic in Databases}, year = {1996}, month = {1996///}, pages = {1 - 57}, author = {Minker, Jack} } @conference {14981, title = {Land cover dynamics investigation using parallel computers}, booktitle = {Geoscience and Remote Sensing Symposium, 1995. IGARSS {\textquoteright}95. {\textquoteright}Quantitative Remote Sensing for Science and Applications{\textquoteright}, International}, volume = {1}, year = {1995}, month = {1995//10/14}, pages = {332 - 334 vol.1}, abstract = {A comprehensive and highly interdisciplinary research program is being carried out to investigate global land cover dynamics in heterogeneous parallel computing environments. Some of the problems addressed include atmospheric correction, mixture modeling, image classifications by Markovian random fields and by segmentation, global image/map databases, object oriented parallel programming and parallel I/O. During the initial two years of the project, significant progress has been made in all of these areas.}, keywords = {; geographic information system; geophysical measurement technique; geophysics computing; image classification; image map database; image segmentation; land cover dynamics; land surface; mixture modeling; object oriented programming; optical imaging; para, GIS; IR imaging; Markovian random fields; atmospheric correction}, doi = {10.1109/IGARSS.1995.520273}, author = {Liang, S. and Davis, Larry S. and Townshend,J. and Chellapa, Rama and DeFries, R. and Dubayah, R.
and Goward,S. and JaJa, Joseph F. and Krishnamachar, S. and Roussopoulos, Nick and Saltz, J. and Samet, Hanan and Shock,T. and Srinivasan, M.} } @conference {16620, title = {A large-scale neural model linking local neuronal dynamics to positron emission tomography (PET) regional cerebral blood flow (rCBF) data}, booktitle = {Soc Neurosci Abstr}, volume = {21}, year = {1995}, month = {1995///}, pages = {1988 - 1988}, author = {Tagamets,M. A and Horwitz,B. and Reggia, James A.} } @article {13814, title = {Lexical allocation in interlingua-based machine translation of spatial expressions}, journal = {Working Notes for IJCAI-95 Workshop on the Representation and Processing of Spatial Expressions, Montreal, Canada}, year = {1995}, month = {1995///}, abstract = {Given a spatial expression, or its computational semantic form, how is the expression{\textquoteright}s spatial semantics to be allocated lexically, i.e., among entries in the lexicon? In interlingua-based machine translation (MT) research, lexical allocation is the problem of allocating or subdividing a linguistic expression{\textquoteright}s full interlingual (IL) structure into the substructures that are lexical IL forms, i.e., in the lexicon. Here we present our work developing IL forms and an IL lexicon for translating English spatial expressions into Turkish. We examine several co-occurrence patterns between motion verbs (spatial placement and displacement) and directional adpositions (particles in English, postpositions in Turkish) and the lexical allocation of spatial vectors in these patterns.}, author = {Voss,C.R. and Dorr, Bonnie J and Sencan,M.U.} } @article {17666, title = {The local nature of Δ-coloring and its algorithmic applications}, journal = {Combinatorica}, volume = {15}, year = {1995}, month = {1995///}, pages = {255 - 280}, abstract = {Given a connected graph G = (V, E) with |V| = n and maximum degree Δ such that G is neither a complete graph nor an odd cycle, Brooks{\textquoteright} theorem states that G can be colored with Δ colors. We generalize this as follows: let G - v be Δ-colored; then, v can be colored by considering the vertices in an O(log_Δ n) radius around v and by recoloring an O(log_Δ n) length {\textquotedblleft}augmenting path{\textquotedblright} inside it. Using this, we show that Δ-coloring G is reducible in O(log^3 n / log Δ) time to (Δ+1)-vertex coloring G in a distributed model of computation. This leads to fast distributed algorithms and a linear-processor NC algorithm for Δ-coloring.}, isbn = {0209-9683}, url = {http://dx.doi.org/10.1007/BF01200759}, author = {Panconesi,Alessandro and Srinivasan, Aravind} } @article {18982, title = {Localization of Sequences Required for Size-specific Splicing of a Small Drosophila Intron in Vitro}, journal = {Journal of Molecular Biology}, volume = {253}, year = {1995}, month = {1995/10/27/}, pages = {426 - 437}, abstract = {Many introns in Drosophila and other invertebrates are less than 80 nucleotides in length, too small to be recognized by the vertebrate splicing machinery. Comparison of nuclear splicing extracts from human HeLa and Drosophila Kc cells has revealed species-specificity, consistent with the observed size differences. Here we present additional results with the 68 nucleotide fifth intron of the Drosophila myosin heavy chain gene.
As observed with the 74 nucleotide second intron of the Drosophila white gene, the wild-type myosin intron is accurately spliced in a homologous extract, and increasing the size by 16 nucleotides both eliminates splicing in the Drosophila extract and allows accurate splicing in the human extract. In contrast to previous results, however, an upstream cryptic 5' splice site is activated when the wild-type myosin intron is tested in a human HeLa cell nuclear extract, resulting in the removal of a 98 nucleotide intron. The size dependence of splicing in Drosophila extracts is also intron-specific; we noted that a naturally larger (150 nucleotide) intron from the ftz gene is efficiently spliced in Kc cell extracts that do not splice enlarged introns (of 84, 90, 150 or 350 nucleotides) derived from the 74 nucleotide white intron. Here, we have exploited that observation, using a series of hybrid introns to show that a region of 46 nucleotides at the 3' end of the white intron is sufficient to confer the species-specific size effect. At least two sequence elements within this region, yet distinct from previously described branchpoint and pyrimidine tract signals, are required for efficient splicing of small hybrid introns in vitro.}, keywords = {Drosophila; pre-mRNA splicing; intron size; species specificity; in vitro splicing}, isbn = {0022-2836}, doi = {10.1006/jmbi.1995.0564}, url = {http://www.sciencedirect.com/science/article/pii/S0022283685705645}, author = {Guo,Ming and Mount, Stephen M.} } @article {18304, title = {Labeling of Human Face Components from Range Data}, journal = {CVGIP: Image Understanding}, volume = {60}, year = {1994}, month = {1994/09//}, pages = {168 - 178}, abstract = {An approach to labeling the components of human faces from range images is proposed. The components of interest are those humans usually find significant for recognition. To cope with the nonrigidity of faces, an entirely qualitative approach is used. The preprocessing stage employs a multistage diffusion process to identify convexity and concavity points. These points are grouped into components and qualitative reasoning about possible interpretations of the components is performed. Consistency of hypothesized interpretations is checked using context-based reasoning. Experimental results on real range images of several faces are provided.}, isbn = {1049-9660}, doi = {10.1006/ciun.1994.1045}, url = {http://www.sciencedirect.com/science/article/pii/S104996608471045X}, author = {Yacoob,Yaser and Davis, Larry S.} } @article {15820, title = {The linear algebra of block quasi-Newton algorithms}, journal = {Linear Algebra and its Applications}, volume = {212{\textendash}213}, year = {1994}, month = {1994/11/15/}, pages = {153 - 168}, abstract = {The quasi-Newton family of algorithms for minimizing functions and solving systems of nonlinear equations has achieved a great deal of computational success and forms the core of many software libraries for solving these problems. In this work we extend the theory of the quasi-Newton algorithms to the block case, in which we minimize a collection of functions having a common Hessian matrix, or we solve a collection of nonlinear equations having a common Jacobian matrix.
This paper focuses on the linear algebra: update formulas, positive definiteness, least-change secant properties, relation to block conjugate gradient algorithms, finite termination for quadratic function minimization or solving linear systems, and the use of the quasi-Newton matrices as preconditioners.}, isbn = {0024-3795}, doi = {10.1016/0024-3795(94)90401-4}, url = {http://www.sciencedirect.com/science/article/pii/0024379594904014}, author = {O{\textquoteright}Leary, Dianne P. and Yeremin,A.} } @article {12380, title = {Looped schedules for dataflow descriptions of multirate signal processing algorithms}, journal = {Formal Methods in System Design}, volume = {5}, year = {1994}, month = {1994///}, pages = {183 - 205}, author = {Bhattacharyya, Shuvra S. and Lee,E. A} } @article {15209, title = {The lattice structure of flow in planar graphs}, journal = {SIAM Journal on Discrete Mathematics}, volume = {6}, year = {1993}, month = {1993///}, pages = {477 - 477}, author = {Khuller, Samir and Naor,J. S and Klein,P.} } @article {16571, title = {Learning competition and cooperation}, journal = {Neural computation}, volume = {5}, year = {1993}, month = {1993///}, pages = {242 - 259}, author = {Cho,S. and Reggia, James A.} } @article {12770, title = {A linear-time model-checking algorithm for the alternation-free modal mu-calculus}, journal = {Formal methods in system design}, volume = {2}, year = {1993}, month = {1993///}, pages = {121 - 147}, author = {Cleaveland, Rance and Steffen,B.} } @article {16556, title = {Local conditions for phase transitions in neural networks with variable connection strengths}, journal = {Neural networks}, volume = {6}, year = {1993}, month = {1993///}, pages = {667 - 676}, author = {McFadden,F. E and Peng,Y. and Reggia, James A.} } @conference {15964, title = {Logic and Artificial Intelligence: A New Synthesis?}, booktitle = {ANNALES-SOCIETATIS MATHEMATICAE POLONAE SERIES 4}, volume = {18}, year = {1993}, month = {1993///}, pages = {297 - 297}, author = {Perlis, Don} } @article {13589, title = {Logo Recognition}, volume = {CS-TR-3145}, year = {1993}, month = {1993///}, institution = {University of Maryland, College Park}, address = {College Park, MD}, author = {David Doermann and Rivlin,E. and Weiss, I.} } @conference {13590, title = {Logo Recognition using Geometric Invariants}, booktitle = {ICDAR}, year = {1993}, month = {1993///}, pages = {894 - 897}, author = {David Doermann and Rivlin,E. and Weiss, I.} } @conference {16654, title = {Lateral cortical inhibition without lateral inhibitory connections}, booktitle = {Neural Networks, 1992. IJCNN., International Joint Conference on}, volume = {3}, year = {1992}, month = {1992/06//}, pages = {415 - 420 vol.3}, abstract = {The authors consider how a set of neurons forming a cortical column, none of which individually can competitively distribute its activity, can function collectively to produce competitive distribution of activation. To address this issue, they describe a model cortical column composed of three populations of excitatory neurons that collectively distribute their activity competitively. The idea of competitive distribution and the neural circuitry that could bring it about are described.
Computer simulation results, implications, and testable predictions of the theory are presented.}, keywords = {brain models, competitive distribution of activation, digital simulation, excitatory neurons, lateral cortical inhibition, neural circuitry, neurophysiology, physiological models}, doi = {10.1109/IJCNN.1992.227139}, author = {Reggia, James A. and Sutton,G. and D{\textquoteright}Autrechy,C. L and Weinrich,M.} } @conference {16588, title = {Learning visual coordinate transformations with competition}, booktitle = {Neural Networks, 1992. IJCNN., International Joint Conference on}, volume = {4}, year = {1992}, month = {1992///}, pages = {49 - 54}, author = {Cho,S. and Reggia, James A.} } @conference {16716, title = {Left-corner parsing and psychological plausibility}, booktitle = {Proceedings of the 14th conference on Computational linguistics-Volume 1}, year = {1992}, month = {1992///}, pages = {191 - 197}, author = {Resnik, Philip} } @article {14983, title = {Load balancing and routing on the hypercube and related networks}, journal = {Journal of Parallel and Distributed Computing}, volume = {14}, year = {1992}, month = {1992/04//}, pages = {431 - 435}, abstract = {Several results related to the load balancing problem on the hypercube, the shuffle-exchange, the cube-connected cycles, and the butterfly are shown. Implications of these results for routing algorithms are also discussed. Our results include the following: {\textbullet} Efficient load balancing algorithms are found for the hypercube, the shuffle-exchange, the cube-connected cycles, and the butterfly. {\textbullet} Load balancing is shown to require more time on a p-processor shuffle-exchange, cube-connected cycle or butterfly than on a p-processor weak hypercube. {\textbullet} Routing n packets on a p-processor hypercube can be done optimally whenever n = p^{1+1/k}, for any fixed k \> 0.}, isbn = {0743-7315}, doi = {10.1016/0743-7315(92)90081-W}, url = {http://www.sciencedirect.com/science/article/pii/074373159290081W}, author = {JaJa, Joseph F. and Ryu,Kwan Woo} } @article {13818, title = {Lexical semantics and tense/aspect in machine translation}, year = {1991}, month = {1991///}, institution = {University of Maryland at College Park}, address = {College Park, MD, USA}, author = {Dorr, Bonnie J} } @mastersthesis {13816, title = {Lexical conceptual structure and machine translation}, year = {1990}, month = {1990///}, school = {Massachusetts Institute of Technology}, author = {Dorr, Bonnie J} } @article {15951, title = {Limited scope and circumscriptive reasoning}, journal = {International Journal of Expert Systems}, volume = {3}, year = {1990}, month = {1990///}, pages = {207 - 217}, author = {Etherington,D. W and Kraus,S. and Perlis, Don} } @book {14116, title = {Line iterative methods for cyclically reduced discrete convection-diffusion problems}, year = {1990}, month = {1990///}, publisher = {University of Maryland}, organization = {University of Maryland}, author = {Elman, Howard and Golub, G. H} } @conference {14984, title = {Load balancing on the hypercube and related networks}, booktitle = {Proceedings of 1990 International Conference on Parallel Processing}, volume = {1}, year = {1990}, month = {1990///}, pages = {203 - 210}, author = {JaJa, Joseph F. and Ryu,K. W.} } @article {11954, title = {Learning early-vision computations}, journal = {JOSA A}, volume = {6}, year = {1989}, month = {1989///}, pages = {908 - 919}, author = {Aloimonos, J.
and Shulman, D.} } @conference {17278, title = {Lessons learned from the ACM hypertext on hypertext project}, booktitle = {Proceedings of the second annual ACM conference on Hypertext}, series = {HYPERTEXT {\textquoteright}89}, year = {1989}, month = {1989///}, pages = {385 - 386}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, isbn = {0-89791-339-6}, doi = {10.1145/74224.74255}, url = {http://doi.acm.org/10.1145/74224.74255}, author = {Rous,B. and Shneiderman, Ben and Yankelovich,N. and Yoder,E.} } @conference {14982, title = {List ranking on the Hypercube}, booktitle = {Proceedings of the 1989 International Conference on Parallel Processing}, volume = {3}, year = {1989}, month = {1989///}, pages = {20 - 23}, author = {Ryu,K. W. and JaJa, Joseph F.} } @article {15969, title = {Languages with self-reference II : Knowledge, belief, and modality}, journal = {Artificial Intelligence}, volume = {34}, year = {1988}, month = {1988/03//}, pages = {179 - 212}, abstract = {Negative results of Montague and Thomason have diverted research in propositional attitudes away from syntactic ("first-order") approaches, encouraging modal formalisms instead, especially in representing epistemic notions. We show that modal logics are on no firmer ground than first-order ones when equally endowed with substitutive self-reference. Nonetheless, there may still be remedies, hinging in part upon a distinction between "dynamic" and "static" notions of provability and belief (an earlier version of this paper emphasized a somewhat different distinction).}, isbn = {0004-3702}, doi = {16/0004-3702(88)90038-0}, url = {http://www.sciencedirect.com/science/article/pii/0004370288900380}, author = {Perlis, Don} } @article {16323, title = {Learning from examples: generation and evaluation of decision trees for software resource analysis}, journal = {IEEE Transactions on Software Engineering}, volume = {14}, year = {1988}, month = {1988/12//}, pages = {1743 - 1757}, abstract = {A general solution method for the automatic generation of decision (or classification) trees is investigated. The approach is to provide insights through in-depth empirical characterization and evaluation of decision trees for one problem domain, specifically, that of software resource data analysis. The purpose of the decision trees is to identify classes of objects (software modules) that had high development effort, i.e. in the uppermost quartile relative to past data. Sixteen software systems ranging from 3000 to 112000 source lines have been selected for analysis from a NASA production environment. The collection and analysis of 74 attributes (or metrics), for over 4700 objects, capture a multitude of information about the objects: development effort, faults, changes, design style, and implementation style. A total of 9600 decision trees are automatically generated and evaluated. The analysis focuses on the characterization and evaluation of decision tree accuracy, complexity, and composition. The decision trees correctly identified 79.3\% of the software modules that had high development effort or faults, on the average across all 9600 trees. The decision trees generated from the best parameter combinations correctly identified 88.4\% of the modules on the average. 
Visualization of the results is emphasized, and sample decision trees are included.}, keywords = {Analysis of variance, Artificial intelligence, Classification tree analysis, Data analysis, decision theory, Decision trees, Fault diagnosis, Information analysis, machine learning, metrics, NASA, production environment, software engineering, software modules, software resource analysis, Software systems, Termination of employment, trees (mathematics)}, isbn = {0098-5589}, doi = {10.1109/32.9061}, author = {Selby,R. W and Porter, Adam} } @article {13704, title = {A Lexical Conceptual Approach to Generation for Machine Translation}, year = {1988}, month = {1988/01//}, institution = {MASSACHUSETTS INST OF TECH CAMBRIDGE ARTIFICIAL INTELLIGENCE LAB}, abstract = {Current approaches to generation for machine translation make use of direct-replacement templates, large grammars, and knowledge-based inferencing techniques. Not only are rules language-specific, but they are too simplistic to handle sentences that exhibit more complex phenomena. Furthermore, these systems are not easily extendable to other languages because the rules that map the internal representation to the surface form are entirely dependent on both the domain of the system and the language being generated. Finally, an adequate interlingual representation has not yet been discovered; thus, knowledge-based inferencing is necessary and syntactic cross-linguistic generalization cannot be exploited. This report introduces a plan for the development of a theoretically based computational scheme of natural language generation for a translation system. The emphasis of the project is the mapping from the lexical conceptual structure of sentences to an underlying or base syntactic structure called deep structure. This approach tackles the problems of thematic and structural divergence, i.e., it allows generation of target language sentences that are not thematically or structurally equivalent to their conceptually equivalent source language counterparts. Two other, more secondary tasks, construction of a dictionary and mapping from deep structure to surface structure, will also be discussed. The generator operates on a constrained grammatical theory rather than on a set of surface level transformations.}, keywords = {*MACHINE TRANSLATION, *NATURAL LANGUAGE, *SYSTEMS APPROACH, COMPUTATIONS, CYBERNETICS, Dictionaries, grammars, internal, LEXICOGRAPHY, SURFACES, syntax, THEORY, TRANSLATIONS, WORDS(LANGUAGE)}, url = {http://stinet.dtic.mil/oai/oai?\&verb=getRecord\&metadataPrefix=html\&identifier=ADA197356}, author = {Dorr, Bonnie J} } @mastersthesis {18929, title = {Locality principles in syntax and in parsing}, year = {1988}, month = {1988///}, school = {Massachusetts Institute of Technology, Dept. of Linguistics and Philosophy}, url = {http://cat.inist.fr/?aModele=afficheN\&cpsidt=11810263}, author = {Weinberg, Amy} } @article {18091, title = {Locating alignments with k differences for nucleotide and amino acid sequences}, journal = {Computer applications in the biosciences: CABIOS}, volume = {4}, year = {1988}, month = {1988///}, pages = {19 - 19}, author = {Landau,G. M and Vishkin, Uzi and Nussinov,R.} } @article {12048, title = {Learning shape computations}, journal = {Proc. DARPA Image}, year = {1987}, month = {1987///}, author = {Aloimonos, J. and Shulman, D.} } @conference {15940, title = {Life on a desert island}, booktitle = {Proc.
Workshop on The Frame Problem in Artificial Intelligence}, year = {1987}, month = {1987///}, pages = {349 - 357}, author = {Drapkin,J. and Miller,M. and Perlis, Don} } @article {17274, title = {Learning disabled students{\textquoteright} difficulties in learning to use a word processor: implications for design}, journal = {ACM SIGCHI Bulletin}, volume = {17}, year = {1986}, month = {1986/01//}, pages = {41 - 46}, abstract = {Learning disabled students can derive great benefits from using word processors. The ability to produce a neat, printed copy can increase motivation and encourage writing for a wider audience. The editing power makes revision possible without tedious re-copying, thus freeing students and teachers to approach writing as a process involving repeated drafts. Specific problems with handwriting and spelling can also be circumvented. However, learning to use the editing capabilities often presents problems for students, especially those with learning difficulties. Word processors must be designed that are simple, easy to learn, and yet powerful. This study makes software design recommendations based on a study of learning disabled students learning to use word processing. Two groups of four LD students (4th-6th grade) were given twelve hours of word processing instruction using two word processors. Detailed records of progress and errors were made during learning and a final assessment task. Specific design problems are reported and recommendations are made for tasks such as cursor movement, insertion/deletion, use of nulls, blanks, and formatting characters, and overall organization.}, isbn = {0736-6906}, doi = {10.1145/15671.15675}, url = {http://doi.acm.org/10.1145/15671.15675}, author = {MacArthur,Charles A. and Shneiderman, Ben} } @article {17275, title = {Learning Disabled Students{\textquoteright} Difficulties in Learning to Use A Word Processor: Implications for Instruction and Software Evaluation}, journal = {Journal of Learning Disabilities}, volume = {19}, year = {1986}, month = {1986/04/01/}, pages = {248 - 253}, abstract = {Learning disabled (LD) students can derive great benefits from using word processors. The ability to produce a neat, printed copy can increase motivation and encourage writing for a wider audience. The editing power makes revision possible without tedious recopying, thus freeing students and teachers to approach writing as a process involving repeated drafts. Specific problems with handwriting and spelling can also be circumvented. However, learning to use a word processor often presents problems. Based on a study of LD students learning to use word processing, this paper makes recommendations for evaluating word processing software and designing instruction that is sensitive to students{\textquoteright} difficulties.}, isbn = {0022-2194}, doi = {10.1177/002221948601900414}, url = {http://ldx.sagepub.com/content/19/4/248}, author = {MacArthur,Charles A. and Shneiderman, Ben} } @article {15962, title = {Languages with self-reference I: Foundations}, journal = {Artificial Intelligence}, volume = {25}, year = {1985}, month = {1985/03//}, pages = {301 - 322}, abstract = {It is argued that a proper treatment of cognitive notions such as beliefs and concepts should allow broad and consistent expression of syntax and semantics, and that this in turn depends on self-reference. A theory of quotation and unquotation is presented to this end that appears to make unnecessary the usual hierarchical and non-first-order constructions for these notions.
In the current paper (Part I) the underlying theory is presented; a sequel will treat in more detail the applications to cognition.}, isbn = {0004-3702}, doi = {16/0004-3702(85)90075-X}, url = {http://www.sciencedirect.com/science/article/pii/000437028590075X}, author = {Perlis, Don} } @article {17273, title = {Learning a menu selection tree: training methods compared}, journal = {Behaviour \& Information Technology}, volume = {4}, year = {1985}, month = {1985///}, pages = {81 - 91}, abstract = {Menu selection systems sometimes present learning problems for novice users. This comparison of four training methods for novice users found that the global tree diagram of the menu system was superior to command sequence and frame presentation methods, and somewhat better than trial and error. Methods were evaluated on the basis of (1) number of target nodes found, (2) mean number of selections to a target node, (3) recall of the menu structure, and (4) subjective rating of ease of learning.}, isbn = {0144-929X}, doi = {10.1080/01449298508901790}, url = {http://www.tandfonline.com/doi/abs/10.1080/01449298508901790}, author = {Parton,Diana and Huffman,Keith and Pridgen,Patty and Norman,Kent and Shneiderman, Ben} } @article {14985, title = {Lower bounds on monotone arithmetic circuits with restricted depths}, journal = {Computers \& Mathematics with Applications}, volume = {11}, year = {1985}, month = {1985/12//}, pages = {1155 - 1164}, abstract = {We consider monotone arithmetic circuits with restricted depths to compute monotone multivariate polynomials such as the elementary symmetric functions, convolution of several vectors and raising a matrix to a power. We develop general lower- and upper-bound techniques that seem to generate almost-matching bounds for all the functions considered. These results imply exponential lower bounds for circuits of bounded depths which compute any of these functions. We also obtain several examples for which negation can reduce the size exponentially.}, isbn = {0898-1221}, doi = {10.1016/0898-1221(85)90103-8}, url = {http://www.sciencedirect.com/science/article/pii/0898122185901038}, author = {JaJa, Joseph F.} } @book {17279, title = {Let{\textquoteright}s Learn BASIC: A Kids{\textquoteright} Introduction to BASIC Programming on IBM Personal Computers}, year = {1984}, month = {1984///}, publisher = {Little Brown and Company}, organization = {Little Brown and Company}, isbn = {0316787264}, author = {Shneiderman, Ben} } @article {16737, title = {The Logical Access Path Schema of a Database}, journal = {IEEE Transactions on Software Engineering}, volume = {SE-8}, year = {1982}, month = {1982/11//}, pages = {563 - 573}, abstract = {A new schema which models the usage of the logical access paths of the database is proposed. The schema models all database activities (i.e., retrievals and updates), and integrates their logical access paths by recognizing common subpaths and increasing the "weight" of the shared subpaths.
The logical access path schema provides a comprehensive picture of the logical access paths, and the cumulative usage of the shared subpaths and/or intermediate results. The schema serves a dual purpose. Firstly, it is used as a model of the access requirements during the database design, and secondly, as the basis for optimization during the operation of the database.}, keywords = {Aggregation hierarchy, Calculus, Computer science, Data structures, Databases, Design optimization, external logical subschema, generalization hierarchy, Information retrieval, Joining processes, logical access path, propositional calculus, views}, isbn = {0098-5589}, doi = {10.1109/TSE.1982.235886}, author = {Roussopoulos, Nick} } @inbook {15767, title = {Linear Programming Problems Arising from Partial Differential Equations}, booktitle = {Sparse Matrix Proceedings 1978}, year = {1979}, month = {1979///}, pages = {25 - 40}, publisher = {SIAM Press}, organization = {SIAM Press}, address = {Philadelphia}, author = {O{\textquoteright}Leary, Dianne P.}, editor = {Duff,Iain S. and Stewart, G.W.} }