@conference {14688, title = {Dynamically checking ownership policies in concurrent C/C++ programs}, booktitle = {Proceedings of the 37th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages}, series = {POPL {\textquoteright}10}, year = {2010}, month = {2010///}, pages = {457 - 470}, publisher = {ACM}, organization = {ACM}, address = {New York, NY, USA}, abstract = {Concurrent programming errors arise when threads share data incorrectly. Programmers often avoid these errors by using synchronization to enforce a simple ownership policy: data is either owned exclusively by a thread that can read or write the data, or it is read owned by a set of threads that can read but not write the data. Unfortunately, incorrect synchronization often fails to enforce these policies and memory errors in languages like C and C++ can violate these policies even when synchronization is correct. In this paper, we present a dynamic analysis for checking ownership policies in concurrent C and C++ programs despite memory errors. The analysis can be used to find errors in commodity multi-threaded programs and to prevent attacks that exploit these errors. We require programmers to write ownership assertions that describe the sharing policies used by different parts of the program. These policies may change over time, as may the policies{\textquoteright} means of enforcement, whether it be locks, barriers, thread joins, etc. Our compiler inserts checks in the program that signal an error if these policies are violated at runtime. We evaluated our tool on several benchmark programs. The run-time overhead was reasonable: between 0 and 49\% with an average of 26\%. We also found the tool easy to use: the total number of ownership assertions is small, and the asserted specification and implementation can be debugged together by running the instrumented program and addressing the errors that arise. Our approach enjoys a pleasing modular soundness property: if a thread executes a sequence of statements on variables it owns, the statements are serializable within a valid execution, and thus their effects can be reasoned about in isolation from other threads in the program.}, keywords = {concurrency, Debugging, Dynamic analysis, Security, Testing, tools}, isbn = {978-1-60558-479-9}, doi = {10.1145/1706299.1706351}, url = {http://doi.acm.org/10.1145/1706299.1706351}, author = {Martin,Jean-Philippe and Hicks, Michael W. and Costa,Manuel and Akritidis,Periklis and Castro,Miguel} } @article {15383, title = {Electromagnetic Modeling of Ethernet Transformers}, journal = {IEEE Transactions on Magnetics}, volume = {46}, year = {2010}, month = {2010/02//}, pages = {563 - 569}, abstract = {The unique features of Ethernet transformers are reviewed, and novel testing techniques for identification of lumped parameters of equivalent circuits are presented. To account for the distributed nature of the cross-winding and intrawinding capacitances, novel distributed models for electromagnetic analysis of Ethernet transformers are presented.}, keywords = {capacitance, cross-winding, distributed models, electromagnetic analysis, electromagnetic modeling, equivalent circuits, Ethernet transformers, intrawinding capacitances, local area networks, lumped parameter networks, lumped parameters, novel testing techniques, Testing, transformer windings, transformers}, isbn = {0018-9464}, doi = {10.1109/TMAG.2009.2033203}, author = {Bowen,D.
and Mayergoyz, Isaak D. and Krafft,C.} } @conference {17162, title = {Finding comparable temporal categorical records: A similarity measure with an interactive visualization}, booktitle = {IEEE Symposium on Visual Analytics Science and Technology, 2009. VAST 2009}, year = {2009}, month = {2009/10/12/13}, pages = {27 - 34}, publisher = {IEEE}, organization = {IEEE}, abstract = {An increasing number of temporal categorical databases are being collected: Electronic Health Records in healthcare organizations, traffic incident logs in transportation systems, or student records in universities. Finding similar records within these large databases requires effective similarity measures that capture the searcher{\textquoteright}s intent. Many similarity measures exist for numerical time series, but temporal categorical records are different. We propose a temporal categorical similarity measure, the M\&M (Match \& Mismatch) measure, which is based on the concept of aligning records by sentinel events, then matching events between the target and the compared records. The M\&M measure combines the time differences between pairs of events and the number of mismatches. To accommodate customization of parameters in the M\&M measure and results interpretation, we implemented Similan, an interactive search and visualization tool for temporal categorical records. A usability study with 8 participants demonstrated that Similan was easy to learn and enabled them to find similar records, but users had difficulty understanding the M\&M measure. The usability study feedback led to an improved version with a continuous timeline, which was tested in a pilot study with 5 participants.}, keywords = {data visualisation, Educational institutions, Feedback, Information retrieval, interactive search tool, interactive systems, interactive visualization tool, large databases, M\&M Measure, Match \& Mismatch measure, Medical services, numerical time series, parameters customization, Particle measurements, Similan, similarity measure, Similarity Search, temporal categorical databases, Temporal Categorical Records, temporal databases, Testing, Time measurement, time series, transportation, usability, very large databases, visual databases, Visualization}, isbn = {978-1-4244-5283-5}, doi = {10.1109/VAST.2009.5332595}, author = {Wongsuphasawat,K. and Shneiderman, Ben} } @article {12146, title = {Maturing Software Engineering Knowledge through Classifications: A Case Study on Unit Testing Techniques}, journal = {IEEE Transactions on Software Engineering}, volume = {35}, year = {2009}, month = {2009/08//july}, pages = {551 - 565}, abstract = {Classification makes a significant contribution to advancing knowledge in both science and engineering. It is a way of investigating the relationships between the objects to be classified and identifies gaps in knowledge. Classification in engineering also has a practical application; it supports object selection. Classifications can help mature software engineering knowledge, as they constitute an organized structure of knowledge items. To date, there have been few attempts at classifying in software engineering. In this research, we examine how useful classifications in software engineering are for advancing knowledge by trying to classify testing techniques. The paper presents a preliminary classification of a set of unit testing techniques. To obtain this classification, we enacted a generic process for developing useful software engineering classifications.
The proposed classification has been proven useful for maturing knowledge about testing techniques, and therefore, SE, as it helps to: 1) provide a systematic description of the techniques, 2) understand testing techniques by studying the relationships among techniques (measured in terms of differences and similarities), 3) identify potentially useful techniques that do not yet exist by analyzing gaps in the classification, and 4) support practitioners in testing technique selection by matching technique characteristics to project characteristics.}, keywords = {classification, matching technique characteristic, program testing, project characteristic, software engineering, software engineering knowledge, software testing, Testing, unit testing techniques}, isbn = {0098-5589}, doi = {10.1109/TSE.2009.13}, author = {Vegas,S. and Juristo,N. and Basili, Victor R.} } @conference {14251, title = {Real-time shape retrieval for robotics using skip Tri-Grams}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems, 2009. IROS 2009}, year = {2009}, month = {2009/10/10/15}, pages = {4731 - 4738}, publisher = {IEEE}, organization = {IEEE}, abstract = {The real time requirement is an additional constraint on many intelligent applications in robotics, such as shape recognition and retrieval using a mobile robot platform. In this paper, we present a scalable approach for efficiently retrieving closed contour shapes. The contour of an object is represented by piecewise linear segments. A skip Tri-Gram is obtained by selecting three segments in the clockwise order while allowing a constant number of segments to be {\textquotedblleft}skipped{\textquotedblright} in between. The main idea is to use skip Tri-Grams of the segments to implicitly encode the distant dependency of the shape. All skip Tri-Grams are used for efficiently retrieving closed contour shapes without pairwise matching feature points from two shapes. The retrieval is at least an order of magnitude faster than other state-of-the-art algorithms. We score 80\% in the Bullseye retrieval test on the whole MPEG 7 shape dataset. We further test the algorithm using a mobile robot platform in an indoor environment. 8 objects are used for testing from different viewing directions, and we achieve 82\% accuracy.}, keywords = {Bullseye retrieval test, Clocks, closed contour shape retrieval, Image retrieval, Image segmentation, Indexing, Information retrieval, Intelligent robots, Jacobian matrices, mobile robot, Mobile robots, MPEG 7 shape dataset, piecewise linear segments, Piecewise linear techniques, Real time systems, real-time shape retrieval, robot vision, SHAPE, shape recognition, shape representation, skip Tri-Grams, Testing}, isbn = {978-1-4244-3803-7}, doi = {10.1109/IROS.2009.5354738}, author = {Li,Yi and Bitsakos,K. and Ferm{\"u}ller, Cornelia and Aloimonos, J.} } @conference {15447, title = {Alternating GUI Test Generation and Execution}, booktitle = {Testing: Academic and Industrial Conference - Practice and Research Techniques, 2008. TAIC PART {\textquoteright}08}, year = {2008}, month = {2008///}, pages = {23 - 32}, abstract = {Users of today{\textquoteright}s software perform tasks by interacting with a graphical user interface (GUI) front-end via sequences of input events. Due to the flexibility offered by most GUIs, the number of event sequences grows exponentially with length.
One ubiquitous challenge of GUI testing is to selectively generate those sequences that lead to potentially problematic states. This paper presents ALT, a new technique that generates GUI test cases in batches, by leveraging GUI run-time information from a previously run batch to obtain the next batch. Each successive batch consists of "longer" test cases that expand the state space to be explored, yet prune the "unimportant" states. The "alternating" nature of ALT allows it to enhance the next batch by leveraging certain relationships between GUI events (e.g., one enables the other, one alters the other{\textquoteright}s execution) that are revealed only at run-time and non-trivial to infer statically. An empirical study on four fielded GUI-based applications demonstrates that ALT is successful at identifying complex failure-causing interactions between GUI events.}, keywords = {ALT, complex failure-causing interactions, event sequences, graphical user interface, Graphical user interfaces, GUI test generation, Testing}, doi = {10.1109/TAIC-PART.2008.10}, author = {Xun Yuan and Memon, Atif M.} } @conference {12555, title = {Compressive wireless arrays for bearing estimation}, booktitle = {IEEE International Conference on Acoustics, Speech and Signal Processing, 2008. ICASSP 2008}, year = {2008}, month = {2008/04/31/4}, pages = {2497 - 2500}, abstract = {Joint processing of sensor array outputs improves the performance of parameter estimation and hypothesis testing problems beyond the sum of the individual sensor processing results. When the sensors have high data sampling rates, arrays are tethered, creating a disadvantage for their deployment and also limiting their aperture size. In this paper, we develop the signal processing algorithms for randomly deployable wireless sensor arrays that are severely constrained in communication bandwidth. We focus on the acoustic bearing estimation problem and show that when the target bearings are modeled as a sparse vector in the angle space, low dimensional random projections of the microphone signals can be used to determine multiple source bearings by solving an l1-norm minimization problem. Field data results are shown where only 10 bits of information is passed from each microphone to estimate multiple target bearings.}, keywords = {acoustic bearing estimation problem, acoustic signal processing, array processing, communication bandwidth, compressive wireless arrays, direction-of-arrival estimation, hypothesis testing problems, joint processing, l1-norm minimization problem, microphone signals, parameter estimation, signal processing algorithms, sparse matrices, sparse vector, Testing, vectors, wireless arrays, wireless sensor networks}, doi = {10.1109/ICASSP.2008.4518155}, author = {Cevher, V. and Gurbuz, A.C. and McClellan, J.H. and Chellappa, Rama} } @conference {17360, title = {Similarity-Based Forecasting with Simultaneous Previews: A River Plot Interface for Time Series Forecasting}, booktitle = {11th International Conference on Information Visualization, 2007. IV {\textquoteright}07}, year = {2007}, month = {2007/07/04/6}, pages = {191 - 196}, publisher = {IEEE}, organization = {IEEE}, abstract = {Time-series forecasting has a large number of applications. Users with a partial time series for auctions, new stock offerings, or industrial processes desire estimates of the future behavior. We present a data driven forecasting method and interface called similarity-based forecasting (SBF).
A pattern matching search in an historical time series dataset produces a subset of curves similar to the partial time series. The forecast is displayed graphically as a river plot showing statistical information about the SBF subset. A forecasting preview interface allows users to interactively explore alternative pattern matching parameters and see multiple forecasts simultaneously. User testing with 8 users demonstrated advantages and led to improvements.}, keywords = {data driven forecasting method, data visualisation, Data visualization, Economic forecasting, forecasting preview interface, Graphical user interfaces, historical time series dataset, Laboratories, new stock offerings, partial time series, pattern matching, pattern matching search, Predictive models, river plot interface, Rivers, similarity-based forecasting, Smoothing methods, Technological innovation, Testing, time series, time series forecasting, Weather forecasting}, isbn = {0-7695-2900-3}, doi = {10.1109/IV.2007.101}, author = {Buono,P. and Plaisant, Catherine and Simeone,A. and Aris,A. and Shneiderman, Ben and Shmueli,G. and Jank,W.} } @article {17272, title = {Knowledge discovery in high-dimensional data: case studies and a user survey for the rank-by-feature framework}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {12}, year = {2006}, month = {2006/06//May}, pages = {311 - 322}, abstract = {Knowledge discovery in high-dimensional data is a challenging enterprise, but new visual analytic tools appear to offer users remarkable powers if they are ready to learn new concepts and interfaces. Our three-year effort to develop versions of the hierarchical clustering explorer (HCE) began with building an interactive tool for exploring clustering results. It expanded, based on user needs, to include other potent analytic and visualization tools for multivariate data, especially the rank-by-feature framework. Our own successes using HCE provided some testimonial evidence of its utility, but we felt it necessary to get beyond our subjective impressions. This paper presents an evaluation of the hierarchical clustering explorer (HCE) using three case studies and an e-mail user survey (n=57) to focus on skill acquisition with the novel concepts and interface for the rank-by-feature framework. Knowledgeable and motivated users in diverse fields provided multiple perspectives that refined our understanding of strengths and weaknesses. A user survey confirmed the benefits of HCE, but gave less guidance about improvements. Both evaluations suggested improved training methods}, keywords = {case study, Computer aided software engineering, Computer Society, Data analysis, data mining, data visualisation, Data visualization, database management systems, e-mail user survey, Genomics, Helium, Hierarchical Clustering Explorer, hierarchical clustering explorer., high-dimensional data, Histograms, Information visualization evaluation, interactive systems, interactive tool, knowledge discovery, multivariate data, Rank-by-feature framework, Scattering, Testing, user interface, User interfaces, user survey, visual analytic tools, visual analytics, visualization tools}, isbn = {1077-2626}, doi = {10.1109/TVCG.2006.50}, author = {Seo,Jinwook and Shneiderman, Ben} } @conference {17159, title = {Facilitating understanding of information visualizations: emerging principles and examples}, booktitle = {Eighth International Conference on Information Visualisation, 2004. IV 2004. 
Proceedings}, year = {2004}, month = {2004/07/14/16}, publisher = {IEEE}, organization = {IEEE}, abstract = {Summary form only given. The enthusiasm for information visualization has generated a wide variety of interesting tools for multi-dimensional, hierarchical, and other kinds of visualizations. However, some designs are readily accepted as understandable and useful, while others are perceived as confusing and useless. Controlled studies have begun to sort out some of the issues, but the insights of designers and usability tests are contributing interesting cognitive hypotheses for researchers and practical guidelines for developers. This paper offers examples of what works and what doesn{\textquoteright}t with a preliminary set of principles that might have wide applicability.}, keywords = {Computer science, data visualisation, Educational institutions, Guidelines, hierarchical visualization, HUMANS, Information Visualization, Laboratories, multidimensional visualization, Portable computers, Testing, usability, User interfaces, Visualization}, isbn = {0-7695-2177-0}, doi = {10.1109/IV.2004.1320117}, author = {Shneiderman, Ben} } @article {17508, title = {Why not make interfaces better than 3D reality?}, journal = {IEEE Computer Graphics and Applications}, volume = {23}, year = {2003}, month = {2003/12//Nov}, pages = {12 - 15}, abstract = {Many constrained interfaces are designed to be simpler than the real world by restricting movement, limiting interface actions, and keeping interface objects in a plane. However, the strong utility of pure 3D interfaces for medical, architectural, product design, and scientific visualization means that interface design for pure 3D remains an important challenge. An intriguing possibility is that enhanced 3D interfaces might offer simpler navigation, more compelling functionality, safer movements, and less occlusion, than 3D reality, especially for information exploration and visualization tasks. Such features can enable superhuman capabilities such as faster-than-light teleportation, flying through objects, and X-ray vision. Enhanced 3D interfaces might have supernatural tools such as magic wands for instantly shrinking, enlarging, duplicating, or sending objects and enchanted environments that provide error prevention, history keeping, and programming-by-demonstration. Playful game designers and creative application developers have already pushed the technology further than those who seek merely to mimic reality. Advanced designs are marked by their support of rapid situation awareness through effective overviews, reduced numbers of actions to accomplish tasks; and prompt, meaningful feedback for user actions. This article reviews these clever enhanced 3D-design features and encourages approaches that facilitate user tasks rather than mimic reality.}, keywords = {3D interfaces, 3D reality, Atmosphere, Avatars, Cities and towns, Collaboration, Computer displays, Computer Graphics, error prevention, history keeping, information exploration, Information Visualization, movements, Navigation, occlusion, overviews, programming-by-demonstration, rapid situation awareness, Testing, usability, user action feedback, User interfaces, Virtual reality, Visualization}, isbn = {0272-1716}, doi = {10.1109/MCG.2003.1242376}, author = {Shneiderman, Ben} } @conference {17314, title = {Ordered treemap layouts}, booktitle = {IEEE Symposium on Information Visualization, 2001.
INFOVIS 2001}, year = {2001}, month = {2001///}, pages = {73 - 78}, publisher = {IEEE}, organization = {IEEE}, keywords = {Clustering algorithms, Computer science, Data visualization, Displays, Electronic switching systems, Filling, Monte Carlo methods, Read only memory, Testing}, isbn = {0-7695-7342-5}, doi = {10.1109/INFVIS.2001.963283}, author = {Shneiderman, Ben and Wattenberg,M.} } @article {14805, title = {A tool to help tune where computation is performed}, journal = {IEEE Transactions on Software Engineering}, volume = {27}, year = {2001}, month = {2001/07//}, pages = {618 - 629}, abstract = {We introduce a new performance metric, called load balancing factor (LBF), to assist programmers when evaluating different tuning alternatives. The LBF metric differs from traditional performance metrics since it is intended to measure the performance implications of a specific tuning alternative rather than quantifying where time is spent in the current version of the program. A second unique aspect of the metric is that it provides guidance about moving work within a distributed or parallel program rather than reducing it. A variation of the LBF metric can also be used to predict the performance impact of changing the underlying network. The LBF metric is computed incrementally and online during the execution of the program to be tuned. We also present a case study that shows that our metric can accurately predict the actual performance gains for a test suite of six programs}, keywords = {Computational modeling, Current measurement, Distributed computing, distributed program, distributed programming, load balancing factor, Load management, parallel program, parallel programming, Performance analysis, performance evaluation, Performance gain, performance metric, Programming profession, software metrics, software performance evaluation, Testing, Time measurement, tuning}, isbn = {0098-5589}, doi = {10.1109/32.935854}, author = {Eom, Hyeonsang and Hollingsworth, Jeffrey K} } @conference {17532, title = {Receiver based management of low bandwidth access links}, booktitle = {IEEE INFOCOM 2000. Nineteenth Annual Joint Conference of the IEEE Computer and Communications Societies. Proceedings}, volume = {1}, year = {2000}, month = {2000///}, pages = {245 - 254 vol.1}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we describe a receiver-based congestion control policy that leverages TCP flow control mechanisms to prioritize mixed traffic loads across access links. We manage queueing at the access link to: (1) improve the response time of interactive network applications; (2) reduce congestion-related packet losses; while (3) maintaining high throughput for bulk-transfer applications. Our policy controls queue length by manipulating receive socket buffer sizes. We have implemented this solution in a dynamically loadable Linux kernel module, and tested it over low-bandwidth links. Our approach yields a 7-fold improvement in packet latency over an unmodified system while maintaining 94\% link utilization. In the common case, congestion-related packet losses at the access link can be eliminated.
Finally, by prioritizing short flows, we show that our system reduces the time to download a complex Web page during a large background transfer by a factor of two}, keywords = {Bandwidth, buffer storage, bulk-transfer applications, complex Web page, congestion control policy, Delay, dynamically loadable Linux kernel module, information resources, interactive network, Internet, Kernel, link utilization, Linux, low-bandwidth access links, mixed traffic load, packet latency, queue length, queueing theory, receive socket buffer sizes, receiver-based management, response time, short flow prioritizing, Size control, Sockets, subscriber loops, TCP flow control, telecommunication congestion control, telecommunication network management, Telecommunication traffic, Testing, Throughput, Transport protocols, Unix, Web pages}, isbn = {0-7803-5880-5}, doi = {10.1109/INFCOM.2000.832194}, author = {Spring, Neil and Chesire,M. and Berryman,M. and Sahasranaman,V. and Anderson,T. and Bershad,B.} } @article {15562, title = {Testing simple polygons}, journal = {Computational Geometry}, volume = {8}, year = {1997}, month = {1997/07//}, pages = {97 - 114}, abstract = {We consider the problem of verifying a simple polygon in the plane using {\textquotedblleft}test points{\textquotedblright}. A test point is a geometric probe that takes as input a point in Euclidean space, and returns {\textquotedblleft}+{\textquotedblright} if the point is inside the object being probed or {\textquotedblleft}-{\textquotedblright} if it is outside. A verification procedure takes as input a description of a target object, including its location and orientation, and it produces a set of test points that are used to verify whether a test object matches the description. We give a procedure for verifying an n-sided, non-degenerate, simple target polygon using 5n test points. This testing strategy works even if the test polygon has n + 1 vertices, and we show a lower bound of 3n + 1 test points for this case. We also give algorithms using O(n) test points for simple polygons that may be degenerate and for test polygons that may have up to n + 2 vertices. All of these algorithms work for polygons with holes. We also discuss extensions of our results to higher dimensions.}, keywords = {probing, Testing, Verifying}, isbn = {0925-7721}, doi = {10.1016/S0925-7721(96)00015-6}, url = {http://www.sciencedirect.com/science/article/pii/S0925772196000156}, author = {Arkin,Esther M. and Belleville,Patrice and Mitchell,Joseph S.B. and Mount, Dave and Romanik,Kathleen and Salzberg,Steven and Souvaine,Diane} } @article {17485, title = {Visual and textual consistency checking tools for graphical user interfaces}, journal = {IEEE Transactions on Software Engineering}, volume = {23}, year = {1997}, month = {1997/11//}, pages = {722 - 735}, abstract = {Designing user interfaces with consistent visual and textual properties is difficult. To demonstrate the harmful effects of inconsistency, we conducted an experiment with 60 subjects. Inconsistent interface terminology slowed user performance by 10 to 25 percent. Unfortunately, contemporary software tools provide only modest support for consistency control. Therefore, we developed SHERLOCK, a family of consistency analysis tools, which evaluates visual and textual properties of user interfaces. It provides graphical analysis tools such as a dialog box summary table that presents a compact overview of visual properties of all dialog boxes. 
SHERLOCK provides terminology analysis tools including an interface concordance, an interface spellchecker, and terminology baskets to check for inconsistent use of familiar groups of terms. Button analysis tools include a button concordance and a button layout table to detect variant capitalization, distinct typefaces, distinct colors, variant button sizes, and inconsistent button placements. We describe the design, software architecture, and the use of SHERLOCK. We tested SHERLOCK with four commercial prototypes. The outputs, analysis, and feedback from designers of the applications are presented}, keywords = {button analysis tools, Color, dialog box summary table, experiment, graphical analysis tools, Graphical user interfaces, human factors, inconsistent interface terminology, interface color, interface spell checker, Output feedback, Prototypes, SHERLOCK, Software architecture, Software design, software metrics, software prototyping, software tools, Terminology, terminology analysis tools, Testing, textual consistency checking tools, user interface management systems, User interfaces, user performance, visual consistency checking tools}, isbn = {0098-5589}, doi = {10.1109/32.637386}, author = {Mahajan,R. and Shneiderman, Ben} } @article {16378, title = {Going beyond integer programming with the Omega test to eliminate false data dependences}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {6}, year = {1995}, month = {1995/02//}, pages = {204 - 211}, abstract = {Array data dependence analysis methods currently in use generate false dependences that can prevent useful program transformations. These false dependences arise because the questions asked are conservative approximations to the questions we really should be asking. Unfortunately, the questions we really should be asking go beyond integer programming and require decision procedures for a subclass of Presburger formulas. In this paper, we describe how to extend the Omega test so that it can answer these queries and allow us to eliminate these false data dependences. We have implemented the techniques described here and believe they are suitable for use in production compilers}, keywords = {Algorithm design and analysis, Arithmetic, Computer science, Data analysis, false data dependences, integer programming, Linear programming, Omega test, Privatization, Production, production compilers, program compilers, Program processors, program testing, program transformations, Testing}, isbn = {1045-9219}, doi = {10.1109/71.342135}, author = {Pugh, William and Wonnacott,D.} } @conference {17577, title = {Computing with very weak random sources}, booktitle = {35th Annual Symposium on Foundations of Computer Science, 1994. Proceedings}, year = {1994}, month = {1994/11/20/22}, pages = {264 - 275}, publisher = {IEEE}, organization = {IEEE}, abstract = {For any fixed ε>0, we show how to simulate RP algorithms in time n^{O(log n)} using the output of a δ-source with min-entropy R(ε). Such a weak random source is asked once for R(ε) bits; it outputs an R-bit string such that any string has probability at most 2^{-R(ε)}. If ε>1-1/(k+1), our BPP simulations take time n^{O(log^{(k)} n)} (log^{(k)} is the logarithm iterated k times). We also give a polynomial-time BPP simulation using Chor-Goldreich sources of min-entropy R^{Ω(1)}, which is optimal. We present applications to time-space tradeoffs, expander constructions, and the hardness of approximation.
Also of interest is our randomness-efficient Leftover Hash Lemma, found independently by Goldreich and Wigderson}, keywords = {Application software, BPP simulations, Chor-Goldreich sources, computational complexity, Computational modeling, Computer science, Computer simulation, cryptography, distributed algorithms, expander constructions, hardness, MATHEMATICS, min-entropy, Physics computing, Polynomials, probability, R-bit string, randomness-efficient Leftover Hash Lemma, RP algorithms simulation, Testing, time-space tradeoffs, very weak random sources}, isbn = {0-8186-6580-7}, doi = {10.1109/SFCS.1994.365688}, author = {Srinivasan, Aravind and Zuckerman,D.} } @conference {14576, title = {A distributed algorithm for ear decomposition}, booktitle = {Fifth International Conference on Computing and Information, 1993. Proceedings ICCI {\textquoteright}93}, year = {1993}, month = {1993/05/27/29}, pages = {180 - 184}, publisher = {IEEE}, organization = {IEEE}, abstract = {A distributed algorithm for finding an ear decomposition of an asynchronous communication network with n nodes and m links is presented. At the completion of the algorithm either the ears are correctly labeled or the nodes are informed that there exists no ear decomposition. First we present a novel algorithm to check the existence of an ear decomposition which uses O(m) messages. We also present two other algorithms, one which is time-optimal and the other which is message-optimal to determine the actual ears and their corresponding numbers after determining the existence of an ear decomposition}, keywords = {Asynchronous communication, asynchronous communication network, Automata, Communication networks, computational complexity, Computer networks, Computer science, decomposition graph, distributed algorithm, distributed algorithms, Distributed computing, Ear, ear decomposition, graph theory, message-optimal, network decomposition, sorting, Testing, time-optimal}, isbn = {0-8186-4212-2}, doi = {10.1109/ICCI.1993.315382}, author = {Hannenhalli, Sridhar and Perumalla,K. and Chandrasekharan,N. and Sridhar,R.} } @article {17195, title = {Human Factors Experiments in Designing Interactive Systems}, journal = {Computer}, volume = {12}, year = {1979}, month = {1979/12//}, pages = {9 - 19}, abstract = {Successful industrial design gracefully unites esthetics and function at minimum cost. However, designers face special problems when they apply their skills to interactive computer systems.}, keywords = {Application software, Computer languages, Design engineering, Home computing, human factors, interactive systems, Process design, Testing}, isbn = {0018-9162}, doi = {10.1109/MC.1979.1658571}, author = {Shneiderman, Ben} }