@conference {19290, title = {Loop transformations for interface-based hierarchies in SDF graphs}, booktitle = {2010 21st IEEE International Conference on Application-specific Systems, Architectures and Processors (ASAP)}, year = {2010}, month = {2010}, pages = {341 - 344}, abstract = {Data-flow has proven to be an attractive computation model for programming digital signal processing (DSP) applications. A restricted version of data-flow, termed synchronous data-flow (SDF), offers strong compile-time predictability properties, but has limited expressive power. A new type of hierarchy (Interface-based SDF) has been proposed allowing more expressivity while maintaining its predictability. One of the main problems with this hierarchical SDF model is the lack of trade-off between parallelism and network clustering. This paper presents a systematic method for applying an important class of loop transformation techniques in the context of interface-based SDF semantics. The resulting approach provides novel capabilities for integrating parallelism extraction properties of the targeted loop transformations with the useful modeling, analysis, and code reuse properties provided by SDF.}, keywords = {Application software, code generation, Computer architecture, Computer interfaces, Data-Flow programming, Digital signal processing, Loop parallelization, PARALLEL PROCESSING, Power engineering computing, Power system modeling, Processor scheduling, Programming profession, scheduling, SDF graph, system recovery}, author = {Piat, J. and Bhattacharyya, Shuvra S. and Raulet, M.} } @conference {17168, title = {First Steps to Netviz Nirvana: Evaluating Social Network Analysis with NodeXL}, booktitle = {International Conference on Computational Science and Engineering, 2009. CSE {\textquoteright}09}, volume = {4}, year = {2009}, month = {2009/08/29/31}, pages = {332 - 339}, publisher = {IEEE}, organization = {IEEE}, abstract = {Social Network Analysis (SNA) has evolved as a popular, standard method for modeling meaningful, often hidden structural relationships in communities. Existing SNA tools often involve extensive pre-processing or intensive programming skills that can challenge practitioners and students alike. NodeXL, an open-source template for Microsoft Excel, integrates a library of common network metrics and graph layout algorithms within the familiar spreadsheet format, offering a potentially low-barrier-to-entry framework for teaching and learning SNA. We present the preliminary findings of 2 user studies of 21 graduate students who engaged in SNA using NodeXL. The majority of students, while information professionals, had little technical background or experience with SNA techniques. Six of the participants had more technical backgrounds and were chosen specifically for their experience with graph drawing and information visualization. Our primary objectives were (1) to evaluate NodeXL as an SNA tool for a broad base of users and (2) to explore methods for teaching SNA. 
Our complementary dual case-study format demonstrates the usability of NodeXL for a diverse set of users, and significantly, the power of a tightly integrated metrics/visualization tool to spark insight and facilitate sense-making for students of SNA.}, keywords = {Computer science, computer science education, data visualisation, Data visualization, Educational institutions, graph drawing, graph layout algorithm, Information services, Information Visualization, Internet, Libraries, Microsoft Excel open-source template, MILC, multi-dimensional in-depth long-term case studies, Netviz Nirvana, NodeXL, Open source software, Programming profession, SNA, social network analysis, Social network services, social networking (online), spreadsheet programs, structural relationship, teaching, visual analytics, visualization tool, Web sites}, isbn = {978-1-4244-5334-4}, doi = {10.1109/CSE.2009.120}, author = {Bonsignore,E. M and Dunne,C. and Rotman,D. and Smith,M. and Capone,T. and Hansen,D. L and Shneiderman, Ben} } @conference {14696, title = {Fable: A Language for Enforcing User-defined Security Policies}, booktitle = {IEEE Symposium on Security and Privacy, 2008. SP 2008}, year = {2008}, month = {2008/05/18/22}, pages = {369 - 383}, publisher = {IEEE}, organization = {IEEE}, abstract = {This paper presents FABLE, a core formalism for a programming language in which programmers may specify security policies and reason that these policies are properly enforced. In FABLE, security policies can be expressed by associating security labels with the data or actions they protect. Programmers define the semantics of labels in a separate part of the program called the enforcement policy. FABLE prevents a policy from being circumvented by allowing labeled terms to be manipulated only within the enforcement policy; application code must treat labeled values abstractly. Together, these features facilitate straightforward proofs that programs implementing a particular policy achieve their high-level security goals. FABLE is flexible enough to implement a wide variety of security policies, including access control, information flow, provenance, and security automata. We have implemented FABLE as part of the LINKS web programming language; we call the resulting language SELINKS. We report on our experience using SELINKS to build two substantial applications, a wiki and an on-line store, equipped with a combination of access control and provenance policies. To our knowledge, no existing framework enables the enforcement of such a wide variety of security policies with an equally high level of assurance.}, keywords = {Access control, Automata, Collaborative work, Communication system security, Computer languages, computer security, Data security, enforcement policy, FABLE, Government, high-level security goals, information flow, Information security, Language-based security, programming languages, Programming profession, provenance, security automata, security labels, security of data, user-defined security policies, verified enforcement, Web programming language}, isbn = {978-0-7695-3168-7}, doi = {10.1109/SP.2008.29}, author = {Swamy,N. and Corcoran,B.J. 
and Hicks, Michael W.} } @article {14805, title = {A tool to help tune where computation is performed}, journal = {IEEE Transactions on Software Engineering}, volume = {27}, year = {2001}, month = {2001/07//}, pages = {618 - 629}, abstract = {We introduce a new performance metric, called load balancing factor (LBF), to assist programmers when evaluating different tuning alternatives. The LBF metric differs from traditional performance metrics since it is intended to measure the performance implications of a specific tuning alternative rather than quantifying where time is spent in the current version of the program. A second unique aspect of the metric is that it provides guidance about moving work within a distributed or parallel program rather than reducing it. A variation of the LBF metric can also be used to predict the performance impact of changing the underlying network. The LBF metric is computed incrementally and online during the execution of the program to be tuned. We also present a case study that shows that our metric can accurately predict the actual performance gains for a test suite of six programs}, keywords = {Computational modeling, Current measurement, Distributed computing, distributed program, distributed programming, load balancing factor, Load management, parallel program, parallel programming, Performance analysis, performance evaluation, Performance gain, performance metric, Programming profession, software metrics, software performance evaluation, Testing, Time measurement, tuning}, isbn = {0098-5589}, doi = {10.1109/32.935854}, author = {Eom, Hyeonsang and Hollingsworth, Jeffrey K} } @article {12089, title = {Secure quality of service handling: SQoSH}, journal = {IEEE Communications Magazine}, volume = {38}, year = {2000}, month = {2000/04//}, pages = {106 - 112}, abstract = {Proposals for programmable network infrastructures, such as active networks and open signaling, provide programmers with access to network resources and data structures. The motivation for providing these interfaces is accelerated introduction of new services, but exposure of the interfaces introduces many new security risks. We describe some of the security issues raised by active networks. We then describe our secure active network environment (SANE) architecture. SANE was designed as a security infrastructure for active networks, and was implemented in the SwitchWare architecture. SANE restricts the actions that loaded modules can perform by restricting the resources that can be named; this is further extended to remote invocation by means of cryptographic credentials. SANE can be extended to support restricted control of quality of service in a programmable network element. The Piglet lightweight device kernel provides a {\textquotedblleft}virtual clock{\textquotedblright} type of scheduling discipline for network traffic, and exports several tuning knobs with which the clock can be adjusted. The ALIEN active loader provides safe access to these knobs to modules that operate on the network element. Thus, the proposed SQoSH architecture is able to provide safe, secure access to network resources, while allowing these resources to be managed by end users needing customized networking services. 
A desirable consequence of SQoSH{\textquoteright}s integration of access control and resource control is that a large class of denial-of-service attacks, unaddressed solely with access control and cryptographic protocols, can now be prevented}, keywords = {Acceleration, Access control, active networks, ALIEN active loader, Clocks, Computer network management, cryptographic credentials, cryptography, customized networking services, Data security, Data structures, denial-of-service attacks, interfaces, Kernel, loaded modules, network resources, network traffic, open signaling, packet switching, Piglet lightweight device kernel, programmable network element, programmable network infrastructures, Programming profession, Proposals, quality of service, remote invocation, resource control, restricted control of quality of service, SANE, scheduling, scheduling discipline, secure active network environment architecture, secure quality of service handling, security infrastructure, security risks, SQoSH, SwitchWare architecture, telecommunication security, tuning knobs, virtual clock}, isbn = {0163-6804}, doi = {10.1109/35.833566}, author = {Alexander,D. S and Arbaugh, William A. and Keromytis,A. D and Muir,S. and Smith,J. M} } @conference {14784, title = {LBF: a performance metric for program reorganization}, booktitle = {18th International Conference on Distributed Computing Systems, 1998. Proceedings}, year = {1998}, month = {1998/05/26/29}, pages = {222 - 229}, publisher = {IEEE}, organization = {IEEE}, abstract = {We introduce a new performance metric, called Load Balancing Factor (LBF), to assist programmers with evaluating different tuning alternatives. The LBF metric differs from traditional performance metrics since it is intended to measure the performance implications of a specific tuning alternative rather than quantifying where time is spent in the current version of the program. A second unique aspect of the metric is that it provides guidance about moving work within a distributed or parallel program rather than reducing it. A variation of the LBF metric can also be used to predict the performance impact of changing the underlying network. The LBF metric can be computed incrementally and online during the execution of the program to be tuned. We also present a case study that shows that our metric can predict the actual performance gains accurately for a test suite of six programs}, keywords = {case study, Computational modeling, computer network, Computer science, Debugging, distributed processing, distributed program, Educational institutions, Integrated circuit testing, LBF metric, load balancing factor, Load management, measurement, NIST, parallel program, parallel programming, performance metric, program reorganization, program tuning, Programming profession, resource allocation, software metrics, software performance evaluation, US Department of Energy}, isbn = {0-8186-8292-2}, doi = {10.1109/ICDCS.1998.679505}, author = {Eom, H. and Hollingsworth, Jeffrey K} } @article {14816, title = {The Paradyn parallel performance measurement tool}, journal = {Computer}, volume = {28}, year = {1995}, month = {1995/11//}, pages = {37 - 46}, abstract = {Paradyn is a tool for measuring the performance of large-scale parallel programs. Our goal in designing a new performance tool was to provide detailed, flexible performance information without incurring the space (and time) overhead typically associated with trace-based tools. 
Paradyn achieves this goal by dynamically instrumenting the application and automatically controlling this instrumentation in search of performance problems. Dynamic instrumentation lets us defer insertion until the moment it is needed (and remove it when it is no longer needed); Paradyn{\textquoteright}s Performance Consultant decides when and where to insert instrumentation}, keywords = {Aerodynamics, Automatic control, automatic instrumentation control, Debugging, dynamic instrumentation, flexible performance information, high level languages, insertion, Instruments, large-scale parallel program, Large-scale systems, measurement, Paradyn parallel performance measurement tool, Parallel machines, parallel programming, Performance Consultant, Programming profession, scalability, software performance evaluation, software tools}, isbn = {0018-9162}, doi = {10.1109/2.471178}, author = {Miller, B. P and Callaghan, M. D and Cargille, J. M and Hollingsworth, Jeffrey K and Irvin, R. B and Karavanic, K. L and Kunchithapadam, K. and Newhall, T.} } @conference {14760, title = {Dynamic program instrumentation for scalable performance tools}, booktitle = {Scalable High-Performance Computing Conference, 1994., Proceedings of the}, year = {1994}, month = {1994/05//}, pages = {841 - 850}, publisher = {IEEE}, organization = {IEEE}, abstract = {Presents a new technique called {\textquoteleft}dynamic instrumentation{\textquoteright} that provides efficient, scalable, yet detailed data collection for large-scale parallel applications. Our approach is unique because it defers inserting any instrumentation until the application is in execution. We can insert or change instrumentation at any time during execution by modifying the application{\textquoteright}s binary image. Only the instrumentation required for the currently selected analysis or visualization is inserted. As a result, our technique collects several orders of magnitude less data than traditional data collection approaches. We have implemented a prototype of our dynamic instrumentation on the CM-5, and present results for several real applications. In addition, we include recommendations to operating system designers, compiler writers, and computer architects about the features necessary to permit efficient monitoring of large-scale parallel systems}, keywords = {Application software, binary image, compiler writing, Computer architecture, Computer displays, Computerized monitoring, Concurrent computing, data acquisition, data collection, data visualisation, Data visualization, dynamic program instrumentation, efficient monitoring, executing program, Instruments, large-scale parallel applications, Large-scale systems, operating system design, Operating systems, parallel programming, program analysis, program diagnostics, program visualization, Programming profession, Sampling methods, scalable performance tools, software tools}, isbn = {0-8186-5680-8}, doi = {10.1109/SHPCC.1994.296728}, author = {Hollingsworth, Jeffrey K and Miller, B. P and Cargille, J.} } @article {14791, title = {IPS-2: the second generation of a parallel program measurement system}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {1}, year = {1990}, month = {1990/04//}, pages = {206 - 217}, abstract = {IPS, a performance measurement system for parallel and distributed programs, is currently running on its second implementation. 
IPS{\textquoteright}s model of parallel programs uses knowledge about the semantics of a program{\textquoteright}s structure to provide two important features. First, IPS provides a large amount of performance data about the execution of a parallel program, and this information is organized so that access to it is easy and intuitive. Second, IPS provides performance analysis techniques that help to guide the programmer automatically to the location of program bottlenecks. The first implementation of IPS was a testbed for the basic design concepts, providing experience with a hierarchical program and measurement model, interactive program analysis, and automatic guidance techniques. It was built on the Charlotte distributed operating system. The second implementation, IPS-2, extends the basic system with new instrumentation techniques, an interactive and graphical user interface, and new automatic guidance analysis techniques. This implementation runs on 4.3BSD UNIX systems, on the VAX, DECstation, Sun 4, and Sequent Symmetry multiprocessor.}, keywords = {4.3BSD UNIX systems, automatic guidance techniques, Automatic testing, Charlotte distributed operating system, CPA, DECstation, design concepts, distributed programs, graphical user interface, Graphical user interfaces, Instruments, interactive program analysis, IPS-2, measurement, message systems, network operating systems, Operating systems, parallel program measurement system, parallel programming, parallel programs, Performance analysis, performance analysis techniques, performance evaluation, performance measurement system, Power system modeling, program bottlenecks, program diagnostics, Programming profession, semantics, Sequent Symmetry multiprocessor, shared-memory systems, software tools, Springs, Sun, Sun 4, Unix, VAX}, isbn = {1045-9219}, doi = {10.1109/71.80132}, author = {Miller, B. P and Clark, M. and Hollingsworth, Jeffrey K and Kierstead, S. and Lim,S. -S and Torzewski, T.} } @article {17090, title = {Display Strategies for Program Browsing: Concepts and Experiment}, journal = {IEEE Software}, volume = {3}, year = {1986}, month = {1986/05//}, pages = {7 - 15}, abstract = {The new, larger display screens can improve program comprehension{\textemdash}if the added space is used for more effective presentation, not just more code or larger type.}, keywords = {Computer errors, Content management, Fault detection, Information analysis, Large screen displays, Microscopy, Phase detection, Programming profession, User interfaces}, isbn = {0740-7459}, doi = {10.1109/MS.1986.233405}, author = {Shneiderman, Ben and Shafer,P. and Simon,R. and Weldon,L.} } @article {16751, title = {SEES{\textemdash}A Software Testing Environment Support System}, journal = {IEEE Transactions on Software Engineering}, volume = {SE-11}, year = {1985}, month = {1985/04//}, pages = {355 - 366}, abstract = {SEES is a database system to support program testing. The program database is automatically created during the compilation of the program by a compiler built using the YACC compiler-compiler.}, keywords = {Computer architecture, Database systems, Error correction, Program processors, Programming profession, Relational databases, Software testing, software tools, Workstations, Writing}, isbn = {0098-5589}, doi = {10.1109/TSE.1985.232225}, author = {Roussopoulos, Nick and Yeh,R. T} }