@conference {19465, title = {You{\textquoteright}Re Capped: Understanding the Effects of Bandwidth Caps on Broadband Use in the Home}, booktitle = {SIGCHI {\textquoteright}12}, series = {CHI {\textquoteright}12}, year = {2012}, month = {2012///}, pages = {3021 - 3030}, publisher = {ACM}, organization = {ACM}, abstract = {Bandwidth caps, a limit on the amount of data users can upload and download in a month, are common globally for both home and mobile Internet access. With caps, each bit of data consumed comes at a cost against a monthly quota or a running tab. Yet, relatively little work has considered the implications of this usage-based pricing model on the user experience. In this paper, we present results from a qualitative study of households living with bandwidth caps. Our findings suggest home users grapple with three uncertainties regarding their bandwidth usage: invisible balances, mysterious processes, and multiple users. We discuss how these uncertainties impact their usage and describe the potential for better tools to help monitor and manage data caps. We conclude that as a community we need to cater for users under Internet cost constraints.}, keywords = {Bandwidth, bandwidth cap, data cap, Internet, metered use, pricing, usage-based billing, usage-based pricing}, isbn = {978-1-4503-1015-4}, url = {http://doi.acm.org/10.1145/2207676.2208714}, author = {Marshini Chetty and Banks, Richard and Brush, A.J. and Donner, Jonathan and Grinter, Rebecca} } @conference {17861, title = {Decentralized, accurate, and low-cost network bandwidth prediction}, booktitle = {2011 Proceedings IEEE INFOCOM}, year = {2011}, month = {2011/04/10/15}, pages = {6 - 10}, publisher = {IEEE}, organization = {IEEE}, abstract = {The distributed nature of modern computing makes end-to-end prediction of network bandwidth increasingly important. Our work is inspired by prior work that treats the Internet and bandwidth as an approximate tree metric space. This paper presents a decentralized, accurate, and low cost system that predicts pairwise bandwidth between hosts. We describe an algorithm to construct a distributed tree that embeds bandwidth measurements. The correctness of the algorithm is provable when driven by precise measurements. We then describe three novel heuristics that achieve high accuracy for predicting bandwidth even with imprecise input data. Simulation experiments with a real-world dataset confirm that our approach shows high accuracy with low cost.}, keywords = {accuracy, approximate tree metric space, Bandwidth, bandwidth allocation, bandwidth measurement, decentralized low cost system, distributed tree, end-to-end prediction, Extraterrestrial measurements, Internet, low-cost network bandwidth prediction, Measurement uncertainty, pairwise bandwidth, Peer to peer computing, Prediction algorithms, trees (mathematics)}, isbn = {978-1-4244-9919-9}, doi = {10.1109/INFCOM.2011.5935251}, author = {Sukhyun Song and Keleher,P. and Bhattacharjee, Bobby and Sussman, Alan} } @conference {15005, title = {Optimization of linked list prefix computations on multithreaded GPUs using CUDA}, booktitle = {Parallel Distributed Processing (IPDPS), 2010 IEEE International Symposium on}, year = {2010}, month = {2010/04//}, pages = {1 - 8}, abstract = {We present a number of optimization techniques to compute prefix sums on linked lists and implement them on multithreaded GPUs using CUDA. 
Prefix computations on linked structures involve in general highly irregular fine grain memory accesses that are typical of many computations on linked lists, trees, and graphs. While the current generation of GPUs provides substantial computational power and extremely high bandwidth memory accesses, they may appear at first to be primarily geared toward streamed, highly data parallel computations. In this paper, we introduce an optimized multithreaded GPU algorithm for prefix computations through a randomization process that reduces the problem to a large number of fine-grain computations. We map these fine-grain computations onto multithreaded GPUs in such a way that the processing cost per element is shown to be close to the best possible. Our experimental results show scalability for list sizes ranging from 1M nodes to 256M nodes, and significantly improve on the recently published parallel implementations of list ranking, including implementations on the Cell Processor, the MTA-8, and the NVIDIA GeForce 200 series. They also compare favorably to the performance of the best known CUDA algorithm for the scan operation on the Tesla C1060.}, keywords = {Bandwidth, CUDA, MTA, NVIDIA GeForce 200 series, Tesla C1060, Cell processor, coprocessors, multi-threading, data parallel computations, extremely high bandwidth memory accesses, fine grain memory accesses, linked list prefix computations, multithreaded GPUs, optimization, prefix sums, randomization process}, doi = {10.1109/IPDPS.2010.5470455}, author = {Wei, Zheng and JaJa, Joseph F.} } @conference {16503, title = {Web Monitoring 2.0: Crossing Streams to Satisfy Complex Data Needs}, booktitle = {IEEE 25th International Conference on Data Engineering, 2009. ICDE {\textquoteright}09}, year = {2009}, month = {2009/04/29/March}, pages = {1215 - 1218}, publisher = {IEEE}, organization = {IEEE}, abstract = {Web monitoring 2.0 supports the complex information needs of clients who probe multiple information sources and generate mashups by integrating across these volatile streams. A proxy that aims at satisfying multiple customized client profiles will face a scalability challenge in trying to maximize the number of clients served while at the same time fully satisfying complex client needs. In this paper, we introduce an abstraction of complex execution intervals, a combination of time intervals and information streams, to capture complex client needs. Given some budgetary constraints (e.g., bandwidth), we present offline algorithmic solutions for the problem of maximizing completeness of capturing complex profiles.}, keywords = {Bandwidth, complex client information need, Data Delivery, Data engineering, database management systems, Educational institutions, Internet, Mashups, mashups generation, Monitoring, multiple information source, offline algorithmic solution, Portals, PROBES, Profiles, Query processing, scalability, scheduling, volatile information stream, Web 2.0, Web Monitoring}, isbn = {978-1-4244-3422-0}, doi = {10.1109/ICDE.2009.204}, author = {Roitman,H. and Gal,A. and Raschid, Louiqa} } @conference {16746, title = {Caching in Mobile Environments: A New Analysis and the Mobile-Cache System}, booktitle = {IEEE 18th International Symposium on Personal, Indoor and Mobile Radio Communications, 2007. 
PIMRC 2007}, year = {2007}, month = {2007/09/03/7}, pages = {1 - 5}, publisher = {IEEE}, organization = {IEEE}, abstract = {In the near future, we will be surrounded by ubiquitous wireless networks and so information dissemination for mobile users is a key issue. Hybrid push-pull constitutes a very effective and scalable solution. Our contribution is twofold. First we provide a new analysis that takes into account the user mobility. We argue that in a highly mobile setting, analysis and optimization goals discussed in past papers become irrelevant, since the most important aspect of the system is not delay, but rather the ability to answer as many queries as possible. As we show, the optimal pull-push bandwidth ratio depends on the mobility patterns of the users. Second, we use our findings to build Mobile-Cache, a system that can efficiently answer multiple queries over a wireless environment.}, keywords = {Bandwidth, Broadcasting, cache storage, Cities and towns, FCC, hybrid push-pull solution, information dissemination, Intelligent networks, Land mobile radio, mobile communication, Mobile computing, mobile environment, mobile radio, mobile-cache system, multiple query answering, pull-push bandwidth ratio, Query processing, Road safety, road vehicles, ubiquitous wireless network, Vehicle safety}, isbn = {978-1-4244-1144-3}, doi = {10.1109/PIMRC.2007.4394076}, author = {Frangiadakis,N. and Roussopoulos, Nick} } @article {17203, title = {Human Responsibility for Autonomous Agents}, journal = {IEEE Intelligent Systems}, volume = {22}, year = {2007}, month = {2007/04//March}, pages = {60 - 61}, abstract = {Automated or autonomous systems can sometimes fail harmlessly, but they can also destroy data, compromise privacy, and consume resources, such as bandwidth or server capacity. What{\textquoteright}s more troubling is that automated systems embedded in vital systems can cause financial losses, destruction of property, and loss of life. Controlling these dangers will increase trust while enabling broader use of these systems with higher degrees of safety. Obvious threats stem from design errors and software bugs, but we can{\textquoteright}t overlook mistaken assumptions by designers, unanticipated actions by humans, and interference from other computerized systems. This article is part of a special issue on Interacting with Autonomy.}, keywords = {Automatic control, Autonomous agents, autonomous systems, Bandwidth, Computer bugs, Computer errors, Control systems, data privacy, Human-computer interaction, HUMANS, Robots, Safety, Software design}, isbn = {1541-1672}, doi = {10.1109/MIS.2007.32}, author = {Shneiderman, Ben} } @article {12094, title = {Wireless Network Security and Interworking}, journal = {Proceedings of the IEEE}, volume = {94}, year = {2006}, month = {2006/02//}, pages = {455 - 466}, abstract = {A variety of wireless technologies have been standardized and commercialized, but no single technology is considered the best because of different coverage and bandwidth limitations. Thus, interworking between heterogeneous wireless networks is extremely important for ubiquitous and high-performance wireless communications. Security in interworking is a major challenge due to the vastly different security architectures used within each network. The goal of this paper is twofold. First, we provide a comprehensive discussion of security problems and current technologies in 3G and WLAN systems. 
Second, we provide introductory discussions about the security problems in interworking, the state-of-the-art solutions, and open problems.}, keywords = {3G mobile communication, 3G systems, Authentication, Bandwidth, Communication system security, computer network security, computer security, Data security, internetworking, Land mobile radio cellular systems, Paper technology, security architectures, security of data, telecommunication security, wireless communication, wireless communications, Wireless LAN, wireless network security, Wireless networks, wireless technologies, WLAN systems}, isbn = {0018-9219}, doi = {10.1109/JPROC.2005.862322}, author = {Shin,M. and Ma,J. and Mishra,A. and Arbaugh, William A.} } @article {18208, title = {Efficient bandwidth resource allocation for low-delay multiuser video streaming}, journal = {Circuits and Systems for Video Technology, IEEE Transactions on}, volume = {15}, year = {2005}, month = {2005/09//}, pages = {1124 - 1137}, abstract = {This paper studies efficient bandwidth resource allocation for streaming multiple MPEG-4 fine granularity scalability (FGS) video programs to multiple users. We begin with a simple single-user scenario and propose a rate-control algorithm that has low delay and achieves an excellent tradeoff between the average visual distortion and the quality fluctuation. The proposed algorithm employs two weight factors for adjusting the tradeoff, and the optimal choice of these factors is derived. We then extend to the multiuser case and propose a dynamic resource allocation algorithm with low delay and low computational complexity. By exploring the variations in the scene complexity of video programs as well as dynamically and jointly distributing the available system resources among users, our proposed algorithm provides low fluctuation of quality for each user, and can support consistent or differentiated quality among all users to meet applications{\textquoteright} needs. Experimental results show that compared to traditional look-ahead sliding-window approaches, our algorithm can achieve comparable visual quality and channel utilization at a much lower cost of delay, computation, and storage.}, keywords = {Bandwidth, efficient bandwidth resource allocation, low-delay multiuser video streaming, MPEG-4 fine granularity scalability, video coding, rate-control algorithm, average visual distortion, quality fluctuation, single-user scenario, dynamic resource allocation, computational complexity, channel utilization, look-ahead sliding-window approaches, multiuser channels, telecommunication control, multiple video programs}, isbn = {1051-8215}, doi = {10.1109/TCSVT.2005.852626}, author = {Su,Guan-Ming and M. Wu} } @conference {18207, title = {Efficient bandwidth resource allocation for low-delay multiuser MPEG-4 video transmission}, booktitle = {Communications, 2004 IEEE International Conference on}, volume = {3}, year = {2004}, month = {2004/06//}, pages = {1308 - 1312 Vol.3}, abstract = {An efficient bandwidth resource allocation algorithm with low delay and low fluctuation of quality to transmit multiple MPEG-4 fine granularity scalability (FGS) video programs to multiple users is proposed in this paper. By exploring the variation in the scene complexity of each video program and jointly redistributing available system resources among users, our proposed algorithm provides low fluctuation of quality for each user and consistent quality among all users. 
Experimental results show that compared to a traditional look-ahead sliding-window approach, our scheme can achieve comparable perceptual quality and channel utilization at a much lower cost of delay, computation, and storage.}, keywords = {Bandwidth, bandwidth resource allocation algorithm, broadband networks, channel utilization, multiuser channels, delays, FGS, fine granularity scalability, look-ahead sliding-window approach, moving picture expert group, MPEG-4, quality of service, resource allocation, video coding, video program, multiuser video transmission}, doi = {10.1109/ICC.2004.1312724}, author = {Su,Guan-Ming and M. Wu} } @conference {12100, title = {High-performance MAC for high-capacity wireless LANs}, booktitle = {13th International Conference on Computer Communications and Networks, 2004. ICCCN 2004. Proceedings}, year = {2004}, month = {2004/10/11/13}, pages = {167 - 172}, publisher = {IEEE}, organization = {IEEE}, abstract = {The next-generation wireless technologies, e.g., 802.11n and 802.15.3a, offer a physical-layer speed at least an order of magnitude higher than the current standards. However, direct application of current MACs leads to high protocol overhead and significant throughput degradation. In this paper, we propose ADCA, a high-performance MAC that works with a high-capacity physical layer. ADCA exploits two ideas of adaptive batch transmission and opportunistic selection of high-rate hosts to simultaneously reduce the overhead and improve the aggregate throughput. It opportunistically favors high-rate hosts by providing higher access probability and more access time, while ensuring each low-rate host a certain minimum amount of channel access time. Simulations show that the ADCA design increases the throughput by 112\% and reduces the average delay by 55\% compared with the legacy DCF. It delivers more than 100 Mbps MAC-layer throughput as compared with 35 Mbps offered by the legacy MAC.}, keywords = {35 Mbit/s, access protocols, Aggregates, Bandwidth, batch transmission, Computer science, Educational institutions, high-capacity wireless LAN, high-performance MAC, Laboratories, Local area networks, Media Access Protocol, opportunistic selection, Physical layer, probability, Throughput, Wireless LAN}, isbn = {0-7803-8814-3}, doi = {10.1109/ICCCN.2004.1401615}, author = {Yuan Yuan and Daqing Gu and Arbaugh, William A. and Jinyun Zhang} } @conference {13216, title = {Iterative figure-ground discrimination}, booktitle = {Pattern Recognition, 2004. ICPR 2004. Proceedings of the 17th International Conference on}, volume = {1}, year = {2004}, month = {2004/08//}, pages = {67 - 70 Vol.1}, abstract = {Figure-ground discrimination is an important problem in computer vision. Previous work usually assumes that the color distribution of the figure can be described by a low dimensional parametric model such as a mixture of Gaussians. However, such an approach has difficulty selecting the number of mixture components and is sensitive to the initialization of the model parameters. In this paper, we employ non-parametric kernel estimation for color distributions of both the figure and background. We derive an iterative sampling-expectation (SE) algorithm for estimating the color distribution and segmentation. There are several advantages of kernel-density estimation. First, it enables automatic selection of weights of different cues based on the bandwidth calculation from the image itself. Second, it does not require model parameter initialization and estimation. 
The experimental results on images of cluttered scenes demonstrate the effectiveness of the proposed algorithm.}, keywords = {Bandwidth, figure-ground discrimination, computer vision, image colour analysis, color distribution, low dimensional parametric model, Gaussian mixture, Gaussian processes, Gaussian distributions, nonparametric kernel estimation, kernel density estimation, iterative sampling-expectation algorithm, sampling methods, bandwidth calculation, model parameter initialization, image segmentation, estimation theory, nonparametric statistics}, doi = {10.1109/ICPR.2004.1334006}, author = {Zhao, L. and Davis, Larry S.} } @article {14768, title = {Resource policing to support fine-grain cycle stealing in networks of workstations}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {15}, year = {2004}, month = {2004/10//}, pages = {878 - 892}, abstract = {We present the design, implementation, and performance evaluation of a suite of resource policing mechanisms that allow guest processes to efficiently and unobtrusively exploit otherwise idle workstation resources. Unlike traditional policies that harvest cycles only from unused machines, we employ fine-grained cycle stealing to exploit resources even from machines that have active users. We developed a suite of kernel extensions that enable these policies to operate without significantly impacting host processes: 1) a new starvation-level CPU priority for guest jobs, 2) a new page replacement policy that imposes hard bounds on physical memory usage by guest processes, and 3) a new I/O scheduling mechanism called rate windows that throttles guest processes{\textquoteright} usage of I/O and network bandwidth. We evaluate both the individual impacts of each mechanism, and their utility for our fine-grain cycle stealing.}, keywords = {Application software, Bandwidth, cluster computing, Computer networks, Computer Society, Concurrent computing, cycle stealing, grid computing, I/O scheduling, Intelligent networks, Kernel, network bandwidth, networks of workstations, page replacement policy, parallel computing, performance evaluation, Processor scheduling, resource allocation, resource scheduling, starvation-level CPU priority, workstation clusters, workstation resources, Workstations}, isbn = {1045-9219}, doi = {10.1109/TPDS.2004.58}, author = {Ryu, K. D and Hollingsworth, Jeffrey K} } @conference {17878, title = {Improving access to multi-dimensional self-describing scientific datasets}, booktitle = {3rd IEEE/ACM International Symposium on Cluster Computing and the Grid, 2003. Proceedings. CCGrid 2003}, year = {2003}, month = {2003/05/12/15}, pages = {172 - 179}, publisher = {IEEE}, organization = {IEEE}, abstract = {Applications that query into very large multidimensional datasets are becoming more common. Many self-describing scientific data file formats have also emerged, which have structural metadata to help navigate the multi-dimensional arrays that are stored in the files. The files may also contain application-specific semantic metadata. In this paper, we discuss efficient methods for performing searches for subsets of multi-dimensional data objects, using semantic information to build multidimensional indexes, and group data items into properly sized chunks to maximize disk I/O bandwidth. 
This work is the first step in the design and implementation of a generic indexing library that will work with various high-dimension scientific data file formats containing semantic information about the stored data. To validate the approach, we have implemented indexing structures for NASA remote sensing data stored in the HDF format with a specific schema (HDF-EOS), and show the performance improvements that are gained from indexing the datasets, compared to using the existing HDF library for accessing the data.}, keywords = {Application software, application-specific semantic metadata, Bandwidth, Computer science, database indexing, disk I/O bandwidth, distributed databases, Educational institutions, Indexing, indexing structures, Libraries, meta data, Middleware, multidimensional arrays, multidimensional datasets, Multidimensional systems, NASA, NASA remote sensing data, Navigation, query formulation, self-describing scientific data file formats, structural metadata, very large databases}, isbn = {0-7695-1919-9}, doi = {10.1109/CCGRID.2003.1199366}, author = {Nam,B. and Sussman, Alan} } @conference {13343, title = {Using state modules for adaptive query processing}, booktitle = {19th International Conference on Data Engineering, 2003. Proceedings}, year = {2003}, month = {2003/03/05/8}, pages = {353 - 364}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present a query architecture in which join operators are decomposed into their constituent data structures (State Modules, or SteMs), and dataflow among these SteMs is managed adaptively by an eddy routing operator [R. Avnur et al., (2000)]. Breaking the encapsulation of joins serves two purposes. First, it allows the eddy to observe multiple physical operations embedded in a join algorithm, allowing for better calibration and control of these operations. Second, the SteM on a relation serves as a shared materialization point, enabling multiple competing access methods to share results, which can be leveraged by multiple competing join algorithms. Our architecture extends prior work significantly, allowing continuously adaptive decisions for most major aspects of traditional query optimization: choice of access methods and join algorithms, ordering of operators, and choice of a query spanning tree. SteMs introduce significant routing flexibility to the eddy, enabling more opportunities for adaptation, but also introducing the possibility of incorrect query results. We present constraints on eddy routing through SteMs that ensure correctness while preserving a great deal of flexibility. We also demonstrate the benefits of our architecture via experiments in the Telegraph dataflow system. We show that even a simple routing policy allows significant flexibility in adaptation, including novel effects like automatic "hybridization " of multiple algorithms for a single join.}, keywords = {adaptive query processing, Bandwidth, Calibration, data encapsulation, data structure, Data structures, Databases, Dictionaries, eddy routing, eddy routing operator, Encapsulation, join operator, multiple algorithm automatic hybridization, multiple competing join algorithm, query architecture, Query processing, query spanning tree, Routing, routing policy, Runtime, shared materialization point, State Module, SteMs, Telegraph dataflow system, Telegraphy, Tree data structures}, isbn = {0-7803-7665-X}, doi = {10.1109/ICDE.2003.1260805}, author = {Vijayshankar Raman and Deshpande, Amol and Hellerstein,J. 
M} } @conference {17835, title = {Multiple Query Optimization for Data Analysis Applications on Clusters of SMPs}, booktitle = {2nd IEEE/ACM International Symposium on Cluster Computing and the Grid, 2002}, year = {2002}, month = {2002/05/21/24}, pages = {154 - 154}, publisher = {IEEE}, organization = {IEEE}, abstract = {This paper is concerned with the efficient execution of multiple query workloads on a cluster of SMPs. We target applications that access and manipulate large scientific datasets. Queries in these applications involve user-defined processing operations and distributed data structures to hold intermediate and final results. Our goal is to implement system components to leverage previously computed query results and to effectively utilize processing power and aggregated I/O bandwidth on SMP nodes so that both single queries and multi-query batches can be efficiently executed.}, keywords = {Aggregates, Application software, Bandwidth, Data analysis, Data structures, Delay, Query processing, scheduling, Subcontracting, Switched-mode power supply}, isbn = {0-7695-1582-7}, doi = {10.1109/CCGRID.2002.1017123}, author = {Andrade,H. and Kurc, T. and Sussman, Alan and Saltz, J.} } @article {12307, title = {Scalable secure group communication over IP multicast}, journal = {Selected Areas in Communications, IEEE Journal on}, volume = {20}, year = {2002}, month = {2002/10//}, pages = {1511 - 1527}, abstract = {We introduce and analyze a scalable rekeying scheme for implementing secure group communications over Internet protocol multicast. We show that our scheme incurs constant processing, message, and storage overhead for a rekey operation when a single member joins or leaves the group, and logarithmic overhead for bulk simultaneous changes to the group membership. These bounds hold even when group dynamics are not known a priori. Our rekeying algorithm requires a particular clustering of the members of the secure multicast group. We describe a protocol to achieve such clustering and show that it is feasible to efficiently cluster members over realistic Internet-like topologies. We evaluate the overhead of our own rekeying scheme and also of previously published schemes via simulation over an Internet topology map containing over 280 000 routers. Through analysis and detailed simulations, we show that this rekeying scheme performs better than previous schemes for a single change to group membership. Further, for bulk group changes, our algorithm outperforms all previously known schemes by several orders of magnitude in terms of actual bandwidth usage, processing costs, and storage requirements.}, keywords = {Bandwidth, scalable secure group communication, Internet protocol, IP multicast, rekeying algorithm, group membership, group dynamics, access control, authentication, cryptography, telecommunication security, transport protocols, network routing, routers, Internet topology map, Internet-like topologies, multicast server, logarithmic overhead, message overhead, processing costs, storage requirements, bandwidth usage, simulation}, isbn = {0733-8716}, doi = {10.1109/JSAC.2002.803986}, author = {Banerjee,S. and Bhattacharjee, Bobby} } @conference {17532, title = {Receiver based management of low bandwidth access links}, booktitle = {IEEE INFOCOM 2000. Nineteenth Annual Joint Conference of the IEEE Computer and Communications Societies. 
Proceedings}, volume = {1}, year = {2000}, month = {2000///}, pages = {245 - 254 vol.1}, publisher = {IEEE}, organization = {IEEE}, abstract = {In this paper, we describe a receiver-based congestion control policy that leverages TCP flow control mechanisms to prioritize mixed traffic loads across access links. We manage queueing at the access link to: (1) improve the response time of interactive network applications; (2) reduce congestion-related packet losses; while (3) maintaining high throughput for bulk-transfer applications. Our policy controls queue length by manipulating receive socket buffer sizes. We have implemented this solution in a dynamically loadable Linux kernel module, and tested it over low-bandwidth links. Our approach yields a 7-fold improvement in packet latency over an unmodified system while maintaining 94\% link utilization. In the common case, congestion-related packet losses at the access link can be eliminated. Finally, by prioritizing short flows, we show that our system reduces the time to download a complex Web page during a large background transfer by a factor of two}, keywords = {Bandwidth, buffer storage, bulk-transfer applications, complex Web page, congestion control policy, Delay, dynamically loadable Linux kernel module, information resources, interactive network, Internet, Kernel, link utilization, Linux, low-bandwidth access links, mixed traffic load, packet latency, queue length, queueing theory, receive socket buffer sizes, receiver-based management, response time, short flow prioritizing, Size control, Sockets, subscriber loops, TCP flow control, telecommunication congestion control, telecommunication network management, Telecommunication traffic, Testing, Throughput, Transport protocols, Unix, Web pages}, isbn = {0-7803-5880-5}, doi = {10.1109/INFCOM.2000.832194}, author = {Spring, Neil and Chesire,M. and Berryman,M. and Sahasranaman,V. and Anderson,T. and Bershad,B.} } @conference {15691, title = {Integrated admission control in hierarchical video-on-demand systems}, booktitle = {IEEE International Conference on Multimedia Computing and Systems, 1999}, volume = {1}, year = {1999}, month = {1999/07//}, pages = {220 - 225 vol.1}, publisher = {IEEE}, organization = {IEEE}, abstract = {We develop a unified model of a hierarchical video-on-demand (VoD) system by integrating the storage and the network subsystems. Rather than restricting the analysis to an isolated subsystem, the performance of the VoD system is analyzed as an end-to-end system. On a system-wide basis, request handling and admission control policies are designed to minimize global performance metrics. Through our simulation, we compare different request handling policies and show that a hierarchical VoD architecture with request handling that allows retrials at more than one resource will minimize overall blocking}, keywords = {Admission control, Bandwidth, blocking, Computer science, Design methodology, end-to-end system, hierarchical video-on-demand systems, integrated admission control, Intelligent networks, Load management, Motion pictures, Network servers, network subsystem, performance, Performance analysis, performance evaluation, quality of service, request handling, resource allocation, Resource management, simulation, storage subsystem, video on demand, video servers}, isbn = {0-7695-0253-9}, doi = {10.1109/MMCS.1999.779196}, author = {Mundur, Padma and Simon,R. 
and Sood,A.} } @conference {15687, title = {Network service selection for distributed multimedia applications}, booktitle = {Third International Conference on Computational Intelligence and Multimedia Applications, 1999. ICCIMA {\textquoteright}99. Proceedings}, year = {1999}, month = {1999///}, pages = {388 - 392}, publisher = {IEEE}, organization = {IEEE}, abstract = {An important question in the development of system support for distributed multimedia is the type of network service offered to applications. This paper compares two network service disciplines: weighted fair queueing (WFQ) and non-preemptive earliest deadline first (NEDF). We show that, for a broad class of high-bandwidth distributed multimedia applications, WFQ outperforms NEDF in terms of network throughput while still providing an application-adequate end-to-end service. This result holds despite the fact that NEDF offers applications far greater flexibility in terms of control over end-to-end delivery delay}, keywords = {Admission control, Application software, application-adequate end-to-end service, Bandwidth, Communication system traffic control, Computer science, Delay, distributed processing, end-to-end delivery delay control, flexibility, high-bandwidth distributed multimedia applications, interactive multimedia, multimedia systems, network service selection, network throughput, nonpreemptive earliest deadline first, queueing theory, Regulators, system support, telecommunication services, Throughput, Traffic control, weighted fair queueing}, isbn = {0-7695-0300-4}, doi = {10.1109/ICCIMA.1999.798561}, author = {Simon,R. and Sood,A. and Mundur, Padma} } @conference {17627, title = {Multicommodity flow and circuit switching}, booktitle = {Proceedings of the Thirty-First Hawaii International Conference on System Sciences, 1998}, volume = {7}, year = {1998}, month = {1998/01/06/9}, pages = {459 - 465 vol.7}, publisher = {IEEE}, organization = {IEEE}, abstract = {Given a set of request pairs in a network, the problem of routing virtual circuits with low congestion is to connect each pair by a path so that few paths use the same link in the network. We build on an earlier multicommodity flow based approach of Leighton and Rao (1996) to show that short flow paths lead to path selections with low congestion. This shows that such good path selections exist for constant-degree expanders with strong expansion, generalizing a result of (Broder et al., 1994). We also show, for infinitely many n, n-vertex undirected graphs Gn along with a set T of connection requests, such that: T is fractionally realizable using flow-paths that impose a (fractional) congestion of at most 1; but any rounding of such a flow to the given set of flow-paths, leads to a congestion of Ω(log n/log log n). This is progress on a long-standing open problem}, keywords = {Application software, Bandwidth, circuit switching, Computer science, constant-degree expanders, graph theory, High speed integrated circuits, Integrated circuit technology, Laboratories, low congestion, MATHEMATICS, multicommodity flow, National electric code, network routing, path selections, Routing, short flow paths, Switching circuits, switching theory, undirected graphs, virtual circuit routing}, isbn = {0-8186-8255-8}, doi = {10.1109/HICSS.1998.649241}, author = {Leighton,T. and Rao,S. 
and Srinivasan, Aravind} } @conference {17607, title = {Improved approximations for edge-disjoint paths, unsplittable flow, and related routing problems}, booktitle = {38th Annual Symposium on Foundations of Computer Science, 1997. Proceedings}, year = {1997}, month = {1997/10/20/22}, pages = {416 - 425}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present improved approximation algorithms for a family of problems involving edge-disjoint paths and unsplittable flow, and for some related routing problems. The central theme of all our algorithms is the underlying multi-commodity flow relaxation}, keywords = {Approximation algorithms, Bandwidth, Channel allocation, computational complexity, Computer science, edge-disjoint paths, graph theory, High speed integrated circuits, IEL, Image motion analysis, Information systems, multi-commodity flow relaxation, Multiprocessor interconnection networks, network routing, Optical fiber networks, Routing, routing problems, unsplittable flow}, isbn = {0-8186-8197-7}, doi = {10.1109/SFCS.1997.646130}, author = {Srinivasan, Aravind} } @article {17839, title = {An integrated runtime and compile-time approach for parallelizing structured and block structured applications}, journal = {IEEE Transactions on Parallel and Distributed Systems}, volume = {6}, year = {1995}, month = {1995/07//}, pages = {747 - 754}, abstract = {In compiling applications for distributed memory machines, runtime analysis is required when data to be communicated cannot be determined at compile-time. One such class of applications requiring runtime analysis is block structured codes. These codes employ multiple structured meshes, which may be nested (for multigrid codes) and/or irregularly coupled (called multiblock or irregularly coupled regular mesh problems). In this paper, we present runtime and compile-time analysis for compiling such applications on distributed memory parallel machines in an efficient and machine-independent fashion. We have designed and implemented a runtime library which supports the runtime analysis required. The library is currently implemented on several different systems. We have also developed compiler analysis for determining data access patterns at compile time and inserting calls to the appropriate runtime routines. Our methods can be used by compilers for HPF-like parallel programming languages in compiling codes in which data distribution, loop bounds and/or strides are unknown at compile-time. To demonstrate the efficacy of our approach, we have implemented our compiler analysis in the Fortran 90D/HPF compiler developed at Syracuse University. We have experimented with a multi-block Navier-Stokes solver template and a multigrid code. 
Our experimental results show that our primitives have low runtime communication overheads and the compiler parallelized codes perform within 20\% of the codes parallelized by manually inserting calls to the runtime library}, keywords = {Bandwidth, block structured applications, block structured codes, compile-time approach, compiling applications, data access patterns, Data analysis, Delay, distributed memory machines, distributed memory systems, FORTRAN, Fortran 90D/HPF compiler, High performance computing, HPF-like parallel programming languages, integrated runtime approach, irregularly coupled regular mesh problems, multigrid code, Navier-Stokes solver template, Parallel machines, parallel programming, Pattern analysis, performance evaluation, program compilers, Program processors, Runtime library, Uninterruptible power systems}, isbn = {1045-9219}, doi = {10.1109/71.395403}, author = {Agrawal,G. and Sussman, Alan and Saltz, J.} } @article {16768, title = {A pipeline N-way join algorithm based on the 2-way semijoin program}, journal = {IEEE Transactions on Knowledge and Data Engineering}, volume = {3}, year = {1991}, month = {1991/12//}, pages = {486 - 495}, abstract = {The semijoin has been used as an effective operator in reducing data transmission and processing over a network that allows forward size reduction of relations and intermediate results generated during the processing of a distributed query. The authors propose a relational operator, two-way semijoin, which enhanced the semijoin with backward size reduction capability for more cost-effective query processing. A pipeline N-way join algorithm for joining the reduced relations residing on N sites is introduced. The main advantage of this algorithm is that it eliminates the need for transferring and storing intermediate results among the sites. A set of experiments showing that the proposed algorithm outperforms all known conventional join algorithms that generate intermediate results is included}, keywords = {2-way semijoin program, backward size reduction, Bandwidth, Computer networks, Costs, Data communication, data transmission, Database systems, database theory, Delay, distributed databases, distributed query, forward size reduction, intermediate results, Local area networks, network, Parallel algorithms, pipeline N-way join algorithm, pipeline processing, Pipelines, programming theory, Query processing, Relational databases, relational operator, SITES, Workstations}, isbn = {1041-4347}, doi = {10.1109/69.109109}, author = {Roussopoulos, Nick and Kang,H.} }