@inproceedings{3361,
  abstract  = {In this paper, we investigate the computational complexity of quantitative information flow (QIF) problems. Information-theoretic quantitative relaxations of noninterference (based on Shannon entropy) have been introduced to enable more fine-grained reasoning about programs in situations where limited information flow is acceptable. The QIF bounding problem asks whether the information flow in a given program is bounded by a constant $d$. Our first result is that the QIF bounding problem is PSPACE-complete. The QIF memoryless synthesis problem asks whether it is possible to resolve nondeterministic choices in a given partial program in such a way that in the resulting deterministic program, the quantitative information flow is bounded by a given constant $d$. Our second result is that the QIF memoryless synthesis problem is also EXPTIME-complete. The QIF memoryless synthesis problem generalizes to the QIF general synthesis problem which does not impose the memoryless requirement (that is, by allowing the synthesized program to have more variables than the original partial program). Our third result is that the QIF general synthesis problem is EXPTIME-hard.},
  author    = {Černý, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A.},
  booktitle = {Proceedings of the 24th {IEEE} Computer Security Foundations Symposium ({CSF})},
  location  = {Cernay-la-Ville, France},
  pages     = {205--217},
  publisher = {IEEE},
  title     = {The Complexity of Quantitative Information Flow Problems},
  doi       = {10.1109/CSF.2011.21},
  year      = {2011},
}

@inproceedings{3358,
  abstract  = {The static scheduling problem often arises as a fundamental problem in real-time systems and grid computing. We consider the problem of statically scheduling a large job expressed as a task graph on a large number of computing nodes, such as a data center. This paper solves the large-scale static scheduling problem using abstraction refinement, a technique commonly used in formal verification to efficiently solve computationally hard problems. A scheduler based on abstraction refinement first attempts to solve the scheduling problem with abstract representations of the job and the computing resources. As abstract representations are generally small, the scheduling can be done reasonably fast. If the obtained schedule does not meet specified quality conditions (like data center utilization or schedule makespan) then the scheduler refines the job and data center abstractions and again solves the scheduling problem. We develop different schedulers based on abstraction refinement. We implemented these schedulers and used them to schedule task graphs from various computing domains on simulated data centers with realistic topologies. We compared the speed of scheduling and the quality of the produced schedules with our abstraction refinement schedulers against a baseline scheduler that does not use any abstraction. We conclude that abstraction refinement techniques give a significant speed-up compared to traditional static scheduling heuristics, at a reasonable cost in the quality of the produced schedules. We further used our static schedulers in an actual system that we deployed on Amazon EC2 and compared it against the Hadoop dynamic scheduler for large MapReduce jobs. Our experiments indicate that there is great potential for static scheduling techniques.},
  author    = {Henzinger, Thomas A. and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
  booktitle = {Proceedings of the Sixth Conference on Computer Systems ({EuroSys})},
  location  = {Salzburg, Austria},
  pages     = {329--342},
  publisher = {ACM},
  title     = {Scheduling Large Jobs by Abstraction Refinement},
  doi       = {10.1145/1966445.1966476},
  year      = {2011},
}

@inproceedings{3359,
  abstract  = {Motivated by improvements in constraint-solving technology and by the increase of routinely available computational power, partial-program synthesis is emerging as an effective approach for increasing programmer productivity. The goal of the approach is to allow the programmer to specify a part of her intent imperatively (that is, give a partial program) and a part of her intent declaratively, by specifying which conditions need to be achieved or maintained. The task of the synthesizer is to construct a program that satisfies the specification. As an example, consider a partial program where threads access shared data without using any synchronization mechanism, and a declarative specification that excludes data races and deadlocks. The task of the synthesizer is then to place locks into the program code in order for the program to meet the specification. In this paper, we argue that quantitative objectives are needed in partial-program synthesis in order to produce higher-quality programs, while enabling simpler specifications. Returning to the example, the synthesizer could construct a naive solution that uses one global lock for shared data. This can be prevented either by constraining the solution space further (which is error-prone and partly defeats the point of synthesis), or by optimizing a quantitative objective that models performance. Other quantitative notions useful in synthesis include fault tolerance, robustness, resource (memory, power) consumption, and information flow.},
  author    = {Černý, Pavol and Henzinger, Thomas A.},
  booktitle = {Proceedings of the Ninth {ACM} International Conference on Embedded Software ({EMSOFT})},
  location  = {Taipei, Taiwan},
  pages     = {149--154},
  publisher = {ACM},
  title     = {From {Boolean} to Quantitative Synthesis},
  doi       = {10.1145/2038642.2038666},
  year      = {2011},
}

@inproceedings{3357,
  abstract  = {We consider two-player graph games whose objectives are request-response condition, i.e., conjunctions of conditions of the form "if a state with property Rq is visited, then later a state with property Rp is visited". The winner of such games can be decided in EXPTIME and the problem is known to be NP-hard. In this paper, we close this gap by showing that this problem is, in fact, EXPTIME-complete. We show that the problem becomes PSPACE-complete if we only consider games played on DAGs, and NP-complete or PTIME-complete if there is only one player (depending on whether he wants to enforce or spoil the request-response condition). We also present near-optimal bounds on the memory needed to design winning strategies for each player, in each case.},
  author    = {Chatterjee, Krishnendu and Henzinger, Thomas A. and Horn, Florian},
  editor    = {Dediu, Adrian-Horia and Inenaga, Shunsuke and Martín-Vide, Carlos},
  booktitle = {Language and Automata Theory and Applications ({LATA} 2011)},
  series    = {Lecture Notes in Computer Science},
  location  = {Tarragona, Spain},
  pages     = {227--237},
  publisher = {Springer},
  title     = {The Complexity of Request-Response Games},
  doi       = {10.1007/978-3-642-21254-3_17},
  volume    = {6638},
  year      = {2011},
}

@article{336,
  abstract  = {The growth kinetics of colloidal Bi2S3 nanorods was investigated. After nucleation, the length distribution of the growing Bi2S3 nanorods narrows with the reaction time until a bimodal length distribution appears. From this critical reaction time on, the smallest nanorods of the ensemble dissolve, feeding with monomer the growth of the largest ones. A comprehensive characterization of the size-distribution evolution of Bi2S3 nanorods is used here to illustrate the dependences of the anisotropic growth rates of cylindrical nanoparticles on the nanoparticle dimensions and the monomer concentration in solution. With this goal in mind, a diffusion-reaction model is presented to explain the origin of the experimentally obtained length distribution focusing mechanism. The model is able to reproduce the decrease of the growth rate in the nanorod axial direction with both its thickness and length. On the other hand, low lateral reaction rates prevent the nanorod thickness distribution to be focused. In both crystallographic growth directions, a concentration-dependent critical thickness exists, which discriminates between nanorods with positive growth rates and those dissolving in the reaction solution.},
  author    = {Ibáñez, Maria and Guardia, Pablo and Shavel, Alexey and Cadavid, Doris and Arbiol, Jordi and Morante, Joan and Cabot, Andreu},
  journal   = {Journal of Physical Chemistry C},
  number    = {16},
  pages     = {7947--7955},
  publisher = {American Chemical Society},
  title     = {Growth Kinetics of Asymmetric {Bi$_2$S$_3$} Nanocrystals: Size Distribution Focusing in Nanorods},
  doi       = {10.1021/jp2002904},
  volume    = {115},
  year      = {2011},
}