% Entries to add to "others.bib" -- batch 6 % Last edited on 2022-11-14 03:36:12 by stolfi @article{abo-els-sal-kin-19-aa-volflu, author = {Abdel A. Abou El-Ela and Ragab A. El-Sehiemy and Eman Salah Ali and Abdel-Mohsen Kinawy}, title = {Minimisation of Voltage Fluctuation Resulted from Renewable Energy Sources Uncertainty in Distribution Systems}, journal = {IET Generation Transmission {\&} Distribution}, year = 2019, volume = {13} number = {12}, pages = {2339-2351}, month = jun, doi = {10.1049/iet-gtd.2018.5136}, url = {https://ietresearch.onlinelibrary.wiley.com/doi/abs/10.1049/iet-gtd.2018.5136}, comment = {Mentions complex number AA}, abstract = {The penetration of renewable energy sources (RESs) in distribution systems faces many issues due to their output uncertainty resulted from climate conditions. The uncertainty impacts on the voltage fluctuations are reduced by using a proposed bi-stage method. At first, the system voltage is controlled by determining the optimal setting of voltage-regulating devices such as voltage regulators, transformer tap changers and static VAR compensator. Then, the dispatchable distributed generation (DDGs) units are accompanied by the voltage regulating devices to achieve more reduction in the voltage fluctuations. In this line, unbalanced backward–forward sweep load flow method is formulated to analyse the unbalanced operation of three-phase distribution systems. The main objectives of the proposed method are to reduce voltage fluctuations to maintain voltage profile within its permissible limits. In addition, the cat swarm optimiser (CSO) is implemented to obtain the optimal planning of voltage regulating devices and DDGs to achieve the lowest uncertainty influence on the voltage fluctuations. The proposed method is applied to a real unbalanced IEEE 34-bus distribution test system. The highest capability of CSO algorithm, i.e. 
CSO provides the highest reduction on the voltage fluctuations, is proven compared with particle swarm optimisation, harmony search and water cycle algorithms.} } @inproceedings{adh-sch-gri-12-aa-varfreq, author = {Sumit Adhikari and Florian Schupfer and Christoph Grimm}, title = {Co-Simulation Framework for Variation Analysis of Radio Frequency Transceivers}, booktitle = {Proceedings of the 2012 System, Software, SoC and Silicon Debug Conference}, year = 2012, pages = {1-6}, doi = {}, url = {https://ieeexplore.ieee.org/abstract/document/6338154/}, note = {No DOI?}, comment = {}, abstract = {Co-simulation provides an architect or a designer the freedom to change the implementation of a sub-module and analyse the design. Variational analysis during design procedure is an important tool to ensure higher-yield during production. State of the art co-simulators uses Monte Carlo analysis method for variational analysis, which is a multi-run method, slow and outcomes are not completely covered. In this article we proposed a co-simulation environment which uses affine arithmetic as variational analysis method. The result is an efficient and completely covered co-simulation environment.} } @article{ahm-jin-zhu-jav-akr-20-aa-linetens, author = {Ashfaq Ahmad and Yi Jin and Changan Zhu and Iqra Javed and M. Waqar Akrama}, title = {Investigating Tension in Overhead High Voltage Power Transmission Line using Finite Element Method}, journal = {International Journal of Electrical Power {\&} Energy Systems}, year = 2020, volume = {114}, number = {}, pages = {article 105418}, month = jan, doi = {10.1016/j.ijepes.2019.105418}, comment = {mentions AA}, abstract = {Transmission line failure is a serious problem in developing countries. The major cause of high voltage overhead transmission line failure in power system is prolonged excessive current. 
The determination of line segments where high tension is generated due to prolonged excessive current under the different arrangements of line spacing and current carrying conditions can lead us to analyze high tension zones for efficient design of transmission lines. In this context, the mechanical tension on an overhead high voltage electrical power transmission line resulting from excessive or normal currents was computed using Finite Element Method. Multiple segments of a long transmission line with symmetrical and unsymmetrical spacing under accidental and normal line current are examined. Therein, the line segments with high tension are determined. Maximum integrated tension is found in middle segments of both symmetrical and unsymmetrical spaced transmission line. While, maximum per unit tension is found in initial and bottom segments. However, variation in magnitude is found for symmetrical and unsymmetrical spacing. The present study may provide technical guidance to the transmission line design engineers and technicians regarding the risk and solutions related to electrical power transmission line systems.} } @inproceedings{ahm-zwo-07-aa-dspword, author = {Arash Ahmadi and Mark Zwolinski}, title = {A Symbolic Noise Analysis Approach to Word-Length Optimization in {DSP} Hardware}, booktitle = {Proceedings of the 2007 International Symposium on Integrated Circuits (ISIC)}, year = 2007, pages ={457-460}, doi = {10.1109/ISICIR.2007.4441897}, comment = {Mentions AA}, abstract = {This paper addresses the problem of choosing different word-lengths for each functional unit in fixed-point implementations of DSP algorithms. A symbolic-noise analysis method is introduced for high-level synthesis of DSP algorithms in digital hardware, together with a vector evaluated genetic algorithm for multiple objective optimization. 
The ability of this method to combine word-length optimization with high-level synthesis parameters and costs to minimize the overall design cost is demonstrated by example designs.} } @inproceedings{ahm-zwo-08-aa-compopt, author = {Arash Ahmadi and Mark Zwolinski}, title = {Symbolic Noise Analysis Approach to Computational Hardware Optimization}, journal = {Proceedings of 45th ACM/IEEE Design Automation Conference}, year = 2008, doi = {10.1145/1391469.1391573}, pages = {391-396}, comment = {Uses AA, but probabilistic?}, abstract = {This paper addresses the problem of computational error modeling and analysis. Choosing different word-lengths for each functional unit in hardware implementations of numerical algorithms always results in an optimization problem of trading computational error with implementation costs. In this study, a symbolic noise analysis method is introduced for high-level synthesis, which is based on symbolic modeling of the error bounds where the error symbols are considered to be specified with a probability distribution function over a known range. The ability to combine word-length optimization with high-level synthesis parameters and costs to minimize the overall design cost is demonstrated using case studies.} } @article{akh-kir-jam-19-aa-hybsurv, author = {Iram Akhtar and Sheeraz Kirmani and Majid Jamil}, title = {State of the Ability in Research on Microgrid Hybrid Energy Systems}, journal = {Journal of Engineering Science and Technology Review}, volume = {12}, number = {5}, pages = {8-24}, year = 2019, doi = {10.25103/jestr.125.02}, comment = {Survey article. Mentions AA briefly}, abstract = {The power demand with interest in green power has focused researcher to develop the distributed power generation using wind energy, solar energy, etc. With the fast depleting fossil fuel reserves, energy security and environmental concerns there is a huge requirement of alternate sources of energy to fulfill the present energy demand. 
Distribution generating system can support the weak grids, maintaining grid power, managing balance power and enhancing the power quality. Any research work base depends on the literature survey and the studies carried out by several kinds of research and their support to research field encourages for more scope of research. The important advantages related to microgrids have led to huge efforts to grow their dispersion in the power systems. While microgrid is fast growing but there are still various challenges to efficiently control, design, and operate microgrids when linked to the main grid, and also when in islanded mode, where wide research actions are ongoing to handle these issues. This paper presents a review of issues concerning integration of renewable energy sources to microgrids and offers a description of research in areas related to economic feasibility of microgrids system, including dynamic analysis of microgrid hybrid system in grid-connected mode, application of power electronics, microgrid operation, and control.} } @inproceedings{ale-cha-mul-15-aa-tunepi, author = {Alexandre dit Sandretto, Julien and Chapoutot, Alexandre and Mullier, Olivier}, title = {Tuning {PI} Controller in Non-Linear Uncertain Closed-Loop Systems with Interval Analysis}, booktitle = {Proceedings of the 2nd International Workshop on Synthesis of Complex Parameters (SynCoP'15)}, pages = {91--102}, series = {OpenAccess Series in Informatics (OASIcs)}, volume = {44}, year = 2015, month = dec, doi = {10.4230/OASIcs.SynCoP.2015.91}, comment = {Uses AA?} abstract = {The tuning of a PI controller is usually done through simulation, except for few classes of problems, e.g., linear systems. With a new approach for validated integration allowing us to simulate dynamical systems with uncertain parameters, we are able to design guaranteed PI controllers. 
In practical, we propose a new method to identify the parameters of a PI controller for non-linear plants with bounded uncertain parameters using tools from interval analysis and validated simulation. This work relies on interval computation and guaranteed numerical integration of ordinary differential equations based on Runge-Kutta methods. Our method is applied to the well-known cruise-control problem, under a simplified linear version and with the aerodynamic force taken into account leading to a non-linear formulation.} } @inproceedings{ale-wan-18-aa-reach, author = {Alexandre dit Sandretto, Julien and Wan, Jian}, title = {Reachability Analysis of Nonlinear {ODEs} using Polytopic Based Validated {Runge}-{Kutta}}, booktitle = {Proceedings of the 2018 International Conference on Reachability Problems (RP)}, year = 2018, month = aug, series = {Lecture Notes in Computer Science}, volume = {11123}, pages = {1–14}, doi = {10.1007/978-3-030-00250-3_1}, comment = {Validated Runge-Kutta. Calls AA zonotopic computation. Replaces by general polytopes?} abstract = {Ordinary Differential Equations (ODEs) are a general form of differential equations. This mathematical format is often used to represent the dynamic behavior of physical systems such as control systems and chemical processes. Linear ODEs can usually be solved analytically while nonlinear ODEs may need numerical methods to obtain approximate solutions. There are also various developments for validated simulation of nonlinear ODEs such as explicit and implicit guaranteed Runge-Kutta integration schemes. The implicit ones are mainly based on zonotopic computations using affine arithmetics. It allows to compute the reachability of a nonlinear ODE with a zonotopic set as its initial value. 
In this paper, we propose a new validated approach to solve nonlinear ODEs with a polytopic set as the initial value using an indirectly implemented polytopic set computation technique.} } @phdthesis{alm-22-aa-thesis, author = {Jos{\'e} Eduardo de {Almeida Ayres}}, title = {On Interval Numerical Methods for Dynamical Systems in the Plane. {Part} {I}: Condensation Strategies for Affine Arithmetic. {Part} {II}: Interval Methods for Fixed and Periodic Points - Development and Visualization}, school = {Instituto de Matem{\'a}tica Pura e Aplicada (IMPA)}, year = 2022, month = jan, advisor = {Luiz Henrique de Figueiredo}, note = {Comprises two articles.}, url = {https://impa.br/wp-content/uploads/2022/05/dout_tese_Jose_Eduardo_de_Almeida_Ayres.pdf}, abstract = {(1) Condensation Strategies for Affine Arithmetic: We develop several condensation strategies for affine arithmetic and compare their performance in the numerical simulation of some discrete dynamical systems in the plane. (2) Interval Methods for Fixed and Periodic Points: Development and Visualization: See~\cite{alm-fig-20-aa-fixper}} } @incollection{alm-22-aa-condens, author = {Jos{\'e} Eduardo de {Almeida Ayres}}, title = {Condensation Strategies for Affine Arithmetic}, booktitle = {On Interval Numerical Methods for Dynamical Systems in the Plane}, publisher = {Instituto de Matem{\'a}tica Pura e Aplicada (IMPA)}, note = {First part of~\cite{alm-22-aa-thesis}.} pages = {1-92}, url = {https://impa.br/wp-content/uploads/2022/05/dout_tese_Jose_Eduardo_de_Almeida_Ayres.pdf}, abstract = {We develop several condensation strategies for affine arithmetic and compare their performance in the numerical simulation of some discrete dynamical systems in the plane.} } @article{alm-fig-20-aa-fixper, author = {Jos{\'e} Eduardo de {Almeida Ayres} and Luiz Henrique de {Figueiredo}}, title = {Interval Methods for Fixed and Periodic Points: {Development} and Visualization}, journal = {Journal of Universal Computer Science}, volume 
= {26}, number = {10}, year = 2020, pages = {1312-1330}, month = oct, doi = {10.3897/jucs.2020.068}, note = {Also reproduced as second part of~\cite{alm-22-aa-thesis}}, abstract = {We describe the development of rigorous numerical methods based on interval analysis for finding all fixed points of a map and all attracting periodic points of a complex polynomial. We also discuss their performance with instructive visualizations.} } @inproceedings{alt-gre-kod-18-aa-taycor, author = {Matthias Althoff and Dmitry Grebenyuk and Niklas Kochdumper}, title = {Implementation of {Taylor} Models in {CORA} 2018}, booktitle = {Proceedings of the 5th International Workshop on Applied Verification of Continuous and Hybrid Systems (ARCH 2018)}, year = 2018, series = {EPiC Series in Computing}, volume = {54}, pages = {145–173}, doi = {10.29007/zzc7} url = {https://mediatum.ub.tum.de/doc/1454477/file.pdf}, comment = {Implements IA, AA, and mixed in Matlab. Claims to be the first to evaluate mixed AA/IA?}, abstract = {Tool Presentation: Computing guaranteed bounds of function outputs when their input variables are bounded by intervals is an essential technique for many formal methods. Due to the importance of bounding function outputs, several techniques have been proposed for this problem, such as interval arithmetic, affine arithmetic, and Taylor models. While all methods provide guaranteed bounds, it is typically unknown to a formal verification tool which approach is best suitable for a given problem. For this reason, we present an implementation of the aforementioned techniques in our MATLAB tool CORA so that advantages and disadvantages of different techniques can be quickly explored without having to compile code. In this work we present the implementation of Taylor models and affine arithmetic; our interval arithmetic implementation has already been published. We evaluate the performance of our implementation using a set of benchmarks against Flow* and INTLAB. 
To the best of our knowledge, we have also evaluated for the first time how a combination of interval arithmetic and Taylor models performs: our results indicate that this combination is faster and more accurate than only using Taylor models.} } @article{alv-19-aa-radist, author = {Alves, Helton do Nascimento}, title = {An Interval Arithmetic-Based Power Flow Algorithm for Radial Distribution Network with Distributed Generation}, journal = {Journal of Control, Automation and Electrical Systems}, volume = {30}, pages = {802-811}, year = 2019, month = may, doi = {10.1007/s40313-019-00478-7}, comment = {Uses IA. Says AA is future work.}, abstract = {This paper presents a primary distribution system power flow analysis in the presence of uncertainties in distributed generation and loads. The algorithm is based on a backward/forward sweep power flow algorithm with power flow updates. The uncertainties are modelled by real compact intervals based on interval arithmetic. A simple and interactive method is used to consider the generator bus voltage controls and reactive power limits. Simulations are presented on a 69-bus, a 104-bus and a 282-bus test distribution system to verify the effectiveness of the proposed method. The power flow solution bounds obtained by the proposed algorithm are compared to those calculated using a Monte Carlo simulation. The results confirm the efficiency of the proposed method which makes it promising to solve real problems of power flow analysis in distribution feeders.} } @article{alv-sil-mom-bak-ros-ola-18-aa-therm, author = {Alvarez, David L. and Silva, F. Faria da and Mombello, Enrique E. and Bak, Claus Leth and Rosero, Javier A. 
and {\'O}lasond, Dan{\'i}el Le{\'o}},
Therefore, this paper addresses new developments in the interval current injection power flow. At each iteration, the resulting interval nonlinear system of equations is solved using Krawczyk method. The proposed developments are implemented in the Matlab environment using the Intlab toolbox. The IEEE 57-bus is used as test system. Results are later compared with the Monte Carlo Simulation and interval arithmetic.} } @article{ara-rey-16-aa-glopt, author = {Araya, Ignacio and Reyes, Victor}, title = {Interval Branch-and-Bound Algorithms for Optimization and Constraint Satisfaction: {A} Survey and Prospects}, journal = {Journal of Global Optimization}, volume = {65}, pages = {837–866}, year = 2016, month = aug, doi = {10.1007/s10898-015-0390-4}, comment = {Survey article. Mentions AA.} abstract = {Interval Branch and Bound algorithms are used to solve rigorously continuous constraint satisfaction and constrained global optimization problems. In this paper, we explain the basic principles behind interval Branch and Bound algorithms. We detail the main components and describe issues that should be considered to improve the efficiency of the algorithms.} } @misc{ara-tro-22-aa-taylor, author = {Araya, Ignacio and Trombettoni, Gilles}, title = {Taylorisation par Intervalles Convexe: {Premiers} R{\'e}sultats}, howpublished = {Online document}, note = {Article in English with abstract in French. Apparently submitted to 7{\`e}mes Journ{\'e}es Francophones de Programmation par Contraintes (JFPC 2011) but not accepted, or accpeted with a different title?}, year = 2022, month = jun, url = {http://www-sop.inria.fr/coprin/trombe/publis/xnewton_submittedjfpc_2011.pdf}, comment = {Generalizes AA to convex enclosures?}, abstract = {La taylorisation par intervalles est un outil math{\'e}matique important propos{\'e} dans les ann{\'e}es 1960 par Ramon E. Moore et la communaut{\'e} de l’analyse par intervalles. 
Elle permet de borner de mani`ere {\'e}l{\'e}gante le reste dans l’approximation polynomiale d’une fonction non convexe. La taylorisation par intervalles est l’ingr{\'e}dient de base des algorithmes de Newton sur intervalles qui peuvent r{\'e}soudre de mani{\`e}re fiable les syst{\`e}mes de contraintes non convexes, en prenant en compte les arrondis sur les nombres flottants et les incertitudes dans les donn{\'e}es. Malheureusement, {\`a} chaque it{\'e}ration de l’algorithme du Newton sur intervalles, l’approximation de l’ensemble des solutions g{\'e}n{\'e}r{\'e}e par la taylorisation par intervalles au premier ordre demeure non convexe. On ne peut donc a priori pas produire d’enveloppe convexe optimale en temps polynomial. Les seules sous-classes polynomiales connues ont peu d’int{\'e}rˆet en pratique. C’est pourquoi d’autres m{\'e}thodes de convexification connaissent un succ{\`e}s croissant, notamment l’arithm{\'e}tique affine. Il se trouve qu’une taylorisation par intervalles convexe a {\'e}t{\'e} ignor{\'e}e pendant des d{\'e}cennies, au moins dans son exploitation pratique. En choisissant un coin de la boˆıte {\'e}tudi{\'e}e comme point d’expansion, la taylorisation par intervalles extr{\^e}me produit une relaxation convexe (poly{\'e}drale) du syst{\`e}me dont on peut produire une enveloppe optimale en temps polynomial. Elle permet de construire une variante de l’algorithme de Newton sur intervalles, sans pr{\'e}conditionnement, qui peut contracter le domaine en de nombreux n{\oe}uds de l’arbre de recherche. 
Nous montrons que le choix du coin produisant la relaxation la plus fine est NP-difficile ainsi que des premi{\`e}res exp{\'e}rimentations en optimisation globale.} } @inproceedings{ara-tro-nev-12-aa-convtay, author = {Araya, Ignacio and Trombettoni, Gilles and Neveu, Bertrand}, title = {A Contractor based on Convex Interval {Taylor}}, booktitle = {Proceedings of the International Conference on Integration of Artificial Intelligence and Operations Research Techniques in Constraint Programming (CPAIOR)}, year = 2012, month = may, location = {Nantes, FR}, pages = {1–16}, series = {Lecture Notes in Computer Science}, volume = {7298}, isbn = {978-3-642-29827-1}, doi = {10.1007/978-3-642-29828-8_1}, comment = {Alternative to AA, compares with AA}, abstract = {nterval Taylor has been proposed in the sixties by the interval analysis community for relaxing continuous non-convex constraint systems. However, it generally produces a non-convex relaxation of the solution set. A simple way to build a convex polyhedral relaxation is to select a corner of the studied domain/box as expansion point of the interval Taylor form, instead of the usual midpoint. The idea has been proposed by Neumaier to produce a sharp range of a single function and by Lin and Stadtherr to handle n ×n (square) systems of equations. This paper presents an interval Newton-like operator, called X-Newton, that iteratively calls this interval convexification based on an endpoint interval Taylor. This general-purpose contractor uses no preconditioning and can handle any system of equality and inequality constraints. It uses Hansen’s variant to compute the interval Taylor form and uses two opposite corners of the domain for every constraint. The X-Newton operator can be rapidly encoded, and produces good speedups in constrained global optimization and constraint satisfaction. 
First experiments compare X-Newton with affine arithmetic.} } @misc{ara-tro-nev-11-aa-tayglop, author = {Araya, Ignacio and Trombettoni, Gilles and Neveu, Bertrand}, title = {Convex Interval {Taylorization} in Constrained Global Optimization}, howpublished = {Online document}, url = {http://www-sop.inria.fr/coprin/trombe/publis/xnewton_submitted.pdf} year = 2011, month = apr, note = {Date extracted from PDF properties}, comment = {Alternative to AA, compares with AA}, abstract = {Interval taylorisation has been proposed in the sixties by the interval analysis community for relaxing and filtering continuous constraint systems. Unfortunately, it generally produces a nonconvex relaxation of the solution set. A recent interval Branch & Bound for global optimization, called IbexOpt, generates a convex (polyhedral) approximation of the system at each node of the search tree by performing a specific interval taylorization. Following the works by Lin and Stadtherr, the idea is to select a corner of the studied domain/box as expansion point, instead of the usual midpoint. This paper studies how to better exploit this interval convexification. We first show that selecting the corner which produces the tightest relaxation is NP-hard. We then propose a greedy corner selection heuristic, a variant using several corners simultaneously and an interval Newton that iteratively calls this interval convexification. 
Experiments on a constrained global optimization benchmark highlight the best variants and allow a first comparison with affine arithmetic} } @inproceedings{ari-nap-mus-erw-tha-22-aa-capac, author = {Arief, Ardiaty and Nappu, Muhammad Bachtiar and Mustafa, Syahrul and Erwin and Thaha, Sarma}, title = {Optimal Capacitor Placement in a Dominant Induction Motor Loads Power System}, booktitle = {Proceedings of the 7th International Conference on Advances on Clean Energy Research (ICACER)}, month = dec, location = {Barcelona, ES}, series = {Energy Reports}, volume = {8}, number = {16}, year = 2022, pages = {592-597}, doi = {10.1016/j.egyr.2022.10.254}, comment = {Uses AA with modified division.}, abstract = {The challenge of optimal capacitor allocation is one of the complex problems in power systems, especially in large industries since they have many big capacity induction motors. In the literature, many works have been delivered for optimal capacitor placement, however, these works were simulated in a distribution network, not in an industry network with many large capacity induction motors. Therefore, this work proposes the optimal allocation of capacitors in a large industry with a significant amount of induction motor loads to minimize network losses. To determine the optimal capacitor location, this research uses the genetic algorithm (GA) method. This algorithm is interesting because of its simplicity and ability to discover optimal solutions comprehensively.} } @book{ash-lee-20-fmc, author = {Ashlock, Daniel and Lee, Colin}, title = {An Introduction to Proofs with Set Theory}, publisher = {Morgan {\&} Claypool}, doi = {10.2200/S01018ED1V01Y202006MAS035}, year = 2020, month = jun, pages = {249}, comment = {Table of contents is very much like our book \emph{Fundamentos ...}. Could he have copied it?}, abstract = {This text is intended as an introduction to mathematical proofs for students. 
It is distilled from the lecture notes for a course focused on set theory subject matter as a means of teaching proofs. [...]} } @article{aud-han-mes-nin-13-aa-octagon, author = {Audet, Charles and Hansen, Pierre and Messine, Fr{\'e}d{\'e}ric and Ninin, Jordan}, title = {The Small Octagons of Maximal Width}, journal = {Discrete {\&} Computational Geometry}, year = 2013, volume = {49}, pages = {589-600}, month = mar, doi = {10.1007/s00454-013-9489-x}, comment = {AA used to solve a problem of pure mathematics.}, abstract = {The paper answers an open problem introduced by Bezdek and Fodor (Arch. Math. 74:75–80, 2000). The width of any unit-diameter octagon is shown to be less than or equal to $\frac{1}{4}\sqrt{10+2\sqrt{7}}$ and there are infinitely many small octagons having this optimal width. The proof combines geometric and analytical reasoning as well as the use of a recent version of the deterministic and reliable global optimization code IBBA based on interval and affine arithmetics. The code guarantees a certified numerical accuracy of $1×10^{-7}$.} } @incollection{aue-alb-kec-lut-11-aa-stance, author = {Auer, Ekaterina and Albassam, Haider and Kecskem{\'e}thy, Andr{\'e}s and Luther, Wolfram}, title = {Verified Analysis of a Model for Stance Stabilization}, booktitle = {Modeling, Design, and Simulation of Systems with Uncertainties}, year = 2011, month = jan, pages = {293–308}, doi = {10.1007/978-3-642-15956-5_14}, isbn = {978-3-642-15955-8}, comment = {Application of AA to robotics.}, abstract = {The stabilization of stance is a subject of continuing research in biology, biomechanics and robotics. It plays an important role in many clinical applications as well as in forward dynamical gait simulation. In this paper, we propose a new model relying on a two cylinder foot contact scheme. This contact model has the advantage of simple and smooth dynamic behavior which in turn results in better efficiency in comparison with other contact models. 
However, a number of parameters in this model, such as position or mass of the pelvis, are known only with some uncertainty. To deal with the situation, we analyze the model using verifiedmethods, which includes propagating the uncertainty through the system and computing the sensitivities of the equations of motion in the first time interval. To perform verified simulations of the whole model, a verified initial value problem solver for a hybrid system is required, which can switch from one system of the equations of motion to the other depending on a certain switching function. While research in this direction remains a topic of high complexity, a simplified kinetostatic version of the model allows one to analyze the sensitivity of the model to parameter variations, as presented in this paper.} } @inproceedings{ayd-zan-22-aa-render, author = {Aydinlilar, Melike and Zanni, C{\'e}dric}, title = {Transparent Rendering and Slicing of Integral Surfaces using Per-Primitive Interval Arithmetic}, booktitle = {Proceedings of Eurographics 2022 - Short Papers}, location = {Reims, France}, year = 2022, month = apr, url = {https://hal.inria.fr/hal-03689606/}, comment = {Rendering of 3D convolution (brush-and-path) objects with IA. Mentions possible use of AA.}, abstract = {We present a method for efficient incorporation of integral surfaces within existing robust processing methods such as interval arithmetic and segment-tracing. We based our approach on high-level knowledge of the field function of the primitives. 
We show application to slicing and transparent rendering of integral surfaces based on interval arithmetic.} } @book{ayl-bel-gib-kno-20-aa-crowd, author = {Aylaj, Bouchra and Bellomo, Nicola and Gibelli, Livio and Knopoff, Dami{\'a}n}, title = {Crowd Dynamics by Kinetic Theory Modeling: {Complexity}, Modeling, Simulations, and Safety}, series = {Synthesis Lectures on Mathematics and Statistics}, volum = {}, year = 2020, month = oct, pages = {98}, publisher = {Morgan {\&} Claypool}, doi = {10.2200/S01055ED1V01Y202009MAS036}, comment = {Not clear whether or how it uses IA or AA}, abstract = {The contents of this brief Lecture Note are devoted to modeling, simulations, and applications with the aim of proposing a unified multiscale approach accounting for the physics and the psychology of people in crowds. The modeling approach is based on the mathematical theory of active particles, with the goal of contributing to safety problems of interest for the well-being of our society, for instance, by supporting crisis management in critical situations such as sudden evacuation dynamics induced through complex venues by incidents.} } @misc{bah-22-aa-chemeqs, author = {Baharev, Ali}, title = {Application of Interval Methods to Chemical Engineering Problems}, howpublished = {Online document}, year = 2009, month = may, note = {Date extracted from PDF metadata}, comment = {One-page abstract, maybe a submission to a conference. Could be summary of [bah-ach-rev-09-aa-distill]. Uses AA to design distillation columns.}, url = {https://reliablecomputing.eu/baharev_abs_en.pdf}, abstract = {The need of reliably solving large-scale system of nonlinear equations often arises in the everyday practice of chemical engineering. [\dots] A new general purpose root-finding algorithm has been proposed based on affine arithmetic and linear programming. The implementation is written in \texttt{C++} programming language. 
The method is fairly general, and is applicable to a wide variety of engineering problems. To the author's best knowledge, computation of distillation columns with interval methods had not been considered earlier in the literature.} } @article{bah-kol-rev-11-aa-mulsta, author = {Baharev, Ali and Kolev, Lubomir and R{\'e}v, Endre}, title = {Computing Multiple Steady States in Homogeneous Azeotropic and Ideal Two-Product Distillation}, journal = {Journal of the American Institute of Chemical Engineers}, volume = {57}, number = {6}, pages = {1485-1495}, year = 2011, month = jun, doi = {10.1002/aic.12362}, comment = {Uses mixed IA - AA to find all solutions to chemical steady-state equations.}, abstract = {Multiple steady states are typically discovered by tracing a solution path, including turning points. A new technique is presented here that does not follow this approach. The original problem is solved directly, without tracing a solution path. The proposed branch-and-prune algorithm is guaranteed to find all solutions automatically. Core components of the framework are affine arithmetic, constraint propagation, and linear programming. The \texttt{C++} implementation is available as an open-source solver and has an interface to the AMPL modeling environment. In certain difficult cases, only continuation methods have been reported to find the unstable solution automatically. The proposed method seems to be the first published alternative method in those cases. Although this article focuses mainly on distillation, the presented framework is fairly general and applicable to a wide variety of problems.
Further, computational results are given to demonstrate this.} } @misc{bah-rev-08-ab-compinc, author = {Baharev, Ali and R{\'e}v, Endre}, title = {Comparing Inclusion Techniques on Chemical Engineering Problems}, howpublished = {Online document submitted to the 13th GAMM-IMACS International Symposium on Scientific Computing, Computer Arithmetic, and Verified Numerical Computations (SCAN); not in the proceedings?}, year = 2008, month = sep, note = {A 2-page abstract.}, url = {https://reliablecomputing.eu/baharev-scan08-abstract.pdf}, abstract = {[\dots] Computing steady states of multistage separation processes requires solving large-scale nonlinear systems of equations. [\dots] computation of these problems with interval arithmetic have not yet been considered in the literature [\dots] The authors aim to compute steady states of homogeneous and heterogeneous azeotropic distillation columns with interval methods, keeping the algorithm as problem independent as possible. The results achieved so far are presented here. Numerical evidence published in the literature, e.g. [7, 8], seem to indicate superiority of the linear interval approximation (LIA, $L(x) = Ax+b$, $A$ is a real matrix), proposed by Kolev in a number of publications e.g. [9], compared to the traditional interval linear approximation (ILA, $L(x) = A(x - z) + f(z)$, $A$ is an interval matrix) such as the interval Newton method. LIA has the following advantages over ILA when applied to root-finding. (i) The solution set of the LIA has a much simpler form, the hull solution is straightforward: $X \cap - A^{-1} b$. (ii) Linear programming is directly applicable to prune the current box. The automatic computation of LIA is possible with affine arithmetic [10] which in turn (iii) automatically keeps track of correlation between the computed partial results yielding tighter enclosures. There is no significant difference in the computation time per iteration between LIA and ILA.
In [11] LIA and ILA are compared as linearization techniques applying them to chemical engineering problems of real complexity. The examples considered are highly structured and are full of dependency. LIA outperforms the traditional `textbook' interval Newton algorithm (IN/GS) by an order of magnitude in the case of the studied examples. Note that state-of-the-art variants of the interval Newton methods, e.g. [12, 13], also outperform the IN/GS used for comparison. Linear programming may be preferable as pruning technique for LIA because of its robustness. Considering the conclusions of [11], the \textbf{C++} class has been re-implemented, and the LP pruning method has been revised. The improvement is significant; real life medium-scale problems are successfully solved. Some of the problems used for comparison are suitable for benchmarks, they will be contributed soon.} } @article{bal-har-suf-11-aa-visimp, author = {Balsys, Ronald J. and Harbinson, Dirk J. and Suffern, Kevin G.}, title = {Visualizing Nonmanifold and Singular Implicit Surfaces with Point Clouds}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {18}, number = {2}, pages = {188-201}, year = 2011, month = apr, doi = {10.1109/TVCG.2011.79}, comment = {Uses a fast test that is claimed to improve on ``naive'' IA and AA}, abstract = {We use octree spatial subdivision to generate point clouds on complex nonmanifold implicit surfaces in order to visualize them. The new spatial subdivision scheme only uses point sampling and an interval exclusion test. The algorithm includes a test for pruning the resulting plotting nodes so that only points in the closest nodes to the surface are used in rendering. This algorithm results in improved image quality compared to the naive use of intervals or affine arithmetic when rendering implicit surfaces, particularly in regions of high curvature. We discuss and compare CPU and GPU versions of the algorithm.
We can now render nonmanifold features such as rays, ray-like tubes, cusps, ridges, thin sections that are at arbitrary angles to the octree node edges, and singular points located within plot nodes, all without artifacts. Our previous algorithm could not render these without severe aliasing. The algorithm can render the self-intersection curves of implicit surfaces by exploiting the fact that surfaces are singular where they self-intersect. It can also render the intersection curves of two implicit surfaces. We present new image space and object space algorithms for rendering these intersection curves as contours on one of the surfaces. These algorithms are better at rendering high curvature contours than our previous algorithms. To demonstrate the robustness of the node pruning algorithm we render a number of complex implicit surfaces such as high order polynomial surfaces and Gaussian curvature surfaces. We also compare the algorithm with ray casting in terms of speed and image quality. For the surfaces presented here, the point clouds can be computed in seconds to minutes on a typical Intel based PC. Once this is done, the surfaces can be rendered at much higher frame rates to allow some degree of interactive visualization.} } @incollection{bal-jab-moh-20-aa-natrans, author = {Balaei-sani, Salar and Jabari, Farkhondeh and Mohammadi-Ivatloo, Behnam}, title = {Stochastic Analysis of Gas-Electricity Hybrid Grid Using {Nataf} Transformation Combined with Point Estimation Method}, booktitle = {Integration of Clean and Sustainable Energy Resources and Storage in Multi-Generation Systems}, year = 2020, month = jul, doi = {10.1007/978-3-030-42420-6_13}, isbn = {978-3-030-42419-0}, pages = {259-281}, comment = {Does not use AA? But describes someone else's work that does.}, abstract = {Nowadays, the high fluctuations of the wind products causing large-scale energy systems are operated at their stability margins.
In a gas-power interconnected system, if the wind power is insufficient and the electrical demand increases, more natural gas will be consumed by the conventional thermal power plants. Therefore, not only the optimum operating point of the electrical grid will change, but also the volume of the gas extracted from the suppliers, the pressure of the gas nodes, and the gas flow of the pipelines will vary. Hence, this chapter introduces a probabilistic optimal power flow (P-OPF) for modeling the uncertainties associated with the correlated input random variables (RVs). Moreover, the point estimate method (PEM) is used for generating $2m + 1$ stochastic scenarios in a way that the correlated wind speeds with Weibull and loads with normal distribution functions are transformed into the independent normal distribution using the Nataf transformation. The accuracy, calculation time, and the applicability of the PEM in probabilistic analysis of gas-power nexus are proven by simulations on IEEE RTS 24-bus electrical network and 20-node gas grid under generalized algebraic mathematical modeling system (GAMS).} } @inproceedings{ban-cas-men-mic-10-aa-wdlenkle, author = {Banciu, Andrei and Casseau, Emmanuel and Menard, Daniel and Michel, Thierry}, title = {A Case Study of the Stochastic Modeling Approach for Range Estimation}, booktitle = {Proceedings of the 2010 Conference on Design and Architectures for Signal and Image Processing (DASIP)}, year = 2010, month = oct, doi = {10.1109/DASIP.2010.5706256}, pages = {128-135}, comment = {Claims that IA and AA are too pessimistic for word length estimation in DSP. Proposes another method based on Karhunen-Loève expansion (KLE).}, abstract = {The floating-point to fixed-point conversion is an important part of the hardware design in order to obtain efficient implementations. When trying to optimize the integer word-length under performance constraints, the dynamic variations of the variables during execution must be determined. 
Traditional range estimation methods based on simulations are data dependent and time consuming whereas analytical methods like interval and affine arithmetic give pessimistic results as they lack of a statistical background. Recently, a novel approach, based on the Karhunen-Loève expansion (KLE) was presented for linear time-invariant (LTI) systems offering a solid stochastic foundation. Our paper presents an implementation of this theory and shows its efficiency for an OFDM modulator test case study. We also present a review of the uncertainty quantifications problem and the different phases of the range estimation methodology.} } @inproceedings{ban-cas-men-mic-11-aa-dsp, author = {Banciu, Andrei and Casseau, Emmanuel and Menard, Daniel and Michel, Thierry}, title = {Stochastic Modeling for Floating-Point to Fixed-Point Conversion}, booktitle = {Proceedings of the 2011 IEEE Workshop on Signal Processing Systems (SiPS)}, year = 2011, month = oct, comment = {Claims that IA and AA are too pessimistic for word length estimation in DSP. Proposes another method based on Karhunen-Loève expansion (KLE).}, abstract = {The floating-point to fixed-point transformation process is error prone and time consuming as the distortion introduced by the limited data size is difficult to evaluate. In this paper a method to estimate the range of variables in LTI systems with respect to the corresponding overflow probability is presented. Furthermore, we will show that the quantization noise evaluation can be realized using the same approach. The variance and the probability density function of the error are computed. 
The results obtained for several typical applications are presented.} } @inproceedings{bar-bec-dar-19-aa-smt, author = {Bard, Joachim and Becker, Heiko and Darulova, Eva}, title = {Formally Verified Roundoff Errors using {SMT}-based Certificates and Subdivisions}, booktitle = {Formal Methods - The Next 30 Years: Proceedings of the 2019 International Symposium on Formal Methods (FM)}, year = 2019, month = oct, location = {Oporto, PT}, pages = {38-44}, series = {Lecture Notes in Computer Science}, volume = {11800}, doi = {10.1007/978-3-030-30942-8_4}, comment = {Estimating roundoff and overflow errors in DSP. Says that the popular FloVer tool uses AA. Proposes a Satisfiability Modulo Theory (SMT) based range estimator that tracks non-linear correlations. Claims it is better than IA and AA, that ``report a spurious division by zero error'' (?).}, abstract = {When compared to idealized, real-valued arithmetic, finite precision arithmetic introduces unavoidable errors, for which numerous tools compute sound upper bounds. To ensure soundness, providing formal guarantees on these complex tools is highly valuable. In this paper we extend one such formally verified tool, FloVer. First, we extend FloVer with an SMT-based domain using results from an external SMT solver as an oracle. Second, we implement interval subdivision on top of the existing analyses.
Our evaluation shows that these extensions allow FloVer to efficiently certify more precise bounds for nonlinear expressions.} } @inproceedings{bar-fur-gla-gri-hed-16-aa-anamix, author = {Barke, Erich and F{\"u}rtig, Andreas and Gl{\"a}ser, Georg and Grimm, Christoph and Hedrich, Lars and Heinen, Stefan and Hennig, Eckhard and Lee, Hyun-Sek Lukas and Nebel, Wolfgang and Nitsche, Gregor and Olbrich, Markus and Radoji{\v{c}}i{\'c}, {\v{C}}arna and Speicher, Fabian}, title = {Embedded Tutorial: {Analog}-/Mixed-Signal Verification Methods for {AMS} Coverage Analysis}, booktitle = {Proceedings of the 2016 Design, Automation {\&} Test in Europe Conference {\&} Exhibition (DATE)}, year = 2016, month = mar, location = {Dresden, DE}, pages = {1102-1111}, isbn = {978-3-9815-3707-9}, doi = {10.3850/9783981537079_1010}, comment = {Overview of many methods, including a description of Extended AA (XAA) that stores a set of polytopes instead of a single polytope. That allows conditional and iterative operations.}, abstract = {Analog-/Mixed-Signal (AMS) design verification is one of the most challenging and time consuming tasks of todays complex system on chip (SoC) designs. In contrast to digital system design, AMS designers have to deal with a continuous state space of conservative quantities, highly nonlinear relationships, non-functional influences, etc. enlarging the number of possibly critical scenarios to infinity. In this special session we demonstrate the verification of functional properties using simulative and formal methods. We combine different approaches including automated abstraction and refinement of mixed-level models, state-space discretization as well as affine arithmetic.
To reach sufficient verification coverage with reasonable time and effort, we use enhanced simulation schemes to avoid conventional simulation drawbacks.}, } @inproceedings{bar-gra-gra-hed-hei-pop-ste-wan-09-aa-anacir, author = {Barke, Erich and Grabowski, Darius and Graeb, Helmut and Hedrich, Lars and Heinen, Stefan and Popp, Ralf and Steinhorst, Sebastian and Wang, Yifan}, title = {Formal Approaches to Analog Circuit Verification}, booktitle = {Proceedings of the 2009 Design, Automation {\&} Test in Europe Conference {\&} Exhibition}, year = 2009, month = apr, doi = {10.1109/DATE.2009.5090759}, pages = {724-729}, comment = {Survey paper. Has a substantial section on AA used for non-linear electronic circuit simulation; compares with IA.}, abstract = {For a speed-up of analog design cycles to keep up with the continuously decreasing time to market, iterative design refinement and redesigns are more than ever regarded as showstoppers. To deal with this issue, referred to as design and verification gap, the development of a continuous and consistent verification is mandatory. In digital design, formal verification methods are considered as a key technology for efficient design flows. However, industrial availability of formal methods for analog circuit verification is still negligible despite a growing need. In recent years, research institutions have made considerable advances in the area of formal verification of analog circuits. This paper presents a selection of four recent approaches in analog verification that cover a broad scope of verification philosophies.} } @inproceedings{bar-kar-luw-sal-hed-olb-rad-sch-12-aa-robcirc, author = {Barke, Martin and K{\"a}rgel, Michael and Lu, W. and Salfelder, F. and Hedrich, L. and Olbrich, Markus and Radetzki, M.
and Schlichtmann, U.}, title = {Robustness Validation of Integrated Circuits and Systems}, booktitle = {Proceedings of the 2012 4th Asia Symposium on Quality Electronic Design (ASQED)}, year = 2012, month = jul, pages = {145-154}, doi = {10.1109/ACQED.2012.6320491}, comment = {Uses AA to study robustness of analog and mixed signal circuits.}, abstract = {Robust system design is becoming increasingly important, because of the ongoing miniaturization of integrated circuits, the increasing effects of aging mechanisms, and the effects of parasitic elements, both intrinsic and external. For safety reasons, particular emphasis is placed on robust system design in the automotive and aerospace sectors. Until now, the term robustness has been applied very intuitively and there has been no proper way to actually measure robustness. However, the complexity of contemporary systems makes it difficult to fulfill tight specifications. For this reason, robustness must be integrated into a partially automated design flow. In this paper, a new approach to robustness modeling is presented, in addition to new ways to quantify or assess the robustness of a design. To demonstrate the flexibility of the proposed approach, it is adapted and applied to several different scenarios. 
These include the robustness evaluation of digital circuits under aging effects, such as NBTI; the robustness modeling of analog and mixed signal circuits using affine arithmetic; and the robustness study of software algorithms on a high system level.} } @article{bar-kar-olb-sch-14-aa-robcirc, author = {Barke, Martin and K{\"a}rgel, Michael and Olbrich, Markus and Schlichtmann, Ulf}, title = {Robustness Measurement of Integrated Circuits and Its Adaptation to Aging Effects}, journal = {Microelectronics Reliability}, year = 2014, month = jul, volume = {54}, number = {6-7}, pages = {1058-1065}, doi = {10.1016/j.microrel.2014.01.012}, comment = {Uses AA for robustness analysis of analog and mixed electronic circuits.}, abstract = {Even though nearly everybody has an intuitive understanding of what robustness means and even though the demands on system durability and its immunity against perturbation are getting increasingly important, there is no proper way how to measure robustness of integrated circuits already during the design phase. We therefore present a robustness model and methods of how to measure this quality. The methods can be directly integrated in today's partially automated design flows. We demonstrate the broad applicability of our model by different use cases including degradation of digital circuits due to aging effects and the analysis of analog/mixed-signal circuits.} } @inproceedings{bar-par-sen-16-aa-powspec, author = {Barrois, Benjamin and Parashar, Karthick and Sentieys, Olivier}, title = {Leveraging Power Spectral Density for Scalable System-Level Accuracy Evaluation}, booktitle = {Proceedings of the 2016 Design, Automation {\&} Test in Europe Conference {\&} Exhibition (DATE)}, year = 2016, month = mar, doi = {10.3850/9783981537079_0204}, pages = {750-755}, comment = {Mentions AA.
Perhaps only en passant?}, abstract = {The choice of fixed-point word-lengths critically impacts the system performance by impacting the quality of computation, its energy, speed and area. Making a good choice of fixed-point word-length generally requires solving an NP-hard problem by exploring a vast search space. Therefore, the entire fixed-point refinement process becomes critically dependent on evaluating the effects of accuracy degradation. In this paper, a novel technique for the system-level evaluation of fixed-point systems, which is more scalable and that renders better accuracy, is proposed. This technique makes use of the information hidden in the power-spectral density of quantization noises. It is shown to be very effective in systems consisting of more than one frequency sensitive components. Compared to state-of-the-art hierarchical methods that are agnostic to the quantization noise spectrum, we show that the proposed approach is 5x to 500x more accurate on some representative signal processing kernels.} } @phdthesis{fre-15-aa-optarith-th, author = {Frechtling, Michael}, title = {Automated Dynamic Error Analysis Methods for Optimization of Computer Arithmetic Systems}, school = {University of Sydney}, note = {Advisor: Philip H. W. Leong}, year = 2015, comment = {Mostly about FP rounding errors. But describes AA and a variation where all roundoff errors are summarized in just one noise symbol}, abstract = {Computer arithmetic is one of the more important topics within computer science and engineering. The earliest implementations of computer systems were designed to perform arithmetic operations and most if not all digital systems will be required to perform some sort of arithmetic as part of their normal operations.
This reliance on the arithmetic operations of computers means the accurate representation of real numbers within digital systems is vital, and an understanding of how these systems are implemented and their possible drawbacks is essential in order to design and implement modern high performance systems. At present the most widely implemented system for computer arithmetic is the IEEE754 Floating Point system, while this system is deemed to the be the best available implementation it has several features that can result in serious errors of computation if not implemented correctly. Lack of understanding of these errors and their effects has lead to real world disasters in the past on several occasions. Systems for the detection of these errors are highly important and fast, efficient and easy to use implementations of these detection systems is a high priority. Detection of floating point rounding errors normally requires run-time analysis in order to be effective. Several systems have been proposed for the analysis of floating point arithmetic including Interval Arithmetic, Affine Arithmetic and Monte Carlo Arithmetic. While these systems have been well studied using theoretical and software based approaches, implementation of systems that can be applied to real world situations has been limited due to issues with implementation, performance and scalability. The majority of implementations have been software based and have not taken advantage of the performance gains associated with hardware accelerated computer arithmetic systems. This is especially problematic when it is considered that systems requiring high accuracy will often require high performance. The aim of this thesis and associated research is to increase understanding of error and error analysis methods through the development of easy to use and easy to understand implementations of these techniques}, url = {http://phwl.org/assets/images/2017/10/frechtling15.pdf}, quotes = {...
Several systems have been proposed for the analysis of floating point arithmetic including Interval Arithmetic, Affine Arithmetic and Monte Carlo Arithmetic. While these systems have ...} } @misc{ben-cho-pur-bon-18-ab-fpgabits, author = {Benara, Vinamra and Choudhury, Ziaul and Purini, Suresh and Bondhugula, Uday}, title = {Synthesizing Power and Area Efficient Image Processing Pipelines on {FPGAs} using Customized Bit-widths}, howpublished = {Online document at the arXiv-CS repository, ID 1803.02660, version 3}, year = 2018, month = dec, url = {https://arxiv.org/abs/1803.02660v3}, doi = {10.48550/arXiv.1803.02660}, comment = {Describes AA use for bit width choice in image processing FPGAs. Criticizes and then proposes an alternative using Satisfiability Modulo Theory (SMT).}, abstract = {High-level synthesis (HLS) has received significant attention in recent years for improving programmability of FPGAs. One could raise the level of abstraction further by using domain-specific languages (DSLs), improving productivity and performance simultaneously. PolyMage is a domain-specific language and compiler for image processing pipelines. Its PolyMage-HLS backend translates an input expressed as a DAG of image processing stages through the DSL into an equivalent circuit that can be synthesized on FPGAs, while leveraging an HLS suite. The power and area savings while performing arithmetic operations on fixed-point data type are well known to be significant over using floating-point data type. PolyMage-HLS stores data at each stage of a pipeline using a fixed-point data type ($\alpha$, $\beta$) where $\alpha$ and $\beta$ denote the number of integral and fractional bits. The integral bitwidth ($\alpha$) requirement at a pipeline stage can be inferred from its range. In this paper, we first propose an interval-arithmetic based range analysis algorithm to estimate the number of bits required to store the integral part of the data at each stage of an image processing pipeline.
The analysis algorithm uses the homogeneity of pixel signals at each stage to cluster them and perform a combined range analysis. Secondly, we propose a software architecture for easily deploying any kind of interval/affine arithmetic based range analyses in the DSL compiler. Thirdly, we show that interval/affine arithmetic based techniques fail to take into account correlated computations across stages and hence could lead to poor range estimates. These errors in range estimates accumulate across stages, especially for iterative programs, such as Horn-Schunck Optical Flow, resulting in estimates nearly unusable in practice. Then, we propose a new range analysis technique using Satisfiability Modulo Theory (SMT) solvers, and show that the range estimates obtained through it are very close to the lower bounds obtained through profile-driven analysis. Finally, for estimating fractional bitwidth ($\beta$) requirement at each stage of the pipeline, we propose a simple and practical heuristic search algorithm, which makes very few profile passes, as opposed to techniques such as simulated annealing used in prior work. The analysis algorithm attempts to minimize the number of fractional bits required at each stage while preserving an application-specific quality metric. We evaluated our bitwidth analysis algorithms on four image processing benchmarks listed in the order of increasing complexity: Unsharp Mask, Down-Up Sampling, Harris Corner Detection and Horn-Schunck Optical Flow. The performance metrics considered are quality, power and area. 
For example, on Optical Flow, the interval analysis based approach showed an 1.4$\times$ and 1.14$\times$ improvement on area and power metrics over floating-point representation respectively; whereas the SMT solver based approach showed 2.49$\times$ and 1.58$\times$ improvement on area and power metrics when compared to interval analysis.} } @inproceedings{ber-her-ale-cha-21-aa-control, author = {Bertin, Etienne and H{\'e}riss{\'e}, Bruno and Alexandre dit Sandretto, Julien and Chapoutot, Alexandre}, title = {Spatio-Temporal Constrained Zonotopes for Validation of Optimal Control Problems}, booktitle = {Proceedings of the 60th IEEE Conference on Decision and Control (CDC)}, year = 2021, month = dec, pages = {6708-6713}, doi = {10.1109/CDC45484.2021.9683301}, comment = {Calls AA ``operations on zonotopes''. Applies it to systems control.}, abstract = {A controlled system subject to dynamics with unknown but bounded parameters is considered. The control is defined as the solution of an optimal control problem, which induces hybrid dynamics. A method to enclose all optimal trajectories of this system is proposed. Using interval and zonotope based validated simulation and Pontryagin’s Maximum Principle, a characterization of optimal trajectories, a conservative enclosure is constructed. The usual validated simulation framework is modified so that possible trajectories are enclosed with spatio-temporal zonotopes that simplify simulation through events. Then optimality conditions are propagated backward in time and added as constraints on the previously computed enclosure. The obtained constrained zonotopes form a thin enclosure of all optimal trajectories that is less susceptible to accumulation of error. 
This algorithm is applied on Goddard’s problem, an aerospace problem with a bang-bang control.} } @mastersthesis{bia-16-aa-pow-th, author = {Bianca Maria Costa Ara{\'u}jo}, title = {Aritm{\'e}ticas Intervalares Aplicadas {\`a} Solu{\c{c}}{\~a}o do Problema de Fluxo de Pot{\^e}ncia via Equa{\c{c}}{\~o}es de Inje{\c{c}}{\~a}o de Corrente}, school = {Federal University of Juiz de Fora}, note = {Advisor: Vander Menengoy da Costa}, year = 2016, month = feb, url = {http://repositorio.ufjf.br/jspui/bitstream/ufjf/2339/1/biancamariacostaaraujo.pdf}, abstract = {Power flow analysis typically uses a given set of generation and loading profiles to determine steady state operating conditions of electric power systems. When the input data are imprecise, several scenarios need to be analysed to cover the range of uncertainties. Under these conditions, it is necessary to utilise algorithms to incorporate the effect of the uncertainties within the power flow analysis. As an alternative solution to this issue, a new method has been proposed, based on the use of affine arithmetic. This alternative technique has been developed to improve the self-validated numerical analysis. Within affine arithmetic, the quantities of interest are represented by affine combinations of certain primitive variables. The affine combinations can signify both the source of the uncertainties in the data and the approximations during calculations. This technique is incorporated at the power flow which is expressed in terms of current injection equations, with the voltages represented in the rectangular form. 
The proposed results are later compared with the Monte Carlo Simulation and interval arithmetic, both of which solve the same issue: uncertainties in the power flow analysis of electric power grids.} } @misc{bid-gou-put-22-aa-reach, author = {Bidet, Fran{\c{c}}ois and Goubault, {\'E}ric and Putot, Sylvie}, title = {Work in Progress: {Reachability} Analysis for Time-triggered Hybrid Systems, The Platoon Benchmark}, howpublished = {Online document.}, year = 2022, url = {http://www.lix.polytechnique.fr/Labo/Sylvie.Putot/Publications/fvps18.pdf}, comment = {Uses a combination of AA and Taylor models to solve reachability of hybrid continuous/discrete state systems}, abstract = {This article presents an extension of the method of [1] to time-triggered hybrid systems, providing over- and under-approximations of the set of reachable states. Our results on the vehicles platoon benchmark [2] compare favorably to the state of the art tools Flow* and SpaceEx, with more precise over-approximations. Moreover, we provide a measure of the approximation's accuracy using the ratio of under- to overapproximation.} } @phdthesis{bla-13-aa-optan-th, author = {Blair, James}, title = {Real Option Analysis in Resilient Energy Networks}, school = {University of Manchester, UK}, note = {Advisor: John Moriarty}, year = 2013, url = {https://www.research.manchester.ac.uk/portal/files/54533724/FULL_TEXT.PDF}, comment = {Mentions AA but does not use it. Reports claim that it is worse than IA.}, abstract = {The resilience of future power systems are being challenged in three fronts: (i) decarbonising energy supply will alter supply mix; (ii) shift of previous non-electric demand onto the energy network will require the system to work at higher capacity; and (iii) expected changes in climate will alter demand and performance of electrical network components.
This thesis quantitatively assesses the impact of future climate change on the resilience of a power system, in secure and hazardous conditions. This is done through the use of reliability indices and probabilistic security assessment. Dynamical thermal ratings of circuits are used throughout this thesis given their potential for increased capacity over the standard static ratings. The first finding is that the predicted future climate scenarios will result in components with lower thermal ratings then if used currently. Due to this, it is found that the reliability of the system decreases under further climate scenarios. In order to keep a satisfactory level of reliability in the system, a method of temporary overloaded circuits is introduced which doesn’t result in a higher risk of component failure. The temporary overload method allows for the rating constraint to be violated provided the temperature constraint isn’t. Applying this to the system, and assessing the results under various climate scenarios, it is found that the method is beneficial in terms of economical cost and system reliability. When applied to hazardous conditions, it is found the method has a higher potential to strengthen the reliability of the system in comparison to when used on the ‘safe’ system. An approach is taken to aid the system operator in decision making under uncertain conditions. A scenario is devised in which an operator wants to plan the power dispatch for a future time period. This is done through the use of stochastic optimisation, where the uncertainty is encapsulated by the conductor ratings which are calculated using dynamical thermal ratings in which the weather parameters are stochastic. This is developed for a one and two period model, in which the two period model has the first and second period coupled through the addition of a ramp rate constraint in the optimisation. 
System adequacy indices and probabilistic security indices are added as constraints so the system operator can control the reliability of his system} } @inproceedings{bol-con-10-aa-autopol, author = {Boland, David and Constantinides, George A.}, title = {Automated Precision Analysis: {A} Polynomial Algebraic Approach}, booktitle = {Proceedings of the 18th IEEE Annual International Symposium on Field-Programmable Custom Computing Machines}, year = 2010, month = may, pages = {157--164}, doi = {10.1109/FCCM.2010.32}, comment = {Describes their own method to analyze roundoff errors of FP computations. Applies to one iteration of conjugate gradient. Claims better than AA.}, abstract = {When migrating an algorithm onto hardware, the potential saving that can be obtained by tuning the precision used in the algorithm to meet a range or error specification is often overlooked; the major reason is that it is hard to choose a number system which can guarantee any such specification can be met. Instead, the problem is mitigated by opting to use IEEE standard single or double precision so as to be `no worse' than a software implementation. However, the flexibility in the number representation is one of the key factors that can only be exploited on FPGAs, unlike GPUs and general purpose processors, and hence ignoring this potential significantly limits the performance achievable on an FPGA. To this end, this paper describes a tool which analyses algorithms with given input ranges under a finite precision to provide information that could be used to tune the hardware to the algorithm specifications. We demonstrate the proposed procedure on an iteration of the conjugate gradient algorithm, achieving a reduction in slices of over 40% when meeting the same error specification found by traditional methods. 
We also show it achieves comparable bounds to recent literature in a small fraction of the execution time, with greater scalability.}, url = {https://ieeexplore.ieee.org/abstract/document/5474056/}, quotes = {... It is argued in this paper that this method can achieve significantly tighter bounds than both interval and affine arithmetic, while running significantly faster, with better scalability, than the ...} } @article{bol-con-12-aa-scalpre-j, author = {Boland, David and Constantinides, George A.}, title = {A Scalable Precision Analysis Framework}, journal = {IEEE Transactions on Multimedia}, year = 2012, month = feb, volume = {15}, number = {2}, pages = {242--256}, doi = {10.1109/TMM.2012.2231666}, comment = {Describes their own method to analyze precision of floating-point errors}, abstract = {In embedded computing, typically some form of silicon area or power budget restricts the potential performance achievable. For algorithms with limited dynamic range, custom hardware accelerators manage to extract significant additional performance for such a budget via mapping operations in the algorithm to fixed-point. However, for complex applications requiring floating-point computation, the potential performance improvement over software is reduced. Nonetheless, custom hardware can still customize the precision of floating-point operators, unlike software which is restricted to IEEE standard single or double precision, to increase the overall performance at the cost of increasing the error observed in the final computational result. Unfortunately, because it is difficult to determine if this error increase is tolerable, this task is rarely performed. We present a new analytical technique to calculate bounds on the range or relative error of output variables, enabling custom hardware accelerators to be tolerant of floating point errors by design. 
In contrast to existing tools that perform this task, our approach scales to larger examples and obtains tighter bounds, within a smaller execution time. Furthermore, it allows a user to trade the quality of bounds with execution time of the procedure, making it suitable for both small and large-scale algorithms.} } @inproceedings{bol-con-12-aa-scalpre-c, author = {Boland, David and Constantinides, George A.}, title = {A Scalable Approach for Automated Precision Analysis}, booktitle = {Proceedings of the 2012 ACM/SIGDA International Symposium on Field Programmable Gate Arrays (FPGA)}, year = 2012, month = feb, pages = {185--194}, doi = {10.1145/2145694.2145726}, note = {See journal version [bol-con-12-aa-scalpre-j].}, comment = {Analysis of FP rounding errors. Mentions AA but does not use it. Compares?}, abstract = {The freedom over the choice of numerical precision is one of the key factors that can only be exploited throughout the datapath of an FPGA accelerator, providing the ability to trade the accuracy of the final computational result with the silicon area, power, operating frequency, and latency. However, in order to tune the precision used throughout hardware accelerators automatically, a tool is required to verify that the hardware will meet an error or range specification for a given precision. Existing tools to perform this task typically suffer either from a lack of tightness of bounds or require a large execution time when applied to large scale algorithms; in this work, we propose an approach that can both scale to larger examples and obtain tighter bounds, within a smaller execution time, than the existing methods. The approach we describe also provides a user with the ability to trade the quality of bounds with execution time of the procedure, making it suitable within a word-length optimization framework for both small and large-scale algorithms. 
We demonstrate the use of our approach on instances of iterative algorithms to solve a system of linear equations. We show that because our approach can track how the relative error decreases with increasing precision, unlike the existing methods, we can use it to create smaller hardware with guaranteed numerical properties. This results in a saving of 25% of the area in comparison to optimizing the precision using competing analytical techniques, whilst requiring a smaller execution time than the these methods, and saving almost 80% of area in comparison to adopting IEEE double precision arithmetic.} } @article{sha-akb-16-aa-powilp, author = {Shaban Boloukat, Mohammad Hadi and Akbari Foroud, Asghar}, title = {Stochastic-based Resource Expansion Planning for a Grid-Connected Microgrid using Interval Linear Programming}, journal = {Energy}, year = 2016, month = oct, volume = {113}, pages = {776--787}, doi = {10.1016/j.energy.2016.07.099}, comment = {Uses interval linear programming (ILP) which is related to AA.}, abstract = {This paper represents a stochastic approach for long-term optimal resource expansion planning of a grid-connected microgrid (MG) containing different technologies as intermittent renewable energy resources, energy storage systems and thermal resources. Maximizing profit and reliability, along with minimizing investment and operation costs, are major objectives which have been considered in this model. Also, the impacts of intermittency and uncertainty in renewable energy resources were investigated. The interval linear programming (ILP) was applied for modelling inherent stochastic nature of the renewable energy resources. ILP presents some superiority in modelling of uncertainties in MG planning. The problem was formulated as a mixed-integer linear programming. It has been demonstrated previously that the benders decomposition (BD) served as an effective tool for solving such problems. 
BD divides the original problem into a master (investment) problem and operation and reliability subproblems. In this paper a multiperiod MG planning is presented, considering life time, maximum penetration limit of each technology, interest rate, capital recovery factor and investment fund. Real-time energy exchange with the utility is covered, with a consideration of variable tariffs at different load blocks. The presented approach can help MG planners to adopt best decision under various uncertainty levels based on their budgetary policies.} } @inproceedings{bon-des-pel-men-18-aa-aparith, author = {Bonnot, Justine and Desnos, Karol and Pelcat, Maxime and M{\'e}nard, Daniel}, title = {A Fast and Fuzzy Functional Simulator of Inexact Arithmetic Operators for Approximate Computing Systems}, booktitle = {Proceedings of the 2018 Great Lakes Symposium on VLSI (GLSVLSI)}, pages = {195--200}, doi = {10.1145/3194554.3194574}, year = 2018, month = may, comment = {Mentions AA but dismisses it.}, abstract = {Inexact operators are developed to exploit the tolerance of an application to imprecisions. These operators aim at reducing system energy consumption and memory footprint. In order to integrate the appropriate inexact operators in a complex system, the Quality of Service of the approximate system must be thoroughly studied through simulation. However, when simulating on a PC or workstation, the custom bit-level structures of inexact operators are not implemented in the instruction set of the simulating architecture. Consequently, the simulation requires a costly emulation, leading to expensive bit-level simulations. This paper proposes a new ``Fast and Fuzzy'' functional simulation method for inexact operators whose probabilistic behavior is correlated with the Most Significant Bits of the input operands. The proposed method processes real signal data and simplifies the error model for inexact operators, accelerating the simulation of the system. 
The modelization accuracy of the error can be controlled by a parameter called fuzzyness degree $F$. Using the proposed method, the bit-accurate logic-level simulation of inexact operators is replaced by an exact operator to which a pseudo-random error variable is added. Experiments on 16-bit operators show that the proposed simulation method, when compared to a bit-accurate logic level simulation, is up to 44 times faster.} } @inproceedings{bon-dim-baa-vec-21-aa-deept, author = {Bonaert, Gregory and Dimitrov, Dimitar I. and Baader, Maximilian and Vechev, Martin}, title = {Fast and Precise Certification of Transformers}, booktitle = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation (PLDI)}, month = jun, pages = {466--481}, doi = {10.1145/3453483.3454056}, year = 2021, comment = {Uses AA, that they call zonotope. The ``transformers'' are NOT electrical transformers but some AI or neural network thing.}, abstract = {We present DeepT, a novel method for certifying Transformer networks based on abstract interpretation. The key idea behind DeepT is our new Multi-norm Zonotope abstract domain, an extension of the classical Zonotope designed to handle $\ell_1$ and $\ell_2$-norm bound perturbations. We introduce all Multi-norm Zonotope abstract transformers necessary to handle these complex networks, including the challenging softmax function and dot product. Our evaluation shows that DeepT can certify average robustness radii that are 28$\times$ larger than the state-of-the-art, while scaling favorably. Further, for the first time, we certify Transformers against synonym attacks on long sequences of words, where each word can be replaced by any synonym. 
DeepT achieves a high certification success rate on sequences of words where enumeration-based verification would take 2 to 3 orders of magnitude more time.} } @incollection{bon-men-des-22-aa-appqual, author = {Bonnot, Justine and M{\'e}nard, Daniel and Desnos, Karol}, title = {Analysis of the Impact of Approximate Computing on the Application Quality}, booktitle = {Approximate Computing Techniques}, isbn = {978-3-030-94704-0}, pages = {145--176}, year = 2022, month = jan, doi = {10.1007/978-3-030-94705-7_6}, comment = {Survey paper. Has a description of AA and of Modified Affine Arithmetic (MAA). The latter tries to keep track of the PDF of a variable by approximating the PDF (not the variable) by a bunch of affine forms on binary intervals covering its range. Discusses application of AA to noise level estimation in signal processing due to computation errors. Mentions the LibAffa \texttt{C++} library.}, abstract = {By exploiting the error resilience of numerous applications Approximate Computing (AC) allows saving energy or reducing the application execution time but at the expense of introducing errors in the processing. The numerical accuracy of an application is now taken as a new tunable parameter to design more efficient systems. Nevertheless, the numerical accuracy of an application has to stay within an acceptable limit to be usable. For this reason, the impact of the induced errors on the application has to be studied. AC techniques generate various error profiles. When implementing AC in an application, the objective of error analysis is to derive the impact of the induced approximations on the application quality metric. The evaluation of the impact of the approximation on the application quality metric can be done in three steps. The first step corresponds to the AC error characterization which aims at developing a model defining the error due to a specific AC technique. 
In this chapter, the two types of techniques used to characterize the AC error metrics are described. Analytical approaches aim at defining a mathematical model of the error metrics. Simulation-based techniques integrate emulation techniques in the application source code to mimic the AC error behavior. The second and third steps aims at propagating the error inside the application to determine, respectively, an accuracy metric or directly the quality metric. Like for the first step, the available analytical and simulation-based techniques are described in this chapter.} } @article{bon-vac-vil-04-aa-thermal, author = {Bontempi, Gianluca and Vaccaro, Alfredo and Villacci, Domenico}, title = {Power Cables' Thermal Protection by Interval Simulation of Imprecise Dynamical Systems}, journal = {IEE Proceedings - Generation, Transmission and Distribution}, volume = {151}, number = {6}, month = nov, pages = {673--680}, doi = {10.1049/ip-gtd:20040826}, year = 2004, comment = {Not clear whether it uses AA or not.}, abstract = {The embedding of advanced simulation techniques in power cables enables improved thermal protection because of higher accuracy, adaptiveness and flexibility. In particular, they make possible (i) the accurate solution of differential equations describing the cables thermal dynamics and (ii) the adoption of the resulting solution in the accomplishment of dedicated protective functions. However, the use of model-based protective systems is exposed to the uncertainty affecting some model components (e.g. weather along the line route, thermophysical properties of the soil, cable parameters). When uncertainty can be described in terms of probability distribution, well-known techniques, such as Monte Carlo, are used to simulate the system behaviour. On the other hand, when the description of uncertainty in probabilistic terms is unfeasible or problematic, nonprobabilistic alternatives should be taken into consideration. 
This paper will discuss and compare three interval-based techniques as alternatives to probabilistic methods in the simulation of power cable dynamics. The experimental session will assess the interval-based approaches by simulating the thermal behaviour of medium voltage power cables.} }