# Last edited on 2022-11-20 02:48:05 by stolfi # New entries for "others.bib" - batch 7 @misc{bou-mim-cha-14-aa-hyson, author = {Bouissou, Olivier and Mimram, Samuel and Chapoutot, Alexandre}, title = {Simulation and Verification of Hybrid Systems using {HySon}}, howpublished = {Online document}, note = {Maybe submitted to the 1st International Workshop on Applied Verification for Continuous and Hybrid Systems (ARCH)}, year = 2014, url = {http://www.lix.polytechnique.fr/~smimram/docs/mimram_arch14.pdf}, comment = {Uses AA to do verification of continuous and hybrid systems.}, abstract = {In an industrial setting, control-command systems are usually validated using numerical simulation instead of formal verification as proposed by many academic tools. In this paper, we present a tool named HySon that tries to fill the gap between formal methods and industrial usage. HySon takes as input a dynamical system described by a Simulink model and proposes a new simulation engine that safely computes flowpipes of the system variables by adapting the numerical simulation algorithms to make them safely propagate sets of values instead of floatingpoint numbers. We show how the tool runs and give some results on small yet challenging examples.} } @inproceedings{bou-cha-djo-13-aa-runge, author = {Bouissou, Olivier and Chapoutot, Alexandre and Djoudi, Adel}, title = {Enclosing temporal evolution of dynamical systems using numerical methods}, booktitle = {Proceedings of the 5th International NASA Symposium on Formal Methods (NFM)}, location = {Moffett Field, US}, series = {Lecture Notes in Computer Science}, volume = {7871}, isbn = {978-3-642-38087-7}, year = 2013, month = may, pages = {108-123}, doi = {10.1007/978-3-642-38088-4_8}, comment = {Uses AA for reliable Runge-Kutta.}, abstract = {Numerical methods are necessary to understand the behaviors of complex hybrid systems used to design control-command systems. 
Especially, numerical integration methods are heavily used in simulation to compute approximations of the solution of differential equations, including non-linear and stiff solutions. Nevertheless, these methods only produce approximate results and they should not be used in formal verification methods as is. We propose a systematic way to make explicit Runge-Kutta integration method safe with respect to the mathematical solution. As side effect, we can hence compare different integration schemes in order to pick the right one in different situations.} } @misc{bou-cha-mim-13-aa-hybcon, author = {Bouissou, Olivier and Chapoutot, Alexandre and Mimram, Samuel}, title = {Computing Flowpipe of Nonlinear Hybrid Systems with Numerical Methods}, howpublished = {Online document at the arXiv/math website, ID 1306.2305 version 1}, doi = {10.48550/arXiv.1306.2305}, year = 2013, url = {https://arxiv.org/abs/1306.2305}, comment = {Uses AA to encode sets of real vectors and reliably detect threshold events in Runge-Kutta integration. Description of HySon.}, abstract = {Modern control-command systems often include controllers that perform nonlinear computations to control a physical system, which can typically be described by an hybrid automaton containing high-dimensional systems of nonlinear differential equations. To prove safety of such systems, one must compute all the reachable sets from a given initial position, which might be uncertain (its value is not precisely known). On linear hybrid systems, efficient and precise techniques exist, but they fail to handle nonlinear flows or jump conditions. In this article, we present a new tool name HySon which computes the flowpipes of both linear and nonlinear hybrid systems using guaranteed generalization of classical efficient numerical simulation methods, including with variable integration step-size. 
In particular, we present an algorithm for detecting discrete events based on guaranteed interpolation polynomials that turns out to be both precise and efficient. Illustrations of the techniques developed in this article are given on representative examples.} } @inproceedings{bou-con-cou-cou-fer-gho-09-aa-space, author = {Bouissou, Olivier and Conquet, {\'E}ric and Cousot, Patrick and Cousot, Radhia and Feret, J{\'e}r{\^o}me and Ghorbal, Khalil and Goubault, {\'E}ric and Lesens, David and Mauborgne, Laurent and Min{\'e}, Antoine and Putot, Sylvie and Rival, Xavier and Turin, Michel}, title = {Space Software Validation using Abstract Interpretation}, booktitle = {Proceedings of the International Space System Engineering Conference - Data Systems in Aerospace (DASIA)}, location = {Istanbul, TR}, month = may, pages = {1-7}, year = 2009, url = {https://hal.inria.fr/inria-00528590/}, comment = {Describes two tools, including FLUCTUAT, that uses AA. FLUCTUAT `` produces a graphical representation of the source of each numerical precision loss. It allows the user to know quickly the lines in the C source code causing the biggest losses of numerical precision. For loops, the tool also allows to produce graphics representing the evolution of bounds for the values and errors of variables during the computation.''}, abstract = {This paper reports the results of an ESA funded project on the use of abstract interpretation to validate critical real-time embedded space software. Abstract interpretation is industrially used since several years, especially for the validation of the Ariane 5 launcher. However, the limitations of the tools used so far prevented a wider deployment. 
Astrium Space Transportation, CEA, and ENS have analyzed the performances of two recent tools on a case study extracted from the safety software of the ATV: * ASTR{\'E}E, developed by ENS and CNRS, to check for run-time errors, * FLUCTUAT, developed by CEA, to analyse the accuracy of numerical computations. The conclusion of the study is that the performance of this new generation of tools has dramatically increased (no false alarms and fine analysis of numerical precision).}, quotes = {This paper reports the results of an ESA funded project on the use of abstract interpretation to validate critical real-time embedded space software. Abstract interpretation is industrially ...} } @inproceedings{bou-gou-put-cha-san-16-aa-prbox, author = {Bouissou, Olivier and Goubault, {\'E}ric and Putot, Sylvie and Chakarov, Aleksandar and Sankaranarayanan, Sriram}, title = {Uncertainty Propagation using Probabilistic Affine Forms and Concentration of Measure Inequalities}, booktitle = {Proceedings of the 22nd International Conference Tools and Algorithms for the Construction and Analysis of Systems (TACAS)}, location = {Eindhoven, NL}, series = {Lecture Notes in Computer Science}, volume = {9636}, isbn = {978-3-662-49673-2}, year = 2016, month = apr, pages = {225-243}, doi = {10.1007/978-3-662-49674-9_13}, comment = {Extends AA implementation of probability boxes (P-boxes).}, abstract = {We consider the problem of reasoning about the probability of assertion violations in straight-line, nonlinear computations involving uncertain quantities modeled as random variables. Such computations are quite common in many areas such as cyber-physical systems and numerical computation. Our approach extends probabilistic affine forms, an interval-based calculus for precisely tracking how the distribution of a given program variable depends on uncertain inputs modeled as noise symbols. 
We extend probabilistic affine forms using the precise tracking of dependencies between noise symbols combined with the expectations and higher order moments of the noise symbols. Next, we show how to prove bounds on the probabilities that program variables take on specific values by using concentration of measure inequalities. Thus, we enable a new approach to this problem that explicitly avoids subdividing the domain of inputs, as is commonly done in the related work. We illustrate the approach in this paper on a variety of challenging benchmark examples, and thus study its applicability to uncertainty propagation.} } @inproceedings{bou-mim-cha-12-aa-hyson, author = {Bouissou, Olivier and Mimram, Samuel and Chapoutot, Alexandre}, title = {{HySon}: {Set}-Based Simulation of Hybrid Systems}, booktitle = {Proceedings of the 23rd IEEE International Symposium on Rapid System Prototyping (RSP)}, year = 2012, month = oct, pages = {79-85}, doi = {10.1109/RSP.2012.6380694}, comment = {Describes HySon, a system that uses AA to model state sets.}, abstract = {Hybrid systems are a widely used model to represent and reason about control-command systems. In an industrial context, these are often implemented in Simulink and their validity is checked by performing many numerical simulations in order to test their behavior with various possible inputs. In this article, we present a tool named HySon which performs set-based simulation of hybrid systems with uncertain parameters, expressed in Simulink. Our tool handles advanced features such as non-linear operations, zero-crossing events or discrete sampling. It is based on well-known efficient numerical algorithms that were adapted to handle set-based domains. 
We demonstrate the performance of our method on various examples.} } @inproceedings{bou-mim-str-cha-14-aa, author = {Bouissou, Olivier and Mimram, Samuel and Strazzulla, Baptiste and Chapoutot, Alexandre}, title = {Set-Based Simulation for Design and Verification of {Simulink} Models}, booktitle = {Proceedings of the 7th Conference on Embedded Real Time Software and Systems (ERTS2)}, year = 2014, month = feb, location = {Toulouse, FR}, url = {https://hal.archives-ouvertes.fr/hal-01290286}, pages = {??}, comment = {Describes HySon, an AA based simulator that takes Simulink system description}, abstract = {Model-based design is a widely used methodology for the development of embedded critical software, such as a discrete controller for a continuous plant. In this setting, numerical simulation of both the plant and the controller plays a crucial role, since it is used to validate the design choices in the early stages of development. However, classical numerical simulation has inherent limitations: it is of limited precision and cannot deal with the intrinsic non-determinism present in complex systems. In this article, we present a tool named HySon that overcomes these drawbacks. It takes as input a Simulink model of a control-command system with non-deterministic uncertainties and automatically computes flow-pipes that contain all possible trajectories of the system. 
We show on some examples how HySon can be used to improve the quality of model-based design.} } @inproceedings{bra-kor-mul-15-aa-taymod, author = {Brau{\ss}e, Franz and Korovina, Margarita and M{\"u}ller, Norbert}, title = {Using {Taylor} Models in Exact Real Arithmetic}, booktitle = {Revised Selected Papers from the 6th International Conference on Mathematical Aspects of Computer and Information Sciences (MACIS)}, location = {Berlin, DE}, year = 2015, month = nov, pages = {474-488}, doi = {10.1007/978-3-319-32859-1_41}, comment = {Mentions AA as a special case of Taylor bound arithmetic with exact coeffs.}, abstract = {Software libraries for Exact Real Arithmetic implement the theory of computability on non-denumerable sets. Usually they are based on interval arithmetic. We discuss enhancements where the interval arithmetic is augmented by versions of Taylor models. Although this has no effect on the abstract notion of computability, the efficiency of implementations can be improved dramatically.} } @article{buh-01-ab-linint, author = {B{\"u}hler, Katja}, title = {Linear Interval Estimations for Parametric Objects Theory and Application}, journal = {Computer Graphics Forum}, volume = {20}, number = {3}, year = 2001, pages = {C-522}, month = jul, doi = {10.1111/1467-8659.00520}, note = {The journal's site gives ``pages 0-0'' for this article (only).}, comment = {Uses AA as a special case of Taylor models?}, abstract = {The new concept of parametrized bounding volumes for parametric objects is proposed to replace the common compact bounding volumes like axis aligned bounding boxes and parallelepipeds. Linear Interval Estimations (LIEs) are developed as a realization of the discussed ideas. Two reliable methods for the computation of LIEs are introduced based on a new understanding of the use of affine arithmetics and a special application of Taylor Models. The particular structure of LIEs allows an effective intersection test of LIEs with rays, boxes and other LIEs. 
The test gives besides of a possible location of the intersection in object space information about affected parts in the parameter spaces of the enclosed objects. A subdivision algorithm for the intersection of two parametric surface patches with remarkable experimental results is presented as a possible application.} } @inproceedings{bun-20-aa-tayode, author = {B{\"u}nger, Florian}, title = {A {Taylor} model toolbox for solving {ODEs} implemented in {MATLAB}/{INTLAB}}, booktitle = {Proceedings 18th International Symposium on Scientific Computing, Computer Arithmetic, and Verified Numerical Computations (SCAN 2018)}, note = {Special issue of the Journal of Computational and Applied Mathematics, volume 368}, year = 2020, month = apr, pages = {article 112511}, doi = {10.1016/j.cam.2019.112511}, comment = {Describes a Taylor arithmetic package for ODE integration. Notes that AA is the degree-1 case of Taylor form.}, abstract = {The new INTLAB release V11 contains two verified ODE solvers. One is a MATLAB implementation of Lohner's classical AWA, the other one follows the so-called Taylor model approach which is the main subject of this article.} } @incollection{bue-02-aa-implc, author = {B{\"u}hler, Katja}, title = {Fast and Reliable Plotting of Implicit Curves}, booktitle = {Uncertainty in Geometric Computations}, isbn = {978-1-4613-5252-5}, series = {Engineering and Computer Science}, volume = {704}, publisher = {Springer}, year = 2002, pages = {15-28}, doi = {10.1007/978-1-4615-0813-7_2}, comment = {Uses AA}, abstract = {This paper presents a new, fast and reliable subdivision algorithm for adaptive enumeration and plotting of implicit curves. For this purpose, Implicit Linear Interval Estimations (ILIEs) based on affine arithmetics are introduced. They allow a significant acceleration of the subdivision process and a generation of reliable piecewise linear enclosures for the curve. 
The algorithm has been tested for algebraic curves of high degree and non-trivial trigonometric curves with remarkable results.} } @inproceedings{buh-dyl-lut-04-aa-intlie, author = {B{\"u}hler, Katja and Dyllong, Eva and Luther, Wolfram}, title = {Reliable Distance and Intersection Computation using Finite Precision Geometry}, booktitle = {Revised Papers of the 2003 Dagstuhl Seminar on Numerical Software with Result Verification}, location = {Saarbr{\"u}cken, DE}, month = jan, year = 2004, pages = {160-190}, doi = {10.1007/978-3-540-24738-8_9}, abstract = {In this paper we discuss reliable methods in the field of finite precision geometry. We begin with a brief survey of geometric computing and approaches generally used in dealing with accuracy and robustness problems in finite precision geometry. Moreover, two reliable geometric algorithms based on these approaches are presented. The first one is a new distance algorithm for objects modeled in a common octree. The results are exact and include good bounds on all subdivision levels. Using smoother enclosures on the highest level, a link is provided to well-known algorithms for convex and non-convex objects. We discuss the general concept and advantages of special bounding volumes with representations directly connected to the representation of the enclosed object: Implicit and parametric Linear Interval Estimations (I)LIEs are roughly speaking, just thick planes enclosing the object. They are constructed using Taylor models or affine arithmetic. The particular structure of (I)LIEs allows the construction of effective hierarchies of bounding volumes and the development of effective intersection tests for the enclosed object with rays, boxes and other LIEs. 
In addition, a fast reliable intersection test for two LIEs is presented in detail.} } @phdthesis{caf-08-aa-thesis, author = {Caffarena Fern{\'a}ndez, Gabriel}, title = {Combined Word-Length Allocation and High-Level Synthesis of Digital Signal Processing Circuits}, school = {Universidad Polit{\'e}cnica de Madrid}, note = {Advisors: Octavio Nieto-Taladriz Garc{\'\i}a and Carlos Carreras Vaquer}, month = oct, year = 2008, doi = {10.20868/UPM.thesis.1822}, url = {http://oa.upm.es/1822}, comment = {Uses AA to estimate word length for DSP hardware.}, abstract = {This work is focused on the synthesis of Digital Signal Processing (DSP) circuits usingc specific hardware architectures. Due to its complexity, the design process has been subdivided into separate tasks, thus hindering the global optimization of the resulting systems. The author proposes the study of the combination of two major design tasks, Word-Length Allocation (WLA) and High-Level Synthesis (HLS), aiming at the optimization of DSP implementations using modern Field Programmable Gate Array devices (FPGAs). A multiple word-length approach (MWL) is adopted since it leads to highly optimized implementations. MWL implies the customization of the word-lengths of the signals of an algorithm. This complicates the design, since the number possible assignations between algorithm operations and hardware resources becomes very high. Moreover, this work also considers the use of heterogeneous FPGAs where there are several types of resources: configurable logic-based blocks (LUT-based) and specialized embedded resources. All these issues are addressed in this work and several automatic design techniques are proposed. The contributions of the Thesis cover the fields of WLA, HLS using FPGAs, and the combined application of WLA and HLS for implementation in FPGAs. 
A thorough approach of HLS has been implemented which considers a complete datapath composed of functional units (FUs), registers and multiplexers, as well as heterogeneous FPGA resources (LUT-based and embedded resources). The approach makes use of a resource library that accounts for MWL effects within the set of resources, thus producing highly optimized architectures. This library includes both LUT-based and embedded FPGA resources, which further increase the power of the HLS task. Another important contribution is the introduction of resource usage metrics suitable for heterogeneous-architecture FPGAs. A novel quantization error estimation based on affine arithmetic (AA) is presented, as well as its practical application to the automatic WLA of LTI and non-linear differentiable DSP systems. The error estimation is based on performing a pre-processing of the algorithm, which produces an expression of the quantization error at the system output. Therefore, the error can be easily computed leading to fast and accurate WLA optimizations. The analysis of the impact of different optimization techniques during WLA on HLS results is also presented. The variance in the obtained results corroborates the fact that it is worth using a single architecture model during WLA and HLS, and this is only possible by means of combining these tasks. The actual combination of WLA and HLS has been firstly performed by using a Mixed Integer Linear Programming (MILP) approach. The results prove the validity of the approach and also provide with insights into the combination of the two tasks that are used to generate heuristic synthesis algorithms. Finally, the global contribution of this thesis is an HLS heuristic algorithm able to perform the combined WLA and HLS of DSP systems for both homogeneous and heterogeneous FPGA architectures. 
Up to 20{\%} of resource usage reductions are reported, which proves the importance of such a combined approach, providing electronic designers with a design framework that enables highly improved DSP custom hardware implementations.} } @article{caf-car-lop-fer-10-aa-sqnr-j, author = {Caffarena, Gabriel and Carreras, Carlos and Lopez, Juan A. and Fern{\'a}ndez-Herrero, {\'A}ngel}, title = {{SQNR} Estimation of Fixed-Point {DSP} Algorithms}, journal = {EURASIP Journal on Advances in Signal Processing}, year = 2010, month = may, volume = {2010}, pages = {article 171027, 1-12}, doi = {10.1155/2010/171027}, comment = {Quantization noise estimator based on AA. SQNR is Signal-to-Quantization Noise Ratio.}, abstract = {A fast and accurate quantization noise estimator aiming at fixed-point implementations of Digital Signal Processing (DSP) algorithms is presented. The estimator enables significant reduction in the computation time required to perform complex wordlength optimizations. The proposed estimator is based on the use of Affine Arithmetic (AA) and it is presented in two versions: (i) a general version suitable for differentiable nonlinear algorithms, and Linear Time-Invariant (LTI) algorithms with and without feedbacks; and (ii) an LTI optimized version. The process relies on the parameterization of the statistical properties of the noise at the output of fixed-point algorithms. Once the output noise is parameterized (i.e., related to the fixed-point formats of the algorithm signals), a fast estimation can be applied throughout the word-length optimization process using as a precision metric the Signal-to-Quantization Noise Ratio (SQNR). The estimator is tested using different LTI filters and transforms, as well as a subset of non-linear operations, such as vector operations, adaptive filters, and a channel equalizer. 
Fixed-point optimization times are boosted by three orders of magnitude while keeping the average estimation error down to 4{\%}.} } @inproceedings{caf-fer-lop-car-10-aa-sqnr-c, author = {Caffarena, Gabriel and Fern{\'a}ndez-Herrero, {\'A}ngel and L{\'o}pez, Juan A. and Carreras, Carlos}, title = {Fast Fixed-Point Optimization of {DSP} Algorithms}, booktitle = {Forward-Looking Trends in IC and Systems Design: Proceedings of the 8th IFIP WG 10.5/IEEE International Conference on Very Large Scale Integration (VLSI-SoC)}, location = {Madrid, ES}, year = 2010, month = sep, pages = {182-205}, doi = {10.1007/978-3-642-28566-0_8}, note = {See also [caf-lop-fer-car-10-aa-nlsqnr-c].}, comment = {Quantization noise estimator based on AA. SQNR is Signal-to-Quantization Noise Ratio.}, abstract = {In this chapter, the fast fixed-point optimization of Digital Signal Processing (DSP) algorithms is addressed. A fast quantization noise estimator is presented. The estimator enables a significant reduction in the computation time required to perform complex fixed-point optimizations, while providing a high accuracy. Also, a methodology to perform fixed-point optimization is developed. Affine Arithmetic (AA) is used to provide a fast Signal-to-Quantization Noise-Ratio (SQNR) estimation that can be used during the fixed-point optimization stage. The fast estimator covers differentiable non-linear algorithms with and without feedbacks. The estimation is based on the parameterization of the statistical properties of the noise at the output of fixed-point algorithms. This parameterization allows relating the fixed-point formats of the signals to the output noise distribution by means of fast matrix operations. Thus, a fast estimation is achieved and the computation time of the fixed-point optimization process is significantly reduced. 
The proposed estimator and the fixed-point optimization methodology are tested using a subset of non-linear algorithms, such as vector operations, IIR filter for mean power computation, adaptive filters - for both linear and non-linear system identification - and a channel equalizer. The computation time of fixed-point optimization is boosted by three orders of magnitude while keeping the average estimation error down to 6{\%} in most cases.} } @inproceedings{caf-lop-fer-car-10-aa-nlsqnr-c, author = {Caffarena, Gabriel and L{\'o}pez, Juan A. and Fern{\'a}ndez-Herrero, {\'A}ngel and Carreras, Carlos}, title = {{SQNR} Estimation of Non-Linear Fixed-Point Algorithms}, booktitle = {Proceedings of the 18th European Signal Processing Conference (EUSIPCO)}, location = {Aalborg, DK}, year = 2010, month = aug, pages = {522-526}, note = {No DOI? See also [caf-fer-lop-car-10-aa-sqnr-c].}, comment = {Quantization noise estimator based on AA. SQNR is Signal-to-Quantization Noise Ratio.}, abstract = {In this paper, a fast and accurate quantization noise estimator aiming at fixed-point implementations of Digital Signal Processing (DSP) algorithms is presented. The estimator enables significant reduction in the computation time required to perform complex wordlength optimizations. The proposed estimator is based on the use of Affine Arithmetic (AA) and it is aimed at differentiable non-linear algorithms with and without feedbacks. The estimation relies on the parameterization of the statistical properties of the noise at the output of fixed-point algorithms. Once the output noise is parameterized (i.e. related to the fixed-point formats of the algorithm signals), a fast estimation can be applied throughout the wordlength optimization process using as a precision metric the SQNR. The estimator is tested using a subset of non-linear algorithms such as vector operations, adaptive filters and a channel equalizers. 
wordlength optimization times are boosted by three orders of magnitude while keeping the average estimation error down to 13{\%}.} } @inproceedings{caf-mes-tou-16-aa-airplac, author = {Cafieri, Sonia and Messine, Fr{\'e}d{\'e}ric and Touhami, Ahmed}, title = {On Solving Aircraft Conflict Avoidance Using Deterministic Global Optimization {(sBB)} Codes}, booktitle = {Proceedings of the 13th Global Optimization Workshop (GOW)}, year = 2016, month = sep, location = {Minho, PT}, pages = {149-152}, note = {no DOI?}, url = {http://repositorium.sdum.uminho.pt/bitstream/1822/42944/1/Proceedings%20GOW16.pdf}, comment = {Application of AA (via Messine and Nin's IBBA branch-and-bound solver) to air traffic control and collision avoidance. Extends IBBA with convex quadratic relaxation based on AA.}, abstract = {This paper focuses on the aircraft merging and sequencing problem at Terminal Manoeuvring Areas through the use of Controlled Time of Arrival (CTA). A Mixed-Integer Linear Programming formulation is proposed in order to minimize the number of non achievable CTAs while maintaining separation between aircraft with regard to the horizontal, wake-turbulence, and runway occupancy time constraints. Computational experiments performed on real-world case studies of Paris Charles De-Gaulle (CDG) airport show that the approach is viable.} } @inproceedings{cal-fae-moe-20-aa-strudyn, author = {Callens, Robin R. P. and Faes, Matthias G. R. and Moens, David}, title = {Local Interval Fields for Spatial Inhomogeneous Uncertainty Modelling in Structural Dynamics}, booktitle = {Proceedings of the International Conference on Uncertainty in Structural Dynamics}, location = {Leuven, BE}, year = 2020, month = sep, pages = {1-14}, note = {no DOI? See also [cal-fae-moe-20-aa-inhom], [cal-fae-moe-21-aa-nonstat].}, comment = {Application of validated numerics to finite element analysis. Mentions AA but does not use it? 
Uses ``interval fields''.}, abstract = {Interval fields have been introduced to model spatial uncertainty in Finite Element Models when the available data is insufficient to build representative probabilistic models. However, they are limited to modelling global non-stationary uncertainty and hence cannot model local non-stationary uncertainty. This is typically occurring in specific regions of a component or a structure which is produced with,e.g., casting, welding, drawing. This paper presents a more efficient local interval field approach to model the local uncertainty under scarce data. The method is based on the concept of explicit interval fields and aims to develop an alternative approach for the commonly applied inverse distance weighting approach for the generation of the basis functions. In this paper the method is applied on a two-dimensional spatial uncertainty case with a specific focus on dynamics. The paper compares the introduced local interval field approach with inverse distance weighting from a numerical and application point of view.} } @inproceedings{cal-fae-moe-20-aa-inhom, author = {Callens, Robin R. P. and Faes, Matthias G. R. and Moens, David}, title = {Local Interval Fields for Spatial Inhomogeneous Uncertainty Modelling}, booktitle = {Proceedings of the 5th International Symposium on Uncertainty Quantification and Stochastic Modelling (UNCERTAINTIES)}, year = 2020, month = aug, pages = {121-135}, doi = {10.1007/978-3-030-53669-5_10}, note = {Simpler version of [cal-fae-moe-20-aa-strudyn], [cal-fae-moe-21-aa-nonstat]?}, comment = {Application of validated numerics to finite element analysis. Does not use AA? Uses ``interval fields''.}, abstract = {In an engineering context, design optimization is usually performed virtually using numerical models to approximate the underlying partial differential equations. 
However, valid criticism exists concerning such an approach, as more often than not, only partial or uninformative data are available to estimate the corresponding model parameters. As a result hereof, the results that are obtained by such numerical approximation can diverge significantly from the real structural behaviour of the design. Under such scarce data, especially interval analysis has been proven to provide robust bounds on the structure’s performance, often at a small-to-moderate cost. Furthermore, to model spatial dependence throughout the model domain, interval fields were recently introduced by the authors as an interval counterpart to the established random fields framework. However, currently available interval field methods cannot model local inhomogeneous uncertainty. This paper presents a local interval field approach to model the local inhomogeneous uncertainty under scarce data. The method is based on the use of explicit interval fields [1] and the recently used inverse distance weighting function [2]. This paper presents the approach for one dimension of spatial uncertainty. Nonetheless, the approach can be extended to an n-dimensional context. In the first part of the paper, a detailed theoretical background of interval fields is covered, and then the local interval fields approach is introduced. Furthermore, an academic case study is performed to compare the local interval field approach with inverse distance weighting.} } @article{cal-fae-moe-21-aa-nonstat, author = {Callens, Robin R. P. and Faes, Matthias G. R. and Moens, David}, title = {Local Explicit Interval Fields for Non-Stationary Uncertainty Modelling in Finite Element Models}, journal = {Computer Methods in Applied Mechanics and Engineering}, volume = {379}, pages = {article 113735}, year = 2021, month = jun, doi = {10.1016/j.cma.2021.113735}, comment = {Application of validated numerics to finite element analysis. Mentions AA but does not use it? 
Uses ``interval fields'', namely IA boxes, and computes the output box from the input box by ``optimisation''. Problems may have millions of variables.}, abstract = {Interval fields have been introduced to model spatial uncertainty in Finite Element Models when the stochastic resolution of available data is too limited to build representative probabilistic models. However, current interval fields modelling techniques are according to the state-of-the-art limited in potential, as they homogenise the uncertain parameters to globally defined parameters. Hence, these techniques are inherently unable to represent local uncertainty. In practice, local variations in the uncertain parameters (non-stationary uncertainty) often occur, e.g., through local effects in manufacturing processes. This paper presents a novel method to model local explicit interval fields, that is furthermore less computationally demanding and less conservative than global explicit interval fields. The method presented in this paper is based on the concept of explicit interval fields and develops an alternative approach for the commonly applied inverse distance weighting approach for the generation of the basis functions. The paper includes three case studies to compare the introduced local explicit interval fields approach with the global explicit interval fields method. 
The obtained results are discussed from a numerical and application point of view to show the effectiveness and efficiency of the proposed methods.} } @article{cal-gal-pic-sia-07-aa-relay, author = {Calderaro, Vito and Galdi, Vincenzo and Piccolo, Antonio and Siano, Pierluigi}, title = {Adaptive Relays for Overhead Line Protection}, journal = {Electric Power Systems Research}, volume = {77}, number = {12}, pages = {1552-1559}, year = 2007, month = oct, comment = {Mentions AA but does not use it?}, abstract = {In the liberalized energy market scenario protective relays play an important role in assuring continuous service in the power system where a malfunctioning could lead to serious damages to a wide number of operators having access to the power system. Considering that power lines are operated many times below a rated load current, in this paper an adaptive procedure is presented in order to manage power distribution systems according to dependability or security requirements. In particular, a procedure to obtain an inverse time trip curve by means of a microprocessor, connected to a relay, is presented. The procedure adapts the trip characteristic depending on the conductor temperature, wind speed, emissivity and solar absorbity and is implemented on a microprocessor Rabbit 2200 considering a Drake conductor, 795 kcmil 26/7 ACSR.}, url = {{\url{https://www.sciencedirect.com/science/article/pii/S0378779606002744?casa_token=21NSYv-CizQAAAAA:837EcF8XZT7tULhzILImUV-nT7YfMWEuRahx89aaWd8-7CGxNcvN10zFLsBgD_X0ej0cBa1H}}}, quotes = {... 
, in [9], in order to predict the thermal behaviour on short and long time horizons, also in the presence of data uncertainties, an interesting approach, based on the use Affine Arithmetic, ...} } @article{cal-lam-gal-pic-18-aa-fuzznum, author = {Calderaro, Vito and Lamberti, Francesco and Galdi, Vincenzo and Piccolo, Antonio}, title = {Power Flow Problems with Nested Information: {An} Approach Based on Fuzzy Numbers and Possibility Theory}, journal = {Electric Power Systems Research}, volume = {158}, pages = {275-283}, year = 2018, month = may, doi = {10.1016/j.epsr.2018.01.008}, comment = {Mentions AA but does not use it}, abstract = {In this paper, we present a new approach based on possibility theory to deal with the power flow problems in electrical networks. The approach takes into account the available information of a power system that is characterized generally by big data, often uncertain, redundant or insufficient for a correct description of the network status. In particular, we present a method to deal with nested information that can be generated by inaccurate measurements of electrical parameters. In order to solve the power flow problem we define a way to model nested information in power system and formalize an AC fuzzy power flow problem. The power flow results are obtained by an innovative approach based on the solution of a simultaneous nonlinear equations fuzzy system. The effectiveness of the proposed method is proven by applying the proposed approach to two modified IEEE benchmark test systems. 
Simulation results show accuracy, robustness and good computational cost of the implemented method in the search of the solution.} } @article{cam-cas-alm-fer-16-aa-renvolt, author = {Camilo, Fernando Manuel and Castro, Rui and Almeida, Maria Eduarda and Fern{\~a}o Pires, Victor}, title = {Self-Consumption and Storage as a Way to Facilitate the Integration of Renewable Energy in Low Voltage Distribution Networks}, journal = {IET Renewable Power Generation}, volume = {10}, number = {7}, year = 2016, month = may, pages = {1741-1748}, doi = {10.1049/iet-gtd.2015.0431}, comment = {Mentions AA but does not use it.}, abstract = {Photovoltaic microgeneration ($\mu$G) located near the domestic consumers is expected to increase more in the future. Known issues regarding high μG penetration are voltage rise and reverse power flow. The concept of self-consumption and storage is emerging as a way to improve network power supply quality and to facilitate the integration of small renewable energy sources in low voltage networks. This paper intends to give a further contribution by assessing the improvements provided by allowing domestic clients to consume and store the energy they produce. The tool at hand to study the situation is the unbalanced three-phase load flow algorithm, based on the power summation technique, improved with the capability of explicitly compute the neutral voltages. The behaviour of a test radial distribution grid is assessed in different operating conditions, namely winter/summer, μG penetration level and μG operating mode. Voltage profile, active power flow in the service transformer and losses are monitored. The results suggest that self-consumption with storage operating mode is a promising solution. 
Voltage excursions above normal grid voltage operating limits were not observed, the situation of reverse active power flow does not occur and the power losses are reduced} } @article{cao-gao-che-hex-don-lin-22-aa-fuzzclu, author = {Cao, Huazhen and Gao, Chong and Chen, Peidong and He, Xuan and Dong, Zhihui and Lin, Lingxue}, title = {Distribution Network Dynamic Reconfiguration Based on Improved Fuzzy {C}-Means Clustering with Time Series Analysis}, journal = {IET Generation, Transmission {\&} Distribution}, year = 2022, volume = {17}, number = {2}, month = feb, pages = {174-182}, doi = {10.1002/tee.23504}, comment = {Uses ``affine Taylor expansion to solve the interval power flow equations''. Is this affine arithmetic?}, abstract = {The rapid growth of distributed energy resources integrated in distribution systems leads to an increasing need of continuously and automatically changing the system topology to realize the economic operation of distribution networks. This paper proposes an optimization model of dynamic reconfiguration for distribution networks based on a new method of time series analysis. Equivalent daily curve considering time-varying nature of distributed generator and load demands is divided by an improved fuzzy C-means clustering algorithm, where the indicator of section function is set to find the optimal reconfiguration time intervals. The uncertainty of distributed generator outputs and load demands is described by the interval algorithm. Then the affine Taylor expansion is adopted to solve the interval power flow equations. The reconfiguration optimization model is solved with decimal particle swarm optimization algorithm based on loop search. The optimal dynamic reconfiguration of a modified 70-bus test system with distributed generators is carried out and the simulation results demonstrate the effectiveness and superiority of the proposed method} } @article{lop-caf-car-nie-08-aa-noilit, author = {L{\'o}pez, Juan A. 
and Caffarena, Gabriel and Carreras, Carlos and Nieto-Taladriz, Octavio}, title = {Fast and Accurate Computation of the Round-Off Noise of Linear Time-Invariant Systems}, journal = {IET Circuits, Devices {\&} Systems}, volume = {2}, number = {4}, year = 2008, month = aug, doi = {10.1049/iet-cds:20070198}, comment = {Uses AA extensively for signal and image processing.}, abstract = {From its introduction in the last decade, affine arithmetic (AA) has shown beneficial properties to speed up the time of computation procedures in a wide variety of areas. In the determination of the optimum set of finite word-lengths of the digital signal processing systems, the use of AA has been recently suggested by several authors, but the existing procedures provide pessimistic results. The aim is to present a novel approach to compute the round-off noise (RON) using AA which is both faster and more accurate than the existing techniques and to justify that this type of computation is restricted to linear time-invariant systems. By a novel definition of AA-based models, this is the first methodology that performs interval-based computation of the RON. 
The provided comparative results show that the proposed technique is faster than the existing numerical ones with an observed speed-up ranging from 1.6 to 20.48, and that the application of discrete noise models leads to results up to five times more accurate than the traditional estimations.}, url = {{\url{https://www.academia.edu/download/56204293/iet-cds_3A2007019820180331-30513-jcv4fs.pdf}}} } @article{car-pis-vac-vil-16-aa-lineload, author = {Carlini, Enrico Maria and Pisani, Cosimo and Vaccaro, Alfredo and Villacci, Domenico}, title = {A Reliable Computing Framework for Dynamic Line Rating of Overhead Lines}, journal = {Electric Power Systems Research}, volume = {132}, pages = {1-8}, year = 2016, month = mar, doi = {10.1016/j.epsr.2015.11.004}, comment = {Uses AA to estimate the capacity of power lines.}, abstract = {Indirect methods for dynamic loadability analysis have been recognized as enabling methodologies for improving the overhead line exploitation, since they do not require the need of a direct measurement of the conductor temperature, implying simple and inexpensive installation, and maintenance procedures. Despite these benefits, indirect methods suffer for several limitations, which mainly derive from the strong uncertainties affecting the loadability estimation process. To overcome these limitations, a novel self-validated computing framework for indirect loadability analysis of overhead line is proposed in this article. 
Experimental results obtained on a real 150 kV overhead line are presented and discussed in order to assess the effectiveness of the proposed methodology.} } @misc{ceb-kre-cho-lud-05-aa-expert, author = {Ceberio, Martine and Kreinovich, Vladik and Chopra, Sanjeev and Lud{\"a}scher, Bertram}, title = {{Taylor}-Type Techniques for Handling Uncertainty in Expert Systems, with Potential Applications to Geoinformatics}, howpublished = {Online document}, year = 2005, month = apr, note = {Date extracted from PDF metadata}, comment = {Uses AA to compute probabilities, so that $p \vee \neg p$ is $[1,1]$ not $[0,1]$.}, abstract = {Expert knowledge consists of statements $S_j$ (facts and rules). The expert's degree of confidence in each statement $S_j$ can be described as a (subjective) probability (some probabilities are known to be independent). Examples: if we are interested in oil, we should look at seismic data (confidence 90{\%}); a bank $A$ trusts a client $B$, so if we trust $A$, we should trust $B$ too (confidence 99{\%}). If a query $Q$ is deducible from facts and rules, what is our confidence $p(Q)$ in $Q$? We can describe $Q$ as a propositional formula $F$ in terms of $S_j$; computing $p(Q)$ exactly is NP-hard, so heuristics are needed. Traditionally, expert systems use technique similar to straightforward interval computations: we parse $F$ and replace each computation step with corresponding probability operation. Problem: at each step, we ignore the dependence between the intermediate results $F_j$; hence intervals are too wide. Example: the estimate for $P(A\vee\neg A)$ is not 1. Solution: similarly to affine arithmetic, besides $P(F_{j})$, we also compute $P(F_{j} \wedge F_{i})$ (or $P(F_{j1} \wedge \ldots \wedge F_{jk})$), and on each step, use all combinations of $l$ such probabilities to get new estimates. 
Results: e.g., $P(A \vee \neg A)$ is estimated as 1.} } @article{rou-cha-19-aa-spring, author = {Rout, Saudamini and Chakraverty, Snehashish}, title = {Solving Fully Fuzzy Nonlinear Eigenvalue Problems of Damped Spring-Mass Structural Systems using Novel Fuzzy-Affine Approach}, journal = {Computer Modeling in Engineering {\&} Sciences}, volume = {121}, number = {3}, pages = {947-980}, year = 2019, month = dec, doi = {10.32604/cmes.2019.08036}, url = {{\url{https://www.ingentaconnect.com/contentone/tsp/cmes/2019/00000121/00000003/art00009}}}, comment = {Uses AA to solve non-linear eigenvalue problems in structural mechanics. Defines ``fuzzy arithmetic'' where AA is used to approximate the charac function of the fuzzy set.}, abstract = {The dynamic analysis of damped structural system by using finite element method leads to nonlinear eigenvalue problem (NEP) (particularly, quadratic eigenvalue problem). In general, the parameters of NEP are considered as exact values. But in actual practice because of different errors and incomplete information, the parameters may have uncertain or vague values and such uncertain values may be considered in terms of fuzzy numbers. This article proposes an efficient fuzzy-affine approach to solve fully fuzzy nonlinear eigenvalue problems (FNEPs) where involved parameters are fuzzy numbers viz. triangular and trapezoidal. Based on the parametric form, fuzzy numbers have been transformed into family of standard intervals. Further due to the presence of interval overestimation problem in standard interval arithmetic, affine arithmetic based approach has been implemented. In the proposed method, the FNEP has been linearized into a generalized eigenvalue problem and further solved by using the fuzzy-affine approach. Several application problems of structures and also general NEPs with fuzzy parameters are investigated based on the proposed procedure. 
Lastly, fuzzy eigenvalue bounds are illustrated with fuzzy plots with respect to its membership function. Few comparisons are also demonstrated to show the reliability and efficacy of the present approach.} } @inproceedings{cha-did-vil-12-aa-simulink, author = {Chapoutot, Alexandre and Didier, Laurent-St{\'e}phane and Villers, Fanny}, title = {Range Estimation of Floating-Point Variables in {Simulink} Models}, booktitle = {Proceedings of the 2012 Conference on Design {\&} Architectures for Signal {\&} Image Processing (DASIP)}, location = {Karlsruhe, DE}, year = 2012, month = oct, isbn = {978-2-9539987-2-6}, pages = {article 13243874, 1-8}, note = {No DOI? No page numbers? See also [bou-mim-str-cha-14-aa] etc.}, comment = {Fixed-point word size determination in {DSP} hardware. Uses their own method and compares to AA.}, abstract = {Fixed-point arithmetic is widely used in embedded applications because it allows to build compact, fast and low-power application-specific integrated circuits designs. Practically, many of them are designed using model-based design tool such as Matlab/Simulink which allow simulations in floating-point representations. From such a high level simulable model, embedded system designers have to size the proper fixed-point representation. Thus, the challenge is to transform floating-point algorithms into numerical equivalent fixed-point programs. As software increases in complexity and both arithmetics do not have the same behaviors, designers need tools to help them in this task. In this article, we present a new statistical method based on Extreme Value Theory to estimate the dynamic range of program variables. We show that this model fits better than Gumbel model to the range estimation in digital signal processing applications both for linear and nonlinear systems. We present several experiments to illustrate the practical use of our approach. 
We show few simulations are required in order to estimate the bit-width of the bound of the range.} } @misc{cha-hil-che-12-aa-dsp-slides, author = {Chapoutot, Alexandre and Hilaire, Thibault and Chevrel, Philippe}, title = {Interval-based Robustness of Linear Parametrized Filters}, howpublished = {Slides presented at SCAN'12 - 15th International Symposium on Scientific Computing, Computer Arithmetic and Verified Numerical Computations}, year = 2012, month = sep, url = {http://www.docmatic.fr/wp-content/papercite-data/slides/hila12b-slides.pdf}, note = {See [cha-hil-che-12-aa-dsp] for the paper} } @inproceedings{cha-hil-che-12-aa-dsp, author = {Chapoutot, Alexandre and Hilaire, Thibault and Chevrel, Philippe}, title = {Interval-based Robustness of Linear Parametrized Filters}, booktitle = {5th GAMM-IMACS International Symposium on Scientific Computing, Computer Arithmetic and Verified Numerical Computations (SCAN)}, location = {Novosibirsk, RU}, year = 2012, month = sep, url = {https://hal.archives-ouvertes.fr/hal-00706772}, note = {No page numbers? No DOI?}, comment = {Uses ``interval optimization methods''. Just IA, or AA?}, abstract = {This article deals with the resilient implementation of parametrized linear filters (or controllers), i.e. realizations that are robust with respect to their implementation with fixed-point arithmetic. The implementation of a linear filter/controller in an embedded device is a difficult task because the numerical version of such algorithms suffers from a deterioration in performances and characteristics. This degradation has two separate origins, corresponding to the quantization of the embedded coefficients and the round-off occurring during the computations. The optimal realization problem is to find, for a given filter, the most resilient realization. We here consider linear filters that depends on a set of parameters that are not exactly known during the design. 
They are used for example in automotive control, where a very late re-tuning is required. The paper presents results on FWL resiliency analyzis using interval optimization methods [2], and compare them to those obtained with the sensitivity approach.} } @inproceedings{cha-kah-kan-kum-sar-13-aa-errcomp, author = {Chan, Wei-Ting J. and Kahng, Andrew B. and Kang, Seokhyeong and Kumar, Rakesh and Sartori, John}, title = {Statistical Analysis and Modeling for Error Composition in Approximate Computation Circuits}, booktitle = {Proceedings of the 31st IEEE International Conference on Computer Design (ICCD)}, year = 2013, month = oct, isbn = {978-1-4799-2987-0}, pages = {47-53}, doi = {10.1109/ICCD.2013.6657024}, comment = {Compares to AA but uses a different approach.}, abstract = {Aggressive requirements for low power and high performance in VLSI designs have led to increased interest in approximate computation. Approximate hardware modules can achieve improved energy efficiency compared to accurate hardware modules. While a number of previous works have proposed hardware modules for approximate arithmetic, these works focus on solitary approximate arithmetic operations. To utilize the benefit of approximate hardware modules, CAD tools should be able to quickly and accurately estimate the output quality of composed approximate designs. A previous work [10] proposes an interval-based approach for evaluating the output quality of certain approximate arithmetic designs. However, their approach uses sampled error distributions to store the characterization data of hardware, and its accuracy is limited by the number of intervals used during characterization. In this work, we propose an approach for output quality estimation of approximate designs that is based on a lookup table technique that characterizes the statistical properties of approximate hardwares and a regression-based technique for composing statistics to formulate output quality. 
These two techniques improve the speed and accuracy for several error metrics over a set of multiply-accumulator testcases. Compared to the interval-based modeling approach of [10], our approach for estimating output quality of approximate designs is 3.75$\times$ more accurate for comparable runtime on the testcases and achieves 8.4$\times$ runtime reduction for the error composition flow. We also demonstrate that our approach is applicable to general testcases.} } @article{cha-pla-veg-12-aa-radial, author = {Chattopadhyay, Amit and Plantinga, Simon and Vegter, Gert}, title = {Certified Meshing of Radial Basis Function Based Isosurfaces}, journal = {The Visual Computer}, volume = {28}, pages = {445-462}, year = 2012, month = may, doi = {10.1007/s00371-011-0627-2}, comment = {Tried AA but found that it was not fast enough so developed a different method, BPARAB, exploiting the special form of the radial functions. Apparently uses quadratic Taylor-interval-like bounds.}, abstract = {Radial Basis Functions are widely used in scattered data interpolation. The surface-reconstruction method using radial basis functions consists of two steps: (i) computing an interpolating implicit function the zero set of which contains the points in the data set, followed by (ii) extraction of isocurves or isosurfaces. In this paper we focus on the second step, generalizing the work on certified meshing of implicit surfaces based on interval arithmetic (Plantinga and Vegter in Visual Comput. 23:45-58, 2007). It turns out that interval arithmetic, and even the usually faster affine arithmetic, are far too slow in the context of RBF-based implicit surface meshing. We present optimized strategies giving acceptable running times and better space complexity, exploiting special properties of RBF-interpolants. 
We present pictures and timing results confirming the improved quality of these optimized strategies.} } @inproceedings{cha-pla-veg-09-aa-rbfmesh, author = {Chattopadhyay, Amit and Plantinga, Simon and Vegter, Gert}, booktitle = {25th European Workshop on Computational Geometry (EuroCG)}, location = {Brussels, BE}, title = {Certified Meshing of RBF-based Isosurfaces}, year = 2009, month = mar, pages = {101-104}, note = {See [cha-pla-veg-12-aa-radial]. No DOI?}, comment = {Tried AA but found that it was not fast enough so developed a different method.}, abstract = {Radial Basis Functions are widely used in scattered data interpolation. The process consists of two steps: (i) computing an interpolating implicit function the zero set of which contains the points in the data set, followed by (ii) extraction of isocurves or isosurfaces. We focus on the second step, generalizing our earlier work on certified meshing of implicit surfaces based on interval arithmetic. It turns out that interval arithmetic, and even the usually faster affine arithmetic, are far too slow in the context of RBFbased implicit surface meshing. We present optimized strategies giving acceptable running times and better space complexity, exploiting special properties of RBF-interpolants. We present pictures and timing results confirming the improved quality of these optimized strategies.} } @incollection{cha-rou-20-aa-uncstat, title = {Uncertain Static Problems}, author = {Chakraverty, Snehashish and Rout, Saudamini}, booktitle = {Affine Arithmetic Based Solution of Uncertain Static and Dynamic Problems}, series = {Synthesis Lectures on Mathematics {\&} Statistics}, year = 2020, pages = {75-96}, doi = {10.1007/978-3-031-02424-5_5}, comment = {Systems of linear equations using IA, AA, etc.}, abstract = {Under static conditions, the governing differential equations of various science and engineering problems lead to systems of simultaneous equations (linear and nonlinear). 
In this chapter, we focus on the solution for a system of linear equations. In mathematics, the theory of linear systems is the basis, and a fundamental part, of linear algebra. Also, the computational algorithms for finding the solutions for the system of linear equations are an important part of numerical linear algebra and play a prominent role in various fields viz. engineering, physics, chemistry, computer science and economics. For simplicity and easy computation, all the involved parameters and variables of the linear system are usually considered as deterministic or exact. But as a practical matter, due to the uncertain environment, one may have imprecise, incomplete, insufficient, or vague information about the parameters because of several errors. Traditionally, such uncertainty or vagueness may be modeled through a probabilistic approach. But a large amount of data is required for the traditional probabilistic approach. Without a sufficient amount of experimental data, the probabilistic methods may not deliver reliable results at the required precision. Therefore, intervals and/or fuzzy numbers may become efficient tools to handle uncertain and vague parameters when there is an insufficient amount of data available. 
In this regard, uncertain static problems may be modeled through an interval system of linear equations (ISLE) ($[A][x] = [b]$) and/or fuzzy system of linear equations (FSLE) ($\mathbf{A}\mathbf{x} =\mathbf{b}$).} } @incollection{cha-rou-20-aa-unldyn, title = {Uncertain Nonlinear Dynamic Problems}, author = {Chakraverty, Snehashish and Rout, Saudamini}, booktitle = {Affine Arithmetic Based Solution of Uncertain Static and Dynamic Problems}, series = {Synthesis Lectures on Mathematics {\&} Statistics}, year = 2020, pages = {125-150}, doi = {10.1007/978-3-031-02424-5_7}, comment = {Finds eigenvalues of matrices of affine forms.}, abstract = {Nonlinear dynamic problems from various fields of science and engineering lead to nonlinear eigenvalue problems. In this chapter, we focus on the solutions of nonlinear eigenvalue problems with uncertainty. A nonlinear eigenvalue problem is a generalization of a linear eigenvalue problem viz. standard eigenvalue problem or generalized eigenvalue problem to the equations that depend nonlinearly on the eigenvalues. Mathematically, a nonlinear eigenvalue problem is generally described by an equation of the form $M(\lambda)x = 0$, for all $\lambda$, and contains two unknowns viz. the eigenvalue parameter ($\lambda$) and the ``nontrivial'' vector(s) ($x$) (known as eigenvector) corresponding to it.} } @incollection{cha-rou-20-aa-fuzzy, title = {Fuzzy-Affine Arithmetic}, author = {Chakraverty, Snehashish and Rout, Saudamini}, booktitle = {Affine Arithmetic Based Solution of Uncertain Static and Dynamic Problems}, series = {Synthesis Lectures on Mathematics {\&} Statistics}, year = 2020, pages = {53-74}, doi = {10.1007/978-3-031-02424-5_4}, comment = {Defines fuzzy-affine arithmetic where fuzzy sets are represented by $a$-cut intervals (parametrized by {a}?). Then replaces intervals by affine forms.}, abstract = {Fuzzy numbers and their arithmetic are a very powerful tool to handle uncertain parameters. 
By adopting the a-cut technique, fuzzy numbers can be parameterized and transformed into a family of intervals. All problems where the operands are in the form of different fuzzy numbers may be solved by using parametric fuzzy arithmetic. The parametric fuzzy arithmetic is based upon the concepts and properties of classic interval arithmetic. But the dependency problem or overestimation problem in standard interval arithmetic is a major hurdle that often leads to overestimation of the solution bounds. As such, fuzzy-affine arithmetic may be used to handle the fuzzy parameters more efficiently.} } @incollection{cha-rou-20-aa-affine, title = {Affine Arithmetic}, author = {Chakraverty, Snehashish and Rout, Saudamini}, booktitle = {Affine Arithmetic Based Solution of Uncertain Static and Dynamic Problems}, series = {Synthesis Lectures on Mathematics {\&} Statistics}, year = 2020, pages = {39-51}, doi = {10.1007/978-3-031-02424-5_3}, comment = {Overview of affine arithmetic.}, abstract = {In 1993, Comba and Stolfi gave us the basic idea of affine arithmetic [Comba and Stolfi (1993) [3]]. The interval dependency problem that occurs in standard interval arithmetic is the main cause behind development of affine arithmetic. Affine arithmetic is a self-validated numerical model that records the range for each ideal quantity and also keeps track of first-order correlations between these quantities. For this additional information, the approximation error is incurred in each operation of affine arithmetic. Therefore, affine arithmetic can overcome the extreme increment of the width of the resulting interval. This benefit will help for several chained-interval computations where interval arithmetic goes through an error explosion. 
Also, affine arithmetic provides the geometric representation of joint ranges for the related quantities, which may be useful for different interval methods.} } @incollection{cha-rou-20-aa-ulindyn, title = {Uncertain Linear Dynamic Problems}, author = {Chakraverty, Snehashish and Rout, Saudamini}, booktitle = {Affine Arithmetic Based Solution of Uncertain Static and Dynamic Problems}, series = {Synthesis Lectures on Mathematics {\&} Statistics}, year = 2020, pages = {97-124}, doi = {10.1007/978-3-031-02424-5_6}, comment = {Linear eigenvalue problems with uncertainty. Uses AA?}, abstract = {The dynamic analysis of various science and engineering problems with different material and geometric properties lead to linear eigenvalue problems (LEPs) such as the generalized eigenvalue problem (GEP) and standard eigenvalue problem (SEP). In general, the material and geometric properties are assumed to be in the form of crisp (or exact). However, due to several errors and insufficient or incomplete information of data, uncertainties are assumed to be present in the material and geometric properties. Traditionally, these uncertainties are modeled through probabilistic approaches, which are unable to deliver efficient and reliable solutions without a sufficient amount of experimental data. Thus, these uncertain material and geometric properties may be modeled through closed intervals or convex normalized fuzzy sets. 
In this regard, efficient handling of these eigenvalue problems in an uncertain environment is a challenging and important task to deal with.} } @book{cha-rou-20-aa-book-spr, author = {Chakraverty, Snehashish and Rout, Saudamini}, title = {Affine Arithmetic Based Solution of Uncertain Static and Dynamic Problems}, series = {Synthesis Lectures on Mathematics {\&} Statistics}, publisher = {Springer}, year = 2020, month = mar, isbn = {978-3-031-01296-9}, pages = {17+152}, doi = {10.1007/978-3-031-02424-5}, note = {Reprint of original edition by Morgan {\&} Claypool; see [cha-rou-20-aa-book]}, comment = {General description of AA and applications. Fuzzy AA}, abstract = {Uncertainty is an inseparable component of almost every measurement and occurrence when dealing with real-world problems. Finding solutions to real-life problems in an uncertain environment is a difficult and challenging task. As such, this book addresses the solution of uncertain static and dynamic problems based on affine arithmetic approaches. Affine arithmetic is one of the recent developments designed to handle such uncertainties in a different manner which may be useful for overcoming the dependency problem and may compute better enclosures of the solutions. Further, uncertain static and dynamic problems turn into interval and/or fuzzy linear/nonlinear systems of equations and eigenvalue problems, respectively. Accordingly, this book includes newly developed efficient methods to handle the said problems based on the affine and interval/fuzzy approach. 
Various illustrative examples concerning static and dynamic problems of structures have been investigated in order to show the reliability and efficacy of the developed approaches.} } @inproceedings{cha-ale-mul-15-aa-runge, author = {Chapoutot, Alexandre and Alexandre dit Sandretto, Julien and Mullier, Olivier}, title = {Validated Explicit and Implicit {Runge}-{Kutta} Methods}, booktitle = {Proceedings of the Small Workshop on Interval Methods}, location = {Prague, CZ}, year = 2015, month = jun, comment = {Uses Taylor analysis of Runge-Kutta (after John Butcher) and ``some'' validated arithmetic. Lists AA as a keyword but does not mention it explicitly in the article.}, abstract = {The guaranteed solution of initial value problem of ordinary differential equations is well studied from interval analysis community. In the most of the cases Taylor models are used in this context, see [1] and the references therein. In contrast, in numerical analysis community other numerical integration methods, e.g., Runge-Kutta methods, are used. Indeed, these methods have very good stability properties [2] and they can be applied on a wide variety of problems. We propose a new method to validate the solution of initial value problem of ordinary differential equations based on Runge-Kutta methods. The strength of our contribution is to adapt any explicit and implicit Runge-Kutta methods to make them guaranteed. 
We experimentally verify our approach against Vericomp benchmark2 and the results are reported in [3].} } @article{che-fei-wus-liy-19-aa-vibro, author = {Chen, Qiang and Fei, Qingguo and Wu, Shaoqing and Li, Yanbin}, title = {Uncertainty Propagation of the Energy Flow in Vibro-Acoustic System with Fuzzy Parameters}, journal = {Aerospace Science and Technology}, volume = {94}, pages = {article 105367}, year = 2019, month = nov, doi = {10.1016/j.ast.2019.105367}, comment = {Mentions AA but uses only IA with subdivision.}, abstract = {Uncertainties to varying degrees often exist in the analysis and design problems of vibro-acoustic systems. This paper investigates the high-frequency vibro-acoustic analysis problem with large levels of fuzzy parameters. By employing the level-cut strategy, the energy balance equations with fuzzy parameters are firstly transformed into the interval forms. Then, by combining the perturbation method, the Sherman-Morrison-Woodbury formula, and the statistical energy analysis, a modified perturbation statistical energy analysis (MPSEA) is proposed to predict the uncertainty propagation of the energy flow in vibro-acoustic systems with fuzzy parameters. Further, the sub-interval technique is introduced in MPSEA to improve its accuracy for the systems with large levels of fuzzy parameters. Numerical simulations of a plate-cavity system and a launcher fairing model with fuzzy parameters are presented to demonstrate the efficiency and accuracy of proposed method. 
Results indicate that the proposed MPSEA with a suitable number of sub-intervals is capable of predicting the energy flow in vibro-acoustic system with large fuzzy parameter accurately and efficiently.} } @article{che-fei-wus-liy-19-aa-trans, author = {Chen, Qiang and Fei, Qingguo and Wu, Shaoqing and Li, Yanbin}, title = {Prediction of Transient Statistical Energy Response for Two-Subsystem Models Considering Interval Uncertainty}, journal = {Journal of Verification, Validation and Uncertainty Quantification}, volume = {4}, number = {2}, pages = {article 021004, 8 pages}, year = 2019, month = jun, doi = {10.1115/1.4045201}, comment = {Uses AA to compute the high-frequency transient response of solid parts.}, abstract = {The transient response analysis is important for the design and evaluation of uncertain engineering systems under impact excitations. In this paper, statistical energy analysis (SEA) is developed to evaluate the high-frequency transient energy response of two-subsystem models considering interval uncertainties. Affine arithmetic (AA) and a subinterval technique are introduced into SEA to improve the computational accuracy. Numerical simulations on a coupled-plate and a plate-cavity system considering interval uncertainties are performed. The analysis precision of the proposed approach is validated by Monte Carlo (MC) method. The results show that the analysis precision of the proposed method decreases with the increasing uncertainty level of parameters. 
The computational accuracy of the proposed method can be significantly improved by employing AA and subinterval technique.} } @article{che-fei-wus-liy-19-aa-staterg, author = {Chen, Qiang and Fei, Qingguo and Wu, Shaoqing and Li, Yanbin}, title = {Statistical Energy Analysis for the Vibro-Acoustic System with Interval Parameters}, journal = {Journal of Aircraft}, volume = {56}, number = {5}, pages = {1869-1879}, year = 2019, month = sep, doi = {10.2514/1.C035351}, comment = {Proposes method (AIPSEA) that combines AA with ``statistical energy analysis'' to compute the high-frequency response of a vibro-acoustic system.}, abstract = {Uncertainty quantification on the dynamic response of vibro-acoustic systems draws increasing attention in engineering applications. To calculate the high-frequency energy response of a vibro-acoustic system with interval parameters, an affine interval perturbation statistical energy analysis (AIPSEA) is proposed by combining the interval perturbation analysis, the affine arithmetic, and the statistical energy analysis (SEA). The subinterval technique is introduced in AIPSEA to improve the computational accuracy of AIPSEA when the levels of uncertainty are high. Numerical simulations of a plate-cavity coupled system and a simplified launch vehicle fairing with interval parameters are conducted. The accuracy of proposed method is verified by the Monte Carlo statistical energy analysis (MCSEA). Simulation results indicate that the accuracy of interval perturbation statistical energy analysis (IPSEA) and AIPSEA decreases with the increasing uncertainty levels. AIPSEA has better performance than IPSEA in the vibro-acoustic analysis of the system with interval parameters. 
By employing the subinterval technique, the computational accuracy of AIPSEA is significantly improved.} } @mastersthesis{che-08-aa-orthmux-th, author = {Chen, Yanyu}, title = {Bit Width Determination Using Complexity Information for Orthogonal Multiplexing Systems}, school = {National Jiaotong University, TW}, year = 2008, month = aug, url = {https://ir.nctu.edu.tw/bitstream/11536/38189/1/166701.pdf}, note = {Advisor: Jingyang Zhou. Names also romanized as Yen-yu Chen and Jing-Yan Jou}, comment = {Mentions AA but says it is not usable because of loops.}, abstract = {An automatic algorithm using complexity information for the floating point to fixed point conversion is proposed. The goal of the proposed algorithm is to minimize the hardware complexity and reduce the simulation times. The algorithm considers both the integer bit width and the fraction bit width. For the integer bit width, the algorithm identifies numbers of the integer bit width to prevent the overflow. For the fraction bit width, the algorithm uses the lower bound and the upper bound to find the results. We apply the proposed algorithm to the OFDM system. 
The results show that the proposed algorithm reduces almost 30{\%} simulation time than complexity-and-distortion measure and sequential search method.} } @inproceedings{che-oua-zid-19-aa-glopt, author = {Chebbah, Mohammed and Ouanes, Mohand and Zidna, Ahmed}, title = {Improvement Combined with (Analysis, {CSP}, Arithmetic Analysis and Interval) of Simulations for Efficient Combination of Two Lower Bound Functions in (Univariate, multivariate) Global Optimization and Generalization}, booktitle = {Proceedings of the International Conference Advances on Applied Mathematics (ICAAM)}, publisher = {Tunisian Association of Applied and Industrial Mathematics (ATMAI)}, year = 2019, month = dec, location = {Sfax, Tunisia}, pages = {9}, note = {No DOI or page numbers?}, url = {https://www.ummto.dz/dspace/handle/ummto/11525}, comment = {Mentions AA but it is not clear that they use it. Very badly written paper.}, abstract = {Univariate global optimization problems attract attention of researchers. Several methods have been studied in the literature for univariate global optimization problems. Optimization in $\mathbb{R}$ presents the same difficulty as in $\mathbb{R}^n$. Many algorithms are directed in this direction. For cutting methods in Global optimization or Optimsation gradient method in general . In this work, we propose to improve: The article submitted: (Simulations for efficient combination of two lower bound functions in univariate global optimization. AIP Conference Proceedings 1863, 250004 (2017) ; https ://doi.org/10.1063/1.4992412, (2017).) In this context too, we will accelerate the speed of the Algorithm for better complexity with technics (CSP, Arithmetic analysis and Interval and another). 
It should be noted that, we have made conclusive simulations in this direction.} } @misc{men-luk-fuh-08-aa-minbit-sl, author = {Mencer, Oskar and Luk, Wayne and Fu, Haohuan}, title = {Optimizing Hardware Function Evaluation}, howpublished = {Online document; slides from a tutorial.}, year = 2008, month = mar, url = {https://pdfs.semanticscholar.org/8081/e59bd6555350b60a1c3431263d4520eabce3.pdf}, note = {Google Scholar got the authors all wrong.}, comment = {Describes MiniBit, an AA-based tool to determine the min number of bits needed to avoid overflow and contain the roundoff error for each variable in a DSP FPGA.}, abstract = {1. Evaluation of Elementary Functions; 2. Automatic Exploration of FPGA Designs; 3. Hierarchical Segmentation; 4. Bit-width Optimization; 5. Further Reading} } @article{che-tao-xia-17-aa-volt, author = {Chen, Pengwei and Tao, Shun and Xiao, Xiangning}, title = {Uncertainty Level of Voltage in Distribution Network: {An} Interval Model and Application in Centralised Storage Location}, journal = {IET Generation, Transmission {\&} Distribution}, volume = {11}, number = {14}, pages = {3628-3636}, year = 2017, month = sep, doi = {10.1049/iet-gtd.2017.0445}, comment = {Uses AA to solve circuit equations.}, abstract = {Intermittent distributed generations, stochastic loads and other uncertainties have uncertain impacts on the node voltages in distribution network. To quantitatively assess the impact boundaries of uncertainties on node voltages, an interval model is proposed in this study from the perspective of linear description. The model parameters defined as the voltage influence intervals are solved for as follows: first, conduct the analysis sample collection through the affine power flow and the expected operating point estimated by point-estimate method; then, calculate the voltage influence intervals via interval regression for the above sample. 
To meet the different demand of solving efficiency and conservative level for parameter identification, two solution methods of interval regression based on quadratic programming and stochastic programming are proposed, respectively. To further illustrate the practical application of the interval model, an optimal location method for centralized energy storage is presented. The verification results based on the modified IEEE 33-bus system and practical 113-bus system demonstrate the interval model and its solution methods have an excellent performance in assessing the impact boundaries of uncertainties. The application case also shows that the proposed decision method can produce a global and robust location scheme with considering the bound uncertainty of distributed generation output fluctuations.} } @article{che-xia-wan-18-aa-powflow, author = {Chen, Pengwei and Xiao, Xiangning and Wang, Xuhui}, title = {Dynamic Optimal Power Flow Model Incorporating Interval Uncertainty Applied to Distribution Network}, journal = {IET Generation, Transmission {\&} Distribution}, volume = {12}, number = {12}, pages = {2926-2936}, doi = {10.1049/iet-gtd.2017.1874}, year = 2018, month = jul, comment = {Combines AA with Taylor(?) for optimizing power flow in presence of uncertainties.}, abstract = {Dynamic optimal power flow (DOPF) in active distribution networks generally relies on a perfect forecasting of uncertainties such as intermittent distributed generations and time-varying loads, which is generally difficult to achieve in practise. To make DOPF possess the ability to deal with uncertainties, especially for the satisfaction of operating constraints in an uncertain environment, an interval DOPF (I-DOPF) model is derived in this study, by using affine arithmetic and interval Taylor expansion. To solve the I-DOPF problem efficiently, the solving method based on successive linear approximation (SLA) and distributed optimisation strategy is further discussed. 
The proposed I-DOPF model and its solving method are subsequently applied to a modified IEEE 33-bus network and a real 113-bus distribution network. The simulation results demonstrate that the I-DOPF model has a good performance on boundary constraint satisfaction under uncertainties; the SLA-based solving method can be well integrated with distributed optimisation to meet the practical requirements of data exchange in large-scale active distribution networks.} } @article{che-xia-wan-19-aa-renpow, author = {Chen, Pengwei and Xiao, Xiangning and Wang, Xuhui}, title = {Interval Optimal Power Flow Applied to Distribution Networks Under Uncertainty of Loads and Renewable Resources}, journal = {Journal of Modern Power Systems and Clean Energy}, volume = {7}, pages = {139-150}, doi = {10.1007/s40565-018-0462-9}, year = 2019, comment = {Combines AA with Taylor(?) for optimizing power flow in presence of uncertainties.}, abstract = {Optimal power flow (OPF) has been used for energy dispatching in active distribution networks. To satisfy constraints fully and achieve strict operational bounds under the uncertainties from loads and sources, this paper derives an interval optimal power flow (I-OPF) method employing affine arithmetic and interval Taylor expansion. An enhanced I-OPF method based on successive linear approximation and second-order cone programming is developed to improve solution accuracy. The proposed methods are benchmarked against Monte Carlo simulation (MCS) and stochastic OPF. 
Tests on a modified IEEE 33-bus system and a real 113-bus distribution network validate the effectiveness and applicability of the proposed methods.} } @article{che-zuo-yan-wei-wan-22-aa-impdiv, author = {Cheng, Shan and Zuo, Xianwang and Yang, Kun and Wei, Zhaobin and Wang, Rui}, title = {Improved Affine Arithmetic-Based Power Flow Computation for Distribution Systems Considering Uncertainties}, journal = {IEEE Systems Journal}, year = 2022, month = jun, pages = {1-10}, doi = {10.1109/JSYST.2022.3176461}, comment = {Develops an ``improved AA'' that ``linearizes the nonlinear operation of affine division'' and uses it for power flow computations in distributed energy resources (DERs).}, abstract = {The randomness characteristics of large-scale grid-integrated intermittent distributed energy resources (DERs) and loads for power consumption pose significant power flow uncertainties to distribution networks. Although affine arithmetic (AA) is popularly and effectively used for uncertain power flow analysis, its nonlinear operation increases the conservatism of the identified solutions. Based on the interval Taylor formula, an improved AA (IAA) is developed to decrease conservatism and then an IAA-based power flow computation is proposed for distribution systems considering uncertainties. First, a multinoise elements affine model is established to predict the DERs’ generation, which considers the different effects and correlations of multiple uncertain factors of DERs. Second, an IAA based on the interval Taylor formula was proposed, which linearizes the nonlinear operation of affine division, and avoids the generation of new noise elements. Finally, the IAA is combined with forward/backward power flow computation to solve the uncertain power flow of the distribution network. The effectiveness and advantages of the proposed method are examined on IEEE 33, 69, and 118 systems. 
The results demonstrate that the proposed method has lower conservatism and higher computational efficiency, and it can provide guidance for power system operators to effectively monitor and control distribution systems under various uncertainties.} } @inproceedings{chi-gop-rak-sol-14-aa-maxfper, author = {Chiang, Wei-Fan and Gopalakrishnan, Ganesh and Rakamari{\'c}, Zvonimir and Solovyev, Alexey}, title = {Efficient Search for Inputs Causing High Floating-Point Errors}, booktitle = {Proceedings of the 19th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming (PPoPP)}, location = {Orlando, US}, pages = {43-52}, doi = {10.1145/2555243.2555265}, year = 2014, month = feb, comment = {Claims that AA bounds are too pessimistic so they develop their own method}, abstract = {Tools for floating-point error estimation are fundamental to program understanding and optimization. In this paper, we focus on tools for determining the input settings to a floating point routine that maximizes its result error. Such tools can help support activities such as precision allocation, performance optimization, and auto-tuning. We benchmark current abstraction-based precision analysis methods, and show that they often do not work at scale, or generate highly pessimistic error estimates, often caused by non-linear operators or complex input constraints that define the set of legal inputs. We show that while concrete-testing-based error estimation methods based on maintaining shadow values at higher precision can search out higher error-inducing inputs, suitable heuristic search guidance is key to finding higher errors. We develop a heuristic search algorithm called Binary Guided Random Testing (BGRT). In 45 of the 48 total benchmarks, including many real-world routines, BGRT returns higher guaranteed errors. 
We also evaluate BGRT against two other heuristic search methods called ILS and PSO, obtaining better results.} } @inproceedings{chi-gri-rad-15-aa-cyber, author = {Chipman, William and Grimm, Christoph and Radoji{\v{c}}i{\'c}, {\v{C}}arna}, title = {Coverage of Uncertainties in Cyber-Physical Systems}, booktitle = {Proceedings of the 8th GMM/ITG/GI Symposium on Reliability by Design}, isbn = {978-3-8007-4071-0}, year = 2015, month = sep, pages = {8}, note = {No DOI?}, location = {Siegen, DE}, comment = {Says that AA offers good tradeoff between efficiency and accuracy}, abstract = {Cyber-physical systems (CPS) consist of software systems and the physical entities that the software controls. CPS have become ubiquitous; the systems can be found in diverse environments. Because of the multitude of components, failures, changes or inaccuracies are inevitable but with the multitude of components also comes the ability to build resilience into the system. An unfortunate side-effect of this resiliency is the addition of unforeseen changes and deviations to the behavior of the system. Many of these cyber-physical systems (CPS) control or contribute significantly to the control of critical systems. In order to achieve first 'time right' system deployment, the accuracy of the models, and the validation of the application fitness is at least as important as the CPS modeling and accuracy. In this paper we discuss and give an overview of methods that strive for validation of CPS systems with increased coverage. 
In particular, we focus on modeling, verification and validation of uncertainties both known and unknown.} } @article{cic-lan-15-aa-vibro, author = {Cicirello, Alice and Langley, Robin S.}, title = {Vibro-Acoustic Response of Engineering Structures with Mixed Type of Probabilistic and Nonprobabilistic Uncertainty Models}, journal = {Journal of Risk and Uncertainty in Engineering Systems, Part B: Mechanical Engineering}, volume = {1}, number = {4}, pages = {article 041001, 13 pages}, doi = {10.1115/1.4030470}, year = 2015, month = dec, comment = {Compares several methods to analyze the effect of uncertainties on engineering structures, including AA.}, abstract = {The response of engineering structures is often sensitive to uncertainty in the system properties. The information on the uncertain parameters is frequently incomplete, limited to experts’ opinion, based on previous knowledge, or a combination of those. There have been recent advances in building mathematical models of structures, which combine nonparametric probabilistic and parametric (probabilistic or nonprobabilistic) models of uncertainty. Here, several strategies for establishing the response of random systems whose uncertainties are described by parametric probabilistic and nonprobabilistic approaches in combination with a nonparametric probabilistic approach are presented. 
The proposed strategies are illustrated by analyzing a built-up plate structure.} } @article{cla-tot-has-ake-10-aa-compil, author = {Clarberg, Petrik and Toth, Robert and Hasselgren, Jon and Akenine-M{\"o}ller, Tomas}, title = {An Optimizing Compiler for Automatic Shader Bounding}, journal = {Computer Graphics Forum}, volume = {29}, number = {4}, pages = {1259-1268}, doi = {10.1111/j.1467-8659.2010.01721.x}, year = 2010, month = jun, comment = {Mentions AA but apparently does not use it?}, abstract = {Programmable shading provides artistic control over materials and geometry, but the black box nature of shaders makes some rendering optimizations difficult to apply. In many cases, it is desirable to compute bounds of shaders in order to speed up rendering. A bounding shader can be automatically derived from the original shader by a compiler using interval analysis, but creating optimized interval arithmetic code is non-trivial. A key insight in this paper is that shaders contain metadata that can be automatically extracted by the compiler using data flow analysis. We present a number of domain-specific optimizations that make the generated code faster, while computing the same bounds as before. This enables a wider use and opens up possibilities for more efficient rendering. 
Our results show that on average 42-44{\%} of the shader instructions can be eliminated for a common use case: single-sided bounding shaders used in lightcuts and importance sampling.} } @inproceedings{com-rak-11-aa-envlin, author = {Combastel, Christophe and Raka, Sid-Ahmed}, title = {On Computing Envelopes for Discrete-Time Linear Systems with Affine Parametric Uncertainties and Bounded Inputs}, booktitle = {Proceedings of the 18th IFAC World Congress}, series = {IFAC Proceedings Volumes}, volume = {44}, number = {1}, pages = {4525-4533}, year = 2011, month = jan, doi = {10.3182/20110828-6-IT-1002.02585}, comment = {Uses AA (called ``zonotopes'') for modeling uncertainties in analysis of linear dynamic systems. Tests on a 6th order oscillating mass-spring system. Instead of a matrix of affine forms, uses an affine form where the coefficients are matrices?}, abstract = {The computation of envelopes enclosing the possible states and/or outputs of a class of uncertain linear dynamical systems is the subject of this paper. The resulting algorithm can be useful in several areas of control systems such as verification of safety properties and fault diagnosis (in order to choose suitable thresholds on some residuals, for instance). A particular class of polytopes, zonotopes, can be used to implicitly represent the computed sets. The related reachability algorithms have shown to be well suited to control the wrapping effect in the case of linear dynamical systems. However, parametric uncertainties, when taken into account, are often modeled by interval matrices which lead to a loss of parametric dependencies and result in the computation of rather pessimistic sets. The main contribution of this paper consists in extending an existing algorithm based on zonotopes so that it can efficiently propagate structured parametric uncertainties. 
A 6th order oscillating mass-spring system illustrates the resulting control of the wrapping effect by comparison with Monte-Carlo simulations.} } @article{com-zol-19-aa-kalman, author = {Combastel, Christophe and Zolghadri, Ali}, title = {A Distributed {Kalman} Filter with Symbolic Zonotopes and Unique Symbols Provider for Robust State Estimation in {CPS}}, journal = {International Journal of Control}, volume = {93}, number = {11}, month = dec, pages = {2596-2612}, year = 2019, doi = {10.1080/00207179.2019.1707278}, note = {Print publication on dec/2020}, comment = {Uses ``symbolic zonotopes'' or ``s-zonotopes'' (affine forms?) for robust state estimation in process control applications. Introduces ``matrices with labelled columns''.}, abstract = {Robust state estimation is addressed in a noisy environment and within a distributed and networked architecture. Both bounded disturbances and random noises are considered. A Distributed Zonotopic and Gaussian Kalman Filter (DZG-KF) is proposed where each network node implements a local state estimator using symbolic Zonotopes and Gaussian noise Mergers (s-ZGM), a class of Set-membership and Probabilistic Mergers (SPM). Each network node communicates its own state information only to its neighbours. The proposed system includes a dedicated service called Unique Symbols Provider (USP) giving unique identifiers. It also includes Matrices with Labelled Columns (MLC) featuring column-wise sparsity, and symbolic zonotopes (s-zonotopes). This significantly enhances the propagation of uncertainties and preserves global dependencies that would otherwise be lost (or impeded) by the peer-to-peer communication through the network. A number of other network-related constraints can be managed within this framework. 
Numerical simulations show significant improvements compared to a non-symbolic approach.} } @article{con-kin-nic-11-aa-nrfpga, author = {Constantinides, George and Kinsman, Adam and Nicolici, Nicola}, title = {Numerical data representations for FPGA-based scientific computing}, journal = {IEEE Design {\&} Test of Computers}, volume = {28}, number = {4}, pages = {8-17}, month = aug, doi = {10.1109/MDT.2011.48}, year = 2011, comment = {Overview article? Mentions AA but not deeply?}, abstract = {Data representation is an important problem for scientific computing problems that are mapped to FPGAs. The key challenge here is to derive best trade-offs between precision and performance. This article describes methods to manage the complexity associated with the analysis of data representation techniques so that we thereby understand precision/performance trade-offs.} } @mastersthesis{cos-14-aa-juros-junk, author = {Costa, Rivelino Duarte}, title = {Uma Abordagem da Matem{\'a}tica Financeira no Ensino M{\'e}dio para Explicitar as Metodologias do Fundo de Financiamento Estudantil - {FIES}}, school = {Universidade Federal do Cear{\'a}}, year = 2014, month = jun, note = {Advisor: Fl{\'a}vio Fran{\c{c}}a Cruz}, comment = {Spurious Google Scholar hit. By ``affine arithmetic'' means just affine approximations.}, abstract = {This work aims to propose a methodology for learning present in the calculations of Financial Aid (FIES) tables, based on financial mathematics taught in high school. The work is divided into six parts: introduction, simple capitalization and composed, present value and future value, amortization of loans, the FIES tables and closing remarks. As a preamble, we have included the history of finance and the simple and compound capitalization. 
The proposal also suggests an understanding of the mathematical content of interconnected way, such as: simple interest with affine arithmetic progression and function, compound interest and exponential function with geometric progression, without forgetting the present and future value amount, representing the value right of capital in a given period. Thus, it is intended to achieve an understanding of the variations in the value of money during the time period and repayment of loans, facts that will bring the understanding of the type of system used in financing if Amortization System Constant Amortization System or French (PRICE).}, url = {https://repositorio.ufc.br/handle/riufc/8828}, quotes = {... The proposal also suggests an understanding of the mathematical content of interconnected way, such as: simple interest with affine arithmetic progression and function, compound ...} } @inproceedings{con-dah-rak-roc-21-aa-fpround, author = {Constantinides, George and Dahlqvist, Fredrik and Rakamari{\'c}, Zvonimir and Salvia, Rocco}, title = {Rigorous Roundoff Error Analysis of Probabilistic Floating-Point Computations}, booktitle = {Proceedings of the 33rd International Conference on Computer Aided Verification (CAV) - Part II}, isbn = {978-3-030-81687-2}, series = {Lecture Notes in Computer Science}, volume = {12760}, year = 2021, month = jul, location = {Virtual meeting}, doi = {10.1007/978-3-030-81688-9_29}, pages = {626-650}, comment = {Uses AA to obtain tight bounds to floating-point roundoff errors}, abstract = {We present a detailed study of roundoff errors in probabilistic floating-point computations. We derive closed-form expressions for the distribution of roundoff errors associated with a random variable, and we prove that roundoff errors are generally close to being uncorrelated with their generating distribution. 
Based on these theoretical advances, we propose a model of IEEE floating-point arithmetic for numerical expressions with probabilistic inputs and an algorithm for evaluating this model. Our algorithm provides rigorous bounds to the output and error distributions of arithmetic expressions over random variables, evaluated in the presence of roundoff errors. It keeps track of complex dependencies between random variables using an SMT solver, and is capable of providing sound but tight probabilistic bounds to roundoff errors using symbolic affine arithmetic. We implemented the algorithm in the PAF tool, and evaluated it on FPBench, a standard benchmark suite for the analysis of roundoff errors. Our evaluation shows that PAF computes tighter bounds than current state-of-the-art on almost all benchmarks.} } @inproceedings{cou-bha-luk-con-car-din-pet-12-aa-fpga, author = {Coutinho, Jose G. F. and Bhattacharya, Sujit and Luk, Wayne and Constantinides, George A. and Cardoso, Jo{\~a}o M. P. and Carvalho, Tiago and Diniz, Pedro C. and Petrov, Zlatko}, title = {Resource-Efficient Designs using an Aspect-Oriented Approach}, booktitle = {Proceedings of the 15th IEEE International Conference on Computational Science and Engineering (CSE)}, pages = {399-406}, doi = {10.1109/ICCSE.2012.62}, year = 2012, month = dec, comment = {Optimization of FPGA designs. Mentions AA but does not use it?}, abstract = {The increasing capability and flexibility of reconfigurable hardware, such as Field-Programmable Gate Arrays (FPGAs), give developers a wide range of architectural choices that can satisfy various non-functional requirements, such as those involving performance, resource and energy efficiency. This paper describes a novel approach, based on an aspect-oriented language called LARA, that enables systematic coding and reuse of optimisation strategies that address such non-functional requirements. Our approach will be presented in three steps. 
First, this approach is shown to support design space exploration (DSE) which makes use of various compilation and optimisation tools, through the deployment of a master weaver and multiple slave weavers. Second, we present three compilation and synthesis strategies for word-length optimisation based on this approach, which involve three tools: the WLOT word-length optimiser deploying a combination of analytical methods, the AutoESL tool compiling C-based descriptions into hardware, and the ISE tool targeting Xilinx devices. Third, the effectiveness of the approach is evaluated. In addition to promoting design re-use, our approach can be used to automatically produce a range of designs with different trade-offs in resource usage and numerical accuracy according to a given LARA-based strategy. For example, one implementation for a sub band filter in an MPEG encoder provides 31{\%} savings in area using non-uniform quantizers when compared to a floating-point description with a similar error specification at the output. Another fixed-point implementation for the grid Iterate kernel used by a 3D path planning application consumed 25{\%} less resources when the error specification is increased from $10^{-6}$ to $10^{-4}$.} } @inproceedings{cow-dra-har-20-aa-optvis, author = {Coward, Samuel and Drane, Theo and Harel, Yoav}, title = {Automatic Design Space Exploration for an Error Tolerant Application}, booktitle = {Proceedings of the 27th IEEE Symposium on Computer Arithmetic (ARITH)}, location = {Virtual meeting.}, doi = {10.1109/ARITH48897.2020.00025}, pages = {117-120}, month = jun, year = 2020, comment = {Can't tell whether it uses AA or just comments on it.}, abstract = {Creating optimized hardware for error tolerant applications presents significant challenges as well as opportunities. Many algorithms in computer graphics {\&} vision are error tolerant, as their application level correctness ultimately rests on human perception. 
This error tolerance can be exploited in reducing hardware implementation cost. The challenge is how to explore the space of application level correct designs to determine the optimized hardware architecture. This paper puts forward an approach to automatically explore the space which maximally exploits the acceptable error to minimize hardware cost for a particular graphics algorithm - Level-Of-Detail. Results, so far, have shown a 26{\%} hardware area improvement.} } @inproceedings{cox-san-cha-12-aa-bitprec, author = {Cox, Arlen and Sankaranarayanan, Sriram and Chang, Bor-Yuh Evan}, title = {A Bit Too Precise? {Bounded} Verification of Quantized Digital Filters}, booktitle = {Proceedings of the 18th International Conference on Tools and Algorithms for the Construction and Analysis of Systems (TACAS)}, series = {Lecture Notes in Computer Science}, volume = {7214}, year = 2012, month = mar, location = {Tallinn, EE}, pages = {33-47}, doi = {10.1007/978-3-642-28756-5_4}, comment = {Concerned with validating digital processing circuits for overflows, wasted bits, non-convergence (loops), etc. Wants to get ``bit-precise'' results. Mentions AA as improvement but in the end does not use it? Instead uses satisfiability-modulo-theories (SMT) solvers?}, abstract = {Digital filters are simple yet ubiquitous components of a wide variety of digital processing and control systems. Errors in the filters can be catastrophic. Traditionally digital filters have been verified using methods from control theory and extensive testing. We study two alternative verification techniques: bit-precise analysis and real-valued error approximations. In this paper, we empirically evaluate several variants of these two fundamental approaches for verifying fixed-point implementations of digital filters. We design our comparison to reveal the best possible approach towards verifying real-world designs of infinite impulse response (IIR) digital filters. 
Our study reveals broader insights into cases where bit-reasoning is absolutely necessary and suggests efficient approaches using modern satisfiability-modulo-theories (SMT) solvers.} } @inproceedings{cro-dal-mar-05-ecc-junk, author = {F Crowe and A Daly and W Marnane}, title = {A Scalable Dual Mode Arithmetic Unit for Public Key Cryptosystems}, booktitle = {Proceedings of the International Conference on Information Technology: Coding and Computing (ITCC)}, year = 2005, month = apr, pages = {568-573}, doi = {10.1109/ITCC.2005.33}, comment = {Spurious Google Scholar hit. Modulo arithmetic for Elliptic Curve Cryptography}, abstract = {Elliptic curve cryptosystems (ECC) have become popular in recent years due to their smaller key sizes than traditional public key schemes such as RSA. However the gap between the sizes of these systems is increasing as security requirements become more demanding due to cryptanalytic advances. At current security levels of 80 bits, the corresponding key sizes for ECC and RSA are J60 and 1,024 bits respectively. Although the ECC key size is attractive for embedded applications, the popularity of RSA means that it will remain in legacy applications for the foreseeable future. This paper proposes a dual mode arithmetic unit capable of supporting the underlying field operations performed by both the ECC and RSA public key schemes. A hardware optimized version of the Montgomery algorithm is employed to perform modular multiplication efficiently. 
The disparity in key sizes is addressed by combining the dual processors to operate in parallel for ECC or in a pipelined series for RSA.} } @article{cse-bam-hat-22-aa-chaos, author = {Csendes, Tibor and B{\'a}nhelyi, Bal{\'a}zs and Hatvani, L{\'a}szl{\'o}}, title = {Towards a Computer-Assisted Proof for Chaos in Forced Damped Pendulum Equation}, journal = {Journal of Computational and Applied Mathematics}, year = 2007, volume = 199, number = 2, month = feb, doi = {10.1016/j.cam.2005.08.046}, pages = {378-383}, comment = {Uses IA only. LHF says he saw this one.}, abstract = {We report on the first steps made towards the computational proof of the chaotic behaviour of the forced damped pendulum. Although, chaos for this pendulum was being conjectured for long, and it has been plausible on the basis of numerical simulations, there is no rigorous proof for it. In the present paper we provide computational details on a fitting model and on a verified method of solution. We also give guaranteed reliability solutions showing some trajectory properties necessary for complicate chaotic behaviour.} } @inproceedings{dal-raf-rat-gri-koc-22-aa-sysmd, author = {Dalecke, {\v{S}}ando and Rafique, Khushnood Adil and Ratzke, Axel and Grimm, Christoph and Koch, Johannes}, title = {{SysMD}: {Towards} ``Inclusive'' Systems Engineering}, booktitle = {Proceedings of the 5th IEEE International Conference on Industrial Cyber-Physical Systems (ICPS)}, year = 2022, month = may, pages = {1-6}, location = {Virtual meeting}, doi = {10.1109/ICPS51978.2022.9816856}, comment = {``The underlying constraint net is a tree network based Affine Arithmetic Decision Diagrams...'' }, abstract = {This paper gives an overview of SysMD. SysMD is a tool and a SysML v2 inspired language. It is a modeling tool specifically aimed at domain experts with little to no high level systems modeling expertise. 
The language is designed to use intuitive, near natural-language statements and is able to propagate constraints throughout the model by continuously solving a constraint net. Furthermore, the SysMD tool aims to use a recommender system to incentivize the users to document their work in markdown as the tool gives recommendations of existing elements and relationships applicable to the current statements. This structures the knowledge in an easy to use, highly connected, way. This paper describes the syntax and semantics of the language, as well as the reasoning why it was designed in this specific way.} } @inproceedings{dam-kel-06-aa-raytr, author = {Dammertz, Holger and Keller, Alexander}, title = {Improving Ray Tracing Precision by Object Space Intersection Computation}, bookttile = {Proceedings of the IEEE Symposium on Interactive Ray Tracing}, location = {Salt Lake City, US}, month = sep, pages = {25-31}, doi = {10.1109/RT.2006.280211}, year = 2006, comment = {Claims that AA or IA can be avoided if the intervals are subdivided finely enough. Seems bogus.}, abstract = {Instead of computing intersections along a ray, an algorithm is proposed that determines a point of intersection in object space. The method is based on the classical refinement of a hierarchy of axis-aligned bounding boxes that is computed on the fly. Typical rendering artifacts are avoided by taking into consideration the precision of floating point arithmetic. In addition the method lends itself to a simple solution of the self-intersection problem. 
Considering the obtained precision the algorithm is efficient, simple to use, and to implement} } @inproceedings{dam-mar-pan-qiu-san-tat-16-aa-fpbench, author = {Damouche, Nasrine and Martel, Matthieu and Panchekha, Pavel and Qiu, Chen and Sanchez-Stern, Alexander and Tatlock, Zachary}, title = {Toward a Standard Benchmark Format and Suite for Floating-Point Analysis}, booktitle = {Revised Selected Papers of the 9th International Workshop on Numerical Software Verification (NSV)}, location = {Toronto, CA}, series = {Lecture Notes in Computer Science}, volume = {10152}, month = jul, year = 2016, pages = {63–77}, doi = {10.1007/978-3-319-54292-8_6}, comment = {Describes FPBench, an evolving benchmark for ``validation and optimization of numerical accuracy in floating-point computations'', whatever that means. Mentions AA but it is not clear whether they use it.}, abstract = {We introduce FPBench, a standard benchmark format for validation and optimization of numerical accuracy in floating-point computations. FPBench is a first step toward addressing an increasing need in our community for comparisons and combinations of tools from different application domains. To this end, FPBench provides a basic floating-point benchmark format and accuracy measures for comparing different tools. The FPBench format and measures allow comparing and composing different floating-point tools. We describe the FPBench format and measures and show that FPBench expresses benchmarks from recent papers in the literature, by building an initial benchmark suite drawn from these papers. 
We intend for FPBench to grow into a standard benchmark suite for the members of the floating-point tools research community.} } @inproceedings{dam-wal-06-aa-soc, author = {Damm, Markus and Waldschmidt, Klaus}, title = {Robustness in {SOC} Design}, booktitle = {Proceedings of the 9th EUROMICRO Conference on Digital System Design (DSD)}, year = 2006, location = {Dubrovnik, HR}, month = aug, doi = {10.1109/DSD.2006.82}, pages = {27-36}, comment = {SOC $=$ System on Chip. Says that AA ``seems very promising''.}, abstract = {Embedded systems, ubiquitous computing and networked architectures are getting more and more important within our society. System parts are often completely implemented as integrated circuits (SoC = System on chip). Consequently, their complexity and heterogeneity have grown dramatically in the recent past. Moreover, embedded systems are used in environments where parameters are subject to continuous changes. Hence, they have to respond to environmental requirements and changes of their own system parameters in a robust manner. To gain this robustness and to cope with the design methodology, formal measures and metrics are of great importance. Such measures need to be combined with the still increasing requirement for computing performance. The implementation of robust features requires adaptivity by reconfiguration and parallelism. We will call the corresponding systems adaptive computing systems (ACS). The ACS class offers the opportunity to adapt the whole architecture or parts of the architecture to the changing needs of applications or changing environments. The paper addresses some of these aspects and presents some ideas for modelling and designing adaptive computing systems (ACS). Especially measures, metrics and taxonomies for reliability, adaptivity and robustness are analysed and discussed. Robust behaviour of electronic systems will contribute to significantly higher trust of the society in modern technology. 
Therefore it is of very high eco-nomical relevance for industry and commerce.} } @phdthesis{dar-14-aa-progunc-th, author = {Darulov{\'a}, Eva}, title = {Programming with Numerical Uncertainties}, school = {{\'E}cole Polytechnique F{\'e}d{\'e}rale de Lausanne}, note = {Advisor: Viktor Kuncak}, year = 2014, month = nov, url = {https://infoscience.epfl.ch/record/203570}, comment = {Combines AA with SMT.}, abstract = {Numerical software, common in scientific computing or embedded systems, inevitably uses an approximation of the real arithmetic in which most algorithms are designed. In many domains, roundoff errors are not the only source of inaccuracy and measurement as well as truncation errors further increase the uncertainty of the computed results. Adequate tools are needed to help users select suitable approximations (data types and algorithms) which satisfy their accuracy requirements, especially for safety- critical applications. Determining that a computation produces accurate results is challenging. Roundoff errors and error propagation depend on the ranges of variables in complex and non-obvious ways; even determining these ranges accurately for nonlinear programs poses a significant challenge. In numerical loops, roundoff errors grow, in general, unboundedly. Finally, due to numerical errors, the control flow in the finite-precision implementation may diverge from the ideal real-valued one by taking a different branch and produce a result that is far-off of the expected one. In this thesis, we present techniques and tools for automated and sound analysis, verification and synthesis of numerical programs. We focus on numerical errors due to roundoff from floating-point and fixed-point arithmetic, external input uncertainties or truncation errors. Our work uses interval or affine arithmetic together with Satisfiability Modulo Theories (SMT) technology as well as analytical properties of the underlying mathematical problems. 
This combination of techniques enables us to compute sound and yet accurate error bounds for nonlinear computations, determine closed-form symbolic invariants for unbounded loops and quantify the effects of discontinuities on numerical errors. We can furthermore certify the results of self-correcting iterative algorithms. Accuracy usually comes at the expense of resource efficiency: more precise data types need more time, space and energy. We propose a programming model where the scientist writes his or her numerical program in a real-valued specification language with explicit error annotations. It is then the task of our verifying compiler to select a suitable floating-point or fixed-point data type which guarantees the needed accuracy. Sometimes accuracy can be gained by simply re-arranging the non-associative finite-precision computation. We present a scalable technique that searches for a more optimal evaluation order and show that the gains can be substantial. We have implemented all our techniques and evaluated them on a number of benchmarks from scientific computing and embedded systems, with promising results.} } @inproceedings{dar-hor-sha-18-aa-mixprec, author = {Darulov{\'a}, Eva and Horn, Einar and Sharma, Saksham}, title = {Sound Mixed-Precision Optimization with Rewriting}, booktitle = {Proceedings of the 9th ACM/IEEE International Conference on Cyber-Physical Systems (ICCPS)}, location = {Porto, PT}, pages = {208-219}, doi = {10.1109/ICCPS.2018.00028}, year = 2018, month = apr, comment = {``Fully automatic tool for optimizing the performance of floating-point and fixed-point arithmetic kernels.'' ``For the fitness function, we use the static error analysis ... with IA for computing ranges and AA for tracking errors.''}, abstract = {Finite-precision arithmetic, widely used in embedded systems for numerical calculations, faces an inherent tradeoff between accuracy and efficiency. 
The points in this tradeoff space are determined, among other factors, by different data types but also evaluation orders. To put it simply, the shorter a precision's bit-length, the larger the roundoff error will be, but the faster the program will run. Similarly, the fewer arithmetic operations the program performs, the faster it will run; however, the effect on the roundoff error is less clear-cut. Manually optimizing the efficiency of finite-precision programs while ensuring that results remain accurate enough is challenging. The unintuitive and discrete nature of finite-precision makes estimation of roundoff errors difficult; furthermore the space of possible data types and evaluation orders is prohibitively large. We present the first fully automated and sound technique and tool for optimizing the performance of floating-point and fixed-point arithmetic kernels. Our technique combines rewriting and mixed-precision tuning. Rewriting searches through different evaluation orders to find one which minimizes the roundoff error at no additional runtime cost. Mixed-precision tuning assigns different finite precisions to different variables and operations and thus provides finer-grained control than uniform precision. 
We show that when these two techniques are designed and applied together, they can provide higher performance improvements than each alone.} } @inproceedings{dar-izy-nas-rit-bec-bas-18-aa-daisy, author = {Darulov{\'a}, Eva and Izycheva, Anastasiia and Nasir, Fariha and Ritter, Fabian and Becker, Heiko and Bastian, Robert}, title = {{Daisy}: {Framework} for Analysis and Optimization of Numerical Programs}, note = {Tool paper.}, booktitle = {Proceedings of the 24th International Conference on Tools and Algorithms for the Construction and Analysis of Systems (TACAS), Part I}, location = {Thessaloniki, GR}, series = {Lecture Notes in Computer Science}, volume = {10805}, month = apr, pages = {270-287}, doi = {10.1007/978-3-319-89960-2_15}, year = 2018, comment = {Uses IA, AA, and SMT}, abstract = {Automated techniques for analysis and optimization of finite-precision computations have recently garnered significant interest. Most of these were, however, developed independently. As a consequence, reuse and combination of the techniques is challenging and much of the underlying building blocks have been re-implemented several times, including in our own tools. This paper presents a new framework, called Daisy, which provides in a single tool the main building blocks for accuracy analysis of floating-point and fixed-point computations which have emerged from recent related work. Together with its modular structure and optimization methods, Daisy allows developers to easily recombine, explore and develop new techniques. Daisy’s input language, a subset of Scala, and its limited dependencies make it furthermore user-friendly and portable.} } @techreport{dar-kun-10-aa-scala-tr, author = {Darulov{\'a}, Eva and Kuncak, Viktor}, title = {On Rigorous Numerical Computation as a {Scala} Library}, institution = {{\'E}cole Polytechnique F{\'e}d{\'e}rale de Lausanne}, location = {Lausanne, CH}, year = 2010, month = nov, number = {158754}, 
pages = {11}, comment = {Library in Scala that supports IA and AA.}, abstract = {Modern programming languages have adopted the floating point type as a way to describe computations with real numbers. Thanks to the hardware support, such computations are efficient on modern architectures. However, rigorous reasoning about the resulting programs remains difficult, because of a large gap between the finite floating point representation and the infinite-precision real-number semantics that serves as the mental model for the developers. Because programming languages do not provide support for estimating errors, some computations in practice are performed more and some less precisely than needed. We present a library solution for rigorous arithmetic computation. Our library seamlessly integrates into the Scala programming language, thanks to its extensibility mechanisms such as implicit conversions and the treatment of arithmetic operations as method calls. Our numerical data type library tracks a (double) floating point value, but also an upper bound on the error between this value and the ideal value that would be computed in the real-value semantics. The library supports 1) an interval-based representation of the error, and 2) an affine arithmetic representation, which is generally more precise and keeps track of the correlation between different numerical values in the program. The library tracks errors arising from the rounding in arithmetic operations and constants, as well as user-provided errors that can model method errors of numerical algorithms or measurement errors arising in cyber-physical system applications. Our library provides approximations for most of the standard mathematical operations, including trigonometric functions. The library supports automated demand-driven refinement of computed errors by lazily increasing the precision of iteratively computed values to meet the desired precision of the final expression. 
Furthermore, the library supports dynamic transformation of the evaluation order following a set of algebraic rules to reduce the estimated error in the computed value. The transformed expressions can be used to suggest static rewrites of the source code to the developer. We evaluate the library on a number of examples from numerical analysis and physical simulations. We found it to be a useful tool for gaining confidence in the correctness of the computation.} } @inproceedings{dar-kun-11-aa-scalaoo, author = {Darulov{\'a}, Eva and Kuncak, Viktor}, title = {Trustworthy Numerical Computation in {Scala}}, booktitle = {Proceedings of the ACM International Conference on Object Oriented Programming Systems Languages and Applications (OOPSLA)}, year = 2011, month = oct, location = {Portland, US}, doi = {10.1145/2048066.2048094}, pages = {325-344}, comment = {A Scala library implementing an extension of AA, with examples.}, abstract = {Modern computing has adopted the floating point type as a default way to describe computations with real numbers. Thanks to dedicated hardware support, such computations are efficient on modern architectures, even in double precision. However, rigorous reasoning about the resulting programs remains difficult. This is in part due to a large gap between the finite floating point representation and the infinite-precision real-number semantics that serves as the developers' mental model. Because programming languages do not provide support for estimating errors, some computations in practice are performed more and some less precisely than needed. We present a library solution for rigorous arithmetic computation. Our numerical data type library tracks a (double) floating point value, but also a guaranteed upper bound on the error between this value and the ideal value that would be computed in the real-value semantics. Our implementation involves a set of linear approximations based on an extension of affine arithmetic. 
The derived approximations cover most of the standard mathematical operations, including trigonometric functions, and are more comprehensive than any publicly available ones. Moreover, while interval arithmetic rapidly yields overly pessimistic estimates, our approach remains precise for several computational tasks of interest. We evaluate the library on a number of examples from numerical analysis and physical simulations. We found it to be a useful tool for gaining confidence in the correctness of the computation.} } @misc{dar-kun-11-aa-safloat, author = {Darulov{\'a}, Eva and Kuncak, Viktor}, title = {On the Design and Implementation of {SmartFloat} and {AffineFloat}}, year = 2011, howpublished = {Online document.}, url = {https://core.ac.uk/download/pdf/147971237.pdf}, month = apr, pages = {28}, comment = {May be another version of [dar-kun-11-aa-scalaoo].}, abstract = {Modern computing has adopted the floating point type as a default way to describe computations with real numbers. Thanks to dedicated hardware support, such computations are efficient on modern architectures. However, rigorous reasoning about the resulting programs remains difficult, because of a large gap between the finite floating point representation and the infinite-precision real-number semantics that serves as the mental model for the developers. Because programming languages do not provide support for estimating errors, some computations in practice are performed more and some less precisely than needed. We present a library solution for rigorous arithmetic computation. Our numerical data type library tracks a (double) floating point value, but also a guaranteed upper bound on the error between this value and the ideal value that would be computed in the real-value semantics. Our implementation involves a set of linear approximations based on an extension of affine arithmetic. 
The derived approximations cover most of the standard mathematical operations including trigonometric functions, and are more comprehensive than any publicly available ones. Moreover, while interval arithmetic rapidly yields overly pessimistic estimates, our approach remains precise for a range of computational tasks of interest. We evaluate the library on a number of examples from numerical analysis and physical simulations. We found it to be a useful tool for gaining confidence in the correctness of the computation.}, quotes = {... On the other hand, using our affine-arithmeticbased type we compute an absolute error of 1.34 \uc{2217} 10\uc{2212}15, which is (by the correctness of our approach) sound, yet several orders of ...} } @techreport{dar-kun-14-aa-fpsens-tr, author = {Darulov{\'a}, Eva and Kuncak, Viktor}, title = {On Numerical Error Propagation with Sensitivity}, institution = {{\'E}cole Polytechnique F{\'e}d{\'e}rale de Lausanne}, location = {Lausanne, CH}, number = {200132}, year = 2014, month = jul, pages = {9}, url = {https://infoscience.epfl.ch/record/200132}, comment = {Static analysis of FP roundoff errors. Handles conditionals and some loops. Mentions AA but says that their solution is better.}, abstract = {An emerging area of research is to automatically compute reasonably accurate upper bounds on numerical errors, including roundoffs due to the use of a finite-precision representation for real numbers such as floating point or fixed-point arithmetic. Previous approaches for this task are limited in their accuracy and scalability, especially in the presence of nonlinear arithmetic. Our main idea is to decouple the computation of newly introduced roundoff errors from the amplification of existing errors. To characterize the amplification of existing errors, we use the derivatives of functions corresponding to program fragments. 
We implemented this technique in an analysis for programs containing nonlinear computation, conditionals, and a certain class of loops. We evaluate our system on a number of benchmarks from embedded systems and scientific computation, showing substantial improvements in accuracy and scalability over the state of the art.} } @techreport{dar-kun-12-aa-solneq-tr, author = {Darulov{\'a}, Eva and Kuncak, Viktor}, title = {On the Automatic Computation of Error Bounds for Solutions of Nonlinear Equations}, institution = {{\'E}cole Politecnique F{\'e}d{\'e}rale de Lausanne}, year = 2012, month = jun, location = {Lausanne, CH}, number = {177989}, pages = {15}, url = {https://infoscience.epfl.ch/record/177989}, comment = {Static analysis of FP roudoff errors. Handles conditionals and some loops. Uses AA but seems to say that one can do better.}, abstract = {A large portion of software is used for numerical calculations in mathematics, physics and engineering applications. Among the things that make verification in this domain difficult is the quantification of numerical errors, such as roundoff errors and errors due to the approximate numerical method. Much of numerical software uses self-stabilizing iterative algorithms, for example, to find solutions of nonlinear equations. To support such algorithms, we present a runtime verification technique that checks, given a nonlinear equation and a tentative solution, whether this value is indeed a solution to within a specified precision. Our technique combines runtime verification approaches with information about the analytical equation being solved. It is independent of the algorithm used for finding the solution and is therefore applicable to a wide range of problems. 
We have implemented our technique for the Scala programming language using our affine arithmetic library and the macro facility of Scala 2.10.} } @inproceedings{dar-kun-12-aa-cert, author = {Darulov{\'a}, Eva and Kuncak, Viktor}, title = {Certifying Solutions for Numerical Constraints}, journal = {International Conference on Runtime Verification,}, year = 2012, booktitle = {Revised Selected Papers of the Third International Conference on Runtime Verification (RV)}, month = sep, location = {Istanbul, TR}, doi = {10.1007/978-3-642-35632-2_27}, series = {Lecture Notes in Computer Science}, volume = {7687}, pages = {277–291}, comment = {Uses AA library and other tools for (static?) analysis of errors.}, abstract = {A large portion of software is used for numerical computation in mathematics, physics and engineering. Among the aspects that make verification in this domain difficult is the need to quantify numerical errors, such as roundoff errors and errors due to the use of approximate numerical methods. Much of numerical software uses self-stabilizing iterative algorithms, for example, to find solutions of nonlinear equations. To support such algorithms, we present a runtime verification technique that checks, given a nonlinear equation and a tentative solution, whether this value is indeed a solution to within a specified precision. Our technique combines runtime verification approaches with information about the analytical equation being solved. It is independent of the algorithm used for finding the solution and is therefore applicable to a wide range of problems. 
We have implemented our technique for the Scala programming language using our affine arithmetic library and the macro facility of Scala 2.10.} } @inproceedings{dar-kun-14-aa-soundcp, author = {Darulov{\'a}, Eva and Kuncak, Viktor}, title = {Sound Compilation of Reals}, year = 2014, booktitle = {Proceedings of the 41st ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages (POPL)}, month = jan, location = {San Diego, US}, doi = {10.1145/2535838.2535874}, pages = {235-248}, comment = {Combines AA with SMT to generate finite-precision program/circuit that achieves specified accuracy.}, abstract = {Writing accurate numerical software is hard because of many sources of unavoidable uncertainties, including finite numerical precision of implementations. We present a programming model where the user writes a program in a real-valued implementation and specification language that explicitly includes different types of uncertainties. We then present a compilation algorithm that generates a finite-precision implementation that is guaranteed to meet the desired precision with respect to real numbers. Our compilation performs a number of verification steps for different candidate precisions. It generates verification conditions that treat all sources of uncertainties in a unified way and encode reasoning about finite-precision roundoff errors into reasoning about real numbers. Such verification conditions can be used as a standardized format for verifying the precision and the correctness of numerical programs. Due to their non-linear nature, precise reasoning about these verification conditions remains difficult and cannot be handled using state-of-the art SMT solvers alone. We therefore propose a new procedure that combines exact SMT solving over reals with approximate and sound affine and interval arithmetic. We show that this approach overcomes scalability limitations of SMT solvers while providing improved precision over affine and interval arithmetic. 
Our implementation gives promising results on several numerical models, including dynamical systems, transcendental functions, and controller implementations.} } @article{dar-kun-17-aa-rosa, author = {Darulov{\'a}, Eva and Kuncak, Viktor}, title = {Towards a Compiler for Reals}, journal = {ACM Transactions on Programming Languages and Systems}, year = 2017, month = mar, doi = {10.1145/3014426}, volume = {39}, number = {2}, pages = {article 8, 28 pages}, comment = {Uses AA in a source-to-source compiler \textt{Rosa} that converts a numerical program into a mixed float/fixed point program with prescribed accuracy.}, abstract = {Numerical software, common in scientific computing or embedded systems, inevitably uses a finite-precision approximation of the real arithmetic in which most algorithms are designed. In many applications, the roundoff errors introduced by finite-precision arithmetic are not the only source of inaccuracy, and measurement and other input errors further increase the uncertainty of the computed results. Adequate tools are needed to help users select suitable data types and evaluate the provided accuracy, especially for safety-critical applications. We present a source-to-source compiler called Rosa that takes as input a real-valued program with error specifications and synthesizes code over an appropriate floating-point or fixed-point data type. The main challenge of such a compiler is a fully automated, sound, and yet accurate-enough numerical error estimation. We introduce a unified technique for bounding roundoff errors from floating-point and fixed-point arithmetic of various precisions. The technique can handle nonlinear arithmetic, determine closed-form symbolic invariants for unbounded loops, and quantify the effects of discontinuities on numerical errors. 
We evaluate Rosa on a number of benchmarks from scientific computing and embedded systems and, comparing it to the state of the art in automated error estimation, show that it presents an interesting tradeoff between accuracy and performance.} } @inproceedings{dar-kun-maj-sah-13-aa-polev, author = {Darulov{\'a}, Eva and Kuncak, Viktor and Majumdar, Rupak and Saha, Indranil}, title = {Synthesis of Fixed-Point Programs}, booktitle = {Proceedings of the International Conference on Embedded Software (EMSOFT)}, year = 2013, month = sep, location = {Montreal, CA}, doi = {10.1109/EMSOFT.2013.6658600}, pages = {1-10}, comment = {Rearranges computation of a polynomial so as to minimize the error when evaluated with fixed point operations. Shows that the problem is NP-hard. Uses genetic algorithm to experiment with the formulas, and AA to evaluate the accuracy of each formula.}, abstract = {Several problems in the implementations of control systems, signal-processing systems, and scientific computing systems reduce to compiling a polynomial expression over the reals into an imperative program using fixed-point arithmetic. Fixed-point arithmetic only approximates real values, and its operators do not have the fundamental properties of real arithmetic, such as associativity. Consequently, a naive compilation process can yield a program that significantly deviates from the real polynomial, whereas a different order of evaluation can result in a program that is close to the real value on all inputs in its domain. We present a compilation scheme for real-valued arithmetic expressions to fixed-point arithmetic programs. Given a real-valued polynomial expression t, we find an expression t' that is equivalent to t over the reals, but whose implementation as a series of fixed-point operations minimizes the error between the fixed-point value and the value of t over the space of all inputs. 
We show that the corresponding decision problem, checking whether there is an implementation t' of t whose error is less than a given constant, is NP-hard. We then propose a solution technique based on genetic programming. Our technique evaluates the fitness of each candidate program using a static analysis based on affine arithmetic. We show that our tool can significantly reduce the error in the fixed-point implementation on a set of linear control system benchmarks. For example, our tool found implementations whose errors are only one half of the errors in the original fixed-point expressions.} } @techreport{dar-kun-maj-sah-13-aa-polcmp, author = {Darulov{\'a}, Eva and Kuncak, Viktor and Majumdar, Rupak and Saha, Indranil}, title = {On the Generation of Precise Fixed-Point Expressions}, institution = {{\'E}cole Polytechnique F{\'e}d{\'e}rale de Lausanne}, month = oct, location = {Lausanne, CH}, number = {181818}, pages = {10}, comment = {Seems to be a preprint of [dar-kun-maj-sah-13-aa-polev]}, year = 2013, abstract = {Several problems in the implementations of control systems, signal-processing systems, and scientific computing systems reduce to compiling a polynomial expression over the reals into an imperative program using fixed-point arithmetic. Fixed-point arithmetic only approximates real values, and its operators do not have the fundamental properties of real arithmetic, such as associativity. Consequently, a naive compilation process can yield a program that significantly deviates from the real polynomial, whereas a different order of evaluation can result in a program that is close to the real value on all inputs in its domain. We present a compilation scheme for real-valued arithmetic expressions to fixed-point arithmetic programs. 
Given a real-valued polynomial expression t, we find an expression t' that is equivalent to t over the reals, but whose implementation as a series of fixed-point operations minimizes the error between the fixed-point value and the value of t over the space of all inputs. We show that the corresponding decision problem, checking whether there is an implementation t' of t whose error is less than a given constant, is NP-hard. We then propose a solution technique based on genetic programming. Our technique evaluates the fitness of each candidate program using a static analysis based on affine arithmetic. We show that our tool can significantly reduce the error in the fixed-point implementation on a set of linear control system benchmarks. For example, our tool found implementations whose errors are only one half of the errors in the original fixed-point expressions.}, url = {{\url{https://infoscience.epfl.ch/record/181818}}}, quotes = {... , we use a static analysis based on affine arithmetic to compute an upper bound on the error. ... tions in using static analysis based on affine-arithmetic, but our search method works with ...} } @inproceedings{def-fev-gra-jez-kir-wil-21-aa-iflop, author = {Defour, David and F{\'e}votte, Fran{\c{c}}ois and Graillat, Stef and J{\'e}z{\'e}quel, Fabienne and Kirschenmann, Wilfried and Lamotte, Jean-Luc and Lathuili{\`e}re, Bruno and Lhuillier, Yves and Petit, {\'E}ric and Signoles, Julien and Sohier, Devan and V{\'e}drine, Franck}, title = {{InterFLOP}, Interoperable Tools for Computing, Debugging, Validation and Optimization of Floating-Point Programs}, booktitle = {Posters of the ISC High Performance Conference (ISC-HPC)}, location = {Virtual meeting}, year = 2021, month = jun, note = {Available at the HAL repository}, url = {https://hal.archives-ouvertes.fr/hal-03245586}, pages = {1}, comment = {Big project. 
Uses AA.}, abstract = {Objectives: Set a common platform integrating major tools of the French Floating-Point community to tackle the FP challenges and recent evolutions of software and hardware. We propose new analyses and combinations of existing ones to address the challenge of providing a quick and precise numerical diagnosis requiring little user expertise. InterFLOP will collect and combine information on numerical instabilities, catastrophic cancellations, unstable tests, build various statistical analyses of program executions at minimal overhead.} } @inproceedings{del-bas-20-aa-cadmpc, author = {Del Rio Ruiz, Aitor and Basterretxea, Koldo}, title = {Towards the Development of a {CAD} Tool for the Implementation of High-Speed Embedded {MPCs} on {FPGAs}}, year = 2020, booktitle = {Proceedings of the European Control Conference (ECC)}, location = {Saint Petersburg, RU}, pages = {941--947}, month = may, doi = {10.23919/ECC51009.2020.9143666}, comment = {MPC $=$ Model Predictive Controller. Develops semi-automatic design tool for FPGAs. Uses AA for static error analysis.}, abstract = {Designing embedded model predictive controllers (MPCs) for systems with fast sampling rates is a tedious and multidisciplinary process. The reason is that from the high-level description of the control problem to the final implementation on a computational platform, the MPC design space covers multiple abstraction levels in which critical design decisions have to be made. Therefore, currently, the identification of the best design criteria can not be automated. 
Instead, in this paper we take an step towards the development of a computer-aided design (CAD) tool that enables the semi-automatic implementation of application-specific embedded MPCs on field-programmable gate arrays (FPGAs) for its application to control-engineering problems with fast sampling rates.} } @inproceedings{del-gou-put-sou-tek-ved-09-aa-fluct, author = {Delmas, David and Goubault, {\'E}ric and Putot, Sylvie and Souyris, Jean and Tekkal, Karim and V{\'e}drine, Franck}, title = {Towards an Industrial Use of {FLUCTUAT} on Safety-Critical Avionics Software}, booktitle = {Proceedings of the 14th International Workshop on Formal Methods for Industrial Critical Systems (FMICS)}, location = {Eindhoven, NL}, series = {Lecture Notes in Computer Science}, volume = {5825}, pages = {53--69}, year = 2009, month = nov, doi = {10.1007/978-3-642-04570-7_6}, comment = {Applies FLUCTUAT, a static error propagation analyzer for C programs. Uses AA.}, abstract = {Most modern safety-critical control programs, such as those embedded in fly-by-wire control systems, perform a lot of floating-point computations. The well-known pitfalls of IEEE 754 arithmetic make stability and accuracy analyses a requirement for this type of software. This need is traditionally addressed through a combination of testing and sophisticated intellectual analyses, but such a process is both costly and error-prone. FLUCTUAT is a static analyzer developed by CEA-LIST for studying the propagation of rounding errors in C programs. After a long time research collaboration with CEA-LIST on this tool, Airbus is now willing to use FLUCTUAT industrially, in order to automate part of the accuracy analyses of some control programs. 
In this paper, we present the IEEE 754 standard, the FLUCTUAT tool, the types of codes to be analyzed and the analysis methodology, together with code examples and analysis results.} } @inproceedings{dem-che-den-dos-21-aa-tagged, author = {Demeure, Nestor and Chevalier, C{\'e}dric and Denis, Christophe and Dossantos-Uzarralde, Pierre}, title = {Tagged Error: {Tracing} Numerical Error through Computations}, journal = {2021 IEEE 28th ...,}, booktitle = {Proceedings of the 28th Symposium on Computer Arithmetic (ARITH)}, location = {Lyngby, DK}, pages = {9-16}, year = 2021, month = jun, doi = {10.1109/ARITH51176.2021.00014}, comment = {Defines ``Tagged error'' an arithmetic ``inspired by AA'', used to estimate error in programs ``taking error amplification or dampening into account''. Example: conjugate gradient.}, abstract = {Extensive work has been done to evaluate the numerical accuracy of computations. However, getting fine-grained information on the operations that caused the inaccuracies observed in a given output is still a hard problem. We propose a new method, under the name tagged error, to get fine information on the impact of user-defined code sections on the numerical error of any floating-point number in a program. Our method uses a dedicated arithmetic over a type that encapsulates both the result the user would have had with the original computation and an approximation of its numerical error stored as an unevaluated sum of terms that can each be attributed to a single source. It lets us quantify the impact of potential error sources on any output of a computation while taking phenomena such as error amplification or dampening, due to later operations, into account. 
Furthermore, we can use this information to do targeted modifications of an algorithm, improving both its speed and precision, as illustrated by a study on the conjugate gradient algorithm.} } @inproceedings{den-08-aa-geom, author = {Denner-Broser, Britta}, title = {An Algorithm for the Tracing Problem using Interval Analysis}, booktitle = {Proceedings of the ACM Symposium on Applied Computing (SAC)}, year = 2008, location = {Fortaleza, BR}, pages = {1832--1837}, month = mar, doi = {10.1145/1363686.1364127}, comment = {Says that AA should be used to reduce uncertainty but does not use it. Dynamic Geometry is interactive programs such as CABRI that simulate geometric constructions using intersection and interpolation of lines, circles, etc.. Problem (NP-hard) is detecting singular events such as 2 circles becoming tangent or coincident when centers are dragged with mouse. These critical events are avoided by using complex straight lines (2 dim instead of 1 dim) so that the path may detour around singularities.}, abstract = {We give an algorithm for the Tracing Problem in Dynamic Geometry that uses interval arithmetic. In this work, we focus on an algebraic model. Here the objects are real or complex numbers with the operations $+$, $-$, $\times$, $/$, and $\sqrt{}$. Originally, geometric objects like points, lines, or circles have been considered. Our algorithm proceeds stepwise and detects (potential) critical points in advance. For each step, the algorithm computes a steplength that is small enough to handle the ambiguity of the root function. This is achieved by using interval arithmetic. After the detection of a critical point, the singularity is avoided by a detour through the complex plane $\mathbb{C}$.}, url = {{\url{https://dl.acm.org/doi/abs/10.1145/1363686.1364127}}}, quotes = {... The usage of affine arithmetic might reduce this problem [3]. ... 
comments and for pointing out the relation to affine arithmetic. ... Affine arithmetic: Concepts and applications. Numerical ...} } @misc{den-22-theory-junk, author = {Deninger, Christopher}, title = {Dynamical Systems for Arithmetic Schemes}, howpublished = {arXiv preprint}, number = {1807.06400, version 3}, url = {https://arxiv.org/abs/1807.06400}, year = 2022, pages = {}, month = jan, doi = {10.48550/arXiv.1807.06400}, comment = {Nothing to do with our AA.}, abstract = {Motivated by work of Kucharczyk and Scholze, we use sheafified rational Witt vectors to attach a new ringed space $W_{\mathrm{rat}}(X)$ to every scheme $X$. We also define $R$-valued points $W_{\mathrm{rat}}(X)(R)$ of $W_{\mathrm{rat}}(X)$ for every commutative ring R. For normal schemes $X$ of finite type over $\mathrm{spec}\mathbb{Z}$, using $W_{\mathrm{rat}}(X)(\mathbb{C})$ we construct infinite dimensional $\mathbb{C}$-dynamical systems whose periodic orbits are related to the closed points of $X$. Various aspects of these topological dynamical systems are studied. We also explain how certain p-adic points of $W_{\mathrm{rat}}(X)$ for $X$ the spectrum of a $p$-adic local number ring are related to the points of the Fargues-Fontaine curve. The new intrinsic construction of the dynamical systems generalizes and clarifies the original extrinsic construction in v.1 and v.2. 
Many further results have been added.} } @inproceedings{den-bat-zha-won-14-aa-dctrans, author = {Deng, Jian and Batselier, Kim and Zhang, Yang and Wong, Ngai}, title = {An Efficient Two-Level {DC} Operating Points Finder for Transistor Circuits}, year = 2014, booktitle = {Proceedings of the 51st ACM/EDAC/IEEE Design Automation Conference (DAC)}, location = {San Francisco, US}, pages = {1--6}, month = jun, doi = {10.1145/2593069.2593087}, comment = {Uses AA to find all solution of a DC circuit with (non-linear) transistors.}, abstract = {DC analysis, as a foundation for the simulation of many electronic circuits, is concerned with locating DC operating points. In this paper, a new and efficient algorithm to find all DC operating points is proposed for transistor circuits. The novelty of this DC operating points finder is its two-level simple implementation based on the affine arithmetic preconditioning and interval contraction method. Compared to traditional methods such as homotopy, this finder offers a dramatically faster way of computing all roots, without sacrificing any accuracy. Explicit numerical examples and comparative analysis are given to demonstrate the feasibility and accuracy of the proposed approach.} } @misc{dic-17-aa-genetic, author = {Dick, Grant}, title = {Interval Arithmetic and Interval-Aware Operators for Genetic Programming}, howpublished = {Online document at the arXiv CS repository.}, number = {1704.04998}, url = {https://arxiv.org/abs/1704.04998}, pages = {8}, year = 2017, month = apr, doi = {10.48550/arXiv.1704.04998}, comment = {Uses IA to ``allow genetic programming to perform regression using unprotected operators'', whatever that means. Mentions AA but does not use it.}, abstract = {Symbolic regression via genetic programming is a flexible approach to machine learning that does not require up-front specification of model structure. 
However, traditional approaches to symbolic regression require the use of protected operators, which can lead to perverse model characteristics and poor generalisation. In this paper, we revisit interval arithmetic as one possible solution to allow genetic programming to perform regression using unprotected operators. Using standard benchmarks, we show that using interval arithmetic within model evaluation does not prevent invalid solutions from entering the population, meaning that search performance remains compromised. We extend the basic interval arithmetic concept with `safe' search operators that integrate interval information into their process, thereby greatly reducing the number of invalid solutions produced during search. The resulting algorithms are able to more effectively identify good models that generalise well to unseen data. We conclude with an analysis of the sensitivity of interval arithmetic-based operators with respect to the accuracy of the supplied input feature intervals.} } @book{dim-tom-vac-11-aa-osti, author = {Dimitrovski, Aleksandar D and Tomsovic, Kevin and Vaccaro, Alfredo}, title = {Reliable Algorithms for Power System Analysis in the Presence of Data Uncertainties}, publisher = {U.S. Department of Energy, Office of Scientific and Technical Information}, year = 2011, pages = {??}, month = jan, number = {OSTI.1050951}, url = {https://www.osti.gov/biblio/1050951}, note = {}, comment = {Proposed AA for power flow analysis as better than statistical or monte carlo methods.}, abstract = {A robust and reliable power flow analysis represents an essential requirement for many power systems applications as far as network optimization, voltage control, state estimation, and service restoration are concerned. The most common power flow approach, referred to here as a deterministic power flow (PLF), requires precise or 'crisp' values chosen by the analyst for each input variable. 
The solution provides precise network voltages and flows through each line. The specified values rest upon assumptions about the operating condition derived from historical measurements or predictions about future conditions and thus, cannot be considered accurate. Even in the case where the inputs are based on measurements, inaccuracies arise from time-skew problems, three-phase unbalance, static modeling approximations of dynamic components (e.g., transformer tap changers), variations in line parameters, and so on. The advent of deregulation and competitive power markets will only exacerbate this problem as well-known generation patterns change, loading becomes less predictable and the transmission paths grow more diverse. Conventional methodologies proposed in literature address tolerance analysis of power flow solution by means of detailed probabilistic methods, accounting for the variability and stochastic nature of the input data, and sampling based approaches. In particular uncertainty propagation using sampling based methods, such as the Monte Carlo, requires several model runs that sample various combinations of input values. Since the number of model runs can sometimes be very large, the required computer resources can sometimes be prohibitively expensive resulting in substantial computational demands. As far as probabilistic methods are concerned, they represent a useful tool, especially for planning studies, but, as evidenced by the many discussions reported in literature, they could reveal some shortcomings principally arising from: (1) the non-normal distribution and the statistical dependence of the input data; and (2) the difficulty arising in accurately identifying probability distributions for some input data, such as the power generated by wind or photovoltaic generators. All these could result in time consuming computations with several limitations in practical applications especially in power flow analysis of complex power networks. 
In order to try and overcome some of these limitations, obtaining thereby comprehensive power flow solution tolerance analysis at adequate computational costs, self validated computation could play a crucial role. Armed with such a vision, this chapter will analyze two advanced techniques for power flow analysis in the presence of data uncertainty namely the boundary power flow and the affine arithmetic power flow.}, url = {{\url{https://www.osti.gov/biblio/1050951}}}, quotes = {... such a vision, this chapter will analyze two advanced techniques for power flow analysis in the presence of data uncertainty namely the boundary power flow and the affine arithmetic ...} } @article{din-bor-lif-guo-sun-guw-zho-14-aa-obbt, author = {Ding, Tao and Bo, Rui and Li, Fanxing and Guo, Qinglai and Sun, Hongbin and Gu, Wei and Zhou, Gan}, title = {Interval Power Flow Analysis using Linear Relaxation and Optimality-Based Bounds Tightening ({OBBT}) Methods}, journal = {IEEE Transactions on Power Systems}, year = 2014, volume = {30}, number = {1}, pages = {177-188}, month = jan, doi = {10.1109/TPWRS.2014.2316271}, comment = {Formulates the power flow problem as a quadratically constrained quadratic programming problem, then uses affine approximations and ``optimality-based bounds tightening'' to solve it. Compares their method to AA and claims it is better.}, abstract = {With increasingly large scale of intermittent and non-dispatchable resources being integrated into power systems, the power flow problem presents greater uncertainty. In order to obtain the upper and lower bounds of power flow solutions including voltage magnitudes, voltage angles and line flows, Cartesian coordinates-based power flow is utilized in this paper. A quadratically constrained quadratic programming (QCQP) model is then established to formulate the interval power flow problem. 
This non-convex QCQP model is relaxed to linear programming problem by introducing convex and concave enclosures of the original feasible region. To improve the solutions bounds while still encompassing the true interval solution, optimality-based bounds tightening (OBBT) method is employed to find a better outer hull of the feasible region. Numerical results on IEEE 9-bus, 30-bus, 57-bus, and 118-bus test systems validate the effectiveness of the proposed method.} } @misc{din-egb-din-03-aa-neural, author = {Dinerstein, Jonathan and Egbert, Parris and Dinerstein, Nelson}, title = {Fast and Accurate Interval Arithmetic through Neural Network Approximation}, howpublished = {Online document}, url = {https://www.researchgate.net/publication/265245016_Fast_and_Accurate_Interval_Arithmetic_through_Neural_Network_Approximation}, year = 2003, pages = {12}, month = mar, note = {Date extracted from PDF metadata.}, comment = {Introduces ``neural interval arithmetic''. Apparently trains a neural network $N_1$ to compute bounds for a class of target function. The input of $N-1$ is a vector $u$ of parameters that determines the function within the class. Then trains another NN $N_2$ to compute the approximate error of $N_1$ given the vector $u$. Then analyzes $N_1$ and $N_2$ with a global maximum algorithm to find a reliable error bound. Claims it is better than AA. But in the examples the parameter vector $u$ is a set of sample values of the function. Uses thousands of input-output pairs to train the NNs. Not convincng at all...}, abstract = {Interval arithmetic has become a popular tool for computer graphics. It has been applied to collision detection, ray tracing, implicit curve/surface enumeration, etc. It is useful for general optimization problems such as robust root finding and global maximum/minimum finding. However, interval arithmetic and related techniques (e.g. affine arithmetic) suffer from two significant weaknesses. 
First, their evaluations are often very conservative, making the techniques useless in many practical situations. Second, they can be much slower than traditional arithmetic. In this paper we present a new approach to computing interval arithmetic: neural network approximation. This naturally provides more accuracy since each traditional interval operation can introduce errors that compound, but a neural emulation approach requires only one approximation. Further, greater overall speed can be achieved due to the higher accuracy.} } @book{cos-21-difeq-junk, author = {Costa, Peter J.}, title = {Select Ideas in Partial Differential Equations}, series = {Synthesis Lectures on Mathematics and Statistics,}, year = 2021, publisher = {Morgan {\&} Claypool}, pages = {214}, month = jun, doi = {10.2200/S01080ED1V01Y202102MAS040}, comment = {Spurious Google Scholar hit}, abstract = {This text provides an introduction to the applications and implementations of partial differential equations. The content is structured in three progressive levels which are suited for upper–level undergraduates with background in multivariable calculus and elementary linear algebra (chapters 1–5), first– and second–year graduate students who have taken advanced calculus and real analysis (chapters 6-7), as well as doctoral-level students with an understanding of linear and nonlinear functional analysis (chapters 7-8) respectively. Level one gives readers a full exposure to the fundamental linear partial differential equations of physics. It details methods to understand and solve these equations leading ultimately to solutions of Maxwell’s equations. Level two addresses nonlinearity and provides examples of separation of variables, linearizing change of variables, and the inverse scattering transform for select nonlinear partial differential equations. 
Level three presents rich sources of advanced techniques and strategies for the study of nonlinear partial differential equations, including unique and previously unpublished results. Ultimately the text aims to familiarize readers in applied mathematics, physics, and engineering with some of the myriad techniques that have been developed to model and solve linear and nonlinear partial differential equations.} } @book{cho-21-mtcarlo-junk, author = {Chowdhury, Sujaul}, title = {{Monte} {Carlo} Methods: {A} Hands-On Computational Introduction Utilizing {Excel}}, year = 2021, series = {Synthesis Lectures on Mathematics and Statistics}, publisher = {Morgan {\&} Claypool}, pages = {133}, month = dec, doi = {10.2200/S01073ED1V01Y202101MAS037}, comment = {Spurious Google Scholar hit.}, abstract = {This book is intended for undergraduate students of Mathematics, Statistics, and Physics who know nothing about Monte Carlo Methods but wish to know how they work. All treatments have been done as much manually as is practicable. The treatments are deliberately manual to let the readers get the real feel of how Monte Carlo Methods work. Definite integrals $F(x)$ of a total of five functions have been evaluated using constant, linear, Gaussian, and exponential probability density functions $p(x)$. It is shown that results agree with known exact values better if $p(x)$ is proportional to $F(x)$. Deviation from the proportionality results in worse agreement. This book is on Monte Carlo Methods which are numerical methods for Computational Physics. ... 
} } @inproceedings{com-mac-fre-ned-oli-21-aa-impsamp, author = {Comba, Jo{\~a}o and Maciel, Anderson and Freitas, Carla and Nedel, Luciana and Oliveira, Manuel M.}, title = {Interactive Visualization at {UFRGS}: {Ongoing} Research at the {Computer} {Graphics} and {Interaction} Group}, booktitle = {Proceedings of the Workshop on Interactive Visualization (WIVis) of the 25th Conference on Graphics, Patterns and Images (SIBGRAPI)}, year = 2021, location = {Ouro Preto, BR}, publisher = {Universidade Federal de Ouro Preto}, issn = {9-772176-085006}, pages = {1-4}, month = aug, note = {No DOI?}, comment = {Short survey of group's work in progress. Mentions use of AA to place sample points on implicit surfaces in order to extract crease lines.}, abstract = {The Computer Graphics and Interaction group at UFRGS has a well-established tradition of working in the subject of interactive visualization. With publications in the main venues of the field, such as the IEEE Visualization and Eurovis conferences, or journals such as IEEE TVCG (Transactions on Visualization and Computer Graphics) and Computer Graphics Forum (CGF), the group has consistently produced research in the area of interactive visualization throughout the years. Along the visualization group from USP-Sao Carlos, it organized the two ˜ previous instances of the workshop on interactive visualization, held at Sibgrapi 2007 and 2010. In this paper we summarize the recently published research in the period of 2010 to today. 
We also describe ongoing projects, that we hope can promote discussions and foster collaborations with other research groups.} } @article{din-lif-lix-sun-bor-15-aa-radial, author = {Ding, Tao and Li, Fangxing and Li, Xue and Sun, Hongbin and Bo, Rui}, title = {Interval Radial Power Flow using Extended {DistFlow} Formulation and {Krawczyk} Iteration Method with Sparse Approximate Inverse Preconditioner}, journal = {IET Generation, Transmission {\&} Distribution}, year = 2015, volume = {9}, number = {14}, pages = {1998-2006}, month = nov, doi = {10.1049/iet-gtd.2014.1170}, comment = {Mentions AA but apparently does not use it. Uses IA to solve systems of equations arising in power flow. What is the Krawczyk iteration method? Uses preconditioning to reduce the Frobenius norm of the matrix to ensure convergence.}, abstract = {Confronted with uncertainties, especially from large amounts of renewable energy sources, power flow studies need further analysis to cover the range of voltage magnitude and transferred power. To address this issue, this work proposes a novel interval power flow for the radial network by the use of an extended, simplified DistFlow formulation, which can be transformed into a set of interval linear equations. Furthermore, the Krawczyk iteration method, including an approximate inverse preconditioner using Frobenius norm minimisation, is employed to solve this problem. The approximate inverse preconditioner guarantees the convergence of the iterative method and has the potential for parallel implementation. In addition, to avoid generating a dense approximate inverse matrix in the preconditioning step, a dropping strategy is introduced to perform a sparse representation, which can significantly reduce the memory requirement and ease the matrix operation burden. The proposed methods are demonstrated on 33-bus, 69-bus, 123-bus, and several large systems. 
A comparison with interval LU decomposition, interval Gauss elimination method, and Monte Carlo simulation verifies its effectiveness.} } @article{din-rid-dor-mcg-rog-ciz-gol-05-aa-face, author = {Dinges, David F. and Rider, Robert L. and Dorrian, Jillian and McGlinchey, Eleanor L. and Rogers, Naomi L. and Cizman, Ziga and Goldenstein, Siome K. and Vogler, Christian and Venkataraman, Sundara and Metaxas, Dimitris N.}, title = {Optical Computer Recognition of Facial Expressions Associated with Stress Induced by Performance Demands}, journal = {Aviation, Space, and Environmental Medicine}, year = 2005, volume = {76}, number = {Supplement 1}, pages = {B172--B182}, month = jun, note = {No DOI?}, comment = {Uses AA to fit face models to images.}, abstract = {Application of computer vision to track changes in human facial expressions during long-duration spaceflight may be a useful way to unobtrusively detect the presence of stress during critical operations. To develop such an approach, we applied optical computer recognition (OCR) algorithms for detecting facial changes during performance while people experienced both low- and high-stressor performance demands. Workload and social feedback were used to vary performance stress in 60 healthy adults (29 men, 31 women; mean age 30 yr). High-stressor scenarios involved more difficult performance tasks, negative social feedback, and greater time pressure relative to low workload scenarios. Stress reactions were tracked using self-report ratings, salivary cortisol, and heart rate. Subjects also completed personality, mood, and alexithymia questionnaires. To bootstrap development of the OCR algorithm, we had a human observer, blind to stressor condition, identify the expressive elements of the face of people undergoing high- vs. low-stressor performance. Different sets of videos of subjects’ faces during performance conditions were used for OCR algorithm training. 
Subjective ratings of stress, task difficulty, effort required, frustration, and negative mood were significantly increased during high-stressor performance bouts relative to low-stressor bouts (all p < 0.01). The OCR algorithm was refined to provide robust 3-d tracking of facial expressions during head movement. Movements of eyebrows and asymmetries in the mouth were extracted. These parameters are being used in a Hidden Markov model to identify high- and low-stressor conditions. Preliminary results suggest that an OCR algorithm using mouth and eyebrow regions has the potential to discriminate high- from low-stressor performance bouts in 75–88% of subjects. The validity of the workload paradigm to induce differential levels of stress in facial expressions was established. The paradigm also provided the basic stress-related facial expressions required to establish a prototypical OCR algorithm to detect such changes. Efforts are underway to further improve the OCR algorithm by adding facial touching and automating application of the deformable masks and OCR algorithms to video footage of the moving faces as a prelude to blind validation of the automated approach.} } @article{doi-hor-nak-kim-06-aa-bitlen, author = {Doi, Noburo and Horiyama, Takashi and Nakanishi, Masaki and Kimura, Shinji}, title = {Bit-Length Optimization Method for High-Level Synthesis based on Non-Linear Programming Technique}, journal = {IEICE Transactions on Fundamentals of Electronics, Communications and Computer Sciences}, year = 2006, volume = {E89-A}, number = {12}, pages = {3427--3434}, month = dec, doi = {10.1093/ietfec/e89-a.12.3427}, comment = {Considers conversion of a C program with float variables to a digital circuit with fixed-point variables. Uses AA to propagate errors.}, abstract = {High-level synthesis is a novel method to generate a RT-level hardware description automatically from a high-level language such as C, and is used at recent digital circuit design. 
Floating-point to fixed-point conversion with bit-length optimization is one of the key issues for the area and speed optimization in high-level synthesis. However, the conversion task is a rather tedious work for designers. This paper introduces automatic bit-length optimization method on floating-point to fixed-point conversion for high-level synthesis. The method estimates computational errors statistically, and formalizes an optimization problem as a non-linear problem. The application of NLP technique improves the balancing between computational accuracy and total hardware cost. Various constraints such as unit sharing, maximum bit-length of function units can be modeled easily, too. Experimental result shows that our method is fast compared with typical one, and reduces the hardware area.} } @article{dol-abd-moh-22-aa-caes, author = {Dolatabadi, Amirhossein and Abdeltawab, Hussein Hassan and Mohamed, Yasser Abdel-Rady}, title = {Deep Reinforcement Learning-Based Self-scheduling Strategy for a {CAES}-{PV} System Using Accurate Sky Images-based Forecasting}, journal = {IEEE Transactions on Power Systems}, year = 2022, volume = {Early access}, pages = {11}, doi = {10.1109/TPWRS.2022.3177704}, note = {No month, volume, issue, pages assigned yet.}, comment = {Optimization of compressed-air energy storage. Mentions AA but it is not clear whether it uses it.}, abstract = {Compressed air energy storage (CAES) is a scalable and clean energy storage technology with great potential in renewables accommodation. From the point of view of the facility owner participating in the energy market, the profit of a CAES-PV system's coordinated operation is still at a notable risk. This paper addresses this problem by using a novel model-free deep reinforcement learning (DRL) method to optimize the CAES energy arbitrage in the presence of a sky images-based solar irradiance forecasting model. 
To overcome the risk associated with the highly intermittent solar power productions, and thus efficient participation in an electricity market, a hybrid forecasting model based on 2-D convolutional neural networks (CNNs) and bidirectional long short-term memory (BLSTM) units is developed to capture high levels of abstractions in solar irradiance data, especially during cloudy days. Moreover, the thermodynamic characteristics of the CAES facility is considered to achieve more realistic real-time scheduling results. The comparative results based on a realistic-based case study demonstrate the effectiveness and applicability of the proposed framework compared to the state-of-the-art methods in the recent literature.} } @inproceedings{don-wan-yul-20-aa-sunvolt, author = {Dong, Yichao and Wang, Shouxiang and Yu, Lu}, title = {Voltage Sensitivity Analysis Based {PV} Hosting Capacity Evaluation Considering Uncertainties}, booktitle = {Proceedings of the General Meeting of the IEEE Power {\&} Energy Society (PES GM)}, location = {Virtual meeting}, pages = {1--5}, year = 2020, month = aug, doi = {10.1109/PESGM41954.2020.9281896}, comment = {Uses AA and IA to analyze voltage sensitivity of solar energy systems.}, abstract = {The increasing penetration of photovoltaic (PV) systems in distribution network (DN) currently makes the PV hosting capacity (PVHC) evaluation a research hotspot. To this end, this paper proposes an improved PVHC evaluation method based on the analyses of affine-form voltage sensitivity and interval-form overvoltage risk. This method can not only reduce the computational burden by voltage sensitivity analysis, but also quantify the overvoltage risk caused by the uncertainty of PV configuration. Meanwhile, the application of interval arithmetic (IA) and affine arithmetic (AA) can overcome the shortcomings of probabilistic approaches in dealing with uncertainties. 
The IEEE 33-bus test feeder is used to verify its reasonability and the value in decision-making of PV planning for utilities.} } @inproceedings{dub-mye-sok-syr-yak-17-aa-mixsig, author = {Dubikhin, Vladimir and Myers, Chris and Sokolov, Danil and Syranidis, Ioannis and Yakovlev, Alex}, title = {Advances in Formal Methods for the Design of Analog/Mixed-Signal Systems}, booktitle = {Proceedings of the 54th ACM/EDAC/IEEE Design Automation Conference (DAC)}, location = {Austin, US}, pages = {1--6}, year = 2017, month = jun, doi = {10.1145/3061639.3072945}, note = {Invited paper}, comment = {Survey paper. Mentions AA.}, abstract = {Analog/mixed-signal (AMS) systems are rapidly expanding in all domains of information and communication technology. They are a critical part of the support for large-scale high-performance digital systems, provide important functionalities in medium-scale embedded and mobile systems, and act as a core organ of autonomous electronics such as sensor nodes. Analog and digital parts are closely inter-mixed, hence demanding AMS design methods and tools to be more holistic. In particular, the emergence of ``little digital'' electronics inside or near analog circuitry calls for the increasing use of asynchronous logic. To cope with the growing complexity of AMS designs, formal methods are required to complement traditional simulation approaches. This paper presents an overview of the state-of-the-art in AMS formal verification and asynchronous design that enables the development of analog/asynchronous co-design methods. One such co-design methodology is exemplified by the LEMA-Workcraft workflow currently under development by the authors.} } @phdthesis{dub-19-aa-mixsig-th, author = {Dubikhin, Vladimir}, title = {Synthesis and Verification of Mixed-Signal Systems with Asynchronous Control}, year = 2019, school = {Newcastle University}, number = {NCL-EEE-MICRO-TR-2020-217}, pages = {145}, month = nov, note = {Advisor: Alex Yakovlev. 
No DOI?}, url = {http://async.org.uk/tech-reports/NCL-EEE-MICRO-TR-2020-217.pdf}, comment = {Mentions AA only briefly}, abstract = {Analog/mixed signal (AMS) systems are widely used in electronic devices, such as mobile phones, autonomous sensors, and radio transmitters. The traditional design flows are based on synchronous circuits, which simplify the design process but raise a number of limitations in certain applications. For example, in order to react promptly to the changes in an analog environment the control module needs to have a high clocking frequency. This in return leads to higher power consumption and wasted clock cycles, when no changes occur in the environment. Asynchronous circuits do not have this disadvantage as they react to input events at the rate they occur. However, with design automation being a huge concern asynchronous circuits are not widely used by industry. Another problem related to the AMS system design is the reliance on simulation as the verification method. A simulation trace shows only one possible behavior of the system, as a result simulation based verification largely depends on quantity and diversity of tests. Formal methods, such as the reachability analysis, aim to address this problem. However, a lot of the proposed methodologies are disruptive to the existing design flows and require engineers to manually construct abstract models for their systems. The main goal of this work is to introduce the novel automated workflow, which enables formal verification of AMS systems with asynchronous control that has been optimized with correct timing assumptions extracted from the full-system model. One of the key features of the proposed design flow is the ability to reuse existing simulation traces to generate abstract models, used for system validation. 
To overcome a number of flaws in the existing model generator a new version, utilizing data clusterization and process mining techniques, is created as a stand-alone framework in Java. The new model generator is designed to construct more general models that produce correct behavior, when used with a different control module} } @inproceedings{duj-tia-wuz-lia-abb-sun-20-aa-dflow, author = {Du, Jinqiao and Tian, Jie and Wu, Zhi and Li, Ao and Abbas, Ghulam and Sun, Qirun}, title = {An Interval Power Flow Method Based on Linearized {DistFlow} Equations for Radial Distribution Systems}, booktitle = {Proceedings of the IEEE PES Asia-Pacific Power and Energy Engineering Conference (APPEEC)}, location = {Nanging, CN}, pages = {1-6}, year = 2020, month = sep, doi = {10.1109/APPEEC48164.2020.9220333}, comment = {Computation of radial-type power distribution systems. Mentions AA; not clear whether uses it.}, abstract = {With consideration of the radial structure of most distribution systems, this paper proposes an interval power flow method based on linearized Distflow equations. In the proposed method, the calculation of interval power flow solution has been modeled as an optimization problem, and the uncertainties derived from the randomness of distributed renewable resources as well as electric vehicle loads are properly involved. 
The proposed method is more high-efficiency than the existing methods since its linear property, and the 33-bus distribution system as well as a practical urban distribution system is employed to verify its effectiveness.} } @article{dup-yan-cao-zha-yan-20-aa-acdc, author = {Du, Pingjing and Yang, Ming and Cao, Liangjing and Zhai, Hefeng and Yang, Jiajun}, title = {Affine Power Flow Algorithm for {AC}/{DC} Systems with Voltage Source Converter}, journal = {Transactions of China Electrotechnical Society}, year = 2020, volume = {35}, number = {5}, pages = {1106--1117}, month = mar, doi = {10.19595/j.cnki.1000-6753.tces.190351}, url = {http://dgjsxb.ces-transaction.com/CN/article/downloadArticleFile.do?attachType=PDF&id=6382}, note = {In Chinese.}, comment = {Uses AA to evaluate power flow in an AC/DC grid with uncertain supply.}, abstract = {In this paper, an affine arithmetic based AC/DC system power flow algorithm is proposed to provide a conservative estimation result when the injection power of the system is uncertain. The impacts of each uncertain variable on the system state variables can also be calculated. Firstly, the affine calculation method of the trigonometric function was proposed. And then the uncertain power flow model of the AC/DC system with voltage source converter (VSC) based on affine arithmetic was established. The VSC nodes were equivalent to different types of AC nodes and DC nodes respectively to correspond the solutions of AC and DC power flow models. Furthermore, the fast-decoupled method was used to solve the sequential AC/DC affine power flow. Finally, the effectiveness and rapidity of the proposed algorithm were verified by case studies. 
And the impacts of the uncertain variables on the system state variables of AC/DC systems were also be given.} } @inproceedings{dup-yan-yan-zho-19-aa-acdc, author = {Du, Pingjing and Yang, Ming and Yang, Jiajun and Zhou, Yu}, title = {Uncertain Power Flow Algorithm for Hybrid {AC}/{DC} Grids Incorporating {VSCs}}, year = 2019, booktitle = {Proceedings of the 3rd IEEE International Electrical and Energy Conference (CIEEC)}, location = {Beijing, CN}, pages = {800--805}, month = sep, doi = {10.1109/CIEEC47146.2019.CIEEC-2019308}, comment = {Uses AA to compute ``sequential AC/DC power flow''.}, abstract = {The AC/DC hybrid system enhances the ability of the power system to accept renewable energy sources. At the same time, the uncertainty of renewable energy output presents new problems in AC/DC hybrid systems, especially in converter buses. In this paper, affine arithmetic based sequential AC/DC power flow method is established. It can provide less conservative affine estimation results when the system contains uncertain injection power. Furthermore, affine numbers can preserve the correlation between the uncertainties, so the quantitative analysis of the influence of the uncertainty can be carried out by the index of the impact of the uncertain variable on the system state variables. In the affine arithmetic based sequential AC/DC power flow method, the VSC bus is equivalent to the type of the AC bus and the DC bus, and corresponds to the pure AC and pure DC power flow model. The fastness and accuracy of the algorithm are tested by an AC/DC grid incorporating VSCs. 
At the same time, the impacts of the uncertain variable on the system state variables of the test system is analyzed.} } @inproceedings{dur-far-kon-tah-14-aa-funcia, author = {Duracz, Jan and Farjudian, Amin and Kone{\v{c}}n{\'y}, Michal and Taha, Walid}, title = {Function Interval Arithmetic}, booktitle = {Proceedings of the 4th International Conference on Mathematical Software (ICMS)}, location = {Seoul, KR}, isbn = {978-3-662-44199-2}, series = {Lecture Notes in Computer Science}, volume = {8592}, pages = {677--684}, year = 2014, month = aug, doi = {10.1007/978-3-662-44199-2_101}, comment = {Generalization of IA where the bounds can be arbitrary functions, e.g. polynomials. Claims that it is a generalization of AA, and is more precise. But does not seem to record correlation between variables, and may have problems with multiplication when the functions cross zero. Or with non-linear operations in general, e.g. $\max$ or $\sqrt{}$.}, abstract = {We propose an arithmetic of function intervals as a basis for convenient rigorous numerical computation. Function intervals can be used as mathematical objects in their own right or as enclosures of functions over the reals. We present two areas of application of function interval arithmetic and associated software that implements the arithmetic: (1) Validated ordinary differential equation solving using the AERN library and within the Acumen hybrid system modeling tool. 
(2) Numerical theorem proving using the PolyPaver prover.} } @inproceedings{dyl-08-aa-hiermod, author = {Dyllong, Eva}, title = {A Note on Some Applications of Interval Arithmetic in Hierarchical Solid Modeling}, booktitle = {Proceedings of the Dagstuhl Seminar on Numerical Validation in Current Hardware Architectures}, series = {Dagstuhl Seminar Proceedings (DagSemProc)}, volume = {8021}, year = 2008, location = {Dagstuhl, DE}, pages = {1--4}, month = apr, doi = {10.4230/DagSemProc.08021.5}, url = {https://drops.dagstuhl.de/opus/volltexte/2008/1440/}, comment = {Mentions AA, but uses only IA to build an octree for the surface $S$ of a CSG solid model. Nodes whose intersection with $S$ comes from a single primitive are not subdivided further. Nodes whose intersection with $S$ is more complex are subdivided further as needed. Claims that the idea of simplifying the CSG operation tree in each box is due to Duff.}, abstract = {Techniques of reliable computing like interval arithmetic can be used to guarantee a reliable solution even in the presence of numerical round-off errors. The need to trace bounds for the error function separately can be eliminated using these techniques. In this talk, we focus on some demonstrations how the techniques and algorithms of reliable computing can be applied to the construction and further processing of hierarchical solid representations using the octree model as an example. An octree is a common hierarchical data structure to represent 3D geometrical objects in solid modeling systems or to reconstruct a real scene. The solid representation is based on recursive cell decompositions of the space. Unfortunately, the data structure may require a large amount of memory when it uses a set of very small cubic nodes to approximate a solid. 
In this talk, we present a novel generalization of the octree model created from a CSG object that uses interval arithmetic and allows us to extend the tests for classifying points in space as inside, on the boundary or outside the object to handle whole sections of the space at once. Tree nodes with additional information about relevant parts of the CSG object are introduced in order to reduce the depth of the required subdivision. Furthermore, this talk is concerned with interval-based algorithms for reliable proximity queries between the extended octrees and with further processing of the structure. We conclude the talk with some examples of implementations.}, quotes = {... The utilization of affine arithmetic and other improved techniques of reliable computing in the field of modeling can be found in [3,4]. For moving multibody models a continuous collision ...} } @article{dyl-kie-12-aa-impldist, author = {Dyllong, Eva and Kiel, Stefan}, title = {A Comparison of Verified Distance Computation between Implicit Objects using Different Arithmetics for Range Enclosure}, journal = {Computing}, volume = {94}, pages = {281--296}, year = 2012, month = nov, doi = {10.1007/s00607-011-0161-0}, comment = {Uses validated computing to build axis-aligned box enclosures for implicit surfaces and estimates distances between surfaces by the distances between the boxes, without regard for the box contents. Evaluates various validated computing methods, including IA, AA, centered forms, Taylor models, for their efficiency at eliminating empty boxes. But with AA we can build slab enclosures.}, abstract = {This paper describes a new algorithm for computing verified bounds on the distance between two arbitrary fat implicit objects. The algorithm dissects the objects into axis-aligned boxes by constructing an adaptive hierarchical decomposition during runtime. Actual distance computation is performed on the cubes independently of the original object’s complexity. 
As the whole decomposition process and the distance computation are carried out using verified techniques like interval arithmetic, the calculated bounds are rigorous. In the second part of the paper, we test our algorithm using 18 different test cases, split up into 5 groups. Each group represents a different level of complexity, ranging from simple surfaces like the sphere to more complex surfaces like the Kleins bottle. The algorithm is independent of the actual technique for range bounding, which allows us to compare different verified arithmetics. Using our newly developed uniform framework for verified computations, we perform tests with interval arithmetic, centered forms, affine arithmetic and Taylor models. Finally, we compare them based on the time needed for deriving verified bounds with a user defined accuracy.} } @article{dze-15-aa-valode, author = {Dzetkuli{\v{c}}, Tom{\'a}{\v{s}}}, title = {Rigorous Integration of Non-Linear Ordinary Differential Equations in {Chebyshev} Basis}, journal = {Numerical Algorithms}, volume = {69}, pages = {183–205}, year = 2015, month = jul, doi = {10.1007/s11075-014-9889-x}, comment = {Uses ``multi-variable function enclosures in the form of coefficients of the truncated Chebyshev series and the remainder term stored as an interval.'' Says that his approach is similar to AA, but then says that AA does NOT handle approximation errors like other noise errors, which is false.}, abstract = {In this paper, we introduce a new approach to multiple step verified integration of non-linear ordinary differential equations. The approach is based on the technique of a Taylor model integration, however, a novel method is introduced to suppress the wrapping effect over several integration steps. This method is simpler and more robust compared to the known methods. It allows more general inputs, while it does not require rigorous matrix inversion. 
Moreover, our integration algorithm allows the use of various types of underlying function enclosures. We present rigorous arithmetic operations with function enclosures based on the truncated Chebyshev series. Computational experiments are used to show the wrapping effect suppression of our method and to compare integration algorithm that uses Chebyshev function enclosures with the existing algorithms that use function enclosures based on the truncated Taylor series (Taylor models).} }