From: Ben Lynn Date: Thu, 18 Sep 2008 23:26:32 +0000 (-0700) Subject: Cosmetic changes. X-Git-Url: https://repo.or.cz/w/frac.git/commitdiff_plain/3d07acbd295d326231d4cf649b8248f39fc9f5ec Cosmetic changes. --- diff --git a/Makefile b/Makefile index d8ad84f..d363ac5 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ CF_OBJS:=cf.o mobius.o famous.o bihom.o taylor.o test.o newton.o tee.o TESTS:=bihom_test cf_test famous_test mobius_test newton_test tee_test -BINS:=pi epow hakmem +BINS:=pi hakmem target : $(BINS) diff --git a/README b/README dissimilarity index 61% index bc0a2d6..4e0aa36 100644 --- a/README +++ b/README @@ -1,145 +1,136 @@ -= Continued Fraction Library = -Ben Lynn, September 2008 - -Frac is a C library for computing with continued fractions. - -== Continued Fractions == - -1/3 has no finite floating point representation. How can a standard tool for -working with real numbers be unable to handle a number a young child could -understand? - -Enter continued fractions. Finite continued fractions uniquely (if you forbid -the last term from being 1) and exactly represent rationals. Contrast this -with 1/3 = 0.333... and 1 = 0.999... They live in a realm free from the -tyranny of base 2. Commonly encountered irrational numbers, both algebraic and -transcendental, are readily represented with infinite continued fractions, and -when we truncate them we have an approximation along with an error bound. In -contrast, floating point numbers must be accompanied at all times with their -error bounds. - -Repeated digits in decimal representation occur for fractions, but why waste -such an interesting phenomenon on something as mundane as a rational? With -continued fractions, repeated terms occur only for square roots. Other infinite -but patterned sequences represent other celebrated numbers, such as e and pi. - -Let's say we use floating-point to compute some answer to 100 decimal places. -What if we now want 200 decimal places? 
Even if we possess the first 100 -digits, and carefully preserved the program state, we must start from scratch -and redo all our arithmetic with 200 digit precision. - -If only we had used continued fractions, for then we could arbitrarily -increase the precision of our answer without redoing any work. Furthermore, -the error analysis becomes trivial. - -We must pay a price for the advantages of continued fractions. We -have a representation from which we can instantly infer error bounds. A -representation for which we may arbitrarily increase precision without redoing -any work. A representation which in some sense is the closest we can hope to -get to an exact number. Is it any wonder therefore that binary operations, -whose output must conform to this wondrous representation, are clumsy and -convoluted, and that deriving continued fraction expansions for many -reals is time-consuming? - -These drawbacks are almost certainly why these fascinating creatures remain -obscure. Yet surely there are some problems that scream for continued -fractions? How about a screen saver that computes more and more digits of pi, -each time picking up where it left off? Or imagine a real-time application -where all continued fractions simply output the last computed convergent on a -timer interrupt. Under heavy system load, approximations are coarser but the -show goes on. - -== Installation == - -I use the GMP library for multi-precision arithmetic. Type - - $ make - -to compile. Try "pi 2000" to generate 2000 digits of pi using a continued -fraction. It takes longer than I had hoped, but it's faster than - - $ echo "scale = 2000; 4*a(1)" | bc -l - -There's no API documentation; see the source. - -== Design == - -Threads are the future, if not already the present. Once, single core CPUs grew -faster at an astounding pace. But limits are being approached, and engineers -are turning to multicore designs to surmount technical obstacles. 
Multicore -systems are already commonplace, and as time passes, the number of cores per -system will steadily march upward. Happily, this suits continued fractions. - -Inspired by Doug McIlroy's "Squinting at Power Series" (search for it), we -spawn at least one thread per continued fraction, and also per operation on -continued fractions. The threads communicate via crude demand channels, each -providing a few output terms upon each request. - -This scheme allows us to quickly write code for generating continued -fraction expansions or operating on them. For example, consider the code for -generating 'e' = 2.71828... - -.............................................................................. -// e = [2; 1, 2, 1, 1, 4, 1, ...] -static void *e_expansion(cf_t cf) { - int even = 2; - cf_put_int(cf, even); - - while(cf_wait(cf)) { - cf_put_int(cf, 1); - cf_put_int(cf, even); - even += 2; - cf_put_int(cf, 1); - } - - return NULL; -} -.............................................................................. - -The function +cf_put+ places a term on the output channel, and +cf_wait+ is our -implementation of demand channels, a sort of cooperative multitasking. Without -it, not only would we have to destroy and clean up after the thread ourselves, -but more seriously, the thread might consume vast amounts of resources on -unwanted terms. The +cf_wait+ functions instructs the thread to stay idle. -Our threads call this function often, and if it returns zero, our threads -clean themselves up and exit. - -== Bugs and other issues == - -I intend to devote time to projects that operate on real numbers rather than -study real numbers for its own sake. But perhaps then I'll find a perfect -application for continued fractions, which will push me to fix deficiencies -in this code. - -- The API could use cosmetic surgery. 
- -- I want to replace the condition variable with a semaphore to silence Helgrind - (whose documentation states that condition variables almost unconditionally - and invariably raise false alarms). - -- The Makefile is fragile and annoying to maintain. - -- The testing infrastructure needs upgrading. - -- I'd like a split function as described by McIlroy, though for continued - fractions a "tee"-like function is more apt: rather than spawning a thread - for each term held back, we spawn and watch over two threads on invocation to - service the two demand channels, storing all backlogged terms in one place. - -=== Disclaimer === - -Let me put my salesmanship on hold, and temper my anti-floating-point rhetoric. -Increasing the precision of floating-point operations without redoing work is -in fact possible for many common cases. For example, Newton's method is -self-correcting, meaning we may use low precision at first, and increase it for -future iterations. Even pi enjoys such methods: there is a formula computing -any hexadecimal digit of pi without computing any previous digits, though it -requires about the same amount of work. - -Also, Newton's method, and other floating-point algorithms converge -quadratically or faster, thus a good implementation will asymptotically -outperform continued fractions as these converge only linearly. - -However, I'd prefer continued fractions to Newton's method when writing code -to compute digits of, say, sqrt(2). Precision decisions are made automatically: -one simply needs enough bits to hold the last computed convergents. += Continued Fraction Library = +Ben Lynn, September 2008 + +Frac is a C library for computing with continued fractions. + +== Continued Fractions == + +The binary representation of 1/3 is infinite. Floating-point numbers, our +standard tool for working with reals, cannot even handle numbers a young child +could understand. 
+ +In contrast, every rational can be uniquely and exactly represented by a finite +continued fraction (provided we forbid those with last term 1). Continued +fractions are immune from oddities such as 1/3 = 0.333... and 1 = 0.999..., +living in a realm free from the tyranny of binary, or any other base. + +Every irrational number can also be uniquely expressed as an infinite continued +fraction. Many frequently encountered algebraic and transcendental numbers have +easily computable continued fraction expansions which can be truncated to yield +approximations with built-in error bounds, unlike floating-point +approximations, which must be accompanied at all times with their error bounds. + +Furthermore, suppose we use floating-point to compute some answer to 100 +decimal places. What if we now want 200 decimal places? Even if we possess the +first 100 digits and carefully preserve the program state, we must start from +scratch and redo all our arithmetic with 200 digit precision. If we had used +continued fractions, we could arbitrarily increase the precision of our answer +without redoing any work, and without any tedious error analysis. + +But we must pay a price for these advantages. We have a representation from +which we can instantly infer error bounds. A representation for which we may +arbitrarily increase precision without repeating any calculations. A +representation which in some sense is the closest we can hope to get to an +exact number using rationals. Is it any wonder therefore that binary operations +on continued fractions, whose output must also uphold these high standards, are +clumsy and convoluted? + +These drawbacks are almost certainly why these fascinating creatures remain +obscure. Yet surely there are some problems that scream for continued +fractions. How about a screen saver that computes more and more digits of pi, +each time picking up where it left off? 
Or imagine a real-time application +where all continued fractions simply output the last computed convergent on a +timer interrupt. Under heavy system load, approximations are coarser but the +show goes on. + +== Installation == + +The GMP library is required. Type: + + $ make + +to compile. Try "pi 2000" to generate 2000 digits of pi using a continued +fraction. It takes longer than I had hoped, but it's faster than + + $ echo "scale = 2000; 4*a(1)" | bc -l + +There's no API documentation; see the source. + +== Design == + +Threads are the future, if not already the present. Multicore systems are +already commonplace, and as time passes, the number of cores per system will +steadily march upward. Happily, this suits continued fractions. + +Inspired by Doug McIlroy's "Squinting at Power Series" (search for it), we +spawn at least one thread per continued fraction, and also per operation on +continued fractions. The threads communicate via crude demand channels, each +providing a few output terms upon each request. + +This scheme allows us to quickly write code for generating continued fraction +expansions or operating on them. For example, consider the code for generating +'e' = 2.71828... + +.............................................................................. +// e = [2; 1, 2, 1, 1, 4, 1, ...] +static void *e_expansion(cf_t cf) { + int even = 2; + cf_put_int(cf, even); + + while(cf_wait(cf)) { + cf_put_int(cf, 1); + cf_put_int(cf, even); + even += 2; + cf_put_int(cf, 1); + } + + return NULL; +} +.............................................................................. + +The function +cf_put+ places a term on the output channel, and +cf_wait+ is our +implementation of demand channels, a sort of cooperative multitasking. Without +it, not only would we have to destroy and clean up after the thread ourselves, +but more seriously, the thread might consume vast amounts of resources +computing unwanted terms. 
The +cf_wait+ function instructs the thread to stay +idle. Our threads call this function often, and if it returns zero, our threads +clean themselves up and exit. + +== Bugs and other issues == + +I intend to devote time to projects that operate on real numbers rather than +study real numbers for their own sake. But perhaps then I'll find a perfect +application for continued fractions, which will push me to fix deficiencies in +this code: + +- The API could use cosmetic surgery. + +- I want to replace the condition variable with a semaphore to silence Helgrind + (whose documentation states that condition variables almost unconditionally + and invariably raise false alarms). + +- The Makefile is fragile and annoying to maintain and use. + +- The testing infrastructure needs upgrading. + +- Much more, e.g. see TODOs sprinkled throughout the code. + +=== Disclaimer === + +Lest my salesmanship backfire, let me temper my anti-floating-point rhetoric. +Increasing the precision of floating-point operations without redoing work is +in fact possible for many common cases. For example, Newton's method is +self-correcting, meaning we may use low precision at first, and increase it for +future iterations. Even pi enjoys such methods: there exists a formula +revealing any hexadecimal digit of pi without computing any previous digits, +though it requires about the same amount of work. + +Moreover, several floating-point algorithms converge quadratically or faster, +thus good implementations will asymptotically outperform continued fractions +as these converge only linearly. + +Nonetheless, for lower accuracies, the smaller overhead may give continued +fractions the edge in certain problems such as finding the square root of 2. +Additionally, precision decisions are automatic, for example, one simply needs +enough bits to hold the last computed convergents. Built-in error analysis +simplifies and hence accelerates development.
diff --git a/epow.c b/epow.c deleted file mode 100644 index 4dde0cb..0000000 --- a/epow.c +++ /dev/null @@ -1,42 +0,0 @@ -#include -#include -#include -#include -#include "cf.h" - -int main() { - mpz_t z; - mpz_init(z); - cf_t x, conv; - - mpz_set_si(z, 3); - x = cf_new_tanh(z); - - printf("tanh 3 = "); - conv = cf_new_cf_to_decimal(x); - for (int i = 0; i <= 5000; i++) { - cf_get(z, conv); - gmp_printf("%Zd", z); - if (!(i % 5)) putchar(' '); - if (!(i % 50)) putchar('\n'); - } - cf_free(conv); - cf_free(x); - - mpz_set_ui(z, 2); - x = cf_new_epow(z); - - printf("e^2 = "); - conv = cf_new_cf_to_decimal(x); - for (int i = 0; i <= 5000; i++) { - cf_get(z, conv); - gmp_printf("%Zd", z); - if (!(i % 5)) putchar(' '); - if (!(i % 50)) putchar('\n'); - } - cf_free(conv); - cf_free(x); - - mpz_clear(z); - return 0; -} diff --git a/hakmem.c b/hakmem.c index 3dc6d97..0ec8c7e 100644 --- a/hakmem.c +++ b/hakmem.c @@ -1,11 +1,13 @@ // Compute the HAKMEM constant. // (sqrt(3/pi^2 + e)/(tanh(sqrt(5))-sin(69)) -// which is +// +// Confirm with: // $ echo "pi=4*a(1) // x=2*sqrt(5) // (sqrt(3/pi^2+e(1)))/((e(x)-1)/(e(x)+1)-s(69))" | bc -l -// Almost exclusively uses techniques described by Gosper. -// Differences: +// +// Almost exclusively uses techniques described by Gosper. Differences: +// // - Taylor series for cos(1) to find its continued fraction expansion. // - Binary search instead of Newton's method when finding integer part of // solution of quadratic. 
@@ -30,19 +32,24 @@ void cf_dump(cf_t cf, int n) { mpz_t z; mpz_init(z); cf_t dec = cf_new_cf_to_decimal(cf); - for (int i = 0; i < n; i++) { + int i; + for (i = 0; i <= n; i++) { cf_get(z, dec); gmp_printf("%Zd", z); if (!(i % 5)) putchar(' '); if (!(i % 50)) putchar('\n'); } - printf("\n"); + if (n % 50) putchar('\n'); cf_free(dec); mpz_clear(z); } -int main() { - int n = 500; +int main(int argc, char **argv) { + int n = 100; + if (argc > 1) { + n = atoi(argv[1]); + if (n <= 0) n = 100; + } cf_t c[7]; cf_t t[6][2]; mpz_t b[8];