;;; -*- mode: lisp -*-

;;; Time-stamp: <2009-03-23 08:15:54 tony>
;;; Creation:   <2008-09-08 08:06:30 tony>
;;; File:       TODO.lisp
;;; Author:     AJ Rossini <blindglobe@gmail.com>
;;; Copyright:  (c) 2007-2008, AJ Rossini <blindglobe@gmail.com>.  BSD.
;;; Purpose:    Stuff that needs to be made working sits inside the progns...

;;; What is this talk of 'release'?  Klingons do not make software
;;; 'releases'.  Our software 'escapes', leaving a bloody trail of
;;; designers and quality assurance people in its wake.
;;; This file contains the current challenges to solve, including a
;;; description of the setup and the work needed to solve them.
;;; SET UP

(in-package :cl-user)
;;(asdf:oos 'asdf:compile-op 'lispstat)
;;(asdf:oos 'asdf:load-op 'lispstat)

(in-package :lisp-stat-unittests)

;; tests = 89, failures = 7, errors = 37

(describe (run-tests :suite 'lisp-stat-ut))
(run-tests :suite 'lisp-stat-ut)

;; FIXME: Example: currently not relevant, yet
(describe
 (lift::run-test
  :test-case 'lisp-stat-unittests::create-proto
  :suite 'lisp-stat-unittests::lisp-stat-ut-proto))

(describe 'lisp-stat-ut)
(in-package :ls-user)
(progn ;; dataframe

  (defparameter *my-df-1*
    (make-instance 'dataframe-array
                   :storage #2A((1 2 3 4 5)
                                (10 20 30 40 50))
                   :doc "This is an interesting dataframe-array"
                   :case-labels (list "x" "y")
                   :var-labels (list "a" "b" "c" "d" "e")))

  (defparameter *my-df-2*
    (make-instance 'dataframe-array
                   :storage
                   (make-array-from-listoflists
                    (cybertiggyr-dsv::load-escaped
                     "/media/disk/Desktop/sandbox/CLS.git/Data/example-mixed.csv"))
                   :doc "This is an interesting dataframe-array"))
  #| :case-labels (list "x" "y")
     :var-labels (list "a" "b" "c" "d" "e")
  |#
  )
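
;; A small consistency check (plain CL only, a sketch; no dataframe-array
;; accessors are exercised): the storage for *my-df-1* is 2x5, which should
;; match the number of case-labels (2) and var-labels (5) supplied above.
(equal (array-dimensions #2A((1 2 3 4 5)
                             (10 20 30 40 50)))
       (list (length (list "x" "y"))
             (length (list "a" "b" "c" "d" "e")))) ; => T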
(progn ;; Data setup

  (describe 'make-matrix)

  (defparameter *indep-vars-2-matrix*
    (make-matrix (length iron) 2
                 :initial-contents
                 (mapcar #'(lambda (x y)
                             (list (coerce x 'double-float)
                                   (coerce y 'double-float)))
                         iron aluminum)))

  (defparameter *dep-var*
    (make-vector (length absorbtion)
                 :type :row
                 :initial-contents
                 (list
                  (mapcar #'(lambda (x) (coerce x 'double-float))
                          absorbtion))))

  (defparameter *dep-var-int*
    (make-vector (length absorbtion)
                 :type :row
                 :element-type 'integer
                 :initial-contents (list absorbtion)))

  (defparameter *xv+1a*
    (make-matrix
     8 2
     :initial-contents #2A((1d0 1d0)
                           (1d0 3d0)
                           (1d0 2d0)
                           (1d0 4d0)
                           (1d0 3d0)
                           (1d0 5d0)
                           (1d0 4d0)
                           (1d0 6d0))))
  (defparameter *xv+1b*
    (bind2
     (ones 8 1)
     (make-matrix
      8 1
      :initial-contents '((1d0)
                          (3d0)
                          (2d0)
                          (4d0)
                          (3d0)
                          (5d0)
                          (4d0)
                          (6d0)))
     :by :column))

  (m= *xv+1a* *xv+1b*) ; => T

  (princ "Data Set up"))
(progn
  ;; REVIEW: general Lisp use guidance

  (fdefinition 'make-matrix)
  (documentation 'make-matrix 'function)
  #| Examples from CLHS, a bit of guidance.

  ;; This function assumes its callers have checked the types of the
  ;; arguments, and authorizes the compiler to build in that assumption.
  (defun discriminant (a b c)
    (declare (number a b c))
    "Compute the discriminant for a quadratic equation."
    (- (* b b) (* 4 a c))) => DISCRIMINANT
  (discriminant 1 2/3 -2) => 76/9

  ;; This function assumes its callers have not checked the types of the
  ;; arguments, and performs explicit type checks before making any assumptions.
  (defun careful-discriminant (a b c)
    "Compute the discriminant for a quadratic equation."
    (check-type a number)
    (check-type b number)
    (check-type c number)
    (locally (declare (number a b c))
      (- (* b b) (* 4 a c)))) => CAREFUL-DISCRIMINANT
  (careful-discriminant 1 2/3 -2) => 76/9
  |#
  )
#+nil
(progn ;; FIXME: Regression modeling

  ;; data setup in previous FIXME
  (defparameter *m* nil
    "holding variable.")

  ;; need to make vectors and matrices from the lists...

  ;; BROKEN
  (def *m* (regression-model (list->vector-like iron)
                             (list->vector-like absorbtion)))

  (def m (regression-model (list->vector-like iron)
                           (list->vector-like absorbtion) :print nil))
  ;; Good
  (send m :print)
  (send m :own-slots)
  (send m :own-methods)
  ;; (lsos::ls-objects-methods m) ; bogus?
  (send m :show)

  (def m (regression-model (list->vector-like iron)
                           (list->vector-like absorbtion)))

  (def m (regression-model (listoflists->matrix-like (list iron aluminum))
                           (list->vector-like absorbtion) :print nil))

  (send m :compute)
  (send m :sweep-matrix)
  (format t "~%~A~%" (send m :sweep-matrix))
  ;; need to get multiple linear regression working (simple linear
  ;; regression works)... to do this, we need to redo the whole numeric
  ;; structure.  I'm keeping these in as examples of brokenness...

  (send m :basis) ;; this should be positive?
  (send m :coef-estimates))
#+nil
(progn ;; FIXME: Need to clean up data examples, licenses, attributions, etc.
  ;; The following breaks because we should use a package to hold
  ;; configuration details, and this would be the only package outside
  ;; of packages.lisp, as it holds the overall defsystem structure.
  (load-data "iris.lsp")  ;; (the above partially fixed).
  (variables)
  diabetes)
(progn ;; FIXME: read data from CSV file.  To do.

  ;; The challenge is to ensure that we get mixed (general) arrays when
  ;; we want them, and single-type (simple) arrays in other cases; see
  ;; the sketch just below.
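
  ;; A minimal illustration of that distinction in plain CL (a sketch; no
  ;; CLS code involved): a specialized array is constrained to one element
  ;; type, while a general array can hold the mixed data read from a CSV row.
  (array-element-type
   (make-array 3 :element-type 'double-float
                 :initial-contents '(1d0 2d0 3d0)))
  ;; => DOUBLE-FLOAT where the implementation specializes it, else T
  (array-element-type
   (make-array 3 :initial-contents '(1d0 "a" :b)))
  ;; => T (general array, mixed contents)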
  (defparameter *csv-num*
    (cybertiggyr-dsv::load-escaped
     #p"/media/disk/Desktop/sandbox/CLS.git/Data/example-numeric.csv"
     :field-separator #\,
     :trace T))

  (nth 0 (nth 0 *csv-num*))

  (defparameter *csv-num*
    (cybertiggyr-dsv::load-escaped
     #p"/media/disk/Desktop/sandbox/CLS.git/Data/example-numeric2.dsv"
     :field-separator #\:))

  (nth 0 (nth 0 *csv-num*))
  ;; The handling of these types should be comparable to what we do for
  ;; matrices, but without the numerical processing, i.e. mref, bind2,
  ;; make-dataframe, and the class structure should be similar.

  ;; With numerical data, there should be a straightforward mapping from
  ;; the data.frame to a matrix.  With categorical data (including
  ;; dense categories such as doc-strings, as well as sparse categories
  ;; such as binary data), we need to include metadata about ordering,
  ;; coding, and such.  So the structures should probably take that
  ;; metadata into account (a rough sketch of per-variable metadata follows).
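
  ;; A rough sketch only: one way to carry that metadata is a plain plist
  ;; per variable.  The keys used here (:name :kind :levels :ordered-p
  ;; :coding) are hypothetical, not part of any existing CLS class; they
  ;; just make the ordering/coding point concrete.
  (defparameter *categorical-var-metadata-sketch*
    '(:name "treatment"
      :kind :categorical
      :levels ("placebo" "low-dose" "high-dose")
      :ordered-p t                    ; ordering matters for ordinal data
      :coding :treatment-contrasts))  ; how levels map to numeric columns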
  ;; Using the CSV file:

  (defun parse-number (s)
    (let* ((*read-eval* nil)
           (n (read-from-string s)))
      (if (numberp n) n)))

  (parse-number "34")
  (parse-number "34 ")
  (parse-number " 34")
  (parse-number " 34 ")

  (+ (parse-number "3.4") 3)
  (parse-number "3.4 ")
  (parse-number " 3.4")
  (+ (parse-number " 3.4 ") 3)

  (parse-number "a")
  ;; (coerce "2.3" 'number) => ERROR
  ;; (coerce "2" 'float)    => ERROR

  (defparameter *csv-num*
    (cybertiggyr-dsv::load-escaped
     #p"/media/disk/Desktop/sandbox/CLS.git/Data/example-numeric.csv"
     :field-separator #\,
     :filter #'parse-number
     :trace T))

  (nth 0 (nth 0 *csv-num*))

  (defparameter *csv-num*
    (cybertiggyr-dsv::load-escaped
     #p"/media/disk/Desktop/sandbox/CLS.git/Data/example-numeric2.dsv"
     :field-separator #\:
     :filter #'parse-number))

  (nth 0 (nth 0 *csv-num*))
  ;; now we've got the DSV code in the codebase, auto-loaded I hope:
  cybertiggyr-dsv:*field-separator*
  (defparameter *example-numeric.csv*
    (cybertiggyr-dsv:load-escaped "Data/example-numeric.csv"
                                  :field-separator #\,))
  *example-numeric.csv*
  ;; The following fails because we've got a bit of string conversion to
  ;; do.  Two thoughts: #1 modify the dsv package, but that mucks with
  ;; its encapsulation; #2 add a coercion tool (better, but potentially
  ;; inefficient).  A sketch of #2 follows the test strings below.
  #+nil(coerce (nth 3 (nth 3 *example-numeric.csv*)) 'double-float)

  ;; test cases, from simple to not-so-simple
  (defparameter *test-string1* "1.2")
  (defparameter *test-string2* " 1.2")
  (defparameter *test-string3* " 1.2 ")
  )
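
;; Sketch of thought #2, building only on the parse-number helper defined
;; above.  string->double-float is a hypothetical name, not an existing
;; CLS/dsv function: it reads the number out of the string (surrounding
;; whitespace is skipped by the reader) and coerces it, returning NIL for
;; non-numeric fields.
(defun string->double-float (s)
  (let ((n (parse-number s)))
    (when n (coerce n 'double-float))))

(string->double-float *test-string1*) ; => a double-float (read as single-float first, so single precision)
(string->double-float *test-string3*) ; whitespace is tolerated
(string->double-float "a")            ; => NIL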
#+nil
(progn ;; experiments with GSL and the Lisp interface.
  (asdf:oos 'asdf:load-op 'gsll)
  (asdf:oos 'asdf:load-op 'gsll-tests)

  ;; the following should be equivalent
  (setf *t1* (LIST 6.18d0 6.647777777777779d0 6.18d0))
  (setf *t2* (MULTIPLE-VALUE-LIST
              (LET ((VEC
                     (gsll:make-marray 'DOUBLE-FLOAT
                                       :INITIAL-CONTENTS '(-3.21d0 1.0d0 12.8d0)))
                    (WEIGHTS
                     (gsll:MAKE-MARRAY 'DOUBLE-FLOAT
                                       :INITIAL-CONTENTS '(3.0d0 1.0d0 2.0d0))))
                (LET ((MEAN (gsll:MEAN VEC)))
                  (LIST (gsll:ABSOLUTE-DEVIATION VEC)
                        (gsll:WEIGHTED-ABSOLUTE-DEVIATION VEC WEIGHTS)
                        (gsll:ABSOLUTE-DEVIATION VEC MEAN))))))
  ;; EQL on two freshly consed lists is always NIL; compare contents
  ;; instead.  Note *t2* is a multiple-value list, so its single value is
  ;; the inner list:
  (equal *t1* (first *t2*))
  ;; from (gsll:examples 'gsll::numerical-integration) ...
  (gsll:integration-qng gsll::one-sine 0.0d0 PI)

  (gsll:defun-single axpb (x) (+ (* 2 x) 3)) ;; a<-2, b<-3
  (gsll:integration-qng axpb 1d0 2d0)

  (let ((a 2)
        (b 3))
    (defun-single axpb2 (x) (+ (* a x) b)))
  (gsll:integration-qng axpb2 1d0 2d0)

  ;; BAD
  ;; (gsll:integration-qng
  ;;  (let ((a 2)
  ;;        (b 3))
  ;;    (defun-single axpb2 (x) (+ (* a x) b)))
  ;;  1d0 2d0)

  ;; right, but weird expansion...
  (gsll:integration-qng
   (let ((a 2)
         (b 3))
     (defun axpb2 (x) (+ (* a x) b))
     (gsll:def-single-function axpb2)
     axpb2)
   1d0 2d0)
  ;; Linear least squares

  (gsll:gsl-lookup "gsl_linalg_LU_decomp") ; => gsll:lu-decomposition
  (gsll:gsl-lookup "gsl_linalg_LU_solve")  ; => gsll:lu-solve
  )
#+nil
(progn ;; philosophy time

  (setf my-model (model :name "ex1"
                        :data-slots (list w x y z)
                        :param-slots (list alpha beta gamma)
                        :math-form (regression-model :formula '(= w (+ (* beta x)
                                                                       (* alpha y)
                                                                       (* gamma z)
                                                                       normal-error))
                                                     :centrality 'median ; 'mean
                                                     )))
  #| or:
     #R"W ~ x + y + z"
  |#
  (setf my-dataset (statistical-table :table data-frame-contents
                                      :metadata (list (:case-names (list ))
                                                      (:var-names (list ))
                                                      (:documentation "string of doc"))))

  (setf my-analysis (analysis
                     :model my-model
                     :data my-dataset
                     :parameter-map (pairing (model-param-slots my-model)
                                             (data-var-names my-dataset))))

  ;; ontological implications -- the analysis is an abstract class of
  ;; data, model, and mapping between the model and data.  The fit is
  ;; the instantiation of such.  This provides a statistical object
  ;; computation theory which can be realized as "executable
  ;; statistics" or "computable statistics".
  (setf my-analysis (analyze my-fit
                             :estimation-method 'linear-least-squares-regression))

  ;; one of the tricks here is that one needs to provide the structure
  ;; from which to consider estimation, and more importantly, the
  ;; validity of the estimation.
  (setf linear-least-squares-regression
        (estimation-method-definition
         :variable-definitions ((list
                                 ;; from MachLearn: supervised,
                                 ;; unsupervised
                                 :data-response-vars list-drv ; nil if unsup
                                 :param-vars list-pv
                                 :data-predictor-vars list-dpv
                                 ;; nil in this case. these
                                 ;; describe "out-of-box" specs
                                 :hyper-vars list-hv))
         :form '(regression-additive-error
                 :central-form (linear-form drv pv dpv)
                 :error-form 'normal-error)
         :resulting-decision '(point-estimation interval-estimation)
         :philosophy 'frequentist
         :documentation "use least squares to fit a linear regression
                         model to data."))
  (defparameter *statistical-philosophies*
    '(frequentist bayesian fiducial decision-analysis)
    "can be combined to build decision-making approaches and
     characterizations")

  (defparameter *decisions*
    '(estimation selection testing)
    "possible results from a...")
  ;; is this really true?  One can embed hypothesis testing within
  ;; estimation, treating the hypotheses as the quantities estimated in
  ;; order to select.  And categorical/continuous rear their ugly heads,
  ;; but not really in an essential way.
  (defparameter *ontology-of-decision-procedures*
    (list :decisions
          (list :estimation
                (list :point
                      (list :maximum-likelihood
                            :minimum-entropy
                            :least-squares
                            :method-of-moments)
                      :interval
                      (list :maximum-likelihood)
                      :testing
                      (list :fisherian
                            :neyman-pearson
                            (list :traditional
                                  :bioequivalence-inversion)
                            :selection
                            (list :ranking
                                  :top-k-of-n-select))
                      :parametric
                      :partially-parametric))
          "start of ontology"))
  )
;;;; LM

(progn

  (defparameter *y*
    (make-vector
     8
     :type :row
     :initial-contents '((1d0 2d0 3d0 4d0 5d0 6d0 7d0 8d0))))

  (defparameter *xv+1*
    (make-matrix
     8 2
     :initial-contents '((1d0 1d0)
                         (1d0 3d0)
                         (1d0 2d0)
                         (1d0 4d0)
                         (1d0 3d0)
                         (1d0 5d0)
                         (1d0 4d0)
                         (1d0 6d0))))
  ;; so something like (NOTE: matrices are transposed to begin with,
  ;; hence the incongruity)
  (defparameter *xtx-2* (m* (transpose *xv+1*) *xv+1*))
  ;; #<LA-SIMPLE-MATRIX-DOUBLE 2 x 2
  ;;   8.0d0 28.0d0
  ;;  28.0d0 116.0d0>

  (defparameter *xty-2* (m* (transpose *xv+1*) (transpose *y*)))
  ;; #<LA-SIMPLE-VECTOR-DOUBLE (2 x 1)
  ;;   36.0d0
  ;;  150.0d0>
  (defparameter *rcond-2* 0.000001)
  (defparameter *betahat-2* (gelsy *xtx-2* *xty-2* *rcond-2*))
  ;; *xtx-2* => "details of complete orthogonal factorization"
  ;; according to man page:
  ;; #<LA-SIMPLE-MATRIX-DOUBLE 2 x 2
  ;;  -119.33147112141039d0 -29.095426104883202d0
  ;;     0.7873402682880205d0 -1.20672274167718d0>

  ;; *xty-2* => output becomes solution:
  ;; #<LA-SIMPLE-VECTOR-DOUBLE (2 x 1)
  ;;  -0.16666666666668312d0
  ;;   1.333333333333337d0>

  *betahat-2* ; which matches R, see below

  (documentation 'gelsy 'function)

  ;; (#<LA-SIMPLE-VECTOR-DOUBLE (2 x 1)
  ;;   -0.16666666666668312 1.333333333333337>
  ;;  2)
  ;; ## Test case in R:
  ;; x <- c( 1.0, 3.0, 2.0, 4.0, 3.0, 5.0, 4.0, 6.0)
  ;; y <- c( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)
  ;; lm(y~x)
  ;; ## => Call:  lm(formula = y ~ x)
  ;;
  ;; Coefficients:  (Intercept)        x
  ;;                    -0.1667   1.3333
  ;;
  ;; summary(lm(y~x))
  ;; ## =>
  ;; Call:
  ;; lm(formula = y ~ x)
  ;;
  ;; Residuals:
  ;;        Min         1Q     Median         3Q        Max
  ;; -1.833e+00 -6.667e-01 -3.886e-16  6.667e-01  1.833e+00
  ;;
  ;; Coefficients:
  ;;             Estimate Std. Error t value Pr(>|t|)
  ;; (Intercept)  -0.1667     1.1587  -0.144  0.89034
  ;; x             1.3333     0.3043   4.382  0.00466 **
  ;; ---
  ;; Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
  ;;
  ;; Residual standard error: 1.291 on 6 degrees of freedom
  ;; Multiple R-squared: 0.7619,     Adjusted R-squared: 0.7222
  ;; F-statistic:  19.2 on 1 and 6 DF,  p-value: 0.004659
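
  ;; A quick follow-up sketch (not in the original): given *betahat-2*
  ;; above, compute fitted values and residuals with the same operators
  ;; used elsewhere in this file (m*, transpose, v-, and FIRST to pull
  ;; the solution out of gelsy's return list).  The residuals should line
  ;; up with the R residual summary above.
  (defparameter *yhat-2*  (m* *xv+1* (first *betahat-2*)))  ; 8x1 fitted values
  (defparameter *resid-2* (v- (transpose *y*) *yhat-2*))    ; 8x1 residuals
  *resid-2*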
  ;; which suggests that, modulo ensuring correct orientations, one
  ;; might do the following.  When this is finalized, it should migrate
  ;; to CLS.
  (defparameter *n* 20) ; # rows = # obsns
  (defparameter *p* 10) ; # cols = # vars
  (defparameter *x-temp* (rand *n* *p*))
  (defparameter *b-temp* (rand *p* 1))
  (defparameter *y-temp* (m* *x-temp* *b-temp*))
  ;; so Y = X b + \eps
  (defparameter *rcond* (* (coerce (expt 2 -52) 'double-float)
                           (max (nrows *x-temp*) (ncols *y-temp*))))
  (defparameter *orig-x* (copy *x-temp*))
  (defparameter *orig-b* (copy *b-temp*))
  (defparameter *orig-y* (copy *y-temp*))

  (defparameter *lm-result* (lm *x-temp* *y-temp*))
  (princ (first *lm-result*))
  (princ (second *lm-result*))
  (princ (third *lm-result*))
  (v= (third *lm-result*)
      (v- (first (first *lm-result*))
          (first (second *lm-result*))))
  ;; Some issues exist in the LAPACK vs. LINPACK variants, hence R
  ;; uses LINPACK primarily, rather than LAPACK.  See comments in R
  ;; source for issues.

  ;; Goal is to start from X, Y and then realize that if
  ;;   Y = X \beta      (i.e. 8x1 = 8xp px1 + 8x1)
  ;; then
  ;;   XtX \hat\beta = Xt Y
  ;; so that we can solve the equation W \beta = Z where W and Z
  ;; are known, to estimate \beta.

  ;; the above is known to be numerically unstable -- some processing
  ;; of X is preferred and should be done prior.  And most of the
  ;; transformation-based work does precisely that.

  ;; recall:  Var[Y] = E[(Y - E[Y])(Y - E[Y])t]
  ;;                 = E[Y Yt] - 2 \mu \mut + \mu \mut
  ;;                 = E[Y Yt] - \mu \mut
  ;; and in the scalar case, Var Y = E[Y^2] - \mu^2
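
  ;; Tiny numeric sanity check of the scalar identity above, in plain CL
  ;; (a sketch; nothing CLS-specific is assumed): for y = (1 2 3),
  ;; E[Y^2] = 14/3 and \mu = 2, so Var Y = 14/3 - 4 = 2/3.
  (let* ((y '(1d0 2d0 3d0))
         (n (length y))
         (mu (/ (reduce #'+ y) n))
         (ey2 (/ (reduce #'+ (mapcar #'(lambda (v) (* v v)) y)) n)))
    (- ey2 (* mu mu))) ; => ~0.6667d0, i.e. 2/3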
  ;; For initial estimates of covariance of \hat\beta:
  ;;   \hat\beta = (Xt X)^-1 Xt Y
  ;; with
  ;;   E[\hat\beta] = E[(Xt X)^-1 Xt Y]
  ;;                = E[(Xt X)^-1 Xt (X \beta)]
  ;;                = \beta

  ;; So Var[\hat\beta] = \sigma^2 (Xt X)^-1,
  ;; and this gives SE(\beta_i) = (* (sqrt (mref Var i i)) adjustment)
  ;; from docs:

  (setf *temp-result*
        (let ((*default-implementation* :foreign-array))
          (let* ((m 10)
                 (n 10)
                 (a (rand m n))
                 (x (rand n 1))
                 (b (m* a x))
                 (rcond (* (coerce (expt 2 -52) 'double-float)
                           (max (nrows a) (ncols a))))
                 (orig-a (copy a))
                 (orig-b (copy b))
                 (orig-x (copy x)))
            (list x (gelsy a b rcond))
            ;; no applicable conversion?
            ;; (m- (#<FA-SIMPLE-VECTOR-DOUBLE (10 x 1))
            ;;     (#<FA-SIMPLE-VECTOR-DOUBLE (10 x 1)) )
            (v- x (first (gelsy a b rcond))))))

  (princ *temp-result*)
  (setf *temp-result*
        (let ((*default-implementation* :lisp-array))
          (let* ((m 10)
                 (n 10)
                 (a (rand m n))
                 (x (rand n 1))
                 (b (m* a x))
                 (rcond (* (coerce (expt 2 -52) 'double-float)
                           (max (nrows a) (ncols a))))
                 (orig-a (copy a))
                 (orig-b (copy b))
                 (orig-x (copy x)))
            (list x (gelsy a b rcond))
            (m- x (first (gelsy a b rcond))))))

  (princ *temp-result*)
  (defparameter *xv*
    (make-vector
     8
     :type :row ;; default, not usually needed!
     :initial-contents '((1d0 3d0 2d0 4d0 3d0 5d0 4d0 6d0))))

  (defparameter *y*
    (make-vector
     8
     :type :row
     :initial-contents '((1d0 2d0 3d0 4d0 5d0 6d0 7d0 8d0))))
  ;; so something like (NOTE: matrices are transposed to begin with,
  ;; hence the incongruity)
  (defparameter *xtx-1* (m* *xv* (transpose *xv*)))
  (defparameter *xty-1* (m* *xv* (transpose *y*)))
  (defparameter *rcond-in* (* (coerce (expt 2 -52) 'double-float)
                              (max (nrows *xtx-1*)
                                   (ncols *xty-1*))))

  (defparameter *betahat* (gelsy *xtx-1* *xty-1* *rcond-in*))

  ;; (#<LA-SIMPLE-VECTOR-DOUBLE (1 x 1)
  ;;   1.293103448275862>
  ;;  1)

  ;; ## Test case in R:
  ;; x <- c( 1.0, 3.0, 2.0, 4.0, 3.0, 5.0, 4.0, 6.0)
  ;; y <- c( 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)
  ;; lm(y~x-1)
  ;; ## =>
  ;; Call:
  ;; lm(formula = y ~ x - 1)
  ;;
  ;; Coefficients:
  ;;     x
  ;; 1.293

  (first *betahat*))
#+nil
(progn
  (asdf:oos 'asdf:load-op 'cl-plplot)

  (plot-ex))