From d09544614cce6d6818c42287e1d9b2b6eddf5ace Mon Sep 17 00:00:00 2001
From: Eric Schulte
Date: Tue, 9 Nov 2010 12:31:24 -0700
Subject: [PATCH] simple perceptron learning -- binary representations

---
 neural_net/core.clj | 49 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 13 deletions(-)

diff --git a/neural_net/core.clj b/neural_net/core.clj
index 9f2c2ec..1af3c1c 100644
--- a/neural_net/core.clj
+++ b/neural_net/core.clj
@@ -1,4 +1,5 @@
-(ns neural-net.core)
+(ns neural-net.core
+  (:use clojure.contrib.math))
 
 (defprotocol Neural
   "Protocol implemented by any element of a neural network."
@@ -16,7 +17,7 @@
   (spec [this] this)
   (run [this x] ((this :phi) ((this :accum) x (this :weights))))
   (learn [this x y d] ((this :learn) this x (or y (run this x)) d))
-  (train [this x y d] ((fn [delta] [delta ((this :train) delta)])
+  (train [this x y d] ((fn [delta] [delta ((this :train) this delta)])
                        (learn this x y d)))
   (inputs [this] (count (this :weights)))
   (outputs [this] 1)
@@ -97,9 +98,8 @@
 
 ;; learning on a perceptron
 (defn perceptron:learn [this x y d]
-  (if (= y d)
-    (map (fn [_] 0) (this :weights))
-    (vec (map (fn [w x] (* (this :eta) (- w x))) (this :weights) x))))
+  (let [dir ((fn [dif] (if (= dif 0) 0 (/ dif (abs dif)))) (- d y))]
+    (vec (map (fn [x] (* dir (this :eta) x)) x))))
 
 (let [n {:phi identity
          :weights [1 1]
@@ -108,6 +108,29 @@
          :eta 0.1}]
   (learn n [0 1] (run n [0 1]) 0)) ; [0.1 0.0]
 
+(let [n {:phi identity
+         :weights [0 0]
+         :accum (comp (partial reduce +) (partial map (fn [x w] (* x w))))
+         :learn perceptron:learn
+         :train (fn [n delta]
+                  (assoc n :weights (vec (map + (n :weights) delta))))
+         :eta 0.01}
+      epic [[[0 0] 0]
+            [[0 1] 1]
+            [[1 0] 2]
+            [[1 1] 3]]
+      x [1 1]
+      d 1]
+  (= d (run (second (train n x (run n x) d)) x)) ; true
+  ((second (train n [0 1] (run n [0 1]) 0)) :weights) ; [1.0 0.9]
+  ;; converting binary to decimal
+  ((reduce (fn [m _]
+             (reduce
+              (fn [n p]
+                (second (train n (first p) (run n (first p)) (second p))))
+              m epic)) n (range 100))
+   :weights)) ; [2.0000000000000013 1.0000000000000007]
+
 ;; back propagation learning
 (defn back-prop:run [v] {:v v :y v})
 
@@ -122,16 +145,16 @@
                           (map :y x) (this :weights))))
     (if (and (map? d) (get d :desired))
       (* (- (d :desired) (res :y)) ; output layer
-         ((this :d-phi) (res :v))) 
+         ((this :d-phi) (res :v)))
       (* ((this :d-phi) (get res :v 1)) ; hidden layer
          (reduce + (map (fn [a] (* (a :gradient) (a :weight)))
                         (if (vector? d) d (vec (list d)))))))))
- 
+
 (let [n {:phi back-prop:run
          :d-phi (fn [_] 1)
         :accum (comp (partial reduce +)
                      (partial map (fn [x w] (* (x :y) w))))
         :weights [1 1]}
      n1 (assoc n :weights [1 1])
      n2 (assoc n :weights [2 2])
      x [{:y 1} {:y 1}]]
@@ -140,11 +163,11 @@
   (run n1 x)
   ;; =>
   ;; {:v 2, :y 2}
-  ;; 
+  ;;
   (run (list n1 n2) x)
   ;; =>
   ;; [{:v 2, :y 2} {:v 4, :y 4}]
-  ;; 
+  ;;
   (run [(list n1 n2) n1] x)
   ;; =>
   ;; {:v 6, :y 6}
@@ -152,14 +175,14 @@
   (learn n1 x (run n1 x) {:desired 3})
   ;; => [{:delta-w 0.1, :weight 1, :gradient 1}
   ;;     {:delta-w 0.1, :weight 1, :gradient 1}]
-  ;; 
+  ;;
   (learn (list n1 n2) x nil [{:desired 3} {:desired 3}])
   ;; =>
   ;; [[{:delta-w 0.1, :weight 1, :gradient 1}
   ;;   {:delta-w -0.1, :weight 2, :gradient -1}]
   ;;  [{:delta-w 0.1, :weight 1, :gradient 1}
   ;;   {:delta-w -0.1, :weight 2, :gradient -1}]]
-  ;; 
+  ;;
   (learn (list n1 n2) x nil (learn n1 x (run n1 x) {:desired 3}))
   ;; =>
   ;; [[{:delta-w 0.1, :weight 1, :gradient 1}
@@ -191,5 +214,5 @@
       x [{:y 1} {:y 1}]
       d {:desired 3}]
   (check [(list n1 n2) n2
-          (list n1 n1) (list n1 n1 n2 n2 n1)])) ; ({:at 1, :from 1, :to 2}) 
-  )
\ No newline at end of file
+          (list n1 n1) (list n1 n1 n2 n2 n1)])) ; ({:at 1, :from 1, :to 2})
+  )
-- 
2.11.4.GIT
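
Note on the updated rule (not part of the patch): perceptron:learn now moves
each weight by eta in the direction of the output error, delta_i = sign(d - y)
* eta * x_i, instead of comparing weights to inputs. A minimal standalone
sketch of the same arithmetic, using a local sign helper in place of
clojure.contrib.math's abs; the names sign and delta are illustrative, not
from the patch:

;; direction of the error: 1, -1, or 0 when output already matches
(defn sign [dif] (cond (pos? dif) 1 (neg? dif) -1 :else 0))

;; weight change for learning rate eta, input vector x, actual y, desired d
(defn delta [eta x y d]
  (mapv (fn [xi] (* (sign (- d y)) eta xi)) x))

(delta 0.1 [0 1] 1 0)                 ; => [-0.0 -0.1]
(mapv + [1 1] (delta 0.1 [0 1] 1 0))  ; => [1.0 0.9], as in the hunk above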