From 2a5cffbf408a7a1ee4e5405afe27205b8288a474 Mon Sep 17 00:00:00 2001 From: Petr Baudis Date: Sat, 13 Mar 2010 23:01:39 +0100 Subject: [PATCH] tex: Naive Bayes experimental results --- tex/gostyle.tex | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tex/gostyle.tex b/tex/gostyle.tex index a7eaa45..c004c2b 100644 --- a/tex/gostyle.tex +++ b/tex/gostyle.tex @@ -1360,6 +1360,7 @@ Neural network & 4.32 & 6.06 & {\bf 3.37} & 3.60 & 4.337 $k$-NN ($k=3$) & 4.20 & {\bf 5.73} & 4.92 & 2.90 & 4.439 & 2.65 \\ $k$-NN ($k=2$) & 4.21 & 6.18 & 4.83 & {\bf 2.80} & 4.503 & 2.62 \\ $k$-NN ($k=4$) & {\bf 4.01} & 6.25 & 5.06 & 3.05 & 4.590 & 2.57 \\ +Naive Bayes & 4.48 & 6.90 & 5.48 & 3.70 & 5.143 & 2.29 \\ Random class. & 12.26 & 12.33 & 12.40 & 10.11 & 11.776 & 1.00 \\ \hline %Joint classifier & {\bf 4.008} & {\bf 5.732} & 3.379 & {\bf 2.796} & {\bf 3.979} & {\bf 2.96} \\ \hline %Neural network & 4.319 & 6.060 & {\bf 3.368} & 3.602 & 4.337 & 2.72 \\ @@ -1400,6 +1401,13 @@ Input & Hidden & Output \\ \hline The network was trained until the square error on the training set was smaller than $0.0003$. Due to a small number of input vectors, this only took $20$ iterations of RPROP learning algorithm on average. +\subsubsection{Naive Bayes parameters} + +We have chosen $k = 10/7$ as our discretization parameter; +ideally, we would use $k = 1$ to fully cover the style marks +domain; however, our training sample is probably too small for +that. + \section{Proposed Applications} We believe that our findings might be useful for many applications -- 2.11.4.GIT