1 \documentclass[journal]{IEEEtran}
3 \usepackage{cite}
4 % cite.sty was written by Donald Arseneau
5 % V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
6 % \cite{} output to follow that of IEEE. Loading the cite package will
7 % result in citation numbers being automatically sorted and properly
8 % "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
9 % cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
10 % \cite will automatically add leading space, if needed. Use cite.sty's
11 % noadjust option (cite.sty V3.8 and later) if you want to turn this off.
12 % cite.sty is already installed on most LaTeX systems. Be sure and use
13 % version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
14 % not currently provide for hyperlinked citations.
15 % The latest version can be obtained at:
16 % http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
17 % The documentation is contained in the cite.sty file itself.
20 % *** GRAPHICS RELATED PACKAGES ***
22 \ifCLASSINFOpdf
23 % \usepackage[pdftex]{graphicx}
24 % declare the path(s) where your graphic files are
25 % \graphicspath{{../pdf/}{../jpeg/}}
26 % and their extensions so you won't have to specify these with
27 % every instance of \includegraphics
28 % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
29 \else
30 % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
31 % will default to the driver specified in the system graphics.cfg if no
32 % driver is specified.
33 % \usepackage[dvips]{graphicx}
34 \usepackage{graphicx}
35 % declare the path(s) where your graphic files are
36 % \graphicspath{{../eps/}}
37 % and their extensions so you won't have to specify these with
38 % every instance of \includegraphics
39 % \DeclareGraphicsExtensions{.eps}
40 \fi
42 \usepackage{threeparttable}
44 \usepackage{algorithm}
45 \usepackage{algorithmic}
46 %\usepackage{algpseudocode}
% WICKED: neither one works???
48 % algorithmic.sty can be obtained at:
49 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
50 % There is also a support site at:
51 % http://algorithms.berlios.de/index.html
52 % Also of interest may be the (relatively newer and more customizable)
53 % algorithmicx.sty package by Szasz Janos:
54 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/
56 % *** ALIGNMENT PACKAGES ***
58 %\usepackage{array}
59 % http://www.ctan.org/tex-archive/macros/latex/required/tools/
62 \usepackage{amsmath}
63 %\usepackage{mdwtab}
64 % http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/
67 % IEEEtran contains the IEEEeqnarray family of commands that can be used to
68 % generate multiline equations as well as matrices, tables, etc., of high
69 % quality.
71 %\usepackage{eqparbox}
72 % Also of notable interest is Scott Pakin's eqparbox package for creating
73 % (automatically sized) equal width boxes - aka "natural width parboxes".
74 % Available at:
75 % http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/
79 % *** SUBFIGURE PACKAGES ***
80 %\usepackage[tight,footnotesize]{subfigure}
81 % subfigure.sty was written by Steven Douglas Cochran. This package makes it
82 % easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
83 % work, it is a good idea to load it with the tight package option to reduce
84 % the amount of white space around the subfigures. subfigure.sty is already
85 % installed on most LaTeX systems. The latest version and documentation can
86 % be obtained at:
87 % http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
88 % subfigure.sty has been superceeded by subfig.sty.
92 %\usepackage[caption=false]{caption}
93 %\usepackage[font=footnotesize]{subfig}
94 % subfig.sty, also written by Steven Douglas Cochran, is the modern
95 % replacement for subfigure.sty. However, subfig.sty requires and
96 % automatically loads Axel Sommerfeldt's caption.sty which will override
97 % IEEEtran.cls handling of captions and this will result in nonIEEE style
98 % figure/table captions. To prevent this problem, be sure and preload
99 % caption.sty with its "caption=false" package option. This is will preserve
100 % IEEEtran.cls handing of captions. Version 1.3 (2005/06/28) and later
101 % (recommended due to many improvements over 1.2) of subfig.sty supports
102 % the caption=false option directly:
103 %\usepackage[caption=false,font=footnotesize]{subfig}
105 % The latest version and documentation can be obtained at:
106 % http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
107 % The latest version and documentation of caption.sty can be obtained at:
108 % http://www.ctan.org/tex-archive/macros/latex/contrib/caption/
112 % *** FLOAT PACKAGES ***
114 %\usepackage{fixltx2e}
115 % fixltx2e, the successor to the earlier fix2col.sty, was written by
116 % Frank Mittelbach and David Carlisle. This package corrects a few problems
117 % in the LaTeX2e kernel, the most notable of which is that in current
118 % LaTeX2e releases, the ordering of single and double column floats is not
119 % guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
120 % single column figure to be placed prior to an earlier double column
121 % figure. The latest version and documentation can be found at:
122 % http://www.ctan.org/tex-archive/macros/latex/base/
126 %\usepackage{stfloats}
127 % stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
128 % the ability to do double column floats at the bottom of the page as well
129 % as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
130 % LaTeX2e). It also provides a command:
131 %\fnbelowfloat
132 % to enable the placement of footnotes below bottom floats (the standard
133 % LaTeX2e kernel puts them above bottom floats). This is an invasive package
134 % which rewrites many portions of the LaTeX2e float routines. It may not work
135 % with other packages that modify the LaTeX2e float routines. The latest
136 % version and documentation can be obtained at:
137 % http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
138 % Documentation is contained in the stfloats.sty comments as well as in the
139 % presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
140 % does not allow \baselineskip to stretch. Authors submitting work to the
141 % IEEE should note that IEEE rarely uses double column equations and
142 % that authors should try to avoid such use. Do not be tempted to use the
143 % cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
144 % not format its papers in such ways.
147 %\ifCLASSOPTIONcaptionsoff
148 % \usepackage[nomarkers]{endfloat}
149 % \let\MYoriglatexcaption\caption
150 % \renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
151 %\fi
152 % endfloat.sty was written by James Darrell McCauley and Jeff Goldberg.
153 % This package may be useful when used in conjunction with IEEEtran.cls'
154 % captionsoff option. Some IEEE journals/societies require that submissions
155 % have lists of figures/tables at the end of the paper and that
156 % figures/tables without any captions are placed on a page by themselves at
157 % the end of the document. If needed, the draftcls IEEEtran class option or
158 % \CLASSINPUTbaselinestretch interface can be used to increase the line
159 % spacing as well. Be sure and use the nomarkers option of endfloat to
160 % prevent endfloat from "marking" where the figures would have been placed
161 % in the text. The two hack lines of code above are a slight modification of
162 % that suggested by in the endfloat docs (section 8.3.1) to ensure that
163 % the full captions always appear in the list of figures/tables - even if
164 % the user used the short optional argument of \caption[]{}.
165 % IEEE papers do not typically make use of \caption[]'s optional argument,
166 % so this should not be an issue. A similar trick can be used to disable
167 % captions of packages such as subfig.sty that lack options to turn off
168 % the subcaptions:
169 % For subfig.sty:
170 % \let\MYorigsubfloat\subfloat
171 % \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
172 % For subfigure.sty:
173 % \let\MYorigsubfigure\subfigure
174 % \renewcommand{\subfigure}[2][\relax]{\MYorigsubfigure[]{#2}}
175 % However, the above trick will not work if both optional arguments of
176 % the \subfloat/subfig command are used. Furthermore, there needs to be a
177 % description of each subfigure *somewhere* and endfloat does not add
178 % subfigure captions to its list of figures. Thus, the best approach is to
179 % avoid the use of subfigure captions (many IEEE journals avoid them anyway)
180 % and instead reference/explain all the subfigures within the main caption.
181 % The latest version of endfloat.sty and its documentation can obtained at:
182 % http://www.ctan.org/tex-archive/macros/latex/contrib/endfloat/
184 % The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
185 % later in the document, say, to conditionally put the References on a
186 % page by themselves.
188 % *** PDF, URL AND HYPERLINK PACKAGES ***
190 %\usepackage{url}
191 % url.sty was written by Donald Arseneau. It provides better support for
192 % handling and breaking URLs. url.sty is already installed on most LaTeX
193 % systems. The latest version can be obtained at:
194 % http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
195 % Read the url.sty source comments for usage information. Basically,
196 % \url{my_url_here}.
199 % *** Do not adjust lengths that control margins, column widths, etc. ***
200 % *** Do not use packages that alter fonts (such as pslatex). ***
201 % There should be no need to do such things with IEEEtran.cls V1.6 and later.
202 % (Unless specifically asked to do so by the journal or conference you plan
203 % to submit to, of course. )
205 % correct bad hyphenation here
206 \hyphenation{op-tical net-works semi-conduc-tor}
209 \begin{document}
211 % paper title
212 % can use linebreaks \\ within to get better formatting as desired
213 \title{On Move Pattern Trends\\in Large Go Games Corpus}
215 % use \thanks{} to gain access to the first footnote area
216 % a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
217 % was not built to handle multiple paragraphs
218 \author{Petr~Baudi\v{s},~Josef~Moud\v{r}\'{i}k% <-this % stops a space
\thanks{P. Baudi\v{s} is a student at the Faculty of Math and Physics, Charles University, Prague, CZ, and also does some of his Computer Go research as an employee of SUSE Labs Prague, Novell CZ.}% <-this % stops a space
\thanks{J. Moud\v{r}\'{i}k is a student at the Faculty of Math and Physics, Charles University, Prague, CZ.}}
222 % note the % following the last \IEEEmembership and also \thanks -
223 % these prevent an unwanted space from occurring between the last author name
224 % and the end of the author line. i.e., if you had this:
226 % \author{....lastname \thanks{...} \thanks{...} }
227 % ^------------^------------^----Do not want these spaces!
229 % a space would be appended to the last name and could cause every name on that
230 % line to be shifted left slightly. This is one of those "LaTeX things". For
231 % instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
232 % "AB" then you have to do: "\textbf{A}\textbf{B}"
233 % \thanks is no different in this regard, so shield the last } of each \thanks
234 % that ends a line with a % and do not let a space in before the next \thanks.
235 % Spaces after \IEEEmembership other than the last one are OK (and needed) as
236 % you are supposed to have spaces between the names. For what it is worth,
237 % this is a minor point as most people would not even notice if the said evil
238 % space somehow managed to creep in.
241 % The paper headers
242 \markboth{Transactions on Computational Intelligence and AI in Games}%
{On Move Pattern Trends in Large Go Games Corpus}
244 % The only time the second header will appear is for the odd numbered pages
245 % after the title page when using the twoside option.
247 % *** Note that you probably will NOT want to include the author's ***
248 % *** name in the headers of peer review papers. ***
249 % You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
250 % you desire.
255 % If you want to put a publisher's ID mark on the page you can do it like
256 % this:
257 %\IEEEpubid{0000--0000/00\$00.00~\copyright~2007 IEEE}
258 % Remember, if you use this you must call \IEEEpubidadjcol in the second
259 % column for its text to clear the IEEEpubid mark.
263 % use for special paper notices
264 %\IEEEspecialpapernotice{(Invited Paper)}
269 % make the title area
270 \maketitle
273 \begin{abstract}
274 %\boldmath
We process a~large corpus of game records of the board game of Go and
propose a~way to extract per-player summary information on played moves.
We then apply several basic data-mining methods to the summary
information to identify its most differentiating features,
and discuss their correspondence with traditional
Go knowledge. We show mappings of the features to player attributes
like playing strength or informally perceived ``playing style'' (such as
territoriality or aggressivity), and propose applications including
seeding real-world ranks of internet players, aiding in Go study, and
contributing to the discussion within Go theory on the scope of ``playing
style''.
288 \end{abstract}
289 % IEEEtran.cls defaults to using nonbold math in the Abstract.
290 % This preserves the distinction between vectors and scalars. However,
291 % if the journal you are submitting to favors bold math in the abstract,
292 % then you can use LaTeX's standard command \boldmath at the very start
293 % of the abstract to achieve this. Many IEEE journals frown on math
294 % in the abstract anyway.
296 % Note that keywords are not normally used for peerreview papers.
297 \begin{IEEEkeywords}
board games, Go, data mining, pattern recognition, player strength, playing style
299 \end{IEEEkeywords}
306 % For peer review papers, you can put extra information on the cover
307 % page as needed:
308 % \ifCLASSOPTIONpeerreview
309 % \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
310 % \fi
312 % For peerreview papers, this IEEEtran command inserts a page break and
313 % creates the second title. It will be ignored for other modes.
314 \IEEEpeerreviewmaketitle
318 \section{Introduction}
319 % The very first letter is a 2 line initial drop letter followed
320 % by the rest of the first word in caps.
322 % form to use if the first word consists of a single letter:
323 % \IEEEPARstart{A}{demo} file is ....
325 % form to use if you need the single drop letter followed by
326 % normal text (unknown if ever used by IEEE):
327 % \IEEEPARstart{A}{}demo file is ....
329 % Some journals put the first two words in caps:
330 % \IEEEPARstart{T}{his demo} file is ....
332 % Here we have the typical use of a "T" for an initial drop letter
333 % and "HIS" in caps to complete the first word.
\IEEEPARstart{T}{he} field of Computer Go usually focuses on the problem
of creating a~program to play the game, finding the best move from a~given
board position. We will make use of one method developed in the course
of such research and apply it to the analysis of existing game records,
aiming instead to help humans play the game better.
340 Go is a~two-player full-information board game played
341 on a~square grid (usually $19\times19$ lines) with black and white
342 stones; the goal of the game is to surround the most territory and
343 capture enemy stones. We assume basic familiarity with the game.
345 Many Go players are eager to play using computers (usually over
346 the internet) and review games played by others on computers as well.
347 This means that large amounts of game records are collected and digitally
stored, enabling easy processing of such collections. However, so far
little has been done with the available data --- we are aware
only of uses for simple win/loss statistics (TODO: KGS Stats, KGS Analytics,
Pro Go Rating) and ``next move'' statistics on a~specific position (TODO:
Kombilo, Moyo Go Studio).
354 We present a~more in-depth approach --- from all played moves, we devise
355 a~compact evaluation of each player. We then explore correlations between
356 evaluations of various players in light of externally given information.
This way, we can discover similarities between the move characteristics of
players with the same playing strength, or discuss the meaning of the
``playing style'' concept on the assumption that similar playing styles
should yield similar move characteristics.
363 \section{Data Extraction}
364 \label{pattern-vectors}
366 As the input of our analysis, we use large collections of game records\footnote{We
367 use the SGF format (TODO) in our implementation.} organized by player names.
368 In order to generate the required compact description of most frequently played moves,
we construct a set of the $n$ most frequently occurring patterns (\emph{top patterns})
370 across all players and games from the database.\footnote{We use $n=500$ in our analysis.}
For each player, we then count how many times each of those $n$ patterns was played
during all his games and finally assign him a~{\em pattern vector} $\vec p$ of dimension $n$, with each
dimension corresponding to the relative number of occurrences of a given pattern
(with respect to the player's most played \emph{top pattern}). Using relative numbers of occurrences ensures that
each dimension of the player's \emph{pattern vector} is scaled to the range $[0,1]$ and
therefore even players with different numbers of games in the database have comparable \emph{pattern vectors}.
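
For illustration, a minimal Python sketch of this normalization follows
(the function and variable names are ours and not part of the GoStyle
implementation; per-player raw counts are assumed to be available as a mapping):
\begin{verbatim}
def pattern_vector(counts, top_patterns):
    # counts: dict pattern -> times the player played it
    # top_patterns: the n globally most frequent patterns
    raw = [counts.get(pat, 0) for pat in top_patterns]
    most_played = max(raw) or 1   # guard against empty players
    # each dimension is relative to the player's most
    # played top pattern, hence scaled to [0, 1]
    return [c / most_played for c in raw]
\end{verbatim}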
379 \subsection{Pattern Features}
380 We need to define how to compose the patterns we use to describe moves.
There are some tradeoffs in play --- overly general descriptions carry too little
information to discern various player attributes; overly specific descriptions
gather too few specimens over the games sample and the vector differences are
not statistically significant.
386 We have chosen an intuitive and simple approach inspired by pattern features
used when computing Elo ratings for candidate patterns in Computer Go play
\cite{ELO}. Each pattern is a~combination of several {\em pattern features}
(name--value pairs) matched at the position of the played move.
390 We use these features:
392 \begin{itemize}
393 \item capture move flag
394 \item atari move flag
395 \item atari escape flag
\item contiguity-to-last flag --- whether the move has been played in one of the 8 neighbors of the last move
397 \item contiguity-to-second-last flag
398 \item board edge distance --- only up to distance 4
399 \item spatial pattern --- configuration of stones around the played move
400 \end{itemize}
402 The spatial patterns are normalized (using a dictionary) to be always
403 black-to-play and maintain translational and rotational symmetry.
404 Configurations of radius between 2 and 9 in the gridcular metric%
405 \footnote{The {\em gridcular} metric
406 $d(x,y) = |\delta x| + |\delta y| + \max(|\delta x|, |\delta y|)$ defines
407 a circle-like structure on the Go board square grid. \cite{SpatPat} }
408 are matched.
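
As an illustration of the gridcular metric from the footnote, the following
hedged Python sketch (ours, not the Pachi code) computes the distance and
enumerates the points within a given radius of the played move:
\begin{verbatim}
def gridcular_distance(a, b):
    # d(x,y) = |dx| + |dy| + max(|dx|, |dy|)
    dx, dy = abs(a[0] - b[0]), abs(a[1] - b[1])
    return dx + dy + max(dx, dy)

# board points within gridcular radius 2 of the move at (0, 0)
radius2 = [(x, y)
           for x in range(-2, 3) for y in range(-2, 3)
           if gridcular_distance((0, 0), (x, y)) <= 2]
\end{verbatim}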
410 \subsection{Implementation}
412 We have implemented the data extraction by making use of the pattern
413 features matching implementation within the Pachi go-playing program
414 (TODO). We extract information on players by converting the SGF game
records to a~GTP (TODO) stream that feeds Pachi's {\tt patternscan}
engine, which outputs a~single {\em patternspec} (string representation
of the particular pattern features combination) per move.
419 We can then gather all patternspecs played by a~given player and summarize
420 them; the $\vec p$ vector then consists of normalized counts of
421 the given $n$ most frequent patternspecs.
424 \section{Data Mining}
425 \label{data-mining}
To assess the properties of the gathered \emph{pattern vectors}
and their influence on playing styles,
we have processed the data using a~few basic data-mining techniques.
431 The first two methods ({\em analytic}) rely purely on data gathered
432 from the game collection
433 and serve to show internal structure and correlations within the data set.
434 Principal component analysis finds orthogonal vector components that
435 have the largest variance.
436 Reversing the process then indicates which patterns correlate with each style.
Additionally, PCA can be used as a preprocessing step for methods
that are adversely affected by \emph{pattern vector} component correlations.
The second method -- Kohonen maps -- is based on the theory of self-organizing maps of abstract neurons that
compete against each other for representation of the input space.
Because neurons in the network are organized in a two-dimensional plane,
the trained network virtually spreads the vectors over the 2D plane,
allowing for simple visualization of clusters of players with similar styles.
446 TODO: style vector -> output vector?
Furthermore, we have used and compared two \emph{classification} methods
that approximate a well-defined but unknown \emph{style vector} $\vec S$
based on an input \emph{pattern vector} $\vec P$.
The methods are calibrated using expert or prior knowledge about the
training pattern vectors, and their error is then measured on a testing
set of pattern vectors.

One of the methods is the $k$-Nearest Neighbor (kNN) classifier:
we approximate $\vec S$ by composing the \emph{style vectors} of the $k$ nearest \emph{pattern vectors}.
The other is based on a multi-layer feed-forward Artificial Neural Network:
the neural network can learn correlations between input and output vectors
and generalize the ``knowledge'' to unknown vectors; it can be more flexible
in the interpretation of different pattern vector elements and discern more
complex relations than the kNN classifier, but, e.g., requires a larger training sample.
TODO: Does the last sentence make any sense?!
465 \subsection{Principal Component Analysis}
\label{pca}
We use Principal Component Analysis (\emph{PCA}) \cite{Jolliffe1986}
468 to reduce the dimensions of the \emph{pattern vectors} while preserving
469 as much information as possible.
471 Briefly, PCA is an eigenvalue decomposition of a~covariance matrix of centered \emph{pattern vectors},
472 producing a~linear mapping $o$ from $n$-dimensional vector space
473 to a~reduced $m$-dimensional vector space.
474 The $m$ eigenvectors of the original vectors' covariance matrix
with the largest eigenvalues are used as the basis of the reduced vector space;
476 the eigenvectors form the transformation matrix $W$.
478 For each original \emph{pattern vector} $\vec p_i$,
479 we obtain its new representation $\vec r_i$ in the PCA base
480 as shown in the following equation:
481 \begin{equation}
482 \vec r_i = W \cdot \vec p_i
483 \end{equation}
The whole process is described in Algorithm \ref{alg:pca}.
487 \begin{algorithm}
488 \caption{PCA -- Principal Component Analysis}
489 \begin{algorithmic}
490 \label{alg:pca}
491 \REQUIRE{$m > 0$, set of players $R$ with \emph{pattern vectors} $p_r$}
492 \STATE $\vec \mu \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_r}$
493 \FOR{ $r \in R$}
494 \STATE $\vec p_r \leftarrow \vec p_r - \vec \mu$
495 \ENDFOR
496 \FOR{ $(i,j) \in \{1,... ,n\} \times \{1,... ,n\}$}
497 \STATE $\mathit{Cov}[i,j] \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_{ri} \cdot \vec p_{rj}}$
498 \ENDFOR
499 \STATE Compute Eigenvalue Decomposition of $\mathit{Cov}$ matrix
500 \STATE Get $m$ largest eigenvalues
501 \STATE Most significant eigenvectors ordered by decreasing eigenvalues form the rows of matrix $W$
502 \FOR{ $r \in R$}
503 \STATE $\vec r_r\leftarrow W \vec p_r$
504 \ENDFOR
505 \end{algorithmic}
506 \end{algorithm}
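
Algorithm \ref{alg:pca} can be expressed compactly with NumPy's
eigendecomposition routines; the following sketch assumes the pattern
vectors are stored as rows of a matrix and is only an illustration of the
procedure, not the MDP-based code we actually use:
\begin{verbatim}
import numpy as np

def pca_transform(P, m):
    # P: players x n matrix of pattern vectors
    mu = P.mean(axis=0)
    centered = P - mu
    cov = centered.T @ centered / P.shape[0]
    # eigh returns eigenvalues in ascending order
    eigvals, eigvecs = np.linalg.eigh(cov)
    # rows of W = m eigenvectors with largest eigenvalues
    W = eigvecs[:, ::-1][:, :m].T
    return centered @ W.T, W
\end{verbatim}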
508 We will want to find dependencies between PCA dimensions and dimensions
509 of some prior knowledge (player rank, style vector). For this, we use
the well-known {\em Pearson's $\chi^2$ test} \cite{Pearson}; the test
yields the probability (the p-value) of the null hypothesis that the two
distributions are statistically independent; we instead report the
complementary probability that they are in fact dependent.
515 TODO: Chi-square computation.
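
One possible way to carry out such a test in practice is sketched below
(our illustration only: it assumes the two continuous dimensions are first
discretized into a contingency table, which may differ from the binning
actually used):
\begin{verbatim}
import numpy as np
from scipy.stats import chi2_contingency

def dependence_probability(x, y, bins=4):
    # bin the two sample vectors into a contingency table
    table, _, _ = np.histogram2d(x, y, bins=bins)
    chi2, p, dof, expected = chi2_contingency(table)
    # p is the probability of the independence hypothesis;
    # report the complementary probability instead
    return 1.0 - p
\end{verbatim}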
517 \subsection{Kohonen Maps}
518 \label{koh}
A Kohonen map is a self-organizing network with neurons spread over a two-dimensional plane.
520 Neurons in the map compete for representation of portions of the input vector space.
521 Each neuron $\vec n$ represents a vector
522 and the network is trained so that the neurons that are topologically close
523 tend to represent vectors that are close as well.
525 First, a randomly initialized network is sequentially trained;
526 in each iteration, we choose a random training vector $\vec t$
and find the neuron $\vec w$ that is closest to $\vec t$ in the Euclidean metric
528 (we call $\vec w$ a \emph{winner neuron}).
We then adapt the neurons in the neighbourhood of $\vec w$ using the equation:
531 \begin{equation}
532 \vec n = \vec n + \alpha \cdot \mathit{Influence}(\vec w, \vec n) \cdot (\vec t - \vec n)
533 \end{equation}
534 where $\alpha$ is a learning parameter, usually decreasing in time.
$\mathit{Influence}()$ is a function that forces neurons to spread.
Such a function is usually realised using a Mexican hat function or a difference of Gaussians
(see \cite{TODO} for details).
The state of the network can be evaluated by summing the distances
between each $\vec t \in T$ and its corresponding \emph{winner neuron} $\vec w_t$:
540 \begin{equation}
541 \mathit{Error}(N,T) = \sum_{\vec t \in T}{|\vec w_t - \vec t|}
542 \end{equation}
545 \begin{algorithm}
546 \caption{Kohonen maps -- training}
547 \begin{algorithmic}
548 \label{alg:koh}
549 \REQUIRE{Set of training vectors $T$, input dimension $D$}
550 \REQUIRE{max number of iterations $M$, desired error $E$}
\STATE $N \leftarrow \{\vec n | \vec n$ random, $\mathit{dim}(\vec n) = D\}$
\STATE $\mathit{It} \leftarrow 0$
\REPEAT
553 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
554 \STATE $\vec t \leftarrow \mathit{PickRandom}(T)$
555 \FORALL{$\vec n \in N$}
556 \STATE $D[\vec n] \leftarrow \mathit{EuclideanDistance}(\vec n, \vec t)$
557 \ENDFOR
\STATE Find $\vec w \in N$ so that $D[\vec w] \leq D[\vec m], \forall \vec m \in N$
\FORALL{$\vec n \in \mathit{TopologicalNeighbors}(N, \vec w)$}
\STATE $\vec n \leftarrow \vec n + \alpha(\mathit{It}) \cdot \mathit{Influence}(\vec w, \vec n) \cdot ( \vec t - \vec n ) $
561 \ENDFOR
562 \UNTIL{$\mathit{Error}(N, T) < E$ or $ \mathit{It} > M$}
563 \end{algorithmic}
564 \end{algorithm}
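
A condensed Python rendering of the training loop of Algorithm \ref{alg:koh}
is sketched below; the Gaussian neighbourhood used as $\mathit{Influence}()$,
the grid size and the decay schedules are illustrative assumptions, not the
settings of the Kohonen library we actually use:
\begin{verbatim}
import numpy as np

def train_som(T, grid=(10, 10), iters=10000,
              alpha0=0.5, sigma0=3.0):
    h, w = grid
    # randomly initialized neurons, one vector per grid cell
    N = np.random.rand(h, w, T.shape[1])
    coords = np.dstack(np.mgrid[0:h, 0:w]).astype(float)
    for it in range(iters):
        t = T[np.random.randint(len(T))]
        dist = np.linalg.norm(N - t, axis=2)
        wi = np.unravel_index(dist.argmin(), dist.shape)
        alpha = alpha0 * (1.0 - it / iters)
        sigma = sigma0 * (1.0 - it / iters) + 0.5
        d2 = ((coords - coords[wi]) ** 2).sum(axis=2)
        influence = np.exp(-d2 / (2.0 * sigma ** 2))
        N += alpha * influence[..., None] * (t - N)
    return N
\end{verbatim}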
567 \subsection{k-nearest Neighbors Classifier}
568 \label{knn}
Our goal is to approximate a~player's \emph{style vector} $\vec S$
570 based on their \emph{pattern vector} $\vec P$.
571 To achieve this, we require prior knowledge of \emph{reference style vectors}
572 (see section \ref{style-vectors}).
574 In this method, we assume that similarities in players' \emph{pattern vectors}
575 uniformly correlate with similarities in players' \emph{style vectors}.
576 We try to approximate $\vec S$ as a weighted average of \emph{style vectors}
$\vec s_i$ of the $k$ players with \emph{pattern vectors} $\vec p_i$ closest to $\vec P$.
This is illustrated in Algorithm \ref{alg:knn}.
Note that the weight is a function of distance and is not explicitly defined in Algorithm \ref{alg:knn};
during our research, an exponentially decreasing weight has proven sufficient.
582 \begin{algorithm}
583 \caption{k-Nearest Neighbors}
584 \begin{algorithmic}
585 \label{alg:knn}
586 \REQUIRE{pattern vector $\vec P$, $k > 0$, set of reference players $R$}
587 \FORALL{$r \in R$ }
588 \STATE $D[r] \leftarrow \mathit{EuclideanDistance}(\vec p_r, \vec P)$
589 \ENDFOR
590 \STATE $N \leftarrow \mathit{SelectSmallest}(k, R, D)$
591 \STATE $\vec S \leftarrow \vec 0$
592 \FORALL{$r \in N $}
593 \STATE $\vec S \leftarrow \vec S + \mathit{Weight}(D[r]) \cdot \vec s_r $
594 \ENDFOR
595 \end{algorithmic}
596 \end{algorithm}
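
For concreteness, a Python sketch of Algorithm \ref{alg:knn} with one
possible exponentially decreasing weight follows; the decay constant and the
normalization by the weight sum (implied by a weighted average) are our
illustrative choices:
\begin{verbatim}
import numpy as np

def knn_style(P, ref_patterns, ref_styles, k=5, decay=1.0):
    # distances from P to every reference pattern vector
    d = np.linalg.norm(ref_patterns - P, axis=1)
    nearest = np.argsort(d)[:k]
    # exponentially decreasing weight of each neighbor
    w = np.exp(-d[nearest] / decay)
    weighted = (w[:, None] * ref_styles[nearest]).sum(axis=0)
    return weighted / w.sum()
\end{verbatim}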
598 \subsection{Neural Network Classifier}
599 \label{neural-net}
As an alternative to the k-Nearest Neighbors algorithm (section \ref{knn}),
we have used a classifier based on feed-forward artificial neural networks \cite{TODO}.
Neural networks (NN) are known for their ability to generalize
and find correlations and patterns between input and output data.
A neural network is an adaptive system that must undergo a training
period before it can be used reasonably, similar to the requirement
of reference vectors for the k-Nearest Neighbors algorithm above.
609 \subsubsection{Computation and activation of the NN}
Technically, a neural network is a network of interconnected computational units called neurons.
A feedforward neural network has a layered topology;
it usually has one \emph{input layer}, one \emph{output layer}
and an arbitrary number of \emph{hidden layers} in between.

Each neuron $i$ is connected to all neurons in the previous layer and each connection has its weight $w_{ij}$.

The computation proceeds in discrete time steps.
In the first step, the neurons in the \emph{input layer}
are \emph{activated} according to the \emph{input vector}.
Then, we iteratively compute the output of each neuron in the next layer
until the output layer is reached.
The activity of the output layer is then presented as the result.
The activation $y_i$ of neuron $i$ from layer $I$ is computed as
625 \begin{equation}
626 y_i = f\left(\sum_{j \in J}{w_{ij} y_j}\right)
627 \end{equation}
where $J$ is the previous layer and $y_j$ is the activation of neuron $j$ from layer $J$.
The function $f()$ is the so-called \emph{activation function};
its purpose is to bound the outputs of neurons.
631 A typical example of an activation function is the sigmoid function.%
632 \footnote{A special case of the logistic function, defined by the formula
633 $\sigma(x)=\frac{1}{1+e^{-(rx+k)}}$; parameters control the growth rate ($r$)
634 and the x-position ($k$).}
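
The layer-by-layer computation described above can be sketched in a few
lines of Python; the weight matrices and the sigmoid parameters here are
placeholders for illustration, not the trained network:
\begin{verbatim}
import numpy as np

def sigmoid(x, r=1.0, k=0.0):
    return 1.0 / (1.0 + np.exp(-(r * x + k)))

def feed_forward(weights, input_vector):
    # weights: one matrix per layer; rows correspond to the
    # neurons of that layer, columns to the previous layer
    y = np.asarray(input_vector)
    for W in weights:
        y = sigmoid(W @ y)   # activation of the next layer
    return y                 # activity of the output layer
\end{verbatim}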
636 \subsubsection{Training}
The training of the feed-forward neural network usually involves some
modification of the supervised backpropagation learning algorithm \cite{TODO}.
We use a first-order optimization algorithm called RPROP \cite{Riedmiller1993}.
641 Because the \emph{reference set} is usually not very large,
642 we have devised a simple method for its extension.
643 This enhancement is based upon adding random linear combinations
644 of \emph{style and pattern vectors} to the training set.
TODO: Is this an original idea?
648 As outlined above, the training set consists of pairs of
649 input vectors (\emph{pattern vectors}) and
650 desired output vectors (\emph{style vectors}).
651 The training set $T$ is then extended by adding the linear combinations:
652 \begin{equation}
653 T_\mathit{base} = \{(\vec p_r, \vec s_r) | r \in R\}\\
654 \end{equation}
655 \begin{equation}
656 T_\mathit{ext} = \left\{(\vec p, \vec s) \,\middle|\, \exists D \subseteq R : \vec p = \sum_{d \in D}{g_d \vec p_d}, \vec s = \sum_{d \in D}{g_d \vec s_d}\right\}
657 \end{equation}
TODO: incorporate $g_d$ into the definition above?
where $g_d, d \in D$ are random coefficients such that $\sum_{d \in D}{g_d} = 1$.
The training set is then constructed as:
661 \begin{equation}
662 T = T_\mathit{base} \cup \mathit{SomeFiniteSubset}(T_\mathit{ext})
663 \end{equation}
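
A sketch of how such random combinations can be generated is given below;
the subset size, the number of generated pairs and the use of normalized
uniform coefficients are our illustrative assumptions:
\begin{verbatim}
import numpy as np

def extend_training_set(patterns, styles,
                        extra=100, subset_size=3):
    # patterns, styles: one reference player per row
    P, S = [patterns], [styles]
    for _ in range(extra):
        idx = np.random.choice(len(patterns), subset_size,
                               replace=False)
        g = np.random.rand(subset_size)
        g /= g.sum()          # coefficients g_d sum to 1
        P.append(g @ patterns[idx])
        S.append(g @ styles[idx])
    return np.vstack(P), np.vstack(S)
\end{verbatim}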
665 The network is trained as shown in Algorithm \ref{alg:tnn}.
667 \begin{algorithm}
668 \caption{Training Neural Network}
669 \begin{algorithmic}
670 \label{alg:tnn}
671 \REQUIRE{Train set $T$, desired error $e$, max iterations $M$}
672 \STATE $N \leftarrow \mathit{RandomlyInitializedNetwork}()$
673 \STATE $\mathit{It} \leftarrow 0$
674 \REPEAT
675 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
676 \STATE $\Delta \vec w \leftarrow \vec 0$
677 \STATE $\mathit{TotalError} \leftarrow 0$
678 %\FORALL{$(\overrightarrow{Input}, \overrightarrow{DesiredOutput}) \in T$}
679 %\STATE $\overrightarrow{Output} \leftarrow Result(N, \overrightarrow{Input})$
680 %\STATE $E \leftarrow |\overrightarrow{DesiredOutput} - \overrightarrow{Output}|$
681 \FORALL{$(\mathit{Input}, \mathit{DesiredOutput}) \in T$}
682 \STATE $\mathit{Output} \leftarrow \mathit{Result}(N, \mathit{Input})$
683 \STATE $\mathit{Error} \leftarrow |\mathit{DesiredOutput} - \mathit{Output}|$
684 \STATE $\Delta \vec w \leftarrow \Delta \vec w + \mathit{WeightUpdate}(N,\mathit{Error})$
685 \STATE $\mathit{TotalError} \leftarrow \mathit{TotalError} + \mathit{Error}$
686 \ENDFOR
687 \STATE $N \leftarrow \mathit{ModifyWeights}(N, \Delta \vec w)$
688 \UNTIL{$\mathit{TotalError} < e$ or $ \mathit{It} > M$}
689 \end{algorithmic}
690 \end{algorithm}
693 \subsubsection{Architecture details}
694 TODO num layers, num neurons, ..
697 \subsection{Implementation}
699 We have implemented the data mining methods as an open-source framework ``gostyle'' \cite{TODO},
700 made available under the GNU GPL licence.
We use Python for the basic processing and most of the analysis;
the MDP library \cite{MDP} is used for the PCA analysis and the Kohonen library \cite{KohonenPy} for Kohonen maps.
The neural network classifier uses the libfann C library \cite{TODO}.
706 \section{Strength Estimator}
708 \begin{figure*}[!t]
709 \centering
710 \includegraphics[width=7in]{strength-pca}
711 \caption{PCA of by-strength vectors}
712 \label{fig:strength_pca}
713 \end{figure*}
715 First, we have used our framework to analyse correlations of pattern vectors
and playing strength. As in other competitively played board games, Go players
receive a real-world rating based on tournament games, and a rank based on their
rating.\footnote{An Elo-like rating system \cite{GoR} is usually used,
corresponding to even win chances for a game of two players with the same rank,
and about a 2:3 win chance for the stronger player in the case of a one-rank difference.}%
\footnote{Professional ranks and dan ranks in some Asian countries may
be assigned differently.} The amateur ranks range from 30kyu (beginner) to
1kyu (intermediate), followed by 1dan to 7dan (9dan in some systems;
top-level player). Multiple independent real-world ranking scales exist
(geographically based) and online servers maintain their own user rankings;
the difference can be up to several stones.
728 As the source game collection, we use Go Teaching Ladder
729 reviews\footnote{The reviews contain comments and variations --- we consider only the actual played game.}
730 \cite{GTL} --- this collection contains 7700 games of players with strength ranging
731 from 30k to 4d; we consider only even games with clear rank information, and then
732 randomly separate 770 games as a testing set. Since the rank information is provided
733 by the users and may not be consistent, we are forced to take a simplified look
734 at the ranks, discarding the differences between various systems and thus increasing
error in our model.\footnote{Since
our results seem satisfying, we did not pursue another collection.}
738 First, we have created a single pattern vector for each rank, from 30k to 4d;
739 we have performed PCA analysis on the pattern vectors, achieving near-perfect
740 rank correspondence in the first PCA dimension\footnote{The eigenvalue of the
second dimension was four orders of magnitude smaller, with no discernible
742 structure revealed within the lower-order eigenvectors.}
743 (figure \ref{fig:strength_pca}).
In order to measure the accuracy of the approximation of strength by the first dimension,
we have used the $\chi^2$ test, yielding probability $p=TODO$ that it is dependent
on the player strength.
Using the eigenvector position directly for classification
of players within the test group yields an MSE of TODO, thus providing
reasonably satisfying accuracy.
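
One straightforward way to realize this direct classification is a linear
fit between the first-dimension coordinate and a numeric rank encoding; the
following sketch is our illustration under that assumption, not the exact
evaluation code:
\begin{verbatim}
import numpy as np

def first_dimension_mse(train_coord, train_rank,
                        test_coord, test_rank):
    # linear fit of numeric rank vs. first PCA coordinate
    a, b = np.polyfit(train_coord, train_rank, 1)
    predicted = a * np.asarray(test_coord) + b
    return np.mean((predicted - np.asarray(test_rank)) ** 2)
\end{verbatim}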
To further enhance the strength estimator accuracy,
we have tried to train an NN classifier on our training set, consisting
of one $(\vec p, {\rm rank})$ pair per player --- we use the pattern vector
for the activation of the input neurons and the rank number as the result of the output
neuron. We then proceeded to test the NN on per-player pattern vectors built
from the games in the test set, yielding an MSE of TODO with TODO games per player
on average.
761 \section{Style Estimator}
As a second case study for our pattern analysis, we investigate pattern vectors $\vec p$
of various well-known players and their correlations with prior expert
knowledge. We look for a
relationship between pattern vectors and the perceived ``playing style'' and
attempt to use our classifiers to transform a pattern vector $\vec p$ into a style vector $\vec s$.
The source game collection is GoGoD Winter 2008 \cite{GoGoD} containing 55000
professional games, dating from early Go history 1500 years ago to the present.
We consider only the games of a small subset of players (table \ref{fig:style_marks});
we have chosen these for being well-known within the player community and
having a large number of played games in our collection.
775 \subsection{Expert-based knowledge}
776 \label{style-vectors}
777 In order to provide a reference frame for our style analysis,
778 we have gathered some expert-based information about various
779 traditionally perceived style aspects.
780 This expert-based knowledge allows us to predict styles of unknown players based on
781 the similarity of their pattern vectors, as well as discover correlations between
782 styles and proportions of played patterns.
Experts were asked to mark each style aspect of the given players
on a~scale from 1 to 10. The style aspects are defined as follows:
787 \vspace{4mm}
788 \noindent
789 %\begin{table}
790 \begin{center}
791 %\caption{Styles}
792 \begin{tabular}{|c|c|c|}
793 \hline
794 \multicolumn{3}{|c|}{Styles} \\ \hline
795 Style & 1 & 10\\ \hline
796 Territoriality $\tau$ & Moyo & Territorial \\
797 Orthodoxity $\omega$ & Classic & Novel \\
Aggressivity $\alpha$ & Calm & Fighting \\
799 Thickness $\theta$ & Safe & Shinogi \\ \hline
800 \end{tabular}
801 \end{center}
802 %\end{table}
803 \vspace{4mm}
Averaging this expert-based evaluation yields a
\emph{reference style vector} $\vec s_r$ (of dimension $4$) for each player $r$
from the set of \emph{reference players} $R$.
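
The averaging, and the mean standard deviation of the answers quoted below,
can be computed as in the following sketch (assuming the raw marks are
stored per player as an array with one row per expert; whether the published
figure uses exactly this estimator is an assumption):
\begin{verbatim}
import numpy as np

def reference_style_vectors(marks):
    # marks: dict player -> array (num_experts x 4) with
    # (tau, omega, alpha, theta) marks on the 1-10 scale
    vectors = {p: m.mean(axis=0) for p, m in marks.items()}
    # mean standard deviation over all players and aspects
    mean_std = np.mean([m.std(axis=0) for m in marks.values()])
    return vectors, mean_std
\end{verbatim}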
809 Three high-level Go players (Alexander Dinerstein 3-pro, Motoki Noguchi
7-dan and V\'{i}t Brunner 4-dan) have judged the styles of the reference
players.
The mean standard deviation of the answers is 0.952,
making the data reasonably reliable,
though a much larger sample would of course be more desirable.
815 The complete list of answers is in table \ref{fig:style_marks}.
817 \begin{table}[!t]
818 % increase table row spacing, adjust to taste
819 \renewcommand{\arraystretch}{1.3}
820 \begin{threeparttable}
821 \caption{Style Aspects of Selected Professionals\tnote{1}}
822 \label{fig:style_marks}
823 \centering
824 % Some packages, such as MDW tools, offer better commands for making tables
825 % than the plain LaTeX2e tabular which is used here.
826 \begin{tabular}{|c||c||c||c||c|}
827 \hline
828 Player & $\tau$ & $\omega$ & $\alpha$ & $\theta$ \\
829 \hline
830 Yoda Norimoto & $6.3 \pm 1.7$ & $4.3 \pm 2.1$ & $4.3 \pm 2.1$ & $3.3 \pm 1.2$ \\
831 Yi Se-tol & $5.3 \pm 0.5$ & $6.6 \pm 2.5$ & $9.3 \pm 0.5$ & $6.6 \pm 1.2$ \\
832 Yi Ch'ang-ho\tnote{2}& $7.0 \pm 0.8$ & $5.0 \pm 1.4$ & $2.6 \pm 0.9$ & $2.6 \pm 1.2$ \\
833 Takemiya Masaki & $1.3 \pm 0.5$ & $6.3 \pm 2.1$ & $7.0 \pm 0.8$ & $1.3 \pm 0.5$ \\
834 Sakata Eio & $7.6 \pm 1.7$ & $4.6 \pm 0.5$ & $7.3 \pm 0.9$ & $8.0 \pm 1.6$ \\
835 Rui Naiwei & $4.6 \pm 1.2$ & $5.6 \pm 0.5$ & $9.0 \pm 0.8$ & $3.3 \pm 1.2$ \\
836 Otake Hideo & $4.3 \pm 0.5$ & $3.0 \pm 0.0$ & $4.6 \pm 1.2$ & $3.6 \pm 0.9$ \\
837 O Meien & $2.6 \pm 1.2$ & $9.6 \pm 0.5$ & $8.3 \pm 1.7$ & $3.6 \pm 1.2$ \\
838 Ma Xiaochun & $8.0 \pm 2.2$ & $6.3 \pm 0.5$ & $5.6 \pm 1.9$ & $8.0 \pm 0.8$ \\
839 Luo Xihe & $7.3 \pm 0.9$ & $7.3 \pm 2.5$ & $7.6 \pm 0.9$ & $6.0 \pm 1.4$ \\
840 Ishida Yoshio & $8.0 \pm 1.4$ & $5.0 \pm 1.4$ & $3.3 \pm 1.2$ & $5.3 \pm 0.5$ \\
841 Gu Li & $5.6 \pm 0.9$ & $7.0 \pm 0.8$ & $9.0 \pm 0.8$ & $4.0 \pm 0.8$ \\
842 Cho U & $7.3 \pm 2.4$ & $6.0 \pm 0.8$ & $5.3 \pm 1.7$ & $6.3 \pm 1.7$ \\
843 Cho Chikun & $9.0 \pm 0.8$ & $7.6 \pm 0.9$ & $6.6 \pm 1.2$ & $9.0 \pm 0.8$ \\
844 Yuki Satoshi & $3.0 \pm 1.0$ & $8.5 \pm 0.5$ & $9.0 \pm 1.0$ & $4.5 \pm 0.5$ \\
845 Yamashita Keigo & $2.0 \pm 0.0$ & $9.0 \pm 1.0$ & $9.5 \pm 0.5$ & $3.0 \pm 1.0$ \\
846 Takao Shinji & $5.0 \pm 1.0$ & $3.5 \pm 0.5$ & $5.5 \pm 1.5$ & $4.5 \pm 0.5$ \\
847 Miyazawa Goro & $1.5 \pm 0.5$ & $10 \pm 0 $ & $9.5 \pm 0.5$ & $4.0 \pm 1.0$ \\
848 Kobayashi Koichi & $9.0 \pm 1.0$ & $2.5 \pm 0.5$ & $2.5 \pm 0.5$ & $5.5 \pm 0.5$ \\
849 Kato Masao & $2.5 \pm 0.5$ & $4.5 \pm 1.5$ & $9.5 \pm 0.5$ & $4.0 \pm 0.0$ \\
850 Hane Naoki & $7.5 \pm 0.5$ & $2.5 \pm 0.5$ & $4.0 \pm 0.0$ & $4.5 \pm 1.5$ \\
851 Go Seigen & $6.0 \pm 2.0$ & $9.0 \pm 1.0$ & $8.0 \pm 1.0$ & $5.0 \pm 1.0$ \\
852 Fujisawa Hideyuki & $3.5 \pm 0.5$ & $9.0 \pm 1.0$ & $7.0 \pm 0.0$ & $4.0 \pm 0.0$ \\
853 Chen Yaoye & $6.0 \pm 1.0$ & $4.0 \pm 1.0$ & $6.0 \pm 1.0$ & $5.5 \pm 0.5$ \\
854 \hline
855 \end{tabular}
856 \begin{tablenotes}
\item [1] Including standard deviation. Only players for whom we got at least two out of three answers are included.
\item [2] We consider games only up to the year 2004, since Yi Ch'ang-ho was a prominent representative of a balanced, careful style until then, but is regarded as having altered his style significantly afterwards.
859 \end{tablenotes}
860 \end{threeparttable}
861 \end{table}
863 \subsection{Style Components Analysis}
865 \begin{figure}[!t]
866 \centering
867 \includegraphics[width=3.75in]{style-pca}
868 \caption{PCA of per-player vectors}
869 \label{fig:style_pca}
870 \end{figure}
872 We have looked at the three most significant dimensions of the pattern data
yielded by the PCA analysis (fig. \ref{fig:style_pca}). We have again
performed the $\chi^2$ test between the three most significant PCA dimensions
and the dimensions of the prior knowledge style vectors to find correlations;
the correlations found are presented in table \ref{fig:style_chisq}.
877 We also list the characteristic spatial patterns of the PCA dimension
878 extremes (table \ref{fig:style_patterns}).
880 It is immediately
881 obvious that by far the most significant vector corresponds very well
882 to the player territoriality,\footnote{Cho Chikun, perhaps the best-known
883 super-territorial player, is not well visible in the cluster, but he is
884 positioned just below $-0.5$ on the first dimension.}
confirming the intuitive notion that this aspect of style
is the easiest to pinpoint and also
the most obvious in the played shapes and sequences
(which can obviously aim directly at taking secure territory
or building a~center-oriented framework).
891 The other PCA dimensions are far less obvious --- TODO.
893 Kohonen map view.
895 \subsection{Style Classification}
We then tried to apply the NN classifier with a linear output function to the dataset,
which yielded Y (see fig. Z), with an MSE of abcd.
901 \section{Proposed Applications}
903 We believe that our findings might be useful for many applications
904 in the area of Go support software as well as Go-playing computer engines.
The style analysis can be an excellent teaching aid --- by classifying style
dimensions based on a player's pattern vector, many study recommendations
can be given, e.g. about the professional games to replay, the goal being
to balance the understanding of various styles to achieve a well-rounded skill set.
910 This was also our original aim when starting the research and a user-friendly
911 tool based on our work is now being created.
We hope that more strong players will look into the style dimensions found
by our statistical analysis --- analysis of the most played patterns of prospective
opponents might help to prepare for a game, but we especially hope that new insights
into the strategic purposes of various shapes and into the general human understanding
of the game might be achieved by investigating the style-specific patterns.
Classifying the playing strength of a player's pattern vector can be used,
e.g., to help determine the initial real-world rating of a player before their
first tournament based on games played on the internet; some players, especially
in less populated areas, could get fairly strong before playing their first
real tournament.
925 Analysis of pattern vectors extracted from games of Go-playing programs
926 in light of the shown strength and style distributions might help to
927 highlight some weaknesses and room for improvements. (However, since
928 correlation does not imply causation, simply optimizing Go-playing programs
929 according to these vectors is unlikely to yield good results.)
Another interesting application in Go-playing programs might be strength
adjustment; the program can classify the player's level based on the pattern
vector from their previous games and auto-adjust its difficulty settings
accordingly to provide more even games for beginners.
936 % An example of a floating figure using the graphicx package.
937 % Note that \label must occur AFTER (or within) \caption.
938 % For figures, \caption should occur after the \includegraphics.
939 % Note that IEEEtran v1.7 and later has special internal code that
940 % is designed to preserve the operation of \label within \caption
941 % even when the captionsoff option is in effect. However, because
942 % of issues like this, it may be the safest practice to put all your
943 % \label just after \caption rather than within \caption{}.
945 % Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
946 % option should be used if it is desired that the figures are to be
947 % displayed while in draft mode.
949 %\begin{figure}[!t]
950 %\centering
951 %\includegraphics[width=2.5in]{myfigure}
952 % where an .eps filename suffix will be assumed under latex,
953 % and a .pdf suffix will be assumed for pdflatex; or what has been declared
954 % via \DeclareGraphicsExtensions.
955 %\caption{Simulation Results}
956 %\label{fig_sim}
957 %\end{figure}
959 % Note that IEEE typically puts floats only at the top, even when this
960 % results in a large percentage of a column being occupied by floats.
963 % An example of a double column floating figure using two subfigures.
964 % (The subfig.sty package must be loaded for this to work.)
965 % The subfigure \label commands are set within each subfloat command, the
966 % \label for the overall figure must come after \caption.
967 % \hfil must be used as a separator to get equal spacing.
968 % The subfigure.sty package works much the same way, except \subfigure is
969 % used instead of \subfloat.
971 %\begin{figure*}[!t]
972 %\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}%
973 %\label{fig_first_case}}
974 %\hfil
975 %\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
976 %\label{fig_second_case}}}
977 %\caption{Simulation results}
978 %\label{fig_sim}
979 %\end{figure*}
981 % Note that often IEEE papers with subfigures do not employ subfigure
982 % captions (using the optional argument to \subfloat), but instead will
983 % reference/describe all of them (a), (b), etc., within the main caption.
986 % An example of a floating table. Note that, for IEEE style tables, the
987 % \caption command should come BEFORE the table. Table text will default to
988 % \footnotesize as IEEE normally uses this smaller font for tables.
989 % The \label must come after \caption as always.
991 %\begin{table}[!t]
992 %% increase table row spacing, adjust to taste
993 %\renewcommand{\arraystretch}{1.3}
994 % if using array.sty, it might be a good idea to tweak the value of
995 % \extrarowheight as needed to properly center the text within the cells
996 %\caption{An Example of a Table}
997 %\label{table_example}
998 %\centering
999 %% Some packages, such as MDW tools, offer better commands for making tables
1000 %% than the plain LaTeX2e tabular which is used here.
1001 %\begin{tabular}{|c||c|}
1002 %\hline
1003 %One & Two\\
1004 %\hline
1005 %Three & Four\\
1006 %\hline
1007 %\end{tabular}
1008 %\end{table}
1011 % Note that IEEE does not put floats in the very first column - or typically
1012 % anywhere on the first page for that matter. Also, in-text middle ("here")
1013 % positioning is not used. Most IEEE journals use top floats exclusively.
1014 % Note that, LaTeX2e, unlike IEEE journals, places footnotes above bottom
1015 % floats. This can be corrected via the \fnbelowfloat command of the
1016 % stfloats package.
1020 \section{Conclusion}
1021 The conclusion goes here.
1022 We have shown brm and proposed brm.
Since we are not aware of any previous research on this topic and we
are limited by space and time constraints, much research remains
to be done. There is plenty of room for further research in all parts
of our analysis --- different methods of generating the $\vec p$ vectors
can be explored; other data mining methods could be tried.
It can be argued that many players adjust their style by game conditions
(Go development era, handicap, komi and color, time limits, opponent)
or that styles might manifest differently in various game stages.
More professional players could be consulted on the findings
and for the calibration of the style scales. The impact of handicap games on the by-strength
$\vec p$ distribution should be investigated.
1036 TODO: Future research --- Sparse PCA
1041 % if have a single appendix:
1042 %\appendix[Proof of the Zonklar Equations]
1043 % or
1044 %\appendix % for no appendix heading
1045 % do not use \section anymore after \appendix, only \section*
1046 % is possibly needed
1048 % use appendices with more than one appendix
1049 % then use \section to start each appendix
1050 % you must declare a \section before using any
1051 % \subsection or using \label (\appendices by itself
1052 % starts a section numbered zero.)
1056 %\appendices
1057 %\section{Proof of the First Zonklar Equation}
1058 %Appendix one text goes here.
1060 %% you can choose not to have a title for an appendix
1061 %% if you want by leaving the argument blank
1062 %\section{}
1063 %Appendix two text goes here.
1066 % use section* for acknowledgement
1067 \section*{Acknowledgment}
1068 \label{acknowledgement}
1070 We would like to thank Radka ``chidori'' Hane\v{c}kov\'{a} for the original research idea
1071 and X for reviewing our paper.
1072 We appreciate helpful comments on our general methodology
1073 by John Fairbairn, T. M. Hall, Robert Jasiek, Franti\v{s}ek Mr\'{a}z
and several GoDiscussions.com users \cite{GoDiscThread}.
1075 Finally, we are very grateful for detailed input on specific go styles
1076 by Alexander Dinerstein, Motoki Noguchi and V\'{i}t Brunner.
1079 % Can use something like this to put references on a page
1080 % by themselves when using endfloat and the captionsoff option.
1081 \ifCLASSOPTIONcaptionsoff
1082 \newpage
1087 % trigger a \newpage just before the given reference
1088 % number - used to balance the columns on the last page
1089 % adjust value as needed - may need to be readjusted if
1090 % the document is modified later
1091 %\IEEEtriggeratref{8}
1092 % The "triggered" command can be changed if desired:
1093 %\IEEEtriggercmd{\enlargethispage{-5in}}
1095 % references section
1097 % can use a bibliography generated by BibTeX as a .bbl file
1098 % BibTeX documentation can be easily obtained at:
1099 % http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
1100 % The IEEEtran BibTeX style support page is at:
1101 % http://www.michaelshell.org/tex/ieeetran/bibtex/
1102 \bibliographystyle{IEEEtran}
1103 % argument is your BibTeX string definitions and bibliography database(s)
1104 \bibliography{gostyle}
1106 % <OR> manually copy in the resultant .bbl file
1107 % set second argument of \begin to the number of references
1108 % (used to reserve space for the reference number labels box)
1109 %\begin{thebibliography}{1}
1111 %\bibitem{MasterMCTS}
1113 %\end{thebibliography}
1115 % biography section
1117 % If you have an EPS/PDF photo (graphicx package needed) extra braces are
1118 % needed around the contents of the optional argument to biography to prevent
1119 % the LaTeX parser from getting confused when it sees the complicated
1120 % \includegraphics command within an optional argument. (You could create
1121 % your own custom macro containing the \includegraphics command to make things
1122 % simpler here.)
1123 %\begin{biography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
1124 % or if you just want to reserve a space for a photo:
1126 \begin{IEEEbiography}{Michael Shell}
1127 Biography text here.
1128 \end{IEEEbiography}
1130 % if you will not have a photo at all:
1131 \begin{IEEEbiographynophoto}{John Doe}
1132 Biography text here.
1133 \end{IEEEbiographynophoto}
1135 % insert where needed to balance the two columns on the last page with
1136 % biographies
1137 %\newpage
1139 \begin{IEEEbiographynophoto}{Jane Doe}
1140 Biography text here.
1141 \end{IEEEbiographynophoto}
1143 % You can push biographies down or up by placing
1144 % a \vfill before or after them. The appropriate
1145 % use of \vfill depends on what kind of text is
1146 % on the last page and whether or not the columns
1147 % are being equalized.
1149 %\vfill
1151 % Can be used to pull up biographies so that the bottom of the last one
1152 % is flush with the other column.
1153 %\enlargethispage{-5in}
1157 % that's all folks
1158 \end{document}