blob 6b11580cdbf1c0515393d7031a857c16f49cbab5
1 \documentclass[journal]{IEEEtran}
3 \usepackage{cite}
4 % cite.sty was written by Donald Arseneau
5 % V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
6 % \cite{} output to follow that of IEEE. Loading the cite package will
7 % result in citation numbers being automatically sorted and properly
8 % "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
9 % cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
10 % \cite will automatically add leading space, if needed. Use cite.sty's
11 % noadjust option (cite.sty V3.8 and later) if you want to turn this off.
12 % cite.sty is already installed on most LaTeX systems. Be sure and use
13 % version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
14 % not currently provide for hyperlinked citations.
15 % The latest version can be obtained at:
16 % http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
17 % The documentation is contained in the cite.sty file itself.
20 % *** GRAPHICS RELATED PACKAGES ***
22 \ifCLASSINFOpdf
23 % \usepackage[pdftex]{graphicx}
24 % declare the path(s) where your graphic files are
25 % \graphicspath{{../pdf/}{../jpeg/}}
26 % and their extensions so you won't have to specify these with
27 % every instance of \includegraphics
28 % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
29 \else
30 % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
31 % will default to the driver specified in the system graphics.cfg if no
32 % driver is specified.
33 % \usepackage[dvips]{graphicx}
34 % declare the path(s) where your graphic files are
35 % \graphicspath{{../eps/}}
36 % and their extensions so you won't have to specify these with
37 % every instance of \includegraphics
38 % \DeclareGraphicsExtensions{.eps}
39 \fi
41 \usepackage{algorithm}
42 \usepackage{algorithmic}
43 %\usepackage{algpseudocode}
44 % WICKED: neither one works??? (TODO: resolve which algorithm package to use)
45 % algorithmic.sty can be obtained at:
46 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
47 % There is also a support site at:
48 % http://algorithms.berlios.de/index.html
49 % Also of interest may be the (relatively newer and more customizable)
50 % algorithmicx.sty package by Szasz Janos:
51 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/
53 % *** ALIGNMENT PACKAGES ***
55 %\usepackage{array}
56 % http://www.ctan.org/tex-archive/macros/latex/required/tools/
59 \usepackage{amsmath}
60 %\usepackage{mdwtab}
61 % http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/
64 % IEEEtran contains the IEEEeqnarray family of commands that can be used to
65 % generate multiline equations as well as matrices, tables, etc., of high
66 % quality.
68 %\usepackage{eqparbox}
69 % Also of notable interest is Scott Pakin's eqparbox package for creating
70 % (automatically sized) equal width boxes - aka "natural width parboxes".
71 % Available at:
72 % http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/
76 % *** SUBFIGURE PACKAGES ***
77 %\usepackage[tight,footnotesize]{subfigure}
78 % subfigure.sty was written by Steven Douglas Cochran. This package makes it
79 % easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
80 % work, it is a good idea to load it with the tight package option to reduce
81 % the amount of white space around the subfigures. subfigure.sty is already
82 % installed on most LaTeX systems. The latest version and documentation can
83 % be obtained at:
84 % http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
85 % subfigure.sty has been superseded by subfig.sty.
89 %\usepackage[caption=false]{caption}
90 %\usepackage[font=footnotesize]{subfig}
91 % subfig.sty, also written by Steven Douglas Cochran, is the modern
92 % replacement for subfigure.sty. However, subfig.sty requires and
93 % automatically loads Axel Sommerfeldt's caption.sty which will override
94 % IEEEtran.cls handling of captions and this will result in nonIEEE style
95 % figure/table captions. To prevent this problem, be sure and preload
96 % caption.sty with its "caption=false" package option. This will preserve
97 % IEEEtran.cls handling of captions. Version 1.3 (2005/06/28) and later
98 % (recommended due to many improvements over 1.2) of subfig.sty supports
99 % the caption=false option directly:
100 %\usepackage[caption=false,font=footnotesize]{subfig}
102 % The latest version and documentation can be obtained at:
103 % http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
104 % The latest version and documentation of caption.sty can be obtained at:
105 % http://www.ctan.org/tex-archive/macros/latex/contrib/caption/
109 % *** FLOAT PACKAGES ***
111 %\usepackage{fixltx2e}
112 % fixltx2e, the successor to the earlier fix2col.sty, was written by
113 % Frank Mittelbach and David Carlisle. This package corrects a few problems
114 % in the LaTeX2e kernel, the most notable of which is that in current
115 % LaTeX2e releases, the ordering of single and double column floats is not
116 % guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
117 % single column figure to be placed prior to an earlier double column
118 % figure. The latest version and documentation can be found at:
119 % http://www.ctan.org/tex-archive/macros/latex/base/
123 %\usepackage{stfloats}
124 % stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
125 % the ability to do double column floats at the bottom of the page as well
126 % as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
127 % LaTeX2e). It also provides a command:
128 %\fnbelowfloat
129 % to enable the placement of footnotes below bottom floats (the standard
130 % LaTeX2e kernel puts them above bottom floats). This is an invasive package
131 % which rewrites many portions of the LaTeX2e float routines. It may not work
132 % with other packages that modify the LaTeX2e float routines. The latest
133 % version and documentation can be obtained at:
134 % http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
135 % Documentation is contained in the stfloats.sty comments as well as in the
136 % presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
137 % does not allow \baselineskip to stretch. Authors submitting work to the
138 % IEEE should note that IEEE rarely uses double column equations and
139 % that authors should try to avoid such use. Do not be tempted to use the
140 % cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
141 % not format its papers in such ways.
144 %\ifCLASSOPTIONcaptionsoff
145 % \usepackage[nomarkers]{endfloat}
146 % \let\MYoriglatexcaption\caption
147 % \renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
148 %\fi
149 % endfloat.sty was written by James Darrell McCauley and Jeff Goldberg.
150 % This package may be useful when used in conjunction with IEEEtran.cls'
151 % captionsoff option. Some IEEE journals/societies require that submissions
152 % have lists of figures/tables at the end of the paper and that
153 % figures/tables without any captions are placed on a page by themselves at
154 % the end of the document. If needed, the draftcls IEEEtran class option or
155 % \CLASSINPUTbaselinestretch interface can be used to increase the line
156 % spacing as well. Be sure and use the nomarkers option of endfloat to
157 % prevent endfloat from "marking" where the figures would have been placed
158 % in the text. The two hack lines of code above are a slight modification of
159 % that suggested by in the endfloat docs (section 8.3.1) to ensure that
160 % the full captions always appear in the list of figures/tables - even if
161 % the user used the short optional argument of \caption[]{}.
162 % IEEE papers do not typically make use of \caption[]'s optional argument,
163 % so this should not be an issue. A similar trick can be used to disable
164 % captions of packages such as subfig.sty that lack options to turn off
165 % the subcaptions:
166 % For subfig.sty:
167 % \let\MYorigsubfloat\subfloat
168 % \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
169 % For subfigure.sty:
170 % \let\MYorigsubfigure\subfigure
171 % \renewcommand{\subfigure}[2][\relax]{\MYorigsubfigure[]{#2}}
172 % However, the above trick will not work if both optional arguments of
173 % the \subfloat/subfig command are used. Furthermore, there needs to be a
174 % description of each subfigure *somewhere* and endfloat does not add
175 % subfigure captions to its list of figures. Thus, the best approach is to
176 % avoid the use of subfigure captions (many IEEE journals avoid them anyway)
177 % and instead reference/explain all the subfigures within the main caption.
178 % The latest version of endfloat.sty and its documentation can obtained at:
179 % http://www.ctan.org/tex-archive/macros/latex/contrib/endfloat/
181 % The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
182 % later in the document, say, to conditionally put the References on a
183 % page by themselves.
185 % *** PDF, URL AND HYPERLINK PACKAGES ***
187 %\usepackage{url}
188 % url.sty was written by Donald Arseneau. It provides better support for
189 % handling and breaking URLs. url.sty is already installed on most LaTeX
190 % systems. The latest version can be obtained at:
191 % http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
192 % Read the url.sty source comments for usage information. Basically,
193 % \url{my_url_here}.
196 % *** Do not adjust lengths that control margins, column widths, etc. ***
197 % *** Do not use packages that alter fonts (such as pslatex). ***
198 % There should be no need to do such things with IEEEtran.cls V1.6 and later.
199 % (Unless specifically asked to do so by the journal or conference you plan
200 % to submit to, of course. )
202 % correct bad hyphenation here
203 \hyphenation{op-tical net-works semi-conduc-tor}
206 \begin{document}
208 % paper title
209 % can use linebreaks \\ within to get better formatting as desired
210 \title{On Move Pattern Trends\\in Large Go Games Corpus}
212 % use \thanks{} to gain access to the first footnote area
213 % a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
214 % was not built to handle multiple paragraphs
215 \author{Petr~Baudi\v{s},~Josef~Moud\v{r}\'{i}k% <-this % stops a space
216 \thanks{P. Baudi\v{s} is student at the Faculty of Math and Physics, Charles University, Prague, CZ, and also does some of his Computer Go research as an employee of SUSE Labs Prague, Novell CZ.}% <-this % stops a space
217 \thanks{J. Moud\v{r}\'{i}k is student at the Faculty of Math and Physics, Charles University, Prague, CZ.}}
219 % note the % following the last \IEEEmembership and also \thanks -
220 % these prevent an unwanted space from occurring between the last author name
221 % and the end of the author line. i.e., if you had this:
223 % \author{....lastname \thanks{...} \thanks{...} }
224 % ^------------^------------^----Do not want these spaces!
226 % a space would be appended to the last name and could cause every name on that
227 % line to be shifted left slightly. This is one of those "LaTeX things". For
228 % instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
229 % "AB" then you have to do: "\textbf{A}\textbf{B}"
230 % \thanks is no different in this regard, so shield the last } of each \thanks
231 % that ends a line with a % and do not let a space in before the next \thanks.
232 % Spaces after \IEEEmembership other than the last one are OK (and needed) as
233 % you are supposed to have spaces between the names. For what it is worth,
234 % this is a minor point as most people would not even notice if the said evil
235 % space somehow managed to creep in.
238 % The paper headers
239 \markboth{Transactions on Computational Intelligence and AI in Games}%
240 {On Move Pattern Trends in Large Go Games Corpus}
241 % The only time the second header will appear is for the odd numbered pages
242 % after the title page when using the twoside option.
244 % *** Note that you probably will NOT want to include the author's ***
245 % *** name in the headers of peer review papers. ***
246 % You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
247 % you desire.
252 % If you want to put a publisher's ID mark on the page you can do it like
253 % this:
254 %\IEEEpubid{0000--0000/00\$00.00~\copyright~2007 IEEE} 255 % Remember, if you use this you must call \IEEEpubidadjcol in the second 256 % column for its text to clear the IEEEpubid mark. 260 % use for special paper notices 261 %\IEEEspecialpapernotice{(Invited Paper)} 266 % make the title area 267 \maketitle 270 \begin{abstract} 271 %\boldmath 273 We process a~large corpus of game records of the board game of Go and 274 propose a~way to extract per-player summary information on played moves. 275 We then apply several basic data-mining methods on the summary 276 information to identify the most differentiating features within the 277 summary information, and discuss their correspondence with traditional 278 Go knowledge. We show mappings of the features to player attributes 279 like playing strength or informally perceived ``playing style'' (such as 280 territoriality or aggressivity), and propose applications including 281 seeding real-world ranks of internet players, aiding in Go study, or 282 contributing to the discussion within Go theory on the scope of ``playing 283 style''. 285 \end{abstract} 286 % IEEEtran.cls defaults to using nonbold math in the Abstract. 287 % This preserves the distinction between vectors and scalars. However, 288 % if the journal you are submitting to favors bold math in the abstract, 289 % then you can use LaTeX's standard command \boldmath at the very start 290 % of the abstract to achieve this. Many IEEE journals frown on math 291 % in the abstract anyway. 293 % Note that keywords are not normally used for peerreview papers.
294 \begin{IEEEkeywords} 295 board games, go, data mining, pattern recognition, player strength, playing style 296 \end{IEEEkeywords} 303 % For peer review papers, you can put extra information on the cover 304 % page as needed: 305 % \ifCLASSOPTIONpeerreview 306 % \begin{center} \bfseries EDICS Category: 3-BBND \end{center} 307 % \fi 309 % For peerreview papers, this IEEEtran command inserts a page break and 310 % creates the second title. It will be ignored for other modes. 311 \IEEEpeerreviewmaketitle 315 \section{Introduction} 316 % The very first letter is a 2 line initial drop letter followed 317 % by the rest of the first word in caps. 319 % form to use if the first word consists of a single letter: 320 % \IEEEPARstart{A}{demo} file is .... 322 % form to use if you need the single drop letter followed by 323 % normal text (unknown if ever used by IEEE): 324 % \IEEEPARstart{A}{}demo file is .... 326 % Some journals put the first two words in caps: 327 % \IEEEPARstart{T}{his demo} file is .... 329 % Here we have the typical use of a "T" for an initial drop letter 330 % and "HIS" in caps to complete the first word. 331 \IEEEPARstart{T}{he} field of Computer Go usually focuses on the problem 332 of creating a~program to play the game, finding the best move from a~given 333 board position. We will make use of one method developed in the course 334 of such research and apply it to the analysis of existing game records 335 with the aim of helping humans to play the game better instead. 337 Go is a~two-player full-information board game played 338 on a~square grid (usually $19\times19$ lines) with black and white 339 stones; the goal of the game is to surround the most territory and 340 capture enemy stones. We assume basic familiarity with the game. 342 Many Go players are eager to play using computers (usually over 343 the internet) and review games played by others on computers as well.
This means that large amounts of game records are collected and digitally 345 stored, enabling easy processing of such collections. However, so far 346 only little has been done with the available data --- we are aware 347 only of uses for simple win/loss statistics (TODO: KGS Stats, KGS Analytics, 348 Pro Go Rating) and ``next move'' statistics on a~specific position (TODO: 349 Kombilo, Moyo Go Studio). 351 We present a~more in-depth approach --- from all played moves, we devise 352 a~compact evaluation of each player. We then explore correlations between 353 evaluations of various players in light of externally given information. 354 This way, we can discover similarity between the move characteristics of 355 players with the same playing strength, or discuss the meaning of the 356 ``playing style'' concept on the assumption that similar playing styles 357 should yield similar move characteristics. 360 \section{Data Extraction} 361 \label{pattern-vectors} 363 As the input of our analysis, we use large collections of game records\footnote{We 364 use the SGF format (TODO) in our implementation.} organized by player names. 365 In order to generate the required compact description of most frequently played moves, 366 we construct a set of the $n$ most occurring patterns (\emph{top patterns}) 367 across all players and games from the database.\footnote{We use $n=500$ in our analysis.} 369 For each player, we then count how many times each of those $n$ patterns was played 370 during all his games and finally assign him a~\emph{pattern vector} $\vec p$ of dimension $n$, with each 371 dimension corresponding to the relative number of occurrences of a given pattern 372 (with respect to the player's most played \emph{top pattern}). Using relative numbers of occurrences ensures that 373 each dimension of the player's \emph{pattern vector} is scaled to the range $[0,1]$ and 374 therefore even players with different numbers of games in the database have comparable \emph{pattern vectors}.
376 \subsection{Pattern Features} 377 We need to define how to compose the patterns we use to describe moves. 378 There are some tradeoffs in play --- overly general descriptions carry too little 379 information to discern various player attributes; too specific descriptions 380 gather too few specimens over the games sample and the vector differences are 381 not statistically significant. 383 We have chosen an intuitive and simple approach inspired by pattern features 384 used when computing Elo ratings for candidate patterns in Computer Go play~\cite{ELO}. 385 Each pattern is a~combination of several \emph{pattern features} 386 (name--value pairs) matched at the position of the played move. 387 We use these features: 389 \begin{itemize} 390 \item capture move flag 391 \item atari move flag 392 \item atari escape flag 393 \item contiguity-to-last flag --- whether the move has been played in one of 8 neighbors of the last move 394 \item contiguity-to-second-last flag 395 \item board edge distance --- only up to distance 4 396 \item spatial pattern --- configuration of stones around the played move 397 \end{itemize} 399 The spatial patterns are normalized (using a dictionary) to be always 400 black-to-play and maintain translational and rotational symmetry. 401 Configurations of radius between 2 and 9 in the gridcular metric% 402 \footnote{The \emph{gridcular} metric 403 $d(x,y) = |\delta x| + |\delta y| + \max(|\delta x|, |\delta y|)$ defines 404 a circle-like structure on the Go board square grid~\cite{SpatPat}.} 405 are matched. 407 \subsection{Implementation} 409 We have implemented the data extraction by making use of the pattern 410 features matching implementation within the Pachi go-playing program 411 (TODO).
We extract information on players by converting the SGF game 412 records to a~GTP (TODO) stream that feeds Pachi's \texttt{patternscan} 413 engine, which outputs a~single \emph{patternspec} (string representation 414 of the particular pattern features combination) per move. 416 We can then gather all patternspecs played by a~given player and summarize 417 them; the $\vec p$ vector then consists of normalized counts of 418 the given $n$ most frequent patternspecs. 421 \section{Data Mining} 422 \label{data-mining} 424 To assess the properties of the gathered \emph{pattern vectors} 425 and their influence on playing styles, 426 we have processed the data using a~few basic data mining techniques. 428 The first two methods (\emph{analytic}) rely purely on data gathered 429 from the game collection 430 and serve to show internal structure and correlations within the data set. 431 Principal component analysis finds orthogonal vector components that 432 have the largest variance. 433 Reversing the process then indicates which patterns correlate with each style. 434 Additionally, PCA can be used as a vector-preprocessing for methods 435 that are negatively sensitive to \emph{pattern vector} component correlations. 437 A~second method --- Kohonen maps --- is based on the theory of self-organizing maps of abstract neurons that 438 compete against each other for representation of the input space. 439 Because neurons in the network are organized in a two-dimensional plane, 440 the trained network virtually spreads vectors to the 2D plane, 441 allowing for simple visualization of clusters of players with similar styles. 443 TODO: style vector -> output vector? 445 Furthermore, we have used and compared two \emph{classification} methods 446 that approximate well-defined but unknown \emph{style vector} $\vec S$ 447 based on input \emph{pattern vector} $\vec P$.
448 The methods are calibrated based on expert or prior knowledge about 449 training pattern vectors and then their error is measured on a testing 450 set of pattern vectors. 452 One of the methods is the $k$-Nearest Neighbor (kNN) classifier: 453 we approximate $\vec S$ by composing the \emph{style vectors} of the $k$ nearest \emph{pattern vectors}. 454 The other is based on a multi-layer feed-forward Artificial Neural Network: 455 the neural network can learn correlations between input and output vectors 456 and generalize the ``knowledge'' to unknown vectors; it can be more flexible 457 in the interpretation of different pattern vector elements and discern more 458 complex relations than the kNN classifier, but it requires a larger training sample. 460 TODO: Does the last sentence make any sense?! 462 \subsection{Principal Component Analysis} 463 \label{pca} 464 We use Principal Component Analysis (\emph{PCA})~\cite{Jolliffe1986} 465 to reduce the dimensions of the \emph{pattern vectors} while preserving 466 as much information as possible. 468 Briefly, PCA is an eigenvalue decomposition of a~covariance matrix of centered \emph{pattern vectors}, 469 producing a~linear mapping $o$ from the $n$-dimensional vector space 470 to a~reduced $m$-dimensional vector space. 471 The $m$ eigenvectors of the original vectors' covariance matrix 472 with the largest eigenvalues are used as the basis of the reduced vector space; 473 the eigenvectors form the transformation matrix $W$. 475 For each original \emph{pattern vector} $\vec p_i$, 476 we obtain its new representation $\vec r_i$ in the PCA basis 477 as shown in the following equation: 478 \begin{equation} 479 \vec r_i = W \cdot \vec p_i 480 \end{equation} 482 The whole process is described in Algorithm~\ref{alg:pca}.
484 \begin{algorithm} 485 \caption{PCA -- Principal Component Analysis} 486 \begin{algorithmic} 487 \label{alg:pca} 488 \REQUIRE{$m > 0$, set of players $R$ with \emph{pattern vectors} $p_r$} 489 \STATE $\vec \mu \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_r}$ 490 \FOR{$r \in R$} 491 \STATE $\vec p_r \leftarrow \vec p_r - \vec \mu$ 492 \ENDFOR 493 \FOR{$(i,j) \in \{1, \dots, n\} \times \{1, \dots, n\}$} 494 \STATE $\mathit{Cov}[i,j] \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_{ri} \cdot \vec p_{rj}}$ 495 \ENDFOR 496 \STATE Compute Eigenvalue Decomposition of the $\mathit{Cov}$ matrix 497 \STATE Get the $m$ largest eigenvalues 498 \STATE The most significant eigenvectors ordered by decreasing eigenvalues form the rows of matrix $W$ 499 \FOR{$r \in R$} 500 \STATE $\vec r_r \leftarrow W \vec p_r$ 501 \ENDFOR 502 \end{algorithmic} 503 \end{algorithm} 505 \subsection{Kohonen Maps} 506 \label{koh} 507 A Kohonen map is a self-organizing network with neurons spread over a two-dimensional plane. 508 Neurons in the map compete for representation of portions of the input vector space. 509 Each neuron $\vec n$ represents a vector 510 and the network is trained so that the neurons that are topologically close 511 tend to represent vectors that are close as well. 513 First, a randomly initialized network is sequentially trained; 514 in each iteration, we choose a random training vector $\vec t$ 515 and find the neuron $\vec w$ that is closest to $\vec t$ in the Euclidean metric 516 (we call $\vec w$ a \emph{winner neuron}). 518 We then adapt neurons from the neighbourhood of $\vec w$ employing the equation: 519 \begin{equation} 520 \vec n = \vec n + \alpha \cdot \mathit{Influence}(\vec w, \vec n) \cdot (\vec t - \vec n) 521 \end{equation} 522 where $\alpha$ is a learning parameter, usually decreasing in time. 523 $\mathit{Influence}()$ is a function that forces the neurons to spread. 524 Such a function is usually realized using a Mexican hat function or a difference of Gaussians 525 (see \cite{TODO} for details).
526 The state of the network can be evaluated by calculating the total difference 527 between each $\vec t \in T$ and its corresponding \emph{winner neuron} $\vec w_t$: 528 \begin{equation} 529 \mathit{Error}(N,T) = \sum_{\vec t \in T}{|\vec w_t - \vec t|} 530 \end{equation} 533 \begin{algorithm} 534 \caption{Kohonen maps -- training} 535 \begin{algorithmic} 536 \label{alg:koh} 537 \REQUIRE{Set of training vectors $T$, input dimension $D$} 538 \REQUIRE{max number of iterations $M$, desired error $E$} 539 \STATE $N \leftarrow \{\vec n | \vec n$ random, $\mathit{dim}(\vec n) = D\}$ \STATE $\mathit{It} \leftarrow 0$ 540 \REPEAT 541 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$ 542 \STATE $\vec t \leftarrow \mathit{PickRandom}(T)$ 543 \FORALL{$\vec n \in N$} 544 \STATE $D[\vec n] \leftarrow \mathit{EuclideanDistance}(\vec n, \vec t)$ 545 \ENDFOR 546 \STATE Find $\vec w \in N$ so that $D[\vec w] \leq D[\vec m], \forall \vec m \in N$ 547 \FORALL{$\vec n \in \mathit{TopologicalNeighbors}(N, \vec w)$} 548 \STATE $\vec n \leftarrow \vec n + \alpha(\mathit{It}) \cdot \mathit{Influence}(\vec w, \vec n) \cdot ( \vec t - \vec n )$ 549 \ENDFOR 550 \UNTIL{$\mathit{Error}(N, T) < E$ or $\mathit{It} > M$} 551 \end{algorithmic} 552 \end{algorithm} 555 \subsection{$k$-Nearest Neighbors Classifier} 556 \label{knn} 557 Our goal is to approximate the player's \emph{style vector} $\vec S$ 558 based on their \emph{pattern vector} $\vec P$. 559 To achieve this, we require prior knowledge of \emph{reference style vectors} 560 (see Section~\ref{style-vectors}). 562 In this method, we assume that similarities in players' \emph{pattern vectors} 563 uniformly correlate with similarities in players' \emph{style vectors}. 564 We try to approximate $\vec S$ as a weighted average of \emph{style vectors} 565 $\vec s_i$ of the $k$ players with \emph{pattern vectors} $\vec p_i$ closest to $\vec P$. 566 This is illustrated in Algorithm~\ref{alg:knn}. 567 Note that the weight is a function of distance and it is not explicitly defined in Algorithm~\ref{alg:knn}.
568 During our research, an exponentially decreasing weight has proven to be sufficient. 570 \begin{algorithm} 571 \caption{k-Nearest Neighbors} 572 \begin{algorithmic} 573 \label{alg:knn} 574 \REQUIRE{pattern vector $\vec P$, $k > 0$, set of reference players $R$} 575 \FORALL{$r \in R$} 576 \STATE $D[r] \leftarrow \mathit{EuclideanDistance}(\vec p_r, \vec P)$ 577 \ENDFOR 578 \STATE $N \leftarrow \mathit{SelectSmallest}(k, R, D)$ 579 \STATE $\vec S \leftarrow \vec 0$ 580 \FORALL{$r \in N$} 581 \STATE $\vec S \leftarrow \vec S + \mathit{Weight}(D[r]) \cdot \vec s_r$ 582 \ENDFOR 583 \end{algorithmic} 584 \end{algorithm} 586 \subsection{Neural Network Classifier} 587 \label{neural-net} 589 As an alternative to the k-Nearest Neighbors algorithm (Section~\ref{knn}), 590 we have used a classifier based on feed-forward artificial neural networks~\cite{TODO}. 591 Neural networks (NN) are known for their ability to generalize 592 and find correlations and patterns between input and output data. 593 A neural network is an adaptive system that must undergo a training 594 period before it can be reasonably used, similarly to the requirement 595 of reference vectors for the k-Nearest Neighbors algorithm above. 597 \subsubsection{Computation and activation of the NN} 598 Technically, a neural network is a network of interconnected computational units called neurons. 599 A feedforward neural network has a layered topology; 600 it usually has one \emph{input layer}, one \emph{output layer} 601 and an arbitrary number of \emph{hidden layers} in between. 603 Each neuron $i$ is connected to all neurons in the previous layer and each connection has its weight $w_{ij}$. 605 The computation proceeds in discrete time steps. 606 In the first step, the neurons in the \emph{input layer} 607 are \emph{activated} according to the \emph{input vector}. 608 Then, we iteratively compute the output of each neuron in the next layer 609 until the output layer is reached.
610 The activity of the output layer is then presented as the result. 612 The activation $y_i$ of neuron $i$ from the layer $I$ is computed as 613 \begin{equation} 614 y_i = f\left(\sum_{j \in J}{w_{ij} y_j}\right) 615 \end{equation} 616 where $J$ is the previous layer, while $y_j$ is the activation of neurons from layer $J$. 617 The function $f()$ is the so-called \emph{activation function} 618 and its purpose is to bound the outputs of neurons. 619 A typical example of an activation function is the sigmoid function.% 620 \footnote{A special case of the logistic function, defined by the formula 621 $\sigma(x)=\frac{1}{1+e^{-(rx+k)}}$; parameters control the growth rate ($r$) 622 and the x-position ($k$).} 624 \subsubsection{Training} 625 The training of the feed-forward neural network usually involves some 626 modification of the supervised backpropagation learning algorithm~\cite{TODO}. 627 We use a first-order optimization algorithm called RPROP~\cite{Riedmiller1993}. 629 Because the \emph{reference set} is usually not very large, 630 we have devised a simple method for its extension. 631 This enhancement is based upon adding random linear combinations 632 of \emph{style and pattern vectors} to the training set. 634 TODO: Is this an original idea? 636 As outlined above, the training set consists of pairs of 637 input vectors (\emph{pattern vectors}) and 638 desired output vectors (\emph{style vectors}). 639 The training set $T$ is then extended by adding the linear combinations: 640 \begin{equation} 641 T_\mathit{base} = \{(\vec p_r, \vec s_r) | r \in R\} 642 \end{equation} 643 \begin{equation} 644 T_\mathit{ext} = \{(\vec p, \vec s) | \exists D \subseteq R : \vec p = \sum_{d \in D}{g_d \vec p_d}, \vec s = \sum_{d \in D}{g_d \vec s_d}\} 645 \end{equation} 646 TODO: incorporate $g_d$ inside? 647 where $g_d, d \in D$ are random coefficients, so that $\sum_{d \in D}{g_d} = 1$.
648 The training set is then constructed as: 649 \begin{equation} 650 T = T_\mathit{base} \cup \mathit{SomeFiniteSubset}(T_\mathit{ext}) 651 \end{equation} 653 The network is trained as shown in Algorithm~\ref{alg:tnn}. 655 \begin{algorithm} 656 \caption{Training Neural Network} 657 \begin{algorithmic} 658 \label{alg:tnn} 659 \REQUIRE{Training set $T$, desired error $e$, max iterations $M$} 660 \STATE $N \leftarrow \mathit{RandomlyInitializedNetwork}()$ 661 \STATE $\mathit{It} \leftarrow 0$ 662 \REPEAT 663 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$ 664 \STATE $\Delta \vec w \leftarrow \vec 0$ 665 \STATE $\mathit{TotalError} \leftarrow 0$ 666 %\FORALL{$(\overrightarrow{Input}, \overrightarrow{DesiredOutput}) \in T$} 667 %\STATE $\overrightarrow{Output} \leftarrow Result(N, \overrightarrow{Input})$ 668 %\STATE $E \leftarrow |\overrightarrow{DesiredOutput} - \overrightarrow{Output}|$ 669 \FORALL{$(\mathit{Input}, \mathit{DesiredOutput}) \in T$} 670 \STATE $\mathit{Output} \leftarrow \mathit{Result}(N, \mathit{Input})$ 671 \STATE $\mathit{Error} \leftarrow |\mathit{DesiredOutput} - \mathit{Output}|$ 672 \STATE $\Delta \vec w \leftarrow \Delta \vec w + \mathit{WeightUpdate}(N,\mathit{Error})$ 673 \STATE $\mathit{TotalError} \leftarrow \mathit{TotalError} + \mathit{Error}$ 674 \ENDFOR 675 \STATE $N \leftarrow \mathit{ModifyWeights}(N, \Delta \vec w)$ 676 \UNTIL{$\mathit{TotalError} < e$ or $\mathit{It} > M$} 677 \end{algorithmic} 678 \end{algorithm} 681 \subsubsection{Architecture details} 682 TODO num layers, num neurons, .. 685 \subsection{Implementation} 687 We have implemented the data mining methods as an open-source framework ``gostyle'' \cite{TODO}, 688 made available under the GNU GPL licence. 689 We use Python for the basic processing and most of the analysis; 690 the MDP library \cite{MDP} is used for PCA analysis, and the Kohonen library \cite{KohonenPy} for Kohonen maps. 691 The neural network classifier uses the libfann C library.
\cite{TODO} 694 \section{Strength Estimator} 696 First, we have used our framework to analyse correlations of pattern vectors 697 and playing strength. Like in other competitively played board games, Go players 698 receive a real-world rating based on tournament games, and a rank based on their 699 rating.\footnote{An Elo-like rating system \cite{GoR} is usually used, 700 corresponding to even win chances for a game of two players with the same rank, 701 and about a 2:3 win chance for white in the case of a one-rank difference.}% 702 \footnote{Professional ranks and dan ranks in some Asian countries may 703 be assigned differently.} The amateur ranks range from 30kyu (beginner) to 704 1kyu (intermediate), followed by the dan ranks from 1dan to 7dan (9dan in some systems; 705 top-level player). Multiple independent real-world ranking scales exist 706 (geographically based) and online servers maintain their own user rankings; 707 the difference can be up to several stones. 709 As the source game collection, we use Go Teaching Ladder 710 reviews\footnote{The reviews contain comments and variations --- we consider only the actual played game.} 711 \cite{GTL} --- this collection contains 7700 games of players with strength ranging 712 from 30k to 4d; we consider only even games with clear rank information, and then 713 randomly separate 770 games as a testing set.
Since the rank information is provided 714 by the users and may not be consistent, we are forced to take a simplified look 715 at the ranks, discarding the differences between various systems and thus increasing 716 the error in our model.\footnote{Since 717 our results seem satisfying, we did not pursue trying another collection.} 719 First, we have created a single pattern vector for each rank, from 30k to 4d; 720 we have performed PCA analysis on the pattern vectors, achieving near-perfect 721 rank correspondence in the first PCA dimension\footnote{The eigenvalue of the 722 second dimension was four orders of magnitude smaller, with no discernible 723 structure revealed within the lower-order eigenvectors.} 724 (chi-square test TODO). 725 (Figure TODO.) Using the eigenvector position directly for classification 726 of players within the test group yields MSE TODO, thus providing 727 reasonably satisfying accuracy. 729 To further enhance the strength estimator accuracy, 730 we have tried to train an NN classifier on our training set, consisting 731 of one $(\vec p, \mathrm{rank})$ pair per player --- we use the pattern vector 732 for activation of input neurons and the rank number as the result of the output 733 neuron. We then proceeded to test the NN on per-player pattern vectors built 734 from the games in the test set, yielding an MSE of TODO with TODO games per player 735 on average. 738 \section{Style Estimator} 740 The source games collection is GoGoD Winter 2009 (TODO) containing 42000 (TODO) 741 professional games, dating from the early Go history 1500 years ago to the present. 743 bla bla bla 745 \subsection{Expert-based knowledge} 746 \label{style-vectors} 747 In order to provide a reference frame for our style analysis, 748 we have gathered some expert-based information about various 749 traditionally perceived style aspects.
750 Three high-level Go players (Alexander Dinerstein 3-pro, Motoki Noguchi 751 7-dan and V\'{i}t Brunner 4-dan) have judged the styles of several Go 752 professionals --- we call them \emph{reference players} --- chosen for both 753 being well-known within the community and having a large number of played games in our collection. 755 This expert-based knowledge allows us to predict styles of unknown players based on 756 the similarity of their pattern vectors, as well as discover correlations between 757 styles and proportions of played patterns. 759 The experts were asked to assign each player's style aspect a number 760 on a scale from 1 to 10. These are interpreted 761 as shown in the table below. 763 \vspace{4mm} 764 \noindent 765 %\begin{table} 766 \begin{center} 767 %\caption{Styles} 768 \begin{tabular}{|c|c|c|} 769 \hline 770 \multicolumn{3}{|c|}{Styles} \\ \hline 771 Style & 1 & 10\\ \hline 772 Territoriality & Moyo & Territorial \\ 773 Orthodoxity & Classic & Novel \\ 774 Aggressivity & Calm & Fighting \\ 775 Thickness & Safe & Shinogi \\ \hline 776 \end{tabular} 777 \end{center} 778 %\end{table} 779 \vspace{4mm} 781 Averaging this expert-based evaluation yields 782 the \emph{reference style vector} $\vec s_r$ (of dimension $4$) for each player $r$ 783 from the set of \emph{reference players} $R$. 785 \subsection{Style Components Analysis} 787 PCA analysis yielded X, chi-square test... 789 \subsection{Style Classification} 791 We then tried to apply the NN classifier with linear output function on the dataset 792 and that yielded Y (see fig. Z), with MSE abcd. 795 \section{Proposed Applications} 797 We believe that our findings might be useful for many applications 798 in the area of Go support software as well as Go-playing computer engines. 800 The style analysis can be an excellent teaching aid --- classifying style 801 dimensions based on a player's pattern vector, many study recommendations 802 can be given, e.g.
about the professional games to replay, the goal being 803 to balance the understanding of various styles to achieve a well-rounded skill set. 804 This was also our original aim when starting the research and a user-friendly 805 tool based on our work is now being created. 807 We hope that more strong players will look into the style dimensions found 808 by our statistical analysis --- analysis of the most played patterns of prospective 809 opponents might help to prepare for the game, but we especially hope that new insights 810 on strategic purposes of various shapes and general human understanding 811 of the game might be achieved by investigating the style-specific patterns. 813 Classifying the playing strength of a pattern vector of a player can be used 814 e.g.\ to help determine the initial real-world rating of a player before their 815 first tournament based on games played on the internet; some players, especially 816 in less populated areas, could get fairly strong before playing their first 817 real tournament. 819 Analysis of pattern vectors extracted from games of Go-playing programs 820 in light of the shown strength and style distributions might help to 821 highlight some weaknesses and room for improvements. (However, since 822 correlation does not imply causation, simply optimizing Go-playing programs 823 according to these vectors is unlikely to yield good results.) 824 Another interesting application in Go-playing programs might be strength 825 adjustment; the program can classify the player's level based on the pattern 826 vector from its previous games and auto-adjust its difficulty settings 827 accordingly to provide more even games for beginners. 830 % An example of a floating figure using the graphicx package. 831 % Note that \label must occur AFTER (or within) \caption. 832 % For figures, \caption should occur after the \includegraphics.
833 % Note that IEEEtran v1.7 and later has special internal code that 834 % is designed to preserve the operation of \label within \caption 835 % even when the captionsoff option is in effect. However, because 836 % of issues like this, it may be the safest practice to put all your 837 % \label just after \caption rather than within \caption{}. 839 % Reminder: the "draftcls" or "draftclsnofoot", not "draft", class 840 % option should be used if it is desired that the figures are to be 841 % displayed while in draft mode. 843 %\begin{figure}[!t] 844 %\centering 845 %\includegraphics[width=2.5in]{myfigure} 846 % where an .eps filename suffix will be assumed under latex, 847 % and a .pdf suffix will be assumed for pdflatex; or what has been declared 848 % via \DeclareGraphicsExtensions. 849 %\caption{Simulation Results} 850 %\label{fig_sim} 851 %\end{figure} 853 % Note that IEEE typically puts floats only at the top, even when this 854 % results in a large percentage of a column being occupied by floats. 857 % An example of a double column floating figure using two subfigures. 858 % (The subfig.sty package must be loaded for this to work.) 859 % The subfigure \label commands are set within each subfloat command, the 860 % \label for the overall figure must come after \caption. 861 % \hfil must be used as a separator to get equal spacing. 862 % The subfigure.sty package works much the same way, except \subfigure is 863 % used instead of \subfloat. 
865 %\begin{figure*}[!t] 866 %\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}% 867 %\label{fig_first_case}} 868 %\hfil 869 %\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}% 870 %\label{fig_second_case}}} 871 %\caption{Simulation results} 872 %\label{fig_sim} 873 %\end{figure*} 875 % Note that often IEEE papers with subfigures do not employ subfigure 876 % captions (using the optional argument to \subfloat), but instead will 877 % reference/describe all of them (a), (b), etc., within the main caption. 880 % An example of a floating table. Note that, for IEEE style tables, the 881 % \caption command should come BEFORE the table. Table text will default to 882 % \footnotesize as IEEE normally uses this smaller font for tables. 883 % The \label must come after \caption as always. 885 %\begin{table}[!t] 886 %% increase table row spacing, adjust to taste 887 %\renewcommand{\arraystretch}{1.3} 888 % if using array.sty, it might be a good idea to tweak the value of 889 % \extrarowheight as needed to properly center the text within the cells 890 %\caption{An Example of a Table} 891 %\label{table_example} 892 %\centering 893 %% Some packages, such as MDW tools, offer better commands for making tables 894 %% than the plain LaTeX2e tabular which is used here. 895 %\begin{tabular}{|c||c|} 896 %\hline 897 %One & Two\\ 898 %\hline 899 %Three & Four\\ 900 %\hline 901 %\end{tabular} 902 %\end{table} 905 % Note that IEEE does not put floats in the very first column - or typically 906 % anywhere on the first page for that matter. Also, in-text middle ("here") 907 % positioning is not used. Most IEEE journals use top floats exclusively. 908 % Note that, LaTeX2e, unlike IEEE journals, places footnotes above bottom 909 % floats. This can be corrected via the \fnbelowfloat command of the 910 % stfloats package. 914 \section{Conclusion} 915 The conclusion goes here. 916 We have shown brm and proposed brm. 
918 Since we are not aware of any previous research on this topic and we 919 are limited by space and time constraints, plenty of research remains 920 to be done. There is room for further research in all parts 921 of our analysis --- different methods of generating the $\vec p$ vectors 922 can be explored; other data mining methods could be tried. 923 It can be argued that many players adjust their style by game conditions 924 (Go development era, handicap, komi and color, time limits, opponent) 925 or that styles might express differently in various game stages. 926 More professional players could be consulted on the findings 927 and for style scales calibration. The impact of handicap games on the by-strength 928 $\vec p$ distribution should be investigated.
930 TODO: Future research --- Sparse PCA
935 % if have a single appendix:
936 %\appendix[Proof of the Zonklar Equations]
937 % or
938 %\appendix % for no appendix heading
939 % do not use \section anymore after \appendix, only \section*
940 % is possibly needed
942 % use appendices with more than one appendix
943 % then use \section to start each appendix
944 % you must declare a \section before using any
945 % \subsection or using \label (\appendices by itself
946 % starts a section numbered zero.)
950 %\appendices
951 %\section{Proof of the First Zonklar Equation}
952 %Appendix one text goes here.
954 %% you can choose not to have a title for an appendix
955 %% if you want by leaving the argument blank
956 %\section{}
957 %Appendix two text goes here.
960 % use section* for acknowledgement
961 \section*{Acknowledgment}
962 \label{acknowledgement}
965 We would like to thank X for reviewing our paper.
966 We appreciate helpful comments on our general methodology
967 by John Fairbairn, T. M. Hall, Robert Jasiek
968 and several GoDiscussions.com users~\cite{GoDiscThread}.
969 Finally, we are very grateful for the ranking of Go styles of selected professionals
970 by Alexander Dinerstein, Motoki Noguchi and V\'{i}t Brunner.
973 % Can use something like this to put references on a page
974 % by themselves when using endfloat and the captionsoff option.
975 \ifCLASSOPTIONcaptionsoff
976 \newpage
981 % trigger a \newpage just before the given reference
982 % number - used to balance the columns on the last page
983 % adjust value as needed - may need to be readjusted if
984 % the document is modified later
985 %\IEEEtriggeratref{8}
986 % The "triggered" command can be changed if desired:
987 %\IEEEtriggercmd{\enlargethispage{-5in}}
989 % references section
991 % can use a bibliography generated by BibTeX as a .bbl file
992 % BibTeX documentation can be easily obtained at:
993 % http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
994 % The IEEEtran BibTeX style support page is at:
995 % http://www.michaelshell.org/tex/ieeetran/bibtex/
996 \bibliographystyle{IEEEtran}
997 % argument is your BibTeX string definitions and bibliography database(s)
998 \bibliography{gostyle}
1000 % <OR> manually copy in the resultant .bbl file
1001 % set second argument of \begin to the number of references
1002 % (used to reserve space for the reference number labels box)
1003 %\begin{thebibliography}{1}
1005 %\bibitem{MasterMCTS}
1007 %\end{thebibliography}
1009 % biography section
1011 % If you have an EPS/PDF photo (graphicx package needed) extra braces are
1012 % needed around the contents of the optional argument to biography to prevent
1013 % the LaTeX parser from getting confused when it sees the complicated
1014 % \includegraphics command within an optional argument. (You could create
1015 % your own custom macro containing the \includegraphics command to make things
1016 % simpler here.)
1017 %\begin{biography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
1018 % or if you just want to reserve a space for a photo:
1020 \begin{IEEEbiography}{Michael Shell}
1021 Biography text here.
1022 \end{IEEEbiography}
1024 % if you will not have a photo at all:
1025 \begin{IEEEbiographynophoto}{John Doe}
1026 Biography text here.
1027 \end{IEEEbiographynophoto}
1029 % insert where needed to balance the two columns on the last page with
1030 % biographies
1031 %\newpage
1033 \begin{IEEEbiographynophoto}{Jane Doe}
1034 Biography text here.
1035 \end{IEEEbiographynophoto}
1037 % You can push biographies down or up by placing
1038 % a \vfill before or after them. The appropriate
1039 % use of \vfill depends on what kind of text is
1040 % on the last page and whether or not the columns
1041 % are being equalized.
1043 %\vfill
1045 % Can be used to pull up biographies so that the bottom of the last one
1046 % is flush with the other column.
1047 %\enlargethispage{-5in}
1051 % that's all folks
1052 \end{document}