gostyle_old, commit old
[gostyle.git] / tex / gostyle.tex
blob50c47ec875fff59d9ebd9be2e7e2704a570b427d
1 \documentclass[journal]{IEEEtran}
3 \usepackage{cite}
4 % cite.sty was written by Donald Arseneau
5 % V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
6 % \cite{} output to follow that of IEEE. Loading the cite package will
7 % result in citation numbers being automatically sorted and properly
8 % "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
9 % cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
10 % \cite will automatically add leading space, if needed. Use cite.sty's
11 % noadjust option (cite.sty V3.8 and later) if you want to turn this off.
12 % cite.sty is already installed on most LaTeX systems. Be sure and use
13 % version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
14 % not currently provide for hyperlinked citations.
15 % The latest version can be obtained at:
16 % http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
17 % The documentation is contained in the cite.sty file itself.
20 % *** GRAPHICS RELATED PACKAGES ***
22 \ifCLASSINFOpdf
23 % \usepackage[pdftex]{graphicx}
24 % declare the path(s) where your graphic files are
25 % \graphicspath{{../pdf/}{../jpeg/}}
26 % and their extensions so you won't have to specify these with
27 % every instance of \includegraphics
28 % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
29 \else
30 % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
31 % will default to the driver specified in the system graphics.cfg if no
32 % driver is specified.
33 % \usepackage[dvips]{graphicx}
34 \usepackage{graphicx}
35 % declare the path(s) where your graphic files are
36 % \graphicspath{{../eps/}}
37 % and their extensions so you won't have to specify these with
38 % every instance of \includegraphics
39 % \DeclareGraphicsExtensions{.eps}
40 \fi
42 \usepackage{threeparttable}
44 \usepackage{psgo}
45 \setgounit{0.4cm}
47 \usepackage{soul}
49 % FIRST REVISION
50 %ENABLE:
51 %\newcommand{\rv}[1]{\ul{#1}}
52 %DISABLE:
53 \newcommand{\rv}[1]{#1}
55 % SECOND REVISION
56 %ENABLE:
57 %\newcommand{\rvv}[1]{\ul{#1}}
58 %DISABLE:
59 \newcommand{\rvv}[1]{#1}
61 % THIRD REVISION
62 %ENABLE:
63 %\newcommand{\rvvv}[1]{\ul{#1}}
64 %DISABLE:
65 \newcommand{\rvvv}[1]{#1}
67 % GUESS THE # of this REVISION
68 %ENABLE:
69 %\newcommand{\rvvvv}[1]{\ul{#1}}
70 %DISABLE:
71 \newcommand{\rvvvv}[1]{#1}
73 \usepackage{algorithm}
74 \usepackage{algorithmic}
75 %\usepackage{algpseudocode}
76 % WICKED: neither of them works???
77 % algorithmic.sty can be obtained at:
78 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
79 % There is also a support site at:
80 % http://algorithms.berlios.de/index.html
81 % Also of interest may be the (relatively newer and more customizable)
82 % algorithmicx.sty package by Szasz Janos:
83 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/
85 % *** ALIGNMENT PACKAGES ***
87 %\usepackage{array}
88 % http://www.ctan.org/tex-archive/macros/latex/required/tools/
91 \usepackage{amsmath}
92 %\usepackage{mdwtab}
93 % http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/
96 % IEEEtran contains the IEEEeqnarray family of commands that can be used to
97 % generate multiline equations as well as matrices, tables, etc., of high
98 % quality.
100 %\usepackage{eqparbox}
101 % Also of notable interest is Scott Pakin's eqparbox package for creating
102 % (automatically sized) equal width boxes - aka "natural width parboxes".
103 % Available at:
104 % http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/
108 % *** SUBFIGURE PACKAGES ***
109 %\usepackage[tight,footnotesize]{subfigure}
110 % subfigure.sty was written by Steven Douglas Cochran. This package makes it
111 % easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
112 % work, it is a good idea to load it with the tight package option to reduce
113 % the amount of white space around the subfigures. subfigure.sty is already
114 % installed on most LaTeX systems. The latest version and documentation can
115 % be obtained at:
116 % http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
117 % subfigure.sty has been superseded by subfig.sty.
121 %\usepackage[caption=false]{caption}
122 %\usepackage[font=footnotesize]{subfig}
123 % subfig.sty, also written by Steven Douglas Cochran, is the modern
124 % replacement for subfigure.sty. However, subfig.sty requires and
125 % automatically loads Axel Sommerfeldt's caption.sty which will override
126 % IEEEtran.cls handling of captions and this will result in nonIEEE style
127 % figure/table captions. To prevent this problem, be sure and preload
128 % caption.sty with its "caption=false" package option. This is will preserve
129 % IEEEtran.cls handing of captions. Version 1.3 (2005/06/28) and later
130 % (recommended due to many improvements over 1.2) of subfig.sty supports
131 % the caption=false option directly:
132 %\usepackage[caption=false,font=footnotesize]{subfig}
134 % The latest version and documentation can be obtained at:
135 % http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
136 % The latest version and documentation of caption.sty can be obtained at:
137 % http://www.ctan.org/tex-archive/macros/latex/contrib/caption/
141 % *** FLOAT PACKAGES ***
143 %\usepackage{fixltx2e}
144 % fixltx2e, the successor to the earlier fix2col.sty, was written by
145 % Frank Mittelbach and David Carlisle. This package corrects a few problems
146 % in the LaTeX2e kernel, the most notable of which is that in current
147 % LaTeX2e releases, the ordering of single and double column floats is not
148 % guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
149 % single column figure to be placed prior to an earlier double column
150 % figure. The latest version and documentation can be found at:
151 % http://www.ctan.org/tex-archive/macros/latex/base/
155 %\usepackage{stfloats}
156 % stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
157 % the ability to do double column floats at the bottom of the page as well
158 % as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
159 % LaTeX2e). It also provides a command:
160 %\fnbelowfloat
161 % to enable the placement of footnotes below bottom floats (the standard
162 % LaTeX2e kernel puts them above bottom floats). This is an invasive package
163 % which rewrites many portions of the LaTeX2e float routines. It may not work
164 % with other packages that modify the LaTeX2e float routines. The latest
165 % version and documentation can be obtained at:
166 % http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
167 % Documentation is contained in the stfloats.sty comments as well as in the
168 % presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
169 % does not allow \baselineskip to stretch. Authors submitting work to the
170 % IEEE should note that IEEE rarely uses double column equations and
171 % that authors should try to avoid such use. Do not be tempted to use the
172 % cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
173 % not format its papers in such ways.
176 %\ifCLASSOPTIONcaptionsoff
177 % \usepackage[nomarkers]{endfloat}
178 % \let\MYoriglatexcaption\caption
179 % \renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
180 %\fi
181 % endfloat.sty was written by James Darrell McCauley and Jeff Goldberg.
182 % This package may be useful when used in conjunction with IEEEtran.cls'
183 % captionsoff option. Some IEEE journals/societies require that submissions
184 % have lists of figures/tables at the end of the paper and that
185 % figures/tables without any captions are placed on a page by themselves at
186 % the end of the document. If needed, the draftcls IEEEtran class option or
187 % \CLASSINPUTbaselinestretch interface can be used to increase the line
188 % spacing as well. Be sure and use the nomarkers option of endfloat to
189 % prevent endfloat from "marking" where the figures would have been placed
190 % in the text. The two hack lines of code above are a slight modification of
191 % that suggested by in the endfloat docs (section 8.3.1) to ensure that
192 % the full captions always appear in the list of figures/tables - even if
193 % the user used the short optional argument of \caption[]{}.
194 % IEEE papers do not typically make use of \caption[]'s optional argument,
195 % so this should not be an issue. A similar trick can be used to disable
196 % captions of packages such as subfig.sty that lack options to turn off
197 % the subcaptions:
198 % For subfig.sty:
199 % \let\MYorigsubfloat\subfloat
200 % \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
201 % For subfigure.sty:
202 % \let\MYorigsubfigure\subfigure
203 % \renewcommand{\subfigure}[2][\relax]{\MYorigsubfigure[]{#2}}
204 % However, the above trick will not work if both optional arguments of
205 % the \subfloat/subfig command are used. Furthermore, there needs to be a
206 % description of each subfigure *somewhere* and endfloat does not add
207 % subfigure captions to its list of figures. Thus, the best approach is to
208 % avoid the use of subfigure captions (many IEEE journals avoid them anyway)
209 % and instead reference/explain all the subfigures within the main caption.
210 % The latest version of endfloat.sty and its documentation can obtained at:
211 % http://www.ctan.org/tex-archive/macros/latex/contrib/endfloat/
213 % The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
214 % later in the document, say, to conditionally put the References on a
215 % page by themselves.
217 % *** PDF, URL AND HYPERLINK PACKAGES ***
219 \usepackage{url}
220 % url.sty was written by Donald Arseneau. It provides better support for
221 % handling and breaking URLs. url.sty is already installed on most LaTeX
222 % systems. The latest version can be obtained at:
223 % http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
224 % Read the url.sty source comments for usage information. Basically,
225 % \url{my_url_here}.
228 % *** Do not adjust lengths that control margins, column widths, etc. ***
229 % *** Do not use packages that alter fonts (such as pslatex). ***
230 % There should be no need to do such things with IEEEtran.cls V1.6 and later.
231 % (Unless specifically asked to do so by the journal or conference you plan
232 % to submit to, of course. )
234 % correct bad hyphenation here
235 \hyphenation{op-tical net-works semi-conduc-tor know-ledge}
238 \begin{document}
240 % paper title
241 % can use linebreaks \\ within to get better formatting as desired
242 \title{On Move Pattern Trends\\in \rvv{a} Large Go Games Corpus}
244 % use \thanks{} to gain access to the first footnote area
245 % a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
246 % was not built to handle multiple paragraphs
247 \author{Petr~Baudi\v{s},~Josef~Moud\v{r}\'{i}k\\
248 September 2012% <-this % stops a space
249 \thanks{P. Baudi\v{s} is student at the Faculty of Math and Physics, Charles University, Prague, CZ, and also does some of his Computer Go research as an employee of SUSE Labs Prague, Novell CZ.}% <-this % stops a space
250 \thanks{J. Moud\v{r}\'{i}k is student at the Faculty of Math and Physics, Charles University, Prague, CZ.}}
252 % note the % following the last \IEEEmembership and also \thanks -
253 % these prevent an unwanted space from occurring between the last author name
254 % and the end of the author line. i.e., if you had this:
256 % \author{....lastname \thanks{...} \thanks{...} }
257 % ^------------^------------^----Do not want these spaces!
259 % a space would be appended to the last name and could cause every name on that
260 % line to be shifted left slightly. This is one of those "LaTeX things". For
261 % instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
262 % "AB" then you have to do: "\textbf{A}\textbf{B}"
263 % \thanks is no different in this regard, so shield the last } of each \thanks
264 % that ends a line with a % and do not let a space in before the next \thanks.
265 % Spaces after \IEEEmembership other than the last one are OK (and needed) as
266 % you are supposed to have spaces between the names. For what it is worth,
267 % this is a minor point as most people would not even notice if the said evil
268 % space somehow managed to creep in.
271 % The paper headers
272 \markboth {On Move Pattern Trends in Large Go Games Corpus}{}
274 %{Transactions on Computational Intelligence and AI in Games --- REV. SUBMISSIONp}%
275 % The only time the second header will appear is for the odd numbered pages
276 % after the title page when using the twoside option.
278 % *** Note that you probably will NOT want to include the author's ***
279 % *** name in the headers of peer review papers. ***
280 % You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
281 % you desire.
286 % If you want to put a publisher's ID mark on the page you can do it like
287 % this:
288 %\IEEEpubid{0000--0000/00\$00.00~\copyright~2007 IEEE}
289 % Remember, if you use this you must call \IEEEpubidadjcol in the second
290 % column for its text to clear the IEEEpubid mark.
294 % use for special paper notices
295 %\IEEEspecialpapernotice{(Invited Paper)}
300 % make the title area
301 \maketitle
304 \begin{abstract}
305 %\boldmath
307 We process a~large corpus of game records of the board game of Go and propose
308 a~way of extracting summary information on played moves. We then apply several
309 basic data-mining methods on the summary information to identify the most
310 differentiating features within the summary information, and discuss their
311 correspondence with traditional Go knowledge. We show statistically significant
312 mappings of the features to player attributes such as playing strength or
313 informally perceived ``playing style'' (e.g. territoriality or aggressivity),
314 describe accurate classifiers for these attributes, and propose applications
315 including seeding real-world ranks of internet players, aiding in Go study and
316 tuning of Go-playing programs, or contribution to Go-theoretical discussion on
317 the scope of ``playing style''.
319 \end{abstract}
320 % IEEEtran.cls defaults to using nonbold math in the Abstract.
321 % This preserves the distinction between vectors and scalars. However,
322 % if the journal you are submitting to favors bold math in the abstract,
323 % then you can use LaTeX's standard command \boldmath at the very start
324 % of the abstract to achieve this. Many IEEE journals frown on math
325 % in the abstract anyway.
327 % Note that keywords are not normally used for peerreview papers.
328 \begin{IEEEkeywords}
329 Board games, Evaluation, Function approximation, Go, Machine learning, Neural networks, User modelling
330 \end{IEEEkeywords}
337 % For peer review papers, you can put extra information on the cover
338 % page as needed:
339 % \ifCLASSOPTIONpeerreview
340 % \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
341 % \fi
343 % For peerreview papers, this IEEEtran command inserts a page break and
344 % creates the second title. It will be ignored for other modes.
345 \IEEEpeerreviewmaketitle
347 % TODO remove!??
348 %This work has been submitted to the IEEE for possible publication. Copyright
349 %may be transferred without notice, after which this version may no longer be
350 %accessible.
353 \section{Introduction}
354 % The very first letter is a 2 line initial drop letter followed
355 % by the rest of the first word in caps.
357 % form to use if the first word consists of a single letter:
358 % \IEEEPARstart{A}{demo} file is ....
360 % form to use if you need the single drop letter followed by
361 % normal text (unknown if ever used by IEEE):
362 % \IEEEPARstart{A}{}demo file is ....
364 % Some journals put the first two words in caps:
365 % \IEEEPARstart{T}{his demo} file is ....
367 % Here we have the typical use of a "T" for an initial drop letter
368 % and "HIS" in caps to complete the first word.
369 \IEEEPARstart{T}{he} field of Computer Go usually focuses on the problem
370 of creating a~program to play the game, finding the best move from a~given
371 board position \cite{GellySilver2008}.
372 We will make use of one method developed in the course
373 of such research and apply it to the analysis of existing game records
374 with the aim of helping humans to play and understand the game better
375 instead.
377 Go is a~two-player full-information board game played
378 on a~square grid (usually $19\times19$ lines) with black and white
379 stones; the goal of the game is to surround the most territory and
380 capture enemy stones. We assume basic familiarity with the game.
382 Many Go players are eager to play using computers (usually over
383 the internet) and review games played by others on computers as well.
384 This means that large amounts of game records are collected and digitally
385 stored, enabling easy processing of such collections. However, so far
386 only little has been done with the available data. We are aware
386 only of uses for simple win/loss statistics \cite{KGSAnalytics,ProGoR}
387 and ``next move'' statistics on a~specific position \cite{Kombilo,MoyoGo}.
390 \rvvv{Additionally}, \rvv{a simple machine learning technique based on GNU Go's}\cite{GnuGo}\rvv{
391 move evaluation feature has recently been presented in}\cite{CompAwar}\rvv{. The authors used decision trees
392 to predict whether a given user belongs into one of three classes based on his strength
393 (casual, intermediate or advanced player). This method is however limited by the
394 blackbox-use of GNU Go engine, making it unsuitable for more detailed analysis of the moves.}
396 We present a~more in-depth approach --- from all played moves, we devise
397 a~compact evaluation of each player. We then explore correlations between
398 evaluations of various players in the light of externally given information.
399 This way, we can discover similarity between move characteristics of
400 players with the same playing strength, or discuss the meaning of the
401 ``playing style'' concept on the assumption that similar playing styles
402 should yield similar move characteristics.
404 \rv{We show that a~sample of player's games can be used to quite reliably estimate player's strength,
405 game style, or even a time when he/she was active. Apart from these practical results,
406 the research may prove to be useful for Go theorists by investigating the principles behind
407 the classical ``style'' classification.}
409 % XXX \rv{ \ref{bla} } is not working
411 \rv{We shall first present details of the extraction and summarization of
412 information from the game corpus (section~}\ref{pattern-vectors}\rv{).
413 Afterwards, we will explain the statistical methods applied (section~}\ref{data-mining}\rv{),
414 and then describe our findings on particular game collections,
415 regarding the analysis of either strength (section~}\ref{strength-analysis}\rv{)
416 or playing styles (section~}\ref{style-analysis}\rv{).
417 Finally, we will explore possible interpretations and few applications
418 of our research (section~}\ref{proposed-apps-and-discussion}\rv{)
419 and point out some possible future research directions (section~}\ref{future-research}\rv{).}
422 \section{Data Extraction}
423 \label{pattern-vectors}
425 As the input of our analysis, we use large collections of game records
426 \rvv{in SGF format} \cite{SGF}
427 \rv{grouped by the primary object of analysis
428 (player name when analyzing style of a particular player,
429 player rank when looking at the effect of rank on data, etc.).}
430 We process the games, generating a description for each
431 played move -- a {\em pattern}, being a combination of several
432 {\em pattern features} described below.
434 We \rv{compute the occurrence counts of all encountered patterns,
435 eventually} composing $n$-dimensional {\em pattern vector}
436 $\vec p$ of counts of the $n$ \rvv{(we use $n = 500$)} globally most frequent patterns
437 (the mapping from patterns to vector elements is common for
438 \rv{all generated vectors}).
439 We can then process and compare just the pattern vectors.
441 \subsection{Pattern Features}
442 When deciding how to compose the patterns we use to describe moves,
443 we need to consider a specificity tradeoff --- overly general descriptions carry too
444 little information to discern various player attributes; too specific descriptions
445 gather too few specimens over the games sample and the vector differences are
446 not statistically significant.
448 We have chosen an intuitive and simple approach inspired by pattern features
449 used when computing Elo ratings for candidate patterns in Computer Go play
450 \cite{PatElo}. Each pattern is a~combination of several {\em pattern features}
451 (name--value pairs) matched at the position of the played move.
452 We use these features:
454 \begin{itemize}
455 \item capture move flag\rvvv{,}
456 \item atari move flag\rvvv{,}
457 \item atari escape flag\rvvv{,}
458 \item contiguity-to-last flag%
459 \footnote{We do not consider contiguity features in some cases when we are working
460 on small game samples and need to reduce pattern diversity.}
461 --- whether the move has been played in one of 8 neighbors of the last move\rvvv{,}
462 \item contiguity-to-second-last flag\rvvv{,}
463 \item board edge distance --- only up to distance 4\rvvv{,}
464 \item and spatial pattern --- configuration of stones around the played move.
465 \end{itemize}
467 The spatial patterns are normalized (using a dictionary) to be always
468 black-to-play and maintain translational and rotational symmetry.
469 Configurations of radius between 2 and 9 in the gridcular metric%
470 \footnote{The {\em gridcular} metric
471 $d(x,y) = |\delta x| + |\delta y| + \max(|\delta x|, |\delta y|)$ produces
472 a circle-like structure on the Go board square grid \cite{SpatPat}. }
473 are matched.
475 Pattern vectors representing these features contain information on
476 played shape as well as a basic representation of tactical dynamics
477 --- threats to capture stones, replying to last move, or ignoring
478 opponent's move elsewhere to return to an urgent local situation.
479 The shapes \rv{often} correspond to opening moves
480 (either in empty corners and sides, or as part of {\em joseki}
481 --- commonly played sequences) characteristic for a certain
482 strategic aim. In the opening, even a single-line difference
483 in the distance from the border can have dramatic impact on
484 further local and global development.
486 \subsection{Vector Rescaling}
488 The pattern vector elements can have diverse values since for each object,
489 we consider a different number of games (and thus patterns).
490 Therefore, we normalize the values to range $[-1,1]$,
491 the most frequent pattern having the value of $1$ and the least occurring
492 one being $-1$.
493 Thus, we obtain vectors describing relative frequency of played patterns
494 independent of the number of gathered patterns.
495 But there are multiple ways to approach the normalization.
497 \begin{figure}[!t]
498 \centering
499 \includegraphics{patcountdist}
500 \caption{Log-scaled number of pattern occurrences
501 in the GoGoD games examined in sec.~\ref{style-analysis}.}
502 \label{fig:patcountdist}
503 \end{figure}
505 \subsubsection{Linear Normalization}
507 \rvv{An intuitive solution is to} linearly re-scale the values using:
508 \[ y_i = \frac{x_i - x_{\mathrm{min}}}{x_{\mathrm{max}}} \]
509 This is the default approach; we have used data processed by only this
510 computation unless we note otherwise.
511 As shown in Fig.~\ref{fig:patcountdist}, most of the spectrum is covered
512 by the few most-occurring patterns (describing mostly large-diameter
513 shapes from the game opening). This means that most patterns will be
514 always represented by only very small values near the lower bound.
516 \subsubsection{Extended Normalization}
517 \label{xnorm}
519 To alleviate this problem, we have also tried to modify the linear
520 normalization by applying two steps --- {\em pre-processing}
521 the raw counts using
522 \[ x_i' = \log (x_i + 1) \]
523 and {\em post-processing} the re-scaled values by the logistic function:
524 \[ y_i' = \frac{2}{1 + e^{-c y_i}} - 1 \]
525 However, we have found that this method is not universally beneficial.
526 In our styles case study (sec.~\ref{style-analysis}), this normalization
527 produced PCA decomposition with significant dimensions corresponding
528 better to some of the prior knowledge and more instructive for manual
529 inspection, but ultimately worsened accuracy of our classifiers.
530 \rvvv{From this we conjecture} that the most frequently occurring patterns are
531 also most important for classification of major style aspects.
533 \subsection{Implementation}
535 We have implemented the data extraction by making use of the pattern
536 features matching implementation
537 within the Pachi Go-playing program \cite{Pachi}, \rvv{which works according to
538 the Elo-rating pattern selection scheme} \cite{PatElo}.
539 We extract information on players by converting the SGF game
540 records to GTP stream \cite{GTP} that feeds Pachi's {\tt patternscan}
541 engine, \rv{producing} a~single {\em patternspec} (string representation
542 of the particular pattern features combination) per move. Of course,
543 only moves played by the appropriate \rv{player} are collected.
545 \section{Data Mining}
546 \label{data-mining}
548 To assess the properties of gathered pattern vectors
549 and their influence on playing styles,
550 we analyze the data using several basic data mining techniques.
551 The first two methods {\em (analytic)} rely purely on single data set
552 and serve to show internal structure and correlations within the data set.
554 \rvvv{\emph{ Principal Component Analysis}} \rvv{\emph{(PCA)}} \cite{Jolliffe1986}
555 finds orthogonal vector components that \rv{represent} the largest variance
556 \rv{of values within the dataset.
557 That is, PCA will produce vectors representing
558 the overall variability within the dataset --- the first vector representing
559 the ``primary axis'' of the dataset, the next vectors representing the less
560 significant axes; each vector has an associated number that
561 determines its impact on the overall dataset variance: $1.0$ would mean
562 that all points within the dataset lie on this vector, value close to zero
563 would mean that removing this dimension would have little effect on the
564 overall shape of the dataset.}
566 \rvv{Reversing the process of the PCA by backprojecting the orthogonal vector components into the
567 original pattern space can indicate which patterns correlate with each component.}
568 Additionally, PCA can be used as vector preprocessing for methods
569 that are negatively sensitive to pattern vector component correlations.
571 \rv{On the other hand,} Sociomaps \cite{Sociomaps,TeamProf,SociomapsPersonal} produce
572 spatial representation of the data set elements (e.g. players) based on
573 similarity of their data set features\rvvv{. Projecting some other
574 information on this map helps illustrate connections within the data set.}
576 % Pryc v ramci snizeni poctu footnotu
577 %\footnote{\rv{We also attempted to visualise the player relationships
578 %using Kohonen maps, but that did not produce very useful results.}}
580 Furthermore, we test several \emph{classification} methods that assign
581 an \emph{output vector} $\vec O$ \rv{to} each pattern vector $\vec P$,
582 \rv{the output vector representing the information we want to infer
583 from the game sample} --- e.g.~\rv{assessment of} the playing style,
584 player's strength or even meta-information like the player's era
585 or the country of origin.
586 Initially, the methods must be calibrated (trained) on some prior knowledge,
587 usually in the form of \emph{reference pairs} of pattern vectors
588 and the associated output vectors.
589 The reference set is divided into training and testing pairs
590 and the methods can be compared by the mean square error (MSE) on testing data set
591 (difference of output vectors approximated by the method and their real desired value).
593 %\footnote{However, note that dicrete characteristics such as country of origin are
594 %not very feasible to use here, since WHAT??? is that even true?? }
596 The most trivial method is approximation by the PCA representation
597 matrix, provided that the PCA dimensions have some already well-defined
598 \rv{interpretation}\rvvv{. This} can be true for single-dimensional information like
599 the playing strength.
600 Aside of that, we test the $k$-Nearest Neighbors (\emph{$k$-NN}) classifier \cite{CoverHart1967}
601 that approximates $\vec O$ by composing the output vectors
602 of $k$ reference pattern vectors closest to $\vec P$.
604 Another classifier is a~multi-layer feed-forward Artificial Neural Network \rv{(see e.g. }\cite{Haykin1994}\rv{)}.
605 The neural network can learn correlations between input and output vectors
606 and generalize the ``knowledge'' to unknown vectors\rvvv{. The neural network} can be more flexible
607 in the interpretation of different pattern vector elements and discern more
608 complex relations than the $k$-NN classifier,
609 but may not be as stable and expects larger training sample.
611 Finally, a commonly used classifier in statistical inference is
612 the Naive Bayes Classifier \cite{Bayes}\rvvv{. It} can infer relative probability of membership
613 in various classes based on previous evidence (training patterns).
615 \subsection{Statistical Methods}
616 We use a~couple of general statistical analysis \rv{methods} together
617 with the particular techniques.
618 \label{pearson}
619 To find correlations within or between extracted data and
620 some prior knowledge (player rank, style vector), we compute the well-known
621 {\em Pearson product-moment correlation coefficient (PMCC)} \cite{Pearson},
622 measuring the strength of the linear dependence%
623 \footnote{A desirable property of PMCC is that it is invariant to translations and rescaling
624 of the vectors.}
625 between any two dimensions:
627 \[ r_{X,Y} = \frac{\mathrm{cov}(X,Y)}{\sigma_X \sigma_Y} \]
629 To compare classifier performance on the reference data, we employ
630 {\em $k$-fold cross validation}:
631 we randomly divide the training set
632 into $k$ distinct segments of similar sizes and then iteratively
633 use each part as a~testing set as the other $k-1$ parts are used as a~training set.
634 We then average results over all iterations.
636 \subsection{Principal Component Analysis}
637 \label{PCA}
638 We use Principal Component Analysis
639 to reduce the dimensions of the pattern vectors while preserving
640 as much information as possible, assuming inter-dependencies between
641 pattern vector dimensions are linear.
642 \rv{Technically}, PCA is an eigenvalue decomposition of a~covariance matrix of centered pattern vectors,
643 producing a~linear mapping $o$ from $n$-dimensional vector space
644 to a~reduced $m$-dimensional vector space.
645 The $m$ eigenvectors of the original vectors' covariance matrix
646 with the largest eigenvalues are used as the base of the reduced vector space;
647 the eigenvectors form projection matrix $W$.
649 For each original pattern vector $\vec p_i$,
650 we obtain its new representation $\vec r_i$ in the PCA base
651 as shown in the following equation:
652 \begin{equation}
653 \vec r_i = W \cdot \vec p_i
654 \end{equation}
656 The whole process is described in Algorithm~\ref{alg:pca}.
658 \begin{algorithm}
659 \caption{PCA -- Principal Component Analysis}
660 \begin{algorithmic}
661 \label{alg:pca}
662 \REQUIRE{$m > 0$, set of players $R$ with pattern vectors $p_r$}
663 \STATE $\vec \mu \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_r}$
664 \FOR{ $r \in R$}
665 \STATE $\vec p_r \leftarrow \vec p_r - \vec \mu$
666 \ENDFOR
667 \FOR{ $(i,j) \in \{1,... ,n\} \times \{1,... ,n\}$}
668 \STATE $\mathit{Cov}[i,j] \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_{ri} \cdot \vec p_{rj}}$
669 \ENDFOR
670 \STATE Compute Eigenvalue Decomposition of $\mathit{Cov}$ matrix
671 \STATE Get $m$ largest eigenvalues
672 \STATE Most significant eigenvectors ordered by decreasing eigenvalues form the rows of matrix $W$
673 \FOR{ $r \in R$}
674 \STATE $\vec r_r\leftarrow W \vec p_r$
675 \ENDFOR
676 \end{algorithmic}
677 \end{algorithm}
679 \subsection{Sociomaps}
680 \label{soc}
681 Sociomaps are a general mechanism for \rv{visualizing}
682 relationships on a 2D plane such that \rv{given} ordering of the
683 \rv{player} distances in the dataset is preserved in distances on the plane.
684 In our particular case,
685 %\footnote{A special case of the {\em Subject-to-Object Relation Mapping (STORM)} indirect sociomap.}
686 we will consider a dataset $\vec S$ of low-dimensional
687 vectors $\vec s_i$. First, we estimate the {\em significance}
688 of difference \rv{of} each two subjects.
689 Then, we determine projection $\varphi$ of all the $\vec s_i$
690 to spatial coordinates of a Euclidean plane, such that it reflects
691 the estimated difference significances.
693 % TODO: Clarify, clean up references
695 To quantify the differences between the subjects ({\em team profiling})
696 into an $A$ matrix, for each two subjects $i, j$ we compute the scalar distance%
697 \footnote{We use the {\em Manhattan} metric $d(x,y) = \sum_i |x_i - y_i|$.}
698 of $s_i, s_j$ and then estimate the $A_{ij}$ probability of at least such distance
699 occurring in uniformly-distributed input (the higher the probability, the more
700 significant and therefore important to preserve the difference is).
702 To visualize the quantified differences, we need to find
703 the $\varphi$ projection such that it maximizes a {\em three-way ordering} criterion:
704 ordering of any three members within $A$ and on the plane
705 (by Euclidean metric) must be the same.
707 $$ \max_\varphi \sum_{i\ne j\ne k} \Phi(\varphi, i, j, k) $$
708 $$ \Phi(\varphi, i, j, k) = \begin{cases}
709 1 & \delta(1,A_{ij},A_{ik}) = \delta(\varphi(i),\varphi(j),\varphi(k)) \\
710 0 & \hbox{otherwise} \end{cases} $$
711 $$ \delta(a, b, c) = \begin{cases}
712 1 & |a-b| > |a-c| \\
713 0 & |a-b| = |a-c| \\
714 -1 & |a-b| < |a-c| \end{cases} $$
716 The $\varphi$ projection is then determined by randomly initializing
717 the position of each subject and then employing gradient descent methods.
719 \subsection{k-Nearest Neighbors Classifier}
720 \label{knn}
721 Our goal is to approximate \rvv{the} player's output vector $\vec O$,
722 knowing their pattern vector $\vec P$.
723 We further assume that similarities in players' pattern vectors
724 uniformly correlate with similarities in players' output vectors.
726 We require a set of reference players $R$ with known \emph{pattern vectors} $\vec p_r$
727 and \emph{output vectors} $\vec o_r$.
728 $\vec O$ is approximated as weighted average of \emph{output vectors}
729 $\vec o_i$ of $k$ players with \emph{pattern vectors} $\vec p_i$ closest to $\vec P$.
730 This is illustrated in Algorithm~\ref{alg:knn}.
731 Note that the weight is a function of distance and is not explicitly defined in Algorithm~\ref{alg:knn}.
732 During our research, exponentially decreasing weight has proven to be sufficient\rvvv{,
733 as detailed in each of the case studies.}
735 \begin{algorithm}
736 \caption{k-Nearest Neighbors}
737 \begin{algorithmic}
738 \label{alg:knn}
739 \REQUIRE{pattern vector $\vec P$, $k > 0$, set of reference players $R$}
740 \FORALL{$r \in R$ }
741 \STATE $D[r] \leftarrow \mathit{EuclideanDistance}(\vec p_r, \vec P)$
742 \ENDFOR
743 \STATE $N \leftarrow \mathit{SelectSmallest}(k, R, D)$
744 \STATE $\vec O \leftarrow \vec 0$
745 \FORALL{$r \in N $}
746 \STATE $\vec O \leftarrow \vec O + \mathit{Weight}(D[r]) \cdot \vec o_r $
747 \ENDFOR
748 \end{algorithmic}
749 \end{algorithm}
751 \subsection{Neural Network Classifier}
752 \label{neural-net}
754 Feed-forward neural networks are known for their ability to generalize
755 and find correlations between input patterns and output classifications.
756 Before use, the network is iteratively trained on the training data
757 until the error on the training set is reasonably small.
759 %Neural network is an adaptive system that must undergo a training
760 %period similarly to the requirement
761 %of reference vectors for the k-Nearest Neighbors algorithm above.
763 \subsubsection{Computation and activation of the NN}
764 Technically, the neural network is a network of interconnected
765 computational units called neurons.
766 A feed-forward neural network has a layered topology\rvvv{. It}
767 usually has one \emph{input layer}, one \emph{output layer}
768 and an arbitrary number of \emph{hidden layers} between.
769 Each neuron $i$ gets input from all neurons in the previous layer,
770 each connection having specific weight $w_{ij}$.
772 The computation proceeds in discrete time steps.
773 In the first step, the neurons in the \emph{input layer}
774 are \emph{activated} according to the \emph{input vector}.
775 Then, we iteratively compute output of each neuron in the next layer
776 until the output layer is reached.
777 The activity of output layer is then presented as the result.
779 The activation $y_i$ of neuron $i$ from the layer $I$ is computed as
780 \begin{equation}
781 y_i = f\left(\sum_{j \in J}{w_{ij} y_j}\right)
782 \end{equation}
783 where $J$ is the previous layer and $y_j$ is the activation of neurons from layer $J$.
784 Function $f()$ is a~so-called \emph{activation function}
785 and its purpose is to bound the outputs of neurons.
786 A typical example of an activation function is the sigmoid function.%
787 \footnote{A special case of the logistic function $\sigma(x)=(1+e^{-(rx+k)})^{-1}$.
788 Parameters control the growth rate $r$ and the x-position $k$.}
790 \subsubsection{Training}
791 Training of the feed-forward neural network usually involves some
792 modification of the supervised Backpropagation learning algorithm.
793 We use the first-order optimization algorithm called RPROP \cite{Riedmiller1993}.
795 %Because the \emph{reference set} is usually not very large,
796 %we have devised a simple method for its extension.
797 %This enhancement is based upon adding random linear combinations
798 %of \emph{style and pattern vectors} to the training set.
800 As outlined above, the training set $T$ consists of
801 $(\vec p_i, \vec o_i)$ pairs.
802 The training algorithm is shown in Algorithm \ref{alg:tnn}.
804 \begin{algorithm}
805 \caption{Training Neural Network}
806 \begin{algorithmic}
807 \label{alg:tnn}
808 \REQUIRE{Train set $T$, desired error $e$, max iterations $M$}
809 \STATE $N \leftarrow \mathit{RandomlyInitializedNetwork}()$
810 \STATE $\mathit{It} \leftarrow 0$
811 \REPEAT
812 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
813 \STATE $\Delta \vec w \leftarrow \vec 0$
814 \STATE $\mathit{TotalError} \leftarrow 0$
815 %\FORALL{$(\overrightarrow{Input}, \overrightarrow{DesiredOutput}) \in T$}
816 %\STATE $\overrightarrow{Output} \leftarrow Result(N, \overrightarrow{Input})$
817 %\STATE $E \leftarrow |\overrightarrow{DesiredOutput} - \overrightarrow{Output}|$
818 \FORALL{$(\mathit{Input}, \mathit{DesiredOutput}) \in T$}
819 \STATE $\mathit{Output} \leftarrow \mathit{Result}(N, \mathit{Input})$
820 \STATE $\mathit{Error} \leftarrow |\mathit{DesiredOutput} - \mathit{Output}|$
821 \STATE $\Delta \vec w \leftarrow \Delta \vec w + \mathit{WeightUpdate}(N,\mathit{Error})$
822 \STATE $\mathit{TotalError} \leftarrow \mathit{TotalError} + \mathit{Error}$
823 \ENDFOR
824 \STATE $N \leftarrow \mathit{ModifyWeights}(N, \Delta \vec w)$
825 \UNTIL{$\mathit{TotalError} < e$ or $ \mathit{It} > M$}
826 \end{algorithmic}
827 \end{algorithm}
829 \subsection{Naive Bayes Classifier}
830 \label{naive-bayes}
832 The Naive Bayes Classifier uses existing information to construct
833 probability model of likelihoods of given {\em feature variables}
834 based on a discrete-valued {\em class variable}.
835 Using the Bayes equation, we can then estimate the probability distribution
836 of class variable for particular values of the feature variables.
838 In order to approximate the player's output vector $\vec O$ based on
839 pattern vector $\vec P$, we will compute each element of the
840 output vector separately, covering the output domain by several $k$-sized
841 discrete intervals (classes).
842 \rv{In fact, we use the PCA-represented input $\vec R$ (using the 10 most significant
843 dimensions), since it better fits the pre-requisites of the
844 Bayes classifier -- values in each dimension are more independent and
845 they approximate the normal distribution better. Additionally, small input dimensions
846 are computationally feasible.}
848 When training the classifier for $\vec O$ element $o_i$
849 of class $c = \lfloor o_i/k \rfloor$,
850 we assume the $\vec R$ elements are normally distributed and
851 feed the classifier information in the form
852 $$ \vec R \mid c $$
853 estimating the mean $\mu_c$ and standard deviation $\sigma_c$
854 of each $\vec R$ element for each encountered $c$
855 (see Algorithm~\ref{alg:tnb}).
856 Then, we can query the built probability model on
857 $$ \max_c P(c \mid \vec R) $$
858 obtaining the most probable class $c$ for an arbitrary $\vec R$.
859 Each probability is obtained using the normal distribution formula:
860 $$ P(c \mid x) = {1\over \sqrt{2\pi\sigma_c^2}}\exp\left({-(x-\mu_c)^2\over2\sigma_c^2}\right) $$
862 \begin{algorithm}
863 \caption{Training Naive Bayes}
864 \begin{algorithmic}
865 \label{alg:tnb}
866 \REQUIRE{Training set $T = (\mathit{R, c})$}
867 \FORALL{$(R, c) \in T$}
868 \STATE $\mathit{RbyC}_c \leftarrow \mathit{RbyC}_c \cup \{R\}$
869 \ENDFOR
870 \FORALL{$c$}
871 \STATE $\mu_c \leftarrow {1 \over |\mathit{RbyC}_c|} \sum_{R \in \mathit{RbyC}_c} R$
872 \ENDFOR
873 \FORALL{$c$}
874 \STATE $\sigma_c \leftarrow \sqrt{{1 \over |\mathit{RbyC}_c|} \sum_{R \in \mathit{RbyC}_c} (R-\mu_c)^2} $
875 \ENDFOR
876 \end{algorithmic}
877 \end{algorithm}
879 \subsection{Implementation}
881 We have implemented the data mining methods as the
882 ``gostyle'' open-source framework \cite{GoStyle},
883 made available under the GNU GPL licence.
884 The majority of our basic processing and \rv{analysis
885 is} implemented in the Python \cite{Python25} programming language.
887 We use several external libraries, most notably the MDP library \cite{MDP} \rv{for the PCA analysis}.
888 The neural network \rv{component} is written using the libfann C library \cite{Nissen2003}.
889 The Naive Bayes Classifier \rv{is built around} the {\tt AI::NaiveBayes1} Perl module \cite{NaiveBayes1}.
890 The sociomap has been visualised using the Team Profile Analyzer \cite{TPA}
891 which is a part of the Sociomap suite \cite{SociomapSite}.
894 \section{Strength Analysis}
895 \label{strength-analysis}
897 \begin{figure*}[!t]
898 \centering
899 \includegraphics[width=7in]{strength-pca}
900 \caption{PCA of by-strength vectors}
901 \label{fig:strength_pca}
902 \end{figure*}
904 First, we have used our framework to analyse correlations of pattern vectors
905 and playing strength. Like in other competitively played board games, Go players
906 receive real-world {\em rating number} based on tournament games,
907 and {\em rank} based on their rating.
908 %\footnote{Elo-type rating system \cite{GoR} is usually used,
909 %corresponding to even win chances for game of two players with the same rank,
910 %and about 2:3 win chance for the stronger in case of one rank difference.}
911 %\footnote{Professional ranks and dan ranks in some Asia countries may be assigned differently.}
912 The amateur ranks range from 30-kyu (beginner) to 1-kyu (intermediate)
913 followed by 1-dan to 9-dan
914 %\footnote{7-dan in some systems.}
915 (top-level player).
917 There are multiple independent real-world ranking scales
918 (geographically based), \rv{while} online servers \rv{also} maintain their own user rank \rv{list}\rvvv{.
919 The} difference between scales can be up to several ranks and the rank
920 distributions also differ \cite{RankComparison}.
922 \subsection{Data source}
923 As the source game collection, we use \rvv{the} Go Teaching Ladder reviews archive
924 %\footnote{The reviews contain comments and variations --- we consider only the main variation with the actual played game.}
925 \cite{GTL}. This collection contains 7700 games of players with strength ranging
926 from 30-kyu to 4-dan; we consider only even games with clear rank information.
928 Since the rank information is provided by the users and may not be consistent,
929 we are forced to take a simplified look at the ranks,
930 discarding the differences between various systems and thus somewhat
931 increasing error in our model.\footnote{Since our results seem satisfying,
932 we did not pursue another collection;
933 one could e.g. look at game archives of some Go server to work within
934 single more-or-less consistent rank model.}
935 We represent the rank in our dataset \rv{as an integer in the range} $[-3,30]$ with positive
936 numbers representing the kyu ranks and numbers smaller than 1 representing the dan
937 ranks: 4-dan maps to $-3$, 1-dan to $0$, etc.
939 \subsection{Strength PCA analysis}
940 First, we have created a single pattern vector for each rank between 30-kyu to 4-dan;
941 we have performed PCA analysis on the pattern vectors, achieving near-perfect
942 rank correspondence in the first PCA dimension%
943 \footnote{The eigenvalue of the second dimension was four times smaller,
944 with no discernible structure revealed within the lower-order eigenvectors.}
945 (figure \ref{fig:strength_pca}).
946 We measure the accuracy of the strength approximation by the first PCA dimension
947 using Pearson's $r$ (see \ref{pearson}), yielding very satisfying value of $r=0.979$
948 implying extremely strong correlation.%
949 %\footnote{Extended vector normalization (sec. \ref{xnorm}) produced noticeably less clear-cut results.}
951 \rv{This reflects the trivial fact that the most important ``defining characteristic''
952 of a set of players grouped by strength is indeed their strength and confirms
953 that our methodology is correct.
954 At the same time, this result suggests that it is possible to accurately estimate
955 player's strength \rvvv{just} from a sample of his games,
956 as we confirm below.}
957 %\footnote{The point is of course that the pattern analysis can be done even if we do not know the opponent's strength, or even the game result.}
959 \rv{When investigating a player's $\vec p$, the PCA decomposition could be also
960 useful for study suggestions --- a program could examine the pattern gradient at the
961 player's position on the PCA dimensions and suggest patterns to avoid and patterns
962 to play more often. Of course, such advice alone is certainly not enough and it
963 must be used only as a basis of a more thorough analysis of reasons behind the fact
964 that the player plays other patterns than they ``should''.}
966 \subsection{Strength Classification}
967 \label{strength-class}
969 \rv{In line with results of the PCA analysis, we have tested the strength approximation ability
970 of $k$-NN (sec.} \ref{knn}\rv{), neural network (sec. }\ref{neural-net}\rv{),
971 and a simple PCA-based classifier (sec. }\ref{PCA}\rv{).}
973 \subsubsection{Reference (Training) Data}
974 \rv{We have trained the tested classifiers using one pattern vector per rank
975 (aggregate over all games played by some player declaring the given rank),
976 then performing PCA analysis to reduce the dimension of pattern vectors.}
977 We have explored the influence of different game sample sizes (\rv{$G$})
978 on the classification accuracy to \rv{determine the} practicality and scaling
979 abilities of the classifiers.
980 In order to reduce the diversity of patterns (negatively impacting accuracy
981 on small samples), we do not consider the contiguity pattern features.
983 The classifiers were compared by running a many-fold validation by repeatedly and
984 exhaustively taking disjoint \rv{$G$}--game samples of the same rank from the collection
985 and measuring the standard error of the classifier.
986 Arbitrary game numbers were approximated by pattern file sizes,
987 iteratively selecting all games of randomly selected player
988 of the required strength.
990 %We have randomly separated $10\%$ of the game database as a testing set,
991 %Using the most %of players within the test group yields MSE TODO, thus providing
992 %reasonably satisfying accuracy by itself.
994 %Using the Naive Bayes classifier yields MSE TODO.
996 \subsubsection{Results}
997 \rv{The results are shown in the table~}\ref{table-str-class}\rv{.
998 The $G$ column describes the number of games in each sample,
999 $\mathit{MSE}$ column shows measured mean square error and $\sigma$ is the empirical standard deviation.
1000 Methods are
1001 compared (column $\mathit{Cmp}$) to the random classifier by the quotient of their~$\sigma$.}
1003 \rv{From the table, it should be obvious that the $k$-NN is obtaining good
1004 accuracy even on as few as 9 games as a sample\rvv{, where the classifier performs within a standard deviation of $4.6$kyu.}
1005 For a large number of training vectors -- albeit not very accurate due to small
1006 sample sizes -- the neural network classifier performs very similarly.
1007 For samples of 2 games, the neural network is even slightly better on average.
1008 However, due to the decreasing number of training vectors with increasing game sample sizes,
1009 the neural network gets unusable for large \rvvvv{game} sample sizes.
1010 The table therefore only shows the neural network results for samples of 17 games and smaller.}
1011 \rv{PCA-based classifier (the most significant PCA eigenvector position is simply directly taken as a~rank) and
1012 a random classifier are listed mainly for the sake of comparison, because they do not perform
1013 competitively.}
1015 \begin{table}[!t]
1016 % increase table row spacing, adjust to taste
1017 \renewcommand{\arraystretch}{1.3}
1018 \caption{Strength Classifier Performance}
1019 \label{table-str-class}
1020 \centering
1021 \begin{tabular}{|c|c||c|c||c|}
1022 \hline
1023 Method & \rv{$G$} & MSE & $\sigma$ & Cmp \\ \hline
1024 $k$-NN&$85$ & $5.514$ & $2.348$ & $6.150$ \\
1025 &$43$ & $8.449$ & $2.907$ & $4.968$ \\
1026 &$17$ & $10.096$& $3.177$ & $4.545$ \\
1027 &$9$ & $21.343$& $4.620$ & $3.126$ \\
1028 &$2$ & $52.212$& $7.226$ & $1.998$ \\\hline
1030 \rv{Neural Network} & $17$ & $110.633$ & $10.518$ & $1.373$ \\
1031 &$9$ & $44.512$ & $6.672$ & $2.164$ \\
1032 &$2$ & $43.682$ & $6.609$ & $2.185$ \\ \hline
1033 %&$1$ & $58.051$ & $7.619$ & $1.895$ \\ \hline
1035 PCA & $85$ & $24.070$ & $4.906$ & $2.944$ \\
1036 &$43$ & $31.324$ & $5.597$ & $2.580$ \\
1037 &$17$ & $50.390$ & $7.099$ & $2.034$ \\
1038 &$9$ & $72.528$ & $8.516$ & $1.696$ \\
1039 &$2$ & $128.660$& $11.343$ & $1.273$ \\ \hline
1041 Rnd & N/A & $208.549$ & $14.441$ & $1.000$ \\ \hline
1042 \end{tabular}
1043 \end{table}
1045 \subsubsection{$k$-NN parameters}
1046 \rv{Using the $4$-Nearest Neighbors classifier with the weight function}
1047 \begin{equation}
1048 \mathit{Weight}(\vec x) = 0.9^{M*\mathit{Distance}(\vec x)}
1049 \end{equation}
1050 (parameter $M$ ranging from $30$ to $6$).
1052 \subsubsection{Neural network's parameters}
1053 \rv{The neural network classifier had a three-layered architecture (one hidden layer)
1054 comprising the following numbers of neurons:}
1055 \vspace{4mm}
1056 \noindent
1057 %\begin{table}
1058 \begin{center}
1059 %\caption{Styles}
1060 \begin{tabular}{|c|c|c|}
1061 \hline
1062 \multicolumn{3}{|c|}{Layer} \\\hline
1063 Input & Hidden & Output \\ \hline
1064 119 & 35 & 1 \\ \hline
1065 \end{tabular}
1066 \end{center}
1067 %\end{table}
1068 \vspace{4mm}
1070 \rv{The network was trained until the square error on the training set was smaller than $0.0005$.
1071 Due to the small number of input vectors,
1072 this only took about $20$ iterations of the RPROP learning algorithm on average.}
1075 %#Finally, we used $8$-fold cross validation on one-file-per-rank data,
1076 %yielding a MSE $0.085$ which is equivalent to standard deviation of $15\%$.
1078 \section{Style Analysis}
1079 \label{style-analysis}
1081 As a~second case study for our pattern analysis,
1082 we investigate pattern vectors $\vec p$ of various well-known players,
1083 their relationships in-between and to prior knowledge
1084 in order to explore the correlation of prior knowledge with extracted patterns.
1085 We look for relationships between pattern vectors and perceived
1086 ``playing style'' and attempt to use our classifiers to transform
1087 the pattern vector $\vec p$ to a style vector $\vec s$.
1089 \subsection{Data sources}
1090 \subsubsection{Game database}
1091 The source game collection is GoGoD Winter 2008 \cite{GoGoD} containing 55000
1092 professional games, dating from the early Go history 1500 years ago to the present.
1093 We consider only games of a small subset of players (table \ref{fig:style_marks})\rvvv{. These players
1094 were chosen} for being well-known within the players community,
1095 having large number of played games in our collection and not playing too long
1096 ago.
1097 %\footnote{Over time, many commonly used sequences get altered, adopted and
1098 %dismissed\rvvv{. Usual} playing conditions can also differ significantly.}
1100 \subsubsection{Expert-based knowledge}
1101 \label{style-vectors}
1102 In order to provide a reference frame for our style analysis,
1103 we have gathered some information from game experts about various
1104 traditionally perceived style aspects to use as a prior knowledge.
1105 This expert-based knowledge allows us to predict styles of unknown players
1106 based on the similarity of their pattern vectors,
1107 as well as discover correlations between styles and \rv{particular}
1108 move patterns.
1110 Experts were asked to mark four style aspects of each of the given players
1111 on the scale from 1 to 10. The style aspects are defined as shown:
1113 \vspace{4mm}
1114 \noindent
1115 %\begin{table}
1116 \begin{center}
1117 %\caption{Styles}
1118 \begin{tabular}{|c|c|c|}
1119 \hline
1120 Style & 1 & 10\\ \hline
1121 Territoriality $\tau$ & Moyo & Territory \\
1122 Orthodoxity $\omega$ & Classic & Novel \\
1123 Aggressivity $\alpha$ & Calm & Fighting \\
1124 Thickness $\theta$ & Safe & Shinogi \\ \hline
1125 \end{tabular}
1126 \end{center}
1127 %\end{table}
1128 \vspace{4mm}
1130 We have devised these four style aspects based on our own Go experience
1131 and consultations with other experts.
1132 The used terminology has quite
1133 clear meaning to any experienced Go player and there is not too much
1134 room for confusion, except possibly in the case of ``thickness'' ---
1135 but the concept is not easy to pin-point succinctly and we also did not
1136 add extra comments on the style aspects to the questionnaire deliberately
1137 to accurately reflect any diversity in understanding of the terms.
1138 Averaging this expert based evaluation yields \emph{reference style vector}
1139 $\vec s_r$ (of dimension $4$) for each player $r$
1140 from the set of \emph{reference players} $R$.
1142 Throughout our research, we have experimentally found that playing era
1143 is also a major factor differentiating between patterns. Thus, we have
1144 further extended the $\vec s_r$ by median year over all games played
1145 by the player.
1147 \begin{table}[!t]
1148 % increase table row spacing, adjust to taste
1149 \renewcommand{\arraystretch}{1.3}
1150 \caption{Covariance Measure of Prior Information Dimensions}
1151 \label{fig:style_marks_r}
1152 \centering
1153 % Some packages, such as MDW tools, offer better commands for making tables
1154 % than the plain LaTeX2e tabular which is used here.
1155 \begin{tabular}{|r||r||r||r||r||r|}
1156 \hline
1157 & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & year \\
1158 \hline
1159 $\tau$ &$1.000$&$\mathbf{-0.438}$&$\mathbf{-0.581}$&$\mathbf{ 0.721}$&$ 0.108$\\
1160 $\omega$& &$ 1.000$&$\mathbf{ 0.682}$&$ 0.014$&$-0.021$\\
1161 $\alpha$& & &$ 1.000$&$-0.081$&$ 0.030$\\
1162 $\theta$& &\multicolumn{1}{c||}{---}
1163 & &$ 1.000$&$-0.073$\\
1164 y. & & & & &$ 1.000$\\
1165 \hline
1166 \end{tabular}
1167 \end{table}
1169 Three high-level Go players (Alexander Dinerstein 3-pro, Motoki Noguchi
1170 7-dan and V\'{i}t Brunner 4-dan) have judged the style of the reference
1171 players.
1172 The complete list of answers is in table \ref{fig:style_marks}.
1173 Standard error of the answers is 0.952, making the data reasonably reliable,
1174 though much larger sample would of course be more desirable
1175 (but beyond our means to collect).
1176 We have also found a~significant correlation between the various
1177 style aspects, as shown by the Pearson's $r$ values
1178 in table \ref{fig:style_marks_r}.
1180 \rv{We have made few manual adjustments in the dataset, disregarding some
1181 players or portions of their games. This was done to achieve better
1182 consistency of the games (e.g. considering only games of roughly the
1183 same age) and to consider only sets of games that can be reasonably
1184 rated as a whole by human experts (who can give a clear feedback in this
1185 effect). This filtering methodology can be easily reproduced
1186 and such arbitrary decisions are necessary only
1187 for processing the training dataset, not for using it (either for exploration
1188 or classification).}
1190 \begin{table}[!t]
1191 % increase table row spacing, adjust to taste
1192 \renewcommand{\arraystretch}{1.4}
1193 \begin{threeparttable}
1194 \caption{Expert-Based Style Aspects of Selected Professionals\tnote{1} \tnote{2}}
1195 \label{fig:style_marks}
1196 \centering
1197 % Some packages, such as MDW tools, offer better commands for making tables
1198 % than the plain LaTeX2e tabular which is used here.
1199 \begin{tabular}{|c||c||c||c||c|}
1200 \hline
1201 {Player} & $\tau$ & $\omega$ & $\alpha$ & $\theta$ \\
1202 \hline
1203 Go Seigen\tnote{3} & $6.0 \pm 2.0$ & $9.0 \pm 1.0$ & $8.0 \pm 1.0$ & $5.0 \pm 1.0$ \\
1204 Ishida Yoshio\tnote{4}&$8.0 \pm 1.4$ & $5.0 \pm 1.4$ & $3.3 \pm 1.2$ & $5.3 \pm 0.5$ \\
1205 Miyazawa Goro & $1.5 \pm 0.5$ & $10 \pm 0 $ & $9.5 \pm 0.5$ & $4.0 \pm 1.0$ \\
1206 Yi Ch'ang-ho\tnote{5}& $7.0 \pm 0.8$ & $5.0 \pm 1.4$ & $2.6 \pm 0.9$ & $2.6 \pm 1.2$ \\
1207 Sakata Eio & $7.6 \pm 1.7$ & $4.6 \pm 0.5$ & $7.3 \pm 0.9$ & $8.0 \pm 1.6$ \\
1208 Fujisawa Hideyuki & $3.5 \pm 0.5$ & $9.0 \pm 1.0$ & $7.0 \pm 0.0$ & $4.0 \pm 0.0$ \\
1209 Otake Hideo & $4.3 \pm 0.5$ & $3.0 \pm 0.0$ & $4.6 \pm 1.2$ & $3.6 \pm 0.9$ \\
1210 Kato Masao & $2.5 \pm 0.5$ & $4.5 \pm 1.5$ & $9.5 \pm 0.5$ & $4.0 \pm 0.0$ \\
1211 Takemiya Masaki\tnote{4}&$1.3\pm 0.5$& $6.3 \pm 2.1$ & $7.0 \pm 0.8$ & $1.3 \pm 0.5$ \\
1212 Kobayashi Koichi & $9.0 \pm 1.0$ & $2.5 \pm 0.5$ & $2.5 \pm 0.5$ & $5.5 \pm 0.5$ \\
1213 Cho Chikun & $9.0 \pm 0.8$ & $7.6 \pm 0.9$ & $6.6 \pm 1.2$ & $9.0 \pm 0.8$ \\
1214 Ma Xiaochun & $8.0 \pm 2.2$ & $6.3 \pm 0.5$ & $5.6 \pm 1.9$ & $8.0 \pm 0.8$ \\
1215 Yoda Norimoto & $6.3 \pm 1.7$ & $4.3 \pm 2.1$ & $4.3 \pm 2.1$ & $3.3 \pm 1.2$ \\
1216 Luo Xihe & $7.3 \pm 0.9$ & $7.3 \pm 2.5$ & $7.6 \pm 0.9$ & $6.0 \pm 1.4$ \\
1217 O Meien & $2.6 \pm 1.2$ & $9.6 \pm 0.5$ & $8.3 \pm 1.7$ & $3.6 \pm 1.2$ \\
1218 Rui Naiwei & $4.6 \pm 1.2$ & $5.6 \pm 0.5$ & $9.0 \pm 0.8$ & $3.3 \pm 1.2$ \\
1219 Yuki Satoshi & $3.0 \pm 1.0$ & $8.5 \pm 0.5$ & $9.0 \pm 1.0$ & $4.5 \pm 0.5$ \\
1220 Hane Naoki & $7.5 \pm 0.5$ & $2.5 \pm 0.5$ & $4.0 \pm 0.0$ & $4.5 \pm 1.5$ \\
1221 Takao Shinji & $5.0 \pm 1.0$ & $3.5 \pm 0.5$ & $5.5 \pm 1.5$ & $4.5 \pm 0.5$ \\
1222 Yi Se-tol & $5.3 \pm 0.5$ & $6.6 \pm 2.5$ & $9.3 \pm 0.5$ & $6.6 \pm 1.2$ \\
1223 Yamashita Keigo\tnote{4}&$2.0\pm 0.0$& $9.0 \pm 1.0$ & $9.5 \pm 0.5$ & $3.0 \pm 1.0$ \\
1224 Cho U & $7.3 \pm 2.4$ & $6.0 \pm 0.8$ & $5.3 \pm 1.7$ & $6.3 \pm 1.7$ \\
1225 Gu Li & $5.6 \pm 0.9$ & $7.0 \pm 0.8$ & $9.0 \pm 0.8$ & $4.0 \pm 0.8$ \\
1226 Chen Yaoye & $6.0 \pm 1.0$ & $4.0 \pm 1.0$ & $6.0 \pm 1.0$ & $5.5 \pm 0.5$ \\
1227 \hline
1228 \end{tabular}
1229 \begin{tablenotes}
1230 \item [1] Including standard deviation. Only players where we received at least two out of three answers are included.
1231 \item [2] Since the playing era column does not fit into the table, we at least sort the players ascending by their median year.
1232 \item [3] We do not consider games of Go Seigen due to him playing across several distinct eras and also being famous for radical opening experiments throughout the time, and thus featuring especially high diversity in patterns.
1233 \item [4] We do not consider games of Ishida Yoshio and Yamashita Keigo for the PCA analysis since they are significant outliers, making high-order dimensions much like purely ``similarity to this player''. Takemiya Masaki has the similar effect for the first dimension, but that case corresponds to common knowledge of him being an extreme proponent of anti-territorial (``moyo'') style.
1234 \item [5] We consider games only up to year 2004, since Yi Ch'ang-ho was prominent representative of a balanced, careful player until then and still has this reputation in minds of many players, but is regarded to have altered his style significantly afterwards.
1235 \end{tablenotes}
1236 \end{threeparttable}
1237 \end{table}
1239 \subsection{Style PCA analysis}
1241 \begin{figure}[!t]
1242 \centering
1243 \includegraphics[width=3in]{style-pca}
1244 \caption{Columns with the most significant PCA dimensions of the dataset.}
1245 \label{fig:style_pca}
1246 \end{figure}
1248 We have looked at the ten most significant dimensions of the pattern data
1249 yielded by the PCA analysis of the reference player set
1250 %\footnote{
1251 %We also tried to observe PCA effect of removing outlying Takemiya
1252 %Masaki. That way, the second dimension strongly
1253 %correlated to territoriality and third dimension strongly correlacted to era,
1254 %however the first dimension remained mysteriously uncorrelated and with no
1255 %obvious interpretation.}
1256 (fig. \ref{fig:style_pca} shows the first three).
1257 We have again computed the Pearson's $r$ for all combinations of PCA dimensions
1258 and dimensions of the prior knowledge style vectors to find correlations.
1260 \begin{table}[!t]
1261 % increase table row spacing, adjust to taste
1262 \renewcommand{\arraystretch}{1.4}
1263 \caption{Covariance Measure of PCA and Prior Information}
1264 \label{fig:style_r}
1265 \centering
1266 % Some packages, such as MDW tools, offer better commands for making tables
1267 % than the plain LaTeX2e tabular which is used here.
1268 \begin{tabular}{|c||r||r||r||r||r|}
1269 \hline
1270 Eigenval. & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & Year \\
1271 \hline
1272 $0.447$ & $\mathbf{-0.530}$ & $ 0.323$ & $ 0.298$ & $\mathbf{-0.554}$ & $ 0.090$ \\
1273 $0.194$ & $\mathbf{-0.547}$ & $ 0.215$ & $ 0.249$ & $-0.293$ & $\mathbf{-0.630}$ \\
1274 $0.046$ & $ 0.131$ & $-0.002$ & $-0.128$ & $ 0.242$ & $\mathbf{-0.630}$ \\
1275 $0.028$ & $-0.011$ & $ 0.225$ & $ 0.186$ & $ 0.131$ & $ 0.067$ \\
1276 $0.024$ & $-0.181$ & $ 0.174$ & $-0.032$ & $-0.216$ & $ 0.352$ \\
1277 %$0.018$ & $-0.364$ & $ 0.226$ & $ 0.339$ & $-0.136$ & $ 0.113$ \\
1278 %$0.014$ & $-0.194$ & $-0.048$ & $-0.099$ & $-0.333$ & $ 0.055$ \\
1279 %$0.0110575$ & $-0.040$ & $-0.254$ & $-0.154$ & $-0.054$ & $-0.089$ \\
1280 %$0.0093587$ & $-0.199$ & $-0.115$ & $ 0.358$ & $-0.234$ & $-0.028$ \\
1281 %$0.0084930$ & $ 0.046$ & $ 0.190$ & $ 0.305$ & $ 0.176$ & $ 0.089$ \\
1282 % puvodni
1283 %$0.4473697$ & $\mathbf{-0.530}$ & $ 0.323$ & $ 0.298$ & $\mathbf{-0.554}$ & $ 0.090$ \\
1284 %$0.1941057$ & $\mathbf{-0.547}$ & $ 0.215$ & $ 0.249$ & $-0.293$ & $\mathbf{-0.630}$ \\
1285 %$0.0463189$ & $ 0.131$ & $-0.002$ & $-0.128$ & $ 0.242$ & $\mathbf{-0.630}$ \\
1286 %$0.0280301$ & $-0.011$ & $ 0.225$ & $ 0.186$ & $ 0.131$ & $ 0.067$ \\
1287 %$0.0243231$ & $-0.181$ & $ 0.174$ & $-0.032$ & $-0.216$ & $ 0.352$ \\
1288 %$0.0180875$ & $-0.364$ & $ 0.226$ & $ 0.339$ & $-0.136$ & $ 0.113$ \\
1289 %$0.0138478$ & $-0.194$ & $-0.048$ & $-0.099$ & $-0.333$ & $ 0.055$ \\
1290 %$0.0110575$ & $-0.040$ & $-0.254$ & $-0.154$ & $-0.054$ & $-0.089$ \\
1291 %$0.0093587$ & $-0.199$ & $-0.115$ & $ 0.358$ & $-0.234$ & $-0.028$ \\
1292 %$0.0084930$ & $ 0.046$ & $ 0.190$ & $ 0.305$ & $ 0.176$ & $ 0.089$ \\
1293 \hline
1294 \end{tabular}
1295 \end{table}
1297 \begin{table}[!t]
1298 % increase table row spacing, adjust to taste
1299 \renewcommand{\arraystretch}{1.6}
1300 \begin{threeparttable}
1301 \caption{Characteristic Patterns of PCA$_{1,2}$ Dimensions \tnote{1}}
1302 \label{fig:style_patterns}
1303 \centering
1304 % Some packages, such as MDW tools, offer better commands for making tables
1305 % than the plain LaTeX2e tabular which is used here.
1306 \begin{tabular}{|p{2.3cm}p{2.4cm}p{2.4cm}p{0cm}|}
1307 % The virtual last column is here because otherwise we get random syntax errors.
1309 \hline \multicolumn{4}{|c|}{PCA$_1$ --- Moyo-oriented, thin-playing player} \\
1310 \centering \begin{psgopartialboard*}{(8,1)(12,6)}
1311 \stone[\marktr]{black}{k}{4}
1312 \end{psgopartialboard*} &
1313 \centering \begin{psgopartialboard*}{(1,2)(5,6)}
1314 \stone{white}{d}{3}
1315 \stone[\marktr]{black}{d}{5}
1316 \end{psgopartialboard*} &
1317 \centering \begin{psgopartialboard*}{(5,1)(10,6)}
1318 \stone{white}{f}{3}
1319 \stone[\marktr]{black}{j}{4}
1320 \end{psgopartialboard*} & \\
1321 \centering $0.274$ & \centering $0.086$ & \centering $0.083$ & \\
1322 \centering high corner/side opening \tnote{2} & \centering high corner approach & \centering high distant pincer & \\
1324 \hline \multicolumn{4}{|c|}{PCA$_1$ --- Territorial, thick-playing player} \\
1325 \centering \begin{psgopartialboard*}{(3,1)(7,6)}
1326 \stone{white}{d}{4}
1327 \stone[\marktr]{black}{f}{3}
1328 \end{psgopartialboard*} &
1329 \centering \begin{psgopartialboard*}{(3,1)(7,6)}
1330 \stone{white}{c}{6}
1331 \stone{black}{d}{4}
1332 \stone[\marktr]{black}{f}{3}
1333 \end{psgopartialboard*} &
1334 \centering \begin{psgopartialboard*}{(3,1)(7,6)}
1335 \stone{black}{d}{4}
1336 \stone[\marktr]{black}{f}{3}
1337 \end{psgopartialboard*} & \\
1338 \centering $-0.399$ & \centering $-0.399$ & \centering $-0.177$ & \\
1339 \centering low corner approach & \centering low corner reply & \centering low corner enclosure & \\
1341 \hline \multicolumn{4}{|c|}{PCA$_2$ --- Territorial, current player \tnote{3}} \\
1342 \centering \begin{psgopartialboard*}{(3,1)(7,6)}
1343 \stone{white}{c}{6}
1344 \stone{black}{d}{4}
1345 \stone[\marktr]{black}{f}{3}
1346 \end{psgopartialboard*} &
1347 \centering \begin{psgopartialboard*}{(3,1)(8,6)}
1348 \stone{white}{d}{4}
1349 \stone[\marktr]{black}{g}{4}
1350 \end{psgopartialboard*} &
1351 \centering \begin{psgopartialboard*}{(4,1)(9,6)}
1352 \stone{black}{d}{4}
1353 \stone{white}{f}{3}
1354 \stone[\marktr]{black}{h}{3}
1355 \end{psgopartialboard*} & \\
1356 \centering $-0.193$ & \centering $-0.139$ & \centering $-0.135$ & \\
1357 \centering low corner reply \tnote{4} & \centering high distant approach/pincer & \centering near low pincer & \\
1359 \hline
1360 \end{tabular}
1361 \begin{tablenotes}
1362 \item [1] We present the patterns in a simplified compact form;
1363 in reality, they are usually somewhat larger and always circle-shaped
1364 (centered on the triangled move).
1365 We omit only pattern segments that are entirely empty.
1366 \item [2] We give some textual interpretation of the patterns, especially
1367 since some of them may not be obvious unless seen in game context; we choose
1368 the descriptions based on the most frequently observed contexts, but of course
1369 the pattern can also be matched in other positions and situations.
1370 \item [3] In the second PCA dimension, we find no correlated patterns;
1371 only uncorrelated and anti-correlated ones.
1372 \item [4] As the second most significant pattern,
1373 we skip a slide follow-up pattern to this move.
1374 \end{tablenotes}
1375 \end{threeparttable}
1376 \end{table}
1378 \begin{table}[!t]
1379 % increase table row spacing, adjust to taste
1380 \renewcommand{\arraystretch}{1.8}
1381 \begin{threeparttable}
1382 \caption{Characteristic Patterns of PCA$_3$ Dimension \tnote{1}}
1383 \label{fig:style_patterns3}
1384 \centering
1385 % Some packages, such as MDW tools, offer better commands for making tables
1386 % than the plain LaTeX2e tabular which is used here.
1387 \begin{tabular}{|p{2.4cm}p{2.4cm}p{2.4cm}p{0cm}|}
1388 % The virtual last column is here because otherwise we get random syntax errors.
1390 \hline \multicolumn{4}{|c|}{PCA$_3$ --- Old-time player} \\
1391 \centering \begin{psgopartialboard*}{(1,3)(5,7)}
1392 \stone{white}{d}{4}
1393 \stone[\marktr]{black}{c}{6}
1394 \end{psgopartialboard*} &
1395 \centering \begin{psgopartialboard*}{(8,1)(12,5)}
1396 \stone[\marktr]{black}{k}{3}
1397 \end{psgopartialboard*} &
1398 \centering \begin{psgopartialboard*}{(1,1)(5,5)}
1399 \stone[\marktr]{black}{c}{3}
1400 \end{psgopartialboard*} & \\
1401 \centering $0.515$ & \centering $0.264$ & \centering $0.258$ & \\
1402 \centering low corner approach & \centering low side or mokuhazushi opening & \centering san-san opening & \\
1404 \hline \multicolumn{4}{|c|}{PCA$_3$ --- Current player} \\
1405 \centering \begin{psgopartialboard*}{(3,1)(7,5)}
1406 \stone{black}{d}{4}
1407 \stone[\marktr]{black}{f}{3}
1408 \end{psgopartialboard*} &
1409 \centering \begin{psgopartialboard*}{(1,1)(5,5)}
1410 \stone[\marktr]{black}{c}{4}
1411 \end{psgopartialboard*} &
1412 \centering \begin{psgopartialboard*}{(1,2)(5,6)}
1413 \stone{black}{d}{3}
1414 \stone{white}{d}{5}
1415 \stone[\marktr]{black}{c}{5}
1416 \end{psgopartialboard*} & \\
1417 \centering $-0.276$ & \centering $-0.273$ & \centering $-0.116$ & \\
1418 \centering low corner enclosure & \centering 3-4 corner opening \tnote{2} & \centering high approach reply & \\
1420 \hline
1421 \end{tabular}
1422 \begin{tablenotes}
1423 \item [1] We cannot use the terms ``classic'' and ``modern'' in case of PCA$_3$
1424 since the current patterns are commonplace in games of past centuries
1425 (not included in our training set) and many would call a lot of the old-time patterns
1426 modern inventions. Perhaps we can infer that the latest 21st-century play trends abandon
1427 many of the 20th-century experiments (lower echelon of our by-year samples)
1428 to return to the more ordinary but effective classic patterns.
1429 \item [2] At this point, we skip two patterns already shown elsewhere:
1430 {\em high side/corner opening} and {\em low corner reply}.
1431 \end{tablenotes}
1432 \end{threeparttable}
1433 \end{table}
1435 It is immediately
1436 obvious both from the measured $r$ and visual observation
1437 that by far the most significant vector corresponds very well
1438 to the territoriality of the players,
1439 confirming the intuitive notion that this aspect of style
1440 is the one easiest to pin-point and also
1441 most obvious in the played shapes and sequences
1442 (that can obviously aim directly at taking secure territory
1443 or building center-oriented framework). Thick (solid) play also plays
1444 a role, but these two style dimensions are already
1445 correlated in the prior data.
1447 The other PCA dimensions are somewhat harder to interpret, but there
1448 certainly is significant influence of the styles on the patterns;
1449 the correlations are all presented in table \ref{fig:style_r}.
1450 (Larger absolute value means better linear correspondence.)
1452 We also list the characteristic spatial patterns of the PCA dimension
1453 extremes (tables \ref{fig:style_patterns}, \ref{fig:style_patterns3}), determined by their coefficients
1454 in the PCA projection matrix --- however, such a naive approach
1455 has limited reliability; better methods will have to be researched.%
1456 \footnote{For example, as one of the highly ranked ``Takemiya's'' PCA$_1$ patterns,
1457 the 3-3 corner opening was generated, completely inappropriately;
1458 it reflects some weak ordering in bottom half of the dimension,
1459 not global ordering within the dimension.}
1460 We do not show the other pattern features since they carry no useful
1461 information in the opening stage.
1462 %\footnote{The board distance feature can be useful in some cases, but here all the spatial patterns are wide enough to reach to the edge on their own.}
1463 \begin{table}[!t]
1464 % increase table row spacing, adjust to taste
1465 \renewcommand{\arraystretch}{1.4}
1466 \caption{Covariance Measure of Externed-Normalization PCA and~Prior Information}
1467 \label{fig:style_normr}
1468 \centering
1469 % Some packages, such as MDW tools, offer better commands for making tables
1470 % than the plain LaTeX2e tabular which is used here.
1471 \begin{tabular}{|c||r||r||r||r||r|}
1472 \hline
1473 Eigenval. & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & Year \\
1474 \hline
1475 $6.377$ & $ \mathbf{0.436}$ & $-0.220$ & $-0.289$ & $ \mathbf{0.404}$ & $\mathbf{-0.576}$ \\
1476 $1.727$ & $\mathbf{-0.690}$ & $ 0.340$ & $ 0.315$ & $\mathbf{-0.445}$ & $\mathbf{-0.639}$ \\
1477 $1.175$ & $-0.185$ & $ 0.156$ & $ 0.107$ & $-0.315$ & $ 0.320$ \\
1478 $0.845$ & $ 0.064$ & $-0.102$ & $-0.189$ & $ 0.032$ & $ 0.182$ \\
1479 $0.804$ & $-0.185$ & $ 0.261$ & $ \mathbf{0.620}$ & $ 0.120$ & $ 0.056$ \\
1480 $0.668$ & $-0.027$ & $ 0.055$ & $ 0.147$ & $-0.198$ & $ 0.155$ \\
1481 $0.579$ & $ 0.079$ & $ \mathbf{0.509}$ & $ 0.167$ & $ 0.294$ & $-0.019$ \\
1482 %$0.4971474$ & $ 0.026$ & $-0.119$ & $-0.071$ & $ 0.049$ & $ 0.043$ \\
1483 %$0.4938777$ & $-0.061$ & $ 0.061$ & $ 0.104$ & $-0.168$ & $ 0.015$ \\
1484 %$0.4888848$ & $ 0.203$ & $-0.283$ & $-0.120$ & $ 0.083$ & $-0.220$ \\
1485 % puvodni tabulka
1486 %$6.3774289$ & $ \mathbf{0.436}$ & $-0.220$ & $-0.289$ & $ \mathbf{0.404}$ & $\mathbf{-0.576}$ \\
1487 %$1.7269775$ & $\mathbf{-0.690}$ & $ 0.340$ & $ 0.315$ & $\mathbf{-0.445}$ & $\mathbf{-0.639}$ \\
1488 %$1.1747101$ & $-0.185$ & $ 0.156$ & $ 0.107$ & $-0.315$ & $ 0.320$ \\
1489 %$0.8452797$ & $ 0.064$ & $-0.102$ & $-0.189$ & $ 0.032$ & $ 0.182$ \\
1490 %$0.8038992$ & $-0.185$ & $ 0.261$ & $ \mathbf{0.620}$ & $ 0.120$ & $ 0.056$ \\
1491 %$0.6679533$ & $-0.027$ & $ 0.055$ & $ 0.147$ & $-0.198$ & $ 0.155$ \\
1492 %$0.5790000$ & $ 0.079$ & $ \mathbf{0.509}$ & $ 0.167$ & $ 0.294$ & $-0.019$ \\
1493 %$0.4971474$ & $ 0.026$ & $-0.119$ & $-0.071$ & $ 0.049$ & $ 0.043$ \\
1494 %$0.4938777$ & $-0.061$ & $ 0.061$ & $ 0.104$ & $-0.168$ & $ 0.015$ \\
1495 %$0.4888848$ & $ 0.203$ & $-0.283$ & $-0.120$ & $ 0.083$ & $-0.220$ \\
1496 \hline
1497 \end{tabular}
1498 \end{table}
1500 The PCA results presented above do not show much correlation between
1501 the significant PCA dimensions and the $\omega$ and $\alpha$ style dimensions.
1502 However, when we applied the extended vector normalization
1503 (sec. \ref{xnorm}; see table \ref{fig:style_normr}),
1504 some less significant PCA dimensions exhibited clear correlations.%
1505 \footnote{We have found that $c=6$ in the post-processing logistic function
1506 produces the most instructive PCA output on our particular game collection.}
1507 While we do not use the extended normalization results elsewhere since
1508 they produced noticeably less accurate classifiers in all dimensions
1509 (including $\omega$ and $\alpha$), it is instructive to look at the PCA dimensions.
1511 \rv{In contrast with the emphasis of opening patterns in the $\tau$ and $\theta$
1512 dimensions, the most contributing patterns of the $\omega$ and $\alpha$
1513 dimensions are the middle-game patterns that occur less frequently and require
1514 the extended normalization not to be over-shadowed by the opening patterns.}%
1515 \footnote{In the middle game, \rv{basic areas of influence have been staked
1516 out and invasions and group attacks are being played out}.
1517 Notably, the board is much more filled \rv{than in the opening} and thus
1518 particular specific-shape patterns repeat less often.}
1519 E.g. the most characteristic patterns
1520 on the aggressiveness dimension represent moves that make life with small,
1521 unstable groups (connecting kosumi on second line or mouth-shape eyespace
1522 move), while the novice-ranked players seem to like the (in)famous tsuke-nobi
1523 joseki sequence.%
1524 \footnote{\rv{Tsuke-nobi is a well-known joseki popular among beginners,
1525 but professionals usually play it only in special contexts.}}
1526 \rv{This may either mean that novice players like to play the joseki more,
1527 or (more likely, in our opinion) that novice players are more likely to
1528 get into unorthodox situations that require resorting to the tsuke-nobi
1529 sequence.}
1530 We believe that the next step in interpreting our analytical results
1531 will be more refined prior information input
1532 and precise analysis of the outputs by Go experts.
1534 \begin{figure}[!t]
1535 \centering
1536 \includegraphics[width=3.5in,angle=-90]{sociomap}
1537 \caption{Sociomap visualisation. The spatial positioning of players
1538 is based on the expert knowledge, while the node heights (depicted by
1539 contour lines) represent the pattern vectors.}%
1540 %The light lines denote coherence-based hierarchical clusters.
1542 \label{fig:sociomap}
1543 \end{figure}
1545 Fig. \ref{fig:sociomap} shows the Sociomap visualisation
1546 as an alternate view of the player relationships and similarity,
1547 as well as correlation between the expert-given style marks
1548 and the PCA decomposition. The four-dimensional style vectors
1549 are used as input for the Sociomap renderer and determine the
1550 spatial positions of players. The height of a node is then
1551 determined using first two PCA dimensions $R_1,R_2$ and their
1552 eigenvalues $\lambda_1,\lambda_2$ as their linear combination:
1553 \[ h = \lambda_1 R_1 + \lambda_2 R_2 . \]
1555 We can observe that the terrain of the sociomap is reasonably
1556 ``smooth'', again demonstrating some level of connection between
1557 the style vectors and data-mined information. High contour density
1558 indicates some discrepancy; in case of Takemiya Masaki and Yi Ch'ang-ho,
1559 this seems to be merely an issue of scale,
1560 while the Rui Naiwei --- Gu Li cliff suggests a genuine problem;
1561 we cannot say now whether it is because of imprecise prior information
1562 or lacking approximation abilities of our model.
1564 \subsection{Style Classification}
1565 \label{style-class}
1567 %TODO vsude zcheckovat jestli pouzivame stejny cas "we perform, we apply" X "we have performed, ..."
1569 Similarly to the Strength classification (section \ref{strength-class}), we have tested the style inference ability
1570 of $k$-NN (sec. \ref{knn}), neural network (sec. \ref{neural-net}), and Bayes (sec. \ref{naive-bayes}) classifiers.
1572 \subsubsection{Reference (Training) Data}
1573 As the~reference data, we use expert-based knowledge presented in section \ref{style-vectors}.
1574 For each reference player, that gives $4$-dimensional \emph{style vector} (each component in the
1575 range of $[1,10]$).\footnote{Since the neural network has activation function with range $[-1,1]$, we
1576 have linearly rescaled the \emph{style vectors} from interval $[1,10]$ to $[-1,1]$ before using the training
1577 data. The network's output was afterwards rescaled back to allow for MSE comparison.}
1579 All input (pattern) vectors were preprocessed using PCA, reducing the input dimension from $400$ to $23$.
1580 We measure the performance on the same reference data using $5$-fold cross validation.
1581 To put our measurements in scale, we also include a~random classifier in our results.
1583 \subsubsection{Results}
1584 The results are shown in the table \ref{crossval-cmp}. Second to fifth columns in the table represent
1585 mean square error (MSE) of the examined styles, $\mathit{Mean}$ is the
1586 mean square error across the styles and finally, the last column $\mathit{Cmp}$
1587 represents $\mathit{Mean}(\mathit{Random\ classifier}) / \mathit{Mean}(\mathit{X})$ --- comparison of mean square error
1588 of each method with the random classifier. To minimize the
1589 effect of random variables, all numbers were taken as an average of $200$ runs of the cross validation.
1591 Analysis of the performance of $k$-NN classifier for different $k$-values has shown that different
1592 $k$-values are suitable to approximate different styles. Combining the $k$-NN classifiers with the
1593 neural network (so that each style is approximated by the method with lowest MSE in that style)
1594 results in \emph{Joint classifier}, which outperforms all other methods.
1595 The \emph{Joint classifier} has outstanding MSE $3.960$, which is equivalent to standard error
1596 of $\sigma = 1.99$ per style.%
1597 \footnote{We should note that the pattern vector for each tested player
1598 was generated over at least few tens of games.}
1600 \begin{table}[!t]
1601 \renewcommand{\arraystretch}{1.4}
1602 \begin{threeparttable}
1603 \caption{Comparison of style classifiers}
1604 \label{crossval-cmp}
1605 \begin{tabular}{|c|c|c|c|c|c|c|}
1606 \hline
1607 %Classifier & $\sigma_\tau$ & $\sigma_\omega$ & $\sigma_\alpha$ & $\sigma_\theta$ & Tot $\sigma$ & $\mathit{RndC}$\\ \hline
1608 %Neural network & 0.420 & 0.488 & 0.365 & 0.371 & 0.414 & 1.82 \\
1609 %$k$-NN ($k=4$) & 0.394 & 0.507 & 0.457 & 0.341 & 0.429 & 1.76 \\
1610 %Random classifier & 0.790 & 0.773 & 0.776 & 0.677 & 0.755 & 1.00 \\ \hline
1611 &\multicolumn{5}{|c|}{MSE}& \\ \hline
1612 {Classifier} & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & {\bf Mean} & {\bf Cmp}\\ \hline
1613 Joint classifier\tnote{1} & 4.04 & {\bf 5.25} & {\bf 3.52} & {\bf 3.05} & {\bf 3.960}& 2.97 \\\hline
1614 Neural network & {\bf 4.03} & 6.15 & {\bf 3.58} & 3.79 & 4.388 & 2.68 \\
1615 $k$-NN ($k=2$) & 4.08 & 5.40 & 4.77 & 3.37 & 4.405 & 2.67 \\
1616 $k$-NN ($k=3$) & 4.05 & 5.58 & 5.06 & 3.41 & 4.524 & 2.60 \\
1617 $k$-NN ($k=1$) & 4.52 & {\bf 5.26} & 5.36 & {\bf 3.09} & 4.553 & 2.59 \\
1618 $k$-NN ($k=4$) & 4.10 & 5.88 & 5.16 & 3.60 & 4.684 & 2.51 \\
1619 Naive Bayes & 4.48 & 6.90 & 5.48 & 3.70 & 5.143 & 2.29 \\
1620 Random class. & 12.26 & 12.33 & 12.40 & 10.11 & 11.776 & 1.00 \\\hline
1622 \end{tabular}
1623 \begin{tablenotes}
1624 \item [1] Note that these measurements have a certain variance.
1625 Since the Joint classifier performance was measured from scratch,
1626 the precise values may not match appropriate cells of the used methods.
1627 \end{tablenotes}
1628 \end{threeparttable}
1629 \end{table}
1631 % TODO presunout konkretni parametry do Appendixu? (neni jich tolik, mozna ne)
1632 \subsubsection{$k$-NN parameters}
1633 All three variants of $k$-NN classifier ($k=2,3,4$) had the weight function
1634 \begin{equation}
1635 \mathit{Weight}(\vec x) = 0.8^{10*\mathit{Distance}(\vec x)}
1636 \end{equation}
1637 The parameters were chosen empirically to minimize the MSE.
1639 \subsubsection{Neural network's parameters}
1640 The neural network classifier had three-layered architecture (one hidden layer)
1641 comprising of these numbers of neurons:
1642 \vspace{4mm}
1643 \noindent
1644 %\begin{table}
1645 \begin{center}
1646 %\caption{Styles}
1647 \begin{tabular}{|c|c|c|}
1648 \hline
1649 \multicolumn{3}{|c|}{Layer} \\\hline
1650 Input & Hidden & Output \\ \hline
1651 23 & 30 & 4 \\ \hline
1652 \end{tabular}
1653 \end{center}
1654 %\end{table}
1655 \vspace{4mm}
1657 The network was trained until the square error on the training set was smaller than $0.0003$.
1658 Due to a small number of input vectors, this only took $20$ iterations of the RPROP learning algorithm on average.
1660 \subsubsection{Naive Bayes parameters}
1662 We have chosen $k = 10/7$ as our discretization parameter;
1663 ideally, we would use $k = 1$ to fully cover the style marks
1664 domain, however our training sample \rv{turns out to be} too small for
1665 that.
1667 \section{Proposed Applications}
1668 %\section{Proposed Applications and Discussion}
1669 \label{proposed-apps-and-discussion}
1671 %\rv{TODO Discussion}
1673 We believe that our findings might be useful for many applications
1674 in the area of Go support software as well as Go-playing computer engines.
1675 \rv{However, our foremost aim is to use the style analysis as an excellent
1676 teaching aid} --- classifying style
1677 dimensions based on player's pattern vector, many study recommendations
1678 can be given, e.g. about the professional games to replay, the goal being
1679 balancing understanding of various styles to achieve well-rounded skill set.%
1680 \footnote{\rv{The strength analysis could be also used in a similar fashion,
1681 but the lesson learned cannot simply be ``play pattern $X$ more often'';
1682 instead, the insight lays in the underlying reason of disproportionate
1683 frequency of some patterns.}}
1684 \rv{A user-friendly tool based on our work is currently in development.}
1686 \rv{Another promising application we envision is helping to}
1687 determine \rvvvv{the} initial real-world rating of a player before their
1688 first tournament based on a sample of their games played on the internet;
1689 some players especially in less populated areas could get fairly strong
1690 before playing in their first real tournament.
1691 \rv{Similarly, a computer Go program can quickly} classify the level of its
1692 \rv{human opponent} based on the pattern vector from \rv{their previous games}
1693 and auto-adjust its difficulty settings accordingly
1694 to provide more even games for beginners.
1695 \rvvv{This can also be achieved using} win-loss statistics,
1696 but pattern vector analysis \rv{should} converge faster \rv{initially,
1697 providing} \rvvvv{a much} \rv{better user experience}.
1699 We hope that more strong players will look into the style dimensions found
1700 by our statistical analysis --- analysis of most played patterns of prospective
1701 opponents might prepare for \rv{a tournament} game, but we especially hope that new insights
1702 on strategic purposes of various shapes and general human understanding
1703 of the game might be \rv{improved} by investigating the style-specific patterns.
1705 \rv{Of course, it is challenging to predict all possible uses of our work by others.
1706 Some less obvious applications might include}
1707 analysis of pattern vectors extracted from games of Go-playing programs:
1708 the strength and style \rv{classification} might help to highlight some weaknesses
1709 and room for improvements.%
1710 \footnote{Of course, correlation does not imply causation \rv{and we certainly do not
1711 suggest simply optimizing Go-playing programs according to these vectors.
1712 However, they could hint on general shortcomings of the playing engines if the
1713 actual cause of e.g. surprisingly low strength prediction is investigated.}}
1714 Also, new historical game records are still \rv{occasionally} being discovered;
1715 pattern-based classification might help to suggest \rv{or verify} origin of the games
1716 in Go Archeology.
1719 % An example of a floating figure using the graphicx package.
1720 % Note that \label must occur AFTER (or within) \caption.
1721 % For figures, \caption should occur after the \includegraphics.
1722 % Note that IEEEtran v1.7 and later has special internal code that
1723 % is designed to preserve the operation of \label within \caption
1724 % even when the captionsoff option is in effect. However, because
1725 % of issues like this, it may be the safest practice to put all your
1726 % \label just after \caption rather than within \caption{}.
1728 % Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
1729 % option should be used if it is desired that the figures are to be
1730 % displayed while in draft mode.
1732 %\begin{figure}[!t]
1733 %\centering
1734 %\includegraphics[width=2.5in]{myfigure}
1735 % where an .eps filename suffix will be assumed under latex,
1736 % and a .pdf suffix will be assumed for pdflatex; or what has been declared
1737 % via \DeclareGraphicsExtensions.
1738 %\caption{Simulation Results}
1739 %\label{fig_sim}
1740 %\end{figure}
1742 % Note that IEEE typically puts floats only at the top, even when this
1743 % results in a large percentage of a column being occupied by floats.
1746 % An example of a double column floating figure using two subfigures.
1747 % (The subfig.sty package must be loaded for this to work.)
1748 % The subfigure \label commands are set within each subfloat command, the
1749 % \label for the overall figure must come after \caption.
1750 % \hfil must be used as a separator to get equal spacing.
1751 % The subfigure.sty package works much the same way, except \subfigure is
1752 % used instead of \subfloat.
1754 %\begin{figure*}[!t]
1755 %\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}%
1756 %\label{fig_first_case}}
1757 %\hfil
1758 %\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
1759 %\label{fig_second_case}}}
1760 %\caption{Simulation results}
1761 %\label{fig_sim}
1762 %\end{figure*}
1764 % Note that often IEEE papers with subfigures do not employ subfigure
1765 % captions (using the optional argument to \subfloat), but instead will
1766 % reference/describe all of them (a), (b), etc., within the main caption.
1769 % An example of a floating table. Note that, for IEEE style tables, the
1770 % \caption command should come BEFORE the table. Table text will default to
1771 % \footnotesize as IEEE normally uses this smaller font for tables.
1772 % The \label must come after \caption as always.
1774 %\begin{table}[!t]
1775 %% increase table row spacing, adjust to taste
1776 %\renewcommand{\arraystretch}{1.3}
1777 % if using array.sty, it might be a good idea to tweak the value of
1778 % \extrarowheight as needed to properly center the text within the cells
1779 %\caption{An Example of a Table}
1780 %\label{table_example}
1781 %\centering
1782 %% Some packages, such as MDW tools, offer better commands for making tables
1783 %% than the plain LaTeX2e tabular which is used here.
1784 %\begin{tabular}{|c||c|}
1785 %\hline
1786 %One & Two\\
1787 %\hline
1788 %Three & Four\\
1789 %\hline
1790 %\end{tabular}
1791 %\end{table}
1794 % Note that IEEE does not put floats in the very first column - or typically
1795 % anywhere on the first page for that matter. Also, in-text middle ("here")
1796 % positioning is not used. Most IEEE journals use top floats exclusively.
1797 % Note that, LaTeX2e, unlike IEEE journals, places footnotes above bottom
1798 % floats. This can be corrected via the \fnbelowfloat command of the
1799 % stfloats package.
1803 \section{Future Research}
1804 \label{future-research}
1806 Since we are not aware of any previous research on this topic and we
1807 are limited by space and time constraints, plenty of research remains
1808 to be done in all parts of our analysis --- we have already noted
1809 many in the text above. Most significantly, different methods of generating
1810 and normalizing the $\vec p$ vectors can be explored
1811 and other data mining methods could be investigated.
1812 Better ways of visualising the relationships would be desirable,
1813 together with thorough expert examination of the internal structure
1814 of the player pattern vector space:
1815 more professional players should be consulted on the findings
1816 and for style scales calibration.
1818 It can be argued that many players adjust their style by game conditions
1819 (Go development era, handicap, komi and color, time limits, opponent)
1820 or that styles might express differently in various game stages\rvvv{.
1821 These} factors should be explored by building pattern vectors more
1822 carefully than by simply considering all moves in all games of a player.
1823 Impact of handicap and uneven games on by-strength
1824 $\vec p$ distribution should be also investigated.
1826 % TODO: Future research --- Sparse PCA
1828 \section{Conclusion}
1829 We have proposed a way to extract summary pattern information from
1830 game collections and combined this with various data mining methods
1831 to show correspondence of our pattern summaries with various player
1832 meta-information \rvvv{such as} playing strength, era of play or playing style,
1833 as ranked by expert players. We have implemented and measured our
1834 proposals in two case studies: per-rank characteristics of amateur
1835 players and per-player style/era characteristics of well-known
1836 professionals.
1838 While many details remain to be worked out,
1839 we have demonstrated that many significant correlations \rv{doubtlessly}
1840 do exist and
1841 it is practically viable to infer the player meta-information from
1842 extracted pattern summaries \rv{and we have proposed applications}
1843 for such inference. Finally, we outlined some of the many possible
1844 directions of future work in this newly staked research field
1845 on the boundary of Computer Go, Data Mining and Go Theory.
1848 % if have a single appendix:
1849 %\appendix[Proof of the Zonklar Equations]
1850 % or
1851 %\appendix % for no appendix heading
1852 % do not use \section anymore after \appendix, only \section*
1853 % is possibly needed
1855 % use appendices with more than one appendix
1856 % then use \section to start each appendix
1857 % you must declare a \section before using any
1858 % \subsection or using \label (\appendices by itself
1859 % starts a section numbered zero.)
1863 %\appendices
1864 %\section{Proof of the First Zonklar Equation}
1865 %Appendix one text goes here.
1867 %% you can choose not to have a title for an appendix
1868 %% if you want by leaving the argument blank
1869 %\section{}
1870 %Appendix two text goes here.
1873 % use section* for acknowledgement
1874 \section*{Acknowledgment}
1875 \label{acknowledgement}
1877 Foremostly, we are very grateful for detailed input on specific Go styles
1878 by Alexander Dinerstein, Motoki Noguchi and V\'{i}t Brunner.
1879 We appreciate helpful comments on our general methodology
1880 by John Fairbairn, T. M. Hall, Cyril H\"oschl, Robert Jasiek, Franti\v{s}ek Mr\'{a}z
1881 and several GoDiscussions.com users \cite{GoDiscThread}.
1882 Finally, we would like to thank Radka ``chidori'' Hane\v{c}kov\'{a}
1883 for the original research idea and acknowledge major inspiration
1884 by R\'{e}mi Coulom's paper \cite{PatElo} on the extraction of pattern information.
1887 % Can use something like this to put references on a page
1888 % by themselves when using endfloat and the captionsoff option.
1889 \ifCLASSOPTIONcaptionsoff
1890 \newpage
1895 % trigger a \newpage just before the given reference
1896 % number - used to balance the columns on the last page
1897 % adjust value as needed - may need to be readjusted if
1898 % the document is modified later
1899 %\IEEEtriggeratref{8}
1900 % The "triggered" command can be changed if desired:
1901 %\IEEEtriggercmd{\enlargethispage{-5in}}
1903 % references section
1905 % can use a bibliography generated by BibTeX as a .bbl file
1906 % BibTeX documentation can be easily obtained at:
1907 % http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
1908 % The IEEEtran BibTeX style support page is at:
1909 % http://www.michaelshell.org/tex/ieeetran/bibtex/
1910 \bibliographystyle{IEEEtran}
1911 % argument is your BibTeX string definitions and bibliography database(s)
1912 \bibliography{gostyle}
1914 % <OR> manually copy in the resultant .bbl file
1915 % set second argument of \begin to the number of references
1916 % (used to reserve space for the reference number labels box)
1917 %\begin{thebibliography}{1}
1919 %\bibitem{MasterMCTS}
1921 %\end{thebibliography}
1923 % biography section
1925 % If you have an EPS/PDF photo (graphicx package needed) extra braces are
1926 % needed around the contents of the optional argument to biography to prevent
1927 % the LaTeX parser from getting confused when it sees the complicated
1928 % \includegraphics command within an optional argument. (You could create
1929 % your own custom macro containing the \includegraphics command to make things
1930 % simpler here.)
1931 %\begin{biography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
1932 % or if you just want to reserve a space for a photo:
1934 % insert where needed to balance the two columns on the last page with
1935 % biographies
1936 %\newpage
1938 %\begin{IEEEbiographynophoto}{Jane Doe}
1939 %Biography text here.
1940 %\end{IEEEbiographynophoto}
1942 % You can push biographies down or up by placing
1943 % a \vfill before or after them. The appropriate
1944 % use of \vfill depends on what kind of text is
1945 % on the last page and whether or not the columns
1946 % are being equalized.
1948 %\vfill
1950 % Can be used to pull up biographies so that the bottom of the last one
1951 % is flush with the other column.
1952 %\enlargethispage{-5in}
1956 % that's all folks
1957 \end{document}