# -*- coding: ISO-8859-1 -*-
#
# Copyright (C) 2002-2004 Jörg Lehmann <joergl@users.sourceforge.net>
# Copyright (C) 2003-2004 Michael Schindler <m-schindler@users.sourceforge.net>
# Copyright (C) 2002-2005 André Wobst <wobsta@users.sourceforge.net>
#
# This file is part of PyX (http://pyx.sourceforge.net/).
#
# PyX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA

from __future__ import nested_scopes

import math, re, ConfigParser, struct, warnings
from pyx import text
from pyx.style import linestyle
from pyx.graph import style

try:
    enumerate([])
except NameError:
    # fallback implementation for Python 2.2 and below
    def enumerate(list):
        return zip(xrange(len(list)), list)

try:
    dict()
except NameError:
    # fallback implementation for Python 2.1
    def dict(items):
        result = {}
        for key, value in items:
            result[key] = value
        return result

def splitatvalue(value, *splitpoints):
    section = 0
    while section < len(splitpoints) and splitpoints[section] < value:
        section += 1
    if len(splitpoints) > 1:
        if section % 2:
            section = None
        else:
            section >>= 1
    return (section, value)

_mathglobals = {"neg": lambda x: -x,
                "abs": lambda x: x < 0 and -x or x,
                "sgn": lambda x: x < 0 and -1 or 1,
                "sind": lambda x: math.sin(math.pi/180*x),
                "cosd": lambda x: math.cos(math.pi/180*x),
                "tand": lambda x: math.tan(math.pi/180*x),
                "asind": lambda x: 180/math.pi*math.asin(x),
                "acosd": lambda x: 180/math.pi*math.acos(x),
                "atand": lambda x: 180/math.pi*math.atan(x),
                "norm": lambda x, y: math.hypot(x, y),
                "splitatvalue": splitatvalue}
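
# Brief illustration (sketch; the helper name is made up): _mathglobals holds
# the names available inside the expressions evaluated by the data and
# function classes below, e.g. degree-based trigonometry, norm and
# splitatvalue.
def _example_mathglobals():
    assert eval("sind(90)", _mathglobals, {}) == 1.0
    assert eval("norm(3, 4)", _mathglobals, {}) == 5.0
    return eval("splitatvalue(x, 5)", _mathglobals, {"x": 2})  # -> (0, 2)
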
87 """graph data interface
89 Graph data consists in columns, where each column might be identified by a
90 string or an integer. Each row in the resulting table refers to a data
93 All methods except for the constructor should consider self and its
94 attributes to be readonly, since the data instance might be shared between
95 several graphs simultaniously.
97 The instance variable columns is a dictionary mapping column names to the
98 data of the column (i.e. to a list). Only static columns (known at
99 construction time) are contained in that dictionary. For data with numbered
100 columns the column data is also available via the list columndata.
101 Otherwise the columndata list should be missing and an access to a column
104 The names of all columns (static and dynamic) must be fixed at the constructor
105 and stated in the columnnames dictionary.
107 The instance variable title and defaultstyles contain the data title and
108 the default styles (a list of styles), respectively.
111 def dynamiccolumns(self
, graph
):
112 """create and return dynamic columns data
114 Returns dynamic data matching the given axes (the axes range and other
115 data might be used). The return value is a dictionary similar to the
116 columns instance variable.
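
# Sketch of a minimal static data provider following the interface described
# above (illustrative only, not part of the PyX API): all columns, the column
# names, the title and the default styles are fixed at construction time.
class _exampledata(_data):

    def __init__(self):
        self.columns = {"x": [1, 2, 3], "y": [1, 4, 9]}
        self.columnnames = self.columns.keys()
        self.title = "squares"
        self.defaultstyles = [style.symbol()]
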
122 "Graph data from a list of points"
124 defaultstyles
= [style
.symbol()]
126 def __init__(self
, points
, title
="user provided list", addlinenumbers
=1, **columns
):
129 self
.columndata
= [[x
] for x
in points
[0]]
130 for point
in points
[1:]:
132 raise ValueError("different number of columns per point")
133 for i
, x
in enumerate(point
):
134 self
.columndata
[i
].append(x
)
135 for v
in columns
.values():
136 if abs(v
) > l
or (not addlinenumbers
and abs(v
) == l
):
137 raise ValueError("column number bigger than number of columns")
139 self
.columndata
= [range(1, len(points
) + 1)] + self
.columndata
140 self
.columns
= dict([(key
, self
.columndata
[i
]) for key
, i
in columns
.items()])
142 self
.columns
= dict([(key
, []) for key
, i
in columns
])
143 self
.columnnames
= self
.columns
.keys()
145 self
.defaultstyles
= [style
.symbol()]
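
# Usage sketch (illustrative): build a data set from a list of rows and plot
# it; the keyword arguments map column names to column numbers, where column 0
# holds the line numbers added by default.
def _example_list_usage():
    from pyx import graph
    g = graph.graphxy(width=8)
    g.plot(graph.data.list([[1, 0.1], [2, 0.4], [3, 0.9]], x=1, y=2))
    return g
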
_columnintref = re.compile(r"\$(-?\d+)", re.IGNORECASE)

154 "creates a new data set out of an existing data set"
156 def __init__(self
, data
, title
=_notitle
, context
={}, copy
=1,
157 replacedollar
=1, columncallback
="__column__", **columns
):
159 if title
is _notitle
:
160 items
= columns
.items()
161 items
.sort() # we want sorted items (otherwise they would be unpredictable scrambled)
162 self
.title
= "%s: %s" % (text
.escapestring(data
.title
or "unkown source"),
163 ", ".join(["%s=%s" % (text
.escapestring(key
),
164 text
.escapestring(str(value
)))
165 for key
, value
in items
]))
170 self
.defaultstyles
= self
.orgdata
.defaultstyles
172 # analyse the **columns argument
174 for columnname
, value
in columns
.items():
175 # search in the columns dictionary
177 self
.columns
[columnname
] = self
.orgdata
.columns
[value
]
179 # search in the columndata list
181 self
.columns
[columnname
] = self
.orgdata
.columndata
[value
]
182 except (AttributeError, TypeError):
183 # value was not an valid column identifier
184 # i.e. take it as a mathematical expression
186 m
= _columnintref
.search(value
)
188 value
= "%s%s(%s)%s" % (value
[:m
.start()], columncallback
, m
.groups()[0], value
[m
.end():])
189 m
= _columnintref
.search(value
)
190 value
= value
.replace("$", columncallback
)
191 expression
= compile(value
.strip(), __file__
, "eval")
192 context
= context
.copy()
193 context
[columncallback
] = self
.columncallback
194 if self
.orgdata
.columns
:
195 key
, columndata
= self
.orgdata
.columns
.items()[0]
196 count
= len(columndata
)
197 elif self
.orgdata
.columndata
:
198 count
= len(self
.orgdata
.columndata
[0])
202 for i
in xrange(count
):
203 self
.columncallbackcount
= i
204 for key
, values
in self
.orgdata
.columns
.items():
205 context
[key
] = values
[i
]
207 newdata
.append(eval(expression
, _mathglobals
, context
))
208 except (ArithmeticError, ValueError):
210 self
.columns
[columnname
] = newdata
213 # copy other, non-conflicting column names
214 for columnname
, columndata
in self
.orgdata
.columns
.items():
215 if not self
.columns
.has_key(columnname
):
216 self
.columns
[columnname
] = columndata
218 self
.columnnames
= self
.columns
.keys()
220 def columncallback(self
, value
):
222 return self
.orgdata
.columndata
[value
][self
.columncallbackcount
]
224 return self
.orgdata
.columns
[value
][self
.columncallbackcount
]
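
# Usage sketch (illustrative): derive new columns from an existing data set;
# named columns of the original data may be used directly inside the
# expressions, which are evaluated with the helpers from _mathglobals.
def _example_data_usage():
    from pyx import graph
    d = graph.data.list([[1, 0.2], [2, 0.8], [3, 1.7]], x=1, y=2)
    return graph.data.data(d, x="x", ysq="y*y")
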
filecache = {}


class file(data):

    defaultcommentpattern = re.compile(r"(#+|!+|%+)\s*")
    defaultstringpattern = re.compile(r"\"(.*?)\"(\s+|$)")
    defaultcolumnpattern = re.compile(r"(.*?)(\s+|$)")

    def splitline(self, line, stringpattern, columnpattern, tofloat=1):
        """returns a tuple created out of the string line
        - matches stringpattern and columnpattern, adds the first group of that
          match to the result and removes those matches until the line is empty
        - when stringpattern matched, the result is always kept as a string
        - when columnpattern matched and tofloat is true, a conversion to a float
          is tried; when this conversion fails, the string is kept"""
        result = []
        # try to gain speed by skipping the regular expression matching
        if line.find('"')!=-1 or \
           stringpattern is not self.defaultstringpattern or \
           columnpattern is not self.defaultcolumnpattern:
            while line:
                match = stringpattern.match(line)
                if match:
                    result.append(match.groups()[0])
                    line = line[match.end():]
                else:
                    match = columnpattern.match(line)
                    if tofloat:
                        try:
                            result.append(float(match.groups()[0]))
                        except (TypeError, ValueError):
                            result.append(match.groups()[0])
                    else:
                        result.append(match.groups()[0])
                    line = line[match.end():]
            return result
        if tofloat:
            try:
                return map(float, line.split())
            except (TypeError, ValueError):
                result = []
                for r in line.split():
                    try:
                        result.append(float(r))
                    except (TypeError, ValueError):
                        result.append(r)
                return result
        return line.split()

    def getcachekey(self, *args):
        return ":".join([str(x) for x in args])

    def __init__(self, filename,
                       commentpattern=defaultcommentpattern,
                       stringpattern=defaultstringpattern,
                       columnpattern=defaultcolumnpattern,
                       skiphead=0, skiptail=0, every=1,
                       **kwargs):

        def readfile(file, title, self=self, commentpattern=commentpattern, stringpattern=stringpattern, columnpattern=columnpattern, skiphead=skiphead, skiptail=skiptail, every=every):
            columns = []
            columndata = []
            linenumber = 0
            maxcolumns = 0
            for line in file.readlines():
                match = commentpattern.match(line)
                if match:
                    if not len(columndata):
                        columns = self.splitline(line[match.end():], stringpattern, columnpattern, tofloat=0)
                else:
                    linedata = []
                    for value in self.splitline(line, stringpattern, columnpattern, tofloat=1):
                        linedata.append(value)
                    if len(linedata):
                        if linenumber >= skiphead and not ((linenumber - skiphead) % every):
                            linedata = [linenumber + 1] + linedata
                            if len(linedata) > maxcolumns:
                                maxcolumns = len(linedata)
                            columndata.append(linedata)
                        linenumber += 1
            if skiptail >= every:
                skip, x = divmod(skiptail, every)
                del columndata[-skip:]
            for i in xrange(len(columndata)):
                if len(columndata[i]) != maxcolumns:
                    columndata[i].extend([None]*(maxcolumns-len(columndata[i])))
            return list(columndata, title=title, addlinenumbers=0,
                        **dict([(column, i+1) for i, column in enumerate(columns[:maxcolumns-1])]))

        try:
            filename.readlines
        except AttributeError:
            # not a file-like object -> open it
            cachekey = self.getcachekey(filename, commentpattern, stringpattern, columnpattern, skiphead, skiptail, every)
            if not filecache.has_key(cachekey):
                filecache[cachekey] = readfile(open(filename), filename)
            data.__init__(self, filecache[cachekey], **kwargs)
        else:
            data.__init__(self, readfile(filename, "user provided file-like object"), **kwargs)
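
# Usage sketch (illustrative): read whitespace-separated columns from a text
# file and plot columns 1 and 2; the file name "input.dat" is made up, column
# 0 would refer to the line number added by the reader.
def _example_file_usage():
    from pyx import graph
    g = graph.graphxy(width=8)
    g.plot(graph.data.file("input.dat", x=1, y=2))
    return g
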
class conffile(data):

    def __init__(self, filename, **kwargs):
        """read data from a config-like file
        - filename is a string
        - each row is defined by a section in the config-like file (see
          config module description)
        - the columns for each row are defined by lines in the section file;
          the option entries identify and name the columns
        - further keyword arguments are passed to the constructor of data,
          keyword arguments data and title excluded"""

        def readfile(file, title):
            config = ConfigParser.ConfigParser()
            config.optionxform = str
            config.readfp(file)
            sections = config.sections()
            sections.sort()
            columndata = [None]*len(sections)
            maxcolumns = 1
            columns = {}
            for i in xrange(len(sections)):
                point = [sections[i]] + [None]*(maxcolumns-1)
                for option in config.options(sections[i]):
                    value = config.get(sections[i], option)
                    try:
                        value = float(value)
                    except (TypeError, ValueError):
                        pass
                    try:
                        index = columns[option]
                    except KeyError:
                        columns[option] = maxcolumns
                        point.append(value)
                        maxcolumns += 1
                    else:
                        point[index] = value
                columndata[i] = point
            # wrap result into a data instance to remove column numbers
            result = data(list(columndata, addlinenumbers=0, **columns), title=title)
            # ... but reinsert sections as linenumbers
            result.columndata = [[x[0] for x in columndata]]
            return result

        try:
            filename.readlines
        except AttributeError:
            # not a file-like object -> open it
            if not filecache.has_key(filename):
                filecache[filename] = readfile(open(filename), filename)
            data.__init__(self, filecache[filename], **kwargs)
        else:
            data.__init__(self, readfile(filename, "user provided file-like object"), **kwargs)
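
# Usage sketch (illustrative): a config-like file where every section is one
# row and the options name the columns, e.g.
#     [point1]
#     x = 1
#     y = 0.5
#     [point2]
#     x = 2
#     y = 1.5
# The file name "input.cfg" is made up for this example.
def _example_conffile_usage():
    from pyx import graph
    return graph.data.conffile("input.cfg", x="x", y="y")
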
cbdfilecache = {}


class cbdfile(data):

    def getcachekey(self, *args):
        return ":".join([str(x) for x in args])

    def __init__(self, filename, minrank=None, maxrank=None, **kwargs):

        class header:

            def __init__(self, file):
                (self.magic,
                 self.dictaddr,
                 self.segcount,
                 self.segsize,
                 self.segmax,
                 self.fill) = struct.unpack("<5i20s", file.read(40))
                if self.magic != 0x20770002:
                    raise ValueError("bad magic number")

        class segdict:

            def __init__(self, file, i):
                self.index = i
                # 28 byte dictionary entry per segment; only absaddr (the file
                # offset of the segment data), the longitude bounds and rank
                # are used below
                (self.segtype,
                 self.minlat,
                 self.maxlat,
                 self.minlong,
                 self.maxlong,
                 self.absaddr,
                 self.dummy,
                 self.rank) = struct.unpack("<6i2h", file.read(28))

        class segment:

            def __init__(self, file, sd):
                file.seek(sd.absaddr)
                (self.orgx,
                 self.orgy,
                 self.id,
                 self.nstrokes,
                 self.dummy) = struct.unpack("<3i2h", file.read(16))
                oln, olt = self.orgx, self.orgy
                self.points = [(olt, oln)]
                for i in range(self.nstrokes):
                    c1, c2 = struct.unpack("2c", file.read(2))
                    if ord(c2) & 0x40:
                        # short stroke: the two bytes hold small signed deltas
                        dx, = struct.unpack("b", c1)
                        dy = ord(c2) & 0x3f
                        if dy >= 0x20:
                            dy -= 0x40
                    else:
                        # extended stroke: six more bytes complete two
                        # little-endian 32 bit deltas; the flag bit is restored
                        # in the high byte before unpacking
                        c3, c4, c5, c6, c7, c8 = struct.unpack("6c", file.read(6))
                        c2 = chr(ord(c2) | 0x40)
                        dx, dy = struct.unpack("<2i", c3+c4+c1+c2+c7+c8+c5+c6)
                    oln += dx
                    olt += dy
                    self.points.append((olt, oln))
                sd.nstrokes = self.nstrokes

        def readfile(file, title):
            h = header(file)
            file.seek(h.dictaddr)
            sds = [segdict(file, i+1) for i in range(h.segcount)]
            sbs = [segment(file, sd) for sd in sds]

            # remove jumps at long +/- 180
            for sd, sb in zip(sds, sbs):
                if sd.minlong < -150*3600 and sd.maxlong > 150*3600:
                    for i, (lat, long) in enumerate(sb.points):
                        if long < 0:
                            sb.points[i] = lat, long + 360*3600

            columndata = []
            for sd, sb in zip(sds, sbs):
                if ((minrank is None or sd.rank >= minrank) and
                    (maxrank is None or sd.rank <= maxrank)):
                    # separate the segments by a (None, None) point
                    columndata.append((None, None))
                    columndata.extend([(long/3600.0, lat/3600.0)
                                       for lat, long in sb.points])

            result = list(columndata, title=title)
            result.defaultstyles = [style.line()]
            return result

        try:
            filename.readlines
        except AttributeError:
            # not a file-like object -> open it
            cachekey = self.getcachekey(filename, minrank, maxrank)
            if not cbdfilecache.has_key(cachekey):
                cbdfilecache[cachekey] = readfile(open(filename, "rb"), filename)
            data.__init__(self, cbdfilecache[cachekey], **kwargs)
        else:
            data.__init__(self, readfile(filename, "user provided file-like object"), **kwargs)
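
# Usage sketch (illustrative): read boundary segments from a binary .cbd file,
# keeping only the coarser segments; the file name "coastline.cbd" is made up
# for this example.
def _example_cbdfile_usage():
    from pyx import graph
    return graph.data.cbdfile("coastline.cbd", minrank=1, maxrank=2)
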
class function(_data):

    defaultstyles = [style.line()]

    assignmentpattern = re.compile(r"\s*([a-z_][a-z0-9_]*)\s*\(\s*([a-z_][a-z0-9_]*)\s*\)\s*=", re.IGNORECASE)

    def __init__(self, expression, title=_notitle, min=None, max=None,
                       points=100, context={}):
        if title is _notitle:
            self.title = expression
        else:
            self.title = title
        self.min = min
        self.max = max
        self.numberofpoints = points
        self.context = context.copy() # be safe for late evaluations
        m = self.assignmentpattern.match(expression)
        if m:
            self.yname, self.xname = m.groups()
            expression = expression[m.end():]
        else:
            raise ValueError("y(x)=... or similar expected")
        if context.has_key(self.xname):
            raise ValueError("xname in context")
        self.expression = compile(expression.strip(), __file__, "eval")
        self.columns = {}
        self.columnnames = [self.xname, self.yname]

    def dynamiccolumns(self, graph):
        dynamiccolumns = {self.xname: [], self.yname: []}

        xaxis = graph.axes[self.xname]
        from pyx.graph.axis import logarithmic
        logaxis = isinstance(xaxis.axis, logarithmic)
        if self.min is not None:
            min = self.min
        else:
            min = xaxis.data.min
        if self.max is not None:
            max = self.max
        else:
            max = xaxis.data.max
        if logaxis:
            min = math.log(min)
            max = math.log(max)
        for i in range(self.numberofpoints):
            x = min + (max-min)*i / (self.numberofpoints-1.0)
            if logaxis:
                x = math.exp(x)
            dynamiccolumns[self.xname].append(x)
            self.context[self.xname] = x
            try:
                y = eval(self.expression, _mathglobals, self.context)
            except (ArithmeticError, ValueError):
                y = None
            dynamiccolumns[self.yname].append(y)
        return dynamiccolumns
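
# Usage sketch (illustrative): sample y(x) on an explicit range and plot it;
# without min/max the range of the graph's x axis would be used instead.
def _example_function_usage():
    from pyx import graph
    g = graph.graphxy(width=8)
    g.plot(graph.data.function("y(x)=x*x", min=0, max=2, points=50))
    return g
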
class functionxy(function):

    def __init__(self, f, min=None, max=None, **kwargs):
        function.__init__(self, "y(x)=f(x)", context={"f": f}, min=min, max=max, **kwargs)
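
# Usage sketch (illustrative): functionxy wraps a Python callable by passing
# it into the expression context under the name f.
def _example_functionxy_usage():
    from pyx import graph
    return graph.data.functionxy(lambda x: math.exp(-x*x), min=-3, max=3)
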
class paramfunction(_data):

    defaultstyles = [style.line()]

    def __init__(self, varname, min, max, expression, title=_notitle, points=100, context={}):
        if context.has_key(varname):
            raise ValueError("varname in context")
        if title is _notitle:
            self.title = expression
        else:
            self.title = title
        varlist, expression = expression.split("=")
        expression = compile(expression.strip(), __file__, "eval")
        keys = [key.strip() for key in varlist.split(",")]
        self.columns = dict([(key, []) for key in keys])
        context = context.copy()
        for i in range(points):
            param = min + (max-min)*i / (points-1.0)
            context[varname] = param
            values = eval(expression, _mathglobals, context)
            for key, value in zip(keys, values):
                self.columns[key].append(value)
            if len(keys) != len(values):
                raise ValueError("unpack tuple of wrong size")
        self.columnnames = self.columns.keys()
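
# Usage sketch (illustrative): a parametric curve assigning all result columns
# at once from a tuple; here a unit circle is sampled in the parameter t.
def _example_paramfunction_usage():
    from pyx import graph
    return graph.data.paramfunction("t", 0, 360,
                                    "x, y = cosd(t), sind(t)", points=361)
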
class paramfunctionxy(paramfunction):

    def __init__(self, f, min, max, **kwargs):
        paramfunction.__init__(self, "t", min, max, "x, y = f(t)", context={"f": f}, **kwargs)
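
# Usage sketch (illustrative): paramfunctionxy wraps a Python callable that
# returns an (x, y) tuple for every parameter value, here an Archimedean
# spiral.
def _example_paramfunctionxy_usage():
    from pyx import graph
    return graph.data.paramfunctionxy(lambda t: (t*math.cos(t), t*math.sin(t)),
                                      0, 10*math.pi, points=500)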