1 # Contains all the handlers and helper classes
2 module ActiveSupport::Multibyte::Handlers #:nodoc:
# Raised when an operation encounters bytes that cannot be interpreted as valid UTF-8.
3 class EncodingError < ArgumentError #:nodoc:
# Value object describing one Unicode codepoint as loaded from the bundled database:
# the code itself, its canonical combining class, and its decomposition/case mappings
# (any mapping may be nil for a given character).
6 class Codepoint #:nodoc:
7 attr_accessor :code, :combining_class, :decomp_type, :decomp_mapping, :uppercase_mapping, :lowercase_mapping
10 class UnicodeDatabase #:nodoc:
11 attr_writer :codepoints, :composition_exclusion, :composition_map, :boundary, :cp1252
13 # self-expiring methods that lazily load the Unicode database and then return the value.
# Each generated reader triggers the load below on first access; load then replaces
# these readers with plain attr_readers, so the data file is read at most once.
14 [:codepoints, :composition_exclusion, :composition_map, :boundary, :cp1252].each do |attr_name|
15 class_eval(<<-EOS, __FILE__, __LINE__)
23 # Shortcut to ucd.codepoints[]
24 def [](index); codepoints[index]; end
26 # Returns the directory in which the data files are stored
28 File.dirname(__FILE__) + '/../../values/'
31 # Returns the filename for the data file for this version
33 File.expand_path File.join(dirname, "unicode_tables.dat")
36 # Loads the unicode database and returns all the internal objects of UnicodeDatabase
37 # Once the values have been loaded, define attr_reader methods for the instance variables.
# NOTE: the tables are deserialized with Marshal.load; unicode_tables.dat ships with
# the library and must never be replaced with untrusted data.
40 @codepoints, @composition_exclusion, @composition_map, @boundary, @cp1252 = File.open(self.class.filename, 'rb') { |f| Marshal.load f.read }
42 raise IOError.new("Couldn't load the unicode tables for UTF8Handler (#{e.message}), handler is unusable")
# Fall back to empty structures so a failed/partial load never leaves nils behind.
44 @codepoints ||= Hash.new(Codepoint.new)
45 @composition_exclusion ||= []
46 @composition_map ||= {}
50 # Redefine the === method so we can write shorter rules for grapheme cluster breaks
# Boundary entries stored as Arrays get a === that returns true when any member
# (a codepoint or a Range of codepoints) matches the argument.
51 @boundary.each do |k,_|
52 @boundary[k].instance_eval do
54 detect { |i| i === other } ? true : false
56 end if @boundary[k].kind_of?(Array)
59 # define attr_reader methods for the instance variables
61 attr_reader :codepoints, :composition_exclusion, :composition_map, :boundary, :cp1252
66 # UTF8Handler implements Unicode aware operations for strings, these operations will be used by the Chars
67 # proxy when $KCODE is set to 'UTF8'.
69 # Hangul character boundaries and properties
# Constants driving the arithmetic decomposition/composition of Hangul syllables
# (conjoining jamo arithmetic as specified by the Unicode standard).
77 HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT
79 HANGUL_SLAST = HANGUL_SBASE + HANGUL_SCOUNT
80 HANGUL_JAMO_FIRST = 0x1100
81 HANGUL_JAMO_LAST = 0x11FF
83 # All the unicode whitespace
84 UNICODE_WHITESPACE = [
85 (0x0009..0x000D).to_a, # White_Space # Cc [5] <control-0009>..<control-000D>
86 0x0020, # White_Space # Zs SPACE
87 0x0085, # White_Space # Cc <control-0085>
88 0x00A0, # White_Space # Zs NO-BREAK SPACE
89 0x1680, # White_Space # Zs OGHAM SPACE MARK
90 0x180E, # White_Space # Zs MONGOLIAN VOWEL SEPARATOR
91 (0x2000..0x200A).to_a, # White_Space # Zs [11] EN QUAD..HAIR SPACE
92 0x2028, # White_Space # Zl LINE SEPARATOR
93 0x2029, # White_Space # Zp PARAGRAPH SEPARATOR
94 0x202F, # White_Space # Zs NARROW NO-BREAK SPACE
95 0x205F, # White_Space # Zs MEDIUM MATHEMATICAL SPACE
96 0x3000, # White_Space # Zs IDEOGRAPHIC SPACE
99 # BOM (byte order mark) can also be seen as whitespace, it's a non-rendering character used to distinguish
100 # between little and big endian. This is not an issue in utf-8, so it must be ignored.
101 UNICODE_LEADERS_AND_TRAILERS = UNICODE_WHITESPACE + [65279] # ZERO-WIDTH NO-BREAK SPACE aka BOM
# The alternatives below match well-formed multibyte UTF-8 sequences (2-4 bytes);
# the byte ranges exclude overlong encodings, UTF-16 surrogates, and values above U+10FFFF.
103 # Borrowed from the Kconv library by Shinji KONO - (also as seen on the W3C site)
106 [\xc2-\xdf] [\x80-\xbf] |
107 \xe0 [\xa0-\xbf] [\x80-\xbf] |
108 [\xe1-\xef] [\x80-\xbf] [\x80-\xbf] |
109 \xf0 [\x90-\xbf] [\x80-\xbf] [\x80-\xbf] |
110 [\xf1-\xf3] [\x80-\xbf] [\x80-\xbf] [\x80-\xbf] |
111 \xf4 [\x80-\x8f] [\x80-\xbf] [\x80-\xbf]
114 # Returns a regular expression pattern that matches the passed Unicode codepoints
# Each codepoint is packed to its UTF-8 character and the characters are joined
# into a plain alternation; interpolated into the leader/trailer patterns below.
115 def self.codepoints_to_pattern(array_of_codepoints) #:nodoc:
116 array_of_codepoints.collect{ |e| [e].pack 'U*' }.join('|')
118 UNICODE_TRAILERS_PAT = /(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+\Z/
119 UNICODE_LEADERS_PAT = /\A(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+/
124 # /// BEGIN String method overrides
127 # Inserts the passed string at specified codepoint offsets
128 def insert(str, offset, fragment)
130 u_unpack(str).insert(
137 # Returns the position of the passed argument in the string, counting in codepoints
138 def index(str, *args)
# Find the byte offset with the native String#index, then convert it to a codepoint
# offset by counting the codepoints in the preceding byte slice.
139 bidx = str.index(*args)
140 bidx ? (u_unpack(str.slice(0...bidx)).size) : nil
143 # Works just like the indexed replace method on string, except instead of byte offsets you specify
149 # s.chars[2] = "e" # Replace character with offset 2
154 # s.chars[1, 2] = "ö" # Replace 2 characters at character offset 1
# The last argument is always the replacement; whatever precedes it selects the range.
158 replace_by = args.pop
159 # Indexed replace with regular expressions already works
160 return str[*args] = replace_by if args.first.is_a?(Regexp)
161 result = u_unpack(str)
# Integer index (with optional length): translate to an inclusive codepoint range.
162 if args[0].is_a?(Fixnum)
163 raise IndexError, "index #{args[0]} out of string" if args[0] >= result.length
165 max = args[1].nil? ? min : (min + args[1] - 1)
166 range = Range.new(min, max)
167 replace_by = [replace_by].pack('U') if replace_by.is_a?(Fixnum)
# Range argument: used directly, but bounds-checked against the codepoint length.
168 elsif args.first.is_a?(Range)
169 raise RangeError, "#{args[0]} out of range" if args[0].min >= result.length
# Anything else is treated as a substring needle: locate it and replace its span.
172 needle = args[0].to_s
173 min = index(str, needle)
174 max = min + length(needle) - 1
175 range = Range.new(min, max)
# Splice the replacement codepoints in and write the result back into str in place.
177 result[range] = u_unpack(replace_by)
178 str.replace(result.pack('U*'))
181 # Works just like String#rjust, only integer specifies characters instead of bytes.
185 # "¾ cup".chars.rjust(8).to_s
188 # "¾ cup".chars.rjust(8, " ").to_s # Use non-breaking whitespace
190 def rjust(str, integer, padstr=' ')
191 justify(str, integer, :right, padstr)
194 # Works just like String#ljust, only integer specifies characters instead of bytes.
198 # "¾ cup".chars.ljust(8).to_s
201 # "¾ cup".chars.ljust(8, " ").to_s # Use non-breaking whitespace
203 def ljust(str, integer, padstr=' ')
204 justify(str, integer, :left, padstr)
207 # Works just like String#center, only integer specifies characters instead of bytes.
211 # "¾ cup".chars.center(8).to_s
214 # "¾ cup".chars.center(8, " ").to_s # Use non-breaking whitespace
216 def center(str, integer, padstr=' ')
217 justify(str, integer, :center, padstr)
220 # Does Unicode-aware rstrip
222 str.gsub(UNICODE_TRAILERS_PAT, '')
225 # Does Unicode-aware lstrip
227 str.gsub(UNICODE_LEADERS_PAT, '')
230 # Removes leading and trailing whitespace
232 str.gsub(UNICODE_LEADERS_PAT, '').gsub(UNICODE_TRAILERS_PAT, '')
235 # Returns the number of codepoints in the string
239 alias_method :length, :size
241 # Reverses codepoints in the string.
# Note this reverses codepoints, not grapheme clusters, so combining marks
# end up attached to the following (originally preceding) character.
243 u_unpack(str).reverse.pack('U*')
246 # Implements Unicode-aware slice with codepoints. Slicing on one point returns the codepoints for that
# The argument validation below deliberately mimics the errors raised by the native
# String#slice so the method is a drop-in replacement.
248 def slice(str, *args)
250 raise ArgumentError, "wrong number of arguments (#{args.size} for 1)" # Do as if we were native
251 elsif (args.size == 2 && !(args.first.is_a?(Numeric) || args.first.is_a?(Regexp)))
252 raise TypeError, "cannot convert #{args.first.class} into Integer" # Do as if we were native
253 elsif (args.size == 2 && !args[1].is_a?(Numeric))
254 raise TypeError, "cannot convert #{args[1].class} into Integer" # Do as if we were native
255 elsif args[0].kind_of? Range
# Range slicing can yield nil (out of bounds), which must be passed through untouched.
256 cps = u_unpack(str).slice(*args)
257 cps.nil? ? nil : cps.pack('U*')
258 elsif args[0].kind_of? Regexp
260 elsif args.size == 1 && args[0].kind_of?(Numeric)
# A single numeric index returns the codepoint itself, matching 1.8 String#[].
261 u_unpack(str)[args[0]]
263 u_unpack(str).slice(*args).pack('U*')
266 alias_method :[], :slice
268 # Convert characters in the string to uppercase
269 def upcase(str); to_case :uppercase_mapping, str; end
271 # Convert characters in the string to lowercase
272 def downcase(str); to_case :lowercase_mapping, str; end
274 # Returns a copy of +str+ with the first character converted to uppercase and the remainder to lowercase
# slice(str, 1..-1) is nil for single-character strings; `|| ''` keeps the concatenation safe.
276 upcase(slice(str, 0..0)) + downcase(slice(str, 1..-1) || '')
280 # /// Extra String methods for unicode operations
283 # Returns the KC normalization of the string by default. NFKC is considered the best normalization form for
284 # passing strings to databases and validations.
286 # * <tt>str</tt> - The string to perform normalization on.
287 # * <tt>form</tt> - The form you want to normalize in. Should be one of the following: :c, :kc, :d or :kd.
288 def normalize(str, form=ActiveSupport::Multibyte::DEFAULT_NORMALIZATION_FORM)
289 # See http://www.unicode.org/reports/tr15, Table 1
290 codepoints = u_unpack(str)
# NFD: canonical decomposition followed by canonical reordering.
293 reorder_characters(decompose_codepoints(:canonical, codepoints))
# NFC: canonical decomposition, reordering, then canonical composition.
295 compose_codepoints reorder_characters(decompose_codepoints(:canonical, codepoints))
# NFKD/NFKC use compatibility decomposition. NOTE: the internal symbol really is
# spelled :compatability throughout this file; do not "fix" it in isolation.
297 reorder_characters(decompose_codepoints(:compatability, codepoints))
299 compose_codepoints reorder_characters(decompose_codepoints(:compatability, codepoints))
301 raise ArgumentError, "#{form} is not a valid normalization variant", caller
305 # Perform decomposition on the characters in the string
307 decompose_codepoints(:canonical, u_unpack(str)).pack('U*')
310 # Perform composition on the characters in the string
# NOTE(review): this appears to hand a packed String to compose_codepoints, which
# elsewhere receives an Array of codepoints (see decompose above), and the composed
# result is never re-packed — verify the intended parenthesization.
312 compose_codepoints u_unpack(str).pack('U*')
316 # /// BEGIN Helper methods for unicode operation
319 # Used to translate an offset from bytes to characters, for instance one received from a regular expression match
320 def translate_offset(str, byte_offset)
321 return nil if byte_offset.nil?
322 return 0 if str == ''
323 chunk = str[0..byte_offset]
# Unpack the byte prefix; the number of codepoints minus one is the character index.
326 chunk.unpack('U*').length - 1
327 rescue ArgumentError => e
# The byte slice may have cut a multibyte character in half; widen the chunk by
# one byte and retry until the sequence becomes decodable again.
328 chunk = str[0..(byte_offset+=1)]
329 # Stop retrying at the end of the string
330 raise e unless byte_offset < chunk.length
331 # We damaged a character, retry
334 # Catch the ArgumentError so we can throw our own
336 raise EncodingError.new('malformed UTF-8 character')
340 # Checks if the string is valid UTF8.
342 # Unpack is a little bit faster than regular expressions
351 # Returns the number of grapheme clusters in the string. This method is very likely to be moved or renamed
352 # in future versions.
357 # Replaces all the non-utf-8 bytes by their iso-8859-1 or cp1252 equivalent resulting in a valid utf-8 string
# Bytes below 0xA0 are mapped through the CP1252 table (falling back to the byte value);
# 0xA0-0xBF and 0xC0-0xFF get the two-byte UTF-8 encoding of the same Latin-1 codepoint.
359 str.split(//u).map do |c|
360 if !UTF8_PAT.match(c)
363 n < 160 ? [UCD.cp1252[n] || n].pack('U') :
364 n < 192 ? "\xC2" + n.chr : "\xC3" + (n-64).chr
373 # Detect whether the codepoint is in a certain character class. Primarily used by the
374 # grapheme cluster support.
375 def in_char_class?(codepoint, classes)
376 classes.detect { |c| UCD.boundary[c] === codepoint } ? true : false
379 # Unpack the string at codepoints boundaries
384 raise EncodingError.new('malformed UTF-8 character')
388 # Unpack the string at grapheme boundaries instead of codepoint boundaries
390 codepoints = u_unpack(str)
394 eoc = codepoints.length
397 previous = codepoints[pos-1]
398 current = codepoints[pos]
# The numbered rules below mirror the grapheme cluster boundary rules of
# Unicode UAX #29: CRxLF, the Hangul jamo sequences (L, V, T, LV, LVT),
# and "do not break before extending characters".
401 one = ( previous == UCD.boundary[:cr] and current == UCD.boundary[:lf] ) or
403 two = ( UCD.boundary[:l] === previous and in_char_class?(current, [:l,:v,:lv,:lvt]) ) or
405 three = ( in_char_class?(previous, [:lv,:v]) and in_char_class?(current, [:v,:t]) ) or
407 four = ( in_char_class?(previous, [:lvt,:t]) and UCD.boundary[:t] === current ) or
409 five = (UCD.boundary[:extend] === current)
412 unpacked << codepoints[marker..pos-1]
419 # Reverse operation of g_unpack
424 # Justifies a string in a certain way. Valid values for <tt>way</tt> are <tt>:right</tt>, <tt>:left</tt> and
425 # <tt>:center</tt>. Is primarily used as a helper method by <tt>rjust</tt>, <tt>ljust</tt> and <tt>center</tt>.
426 def justify(str, integer, way, padstr=' ')
427 raise ArgumentError, "zero width padding" if padstr.length == 0
# padsize is measured in codepoints; clamp at zero so a too-long string is returned padded with nothing.
428 padsize = integer - size(str)
429 padsize = padsize > 0 ? padsize : 0
432 str.dup.insert(0, padding(padsize, padstr))
434 str.dup.insert(-1, padding(padsize, padstr))
# :center puts the smaller half of an odd padding on the left, like String#center.
436 lpad = padding((padsize / 2.0).floor, padstr)
437 rpad = padding((padsize / 2.0).ceil, padstr)
438 str.dup.insert(0, lpad).insert(-1, rpad)
442 # Generates a padding string of a certain size.
443 def padding(padsize, padstr=' ')
# Repeat padstr enough times to cover padsize, then slice to the exact codepoint length.
445 slice(padstr * ((padsize / size(padstr)) + 1), 0, padsize)
451 # Convert characters to a different case
# +way+ is the Codepoint mapping attribute to apply (:uppercase_mapping or
# :lowercase_mapping); codepoints without a mapping (value not > 0) pass through unchanged.
452 def to_case(way, str)
453 u_unpack(str).map do |codepoint|
457 ncp > 0 ? ncp : codepoint
464 # Re-order codepoints so the string becomes canonical
# A localized bubble sort on combining class: adjacent marks are swapped when out of
# order (never past a starter, whose class is 0), stepping back after a swap to
# re-check the previous pair.
465 def reorder_characters(codepoints)
466 length = codepoints.length- 1
468 while pos < length do
469 cp1, cp2 = UCD[codepoints[pos]], UCD[codepoints[pos+1]]
470 if (cp1.combining_class > cp2.combining_class) && (cp2.combining_class > 0)
471 codepoints[pos..pos+1] = cp2.code, cp1.code
472 pos += (pos > 0 ? -1 : 1)
480 # Decompose composed characters to the decomposed form
481 def decompose_codepoints(type, codepoints)
482 codepoints.inject([]) do |decomposed, cp|
483 # if it's a hangul syllable starter character
484 if HANGUL_SBASE <= cp and cp < HANGUL_SLAST
# Arithmetic Hangul decomposition: split the syllable index into leading
# consonant, vowel, and (optional) trailing consonant jamo.
485 sindex = cp - HANGUL_SBASE
486 ncp = [] # new codepoints
487 ncp << HANGUL_LBASE + sindex / HANGUL_NCOUNT
488 ncp << HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT
489 tindex = sindex % HANGUL_TCOUNT
# A tindex of 0 means the syllable has no trailing jamo.
490 ncp << (HANGUL_TBASE + tindex) unless tindex == 0
491 decomposed.concat ncp
492 # if the codepoint is decomposable with the current decomposition type
# Canonical passes skip codepoints that only have a compatibility decomposition
# (decomp_type set); note the intentionally misspelled internal symbol :compatability.
493 elsif (ncp = UCD[cp].decomp_mapping) and (!UCD[cp].decomp_type || type == :compatability)
494 decomposed.concat decompose_codepoints(type, ncp.dup)
501 # Compose decomposed characters to the composed form
502 def compose_codepoints(codepoints)
504 eoa = codepoints.length - 1
506 starter_char = codepoints[0]
507 previous_combining_class = -1
# -- Hangul: if the starter is a leading jamo followed by a vowel jamo (and
# optionally a trailing jamo), fuse them arithmetically into one syllable.
510 lindex = starter_char - HANGUL_LBASE
512 if 0 <= lindex and lindex < HANGUL_LCOUNT
513 vindex = codepoints[starter_pos+1] - HANGUL_VBASE rescue vindex = -1
514 if 0 <= vindex and vindex < HANGUL_VCOUNT
515 tindex = codepoints[starter_pos+2] - HANGUL_TBASE rescue tindex = -1
516 if 0 <= tindex and tindex < HANGUL_TCOUNT
524 codepoints[starter_pos..j] = (lindex * HANGUL_VCOUNT + vindex) * HANGUL_TCOUNT + tindex + HANGUL_SBASE
527 starter_char = codepoints[starter_pos]
528 # -- Other characters
# Canonical composition: look the (starter, current) pair up in the composition
# map; a hit replaces the starter and removes the combining character.
530 current_char = codepoints[pos]
531 current = UCD[current_char]
532 if current.combining_class > previous_combining_class
533 if ref = UCD.composition_map[starter_char]
534 composition = ref[current_char]
538 unless composition.nil?
539 codepoints[starter_pos] = composition
540 starter_char = composition
541 codepoints.delete_at pos
544 previous_combining_class = -1
546 previous_combining_class = current.combining_class
549 previous_combining_class = current.combining_class
# A combining class of 0 marks a new starter; composition restarts from here.
551 if current.combining_class == 0
553 starter_char = codepoints[pos]
# The shared, lazily-loaded Unicode database instance used by the handler methods above.
561 UCD = UnicodeDatabase.new