tokhash.pl: formatting changes for readability
#!/usr/bin/perl
#
# Generate a perfect hash for token parsing
#
# Usage: tokhash.pl insns.dat regs.dat tokens.dat
#

require 'phash.ph';

my($insns_dat, $regs_dat, $tokens_dat) = @ARGV;

%tokens = ();
@tokendata = ();
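
# %tokens maps each token string to its index in @tokendata; each
# @tokendata entry is the C initializer text for one "struct tokendata"
# row in the generated table.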

#
# List of condition codes
#
@conditions = ('a', 'ae', 'b', 'be', 'c', 'e', 'g', 'ge', 'l', 'le',
               'na', 'nae', 'nb', 'nbe', 'nc', 'ne', 'ng', 'nge', 'nl',
               'nle', 'no', 'np', 'ns', 'nz', 'o', 'p', 'pe', 'po', 's', 'z');
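
# Instruction names ending in "cc" in insns.dat are expanded below into
# one token per condition code.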

#
# Read insns.dat
#
open(ID, "< ${insns_dat}") or die "$0: cannot open $insns_dat: $!\n";
while (defined($line = <ID>)) {
    if ($line =~ /^([A-Z0-9_]+)(|cc)\s/) {
        $insn = $1.$2;
        ($token = $1) =~ tr/A-Z/a-z/;

        if ($2 eq '') {
            # Single instruction token
            if (!defined($tokens{$token})) {
                $tokens{$token} = scalar @tokendata;
                push(@tokendata, "\"${token}\", TOKEN_INSN, I_${insn}, 0");
            }
        } else {
            # Conditional instruction
            foreach $cc (@conditions) {
                if (!defined($tokens{$token.$cc})) {
                    $tokens{$token.$cc} = scalar @tokendata;
                    push(@tokendata, "\"${token}${cc}\", TOKEN_INSN, I_${insn}, C_\U$cc\E");
                }
            }
        }
    }
}
close(ID);

#
# Read regs.dat
#
open(RD, "< ${regs_dat}") or die "$0: cannot open $regs_dat: $!\n";
while (defined($line = <RD>)) {
    if ($line =~ /^([a-z0-9_-]+)\s/) {
        $reg = $1;
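
        # A name of the form <prefix><n>-<m><suffix> is a numbered range
        # and is expanded into one register token per number.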
        if ($reg =~ /^(.*[^0-9])([0-9]+)\-([0-9]+)(|[^0-9].*)$/) {
            $nregs = $3-$2+1;
            $reg = $1.$2.$4;
            $reg_nr = $2;
            $reg_prefix = $1;
            $reg_suffix = $4;
        } else {
            $nregs = 1;
            undef $reg_prefix;
            undef $reg_suffix;
        }

        while ($nregs--) {
            if (defined($tokens{$reg})) {
                die "Duplicate definition: $reg\n";
            }
            $tokens{$reg} = scalar @tokendata;
            push(@tokendata, "\"${reg}\", TOKEN_REG, R_\U${reg}\E, 0");

            if (defined($reg_prefix)) {
                $reg_nr++;
                $reg = sprintf("%s%u%s", $reg_prefix, $reg_nr, $reg_suffix);
            } else {
                # Not a dashed sequence
                die if ($nregs);
            }
        }
    }
}
close(RD);

#
# Read tokens.dat
#
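# Lines beginning with '%' set the data pattern for the tokens that
# follow; a '*' in the pattern is replaced with the upper-cased token
# name.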
open(TD, "< ${tokens_dat}") or die "$0: cannot open $tokens_dat: $!\n";
while (defined($line = <TD>)) {
    if ($line =~ /^\%\s+(.*)$/) {
        $pattern = $1;
    } elsif ($line =~ /^([a-z0-9_-]+)/) {
        $token = $1;

        if (defined($tokens{$token})) {
            die "Duplicate definition: $token\n";
        }
        $tokens{$token} = scalar @tokendata;

        $data = $pattern;
        $data =~ s/\*/\U$token/g;

        push(@tokendata, "\"$token\", $data");
    }
}
close(TD);

#
# Actually generate the hash
#
@hashinfo = gen_perfect_hash(\%tokens);
if (!@hashinfo) {
    die "$0: no hash found\n";
}

# Paranoia...
verify_hash_table(\%tokens, \@hashinfo);

($n, $sv, $f1, $f2, $g) = @hashinfo;
$sv2 = $sv+2;

die if ($n & ($n-1));
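
# From @hashinfo: $n is the table size (asserted above to be a power of
# two, so $n-1 works as a bit mask), $sv holds the four rotation counts
# used in the string hash, and $f1/$f2/$g define the two lookup tables
# emitted below as hash1[] and hash2[].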

print "/*\n";
print " * This file is generated from insns.dat, regs.dat and tokens.dat\n";
print " * by tokhash.pl; do not edit.\n";
print " */\n";
print "\n";

print "#include <string.h>\n";
print "#include \"nasm.h\"\n";
print "#include \"insns.h\"\n";
print "\n";

print "#define rot(x,y) (((uint32_t)(x) << (y))+((uint32_t)(x) >> (32-(y))))\n";
print "\n";

print "struct tokendata {\n";
print "    const char *string;\n";
print "    int tokentype;\n";
print "    int i1, i2;\n";
print "};\n";
print "\n";

print "int nasm_token_hash(const char *token, struct tokenval *tv)\n";
print "{\n";

# Put a large value in unused slots.  This makes it extremely unlikely
# that any combination involving an unused slot will pass the range test.
# This speeds up rejection of unrecognized tokens, i.e. identifiers.
print "#define UNUSED 16383\n";
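
# hash1[] and hash2[] hold the g-table values selected through f1 and f2;
# slots that f1/f2 never map to get the UNUSED sentinel.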

print "    static const int16_t hash1[$n] = {\n";
for ($i = 0; $i < $n; $i++) {
    my $h = ${$g}[${$f1}[$i]];
    print "        ", defined($h) ? $h : 'UNUSED', ",\n";
}
print "    };\n";

print "    static const int16_t hash2[$n] = {\n";
for ($i = 0; $i < $n; $i++) {
    my $h = ${$g}[${$f2}[$i]];
    print "        ", defined($h) ? $h : 'UNUSED', ",\n";
}
print "    };\n";

printf "    static const struct tokendata tokendata[%d] = {\n", scalar(@tokendata);
foreach $d (@tokendata) {
    print "        { ", $d, " },\n";
}
print "    };\n";

print "    uint32_t k1 = 0, k2 = 0;\n";
print "    uint8_t c;\n";
# For correct overflow behavior, "ix" should be unsigned and of the same
# width as the hash arrays.
print "    uint16_t ix;\n";
print "    const struct tokendata *data;\n";
print "    const char *p = token;\n";
print "\n";
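
# The generated code folds the token into two rolling 32-bit hashes (k1,
# k2), combines them through hash1[]/hash2[] into a candidate index, and,
# since the hash is perfect, needs only a range check plus one strcmp()
# to accept or reject the token.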
print "    while ((c = *p++) != 0) {\n";
printf "        uint32_t kn1 = rot(k1,%2d) - rot(k2,%2d) + c;\n", ${$sv}[0], ${$sv}[1];
printf "        uint32_t kn2 = rot(k2,%2d) - rot(k1,%2d) + c;\n", ${$sv}[2], ${$sv}[3];
print "        k1 = kn1; k2 = kn2;\n";
print "    }\n";
print "\n";
printf "    ix = hash1[k1 & 0x%x] + hash2[k2 & 0x%x];\n", $n-1, $n-1;
printf "    if (ix >= %d)\n", scalar(@tokendata);
print "        return -1;\n";
print "\n";
print "    data = &tokendata[ix];\n";

# print "    fprintf(stderr, \"Looked for: %s found: %s\\n\", token, data->string);\n\n";

print "    if (strcmp(data->string, token))\n";
print "        return -1;\n";
print "\n";
print "    tv->t_integer = data->i1;\n";
print "    tv->t_inttwo = data->i2;\n";
print "    return tv->t_type = data->tokentype;\n";
print "}\n";