  prerr_endline (sprintf "Lexer error: %s" callerID);
  raise Parsing.Parse_error
let pos lexbuf = (lexeme_start lexbuf, lexeme_end lexbuf)

let advance_line_pos pos =
  { pos with pos_lnum = pos.pos_lnum + 1; pos_bol = pos.pos_cnum; }

let advance_line lexbuf =
  lexbuf.lex_curr_p <- advance_line_pos lexbuf.lex_curr_p
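
(* [Lexing] does not update line numbers on its own, hence the bookkeeping
   above. With [pos_bol] maintained, a column for error reporting can be
   recovered as [pos_cnum - pos_bol] -- a sketch, not used elsewhere here:

     let column pos = pos.pos_cnum - pos.pos_bol
*)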
  "precision", PRECISION;
  "character", CHARACTER;
  "intersect", INTERSECT;
  "temporary", TEMPORARY;
  "references", REFERENCES;
  "timestamp", TIMESTAMP;
  "constraint", CONSTRAINT;
  "interval", INTERVAL;
  "microsecond", MICROSECOND;
  "second_microsecond", SECOND_MICROSECOND;
  "minute_microsecond", MINUTE_MICROSECOND;
  "minute_second", MINUTE_SECOND;
  "hour_microsecond", HOUR_MICROSECOND;
  "hour_second", HOUR_SECOND;
  "hour_minute", HOUR_MINUTE;
  "day_microsecond", DAY_MICROSECOND;
  "day_second", DAY_SECOND;
  "day_minute", DAY_MINUTE;
  "day_hour", DAY_HOUR;
  "year_month", YEAR_MONTH;
  "duplicate", DUPLICATE;
  let all token l = k := !k @ List.map (fun x -> x,token) l in
  let func x l = all (FUNCTION x) l in
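  (* Illustrative expansion: [func T.Agg ["max"; "min"; "sum"]] appends
     [("max", FUNCTION T.Agg); ("min", FUNCTION T.Agg); ("sum", FUNCTION T.Agg)]
     to [k], i.e. every name in the list lexes to the same token. *)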
  func T.Agg ["max"; "min"; "sum"];
  func T.(Group (Int, true)) ["count"];
  func T.(Group (Float, false)) ["avg"];
  func T.(fixed Text [Text; Text]) ["strftime"];
  func T.(fixed Text [Text]) ["lower"; "upper"];
  func T.(Ret Text) ["concat"];
  func T.(Ret Any) ["coalesce"];
  func T.(Ret Int) ["length"; "random"; "unix_timestamp"; "least"; "greatest"];
  all DATETIME_FUNC ["current_date"; "current_timestamp"; "current_time"; "localtime"; "localtimestamp"; "now"];
  all DATETIME_FUNC ["getdate"]; (* mssql? *)
  all CONFLICT_ALGO ["ignore"; "abort"; "fail"; "rollback"];
  all JOIN_TYPE1 ["left"; "right"; "full"];
  all JOIN_TYPE2 ["inner"; "outer"];
  all LIKE_OP ["glob"; "regexp"; "match"];
  all AUTOINCREMENT ["autoincrement"; "auto_increment"];
  (* standard built-in types
     CHARACTER, CHARACTER VARYING, CHARACTER LARGE OBJECT,
     BINARY, BINARY VARYING, BINARY LARGE OBJECT,
     NUMERIC, DECIMAL, INTEGER, SMALLINT, BIGINT,
     FLOAT, REAL, DOUBLE PRECISION,
     DATE, TIME, TIMESTAMP, INTERVAL
  all T_INTEGER ["integer"; "int"; "smallint"; "bigint"; "tinyint"; "mediumint"; "middleint"; "serial"; "identity"];
  all T_INTEGER ["int1"; "int2"; "int3"; "int4"; "int8"]; (* integer width aliases (mysql/postgres) *)
  all T_INTEGER ["numeric"; "decimal"; "dec"; "fixed"];
  all T_INTEGER ["number"]; (* oracle *)
  all T_BOOLEAN ["bool"; "boolean"];
  all T_FLOAT ["float"; "real"; "double"; "float4"; "float8"];
  all T_BLOB ["blob"; "varbinary"; "tinyblob"; "mediumblob"; "longblob"];
  all T_TEXT ["text"; "char"; "varchar"; "tinytext"; "mediumtext"; "longtext"];
  all T_TEXT ["varchar2"]; (* oracle *)
  all T_DATETIME ["datetime"];
  all T_UUID ["uuid"]; (* http://www.postgresql.org/docs/9.4/static/datatype-uuid.html *)
   Q: Why not convert all input to lowercase before lexing?
   A: SQL is sometimes case-sensitive, and the contents of string literals
      must be preserved exactly.
module Keywords = Map.Make(String)

    let k = String.lowercase k in
    if Keywords.mem k map then
      failwith (sprintf "Lexeme %s is already associated with a keyword" k)
  List.fold_left add Keywords.empty keywords
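
(* Going through [add] makes duplicate registrations fail fast: listing the
   same lexeme twice above raises [Failure] at module initialization instead
   of silently shadowing an earlier entry. *)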
(* FIXME case sensitivity??! *)

  let str = String.lowercase str in
  try Keywords.find str keywords with Not_found -> IDENT str
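
(* Behaviour sketch: [get_ident "INTERVAL"] yields the [INTERVAL] token, while
   an unregistered name such as [get_ident "MyTable"] falls through to
   [IDENT "mytable"] -- note that identifiers are lowercased either way. *)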
let ident str = IDENT (String.lowercase str)

let as_literal ch s =
  let s = String.replace_chars (fun x -> String.make (if x = ch then 2 else 1) x) s in
  sprintf "%c%s%c" ch s ch
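
(* Example: [as_literal '\'' "it's"] doubles the embedded quote and re-wraps,
   yielding the valid SQL literal [ 'it''s' ]. *)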
let digit = ['0'-'9']
let alpha = ['a'-'z' 'A'-'Z']
let ident = (alpha) (alpha | digit | '_' )*
let wsp = [' ' '\r' '\t']
let cmnt = "--" | "//" | "#"
(* extract separate statements *)
rule ruleStatement = parse
  | ['\n' ' ' '\r' '\t']+ as tok { `Space tok }
  | cmnt wsp* "[sqlgg]" wsp+ (ident+ as n) wsp* "=" wsp* ([^'\n']* as v) '\n' { `Prop (n,v) }
  | cmnt wsp* "@" (ident+ as name) [^'\n']* '\n' { `Prop ("name",name) }
  | '"' { let s = ruleInQuotes "" lexbuf in `Token (as_literal '"' s) }
  | "'" { let s = ruleInSingleQuotes "" lexbuf in `Token (as_literal '\'' s) }
  | "$" (ident? as tag) "$" { let s = ruleInDollarQuotes tag "" lexbuf in `Token (sprintf "$%s$%s$%s$" tag s tag) }
  | cmnt as s { `Comment (s ^ ruleComment "" lexbuf) }
  | "/*" { `Comment ("/*" ^ ruleCommentMulti "" lexbuf ^ "*/") }
  | [^ ';'] as c { `Char c }
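(* The caller thus receives each statement as a stream of variants; e.g. the
   line [-- @q] yields [`Prop ("name","q")], quoted pieces arrive re-escaped
   via [as_literal] as [`Token], and everything else trickles through as
   [`Char]/[`Space]. *)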
(* extract tail of the input *)
  | _* as str { ruleTail (acc ^ str) lexbuf }
  | wsp { ruleMain lexbuf }
  (* update line number *)
  | '\n' { advance_line lexbuf; ruleMain lexbuf }
  | cmnt { ignore (ruleComment "" lexbuf); ruleMain lexbuf }
  | "/*" { ignore (ruleCommentMulti "" lexbuf); ruleMain lexbuf }
  | "/" | "%" { NUM_DIV_OP }
  | "<<" | ">>" | "|" | "&" { NUM_BIT_OP }
  | ">" | ">=" | "<=" | "<" { NUM_CMP_OP }
  | "<>" | "!=" | "==" { NUM_EQ_OP }
  | "?" { PARAM (None, pos lexbuf) }
  | [':' '@'] (ident as str) { PARAM (Some str, pos lexbuf) }
  | '"' { ident (ruleInQuotes "" lexbuf) }
  | "'" { TEXT (ruleInSingleQuotes "" lexbuf) }
  (* http://www.postgresql.org/docs/current/interactive/sql-syntax-lexical.html#SQL-SYNTAX-DOLLAR-QUOTING *)
  | "$" (ident? as tag) "$" { TEXT (ruleInDollarQuotes tag "" lexbuf) }
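  (* e.g. [$q$it's "quoted" text$q$] lexes as a single TEXT token with the
     body kept verbatim; no escaping is needed inside dollar quotes. *)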
  | "`" { ident (ruleInBackQuotes "" lexbuf) }
  | "[" { ident (ruleInBrackets "" lexbuf) }
  | ['x' 'X'] "'" { BLOB (ruleInSingleQuotes "" lexbuf) }
  | ident as str { get_ident str }
  | digit+ as str { INTEGER (int_of_string str) }
  | digit+ '.' digit+ as str { FLOAT (float_of_string str) }
  | _ { error lexbuf "ruleMain" }
(* FIXME factor out all these ruleIn* rules *)
and ruleInQuotes acc = parse
  | eof { error lexbuf "no terminating quote" }
  | '\n' { advance_line lexbuf; error lexbuf "EOL before terminating quote" }
  | "\"\"" { ruleInQuotes (acc ^ "\"") lexbuf }
  | [^'"' '\n']+ as s { ruleInQuotes (acc ^ s) lexbuf }
  | _ { error lexbuf "ruleInQuotes" }
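(* The same shape recurs in the ruleIn* rules below: consume safe runs,
   unescape the doubled delimiter (a [""] inside a quoted name stands for one
   literal quote character), and reject EOF or EOL before the terminator.
   ruleInBrackets currently skips the unescaping step (see the commented-out
   line below). *)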
and ruleInBrackets acc = parse
  | eof { error lexbuf "no terminating bracket" }
  | '\n' { advance_line lexbuf; error lexbuf "EOL before terminating bracket" }
  (* | "\"\"" { ruleInQuotes (acc ^ "\"") lexbuf } *)
  | [^']' '\n']+ { ruleInBrackets (acc ^ lexeme lexbuf) lexbuf }
  | _ { error lexbuf "ruleInBrackets" }
and ruleInSingleQuotes acc = parse
  | eof { error lexbuf "no terminating single quote" }
  | '\n' { advance_line lexbuf; error lexbuf "EOL before terminating single quote" }
  | "''" { ruleInSingleQuotes (acc ^ "'") lexbuf }
  | [^'\'' '\n']+ { ruleInSingleQuotes (acc ^ lexeme lexbuf) lexbuf }
  | _ { error lexbuf "ruleInSingleQuotes" }
and ruleInBackQuotes acc = parse
  | eof { error lexbuf "no terminating back quote" }
  | '\n' { advance_line lexbuf; error lexbuf "EOL before terminating back quote" }
  | "``" { ruleInBackQuotes (acc ^ "`") lexbuf }
  | [^'`' '\n']+ { ruleInBackQuotes (acc ^ lexeme lexbuf) lexbuf }
  | _ { error lexbuf "ruleInBackQuotes" }
and ruleInDollarQuotes tag acc = parse
  | "$" (ident? as tag_) "$" { if tag_ = tag then acc else ruleInDollarQuotes tag (acc ^ sprintf "$%s$" tag_) lexbuf }
  | eof { error lexbuf "no terminating dollar quote" }
  | '\n' { advance_line lexbuf; ruleInDollarQuotes tag (acc ^ "\n") lexbuf }
  (* match one character at a time so the "$tag$" delimiter rule always wins
     the longest-match comparison and is never swallowed by a text run *)
  | [^'\n'] { ruleInDollarQuotes tag (acc ^ lexeme lexbuf) lexbuf }
  | _ { error lexbuf "ruleInDollarQuotes" }
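(* Only an exactly matching tag terminates: lexing [$a$ x $b$ y $a$] keeps the
   inner [$b$] in the body and returns [ x $b$ y ]. *)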
and ruleComment acc = parse
  | '\n' { advance_line lexbuf; acc }
  | [^'\n']+ { let s = lexeme lexbuf in ruleComment (acc ^ s) lexbuf }
  | _ { error lexbuf "ruleComment" }
and ruleCommentMulti acc = parse
  | '\n' { advance_line lexbuf; ruleCommentMulti (acc ^ "\n") lexbuf }
  | [^'\n' '*']+ { let s = lexeme lexbuf in ruleCommentMulti (acc ^ s) lexbuf }
  | _ { error lexbuf "ruleCommentMulti" }
let parse_rule lexbuf =
  let module P = Parser_state in
  let token = ruleMain lexbuf in
      (* eprintf "ignored: %s\n" (lexeme lexbuf); *)
      if token = EOF then token else IGNORED
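
(* A minimal driver sketch (hypothetical helper, not part of this file),
   assuming tokens are pulled until EOF:

     let tokens_of_string s =
       let lexbuf = Lexing.from_string s in
       let rec loop acc =
         match parse_rule lexbuf with
         | EOF -> List.rev acc
         | t -> loop (t :: acc)
       in
       loop []
*)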