/*
 * hed - Hexadecimal editor
 * Copyright (C) 2004 Petr Baudis <pasky@ucw.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * 'You see: Mr. Drogo, he married poor Miss Primula Brandybuck. She was our
 * Mr. Bilbo's first cousin on the mother's side (her mother being the youngest
 * of the Old Took's daughters); and Mr. Drogo was his second cousin. So Mr.
 * Frodo is his first _and_ second cousin, once removed either way, as the
 * saying is, if you follow me.'
 */
38 #include <util/numbers.h>
39 #include <util/lists.h>
43 /* This is really just trivial implementation. */
45 #define SIZE_AUTO ((size_t)-1L)
48 struct list_head list
;
50 size_t len
; // SIZE_AUTO == auto (defaults to 1 at root)
62 /* atom_evaluator returns a flag bitmask, see the HED_AEF_xxx constants */
63 typedef long (*atom_evaluator
)(struct hed_expr
*expr
, struct atom
*atom
,
64 unsigned char *scramble
, size_t len
);
65 static long atom_un(struct hed_expr
*expr
, struct atom
*atom
,
66 unsigned char *scramble
, size_t len
);
67 static long atom_bin(struct hed_expr
*expr
, struct atom
*atom
,
68 unsigned char *scramble
, size_t len
);
69 static long atom_bytestr(struct hed_expr
*expr
, struct atom
*atom
,
70 unsigned char *scramble
, size_t len
);
71 static long atom_register(struct hed_expr
*expr
, struct atom
*atom
,
72 unsigned char *scramble
, size_t len
);
73 static long atom_mark(struct hed_expr
*expr
, struct atom
*atom
,
74 unsigned char *scramble
, size_t len
);
76 typedef void (*atom_free
)(void *atom
);
78 static void expr_cleanup(struct hed_expr
*expr
);
102 /* pseudo-operations used while parsing */
110 struct atom_bytestr
{
112 size_t len
; /* Length of @bytes */
113 unsigned char bytes
[0]; /* [len] */
116 struct atom_register
{
119 struct hed_expr offset
;
/*
 * Convert one hexadecimal digit to its numeric value (0-15).
 * @x must be a valid hex digit ([0-9a-fA-F]); any other character would
 * make strchr() return NULL and the subtraction undefined.
 * NOTE(review): the signature line was missing from the extracted text;
 * reconstructed from the call sites (result stored into unsigned char).
 */
static unsigned char
unhex(char x)
{
	/* The table deliberately repeats "89" before "ABCDEF": upper-case
	 * digits then sit at indices 18-23, and the bit expression below
	 * folds both index ranges 0-15 and 18-23 onto the values 0-15
	 * (e.g. 'A' -> idx 18 -> (18 & 0x0f) | ((18 & 0x10) >> 1) = 10). */
	static const char hx[] = "0123456789abcdef89ABCDEF";
	unsigned idx = strchr(hx, x) - hx;
	return (idx & 0x0f) | ((idx & 0x10) >> 1);
}
/*
 * Advance *@sexpr past any leading whitespace.
 * NOTE(review): the return-type line was missing from the extracted text;
 * reconstructed as void (no visible caller uses a result).
 */
static void
skip_white(char **sexpr)
{
	/* Cast to unsigned char: passing a plain (possibly negative) char
	 * to isspace() is undefined behavior (C11 7.4p1). */
	while (isspace((unsigned char)**sexpr))
		(*sexpr)++;
}
142 static struct atom
*val_parse(char **sexpr
);
143 static struct hed_expr
*compile_till(struct hed_expr
*expr
,
144 char **sexpr
, char till
);
145 static long atom_eval(struct hed_expr
*expr
, struct atom
*atom
,
146 unsigned char *scramble
, size_t len
);
147 static void free_atom(struct atom
*atom
);
/*
 * Byte-order abstraction for bytestr arithmetic.  A "bytestr" is a
 * fixed-size native-endian number stored in a byte array; these macros
 * hide the host byte order so the arithmetic helpers can walk from the
 * least to the most significant byte with a uniform pointer step.
 * LOPART/HIPART name the @plen least/most significant bytes of an
 * @len-byte value.
 * NOTE(review): the #else/#endif lines were missing from the extracted
 * text and have been restored; they are forced by the #if/#elif syntax.
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
# define BYTESTR_LSB(s, len)		(s)
# define BYTESTR_STEP			(+1)
# define BYTESTR_MSB(s, len)		((s) + (len) - 1)
# define BYTESTR_LOPART(s, len, plen)	(s)
# define BYTESTR_HIPART(s, len, plen)	((s) + (len) - (plen))
# define HED_AEF_NATIVEORDER		HED_AEF_FORCELE
#elif __BYTE_ORDER == __BIG_ENDIAN
# define BYTESTR_LSB(s, len)		((s) + (len) - 1)
# define BYTESTR_STEP			(-1)
# define BYTESTR_MSB(s, len)		(s)
# define BYTESTR_LOPART(s, len, plen)	((s) + (len) - (plen))
# define BYTESTR_HIPART(s, len, plen)	(s)
# define HED_AEF_NATIVEORDER		HED_AEF_FORCEBE
#else
# error "Unsupported byte order"
#endif
168 hed_bytestr2off(unsigned char *bytestr
, size_t len
, long flags
)
170 unsigned char *p
, *q
;
172 register hed_off_t ret
;
174 if ((flags
& HED_AEF_ENDIANMASK
) == HED_AEF_KEEPENDIAN
||
175 (flags
& HED_AEF_ENDIANMASK
) == HED_AEF_NATIVEORDER
) {
176 p
= BYTESTR_MSB(bytestr
, len
);
177 q
= BYTESTR_LSB(bytestr
, len
);
178 step
= -BYTESTR_STEP
;
180 p
= BYTESTR_LSB(bytestr
, len
);
181 q
= BYTESTR_MSB(bytestr
, len
);
185 if (flags
& HED_AEF_SIGNED
&& (signed char)*p
< 0)
186 flags
|= HED_AEF_NEGATIVE
;
187 ret
= flags
& HED_AEF_NEGATIVE
? -1 : 0;
198 /* The following function always uses the native byte order.
199 * FIXME? Should the API be more analogous to hed_bytestr2off?
202 hed_off2bytestr(unsigned char *bytestr
, size_t len
, hed_off_t num
)
204 unsigned char *p
= BYTESTR_LSB(bytestr
, len
);
205 unsigned char *q
= BYTESTR_MSB(bytestr
, len
) + BYTESTR_STEP
;
213 /* bytestr arithmetics */
/*
 * Bitwise NOT of an @len-byte bytestr, in place.
 * Byte-wise complement is endian-independent, so no BYTESTR_* walk is
 * needed.  NOTE(review): body reconstructed from fragmentary source.
 */
static void
bytestr_not(unsigned char *s, size_t len)
{
	while (len--) {
		*s = ~*s;
		++s;
	}
}
/*
 * Bitwise AND of two @len-byte bytestrs; the result replaces @s1.
 * Byte-wise AND is endian-independent.
 * NOTE(review): body reconstructed from fragmentary source.
 */
static void
bytestr_and(unsigned char *s1, unsigned char *s2, size_t len)
{
	while (len--)
		*s1++ &= *s2++;
}
/*
 * Bitwise OR of two @len-byte bytestrs; the result replaces @s1.
 * Byte-wise OR is endian-independent.
 * NOTE(review): body reconstructed from fragmentary source.
 */
static void
bytestr_or(unsigned char *s1, unsigned char *s2, size_t len)
{
	while (len--)
		*s1++ |= *s2++;
}
/*
 * Bitwise XOR of two @len-byte bytestrs; the result replaces @s1.
 * Byte-wise XOR is endian-independent.
 * NOTE(review): body reconstructed from fragmentary source.
 */
static void
bytestr_xor(unsigned char *s1, unsigned char *s2, size_t len)
{
	while (len--)
		*s1++ ^= *s2++;
}
244 /* Get the maximum length of the shift bytestr and check that the
245 * actual value in @s fits into that size. If @s is larger than
246 * the maximum shift for @len-sized numbers, return 0. This would
247 * be otherwise an invalid value, because even a zero-sized bytestr
251 max_shiftlen(unsigned char *s
, size_t len
)
253 size_t maxshiftoff
= ((bitsize(len
) + 3) - 1) >> 3;
254 unsigned char *p
= BYTESTR_LSB(s
, len
) + maxshiftoff
* BYTESTR_STEP
;
255 while (p
!= BYTESTR_MSB(s
, len
)) {
260 return maxshiftoff
+ 1;
263 /* Get the shift size in bytes. */
265 shift_bytecount(unsigned char *s
, size_t len
, size_t shiftlen
)
269 s
= BYTESTR_LSB(s
, len
) + shiftlen
* BYTESTR_STEP
;
272 ret
= (ret
<< 8) | carry
| (*s
>> 3);
273 carry
= (*s
& 7) << 5;
279 bytestr_shl(unsigned char *s1
, unsigned char *s2
, size_t len
)
281 size_t shiftlen
, byteoff
, bitshift
;
283 shiftlen
= max_shiftlen(s2
, len
);
284 byteoff
= shiftlen
? shift_bytecount(s2
, len
, shiftlen
) : len
;
288 bitshift
= *BYTESTR_LSB(s2
, len
) & 7;
289 for (p
= BYTESTR_MSB(s1
, len
) - byteoff
* BYTESTR_STEP
;
290 p
!= BYTESTR_LSB(s1
, len
);
292 p
[byteoff
* BYTESTR_STEP
] =
294 p
[-BYTESTR_STEP
] >> (8 - bitshift
);
295 p
[byteoff
* BYTESTR_STEP
] = *p
<< bitshift
;
298 memset(BYTESTR_LOPART(s1
, len
, byteoff
), 0, byteoff
);
302 bytestr_shr(unsigned char *s1
, unsigned char *s2
, size_t len
)
304 size_t shiftlen
, byteoff
, bitshift
;
306 shiftlen
= max_shiftlen(s2
, len
);
307 byteoff
= shiftlen
? shift_bytecount(s2
, len
, shiftlen
) : len
;
311 bitshift
= *BYTESTR_LSB(s2
, len
) & 7;
312 for (p
= BYTESTR_LSB(s1
, len
) + byteoff
* BYTESTR_STEP
;
313 p
!= BYTESTR_MSB(s1
, len
);
315 p
[-byteoff
* BYTESTR_STEP
] =
317 p
[BYTESTR_STEP
] << (8 - bitshift
);
318 p
[-byteoff
* BYTESTR_STEP
] = *p
>> bitshift
;
321 memset(BYTESTR_HIPART(s1
, len
, byteoff
), 0, byteoff
);
325 bytestr_inc(unsigned char *s
, size_t len
)
328 s
= BYTESTR_LSB(s
, len
);
332 } while (carry
&& --len
);
337 bytestr_add(unsigned char *s1
, unsigned char *s2
, size_t len
)
340 s1
= BYTESTR_LSB(s1
, len
);
341 s2
= BYTESTR_LSB(s2
, len
);
353 bytestr_sub(unsigned char *s1
, unsigned char *s2
, size_t len
)
356 s1
= BYTESTR_LSB(s1
, len
);
357 s2
= BYTESTR_LSB(s2
, len
);
368 /* multiply @src by @mul and add it to @dst */
370 muladd(unsigned char *dst
, unsigned char *src
, size_t len
, unsigned char mul
)
373 unsigned char *p
= BYTESTR_LSB(src
, len
);
374 unsigned char *q
= BYTESTR_LSB(dst
, len
);
375 while (p
!= BYTESTR_MSB(src
, len
) + BYTESTR_STEP
) {
376 carry
+= *p
* mul
+ *q
;
386 bytestr_mul(unsigned char *s1
, unsigned char *s2
, size_t len
)
388 unsigned char ret
[len
];
392 for (mlen
= len
; mlen
; --mlen
)
393 muladd(BYTESTR_HIPART(ret
, len
, mlen
),
394 BYTESTR_LOPART(s1
, len
, mlen
), mlen
,
395 BYTESTR_MSB(s2
, len
)[(1-mlen
) * BYTESTR_STEP
]);
396 memcpy(s1
, ret
, len
);
400 shl_one(unsigned char *s
, size_t len
, unsigned carry
)
402 unsigned char *p
= BYTESTR_LSB(s
, len
);
403 while (p
!= BYTESTR_MSB(s
, len
) + BYTESTR_STEP
) {
412 /* Simple non-restoring algorithm for now.
413 * Feel free to improve.
416 bytestr_div(unsigned char *s1
, unsigned char *s2
, size_t len
,
419 unsigned char *p
, mask
;
422 p
= BYTESTR_MSB(s1
, len
);
424 while (p
!= BYTESTR_LSB(s1
, len
) - BYTESTR_STEP
) {
425 if (shl_one(mod
, len
, !!(*p
& mask
)))
426 bytestr_add(mod
, s2
, len
);
428 bytestr_sub(mod
, s2
, len
);
430 if (*BYTESTR_MSB(mod
, len
) & 0x80)
435 if (! (mask
>>= 1) ) {
440 if (*BYTESTR_MSB(mod
, len
) & 0x80)
441 bytestr_add(mod
, s2
, len
);
444 /* Remove any MSB zeroes from @s and update *@plen. */
445 static unsigned char*
446 bytestr_shrink(unsigned char *s
, size_t *plen
)
448 size_t origlen
= *plen
;
449 unsigned char *p
= BYTESTR_MSB(s
, origlen
);
451 while (*plen
> 1 && !*p
) {
455 return memmove(s
, BYTESTR_LOPART(s
, origlen
, *plen
), *plen
);
459 bytestr_unop(unsigned char *s
, size_t len
, long flags
, enum atom_un_op op
)
465 if (op
== AUO_MINUS
) {
466 if (!bytestr_inc(s
, len
))
467 flags
^= HED_AEF_NEGATIVE
;
478 bytestr_binop(unsigned char *s1
, unsigned char *s2
, size_t len
,
479 long flags1
, long flags2
, enum atom_bin_op op
)
483 int sign
= !!(flags1
& HED_AEF_NEGATIVE
);
484 int sign2
= !!(flags2
& HED_AEF_NEGATIVE
);
485 long ret
= (flags1
& ~HED_AEF_NEGATIVE
) | (flags2
& HED_AEF_DYNAMIC
);
490 bytestr_or(s1
, s2
, len
);
494 bytestr_and(s1
, s2
, len
);
498 bytestr_xor(s1
, s2
, len
);
501 carry
= bytestr_add(s1
, s2
, len
);
502 sign
^= (sign
^ sign2
) & (sign
^ !carry
);
505 carry
= bytestr_sub(s1
, s2
, len
);
506 sign
^= ((sign
^ sign2
) | (sign
^ !carry
)) ^ 1;
510 bytestr_mul(s1
, s2
, len
);
515 if (! (smod
= malloc(len
)) ) {
516 flags1
= HED_AEF_ERROR
;
519 bytestr_div(s1
, s2
, len
, smod
);
521 memcpy(s1
, smod
, len
);
525 bytestr_shl(s1
, s2
, len
);
528 bytestr_shr(s1
, s2
, len
);
534 ret
|= HED_AEF_NEGATIVE
& -sign
;
539 alloc_atom(enum atom_type type
, size_t len
)
541 static const size_t sizes
[] = {
542 sizeof(struct atom_un
),
543 sizeof(struct atom_bin
),
544 sizeof(struct atom_bytestr
),
545 sizeof(struct atom_register
),
546 sizeof(struct atom_mark
)
548 size_t asize
= sizes
[type
];
551 if (type
== ATOM_BYTESTR
)
554 a
= calloc(1, asize
);
563 parse_un(char **sexpr
, enum atom_un_op op
)
569 sa
= val_parse(sexpr
);
573 a
= alloc_atom(ATOM_UN
, sa
->len
);
584 parse_register(char **sexpr
)
586 struct atom_register
*a
;
587 char reg
= *(*sexpr
)++;
589 a
= alloc_atom(ATOM_REGISTER
, SIZE_AUTO
);
594 INIT_LIST_HEAD(&a
->offset
.atoms
);
595 if (**sexpr
== '[') {
597 if (!compile_till(&a
->offset
, sexpr
, ']')) {
608 parse_mark(char **sexpr
)
611 char mark
= *(*sexpr
)++;
613 a
= alloc_atom(ATOM_MARK
, sizeof(hed_off_t
));
621 parse_string(char **sexpr
)
623 struct atom_bytestr
*a
;
628 while (**sexpr
&& **sexpr
!= '\'') {
629 if (**sexpr
== '\\') {
638 a
= alloc_atom(ATOM_BYTESTR
, i
);
642 for (i
= 0; base
< *sexpr
; i
++, base
++) {
652 parse_hexstr(char **sexpr
)
654 struct atom_bytestr
*a
;
659 while (isxdigit(**sexpr
))
662 a
= alloc_atom(ATOM_BYTESTR
, (*sexpr
- base
+ 1)/2);
668 if ((*sexpr
- base
) % 2)
669 a
->bytes
[i
++] = unhex(*base
++);
670 while (base
< *sexpr
) {
671 a
->bytes
[i
++] = (unhex(base
[0]) << 4) + unhex(base
[1]);
677 static struct atom_bytestr
*
678 parse_number_hex(char **sexpr
)
684 struct atom_bytestr
*ret
;
686 for (q
= p
= *sexpr
; isxdigit(*q
); ++q
);
688 len
= ((size_t)(q
- p
) + 1) >> 1;
690 if (! (ret
= alloc_atom(ATOM_BYTESTR
, len
)) )
693 s
= BYTESTR_LSB(ret
->bytes
, *plen
);
696 *s
|= unhex(*q
) << shift
;
707 static struct atom_bytestr
*
708 parse_number_oct(char **sexpr
)
714 struct atom_bytestr
*ret
;
716 for (q
= p
= *sexpr
; *q
>= '0' && *q
<= '7'; ++q
);
718 len
= ((size_t)(q
- p
) * 3 + 7)/8 ?: 1;
719 if (! (ret
= alloc_atom(ATOM_BYTESTR
, len
)) )
722 s
= BYTESTR_LSB(ret
->bytes
, len
);
725 acc
|= (*q
- '0') << shift
;
737 bytestr_shrink(ret
->bytes
, &ret
->a
.len
);
741 /* Number of bits per decimal digit scaled by 256 (8 bit shift) */
742 #define SBITSPERDECDIG 851
744 static struct atom_bytestr
*
745 parse_number_dec(char **sexpr
)
750 struct atom_bytestr
*ret
;
752 /* We must make sure that the resulting bytestr will fit into
753 * the allocated space, so we must always round to the nearest
754 * higher integer. Because of this, the approximation of log2(10)
755 * and the dependency on the actual number, the allocated space
756 * may be larger than necessary.
758 for (p
= q
= *sexpr
; isdigit(*q
); ++q
);
760 len
= (((size_t)(q
- p
) * SBITSPERDECDIG
+ 255)/256 + 7)/8 ?: 1;
761 if (! (ret
= alloc_atom(ATOM_BYTESTR
, len
)) )
765 unsigned carry
= *p
++ - '0';
766 for (s
= BYTESTR_LSB(ret
->bytes
, len
);
767 s
!= BYTESTR_MSB(ret
->bytes
, len
) + BYTESTR_STEP
;
775 bytestr_shrink(ret
->bytes
, &ret
->a
.len
);
780 parse_number(char **sexpr
)
782 struct atom_bytestr
*a
;
784 if (**sexpr
== '0') {
786 if ((**sexpr
== 'x' || **sexpr
== 'X') &&
787 isxdigit((*sexpr
)[1])) {
789 a
= parse_number_hex(sexpr
);
791 a
= parse_number_oct(sexpr
);
793 a
= parse_number_dec(sexpr
);
802 val_parse(char **sexpr
)
804 static const char uopc
[] = "~-"; // in atom_un_op order
806 struct atom
*ret
= NULL
;
808 if ( (uop
= strchr(uopc
, **sexpr
)) ) {
810 ret
= parse_un(sexpr
, uop
- uopc
);
811 } else if (**sexpr
== '"') {
813 ret
= parse_register(sexpr
);
814 } else if (**sexpr
== '@') {
816 ret
= parse_mark(sexpr
);
817 } else if (**sexpr
== '\'') {
819 ret
= parse_string(sexpr
);
820 } else if (**sexpr
== 'x') {
822 ret
= parse_hexstr(sexpr
);
823 } else if (isdigit(**sexpr
)) {
824 ret
= parse_number(sexpr
);
857 ret
->len
= strtoull(*sexpr
, sexpr
, 0);
858 if (**sexpr
!= '}') {
868 /* return the precedence of the operator @op */
870 precedence(enum atom_bin_op op
)
872 static const int op2prec
[] = {
886 assert(op
>= 0 && op
< sizeof(op2prec
)/sizeof(int));
890 /* Reduce the stack up to an operator with a higher precedence than @till
891 * Returns the first non-reduced atom, or NULL if there are no more atoms
894 static struct atom_bin
*
895 reduce_stack(struct hed_expr
*expr
, struct list_head
*stack
, int minprec
)
897 while (!list_empty(stack
)) {
898 struct atom_bin
*bin
;
900 bin
= list_entry(stack
->next
, struct atom_bin
, a
.list
);
901 if (precedence(bin
->op
) < minprec
)
904 if (expr
->atoms
.prev
== expr
->atoms
.next
)
905 return bin
; /* zero or one element */
907 bin
->atom2
= list_entry(expr
->atoms
.prev
, struct atom
, list
);
908 list_del(&bin
->atom2
->list
);
909 bin
->atom1
= list_entry(expr
->atoms
.prev
, struct atom
, list
);
910 list_del(&bin
->atom1
->list
);
912 list_move_tail(&bin
->a
.list
, &expr
->atoms
);
914 /* Note that BOTH may be SIZE_AUTO, but that's ok */
915 if (bin
->atom1
->len
== SIZE_AUTO
)
916 bin
->a
.len
= bin
->atom1
->len
= bin
->atom2
->len
;
917 else if (bin
->atom2
->len
== SIZE_AUTO
)
918 bin
->a
.len
= bin
->atom2
->len
= bin
->atom1
->len
;
920 bin
->a
.len
= bin
->atom1
->len
>= bin
->atom2
->len
921 ? bin
->atom1
->len
: bin
->atom2
->len
;
927 static struct hed_expr
*
928 compile_till(struct hed_expr
*expr
, char **sexpr
, char till
)
930 static const char bopc
[] = "|&^+-*/%<>"; // in atom_bin_op order
934 struct atom_bin
*bin
;
935 struct list_head binstack
;
937 INIT_LIST_HEAD(&binstack
);
939 while (**sexpr
!= till
) {
942 if (**sexpr
== '(') {
943 bin
= alloc_atom(ATOM_BIN
, 0);
946 bin
->op
= ABO_LPAREN
;
947 list_add(&bin
->a
.list
, &binstack
);
952 a
= val_parse(sexpr
);
955 list_add_tail(&a
->list
, &expr
->atoms
);
958 if (**sexpr
== ')') {
959 bin
= reduce_stack(expr
, &binstack
,
960 precedence(ABO_RPAREN
));
962 goto err
; /* mismatched parenthesis */
963 list_del(&bin
->a
.list
);
968 bop
= strchr(bopc
, **sexpr
);
972 if (*bop
== '<' || *bop
== '>') {
978 bin
= alloc_atom(ATOM_BIN
, 0);
981 bin
->op
= bop
- bopc
;
982 reduce_stack(expr
, &binstack
, precedence(bin
->op
));
983 list_add(&bin
->a
.list
, &binstack
);
987 if (reduce_stack(expr
, &binstack
, precedence(ABO_RPAREN
)))
990 list_for_each_entry(a
, &expr
->atoms
, list
) {
991 if (a
->len
== SIZE_AUTO
)
997 expr
->buf
= realloc(expr
->buf
, expr
->len
);
1005 list_splice(&binstack
, &expr
->atoms
);
1011 hed_expr_new(char *sexpr
,
1012 hed_expr_reg_cb reg_cb
, hed_expr_mark_cb mark_cb
, void *cb_data
)
1014 struct hed_expr
*expr
= calloc(1, sizeof(struct hed_expr
));
1019 expr
->reg_cb
= reg_cb
;
1020 expr
->mark_cb
= mark_cb
;
1021 expr
->cb_data
= cb_data
;
1022 INIT_LIST_HEAD(&expr
->atoms
);
1024 if (compile_till(expr
, &sexpr
, '\0'))
1032 atom_un(struct hed_expr
*expr
, struct atom
*atom
,
1033 unsigned char *scramble
, size_t len
)
1035 struct atom_un
*data
= (struct atom_un
*) atom
;
1039 flags
= atom_eval(expr
, data
->atom
, scramble
, len
);
1040 if (flags
& HED_AEF_ERROR
)
1042 return bytestr_unop(scramble
, len
, flags
, data
->op
);
1046 do_atom_bin(struct hed_expr
*expr
, struct atom_bin
*data
,
1047 unsigned char *scramble
, size_t len
, unsigned char *tmp
)
1049 long flags1
, flags2
;
1051 assert(data
->atom1
&& data
->atom2
);
1053 flags1
= atom_eval(expr
, data
->atom1
, scramble
, len
);
1054 if (flags1
& HED_AEF_ERROR
)
1057 flags2
= atom_eval(expr
, data
->atom2
, tmp
, len
);
1058 if (flags2
& HED_AEF_ERROR
)
1061 return bytestr_binop(scramble
, tmp
, len
,
1062 flags1
, flags2
, data
->op
);
1066 atom_bin(struct hed_expr
*expr
, struct atom
*atom
,
1067 unsigned char *scramble
, size_t len
)
1069 struct atom_bin
*data
= (struct atom_bin
*) atom
;
1075 return HED_AEF_ERROR
;
1077 ret
= do_atom_bin(expr
, data
, scramble
, len
, tmp
);
1084 atom_bytestr(struct hed_expr
*expr
, struct atom
*atom
,
1085 unsigned char *scramble
, size_t len
)
1087 struct atom_bytestr
*data
= (struct atom_bytestr
*) atom
;
1089 if (len
> data
->len
) {
1090 memcpy(BYTESTR_LOPART(scramble
, len
, data
->len
),
1091 data
->bytes
, data
->len
);
1092 memset(BYTESTR_HIPART(scramble
, len
, len
- data
->len
),
1093 0, len
- data
->len
);
1096 BYTESTR_LOPART(data
->bytes
, data
->len
, len
), len
);
1101 atom_mark(struct hed_expr
*expr
, struct atom
*atom
,
1102 unsigned char *scramble
, size_t len
)
1104 struct atom_mark
*data
= (struct atom_mark
*) atom
;
1106 if (!expr
->mark_cb
) {
1107 memset(scramble
, 0, len
);
1111 return expr
->mark_cb(expr
->cb_data
, data
->mark
, scramble
, len
);
1115 atom_register(struct hed_expr
*expr
, struct atom
*atom
,
1116 unsigned char *scramble
, size_t len
)
1118 struct atom_register
*data
= (struct atom_register
*) atom
;
1121 if (!expr
->reg_cb
) {
1122 memset(scramble
, 0, len
);
1126 data
->offset
.reg_cb
= expr
->reg_cb
;
1127 data
->offset
.mark_cb
= expr
->mark_cb
;
1128 data
->offset
.cb_data
= expr
->cb_data
;
1129 if (hed_expr_eval(&data
->offset
) & HED_AEF_ERROR
)
1130 return HED_AEF_ERROR
;
1132 ofs
= hed_expr2off(&data
->offset
);
1134 if (data
->reg
== '.')
1135 ofs
+= expr
->cb_pos
;
1136 return expr
->reg_cb(expr
->cb_data
, data
->reg
, ofs
,
1141 atom_eval(struct hed_expr
*expr
, struct atom
*atom
,
1142 unsigned char *scramble
, size_t len
)
1147 switch (atom
->type
) {
1148 case ATOM_UN
: return atom_un(expr
, atom
, scramble
, len
);
1149 case ATOM_BIN
: return atom_bin(expr
, atom
, scramble
, len
);
1150 case ATOM_BYTESTR
: return atom_bytestr(expr
, atom
, scramble
, len
);
1151 case ATOM_REGISTER
: return atom_register(expr
, atom
, scramble
, len
);
1152 case ATOM_MARK
: return atom_mark(expr
, atom
, scramble
, len
);
1153 default: assert(0); return HED_AEF_ERROR
;
1158 hed_expr_eval(struct hed_expr
*expr
)
1165 assert(expr
&& expr
->len
>= 0);
1166 list_for_each_entry (a
, &expr
->atoms
, list
) {
1169 flags
= atom_eval(expr
, a
,
1170 expr
->buf
+ expr
->cb_pos
, a
->len
);
1171 expr
->flags
|= (flags
& (HED_AEF_DYNAMIC
| HED_AEF_ERROR
));
1173 expr
->flags
|= (flags
& HED_AEF_NEGATIVE
);
1175 expr
->flags
&= ~HED_AEF_NEGATIVE
;
1176 expr
->cb_pos
+= a
->len
;
1178 assert(expr
->cb_pos
== expr
->len
);
1183 cleanup_un(struct atom_un
*un
)
1185 free_atom(un
->atom
);
1189 cleanup_bin(struct atom_bin
*bin
)
1192 free_atom(bin
->atom1
);
1194 free_atom(bin
->atom2
);
1198 cleanup_register(struct atom_register
*reg
)
1200 expr_cleanup(®
->offset
);
1204 free_atom(struct atom
*atom
)
1206 if (atom
->type
== ATOM_UN
)
1207 cleanup_un((struct atom_un
*) atom
);
1208 else if (atom
->type
== ATOM_BIN
)
1209 cleanup_bin((struct atom_bin
*) atom
);
1210 else if (atom
->type
== ATOM_REGISTER
)
1211 cleanup_register((struct atom_register
*) atom
);
1217 expr_cleanup(struct hed_expr
*expr
)
1219 struct atom
*a
, *next
;
1221 list_for_each_entry_safe (a
, next
, &expr
->atoms
, list
)
1228 hed_expr_free(struct hed_expr
*expr
)