4 * hed - Hexadecimal editor
5 * Copyright (C) 2004 Petr Baudis <pasky@ucw.cz>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * 'You see: Mr. Drogo, he married poor Miss Primula Brandybuck. She was our
23 * Mr. Bilbo's first cousin on the mother's side (her mother being the youngest
24 * of the Old Took's daughters); and Mr. Drogo was his second cousin. So Mr.
25 * Frodo is his first _and_ second cousin, once removed either way, as the
26 * saying is, if you follow me.
38 #include <util/numbers.h>
39 #include <util/lists.h>
43 /* This is really just trivial implementation. */
45 #define SIZE_AUTO ((size_t)-1L)
48 struct list_head list
;
50 size_t len
; // SIZE_AUTO == auto (defaults to 1 at root)
62 /* atom_evaluator returns a flag bitmask, see the HED_AEF_xxx constants */
63 typedef long (*atom_evaluator
)(struct hed_expr
*expr
, struct atom
*atom
,
64 unsigned char *scramble
, size_t len
);
65 static long atom_un(struct hed_expr
*expr
, struct atom
*atom
,
66 unsigned char *scramble
, size_t len
);
67 static long atom_bin(struct hed_expr
*expr
, struct atom
*atom
,
68 unsigned char *scramble
, size_t len
);
69 static long atom_bytestr(struct hed_expr
*expr
, struct atom
*atom
,
70 unsigned char *scramble
, size_t len
);
71 static long atom_register(struct hed_expr
*expr
, struct atom
*atom
,
72 unsigned char *scramble
, size_t len
);
73 static long atom_mark(struct hed_expr
*expr
, struct atom
*atom
,
74 unsigned char *scramble
, size_t len
);
75 static atom_evaluator evals
[ATOM_T_MAX
] = {
76 atom_un
, atom_bin
, atom_bytestr
, atom_register
, atom_mark
79 typedef void (*atom_free
)(void *atom
);
80 static void free_un(void *p
);
81 static void free_bin(void *p
);
82 static void free_register(void *p
);
83 static atom_free frees
[ATOM_T_MAX
] = {
84 free_un
, free_bin
, free
, free_register
, free
87 static void expr_cleanup(struct hed_expr
*expr
);
111 /* pseudo-operations used while parsing */
119 struct atom_bytestr
{
121 size_t len
; /* Length of @bytes */
122 unsigned char bytes
[0]; /* [len] */
125 struct atom_register
{
128 struct hed_expr offset
;
137 free_atom(struct atom
*atom
)
139 frees
[atom
->type
](atom
);
/* Decode a single hexadecimal digit (0-9, a-f, A-F) to its value.
 * The lookup string deliberately places "89ABCDEF" at offset 16:
 * folding bit 4 of the index down one position then maps 'A'..'F'
 * to 10..15 exactly as 'a'..'f' map directly. */
static unsigned char
unhex(char x)
{
	static const char hx[] = "0123456789abcdef89ABCDEF";
	unsigned idx = strchr(hx, x) - hx;
	return (idx & 0x0f) | ((idx & 0x10) >> 1);
}
/* Advance *@sexpr past any leading whitespace.
 * The cast is required: passing a plain (possibly negative) char to
 * isspace() is undefined behavior per the C standard. */
static void
skip_white(char **sexpr)
{
	while (isspace((unsigned char)**sexpr))
		++*sexpr;
}
/* Forward declaration: the parser driver, which consumes *@sexpr up
 * to the terminator character @till. */
static struct hed_expr *compile_till(struct hed_expr *expr,
				     char **sexpr, char till);
/* Byte-order-neutral addressing of multi-byte strings:
 * LSB/MSB yield the address of the least/most significant byte of an
 * @len-byte string, STEP is the increment moving towards more
 * significant bytes, and LOPART/HIPART address the low/high @plen
 * bytes of the value. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
# define BYTESTR_LSB(s, len)		(s)
# define BYTESTR_STEP			(+1)
# define BYTESTR_MSB(s, len)		((s) + (len) - 1)
# define BYTESTR_LOPART(s, len, plen)	(s)
# define BYTESTR_HIPART(s, len, plen)	((s) + (len) - (plen))
# define HED_AEF_NATIVEORDER		HED_AEF_FORCELE
#elif __BYTE_ORDER == __BIG_ENDIAN
# define BYTESTR_LSB(s, len)		((s) + (len) - 1)
# define BYTESTR_STEP			(-1)
# define BYTESTR_MSB(s, len)		(s)
# define BYTESTR_LOPART(s, len, plen)	((s) + (len) - (plen))
# define BYTESTR_HIPART(s, len, plen)	(s)
# define HED_AEF_NATIVEORDER		HED_AEF_FORCEBE
#else
# error "Unsupported byte order"
#endif
180 hed_bytestr2off(unsigned char *bytestr
, size_t len
, long flags
)
182 unsigned char *p
, *q
;
184 register hed_off_t ret
;
186 if ((flags
& HED_AEF_ENDIANMASK
) == HED_AEF_KEEPENDIAN
||
187 (flags
& HED_AEF_ENDIANMASK
) == HED_AEF_NATIVEORDER
) {
188 p
= BYTESTR_MSB(bytestr
, len
);
189 q
= BYTESTR_LSB(bytestr
, len
);
190 step
= -BYTESTR_STEP
;
192 p
= BYTESTR_LSB(bytestr
, len
);
193 q
= BYTESTR_MSB(bytestr
, len
);
197 if (flags
& HED_AEF_SIGNED
&& (signed char)*p
< 0)
198 flags
|= HED_AEF_NEGATIVE
;
199 ret
= flags
& HED_AEF_NEGATIVE
? -1 : 0;
210 /* The following function always uses the native byte order.
211 * FIXME? Should the API be more analogous to hed_bytestr2off?
214 hed_off2bytestr(unsigned char *bytestr
, size_t len
, hed_off_t num
)
216 unsigned char *p
= BYTESTR_LSB(bytestr
, len
);
217 unsigned char *q
= BYTESTR_MSB(bytestr
, len
) + BYTESTR_STEP
;
225 /* bytestr arithmetics */
227 bytestr_not(unsigned char *s
, size_t len
)
236 bytestr_and(unsigned char *s1
, unsigned char *s2
, size_t len
)
243 bytestr_or(unsigned char *s1
, unsigned char *s2
, size_t len
)
250 bytestr_xor(unsigned char *s1
, unsigned char *s2
, size_t len
)
256 /* Get the maximum length of the shift bytestr and check that the
257 * actual value in @s fits into that size. If @s is larger than
258 * the maximum shift for @len-sized numbers, return 0. This would
259 * be otherwise an invalid value, because even a zero-sized bytestr
263 max_shiftlen(unsigned char *s
, size_t len
)
265 size_t maxshiftoff
= ((bitsize(len
) + 3) - 1) >> 3;
266 unsigned char *p
= BYTESTR_LSB(s
, len
) + maxshiftoff
* BYTESTR_STEP
;
267 while (p
!= BYTESTR_MSB(s
, len
)) {
272 return maxshiftoff
+ 1;
275 /* Get the shift size in bytes. */
277 shift_bytecount(unsigned char *s
, size_t len
, size_t shiftlen
)
281 s
= BYTESTR_LSB(s
, len
) + shiftlen
* BYTESTR_STEP
;
284 ret
= (ret
<< 8) | carry
| (*s
>> 3);
285 carry
= (*s
& 7) << 5;
291 bytestr_shl(unsigned char *s1
, unsigned char *s2
, size_t len
)
293 size_t shiftlen
, byteoff
, bitshift
;
295 shiftlen
= max_shiftlen(s2
, len
);
296 byteoff
= shiftlen
? shift_bytecount(s2
, len
, shiftlen
) : len
;
300 bitshift
= *BYTESTR_LSB(s2
, len
) & 7;
301 for (p
= BYTESTR_MSB(s1
, len
) - byteoff
* BYTESTR_STEP
;
302 p
!= BYTESTR_LSB(s1
, len
);
304 p
[byteoff
* BYTESTR_STEP
] =
306 p
[-BYTESTR_STEP
] >> (8 - bitshift
);
307 p
[byteoff
* BYTESTR_STEP
] = *p
<< bitshift
;
310 memset(BYTESTR_LOPART(s1
, len
, byteoff
), 0, byteoff
);
314 bytestr_shr(unsigned char *s1
, unsigned char *s2
, size_t len
)
316 size_t shiftlen
, byteoff
, bitshift
;
318 shiftlen
= max_shiftlen(s2
, len
);
319 byteoff
= shiftlen
? shift_bytecount(s2
, len
, shiftlen
) : len
;
323 bitshift
= *BYTESTR_LSB(s2
, len
) & 7;
324 for (p
= BYTESTR_LSB(s1
, len
) + byteoff
* BYTESTR_STEP
;
325 p
!= BYTESTR_MSB(s1
, len
);
327 p
[-byteoff
* BYTESTR_STEP
] =
329 p
[BYTESTR_STEP
] << (8 - bitshift
);
330 p
[-byteoff
* BYTESTR_STEP
] = *p
>> bitshift
;
333 memset(BYTESTR_HIPART(s1
, len
, byteoff
), 0, byteoff
);
337 bytestr_inc(unsigned char *s
, size_t len
)
340 s
= BYTESTR_LSB(s
, len
);
344 } while (carry
&& --len
);
349 bytestr_add(unsigned char *s1
, unsigned char *s2
, size_t len
)
352 s1
= BYTESTR_LSB(s1
, len
);
353 s2
= BYTESTR_LSB(s2
, len
);
365 bytestr_sub(unsigned char *s1
, unsigned char *s2
, size_t len
)
368 s1
= BYTESTR_LSB(s1
, len
);
369 s2
= BYTESTR_LSB(s2
, len
);
380 /* multiply @src by @mul and add it to @dst */
382 muladd(unsigned char *dst
, unsigned char *src
, size_t len
, unsigned char mul
)
385 unsigned char *p
= BYTESTR_LSB(src
, len
);
386 unsigned char *q
= BYTESTR_LSB(dst
, len
);
387 while (p
!= BYTESTR_MSB(src
, len
) + BYTESTR_STEP
) {
388 carry
+= *p
* mul
+ *q
;
398 bytestr_mul(unsigned char *s1
, unsigned char *s2
, size_t len
)
400 unsigned char ret
[len
];
404 for (mlen
= len
; mlen
; --mlen
)
405 muladd(BYTESTR_HIPART(ret
, len
, mlen
),
406 BYTESTR_LOPART(s1
, len
, mlen
), mlen
,
407 BYTESTR_MSB(s2
, len
)[(1-mlen
) * BYTESTR_STEP
]);
408 memcpy(s1
, ret
, len
);
412 shl_one(unsigned char *s
, size_t len
, unsigned carry
)
414 unsigned char *p
= BYTESTR_LSB(s
, len
);
415 while (p
!= BYTESTR_MSB(s
, len
) + BYTESTR_STEP
) {
424 /* Simple non-restoring algorithm for now.
425 * Feel free to improve.
428 bytestr_div(unsigned char *s1
, unsigned char *s2
, size_t len
,
431 unsigned char *p
, mask
;
434 p
= BYTESTR_MSB(s1
, len
);
436 while (p
!= BYTESTR_LSB(s1
, len
) - BYTESTR_STEP
) {
437 if (shl_one(mod
, len
, !!(*p
& mask
)))
438 bytestr_add(mod
, s2
, len
);
440 bytestr_sub(mod
, s2
, len
);
442 if (*BYTESTR_MSB(mod
, len
) & 0x80)
447 if (! (mask
>>= 1) ) {
452 if (*BYTESTR_MSB(mod
, len
) & 0x80)
453 bytestr_add(mod
, s2
, len
);
456 /* Remove any MSB zeroes from @s and update *@plen. */
457 static unsigned char*
458 bytestr_shrink(unsigned char *s
, size_t *plen
)
460 size_t origlen
= *plen
;
461 unsigned char *p
= BYTESTR_MSB(s
, origlen
);
463 while (*plen
> 1 && !*p
) {
467 return memmove(s
, BYTESTR_LOPART(s
, origlen
, *plen
), *plen
);
470 static unsigned char*
471 bytestr_from_hex(char **nptr
, size_t *plen
)
474 unsigned char *ret
, *s
;
477 for (q
= p
= *nptr
; isxdigit(*q
); ++q
);
479 *plen
= ((size_t)(q
- p
) + 1) >> 1;
481 if (! (ret
= calloc(1, *plen
)) )
484 s
= BYTESTR_LSB(ret
, *plen
);
487 *s
|= unhex(*q
) << shift
;
498 static unsigned char*
499 bytestr_from_oct(char **nptr
, size_t *plen
)
502 unsigned char *ret
, *s
;
505 for (q
= p
= *nptr
; *q
>= '0' && *q
<= '7'; ++q
);
507 *plen
= ((size_t)(q
- p
) * 3 + 7)/8 ?: 1;
508 if (! (ret
= calloc(1, *plen
)) )
511 s
= BYTESTR_LSB(ret
, *plen
);
514 acc
|= (*q
- '0') << shift
;
526 return bytestr_shrink(ret
, plen
);
529 /* Number of bits per decimal digit scaled by 256 (8 bit shift) */
530 #define SBITSPERDECDIG 851
532 static unsigned char*
533 bytestr_from_dec(char **nptr
, size_t *plen
)
536 unsigned char *ret
, *s
;
538 /* We must make sure that the resulting bytestr will fit into
539 * the allocated space, so we must always round to the nearest
540 * higher integer. Because of this, the approximation of log2(10)
541 * and the dependency on the actual number, the allocated space
542 * may be larger than necessary.
544 for (p
= q
= *nptr
; isdigit(*q
); ++q
);
546 *plen
= (((size_t)(q
- p
) * SBITSPERDECDIG
+ 255)/256 + 7)/8 ?: 1;
547 if (! (ret
= calloc(1, *plen
)) )
551 unsigned carry
= *p
++ - '0';
552 for (s
= BYTESTR_LSB(ret
, *plen
);
553 s
!= BYTESTR_MSB(ret
, *plen
) + BYTESTR_STEP
;
561 return bytestr_shrink(ret
, plen
);
564 /* Convert an ascii representation to a bytestr */
565 static unsigned char*
566 bytestr_from_ascii(char **nptr
, size_t *plen
)
570 if ((**nptr
== 'x' || **nptr
== 'X') &&
571 isxdigit((*nptr
)[1])) {
573 return bytestr_from_hex(nptr
, plen
);
575 return bytestr_from_oct(nptr
, plen
);
577 return bytestr_from_dec(nptr
, plen
);
581 bytestr_unop(unsigned char *s
, size_t len
, long flags
, enum atom_un_op op
)
587 if (op
== AUO_MINUS
) {
588 if (!bytestr_inc(s
, len
))
589 flags
^= HED_AEF_NEGATIVE
;
600 bytestr_binop(unsigned char *s1
, unsigned char *s2
, size_t len
,
601 long flags1
, long flags2
, enum atom_bin_op op
)
605 int sign
= !!(flags1
& HED_AEF_NEGATIVE
);
606 int sign2
= !!(flags2
& HED_AEF_NEGATIVE
);
607 long ret
= (flags1
& ~HED_AEF_NEGATIVE
) | (flags2
& HED_AEF_DYNAMIC
);
612 bytestr_or(s1
, s2
, len
);
616 bytestr_and(s1
, s2
, len
);
620 bytestr_xor(s1
, s2
, len
);
623 carry
= bytestr_add(s1
, s2
, len
);
624 sign
^= (sign
^ sign2
) & (sign
^ !carry
);
627 carry
= bytestr_sub(s1
, s2
, len
);
628 sign
^= ((sign
^ sign2
) | (sign
^ !carry
)) ^ 1;
632 bytestr_mul(s1
, s2
, len
);
637 if (! (smod
= malloc(len
)) ) {
638 flags1
= HED_AEF_ERROR
;
641 bytestr_div(s1
, s2
, len
, smod
);
643 memcpy(s1
, smod
, len
);
647 bytestr_shl(s1
, s2
, len
);
650 bytestr_shr(s1
, s2
, len
);
656 ret
|= HED_AEF_NEGATIVE
& -sign
;
661 alloc_atom(enum atom_type type
, size_t len
)
663 static const size_t sizes
[] = {
664 sizeof(struct atom_un
),
665 sizeof(struct atom_bin
),
666 sizeof(struct atom_bytestr
),
667 sizeof(struct atom_register
),
668 sizeof(struct atom_mark
)
670 size_t asize
= sizes
[type
];
673 if (type
== ATOM_BYTESTR
)
676 a
= calloc(1, asize
);
685 val_parse(char **sexpr
)
687 static const char uopc
[] = "~-"; // in atom_un_op order
691 if ( (uop
= strchr(uopc
, **sexpr
)) ) {
697 sa
= val_parse(sexpr
);
701 a
= alloc_atom(ATOM_UN
, sa
->len
);
710 } else if (**sexpr
== '"') {
711 struct atom_register
*a
;
718 a
= alloc_atom(ATOM_REGISTER
, SIZE_AUTO
);
723 INIT_LIST_HEAD(&a
->offset
.atoms
);
724 if (**sexpr
== '[') {
726 compile_till(&a
->offset
, sexpr
, ']');
732 } else if (**sexpr
== '@') {
740 a
= alloc_atom(ATOM_MARK
, sizeof(hed_off_t
));
746 } else if (**sexpr
== '\'') {
747 struct atom_bytestr
*a
;
753 while (**sexpr
&& **sexpr
!= '\'') {
754 if (**sexpr
== '\\') {
763 a
= alloc_atom(ATOM_BYTESTR
, i
);
767 for (i
= 0; base
< *sexpr
; i
++, base
++) {
775 } else if (**sexpr
== 'x') {
776 struct atom_bytestr
*a
;
782 while (isxdigit(**sexpr
))
785 a
= alloc_atom(ATOM_BYTESTR
, (*sexpr
- base
+ 1)/2);
791 if ((*sexpr
- base
) % 2)
792 a
->bytes
[i
++] = unhex(*base
++);
793 while (base
< *sexpr
) {
794 a
->bytes
[i
++] = (unhex(base
[0]) << 4) + unhex(base
[1]);
799 } else if (isdigit(**sexpr
)) {
800 struct atom_bytestr
*a
;
804 if (! (num
= bytestr_from_ascii(sexpr
, &len
)) )
807 a
= alloc_atom(ATOM_BYTESTR
, len
);
813 memcpy(a
->bytes
, num
, len
);
847 ret
->len
= strtoull(*sexpr
, sexpr
, 0);
848 if (**sexpr
!= '}') {
858 /* return the precedence of the operator @op */
860 precedence(enum atom_bin_op op
)
862 static const int op2prec
[] = {
876 assert(op
>= 0 && op
< sizeof(op2prec
)/sizeof(int));
880 /* Reduce the stack up to an operator with a higher precedence than @till
881 * Returns the first non-reduced atom, or NULL if there are no more atoms
884 static struct atom_bin
*
885 reduce_stack(struct hed_expr
*expr
, struct list_head
*stack
, int minprec
)
887 while (!list_empty(stack
)) {
888 struct atom_bin
*bin
;
890 bin
= list_entry(stack
->next
, struct atom_bin
, a
.list
);
891 if (precedence(bin
->op
) < minprec
)
894 if (list_empty(&expr
->atoms
))
896 bin
->atom2
= list_entry(expr
->atoms
.prev
, struct atom
, list
);
897 list_del(&bin
->atom2
->list
);
899 if (list_empty(&expr
->atoms
))
901 bin
->atom1
= list_entry(expr
->atoms
.prev
, struct atom
, list
);
902 list_del(&bin
->atom1
->list
);
904 list_move_tail(&bin
->a
.list
, &expr
->atoms
);
906 /* Note that BOTH may be SIZE_AUTO, but that's ok */
907 if (bin
->atom1
->len
== SIZE_AUTO
)
908 bin
->a
.len
= bin
->atom1
->len
= bin
->atom2
->len
;
909 else if (bin
->atom2
->len
== SIZE_AUTO
)
910 bin
->a
.len
= bin
->atom2
->len
= bin
->atom1
->len
;
912 bin
->a
.len
= bin
->atom1
->len
>= bin
->atom2
->len
913 ? bin
->atom1
->len
: bin
->atom2
->len
;
919 static struct hed_expr
*
920 compile_till(struct hed_expr
*expr
, char **sexpr
, char till
)
922 static const char bopc
[] = "|&^+-*/%<>"; // in atom_bin_op order
926 struct atom_bin
*bin
;
927 struct list_head binstack
;
929 INIT_LIST_HEAD(&binstack
);
931 while (**sexpr
!= till
) {
934 if (**sexpr
== '(') {
935 bin
= alloc_atom(ATOM_BIN
, 0);
938 bin
->op
= ABO_LPAREN
;
939 list_add(&bin
->a
.list
, &binstack
);
944 a
= val_parse(sexpr
);
947 list_add_tail(&a
->list
, &expr
->atoms
);
950 if (**sexpr
== ')') {
951 bin
= reduce_stack(expr
, &binstack
,
952 precedence(ABO_RPAREN
));
954 goto err
; /* mismatched parenthesis */
955 list_del(&bin
->a
.list
);
960 bop
= strchr(bopc
, **sexpr
);
964 if (*bop
== '<' || *bop
== '>') {
970 bin
= alloc_atom(ATOM_BIN
, 0);
973 bin
->op
= bop
- bopc
;
974 reduce_stack(expr
, &binstack
, precedence(bin
->op
));
975 list_add(&bin
->a
.list
, &binstack
);
979 if (reduce_stack(expr
, &binstack
, precedence(ABO_RPAREN
)))
982 list_for_each_entry(a
, &expr
->atoms
, list
) {
983 if (a
->len
== SIZE_AUTO
)
989 expr
->buf
= realloc(expr
->buf
, expr
->len
);
997 list_splice(&binstack
, &expr
->atoms
);
1003 hed_expr_new(char *sexpr
,
1004 hed_expr_reg_cb reg_cb
, hed_expr_mark_cb mark_cb
, void *cb_data
)
1006 struct hed_expr
*expr
= calloc(1, sizeof(struct hed_expr
));
1011 expr
->reg_cb
= reg_cb
;
1012 expr
->mark_cb
= mark_cb
;
1013 expr
->cb_data
= cb_data
;
1014 INIT_LIST_HEAD(&expr
->atoms
);
1016 if (compile_till(expr
, &sexpr
, '\0'))
1025 atom_eval(struct hed_expr
*expr
, struct atom
*atom
,
1026 unsigned char *scramble
, size_t len
)
1031 return evals
[atom
->type
](expr
, atom
, scramble
, len
);
1035 atom_un(struct hed_expr
*expr
, struct atom
*atom
,
1036 unsigned char *scramble
, size_t len
)
1038 struct atom_un
*data
= (struct atom_un
*) atom
;
1042 flags
= atom_eval(expr
, data
->atom
, scramble
, len
);
1043 if (flags
& HED_AEF_ERROR
)
1045 return bytestr_unop(scramble
, len
, flags
, data
->op
);
1049 do_atom_bin(struct hed_expr
*expr
, struct atom_bin
*data
,
1050 unsigned char *scramble
, size_t len
, unsigned char *tmp
)
1052 long flags1
, flags2
;
1054 assert(data
->atom1
&& data
->atom2
);
1056 flags1
= atom_eval(expr
, data
->atom1
, scramble
, len
);
1057 if (flags1
& HED_AEF_ERROR
)
1060 flags2
= atom_eval(expr
, data
->atom2
, tmp
, len
);
1061 if (flags2
& HED_AEF_ERROR
)
1064 return bytestr_binop(scramble
, tmp
, len
,
1065 flags1
, flags2
, data
->op
);
1069 atom_bin(struct hed_expr
*expr
, struct atom
*atom
,
1070 unsigned char *scramble
, size_t len
)
1072 struct atom_bin
*data
= (struct atom_bin
*) atom
;
1078 return HED_AEF_ERROR
;
1080 ret
= do_atom_bin(expr
, data
, scramble
, len
, tmp
);
1087 atom_bytestr(struct hed_expr
*expr
, struct atom
*atom
,
1088 unsigned char *scramble
, size_t len
)
1090 struct atom_bytestr
*data
= (struct atom_bytestr
*) atom
;
1092 if (len
> data
->len
) {
1093 memcpy(BYTESTR_LOPART(scramble
, len
, data
->len
),
1094 data
->bytes
, data
->len
);
1095 memset(BYTESTR_HIPART(scramble
, len
, len
- data
->len
),
1096 0, len
- data
->len
);
1099 BYTESTR_LOPART(data
->bytes
, data
->len
, len
), len
);
1104 atom_mark(struct hed_expr
*expr
, struct atom
*atom
,
1105 unsigned char *scramble
, size_t len
)
1107 struct atom_mark
*data
= (struct atom_mark
*) atom
;
1109 if (!expr
->mark_cb
) {
1110 memset(scramble
, 0, len
);
1114 return expr
->mark_cb(expr
->cb_data
, data
->mark
, scramble
, len
);
1119 atom_register(struct hed_expr
*expr
, struct atom
*atom
,
1120 unsigned char *scramble
, size_t len
)
1122 struct atom_register
*data
= (struct atom_register
*) atom
;
1125 if (!expr
->reg_cb
) {
1126 memset(scramble
, 0, len
);
1130 data
->offset
.reg_cb
= expr
->reg_cb
;
1131 data
->offset
.mark_cb
= expr
->mark_cb
;
1132 data
->offset
.cb_data
= expr
->cb_data
;
1133 if (hed_expr_eval(&data
->offset
) & HED_AEF_ERROR
)
1134 return HED_AEF_ERROR
;
1136 ofs
= hed_expr2off(&data
->offset
);
1138 if (data
->reg
== '.')
1139 ofs
+= expr
->cb_pos
;
1140 return expr
->reg_cb(expr
->cb_data
, data
->reg
, ofs
,
1146 hed_expr_eval(struct hed_expr
*expr
)
1153 assert(expr
&& expr
->len
>= 0);
1154 list_for_each_entry (a
, &expr
->atoms
, list
) {
1157 flags
= atom_eval(expr
, a
,
1158 expr
->buf
+ expr
->cb_pos
, a
->len
);
1159 expr
->flags
|= (flags
& (HED_AEF_DYNAMIC
| HED_AEF_ERROR
));
1161 expr
->flags
|= (flags
& HED_AEF_NEGATIVE
);
1163 expr
->flags
&= ~HED_AEF_NEGATIVE
;
1164 expr
->cb_pos
+= a
->len
;
1166 assert(expr
->cb_pos
== expr
->len
);
1173 struct atom_un
*un
= (struct atom_un
*) p
;
1174 free_atom(un
->atom
);
1181 struct atom_bin
*bin
= (struct atom_bin
*) p
;
1183 free_atom(bin
->atom1
);
1185 free_atom(bin
->atom2
);
1190 free_register(void *p
)
1192 struct atom_register
*reg
= (struct atom_register
*) p
;
1193 expr_cleanup(®
->offset
);
1198 expr_cleanup(struct hed_expr
*expr
)
1200 struct atom
*a
, *next
;
1202 list_for_each_entry_safe (a
, next
, &expr
->atoms
, list
)
1209 hed_expr_free(struct hed_expr
*expr
)