usr/src/uts/common/io/avintr.c

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Autovectored Interrupt Configuration and Deconfiguration
 */

#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/trap.h>
#include <sys/t_lock.h>
#include <sys/avintr.h>
#include <sys/kmem.h>
#include <sys/machlock.h>
#include <sys/systm.h>
#include <sys/machsystm.h>
#include <sys/sunddi.h>
#include <sys/x_call.h>
#include <sys/cpuvar.h>
#include <sys/atomic.h>
#include <sys/smp_impldefs.h>
#include <sys/sdt.h>
#include <sys/stack.h>
#include <sys/ddi_impldefs.h>
#ifdef __xpv
#include <sys/evtchn_impl.h>
#endif
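
/*
 * Per-handle soft interrupt state: one pending bit per CPU, manipulated
 * atomically via the CPUSET_ATOMIC_* operations below.
 */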
typedef struct av_softinfo {
	cpuset_t	av_pending;	/* pending bitmasks */
} av_softinfo_t;

static void insert_av(void *intr_id, struct av_head *vectp, avfunc f,
    caddr_t arg1, caddr_t arg2, uint64_t *ticksp, int pri_level,
    dev_info_t *dip);
static void remove_av(void *intr_id, struct av_head *vectp, avfunc f,
    int pri_level, int vect);

/*
 * Arrange for a driver to be called when a particular
 * auto-vectored interrupt occurs.
 * NOTE: if a device can generate interrupts on more than
 * one level, or if a driver services devices that interrupt
 * on more than one level, then the driver should install
 * itself on each of those levels.
 */
static char badsoft[] =
	"add_avintr: bad soft interrupt level %d for driver '%s'\n";
static char multilevel[] =
	"!IRQ%d is being shared by drivers with different interrupt levels.\n"
	"This may result in reduced system performance.";
static char multilevel2[] =
	"Cannot register interrupt for '%s' device at IPL %d because it\n"
	"conflicts with another device using the same vector %d with an IPL\n"
	"of %d. Reconfigure the conflicting devices to use different vectors.";

#ifdef __xpv
#define	MAX_VECT	NR_IRQS
#else
#define	MAX_VECT	256
#endif
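
/*
 * nmivect is the chain of registered NMI handlers.  autovect[] holds the
 * per-vector chains of hardware interrupt handlers and softvect[] the
 * per-level chains of soft interrupt handlers; av_lock protects updates
 * to all of these chains.
 */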
struct autovec *nmivect = NULL;
struct av_head autovect[MAX_VECT];
struct av_head softvect[LOCK_LEVEL + 1];
kmutex_t av_lock;

/*
 * These are the software interrupt handlers dedicated to the ddi timer.
 * Interrupt levels up to 10 are supported, but high-level interrupts
 * must not be used here.
 */
ddi_softint_hdl_impl_t softlevel_hdl[DDI_IPL_10] = {
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 1 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 2 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 3 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 4 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 5 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 6 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 7 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 8 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 9 */
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL},	/* level 10 */
};
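
/*
 * Handle backing the traditional level-1 soft interrupt raised by
 * siron() and kdi_siron().
 */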
ddi_softint_hdl_impl_t softlevel1_hdl =
	{0, 0, NULL, NULL, 0, NULL, NULL, NULL};

/*
 * Clear/check the softint pending flag corresponding to the current CPU.
 */
void
av_clear_softint_pending(av_softinfo_t *infop)
{
	CPUSET_ATOMIC_DEL(infop->av_pending, CPU->cpu_seqid);
}

boolean_t
av_check_softint_pending(av_softinfo_t *infop, boolean_t check_all)
{
	if (check_all)
		return (!CPUSET_ISNULL(infop->av_pending));
	else
		return (CPU_IN_SET(infop->av_pending, CPU->cpu_seqid) != 0);
}

/*
 * This is the wrapper function which is generally used to set a softint
 * pending.
 */
void
av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	kdi_av_set_softint_pending(pri, infop);
}

/*
 * This is kmdb's private entry point to setsoftint, called from kdi_siron.
 * It first sets our av softint pending bit for the current CPU,
 * then it sets the CPU softint pending bit for pri.
 */
void
kdi_av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	CPUSET_ATOMIC_ADD(infop->av_pending, CPU->cpu_seqid);

	atomic_or_32((uint32_t *)&CPU->cpu_softinfo.st_pending, 1 << pri);
}

/*
 * Register an nmi interrupt routine. The first arg is used only to order
 * the various nmi interrupt service routines in the chain. Higher lvls
 * will be called first.
 */
int
add_nmintr(int lvl, avfunc nmintr, char *name, caddr_t arg)
{
	struct autovec *mem;
	struct autovec *p, *prev = NULL;

	if (nmintr == NULL) {
		printf("Attempt to add null vect for %s on nmi\n", name);
		return (0);
	}

	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = nmintr;
	mem->av_intarg1 = arg;
	mem->av_intarg2 = NULL;
	mem->av_intr_id = NULL;
	mem->av_prilevel = lvl;
	mem->av_dip = NULL;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	if (!nmivect) {
		nmivect = mem;
		mutex_exit(&av_lock);
		return (1);
	}
	/* find where it goes in list */
	for (p = nmivect; p != NULL; p = p->av_link) {
		if (p->av_vector == nmintr && p->av_intarg1 == arg) {
			/*
			 * already in list
			 * So? Somebody added the same interrupt twice.
			 */
			cmn_err(CE_WARN, "Driver already registered '%s'",
			    name);
			kmem_free(mem, sizeof (struct autovec));
			mutex_exit(&av_lock);
			return (0);
		}
		if (p->av_prilevel < lvl) {
			if (p == nmivect) {	/* it's at head of list */
				mem->av_link = p;
				nmivect = mem;
			} else {
				mem->av_link = p;
				prev->av_link = mem;
			}
			mutex_exit(&av_lock);
			return (1);
		}
		prev = p;
	}
	/* didn't find it, add it to the end */
	prev->av_link = mem;
	mutex_exit(&av_lock);
	return (1);
}

/*
 * Register a hardware interrupt handler.
 *
 * The autovect data structure only supports 256 interrupts globally.
 * In order to support 256 * #LocalAPIC interrupts, a new PSM module,
 * apix, is introduced. It defines PSM private data structures for the
 * interrupt handlers. The PSM module initializes addintr to a PSM
 * private function so that it can override add_avintr() to operate
 * on its private data structures.
 */
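
/*
 * Illustrative (hypothetical) caller, not taken from any driver: a kernel
 * component wiring a handler xx_intr for vector "vect" at IPL "lvl" might do
 *
 *	if (add_avintr(NULL, lvl, (avfunc)xx_intr, "xx", vect,
 *	    (caddr_t)xx_softc, NULL, NULL, xx_dip) == 0)
 *		return (DDI_FAILURE);
 *
 * and later undo it with rem_avintr(NULL, lvl, (avfunc)xx_intr, vect).
 * The names xx_intr, xx_softc and xx_dip are placeholders; leaf drivers
 * normally go through the DDI interrupt interfaces rather than calling
 * these routines directly.
 */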
int
add_avintr(void *intr_id, int lvl, avfunc xxintr, char *name, int vect,
    caddr_t arg1, caddr_t arg2, uint64_t *ticksp, dev_info_t *dip)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;			/* save old spl value */
	ushort_t hi_pri;

	if (addintr) {
		return ((*addintr)(intr_id, lvl, xxintr, name, vect,
		    arg1, arg2, ticksp, dip));
	}

	if ((f = xxintr) == NULL) {
		printf("Attempt to add null vect for %s on vector %d\n",
		    name, vect);
		return (0);
	}
	vectindex = vect % MAX_VECT;

	vecp = &autovect[vectindex];

	/*
	 * "hi_pri == 0" implies all entries on list are "unused",
	 * which means that it's OK to just insert this one.
	 */
	hi_pri = vecp->avh_hi_pri;
	if (vecp->avh_link && (hi_pri != 0)) {
		if (((hi_pri > LOCK_LEVEL) && (lvl < LOCK_LEVEL)) ||
		    ((hi_pri < LOCK_LEVEL) && (lvl > LOCK_LEVEL))) {
			cmn_err(CE_WARN, multilevel2, name, lvl, vect,
			    hi_pri);
			return (0);
		}
		if ((vecp->avh_lo_pri != lvl) || (hi_pri != lvl))
			cmn_err(CE_NOTE, multilevel, vect);
	}

	insert_av(intr_id, vecp, f, arg1, arg2, ticksp, lvl, dip);
	s = splhi();
	/*
	 * do whatever machine-specific things are necessary
	 * to set the priority level (e.g. set picmasks)
	 */
	mutex_enter(&av_lock);
	(*addspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
	return (1);
}
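
/*
 * Update the second handler argument (av_intarg2) of a previously
 * registered soft interrupt handler, identified by intr_id, on the chain
 * for the given level.  Does nothing if no matching entry is found.
 */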
void
update_avsoftintr_args(void *intr_id, int lvl, caddr_t arg2)
{
	struct autovec *p;
	struct autovec *target = NULL;
	struct av_head *vectp = (struct av_head *)&softvect[lvl];

	for (p = vectp->avh_link; p && p->av_vector; p = p->av_link) {
		if (p->av_intr_id == intr_id) {
			target = p;
			break;
		}
	}

	if (target == NULL)
		return;
	target->av_intarg2 = arg2;
}

/*
 * Register a software interrupt handler
 */
int
add_avsoftintr(void *intr_id, int lvl, avfunc xxintr, char *name,
    caddr_t arg1, caddr_t arg2)
{
	int slvl;
	ddi_softint_hdl_impl_t *hdlp = (ddi_softint_hdl_impl_t *)intr_id;

	if ((slvl = slvltovect(lvl)) != -1)
		return (add_avintr(intr_id, lvl, xxintr,
		    name, slvl, arg1, arg2, NULL, NULL));

	if (intr_id == NULL) {
		printf("Attempt to add null intr_id for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	if (xxintr == NULL) {
		printf("Attempt to add null handler for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	if (lvl <= 0 || lvl > LOCK_LEVEL) {
		printf(badsoft, lvl, name);
		return (0);
	}

	if (hdlp->ih_pending == NULL) {
		hdlp->ih_pending =
		    kmem_zalloc(sizeof (av_softinfo_t), KM_SLEEP);
	}

	insert_av(intr_id, &softvect[lvl], xxintr, arg1, arg2, NULL, lvl, NULL);

	return (1);
}

/*
 * Insert an interrupt vector into the chain by its priority, from high
 * to low.
 */
static void
insert_av(void *intr_id, struct av_head *vectp, avfunc f, caddr_t arg1,
    caddr_t arg2, uint64_t *ticksp, int pri_level, dev_info_t *dip)
{
	/*
	 * Protect rewrites of the list
	 */
	struct autovec *p, *prep, *mem;

	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = f;
	mem->av_intarg1 = arg1;
	mem->av_intarg2 = arg2;
	mem->av_ticksp = ticksp;
	mem->av_intr_id = intr_id;
	mem->av_prilevel = pri_level;
	mem->av_dip = dip;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	if (vectp->avh_link == NULL) {	/* Nothing on list - put it at head */
		vectp->avh_link = mem;
		vectp->avh_hi_pri = vectp->avh_lo_pri = (ushort_t)pri_level;

		mutex_exit(&av_lock);
		return;
	}

	/* find where it goes in list */
	prep = NULL;
	for (p = vectp->avh_link; p != NULL; p = p->av_link) {
		if (p->av_vector && p->av_prilevel <= pri_level)
			break;
		prep = p;
	}
	if (prep != NULL) {
		if (prep->av_vector == NULL) {	/* freed struct available */
			p = prep;
			p->av_intarg1 = arg1;
			p->av_intarg2 = arg2;
			p->av_ticksp = ticksp;
			p->av_intr_id = intr_id;
			p->av_prilevel = pri_level;
			p->av_dip = dip;
			if (pri_level > (int)vectp->avh_hi_pri) {
				vectp->avh_hi_pri = (ushort_t)pri_level;
			}
			if (pri_level < (int)vectp->avh_lo_pri) {
				vectp->avh_lo_pri = (ushort_t)pri_level;
			}
			/*
			 * To prevent calling the service routine before args
			 * and ticksp are ready, fill in the vector last.
			 */
			p->av_vector = f;
			mutex_exit(&av_lock);
			kmem_free(mem, sizeof (struct autovec));
			return;
		}

		mem->av_link = prep->av_link;
		prep->av_link = mem;
	} else {
		/* insert new intpt at beginning of chain */
		mem->av_link = vectp->avh_link;
		vectp->avh_link = mem;
	}
	if (pri_level > (int)vectp->avh_hi_pri) {
		vectp->avh_hi_pri = (ushort_t)pri_level;
	}
	if (pri_level < (int)vectp->avh_lo_pri) {
		vectp->avh_lo_pri = (ushort_t)pri_level;
	}
	mutex_exit(&av_lock);
}
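
/*
 * Common code for removing a soft interrupt handler.  Levels that map to
 * a hardware vector (slvltovect()) are handed to rem_avintr(); otherwise
 * the handler is unlinked from softvect[lvl].  When rem_softinfo is set,
 * the per-handle pending state is freed as well.
 */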
static int
av_rem_softintr(void *intr_id, int lvl, avfunc xxintr, boolean_t rem_softinfo)
{
	struct av_head *vecp = (struct av_head *)0;
	int slvl;
	ddi_softint_hdl_impl_t *hdlp = (ddi_softint_hdl_impl_t *)intr_id;
	av_softinfo_t *infop = (av_softinfo_t *)hdlp->ih_pending;

	if (xxintr == NULL)
		return (0);

	if ((slvl = slvltovect(lvl)) != -1) {
		rem_avintr(intr_id, lvl, xxintr, slvl);
		return (1);
	}

	if (lvl <= 0 || lvl > LOCK_LEVEL) {
		return (0);
	}

	vecp = &softvect[lvl];
	remove_av(intr_id, vecp, xxintr, lvl, 0);

	if (rem_softinfo) {
		kmem_free(infop, sizeof (av_softinfo_t));
		hdlp->ih_pending = NULL;
	}

	return (1);
}
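
/*
 * Re-register a soft interrupt handler at the priority recorded in its
 * handle (ih_pri) and, on success, remove the registration at old_lvl
 * while preserving the handle's pending state.
 */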
int
av_softint_movepri(void *intr_id, int old_lvl)
{
	int ret;
	ddi_softint_hdl_impl_t *hdlp = (ddi_softint_hdl_impl_t *)intr_id;

	ret = add_avsoftintr(intr_id, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(hdlp->ih_dip)->devi_name, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	if (ret) {
		(void) av_rem_softintr(intr_id, old_lvl, hdlp->ih_cb_func,
		    B_FALSE);
	}

	return (ret);
}

/*
 * Remove a driver from the autovector list.
 */
int
rem_avsoftintr(void *intr_id, int lvl, avfunc xxintr)
{
	return (av_rem_softintr(intr_id, lvl, xxintr, B_TRUE));
}

/*
 * Remove the specified interrupt handler.
 *
 * A PSM module may initialize remintr to a PSM private function so that
 * it can override rem_avintr() to operate on its private data structures.
 */
void
rem_avintr(void *intr_id, int lvl, avfunc xxintr, int vect)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;			/* save old spl value */

	if (remintr) {
		(*remintr)(intr_id, lvl, xxintr, vect);
		return;
	}

	if ((f = xxintr) == NULL)
		return;

	vectindex = vect % MAX_VECT;
	vecp = &autovect[vectindex];
	remove_av(intr_id, vecp, f, lvl, vect);
	s = splhi();
	mutex_enter(&av_lock);
	(*delspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
}

/*
 * After having made a change to an autovector list, wait until we have
 * seen each cpu not executing an interrupt at that level--so we know our
 * change has taken effect completely (no old state in registers, etc).
 */
void
wait_till_seen(int ipl)
{
	int cpu_in_chain, cix;
	struct cpu *cpup;
	cpuset_t cpus_to_check;

	CPUSET_ALL(cpus_to_check);
	do {
		cpu_in_chain = 0;
		for (cix = 0; cix < NCPU; cix++) {
			cpup = cpu[cix];
			if (cpup != NULL && CPU_IN_SET(cpus_to_check, cix)) {
				if (INTR_ACTIVE(cpup, ipl)) {
					cpu_in_chain = 1;
				} else {
					CPUSET_DEL(cpus_to_check, cix);
				}
			}
		}
	} while (cpu_in_chain);
}

static uint64_t dummy_tick;

/* remove an interrupt vector from the chain */
static void
remove_av(void *intr_id, struct av_head *vectp, avfunc f, int pri_level,
    int vect)
{
	struct autovec *p, *target;
	int	lo_pri, hi_pri;
	int	ipl;
	/*
	 * Protect rewrites of the list
	 */
	target = NULL;

	mutex_enter(&av_lock);
	ipl = pri_level;
	lo_pri = MAXIPL;
	hi_pri = 0;
	for (p = vectp->avh_link; p; p = p->av_link) {
		if ((p->av_vector == f) && (p->av_intr_id == intr_id)) {
			/* found the handler */
			target = p;
			continue;
		}
		if (p->av_vector != NULL) {
			if (p->av_prilevel > hi_pri)
				hi_pri = p->av_prilevel;
			if (p->av_prilevel < lo_pri)
				lo_pri = p->av_prilevel;
		}
	}
	if (ipl < hi_pri)
		ipl = hi_pri;
	if (target == NULL) {	/* not found */
		printf("Couldn't remove function %p at %d, %d\n",
		    (void *)f, vect, pri_level);
		mutex_exit(&av_lock);
		return;
	}

	/*
	 * This drops the handler from the chain; it can no longer be called.
	 * However, there is no guarantee that the handler is not currently
	 * still executing.
	 */
	target->av_vector = NULL;
	/*
	 * There is a race where we could be just about to pick up the ticksp
	 * pointer to increment it after returning from the service routine
	 * in av_dispatch_autovect.  Rather than NULL it out let's just point
	 * it off to something safe so that any final tick update attempt
	 * won't fault.
	 */
	target->av_ticksp = &dummy_tick;
	wait_till_seen(ipl);

	if (lo_pri > hi_pri) {	/* the chain is now empty */
		/* Leave the unused entries here for probable future use */
		vectp->avh_lo_pri = MAXIPL;
		vectp->avh_hi_pri = 0;
	} else {
		if ((int)vectp->avh_lo_pri < lo_pri)
			vectp->avh_lo_pri = (ushort_t)lo_pri;
		if ((int)vectp->avh_hi_pri > hi_pri)
			vectp->avh_hi_pri = (ushort_t)hi_pri;
	}
	mutex_exit(&av_lock);
	wait_till_seen(ipl);
}

/*
 * kmdb uses siron (and thus setsoftint) while the world is stopped in order to
 * inform its driver component that there's work to be done.  We need to keep
 * DTrace from instrumenting kmdb's siron and setsoftint.  We duplicate siron,
 * giving kmdb's version a kdi prefix to keep DTrace at bay.  We also provide
 * kdi_-prefixed versions of the various setsoftint functions for kmdb's use,
 * while the main *setsoftint() functionality is implemented as a wrapper.
 * This allows tracing, while still providing a way for kmdb to sneak in
 * unmolested.
 */
void
kdi_siron(void)
{
	(*kdisetsoftint)(1, softlevel1_hdl.ih_pending);
}

/*
 * Trigger a soft interrupt.
 */
void
siron(void)
{
	/* Level 1 software interrupt */
	(*setsoftint)(1, softlevel1_hdl.ih_pending);
}

/*
 * Trigger software interrupts dedicated to ddi timer.
 */
void
sir_on(int level)
{
	ASSERT(level >= DDI_IPL_1 && level <= DDI_IPL_10);
	(*setsoftint)(level, softlevel_hdl[level-1].ih_pending);
}

/*
 * The handler which is executed on the target CPU.
 */
/*ARGSUSED*/
static int
siron_poke_intr(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	siron();
	return (0);
}

/*
 * May get called from softcall to poke CPUs.
 */
void
siron_poke_cpu(cpuset_t poke)
{
	int cpuid = CPU->cpu_id;

	/*
	 * If we are poking ourselves then we can simply
	 * generate level1 using siron()
	 */
	if (CPU_IN_SET(poke, cpuid)) {
		siron();
		CPUSET_DEL(poke, cpuid);
		if (CPUSET_ISNULL(poke))
			return;
	}

	xc_call(0, 0, 0, CPUSET2BV(poke), (xc_func_t)siron_poke_intr);
}

/*
 * Walk the autovector table for this vector, invoking each
 * interrupt handler as we go.
 */

extern uint64_t intr_get_time(void);
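
/*
 * For a shared (level-triggered) vector the chain is walked again for as
 * long as more than one handler was called and at least one of them
 * claimed the interrupt, since another device sharing the line may have
 * raised it while the first pass was running.
 */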
void
av_dispatch_autovect(uint_t vec)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	while ((av = autovect[vec].avh_link) != NULL) {
		uint_t numcalled = 0;
		uint_t claimed = 0;

		for (; av; av = av->av_link) {
			uint_t r;
			uint_t (*intr)() = av->av_vector;
			caddr_t arg1 = av->av_intarg1;
			caddr_t arg2 = av->av_intarg2;
			dev_info_t *dip = av->av_dip;

			/*
			 * We must walk the entire chain.  Removed handlers
			 * may be anywhere in the chain.
			 */
			if (intr == NULL)
				continue;

			DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, caddr_t, arg2);
			r = (*intr)(arg1, arg2);
			DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, uint_t, r);
			numcalled++;
			claimed |= r;
			if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
				atomic_add_64(av->av_ticksp, intr_get_time());
		}

		/*
		 * If there's only one interrupt handler in the chain,
		 * or if no one claimed the interrupt at all, give up now.
		 */
		if (numcalled == 1 || claimed == 0)
			break;
	}
}

/*
 * Call every soft interrupt handler we can find at this level once.
 */
void
av_dispatch_softvect(uint_t pil)
{
	struct autovec *av;
	ddi_softint_hdl_impl_t	*hdlp;
	uint_t (*intr)();
	caddr_t arg1;
	caddr_t arg2;

	ASSERT_STACK_ALIGNED();
	ASSERT3U(pil, <=, PIL_MAX);

	for (av = softvect[pil].avh_link; av; av = av->av_link) {
		/*
		 * We must walk the entire chain.  Removed handlers
		 * may be anywhere in the chain.
		 */
		if ((intr = av->av_vector) == NULL)
			continue;
		arg1 = av->av_intarg1;
		arg2 = av->av_intarg2;

		hdlp = (ddi_softint_hdl_impl_t *)av->av_intr_id;
		ASSERT(hdlp);

		/*
		 * Each cpu has its own pending bit in hdlp->ih_pending;
		 * here av_check/clear_softint_pending is just checking
		 * and clearing the pending bit for the current cpu, which
		 * has just triggered a softint.
		 */
		if (av_check_softint_pending(hdlp->ih_pending, B_FALSE)) {
			av_clear_softint_pending(hdlp->ih_pending);
			(void) (*intr)(arg1, arg2);
		}
	}
}

struct regs;

/*
 * Call every NMI handler we know of once.
 */
void
av_dispatch_nmivect(struct regs *rp)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	for (av = nmivect; av; av = av->av_link)
		(void) (av->av_vector)(av->av_intarg1, rp);
}