cmd: remove sparc-only virtinfo
[unleashed.git] / usr / src / cmd / picl / plugins / sun4v / lib / snmp / snmplib.c
blob61c73b94f7d0c506fbaee9cc4ffd6df7c611d9d8
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
/*
 * The snmp library helps to prepare the PDUs and communicate with
 * the snmp agent on the SP side via the ds_snmp driver.
 */
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <unistd.h>
36 #include <thread.h>
37 #include <synch.h>
38 #include <errno.h>
39 #include <sys/time.h>
40 #include <sys/types.h>
41 #include <sys/stat.h>
42 #include <fcntl.h>
43 #include <libnvpair.h>
44 #include <sys/ds_snmp.h>
46 #include "libpiclsnmp.h"
47 #include "snmplib.h"
48 #include "asn1.h"
49 #include "pdu.h"
50 #include "debug.h"
52 #pragma init(libpiclsnmp_init) /* need this in .init */
/*
 * Data from the MIB is fetched based on the hints about object
 * groups received from (possibly many threads in) the application.
 * However, the fetched data is kept in a common cache for use across
 * all threads, so even a GETBULK is issued only when absolutely
 * necessary.
 *
 * Note that locking is not fine grained (there's no locking per row)
 * since we don't expect too many MT consumers right away.
 */
65 static mutex_t mibcache_lock;
66 static nvlist_t **mibcache = NULL;
67 static uint_t n_mibcache_rows = 0;
69 static mutex_t snmp_reqid_lock;
70 static int snmp_reqid = 1;
72 #ifdef SNMP_DEBUG
73 uint_t snmp_nsends = 0;
74 uint_t snmp_sentbytes = 0;
75 uint_t snmp_nrecvs = 0;
76 uint_t snmp_rcvdbytes = 0;
77 #endif
79 #ifdef USE_SOCKETS
80 #define SNMP_DEFAULT_PORT 161
81 #define SNMP_MAX_RECV_PKTSZ (64 * 1024)
82 #endif
85 * We need a reliably monotonic and stable source of time values to age
86 * entries in the mibcache toward expiration. The code originally used
87 * gettimeofday(), but since that is subject to time-of-day changes made by
88 * the administrator, the values it returns do not satisfy our needs.
89 * Instead, we use gethrtime(), which is immune to time-of-day changes.
90 * However, since gethrtime() returns a signed 64-bit value in units of
91 * nanoseconds and we are using signed 32-bit timestamps, we always divide
92 * the result by (HRTIME_SCALE * NANOSEC) to scale it down into units of 10
93 * seconds.
95 * Note that the scaling factor means that the value of MAX_INCACHE_TIME
96 * from snmplib.h should also be in units of 10 seconds.
98 #define GET_SCALED_HRTIME() (int)(gethrtime() / (HRTIME_SCALE * NANOSEC))
/*
 * The mibcache code originally cached values for 300 seconds after fetching
 * data via SNMP.  Subsequent reads within that 300 second window would come
 * from the cache - which is quite a bit faster than an SNMP query - but the
 * first request that came in more than 300 seconds after the previous SNMP
 * query would trigger a new SNMP query.  This worked well as an
 * optimization for frequent queries, but when data was only queried less
 * frequently than every 300 seconds (as proved to be the case at multiple
 * customer sites), the cache didn't help at all.
 *
 * To improve the performance of infrequent queries, code was added to the
 * library to allow a client (i.e. a thread in the picl plugin) to proactively
 * refresh cache entries without waiting for them to expire, thereby ensuring
 * that all volatile entries in the cache at any given time are less than 300
 * seconds old.  Whenever an SNMP query is generated to retrieve volatile data
 * that will be cached, an entry is added in a refresh queue that tracks the
 * parameters of the query and the time that it was made.  A client can query
 * the age of the oldest item in the refresh queue and - at its discretion -
 * can then force that query to be repeated in a manner that will update the
 * mibcache entry even though it hasn't expired.
 */
121 typedef struct {
122 struct picl_snmphdl *smd;
123 char *oidstrs;
124 int n_oids;
125 int row;
126 int last_fetch_time; /* in scaled hrtime */
127 } refreshq_job_t;
129 static mutex_t refreshq_lock;
130 static refreshq_job_t *refreshq = NULL;
131 static uint_t n_refreshq_slots = 0; /* # of alloc'ed job slots */
132 static uint_t n_refreshq_jobs = 0; /* # of unprocessed jobs */
133 static uint_t refreshq_next_job = 0; /* oldest unprocessed job */
134 static uint_t refreshq_next_slot = 0; /* next available job slot */
138 * Static function declarations
140 static void libpiclsnmp_init(void);
142 static int lookup_int(char *, int, int *, int);
143 static int lookup_str(char *, int, char **, int);
144 static int lookup_bitstr(char *, int, uchar_t **, uint_t *, int);
146 static oidgroup_t *locate_oid_group(struct picl_snmphdl *, char *);
147 static int search_oid_in_group(char *, char *, int);
149 static snmp_pdu_t *fetch_single(struct picl_snmphdl *, char *, int, int *);
150 static snmp_pdu_t *fetch_next(struct picl_snmphdl *, char *, int, int *);
151 static void fetch_bulk(struct picl_snmphdl *, char *, int, int, int, int *);
152 static int fetch_single_str(struct picl_snmphdl *, char *, int,
153 char **, int *);
154 static int fetch_single_int(struct picl_snmphdl *, char *, int,
155 int *, int *);
156 static int fetch_single_bitstr(struct picl_snmphdl *, char *, int,
157 uchar_t **, uint_t *, int *);
159 static int snmp_send_request(struct picl_snmphdl *, snmp_pdu_t *, int *);
160 static int snmp_recv_reply(struct picl_snmphdl *, snmp_pdu_t *, int *);
162 static int mibcache_realloc(int);
163 static void mibcache_populate(snmp_pdu_t *, int);
164 static char *oid_to_oidstr(oid *, size_t);
166 static int refreshq_realloc(int);
167 static int refreshq_add_job(struct picl_snmphdl *, char *, int, int);
170 static void
171 libpiclsnmp_init(void)
173 (void) mutex_init(&mibcache_lock, USYNC_THREAD, NULL);
174 if (mibcache_realloc(0) < 0)
175 (void) mutex_destroy(&mibcache_lock);
177 (void) mutex_init(&refreshq_lock, USYNC_THREAD, NULL);
178 (void) mutex_init(&snmp_reqid_lock, USYNC_THREAD, NULL);
180 LOGINIT();
183 picl_snmphdl_t
184 snmp_init()
186 struct picl_snmphdl *smd;
187 #ifdef USE_SOCKETS
188 int sbuf = (1 << 15); /* 16K */
189 int rbuf = (1 << 17); /* 64K */
190 char *snmp_agent_addr;
191 #endif
193 smd = (struct picl_snmphdl *)calloc(1, sizeof (struct picl_snmphdl));
194 if (smd == NULL)
195 return (NULL);
197 #ifdef USE_SOCKETS
198 if ((snmp_agent_addr = getenv("SNMP_AGENT_IPADDR")) == NULL)
199 return (NULL);
201 if ((smd->fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0)
202 return (NULL);
204 (void) setsockopt(smd->fd, SOL_SOCKET, SO_SNDBUF, &sbuf, sizeof (int));
205 (void) setsockopt(smd->fd, SOL_SOCKET, SO_RCVBUF, &rbuf, sizeof (int));
207 memset(&smd->agent_addr, 0, sizeof (struct sockaddr_in));
208 smd->agent_addr.sin_family = AF_INET;
209 smd->agent_addr.sin_port = htons(SNMP_DEFAULT_PORT);
210 smd->agent_addr.sin_addr.s_addr = inet_addr(snmp_agent_addr);
211 #else
212 smd->fd = open(DS_SNMP_DRIVER, O_RDWR);
213 if (smd->fd < 0) {
214 free(smd);
215 return (NULL);
217 #endif
219 return ((picl_snmphdl_t)smd);
222 void
223 snmp_fini(picl_snmphdl_t hdl)
225 struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
227 if (smd) {
228 if (smd->fd >= 0) {
229 (void) close(smd->fd);
231 free(smd);
236 snmp_reinit(picl_snmphdl_t hdl, int clr_linkreset)
238 struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
239 nvlist_t *nvl;
240 int i;
242 (void) mutex_lock(&mibcache_lock);
244 for (i = 0; i < n_mibcache_rows; i++) {
245 if ((nvl = mibcache[i]) != NULL)
246 nvlist_free(nvl);
249 n_mibcache_rows = 0;
250 if (mibcache) {
251 free(mibcache);
252 mibcache = NULL;
255 (void) mutex_unlock(&mibcache_lock);
257 if (clr_linkreset) {
258 if (smd == NULL || smd->fd < 0)
259 return (-1);
260 else
261 return (ioctl(smd->fd, DSSNMP_CLRLNKRESET, NULL));
264 return (0);
267 void
268 snmp_register_group(picl_snmphdl_t hdl, char *oidstrs, int n_oids, int is_vol)
270 struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
271 oidgroup_t *oidg;
272 oidgroup_t *curr, *prev;
273 char *p;
274 int i, sz;
277 * Allocate a new oidgroup_t
279 oidg = (oidgroup_t *)calloc(1, sizeof (struct oidgroup));
280 if (oidg == NULL)
281 return;
284 * Determine how much space is required to register this group
286 sz = 0;
287 p = oidstrs;
288 for (i = 0; i < n_oids; i++) {
289 sz += strlen(p) + 1;
290 p = oidstrs + sz;
294 * Create this oid group
296 if ((p = (char *)malloc(sz)) == NULL) {
297 free((void *) oidg);
298 return;
301 (void) memcpy(p, oidstrs, sz);
303 oidg->next = NULL;
304 oidg->oidstrs = p;
305 oidg->n_oids = n_oids;
306 oidg->is_volatile = is_vol;
309 * Link it to the tail of the list of oid groups
311 for (prev = NULL, curr = smd->group; curr; curr = curr->next)
312 prev = curr;
314 if (prev == NULL)
315 smd->group = oidg;
316 else
317 prev->next = oidg;
321 * snmp_get_int() takes in an OID and returns the integer value
322 * of the object referenced in the passed arg. It returns 0 on
323 * success and -1 on failure.
326 snmp_get_int(picl_snmphdl_t hdl, char *prefix, int row, int *val,
327 int *snmp_syserr)
329 struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
330 oidgroup_t *grp;
331 int ret;
332 int err = 0;
334 if (smd == NULL || prefix == NULL || val == NULL)
335 return (-1);
338 * If this item should not be cached, fetch it directly from
339 * the agent using fetch_single_xxx()
341 if ((grp = locate_oid_group(smd, prefix)) == NULL) {
342 ret = fetch_single_int(smd, prefix, row, val, &err);
344 if (snmp_syserr)
345 *snmp_syserr = err;
347 return (ret);
351 * is it in the cache ?
353 if (lookup_int(prefix, row, val, grp->is_volatile) == 0)
354 return (0);
357 * fetch it from the agent and populate the cache
359 fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
360 if (snmp_syserr)
361 *snmp_syserr = err;
364 * look it up again and return it
366 if (lookup_int(prefix, row, val, grp->is_volatile) < 0)
367 return (-1);
369 return (0);
373 * snmp_get_str() takes in an OID and returns the string value
374 * of the object referenced in the passed arg. Memory for the string
375 * is allocated within snmp_get_str() and is expected to be freed by
376 * the caller when it is no longer needed. The function returns 0
377 * on success and -1 on failure.
380 snmp_get_str(picl_snmphdl_t hdl, char *prefix, int row, char **strp,
381 int *snmp_syserr)
383 struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
384 oidgroup_t *grp;
385 char *val;
386 int ret;
387 int err = 0;
389 if (smd == NULL || prefix == NULL || strp == NULL)
390 return (-1);
392 *strp = NULL;
394 * Check if this item is cacheable or not. If not, call
395 * fetch_single_* to get it directly from the agent
397 if ((grp = locate_oid_group(smd, prefix)) == NULL) {
398 ret = fetch_single_str(smd, prefix, row, strp, &err);
400 if (snmp_syserr)
401 *snmp_syserr = err;
403 return (ret);
407 * See if it's in the cache already
409 if (lookup_str(prefix, row, &val, grp->is_volatile) == 0) {
410 if ((*strp = strdup(val)) == NULL)
411 return (-1);
412 else
413 return (0);
417 * Fetch it from the agent and populate cache
419 fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
420 if (snmp_syserr)
421 *snmp_syserr = err;
424 * Retry lookup
426 if (lookup_str(prefix, row, &val, grp->is_volatile) < 0)
427 return (-1);
430 if ((*strp = strdup(val)) == NULL)
431 return (-1);
432 else
433 return (0);
437 * snmp_get_bitstr() takes in an OID and returns the bit string value
438 * of the object referenced in the passed args. Memory for the bitstring
439 * is allocated within the function and is expected to be freed by
440 * the caller when it is no longer needed. The function returns 0
441 * on success and -1 on failure.
444 snmp_get_bitstr(picl_snmphdl_t hdl, char *prefix, int row, uchar_t **bitstrp,
445 uint_t *nbytes, int *snmp_syserr)
447 struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
448 oidgroup_t *grp;
449 uchar_t *val;
450 int ret;
451 int err = 0;
453 if (smd == NULL || prefix == NULL || bitstrp == NULL || nbytes == NULL)
454 return (-1);
456 *bitstrp = NULL;
458 * Check if this item is cacheable or not. If not, call
459 * fetch_single_* to get it directly from the agent
461 if ((grp = locate_oid_group(smd, prefix)) == NULL) {
462 ret = fetch_single_bitstr(smd, prefix, row, bitstrp,
463 nbytes, &err);
465 if (snmp_syserr)
466 *snmp_syserr = err;
468 return (ret);
472 * See if it's in the cache already
474 if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) == 0) {
475 if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
476 return (-1);
477 (void) memcpy(*bitstrp, (const void *)val, *nbytes);
478 return (0);
482 * Fetch it from the agent and populate cache
484 fetch_bulk(smd, grp->oidstrs, grp->n_oids, row, grp->is_volatile, &err);
485 if (snmp_syserr)
486 *snmp_syserr = err;
489 * Retry lookup
491 if (lookup_bitstr(prefix, row, &val, nbytes, grp->is_volatile) < 0)
492 return (-1);
494 if ((*bitstrp = (uchar_t *)calloc(*nbytes, 1)) == NULL)
495 return (-1);
496 (void) memcpy(*bitstrp, (const void *)val, *nbytes);
498 return (0);
502 * snmp_get_nextrow() is similar in operation to SNMP_GETNEXT, but
503 * only just. In particular, this is only expected to return the next
504 * valid row number for the same object, not its value. Since we don't
505 * have any other means, we use this to determine the number of rows
506 * in the table (and the valid ones). This function returns 0 on success
507 * and -1 on failure.
510 snmp_get_nextrow(picl_snmphdl_t hdl, char *prefix, int row, int *nextrow,
511 int *snmp_syserr)
513 struct picl_snmphdl *smd = (struct picl_snmphdl *)hdl;
514 snmp_pdu_t *reply_pdu;
515 pdu_varlist_t *vp;
516 char *nxt_oidstr;
517 int err = 0;
519 if (smd == NULL || prefix == NULL || nextrow == NULL) {
520 if (snmp_syserr)
521 *snmp_syserr = EINVAL;
522 return (-1);
526 * The get_nextrow results should *never* go into any cache,
527 * since these relationships are dynamically discovered each time.
529 if ((reply_pdu = fetch_next(smd, prefix, row, &err)) == NULL) {
530 if (snmp_syserr)
531 *snmp_syserr = err;
532 return (-1);
536 * We are not concerned about the "value" of the lexicographically
537 * next object; we only care about the name of that object and
538 * its row number (and whether such an object exists or not).
540 vp = reply_pdu->vars;
543 * This indicates that we're at the end of the MIB view.
545 if (vp == NULL || vp->name == NULL || vp->type == SNMP_NOSUCHOBJECT ||
546 vp->type == SNMP_NOSUCHINSTANCE || vp->type == SNMP_ENDOFMIBVIEW) {
547 snmp_free_pdu(reply_pdu);
548 if (snmp_syserr)
549 *snmp_syserr = ENOSPC;
550 return (-1);
554 * need to be able to convert the OID
556 if ((nxt_oidstr = oid_to_oidstr(vp->name, vp->name_len - 1)) == NULL) {
557 snmp_free_pdu(reply_pdu);
558 if (snmp_syserr)
559 *snmp_syserr = ENOMEM;
560 return (-1);
564 * We're on to the next table.
566 if (strcmp(nxt_oidstr, prefix) != 0) {
567 free(nxt_oidstr);
568 snmp_free_pdu(reply_pdu);
569 if (snmp_syserr)
570 *snmp_syserr = ENOENT;
571 return (-1);
575 * Ok, so we've got an oid that's simply the next valid row of the
576 * passed on object, return this row number.
578 *nextrow = (vp->name)[vp->name_len-1];
580 free(nxt_oidstr);
581 snmp_free_pdu(reply_pdu);
583 return (0);
587 * Request ids for snmp messages to the agent are sequenced here.
590 snmp_get_reqid(void)
592 int ret;
594 (void) mutex_lock(&snmp_reqid_lock);
596 ret = snmp_reqid++;
598 (void) mutex_unlock(&snmp_reqid_lock);
600 return (ret);
603 static int
604 lookup_int(char *prefix, int row, int *valp, int is_vol)
606 int32_t *val_arr;
607 uint_t nelem;
608 int now;
609 int elapsed;
611 (void) mutex_lock(&mibcache_lock);
613 if (row >= n_mibcache_rows) {
614 (void) mutex_unlock(&mibcache_lock);
615 return (-1);
618 if (mibcache[row] == NULL) {
619 (void) mutex_unlock(&mibcache_lock);
620 return (-1);
624 * If this is a volatile property, we should be searching
625 * for an integer-timestamp pair
627 if (is_vol) {
628 if (nvlist_lookup_int32_array(mibcache[row], prefix,
629 &val_arr, &nelem) != 0) {
630 (void) mutex_unlock(&mibcache_lock);
631 return (-1);
633 if (nelem != 2 || val_arr[1] < 0) {
634 (void) mutex_unlock(&mibcache_lock);
635 return (-1);
637 now = GET_SCALED_HRTIME();
638 elapsed = now - val_arr[1];
639 if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
640 (void) mutex_unlock(&mibcache_lock);
641 return (-1);
644 *valp = (int)val_arr[0];
645 } else {
646 if (nvlist_lookup_int32(mibcache[row], prefix, valp) != 0) {
647 (void) mutex_unlock(&mibcache_lock);
648 return (-1);
652 (void) mutex_unlock(&mibcache_lock);
654 return (0);
657 static int
658 lookup_str(char *prefix, int row, char **valp, int is_vol)
660 char **val_arr;
661 uint_t nelem;
662 int now;
663 int elapsed;
665 (void) mutex_lock(&mibcache_lock);
667 if (row >= n_mibcache_rows) {
668 (void) mutex_unlock(&mibcache_lock);
669 return (-1);
672 if (mibcache[row] == NULL) {
673 (void) mutex_unlock(&mibcache_lock);
674 return (-1);
678 * If this is a volatile property, we should be searching
679 * for a string-timestamp pair
681 if (is_vol) {
682 if (nvlist_lookup_string_array(mibcache[row], prefix,
683 &val_arr, &nelem) != 0) {
684 (void) mutex_unlock(&mibcache_lock);
685 return (-1);
687 if (nelem != 2 || atoi(val_arr[1]) <= 0) {
688 (void) mutex_unlock(&mibcache_lock);
689 return (-1);
691 now = GET_SCALED_HRTIME();
692 elapsed = now - atoi(val_arr[1]);
693 if (elapsed < 0 || elapsed > MAX_INCACHE_TIME) {
694 (void) mutex_unlock(&mibcache_lock);
695 return (-1);
698 *valp = val_arr[0];
699 } else {
700 if (nvlist_lookup_string(mibcache[row], prefix, valp) != 0) {
701 (void) mutex_unlock(&mibcache_lock);
702 return (-1);
706 (void) mutex_unlock(&mibcache_lock);
708 return (0);
711 static int
712 lookup_bitstr(char *prefix, int row, uchar_t **valp, uint_t *nelem, int is_vol)
714 (void) mutex_lock(&mibcache_lock);
716 if (row >= n_mibcache_rows) {
717 (void) mutex_unlock(&mibcache_lock);
718 return (-1);
721 if (mibcache[row] == NULL) {
722 (void) mutex_unlock(&mibcache_lock);
723 return (-1);
727 * We don't support volatile bit string values yet. The nvlist
728 * functions don't support bitstring arrays like they do charstring
729 * arrays, so we would need to do things in a convoluted way,
730 * probably by attaching the timestamp as part of the byte array
731 * itself. However, the need for volatile bitstrings isn't there
732 * yet, to justify the effort.
734 if (is_vol) {
735 (void) mutex_unlock(&mibcache_lock);
736 return (-1);
739 if (nvlist_lookup_byte_array(mibcache[row], prefix, valp, nelem) != 0) {
740 (void) mutex_unlock(&mibcache_lock);
741 return (-1);
744 (void) mutex_unlock(&mibcache_lock);
746 return (0);
749 static int
750 search_oid_in_group(char *prefix, char *oidstrs, int n_oids)
752 char *p;
753 int i;
755 p = oidstrs;
756 for (i = 0; i < n_oids; i++) {
757 if (strcmp(p, prefix) == 0)
758 return (0);
760 p += strlen(p) + 1;
763 return (-1);
766 static oidgroup_t *
767 locate_oid_group(struct picl_snmphdl *smd, char *prefix)
769 oidgroup_t *grp;
771 if (smd == NULL)
772 return (NULL);
774 if (smd->group == NULL)
775 return (NULL);
777 for (grp = smd->group; grp; grp = grp->next) {
778 if (search_oid_in_group(prefix, grp->oidstrs,
779 grp->n_oids) == 0) {
780 return (grp);
784 return (NULL);
787 static int
788 fetch_single_int(struct picl_snmphdl *smd, char *prefix, int row, int *ival,
789 int *snmp_syserr)
791 snmp_pdu_t *reply_pdu;
792 pdu_varlist_t *vp;
794 if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
795 return (-1);
798 * Note that we don't make any distinction between unsigned int
799 * value and signed int value at this point, since we provide
800 * only snmp_get_int() at the higher level. While it is possible
801 * to provide an entirely separate interface such as snmp_get_uint(),
802 * that's quite unnecessary, because we don't do any interpretation
803 * of the received value. Besides, the sizes of int and uint are
804 * the same and the sizes of all pointers are the same (so val.iptr
805 * would be the same as val.uiptr in pdu_varlist_t). If/when we
806 * violate any of these assumptions, it will be time to add
807 * snmp_get_uint().
809 vp = reply_pdu->vars;
810 if (vp == NULL || vp->val.iptr == NULL) {
811 snmp_free_pdu(reply_pdu);
812 return (-1);
815 *ival = *(vp->val.iptr);
817 snmp_free_pdu(reply_pdu);
819 return (0);
822 static int
823 fetch_single_str(struct picl_snmphdl *smd, char *prefix, int row, char **valp,
824 int *snmp_syserr)
826 snmp_pdu_t *reply_pdu;
827 pdu_varlist_t *vp;
829 if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
830 return (-1);
832 vp = reply_pdu->vars;
833 if (vp == NULL || vp->val.str == NULL) {
834 snmp_free_pdu(reply_pdu);
835 return (-1);
838 *valp = strdup((const char *)(vp->val.str));
840 snmp_free_pdu(reply_pdu);
842 return (0);
845 static int
846 fetch_single_bitstr(struct picl_snmphdl *smd, char *prefix, int row,
847 uchar_t **valp, uint_t *nelem, int *snmp_syserr)
849 snmp_pdu_t *reply_pdu;
850 pdu_varlist_t *vp;
852 if ((reply_pdu = fetch_single(smd, prefix, row, snmp_syserr)) == NULL)
853 return (-1);
855 vp = reply_pdu->vars;
856 if (vp == NULL || vp->val.str == NULL) {
857 snmp_free_pdu(reply_pdu);
858 return (-1);
861 if ((*valp = (uchar_t *)calloc(vp->val_len, 1)) == NULL) {
862 snmp_free_pdu(reply_pdu);
863 return (-1);
866 *nelem = vp->val_len;
867 (void) memcpy(*valp, (const void *)(vp->val.str),
868 (size_t)(vp->val_len));
870 snmp_free_pdu(reply_pdu);
872 return (0);
875 static snmp_pdu_t *
876 fetch_single(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
878 snmp_pdu_t *pdu, *reply_pdu;
880 LOGGET(TAG_CMD_REQUEST, prefix, row);
882 if ((pdu = snmp_create_pdu(SNMP_MSG_GET, 0, prefix, 1, row)) == NULL)
883 return (NULL);
885 LOGPDU(TAG_REQUEST_PDU, pdu);
887 if (snmp_make_packet(pdu) < 0) {
888 snmp_free_pdu(pdu);
889 return (NULL);
892 LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);
894 if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
895 snmp_free_pdu(pdu);
896 return (NULL);
899 if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
900 snmp_free_pdu(pdu);
901 return (NULL);
904 LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);
906 reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
907 pdu->reply_pktsz);
909 LOGPDU(TAG_RESPONSE_PDU, reply_pdu);
911 snmp_free_pdu(pdu);
913 return (reply_pdu);
916 static void
917 fetch_bulk(struct picl_snmphdl *smd, char *oidstrs, int n_oids,
918 int row, int is_vol, int *snmp_syserr)
920 snmp_pdu_t *pdu, *reply_pdu;
921 int max_reps;
923 LOGBULK(TAG_CMD_REQUEST, n_oids, oidstrs, row);
926 * If we're fetching volatile properties using BULKGET, don't
927 * venture to get multiple rows (passing max_reps=0 will make
928 * snmp_create_pdu() fetch SNMP_DEF_MAX_REPETITIONS rows)
930 max_reps = is_vol ? 1 : 0;
932 pdu = snmp_create_pdu(SNMP_MSG_GETBULK, max_reps, oidstrs, n_oids, row);
933 if (pdu == NULL)
934 return;
936 LOGPDU(TAG_REQUEST_PDU, pdu);
939 * Make an ASN.1 encoded packet from the PDU information
941 if (snmp_make_packet(pdu) < 0) {
942 snmp_free_pdu(pdu);
943 return;
946 LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);
949 * Send the request packet to the agent
951 if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
952 snmp_free_pdu(pdu);
953 return;
957 * Receive response from the agent into the reply packet buffer
958 * in the request PDU
960 if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
961 snmp_free_pdu(pdu);
962 return;
965 LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);
968 * Parse the reply, validate the response and create a
969 * reply-PDU out of the information. Populate the mibcache
970 * with the received values.
972 reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
973 pdu->reply_pktsz);
974 if (reply_pdu) {
975 LOGPDU(TAG_RESPONSE_PDU, reply_pdu);
977 if (reply_pdu->errstat == SNMP_ERR_NOERROR) {
978 if (is_vol) {
979 /* Add a job to the cache refresh work queue */
980 (void) refreshq_add_job(smd, oidstrs, n_oids,
981 row);
984 mibcache_populate(reply_pdu, is_vol);
987 snmp_free_pdu(reply_pdu);
990 snmp_free_pdu(pdu);
993 static snmp_pdu_t *
994 fetch_next(struct picl_snmphdl *smd, char *prefix, int row, int *snmp_syserr)
996 snmp_pdu_t *pdu, *reply_pdu;
998 LOGNEXT(TAG_CMD_REQUEST, prefix, row);
1000 pdu = snmp_create_pdu(SNMP_MSG_GETNEXT, 0, prefix, 1, row);
1001 if (pdu == NULL)
1002 return (NULL);
1004 LOGPDU(TAG_REQUEST_PDU, pdu);
1006 if (snmp_make_packet(pdu) < 0) {
1007 snmp_free_pdu(pdu);
1008 return (NULL);
1011 LOGPKT(TAG_REQUEST_PKT, pdu->req_pkt, pdu->req_pktsz);
1013 if (snmp_send_request(smd, pdu, snmp_syserr) < 0) {
1014 snmp_free_pdu(pdu);
1015 return (NULL);
1018 if (snmp_recv_reply(smd, pdu, snmp_syserr) < 0) {
1019 snmp_free_pdu(pdu);
1020 return (NULL);
1023 LOGPKT(TAG_RESPONSE_PKT, pdu->reply_pkt, pdu->reply_pktsz);
1025 reply_pdu = snmp_parse_reply(pdu->reqid, pdu->reply_pkt,
1026 pdu->reply_pktsz);
1028 LOGPDU(TAG_RESPONSE_PDU, reply_pdu);
1030 snmp_free_pdu(pdu);
1032 return (reply_pdu);
1035 static int
1036 snmp_send_request(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
1038 extern int errno;
1039 #ifdef USE_SOCKETS
1040 int ret;
1041 #endif
1043 if (smd->fd < 0)
1044 return (-1);
1046 if (pdu == NULL || pdu->req_pkt == NULL)
1047 return (-1);
1049 #ifdef USE_SOCKETS
1050 ret = -1;
1051 while (ret < 0) {
1052 LOGIO(TAG_SENDTO, smd->fd, pdu->req_pkt, pdu->req_pktsz);
1054 ret = sendto(smd->fd, pdu->req_pkt, pdu->req_pktsz, 0,
1055 (struct sockaddr *)&smd->agent_addr,
1056 sizeof (struct sockaddr));
1057 if (ret < 0 && errno != EINTR) {
1058 return (-1);
1061 #else
1062 LOGIO(TAG_WRITE, smd->fd, pdu->req_pkt, pdu->req_pktsz);
1064 if (write(smd->fd, pdu->req_pkt, pdu->req_pktsz) < 0) {
1065 if (snmp_syserr)
1066 *snmp_syserr = errno;
1067 return (-1);
1069 #endif
1071 #ifdef SNMP_DEBUG
1072 snmp_nsends++;
1073 snmp_sentbytes += pdu->req_pktsz;
1074 #endif
1076 return (0);
1079 static int
1080 snmp_recv_reply(struct picl_snmphdl *smd, snmp_pdu_t *pdu, int *snmp_syserr)
1082 struct dssnmp_info snmp_info;
1083 size_t pktsz;
1084 uchar_t *pkt;
1085 extern int errno;
1086 #ifdef USE_SOCKETS
1087 struct sockaddr_in from;
1088 int fromlen;
1089 ssize_t msgsz;
1090 #endif
1092 if (smd->fd < 0 || pdu == NULL)
1093 return (-1);
1095 #ifdef USE_SOCKETS
1096 if ((pkt = (uchar_t *)calloc(1, SNMP_MAX_RECV_PKTSZ)) == NULL)
1097 return (-1);
1099 fromlen = sizeof (struct sockaddr_in);
1101 LOGIO(TAG_RECVFROM, smd->fd, pkt, SNMP_MAX_RECV_PKTSZ);
1103 msgsz = recvfrom(smd->fd, pkt, SNMP_MAX_RECV_PKTSZ, 0,
1104 (struct sockaddr *)&from, &fromlen);
1105 if (msgsz < 0 || msgsz >= SNMP_MAX_RECV_PKTSZ) {
1106 free(pkt);
1107 return (-1);
1110 pktsz = (size_t)msgsz;
1111 #else
1112 LOGIO(TAG_IOCTL, smd->fd, DSSNMP_GETINFO, &snmp_info);
1115 * The ioctl will block until we have snmp data available
1117 if (ioctl(smd->fd, DSSNMP_GETINFO, &snmp_info) < 0) {
1118 if (snmp_syserr)
1119 *snmp_syserr = errno;
1120 return (-1);
1123 pktsz = snmp_info.size;
1124 if ((pkt = (uchar_t *)calloc(1, pktsz)) == NULL)
1125 return (-1);
1127 LOGIO(TAG_READ, smd->fd, pkt, pktsz);
1129 if (read(smd->fd, pkt, pktsz) < 0) {
1130 free(pkt);
1131 if (snmp_syserr)
1132 *snmp_syserr = errno;
1133 return (-1);
1135 #endif
1137 pdu->reply_pkt = pkt;
1138 pdu->reply_pktsz = pktsz;
1140 #ifdef SNMP_DEBUG
1141 snmp_nrecvs++;
1142 snmp_rcvdbytes += pktsz;
1143 #endif
1145 return (0);
1148 static int
1149 mibcache_realloc(int hint)
1151 uint_t count = (uint_t)hint;
1152 nvlist_t **p;
1154 if (hint < 0)
1155 return (-1);
1157 (void) mutex_lock(&mibcache_lock);
1159 if (hint < n_mibcache_rows) {
1160 (void) mutex_unlock(&mibcache_lock);
1161 return (0);
1164 count = ((count >> MIBCACHE_BLK_SHIFT) + 1) << MIBCACHE_BLK_SHIFT;
1166 p = (nvlist_t **)calloc(count, sizeof (nvlist_t *));
1167 if (p == NULL) {
1168 (void) mutex_unlock(&mibcache_lock);
1169 return (-1);
1172 if (mibcache) {
1173 (void) memcpy((void *) p, (void *) mibcache,
1174 n_mibcache_rows * sizeof (nvlist_t *));
1175 free((void *) mibcache);
1178 mibcache = p;
1179 n_mibcache_rows = count;
1181 (void) mutex_unlock(&mibcache_lock);
1183 return (0);
1188 * Scan each variable in the returned PDU's bindings and populate
1189 * the cache appropriately
1191 static void
1192 mibcache_populate(snmp_pdu_t *pdu, int is_vol)
1194 pdu_varlist_t *vp;
1195 int row, ret;
1196 char *oidstr;
1197 int tod; /* in secs */
1198 char tod_str[MAX_INT_LEN];
1199 int ival_arr[2];
1200 char *sval_arr[2];
1203 * If we're populating volatile properties, we also store a
1204 * timestamp with each property value. When we lookup, we check the
1205 * current time against this timestamp to determine if we need to
1206 * refetch the value or not (refetch if it has been in for far too
1207 * long).
1210 if (is_vol) {
1211 tod = GET_SCALED_HRTIME();
1213 tod_str[0] = 0;
1214 (void) snprintf(tod_str, MAX_INT_LEN, "%d", tod);
1216 ival_arr[1] = tod;
1217 sval_arr[1] = (char *)tod_str;
1220 for (vp = pdu->vars; vp; vp = vp->nextvar) {
1221 if (vp->type != ASN_INTEGER && vp->type != ASN_OCTET_STR &&
1222 vp->type != ASN_BIT_STR) {
1223 continue;
1226 if (vp->name == NULL || vp->val.str == NULL)
1227 continue;
1229 row = (vp->name)[vp->name_len-1];
1231 (void) mutex_lock(&mibcache_lock);
1233 if (row >= n_mibcache_rows) {
1234 (void) mutex_unlock(&mibcache_lock);
1235 if (mibcache_realloc(row) < 0)
1236 continue;
1237 (void) mutex_lock(&mibcache_lock);
1239 ret = 0;
1240 if (mibcache[row] == NULL)
1241 ret = nvlist_alloc(&mibcache[row], NV_UNIQUE_NAME, 0);
1243 (void) mutex_unlock(&mibcache_lock);
1245 if (ret != 0)
1246 continue;
1249 * Convert the standard OID form into an oid string that
1250 * we can use as the key to lookup. Since we only search
1251 * by the prefix (mibcache is really an array of nvlist_t
1252 * pointers), ignore the leaf subid.
1254 oidstr = oid_to_oidstr(vp->name, vp->name_len - 1);
1255 if (oidstr == NULL)
1256 continue;
1258 (void) mutex_lock(&mibcache_lock);
1260 if (vp->type == ASN_INTEGER) {
1261 if (is_vol) {
1262 ival_arr[0] = *(vp->val.iptr);
1263 (void) nvlist_add_int32_array(mibcache[row],
1264 oidstr, ival_arr, 2);
1265 } else {
1266 (void) nvlist_add_int32(mibcache[row],
1267 oidstr, *(vp->val.iptr));
1270 } else if (vp->type == ASN_OCTET_STR) {
1271 if (is_vol) {
1272 sval_arr[0] = (char *)vp->val.str;
1273 (void) nvlist_add_string_array(mibcache[row],
1274 oidstr, sval_arr, 2);
1275 } else {
1276 (void) nvlist_add_string(mibcache[row],
1277 oidstr, (const char *)(vp->val.str));
1279 } else if (vp->type == ASN_BIT_STR) {
1281 * We don't support yet bit string objects that are
1282 * volatile values.
1284 if (!is_vol) {
1285 (void) nvlist_add_byte_array(mibcache[row],
1286 oidstr, (uchar_t *)(vp->val.str),
1287 (uint_t)vp->val_len);
1290 (void) mutex_unlock(&mibcache_lock);
1292 free(oidstr);
1296 static char *
1297 oid_to_oidstr(oid *objid, size_t n_subids)
1299 char *oidstr;
1300 char subid_str[MAX_INT_LEN];
1301 int i, isize;
1302 size_t oidstr_sz;
1305 * ugly, but for now this will have to do.
1307 oidstr_sz = sizeof (subid_str) * n_subids;
1308 oidstr = calloc(1, oidstr_sz);
1310 for (i = 0; i < n_subids; i++) {
1311 (void) memset(subid_str, 0, sizeof (subid_str));
1312 isize = snprintf(subid_str, sizeof (subid_str), "%d",
1313 objid[i]);
1314 if (isize >= sizeof (subid_str))
1315 return (NULL);
1317 (void) strlcat(oidstr, subid_str, oidstr_sz);
1318 if (i < (n_subids - 1))
1319 (void) strlcat(oidstr, ".", oidstr_sz);
1322 return (oidstr);
/*
 * Expand the refreshq to hold more cache refresh jobs.  Caller must already
 * hold refreshq_lock mutex.  Every expansion of the refreshq will round the
 * slot count up to the next multiple of (1 << REFRESHQ_BLK_SHIFT), rather
 * than expanding by one slot every time more space is needed.
 *
 * 'hint' is the minimum number of slots required (may be 0 for first-time
 * initialization).  Returns 0 on success (including the no-op case where
 * the queue is already big enough), -1 on a negative hint or allocation
 * failure; on failure the existing queue is left untouched.
 */
static int
refreshq_realloc(int hint)
{
	uint_t count = (uint_t)hint;
	refreshq_job_t *p;

	if (hint < 0)
		return (-1);

	/* Already large enough; nothing to do. */
	if (hint < n_refreshq_slots) {
		return (0);
	}

	/* Round count up to next multiple of (1 << REFRESHQ_BLK_SHIFT) */
	count = ((count >> REFRESHQ_BLK_SHIFT) + 1) << REFRESHQ_BLK_SHIFT;

	p = (refreshq_job_t *)calloc(count, sizeof (refreshq_job_t));
	if (p == NULL) {
		return (-1);
	}

	if (refreshq) {
		if (n_refreshq_jobs == 0) {
			/* Simple case, nothing to copy */
			refreshq_next_job = 0;
			refreshq_next_slot = 0;
		} else if (refreshq_next_slot > refreshq_next_job) {
			/* Simple case, single copy preserves everything */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    n_refreshq_jobs * sizeof (refreshq_job_t));
		} else {
			/*
			 * Complex case.  The jobs in the refresh queue wrap
			 * around the end of the array in which they are stored.
			 * To preserve chronological order in the new allocated
			 * array, we need to copy the jobs at the end of the old
			 * array to the beginning of the new one and place the
			 * jobs from the beginning of the old array after them.
			 */
			uint_t tail_jobs, head_jobs;

			tail_jobs = n_refreshq_slots - refreshq_next_job;
			head_jobs = n_refreshq_jobs - tail_jobs;

			/* Copy the jobs from the end of the old array */
			(void) memcpy((void *) p,
			    (void *) &(refreshq[refreshq_next_job]),
			    tail_jobs * sizeof (refreshq_job_t));

			/* Copy the jobs from the beginning of the old array */
			(void) memcpy((void *) &(p[tail_jobs]),
			    (void *) &(refreshq[0]),
			    head_jobs * sizeof (refreshq_job_t));

			/* update the job and slot indices to match */
			refreshq_next_job = 0;
			refreshq_next_slot = n_refreshq_jobs;
		}
		free((void *) refreshq);
	} else {
		/* First initialization */
		refreshq_next_job = 0;
		refreshq_next_slot = 0;
		n_refreshq_jobs = 0;
	}

	refreshq = p;
	n_refreshq_slots = count;

	return (0);
}
1405 * Add a new job to the refreshq. If there aren't any open slots, attempt to
1406 * expand the queue first. Return -1 if unable to add the job to the work
1407 * queue, or 0 if the job was added OR if an existing job with the same
1408 * parameters is already pending.
1410 static int
1411 refreshq_add_job(struct picl_snmphdl *smd, char *oidstrs, int n_oids, int row)
1413 int i;
1414 int job;
1416 (void) mutex_lock(&refreshq_lock);
1419 * Can't do anything without a queue. Either the client never
1420 * initialized the refresh queue or the initial memory allocation
1421 * failed.
1423 if (refreshq == NULL) {
1424 (void) mutex_unlock(&refreshq_lock);
1425 return (-1);
1429 * If there is already a job pending with the same parameters as the job
1430 * we have been asked to add, we apparently let an entry expire and it
1431 * is now being reloaded. Rather than add another job for the same
1432 * entry, we skip adding the new job and let the existing job address
1433 * it.
1435 for (i = 0, job = refreshq_next_job; i < n_refreshq_jobs; i++,
1436 job = (job + 1) % n_refreshq_slots) {
1437 if ((refreshq[job].row == row) &&
1438 (refreshq[job].n_oids == n_oids) &&
1439 (refreshq[job].oidstrs == oidstrs)) {
1440 (void) mutex_unlock(&refreshq_lock);
1441 return (0);
1447 * If the queue is full, we need to expand it
1449 if (n_refreshq_jobs == n_refreshq_slots) {
1450 if (refreshq_realloc(n_refreshq_slots + 1) < 0) {
1452 * Can't expand the job queue, so we drop this job on
1453 * the floor. No data is lost... we just allow some
1454 * data in the mibcache to expire.
1456 (void) mutex_unlock(&refreshq_lock);
1457 return (-1);
1462 * There is room in the queue, so add the new job. We are actually
1463 * taking a timestamp for this job that is slightly earlier than when
1464 * the mibcache entry will be updated, but since we're trying to update
1465 * the mibcache entry before it expires anyway, the earlier timestamp
1466 * here is acceptable.
1468 refreshq[refreshq_next_slot].smd = smd;
1469 refreshq[refreshq_next_slot].oidstrs = oidstrs;
1470 refreshq[refreshq_next_slot].n_oids = n_oids;
1471 refreshq[refreshq_next_slot].row = row;
1472 refreshq[refreshq_next_slot].last_fetch_time = GET_SCALED_HRTIME();
1475 * Update queue management variables
1477 n_refreshq_jobs += 1;
1478 refreshq_next_slot = (refreshq_next_slot + 1) % n_refreshq_slots;
1480 (void) mutex_unlock(&refreshq_lock);
1482 return (0);
1486 * Almost all of the refresh code remains dormant unless specifically
1487 * initialized by a client (the exception being that fetch_bulk() will still
1488 * call refreshq_add_job(), but the latter will return without doing anything).
1491 snmp_refresh_init(void)
1493 int ret;
1495 (void) mutex_lock(&refreshq_lock);
1497 ret = refreshq_realloc(0);
1499 (void) mutex_unlock(&refreshq_lock);
1501 return (ret);
1505 * If the client is going away, we don't want to keep doing refresh work, so
1506 * clean everything up.
1508 void
1509 snmp_refresh_fini(void)
1511 (void) mutex_lock(&refreshq_lock);
1513 n_refreshq_jobs = 0;
1514 n_refreshq_slots = 0;
1515 refreshq_next_job = 0;
1516 refreshq_next_slot = 0;
1517 free(refreshq);
1518 refreshq = NULL;
1520 (void) mutex_unlock(&refreshq_lock);
1524 * Return the number of seconds remaining before the mibcache entry associated
1525 * with the next job in the queue will expire. Note that this requires
1526 * reversing the scaling normally done on hrtime values. (The need for scaling
1527 * is purely internal, and should be hidden from clients.) If there are no jobs
1528 * in the queue, return -1. If the next job has already expired, return 0.
1531 snmp_refresh_get_next_expiration(void)
1533 int ret;
1534 int elapsed;
1536 (void) mutex_lock(&refreshq_lock);
1538 if (n_refreshq_jobs == 0) {
1539 ret = -1;
1540 } else {
1541 elapsed = GET_SCALED_HRTIME() -
1542 refreshq[refreshq_next_job].last_fetch_time;
1544 if (elapsed >= MAX_INCACHE_TIME) {
1545 ret = 0;
1546 } else {
1547 ret = (MAX_INCACHE_TIME - elapsed) * HRTIME_SCALE;
1551 (void) mutex_unlock(&refreshq_lock);
1553 return (ret);
1557 * Given the number of seconds the client wants to spend on each cyle of
1558 * processing jobs and then sleeping, return a suggestion for the number of jobs
1559 * the client should process, calculated by dividing the client's cycle duration
1560 * by MAX_INCACHE_TIME and multiplying the result by the total number of jobs in
1561 * the queue. (Note that the actual implementation of that calculation is done
1562 * in a different order to avoid losing fractional values during integer
1563 * arithmetic.)
1566 snmp_refresh_get_cycle_hint(int secs)
1568 int jobs;
1570 (void) mutex_lock(&refreshq_lock);
1573 * First, we need to scale the client's cycle time to get it into the
1574 * same units we use internally (i.e. tens of seconds). We round up, as
1575 * it makes more sense for the client to process extra jobs than
1576 * insufficient jobs. If the client's desired cycle time is greater
1577 * than MAX_INCACHE_TIME, we just return the current total number of
1578 * jobs.
1580 secs = (secs + HRTIME_SCALE - 1) / HRTIME_SCALE;
1582 jobs = (n_refreshq_jobs * secs) / MAX_INCACHE_TIME;
1583 if (jobs > n_refreshq_jobs) {
1584 jobs = n_refreshq_jobs;
1587 (void) mutex_unlock(&refreshq_lock);
1589 return (jobs);
1593 * Process the next job on the refresh queue by invoking fetch_bulk() with the
1594 * recorded parameters. Return -1 if no job was processed (e.g. because there
1595 * aren't any available), or 0 if a job was processed. We don't actually care
1596 * if fetch_bulk() fails, since we're just working on cache entry refreshing and
1597 * the worst case result of failing here is a longer delay getting that data the
1598 * next time it is requested.
1601 snmp_refresh_process_job(void)
1603 struct picl_snmphdl *smd;
1604 char *oidstrs;
1605 int n_oids;
1606 int row;
1607 int err;
1609 (void) mutex_lock(&refreshq_lock);
1611 if (n_refreshq_jobs == 0) {
1612 (void) mutex_unlock(&refreshq_lock);
1614 return (-1);
1617 smd = refreshq[refreshq_next_job].smd;
1618 oidstrs = refreshq[refreshq_next_job].oidstrs;
1619 n_oids = refreshq[refreshq_next_job].n_oids;
1620 row = refreshq[refreshq_next_job].row;
1622 refreshq_next_job = (refreshq_next_job + 1) % n_refreshq_slots;
1623 n_refreshq_jobs--;
1625 (void) mutex_unlock(&refreshq_lock);
1629 * fetch_bulk() is going to come right back into the refresh code to add
1630 * a new job for the entry we just loaded, which means we have to make
1631 * the call without holding the refreshq_lock mutex.
1633 fetch_bulk(smd, oidstrs, n_oids, row, 1, &err);
1635 return (0);