16664 Update AMD microcode to 20240710
[illumos-gate.git] / usr / src / uts / common / fs / portfs / port_vnops.c
blobab95c0a1f8af267f3810330ce6b3b0f7253992f7
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
31 #include <sys/types.h>
32 #include <sys/vnode.h>
33 #include <sys/vfs_opreg.h>
34 #include <sys/kmem.h>
35 #include <fs/fs_subr.h>
36 #include <sys/proc.h>
37 #include <sys/kstat.h>
38 #include <sys/port_impl.h>
40 /* local functions */
41 static int port_open(struct vnode **, int, cred_t *, caller_context_t *);
42 static int port_close(struct vnode *, int, int, offset_t, cred_t *,
43 caller_context_t *);
44 static int port_getattr(struct vnode *, struct vattr *, int, cred_t *,
45 caller_context_t *);
46 static int port_access(struct vnode *, int, int, cred_t *, caller_context_t *);
47 static int port_realvp(vnode_t *, vnode_t **, caller_context_t *);
48 static int port_poll(vnode_t *, short, int, short *, struct pollhead **,
49 caller_context_t *);
50 static void port_inactive(struct vnode *, cred_t *, caller_context_t *);
/*
 * Vnode operations template for event-port vnodes.  Ports implement only
 * the operations that make sense for a queue-backed pseudo file (open,
 * close, getattr, access, inactive, realvp, poll); the remaining entries
 * are explicitly routed to fs_error() so they fail instead of falling
 * through to default handling.
 */
const fs_operation_def_t port_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = port_open },
	VOPNAME_CLOSE,		{ .vop_close = port_close },
	VOPNAME_GETATTR,	{ .vop_getattr = port_getattr },
	VOPNAME_ACCESS,		{ .vop_access = port_access },
	VOPNAME_INACTIVE,	{ .vop_inactive = port_inactive },
	VOPNAME_FRLOCK,		{ .error = fs_error },
	VOPNAME_REALVP,		{ .vop_realvp = port_realvp },
	VOPNAME_POLL,		{ .vop_poll = port_poll },
	VOPNAME_PATHCONF,	{ .error = fs_error },
	VOPNAME_DISPOSE,	{ .error = fs_error },
	VOPNAME_GETSECATTR,	{ .error = fs_error },
	VOPNAME_SHRLOCK,	{ .error = fs_error },
	NULL,			NULL
};
68 /* ARGSUSED */
69 static int
70 port_open(struct vnode **vpp, int flag, cred_t *cr, caller_context_t *ct)
72 return (0);
/*
 * port_discard_events() scans the port event queue for events owned
 * by current proc. Non-shareable events will be discarded, all other
 * events remain in the event queue.
 */
void
port_discard_events(port_queue_t *portq)
{
	port_kevent_t	*kevp;
	pid_t		pid = curproc->p_pid;

	/*
	 * The call to port_block() is required to avoid interaction
	 * with other threads in port_get(n).
	 */
	mutex_enter(&portq->portq_mutex);
	port_block(portq);
	port_push_eventq(portq);	/* empty temporary queue */

	/*
	 * Walk the main event list.  Events posted by the current process
	 * that are marked PORT_KEV_NOSHARE are flagged PORT_KEV_FREE so
	 * they get discarded; everything else stays queued.
	 */
	kevp = list_head(&portq->portq_list);
	while (kevp) {
		if (kevp->portkev_pid == pid) {
			/* own event, check if it is shareable */
			if (kevp->portkev_flags & PORT_KEV_NOSHARE)
				kevp->portkev_flags |= PORT_KEV_FREE;
		}
		kevp = list_next(&portq->portq_list, kevp);
	}

	port_unblock(portq);
	mutex_exit(&portq->portq_mutex);
}
/*
 * Called from port_close().
 * Free all kernel events structures which are still in the event queue.
 * On return the queue is empty and no pollwakeup() thread is still
 * referencing this port.
 */
static void
port_close_events(port_queue_t *portq)
{
	port_kevent_t	*pkevp;
	int		events;		/* ignore events */

	mutex_enter(&portq->portq_mutex);
	while (pkevp = list_head(&portq->portq_list)) {
		portq->portq_nent--;
		list_remove(&portq->portq_list, pkevp);
		if (pkevp->portkev_callback) {
			/* let the event source clean up via its callback */
			(void) (*pkevp->portkev_callback)(pkevp->portkev_arg,
			    &events, pkevp->portkev_pid, PORT_CALLBACK_CLOSE,
			    pkevp);
		}
		/*
		 * Drop portq_mutex around the free; NOTE(review): presumably
		 * port_free_event_local() must not be entered with the queue
		 * lock held -- confirm against its implementation.  The list
		 * is re-read from the head after reacquiring the lock.
		 */
		mutex_exit(&portq->portq_mutex);
		port_free_event_local(pkevp, 0);
		mutex_enter(&portq->portq_mutex);
	}

	/*
	 * Wait for any thread in pollwakeup(), accessing this port to
	 * finish.
	 */
	while (portq->portq_flags & PORTQ_POLLWK_PEND) {
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	}
	mutex_exit(&portq->portq_mutex);
}
/*
 * The port_close() function is called from standard close(2) when
 * the file descriptor is of type S_IFPORT/VPORT.
 * Port file descriptors behave like standard file descriptors. It means,
 * the port file/vnode is only destroyed on last close.
 * If the reference counter is > 1 then
 * - sources associated with the port will be notified about the close,
 * - objects associated with the port will be dissociated,
 * - pending and delivered events will be discarded.
 * On last close all references and caches will be removed. The vnode itself
 * will be destroyed with VOP_RELE().
 */
/* ARGSUSED */
static int
port_close(struct vnode *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	port_t		*pp;
	port_queue_t	*portq;
	port_source_t	*ps;
	port_source_t	*ps_next;
	int		source;

	pp = VTOEP(vp);
	mutex_enter(&pp->port_mutex);
	if (pp->port_flags & PORT_CLOSED) {
		/* already closed, nothing left to do */
		mutex_exit(&pp->port_mutex);
		return (0);
	}
	mutex_exit(&pp->port_mutex);

	portq = &pp->port_queue;
	if (count > 1) {
		/*
		 * It is not the last close.
		 * Remove/free all event resources owned by the current proc
		 * First notify all with the port associated sources about the
		 * close(2). The last argument of the close callback function
		 * advises the source about the type of the close.
		 * If the port was set in alert mode by the current process
		 * then remove the alert mode.
		 */

		/* check alert mode of the port */
		mutex_enter(&portq->portq_mutex);
		if ((portq->portq_flags & PORTQ_ALERT) &&
		    (portq->portq_alert.portal_pid == curproc->p_pid))
			portq->portq_flags &= ~PORTQ_ALERT;
		mutex_exit(&portq->portq_mutex);

		/* notify all event sources about port_close() */
		mutex_enter(&portq->portq_source_mutex);
		for (source = 0; source < PORT_SCACHE_SIZE; source++) {
			ps = portq->portq_scache[PORT_SHASH(source)];
			for (; ps != NULL; ps = ps->portsrc_next) {
				/* 0 == not the last close */
				if (ps->portsrc_close != NULL)
					(*ps->portsrc_close)
					    (ps->portsrc_closearg, pp->port_fd,
					    curproc->p_pid, 0);
			}
		}
		mutex_exit(&portq->portq_source_mutex);
		port_discard_events(&pp->port_queue);
		return (0);
	}

	/*
	 * We are executing the last close of the port -> discard everything
	 * Make sure that all threads/processes accessing this port leave
	 * the kernel immediately.
	 */
	mutex_enter(&portq->portq_mutex);
	portq->portq_flags |= PORTQ_CLOSE;
	while (portq->portq_thrcnt > 0) {
		/* wake any thread blocked in port_get(n) and wait it out */
		if (portq->portq_thread != NULL)
			cv_signal(&portq->portq_thread->portget_cv);
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	}
	mutex_exit(&portq->portq_mutex);

	/*
	 * Send "last close" message to associated sources.
	 * - new event allocation requests are being denied since uf_file entry
	 *   was set to NULL in closeandsetf().
	 * - all still allocated event structures must be returned to the
	 *   port immediately:
	 *   - call port_free_event(*event) or
	 *   - call port_send_event(*event) to complete event operations
	 *     which need activities in a dedicated process environment.
	 * The port_close() function waits until all allocated event structures
	 * are delivered back to the port.
	 */
	mutex_enter(&portq->portq_source_mutex);
	for (source = 0; source < PORT_SCACHE_SIZE; source++) {
		ps = portq->portq_scache[PORT_SHASH(source)];
		for (; ps != NULL; ps = ps_next) {
			/* capture next before the entry is freed */
			ps_next = ps->portsrc_next;
			/* 1 == last close */
			if (ps->portsrc_close != NULL)
				(*ps->portsrc_close)(ps->portsrc_closearg,
				    pp->port_fd, curproc->p_pid, 1);
			kmem_free(ps, sizeof (port_source_t));
		}
	}
	kmem_free(portq->portq_scache,
	    PORT_SCACHE_SIZE * sizeof (port_source_t *));
	portq->portq_scache = NULL;
	mutex_exit(&portq->portq_source_mutex);

	mutex_enter(&portq->portq_mutex);
	/* Wait for outstanding events */
	while (pp->port_curr > portq->portq_nent)
		cv_wait(&portq->portq_closecv, &portq->portq_mutex);
	mutex_exit(&portq->portq_mutex);

	/*
	 * If PORT_SOURCE_FD objects were not associated with the port then
	 * it is necessary to free the port_fdcache structure here.
	 */
	if (portq->portq_pcp != NULL) {
		mutex_destroy(&portq->portq_pcp->pc_lock);
		kmem_free(portq->portq_pcp, sizeof (port_fdcache_t));
		portq->portq_pcp = NULL;
	}

	/*
	 * Now all events are passed back to the port,
	 * discard remaining events in the port queue
	 */
	port_close_events(portq);
	return (0);
}
/*
 * The port_poll() function is the VOP_POLL() entry of event ports.
 * Event ports return:
 * POLLIN  : events are available in the event queue
 * POLLOUT : event queue can still accept events
 */
/*ARGSUSED*/
static int
port_poll(vnode_t *vp, short events, int anyyet, short *reventsp,
    struct pollhead **phpp, caller_context_t *ct)
{
	port_t		*pp;
	port_queue_t	*portq;
	short		levents;	/* locally computed revents */

	pp = VTOEP(vp);
	portq = &pp->port_queue;
	levents = 0;
	mutex_enter(&portq->portq_mutex);
	/* readable when at least one event is queued */
	if (portq->portq_nent)
		levents = POLLIN;
	/* writable while the port has not reached its event limit */
	if (pp->port_curr < pp->port_max_events)
		levents |= POLLOUT;
	/* report only the conditions the caller asked for */
	levents &= events;
	*reventsp = levents;
	/*
	 * Hand back the pollhead when nothing is pending yet, or always
	 * for edge-triggered (POLLET) callers, and remember which
	 * directions to wake up for via the PORTQ_POLLIN/POLLOUT flags.
	 */
	if ((levents == 0 && !anyyet) || (events & POLLET)) {
		*phpp = &pp->port_pollhd;
		portq->portq_flags |= events & POLLIN ? PORTQ_POLLIN : 0;
		portq->portq_flags |= events & POLLOUT ? PORTQ_POLLOUT : 0;
	}
	mutex_exit(&portq->portq_mutex);
	return (0);
}
311 /* ARGSUSED */
312 static int
313 port_getattr(struct vnode *vp, struct vattr *vap, int flags, cred_t *cr,
314 caller_context_t *ct)
316 port_t *pp;
317 extern dev_t portdev;
319 pp = VTOEP(vp);
321 vap->va_type = vp->v_type; /* vnode type (for create) */
322 vap->va_mode = 0; /* file access mode */
323 vap->va_uid = pp->port_uid; /* owner user id */
324 vap->va_gid = pp->port_gid; /* owner group id */
325 vap->va_fsid = portdev; /* file system id */
326 vap->va_nodeid = (ino64_t)0; /* node id */
327 vap->va_nlink = vp->v_count; /* number of references to file */
328 vap->va_size = (u_offset_t)pp->port_queue.portq_nent; /* file size */
329 vap->va_atime = pp->port_ctime; /* time of last access */
330 vap->va_mtime = pp->port_ctime; /* time of last modification */
331 vap->va_ctime = pp->port_ctime; /* time file ``created'' */
332 vap->va_rdev = portdev; /* device the file represents */
333 vap->va_blksize = 0; /* fundamental block size */
334 vap->va_nblocks = (fsblkcnt64_t)0; /* # of blocks allocated */
335 vap->va_seq = 0; /* sequence number */
337 return (0);
/*
 * Destroy the port.
 * VOP_INACTIVE entry point, invoked when the last reference to the
 * port vnode is released.
 */
/* ARGSUSED */
static void
port_inactive(struct vnode *vp, cred_t *cr, caller_context_t *ct)
{
	port_t	*pp = VTOEP(vp);
	extern	port_kstat_t port_kstat;

	/* drop global port accounting, per-process count, and the kstat */
	mutex_enter(&port_control.pc_mutex);
	port_control.pc_nents--;
	curproc->p_portcnt--;
	port_kstat.pks_ports.value.ui32--;
	mutex_exit(&port_control.pc_mutex);

	/* release the vnode, then tear down the port's locks and memory */
	vn_free(vp);
	mutex_destroy(&pp->port_mutex);
	mutex_destroy(&pp->port_queue.portq_mutex);
	mutex_destroy(&pp->port_queue.portq_source_mutex);
	kmem_free(pp, sizeof (port_t));
}
362 /* ARGSUSED */
363 static int
364 port_access(struct vnode *vp, int mode, int flags, cred_t *cr,
365 caller_context_t *ct)
367 return (0);
370 /* ARGSUSED */
371 static int
372 port_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
374 *vpp = vp;
375 return (0);