/*	$NetBSD: spec_vnops.c,v 1.165 2016/09/08 08:45:52 pgoyette Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.15 (Berkeley) 7/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spec_vnops.c,v 1.165 2016/09/08 08:45:52 pgoyette Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>
#include <sys/tty.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/module.h>
#include <sys/kmem.h>		/* kmem_alloc/kmem_free */

#include <uvm/uvm_extern.h>	/* uvm_vnp_setsize */

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

/* symbolic sleep message strings for devices */
const char	devopn[] = "devopn";
const char	devio[] = "devio";
const char	devwait[] = "devwait";
const char	devin[] = "devin";
const char	devout[] = "devout";
const char	devioc[] = "devioc";
const char	devcls[] = "devcls";

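/*
 * specfs_hash maps a device number to the chain of vnodes that alias
 * that device, so that all aliases can share a single specdev_t.
 * SPECHASH folds the dev_t into a bucket index; when SPECHSZ is a
 * power of two the reduction is a cheap mask instead of a modulo.
 */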
#define	SPECHSZ	64
#if	((SPECHSZ&(SPECHSZ-1)) == 0)
#define	SPECHASH(rdev)	(((rdev>>5)+(rdev))&(SPECHSZ-1))
#else
#define	SPECHASH(rdev)	(((unsigned)((rdev>>5)+(rdev)))%SPECHSZ)
#endif

static	vnode_t	*specfs_hash[SPECHSZ];
extern	struct mount *dead_rootmount;

/*
 * This vnode operations vector is used for special device nodes
 * created from whole cloth by the kernel.  For the ops vector for
 * vnodes built from special devices found in a filesystem, see (e.g.)
 * ffs_specop_entries[] in ffs_vnops.c or the equivalent for other
 * filesystems.
 */

int (**spec_vnodeop_p)(void *);
const struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_fallocate_desc, spec_fallocate },	/* fallocate */
	{ &vop_fdiscard_desc, spec_fdiscard },		/* fdiscard */
	{ &vop_fcntl_desc, spec_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

static kauth_listener_t rawio_listener;

/* Returns true if vnode is /dev/mem or /dev/kmem. */
bool
iskmemvp(struct vnode *vp)
{
	return ((vp->v_type == VCHR) && iskmemdev(vp->v_rdev));
}

/*
 * Returns true if dev is /dev/mem or /dev/kmem.
 */
int
iskmemdev(dev_t dev)
{
	/* mem_no is emitted by config(8) into the generated devsw.c */
	extern const int mem_no;

	/* minor 14 is /dev/io on i386 with COMPAT_10 */
	return (major(dev) == mem_no && (minor(dev) < 2 || minor(dev) == 14));
}

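/*
 * kauth(9) listener on the device scope.  Requests for raw device I/O
 * reach this point only after the usual file system permission checks
 * have passed, so both RAWIO_SPEC and RAWIO_PASSTHRU actions are
 * allowed outright; any other action is deferred to other listeners.
 */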
static int
rawio_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	int result;

	result = KAUTH_RESULT_DEFER;

	if ((action != KAUTH_DEVICE_RAWIO_SPEC) &&
	    (action != KAUTH_DEVICE_RAWIO_PASSTHRU))
		return result;

	/* Access is mandated by permissions. */
	result = KAUTH_RESULT_ALLOW;

	return result;
}

void
spec_init(void)
{

	rawio_listener = kauth_listen_scope(KAUTH_SCOPE_DEVICE,
	    rawio_listener_cb, NULL);
}

/*
 * Initialize a vnode that represents a device.
 */
void
spec_node_init(vnode_t *vp, dev_t rdev)
{
	specnode_t *sn;
	specdev_t *sd;
	vnode_t *vp2;
	vnode_t **vpp;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_specnode == NULL);

	/*
	 * Search the hash table for this device.  If known, add a
	 * reference to the device structure.  If not known, create
	 * a new entry to represent the device.  In all cases add
	 * the vnode to the hash table.
	 */
	sn = kmem_alloc(sizeof(*sn), KM_SLEEP);
	if (sn == NULL) {
		/* XXX */
		panic("spec_node_init: unable to allocate memory");
	}
	sd = kmem_alloc(sizeof(*sd), KM_SLEEP);
	if (sd == NULL) {
		/* XXX */
		panic("spec_node_init: unable to allocate memory");
	}
	mutex_enter(&device_lock);
	vpp = &specfs_hash[SPECHASH(rdev)];
	for (vp2 = *vpp; vp2 != NULL; vp2 = vp2->v_specnext) {
		KASSERT(vp2->v_specnode != NULL);
		if (rdev == vp2->v_rdev && vp->v_type == vp2->v_type) {
			break;
		}
	}
	if (vp2 == NULL) {
		/* No existing record, create a new one. */
		sd->sd_rdev = rdev;
		sd->sd_mountpoint = NULL;
		sd->sd_lockf = NULL;
		sd->sd_refcnt = 1;
		sd->sd_opencnt = 0;
		sd->sd_bdevvp = NULL;
		sn->sn_dev = sd;
		sd = NULL;
	} else {
		/* Use the existing record. */
		sn->sn_dev = vp2->v_specnode->sn_dev;
		sn->sn_dev->sd_refcnt++;
	}
	/* Insert vnode into the hash chain. */
	sn->sn_opencnt = 0;
	sn->sn_rdev = rdev;
	sn->sn_gone = false;
	vp->v_specnode = sn;
	vp->v_specnext = *vpp;
	*vpp = vp;
	mutex_exit(&device_lock);

	/* Free the record we allocated if unused. */
	if (sd != NULL) {
		kmem_free(sd, sizeof(*sd));
	}
}
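
/*
 * A minimal usage sketch (hypothetical caller): a file system that has
 * just read in an inode describing a device would attach the specfs
 * state to the fresh vnode with something like
 *
 *	vp->v_type = VBLK;		(or VCHR)
 *	spec_node_init(vp, rdev);
 *
 * after which the vnode appears in the hash table above and aliases
 * of the same device share one specdev_t.
 */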

/*
 * Look up a vnode by device number and return it referenced.
 */
int
spec_node_lookup_by_dev(enum vtype type, dev_t dev, vnode_t **vpp)
{
	int error;
	vnode_t *vp;

	mutex_enter(&device_lock);
	for (vp = specfs_hash[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (type == vp->v_type && dev == vp->v_rdev) {
			mutex_enter(vp->v_interlock);
			/* If clean or being cleaned, then ignore it. */
			if (vdead_check(vp, VDEAD_NOWAIT) == 0)
				break;
			mutex_exit(vp->v_interlock);
		}
	}
	KASSERT(vp == NULL || mutex_owned(vp->v_interlock));
	if (vp == NULL) {
		mutex_exit(&device_lock);
		return ENOENT;
	}
	/*
	 * If it is an opened block device return the opened vnode.
	 */
	if (type == VBLK && vp->v_specnode->sn_dev->sd_bdevvp != NULL) {
		mutex_exit(vp->v_interlock);
		vp = vp->v_specnode->sn_dev->sd_bdevvp;
		mutex_enter(vp->v_interlock);
	}
	mutex_exit(&device_lock);
	error = vget(vp, 0, true /* wait */);
	if (error != 0)
		return error;
	*vpp = vp;

	return 0;
}
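
/*
 * Example (a sketch; assumes "dev" holds a valid block device number):
 *
 *	vnode_t *vp;
 *	int error;
 *
 *	error = spec_node_lookup_by_dev(VBLK, dev, &vp);
 *	if (error == 0) {
 *		...use the device vnode...
 *		vrele(vp);	drop the reference vget() took
 *	}
 */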

/*
 * Look up a vnode by the file system mounted on it and return it
 * referenced.
 */
int
spec_node_lookup_by_mount(struct mount *mp, vnode_t **vpp)
{
	int i, error;
	vnode_t *vp, *vq;

	mutex_enter(&device_lock);
	for (i = 0, vq = NULL; i < SPECHSZ && vq == NULL; i++) {
		for (vp = specfs_hash[i]; vp; vp = vp->v_specnext) {
			if (vp->v_type != VBLK)
				continue;
			vq = vp->v_specnode->sn_dev->sd_bdevvp;
			if (vq != NULL &&
			    vq->v_specnode->sn_dev->sd_mountpoint == mp)
				break;
			vq = NULL;
		}
	}
	if (vq == NULL) {
		mutex_exit(&device_lock);
		return ENOENT;
	}
	mutex_enter(vq->v_interlock);
	mutex_exit(&device_lock);
	error = vget(vq, 0, true /* wait */);
	if (error != 0)
		return error;
	*vpp = vq;

	return 0;
}

/*
 * Get the file system mounted on this block device.
 */
struct mount *
spec_node_getmountedfs(vnode_t *devvp)
{
	struct mount *mp;

	KASSERT(devvp->v_type == VBLK);
	mp = devvp->v_specnode->sn_dev->sd_mountpoint;

	return mp;
}

/*
 * Set the file system mounted on this block device.
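 *
 * Either record a new mount or clear the current one; the assertion
 * below insists that an existing record can only be replaced by NULL,
 * never silently overwritten by another mount.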
 */
void
spec_node_setmountedfs(vnode_t *devvp, struct mount *mp)
{

	KASSERT(devvp->v_type == VBLK);
	KASSERT(devvp->v_specnode->sn_dev->sd_mountpoint == NULL || mp == NULL);
	devvp->v_specnode->sn_dev->sd_mountpoint = mp;
}

/*
 * A vnode representing a special device is going away.  Close
 * the device if the vnode holds it open.
 */
void
spec_node_revoke(vnode_t *vp)
{
	specnode_t *sn;
	specdev_t *sd;

	sn = vp->v_specnode;
	sd = sn->sn_dev;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_specnode != NULL);
	KASSERT(sn->sn_gone == false);

	mutex_enter(&device_lock);
	KASSERT(sn->sn_opencnt <= sd->sd_opencnt);
	if (sn->sn_opencnt != 0) {
		sd->sd_opencnt -= (sn->sn_opencnt - 1);
		sn->sn_opencnt = 1;
		sn->sn_gone = true;
		mutex_exit(&device_lock);

		VOP_CLOSE(vp, FNONBLOCK, NOCRED);

		mutex_enter(&device_lock);
		KASSERT(sn->sn_opencnt == 0);
	}
	mutex_exit(&device_lock);
}

/*
 * A vnode representing a special device is being recycled.
 * Destroy the specfs component.
 */
void
spec_node_destroy(vnode_t *vp)
{
	specnode_t *sn;
	specdev_t *sd;
	vnode_t **vpp, *vp2;
	int refcnt;

	sn = vp->v_specnode;
	sd = sn->sn_dev;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_specnode != NULL);
	KASSERT(sn->sn_opencnt == 0);

	mutex_enter(&device_lock);
	/* Remove from the hash and destroy the node. */
	vpp = &specfs_hash[SPECHASH(vp->v_rdev)];
	for (vp2 = *vpp;; vp2 = vp2->v_specnext) {
		if (vp2 == NULL) {
			panic("spec_node_destroy: corrupt hash");
		}
		if (vp2 == vp) {
			KASSERT(vp == *vpp);
			*vpp = vp->v_specnext;
			break;
		}
		if (vp2->v_specnext == vp) {
			vp2->v_specnext = vp->v_specnext;
			break;
		}
	}
	sn = vp->v_specnode;
	vp->v_specnode = NULL;
	refcnt = sd->sd_refcnt--;
	KASSERT(refcnt > 0);
	mutex_exit(&device_lock);

	/* If the device is no longer in use, destroy our record. */
	if (refcnt == 1) {
		KASSERT(sd->sd_opencnt == 0);
		KASSERT(sd->sd_bdevvp == NULL);
		kmem_free(sd, sizeof(*sd));
	}
	kmem_free(sn, sizeof(*sn));
}

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

typedef int (*spec_ioctl_t)(dev_t, u_long, void *, int, struct lwp *);

/*
 * Open a special file.
 */
/* ARGSUSED */
int
spec_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct lwp *l;
	struct vnode *vp;
	dev_t dev;
	int error;
	enum kauth_device_req req;
	specnode_t *sn;
	specdev_t *sd;
	spec_ioctl_t ioctl;
	u_int gen;
	const char *name;
	struct partinfo pi;

	l = curlwp;
	vp = ap->a_vp;
	dev = vp->v_rdev;
	sn = vp->v_specnode;
	sd = sn->sn_dev;
	name = NULL;
	gen = 0;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (ap->a_mode & (FREAD | FWRITE)) {
	case FREAD | FWRITE:
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_RW;
		break;
	case FWRITE:
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_WRITE;
		break;
	default:
		req = KAUTH_REQ_DEVICE_RAWIO_SPEC_READ;
		break;
	}

	switch (vp->v_type) {
	case VCHR:
		error = kauth_authorize_device_spec(ap->a_cred, req, vp);
		if (error != 0)
			return (error);

		/*
		 * Character devices can accept opens from multiple
		 * vnodes.
		 */
		mutex_enter(&device_lock);
		if (sn->sn_gone) {
			mutex_exit(&device_lock);
			return (EBADF);
		}
		sd->sd_opencnt++;
		sn->sn_opencnt++;
		mutex_exit(&device_lock);
		if (cdev_type(dev) == D_TTY)
			vp->v_vflag |= VV_ISTTY;
		VOP_UNLOCK(vp);
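		/*
		 * Call the driver's open routine.  If it fails with
		 * ENXIO and no driver is attached, try to autoload the
		 * device module and retry.  module_gen changes whenever
		 * the module tables change, so the loop terminates once
		 * an autoload attempt makes no progress.
		 */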
		do {
			const struct cdevsw *cdev;

			gen = module_gen;
			error = cdev_open(dev, ap->a_mode, S_IFCHR, l);
			if (error != ENXIO)
				break;

			/* Check if we already have a valid driver */
			mutex_enter(&device_lock);
			cdev = cdevsw_lookup(dev);
			mutex_exit(&device_lock);
			if (cdev != NULL)
				break;

			/* Get device name from devsw_conv array */
			if ((name = cdevsw_getname(major(dev))) == NULL)
				break;

			/* Try to autoload device module */
			(void) module_autoload(name, MODULE_CLASS_DRIVER);
		} while (gen != module_gen);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		break;

	case VBLK:
		error = kauth_authorize_device_spec(ap->a_cred, req, vp);
		if (error != 0)
			return (error);

		/*
		 * For block devices, permit only one open.  The buffer
		 * cache cannot remain self-consistent with multiple
		 * vnodes holding a block device open.
		 */
		mutex_enter(&device_lock);
		if (sn->sn_gone) {
			mutex_exit(&device_lock);
			return (EBADF);
		}
		if (sd->sd_opencnt != 0) {
			mutex_exit(&device_lock);
			return EBUSY;
		}
		sn->sn_opencnt = 1;
		sd->sd_opencnt = 1;
		sd->sd_bdevvp = vp;
		mutex_exit(&device_lock);
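		/*
		 * As for the VCHR case above: open the device, retrying
		 * with module autoload if no driver is attached yet.
		 */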
		do {
			const struct bdevsw *bdev;

			gen = module_gen;
			error = bdev_open(dev, ap->a_mode, S_IFBLK, l);
			if (error != ENXIO)
				break;

			/* Check if we already have a valid driver */
			mutex_enter(&device_lock);
			bdev = bdevsw_lookup(dev);
			mutex_exit(&device_lock);
			if (bdev != NULL)
				break;

			/* Get device name from devsw_conv array */
			if ((name = bdevsw_getname(major(dev))) == NULL)
				break;

			VOP_UNLOCK(vp);

			/* Try to autoload device module */
			(void) module_autoload(name, MODULE_CLASS_DRIVER);

			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		} while (gen != module_gen);

		break;

	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
	default:
		return 0;
	}

	mutex_enter(&device_lock);
	if (sn->sn_gone) {
		if (error == 0)
			error = EBADF;
	} else if (error != 0) {
		sd->sd_opencnt--;
		sn->sn_opencnt--;
		if (vp->v_type == VBLK)
			sd->sd_bdevvp = NULL;
	}
	mutex_exit(&device_lock);

	if (cdev_type(dev) != D_DISK || error != 0)
		return error;
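	/*
	 * This is a disk that opened successfully: ask the driver for
	 * the partition size and record it with UVM so that the VM
	 * system knows how large the device is.
	 */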
	ioctl = vp->v_type == VCHR ? cdev_ioctl : bdev_ioctl;
	error = (*ioctl)(vp->v_rdev, DIOCGPARTINFO, &pi, FREAD, curlwp);
	if (error == 0)
		uvm_vnp_setsize(vp, (voff_t)pi.pi_secsize * pi.pi_size);

	return 0;
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
int
spec_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct buf *bp;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo pi;
	int n, on;
	int error = 0;

	KASSERT(uio->uio_rw == UIO_READ);
	KASSERTMSG(VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
	    uio->uio_vmspace == curproc->p_vmspace,
	    "vmspace belongs to neither kernel nor curproc");

	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = cdev_read(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		return (error);

	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		if (uio->uio_offset < 0)
			return (EINVAL);

		if (bdev_ioctl(vp->v_rdev, DIOCGPARTINFO, &pi, FREAD, l) == 0)
			bsize = pi.pi_bsize;
		else
			bsize = BLKDEV_IOSIZE;

		bscale = bsize >> DEV_BSHIFT;
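		/*
		 * Read through the buffer cache: round the offset down
		 * to a block boundary, bread() the whole block, then
		 * copy out just the fragment the caller asked for.
		 */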
		do {
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			error = bread(vp, bn, bsize, 0, &bp);
			if (error) {
				return (error);
			}
			n = min(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp, 0);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
int
spec_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct buf *bp;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo pi;
	int n, on;
	int error = 0;

	KASSERT(uio->uio_rw == UIO_WRITE);
	KASSERTMSG(VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
	    uio->uio_vmspace == curproc->p_vmspace,
	    "vmspace belongs to neither kernel nor curproc");

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = cdev_write(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);

		if (bdev_ioctl(vp->v_rdev, DIOCGPARTINFO, &pi, FREAD, l) == 0)
			bsize = pi.pi_bsize;
		else
			bsize = BLKDEV_IOSIZE;

		bscale = bsize >> DEV_BSHIFT;
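		/*
		 * Write through the buffer cache.  A full-block write
		 * can use getblk() and skip reading the old contents; a
		 * partial write must read-modify-write, so it uses
		 * bread() with B_MODIFY.
		 */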
		do {
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, B_MODIFY, &bp);
			if (error) {
				return (error);
			}
			n = min(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (error)
				brelse(bp, 0);
			else {
				if (n + on == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
				error = bp->b_error;
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * fdiscard, which on disk devices becomes TRIM.
 */
int
spec_fdiscard(void *v)
{
	struct vop_fdiscard_args /* {
		struct vnode *a_vp;
		off_t a_pos;
		off_t a_len;
	} */ *ap = v;
	struct vnode *vp;
	dev_t dev;

	vp = ap->a_vp;
	dev = NODEV;

	mutex_enter(vp->v_interlock);
	if (vdead_check(vp, VDEAD_NOWAIT) == 0 && vp->v_specnode != NULL) {
		dev = vp->v_rdev;
	}
	mutex_exit(vp->v_interlock);

	if (dev == NODEV) {
		return ENXIO;
	}

	switch (vp->v_type) {
	case VCHR:
		/* this is not stored for character devices */
		/* KASSERT(vp == vp->v_specnode->sn_dev->sd_cdevvp); */
		return cdev_discard(dev, ap->a_pos, ap->a_len);
	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		return bdev_discard(dev, ap->a_pos, ap->a_len);
	default:
		panic("spec_fdiscard: not a device");
	}
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(void *v)
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		void *a_data;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	dev_t dev;

	/*
	 * Extract all the info we need from the vnode, taking care to
	 * avoid a race with VOP_REVOKE().
	 */

	vp = ap->a_vp;
	dev = NODEV;
	mutex_enter(vp->v_interlock);
	if (vdead_check(vp, VDEAD_NOWAIT) == 0 && vp->v_specnode) {
		dev = vp->v_rdev;
	}
	mutex_exit(vp->v_interlock);
	if (dev == NODEV) {
		return ENXIO;
	}

	switch (vp->v_type) {

	case VCHR:
		return cdev_ioctl(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, curlwp);

	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		return bdev_ioctl(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, curlwp);

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
int
spec_poll(void *v)
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
	} */ *ap = v;
	struct vnode *vp;
	dev_t dev;

	/*
	 * Extract all the info we need from the vnode, taking care to
	 * avoid a race with VOP_REVOKE().
	 */

	vp = ap->a_vp;
	dev = NODEV;
	mutex_enter(vp->v_interlock);
	if (vdead_check(vp, VDEAD_NOWAIT) == 0 && vp->v_specnode) {
		dev = vp->v_rdev;
	}
	mutex_exit(vp->v_interlock);
	if (dev == NODEV) {
		return POLLERR;
	}

	switch (vp->v_type) {

	case VCHR:
		return cdev_poll(dev, ap->a_events, curlwp);

	default:
		return (genfs_poll(v));
	}
}

/* ARGSUSED */
int
spec_kqfilter(void *v)
{
	struct vop_kqfilter_args /* {
		struct vnode *a_vp;
		struct knote *a_kn;
	} */ *ap = v;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return cdev_kqfilter(dev, ap->a_kn);
	default:
		/*
		 * Block devices don't support kqfilter, and refuse it
		 * for any other files (like those vflush()ed) too.
		 */
		return (EOPNOTSUPP);
	}
}

/*
 * Allow mapping of only D_DISK.  This is called only for VBLK.
 */
int
spec_mmap(void *v)
{
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		vm_prot_t a_prot;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_type == VBLK);
	if (bdev_type(vp->v_rdev) != D_DISK)
		return EINVAL;

	return 0;
}

/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct mount *mp;
	int error;

	if (vp->v_type == VBLK) {
		if ((mp = spec_node_getmountedfs(vp)) != NULL) {
			error = VFS_FSYNC(mp, vp, ap->a_flags);
			if (error != EOPNOTSUPP)
				return error;
		}
		return vflushbuf(vp, ap->a_flags);
	}
	return (0);
}

/*
 * Just call the device strategy routine
 */
int
spec_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp = ap->a_bp;
	dev_t dev;
	int error;

	dev = NODEV;

	/*
	 * Extract all the info we need from the vnode, taking care to
	 * avoid a race with VOP_REVOKE().
	 */

	mutex_enter(vp->v_interlock);
	if (vdead_check(vp, VDEAD_NOWAIT) == 0 && vp->v_specnode != NULL) {
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		dev = vp->v_rdev;
	}
	mutex_exit(vp->v_interlock);

	if (dev == NODEV) {
		error = ENXIO;
		goto out;
	}
	bp->b_dev = dev;

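	/*
	 * For writes, give file system copy-on-write hooks (e.g.
	 * snapshots) a chance to copy the old data before the block
	 * is overwritten.
	 */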
	if (!(bp->b_flags & B_READ)) {
		error = fscow_run(bp, false);
		if (error)
			goto out;
	}
	bdev_strategy(bp);

	return 0;

out:
	bp->b_error = error;
	bp->b_resid = bp->b_bcount;
	biodone(bp);

	return error;
}

int
spec_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_mount == dead_rootmount);
	*ap->a_recycle = true;
	VOP_UNLOCK(vp);
	return 0;
}

int
spec_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp __diagused = ap->a_vp;

	KASSERT(vp->v_mount == dead_rootmount);
	return 0;
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(void *v)
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = (MAXBSIZE >> DEV_BSHIFT) - 1;
	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct session *sess;
	dev_t dev = vp->v_rdev;
	int flags = ap->a_fflag;
	int mode, error, count;
	specnode_t *sn;
	specdev_t *sd;

	mutex_enter(vp->v_interlock);
	sn = vp->v_specnode;
	sd = sn->sn_dev;
	/*
	 * If we're going away soon, make this non-blocking.
	 * Also ensures that we won't wedge in vn_lock below.
	 */
	if (vdead_check(vp, VDEAD_NOWAIT) != 0)
		flags |= FNONBLOCK;
	mutex_exit(vp->v_interlock);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.  We
		 * cannot easily tell that a character device is a
		 * controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case, if the
		 * open count is 1 release the reference from the
		 * session.  Also, remove the link from the tty back to
		 * the session and pgrp.
		 *
		 * XXX V. fishy.
		 */
		mutex_enter(proc_lock);
		sess = curlwp->l_proc->p_session;
		if (sn->sn_opencnt == 1 && vp == sess->s_ttyvp) {
			mutex_spin_enter(&tty_lock);
			sess->s_ttyvp = NULL;
			if (sess->s_ttyp->t_session != NULL) {
				sess->s_ttyp->t_pgrp = NULL;
				sess->s_ttyp->t_session = NULL;
				mutex_spin_exit(&tty_lock);
				/* Releases proc_lock. */
				proc_sessrele(sess);
			} else {
				mutex_spin_exit(&tty_lock);
				if (sess->s_ttyp->t_pgrp != NULL)
					panic("spec_close: spurious pgrp ref");
				mutex_exit(proc_lock);
			}
			vrele(vp);
		} else
			mutex_exit(proc_lock);

		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		mode = S_IFCHR;
		break;

	case VBLK:
		KASSERT(vp == vp->v_specnode->sn_dev->sd_bdevvp);
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 0, 0);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	mutex_enter(&device_lock);
	sn->sn_opencnt--;
	count = --sd->sd_opencnt;
	if (vp->v_type == VBLK)
		sd->sd_bdevvp = NULL;
	mutex_exit(&device_lock);

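	/*
	 * Only the last close of the device, counted across all of its
	 * aliasing vnodes, reaches the driver's close routine.
	 */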
	if (count != 0)
		return 0;

	/*
	 * If we're able to block, release the vnode lock & reacquire.  We
	 * might end up sleeping for someone else who wants our queues.  They
	 * won't get them if we hold the vnode locked.
	 */
	if (!(flags & FNONBLOCK))
		VOP_UNLOCK(vp);

	if (vp->v_type == VBLK)
		error = bdev_close(dev, flags, mode, curlwp);
	else
		error = cdev_close(dev, flags, mode, curlwp);

	if (!(flags & FNONBLOCK))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(void *v)
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("dev %llu, %llu\n", (unsigned long long)major(ap->a_vp->v_rdev),
	    (unsigned long long)minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(void *v)
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	case _PC_SYNC_IO:
		*ap->a_retval = 1;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Advisory record locking support.
 */
int
spec_advlock(void *v)
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		void *a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return lf_advlock(ap, &vp->v_speclockf, (off_t)0);
}