DragonFly test List (threaded) for 2009-03
[
Date Prev][
Date Next]
[
Thread Prev][
Thread Next]
[
Date Index][
Thread Index]
Test message
message body
diff --git a/sbin/mount_tmpfs/Makefile b/sbin/mount_tmpfs/Makefile
new file mode 100644
index 0000000..eb5d258
--- /dev/null
+++ b/sbin/mount_tmpfs/Makefile
@@ -0,0 +1,12 @@
+#
+# $DragonFly: src/sbin/mount_tmpfs/Makefile,v 1.1 2007/10/10 19:35:19 dillon Exp $
+
+PROG= mount_tmpfs
+SRCS= mount_tmpfs.c getmntopts.c
+MAN=
+
+MOUNT= ${.CURDIR}/../mount
+CFLAGS+= -I${.CURDIR}/../../sys -I${MOUNT}
+.PATH: ${MOUNT}
+
+.include <bsd.prog.mk>
diff --git a/sbin/mount_tmpfs/mount_tmpfs.c b/sbin/mount_tmpfs/mount_tmpfs.c
new file mode 100755
index 0000000..2091870
--- /dev/null
+++ b/sbin/mount_tmpfs/mount_tmpfs.c
@@ -0,0 +1,94 @@
+
+#include <sys/param.h>
+#include <sys/mount.h>
+#include "tmpfs_args.h"
+//#include <vfs/tmpfs/tmpfs.h>
+
+#include <err.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+#include <unistd.h>
+
+#include "/usr/src/sbin/mount/mntopts.h"
+
+/*
+ * Mount options accepted via -o.  Currently unused: -o option parsing
+ * is not wired up in main(), so mntflags is always 0.
+ */
+struct mntopt mopts[] = {
+	MOPT_STDOPTS,
+	MOPT_NULL
+};
+
+static void usage(void) __dead2;
+
+int
+main(int argc, char **argv)
+{
+ struct tmpfs_args args;
+ int ch, mntflags = 0;
+ char target[MAXPATHLEN];
+ struct vfsconf vfc;
+ int error;
+
+ /*
+ mntflags = 0;
+ while ((ch = getopt(argc, argv, "o:")) != -1)
+ switch(ch) {
+ case 'o':
+ getmntopts(optarg, mopts, &mntflags, 0);
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+ */
+ argv++;
+ argc--;
+ if (argc < 1)
+ usage();
+
+ /* resolve target and source with realpath(3) */
+ checkpath(argv[0], target);
+
+ /*
+ * Mount points that did not use distinct paths (e.g. / on /mnt)
+ * used to be disallowed because mount linkages were stored in
+ * vnodes and would lead to endlessly recursive trees. DragonFly
+ * stores mount linkages in the namecache topology and does not
+ * have this problem, so paths no longer need to be distinct.
+ */
+
+// args.target = target;
+ bzero(&args, sizeof(struct tmpfs_args));
+ args.ta_version = TMPFS_ARGS_VERSION;
+ args.ta_nodes_max = 160000;
+ args.ta_size_max = 0xf0000000;
+ if (argc >= 2)
+ args.ta_size_max = atoi(argv[1]);
+ args.ta_root_uid = getuid();
+ args.ta_root_gid = getgid();
+ args.ta_root_mode = 0777;
+
+ error = getvfsbyname("tmpfs", &vfc);
+ if (error && vfsisloadable("tmpfs")) {
+ if(vfsload("tmpfs"))
+ err(EX_OSERR, "vfsload(tmpfs)");
+ endvfsent();
+ error = getvfsbyname("tmpfs", &vfc);
+ }
+ if (error)
+ errx(EX_OSERR, "tmpfs filesystem is not available");
+
+ if (mount(vfc.vfc_name, target, mntflags, &args))
+ err(1, NULL);
+ exit(0);
+}
+
/*
 * Prints a usage synopsis to stderr and exits with status 1.
 */
static void
usage(void)
{
	fputs("usage: mount_tmpfs [-o options] mount_point\n", stderr);
	exit(1);
}
diff --git a/sys/vfs/tmpfs/Makefile b/sys/vfs/tmpfs/Makefile
new file mode 100755
index 0000000..2917513
--- /dev/null
+++ b/sys/vfs/tmpfs/Makefile
@@ -0,0 +1,8 @@
+
KMOD=	tmpfs
SRCS=	tmpfs_vnops.c tmpfs_vfsops.c tmpfs_subr.c
# XXX not yet built: tmpfs_specops.c tmpfs_pool.c tmpfs_fifoops.c
NOMAN=

.include <bsd.kmod.mk>
+
diff --git a/sys/vfs/tmpfs/tmpfs.h b/sys/vfs/tmpfs/tmpfs.h
new file mode 100755
index 0000000..567b6a7
--- /dev/null
+++ b/sys/vfs/tmpfs/tmpfs.h
@@ -0,0 +1,580 @@
+/* $NetBSD: tmpfs.h,v 1.37 2008/07/29 09:10:09 pooka Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
+ * 2005 program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FS_TMPFS_TMPFS_H_
+#define _FS_TMPFS_TMPFS_H_
+
+#include <sys/cdefs.h>
+//#include "opt_posix.h"
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+
+#include <sys/dirent.h>
+#include <sys/mount.h>
+#include <sys/queue.h>
+#include <sys/vnode.h>
+#include <sys/lockf.h>
+
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+#include <vm/swap_pager.h>
+
+#define TMPFS_DEBUG 0
+
+#if TMPFS_DEBUG > 0
+#define DP(format, args...) kprintf(format, ## args)
+#else
+#define DP(format, args...)
+#endif
+
+MALLOC_DECLARE(M_TMPFS);
+
/*
 * Thin compatibility shims mapping the NetBSD kmutex API onto
 * DragonFly lockmgr locks.
 */
#define kmutex_t		struct lock

#define mutex_init(mtx, a, b)	lockinit(mtx, "mutex", 0, 0)
#define mutex_destroy(mtx)	lockuninit(mtx)
#define mutex_enter(mtx)	lockmgr(mtx, LK_EXCLUSIVE)
#define mutex_exit(mtx)		lockmgr(mtx, LK_RELEASE)

#define MNT_GETARGS	0
/*
 * INT_MAX is the *signed* int maximum.  0xffffffff is UINT_MAX; using
 * it here would make comparisons against INT_MAX go unsigned (and the
 * value equals -1 when stored in an int).
 */
#define INT_MAX		0x7fffffff
#define MAXNAMLEN	MNAMELEN
#define IMNT_MPSAFE	0

#define v_interlock	v_lock

#define UPDATE_CLOSE	0

#define TMPFS_LOCK(tmp)		mutex_enter(&(tmp)->tm_lock)
#define TMPFS_UNLOCK(tmp)	mutex_exit(&(tmp)->tm_lock)

/* XXX VM object locking is not wired up yet; these are no-ops. */
#define VM_OBJECT_LOCK(obj)
#define VM_OBJECT_UNLOCK(obj)
#define vm_page_lock_queues()
#define vm_page_unlock_queues()
+
+
+/* --------------------------------------------------------------------- */
+/* For the kernel and anyone who likes peeking into kernel memory */
+/* --------------------------------------------------------------------- */
+
+#if defined(_KERNEL)
+#include <vfs/tmpfs/tmpfs_pool.h>
+#endif /* defined(_KERNEL) */
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Internal representation of a tmpfs directory entry.
+ */
+struct tmpfs_dirent {
+ TAILQ_ENTRY(tmpfs_dirent) td_entries;
+
+ /* Length of the name stored in this directory entry. This avoids
+ * the need to recalculate it every time the name is used. */
+ uint16_t td_namelen;
+
+ /* The name of the entry, allocated from a string pool. This
+ * string is not required to be zero-terminated; therefore, the
+ * td_namelen field must always be used when accessing its value. */
+ char * td_name;
+
+ /* Pointer to the node this entry refers to. */
+ struct tmpfs_node * td_node;
+};
+
+/* A directory in tmpfs holds a sorted list of directory entries, which in
+ * turn point to other files (which can be directories themselves).
+ *
+ * In tmpfs, this list is managed by a tail queue, whose head is defined by
+ * the struct tmpfs_dir type.
+ *
+ * It is important to notice that directories do not have entries for . and
+ * .. as other file systems do. These can be generated when requested
+ * based on information available by other means, such as the pointer to
+ * the node itself in the former case or the pointer to the parent directory
+ * in the latter case. This is done to simplify tmpfs's code and, more
+ * importantly, to remove redundancy. */
+TAILQ_HEAD(tmpfs_dir, tmpfs_dirent);
+
+/* Each entry in a directory has a cookie that identifies it. Cookies
+ * supersede offsets within directories because, given how tmpfs stores
+ * directories in memory, there is no such thing as an offset. (Emulating
+ * a real offset could be very difficult.)
+ *
+ * The '.', '..' and the end of directory markers have fixed cookies which
+ * cannot collide with the cookies generated by other entries. The cookies
+ * for the other entries are generated based on the memory address at
+ * which their information is stored.
+ *
+ * Ideally, using the entry's memory pointer as the cookie would be enough
+ * to represent it and it wouldn't cause collisions in any system.
+ * Unfortunately, this results in "offsets" with very large values which
+ * later raise problems in the Linux compatibility layer (and maybe in other
+ * places) as described in PR kern/32034. Hence we need to workaround this
+ * with a rather ugly hack.
+ *
+ * Linux 32-bit binaries, unless built with _FILE_OFFSET_BITS=64, have off_t
+ * set to 'long', which is a 32-bit *signed* long integer. Regardless of
+ * the macro value, GLIBC (2.3 at least) always uses the getdents64
+ * system call (when calling readdir) which internally returns off64_t
+ * offsets. In order to make 32-bit binaries work, *GLIBC* converts the
+ * 64-bit values returned by the kernel to 32-bit ones and aborts with
+ * EOVERFLOW if the conversion results in values that won't fit in 32-bit
+ * integers (which it assumes is because the directory is extremely large).
+ * This wouldn't cause problems if we were dealing with unsigned integers,
+ * but as we have signed integers, this check fails due to sign expansion.
+ *
+ * For example, consider that the kernel returns the 0xc1234567 cookie to
+ * userspace in a off64_t integer. Later on, GLIBC casts this value to
+ * off_t (remember, signed) with code similar to:
+ * system call returns the offset in kernel_value;
+ * off_t casted_value = kernel_value;
+ * if (sizeof(off_t) != sizeof(off64_t) &&
+ * kernel_value != casted_value)
+ * error!
+ * In this case, casted_value still has 0xc1234567, but when it is compared
+ * for equality against kernel_value, it is promoted to a 64-bit integer and
+ * becomes 0xffffffffc1234567, which is different than 0x00000000c1234567.
+ * Then, GLIBC assumes this is because the directory is very large.
+ *
+ * Given that all the above happens in user-space, we have no control over
+ * it; therefore we must workaround the issue here. We do this by
+ * truncating the pointer value to a 32-bit integer and hope that there
+ * won't be collisions. In fact, this will not cause any problems in
+ * 32-bit platforms but some might arise in 64-bit machines (I'm not sure
+ * if they can happen at all in practice).
+ *
+ * XXX A nicer solution shall be attempted. */
+#if defined(_KERNEL)
+#define TMPFS_DIRCOOKIE_DOT 0
+#define TMPFS_DIRCOOKIE_DOTDOT 1
+#define TMPFS_DIRCOOKIE_EOF 2
+static __inline
+off_t
+tmpfs_dircookie(struct tmpfs_dirent *de)
+{
+	off_t cookie;
+
+	/*
+	 * Truncate the entry's address to a positive 31-bit value (see
+	 * the long comment above on why 32-bit off_t consumers need
+	 * this).  The right shift drops the (presumably always-zero)
+	 * low alignment bit before masking; the KKASSERTs ensure the
+	 * result cannot collide with the reserved DOT, DOTDOT and EOF
+	 * cookie values.
+	 */
+	cookie = ((off_t)(uintptr_t)de >> 1) & 0x7FFFFFFF;
+	KKASSERT(cookie != TMPFS_DIRCOOKIE_DOT);
+	KKASSERT(cookie != TMPFS_DIRCOOKIE_DOTDOT);
+	KKASSERT(cookie != TMPFS_DIRCOOKIE_EOF);
+
+	return cookie;
+}
+#endif /* defined(_KERNEL) */
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Internal representation of a tmpfs file system node.
+ *
+ * This structure is split into two parts: one holds attributes common
+ * to all file types and the other holds data that is only applicable to
+ * a particular type. The code must be careful to only access those
+ * attributes that are actually allowed by the node's type.
+ */
+struct tmpfs_node {
+ /* Doubly-linked list entry which links all existing nodes for a
+ * single file system. This is provided to ease the removal of
+ * all nodes during the unmount operation. */
+ LIST_ENTRY(tmpfs_node) tn_entries;
+
+ /* The node's type. Any of 'VBLK', 'VCHR', 'VDIR', 'VFIFO',
+ * 'VLNK', 'VREG' and 'VSOCK' is allowed. The usage of vnode
+ * types instead of a custom enumeration is to make things simpler
+ * and faster, as we do not need to convert between two types. */
+ enum vtype tn_type;
+
+ /* Node identifier. */
+ ino_t tn_id;
+
+ /* Node's internal status. This is used by several file system
+ * operations to do modifications to the node in a delayed
+ * fashion. */
+ int tn_status;
+#define TMPFS_NODE_ACCESSED (1 << 1)
+#define TMPFS_NODE_MODIFIED (1 << 2)
+#define TMPFS_NODE_CHANGED (1 << 3)
+
+ /* The node size. It does not necessarily match the real amount
+ * of memory consumed by it. */
+ off_t tn_size;
+
+ /* Generic node attributes. */
+ uid_t tn_uid;
+ gid_t tn_gid;
+ mode_t tn_mode;
+ int tn_flags;
+ nlink_t tn_links;
+ struct timespec tn_atime;
+ struct timespec tn_mtime;
+ struct timespec tn_ctime;
+ struct timespec tn_birthtime;
+ unsigned long tn_gen;
+
+ /* Head of byte-level lock list (used by tmpfs_advlock). */
+ struct lockf tn_lockf;
+
+ /* As there is a single vnode for each active file within the
+ * system, care has to be taken to avoid allocating more than one
+ * vnode per file. In order to do this, a bidirectional association
+ * is kept between vnodes and nodes.
+ *
+ * Whenever a vnode is allocated, its v_data field is updated to
+ * point to the node it references. At the same time, the node's
+ * tn_vnode field is modified to point to the new vnode representing
+ * it. Further attempts to allocate a vnode for this same node will
+ * result in returning a new reference to the value stored in
+ * tn_vnode.
+ *
+ * May be NULL when the node is unused (that is, no vnode has been
+ * allocated for it or it has been reclaimed). */
+ kmutex_t tn_vlock;
+ struct vnode * tn_vnode;
+
+ union {
+ /* Valid when tn_type == VBLK || tn_type == VCHR. */
+ struct {
+ int tn_rmajor;
+ int tn_rminor;
+ } tn_dev;
+
+ /* Valid when tn_type == VDIR. */
+ struct {
+ /* Pointer to the parent directory. The root
+ * directory has a pointer to itself in this field;
+ * this property identifies the root node. */
+ struct tmpfs_node * tn_parent;
+
+ /* Head of a tail-queue that links the contents of
+ * the directory together. See above for a
+ * description of its contents. */
+ struct tmpfs_dir tn_dir;
+
+ /* Number and pointer of the first directory entry
+ * returned by the readdir operation if it were
+ * called again to continue reading data from the
+ * same directory as before. This is used to speed
+ * up reads of long directories, assuming that no
+ * more than one read is in progress at a given time.
+ * Otherwise, these values are discarded and a linear
+ * scan is performed from the beginning up to the
+ * point where readdir starts returning values. */
+ off_t tn_readdir_lastn;
+ struct tmpfs_dirent * tn_readdir_lastp;
+ } tn_dir;
+
+ /* Valid when tn_type == VLNK. */
+ struct tn_lnk {
+ /* The link's target, allocated from a string pool. */
+ char * tn_link;
+ } tn_lnk;
+
+ /* Valid when tn_type == VREG. */
+ struct tn_reg {
+ /* The contents of regular files stored in a tmpfs
+ * file system are represented by a single anonymous
+ * memory object (aobj, for short). The aobj provides
+ * direct access to any position within the file,
+ * because its contents are always mapped in a
+ * contiguous region of virtual memory. It is a task
+ * of the memory management subsystem (see uvm(9)) to
+ * issue the required page ins or page outs whenever
+ * a position within the file is accessed. */
+ //struct uvm_object * tn_aobj;
+ vm_object_t tn_aobj;
+ size_t tn_aobj_pages;
+ } tn_reg;
+ } tn_spec;
+};
+
+#if defined(_KERNEL)
+LIST_HEAD(tmpfs_node_list, tmpfs_node);
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Internal representation of a tmpfs mount point.
+ */
+struct tmpfs_mount {
+ /* Maximum number of memory pages available for use by the file
+ * system, set during mount time. This variable must never be
+ * used directly as it may be bigger than the current amount of
+ * free memory; in the extreme case, it will hold the SIZE_MAX
+ * value. Instead, use the TMPFS_PAGES_MAX macro. */
+ unsigned int tm_pages_max;
+
+ /* Number of pages in use by the file system. Cannot be bigger
+ * than the value returned by TMPFS_PAGES_MAX in any case. */
+ unsigned int tm_pages_used;
+
+ /* Pointer to the node representing the root directory of this
+ * file system. */
+ struct tmpfs_node * tm_root;
+
+ /* Maximum number of possible nodes for this file system; set
+ * during mount time. We need a hard limit on the maximum number
+ * of nodes to avoid allocating too much of them; their objects
+ * cannot be released until the file system is unmounted.
+ * Otherwise, we could easily run out of memory by creating lots
+ * of empty files and then simply removing them. */
+ unsigned int tm_nodes_max;
+
+ /* Number of nodes currently allocated. This number only grows.
+ * When it reaches tm_nodes_max, no more new nodes can be allocated.
+ * Of course, the old, unused ones can be reused. */
+ unsigned int tm_nodes_cnt;
+
+ /* Node list. */
+ kmutex_t tm_lock;
+ struct tmpfs_node_list tm_nodes;
+
+ /* Pools used to store file system meta data. These are not shared
+ * across several instances of tmpfs for the reasons described in
+ * tmpfs_pool.c. */
+ struct tmpfs_pool tm_dirent_pool;
+ struct tmpfs_pool tm_node_pool;
+ struct tmpfs_str_pool tm_str_pool;
+};
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * This structure maps a file identifier to a tmpfs node. Used by the
+ * NFS code.
+ */
+struct tmpfs_fid {
+ uint16_t tf_len;
+ uint16_t tf_pad;
+ uint32_t tf_gen;
+ ino_t tf_id;
+};
+
+/* --------------------------------------------------------------------- */
+
+
+void print_links(struct vnode *, struct tmpfs_node *);
+
+/*
+ * Prototypes for tmpfs_subr.c.
+ */
+
+int tmpfs_alloc_node(struct tmpfs_mount *, enum vtype,
+ uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *,
+ char *, int, int, struct tmpfs_node **);
+void tmpfs_free_node(struct tmpfs_mount *, struct tmpfs_node *);
+int tmpfs_alloc_dirent(struct tmpfs_mount *, struct tmpfs_node *,
+ const char *, uint16_t, struct tmpfs_dirent **);
+void tmpfs_free_dirent(struct tmpfs_mount *, struct tmpfs_dirent *,
+ boolean_t);
+int tmpfs_alloc_vp(struct mount *, struct tmpfs_node *, struct vnode **);
+void tmpfs_free_vp(struct vnode *);
+int tmpfs_alloc_file(struct vnode *, struct vnode **, struct vattr *,
+ struct namecache *, struct ucred *, char *);
+void tmpfs_dir_attach(struct vnode *, struct tmpfs_dirent *);
+void tmpfs_dir_detach(struct vnode *, struct tmpfs_dirent *);
+struct tmpfs_dirent * tmpfs_dir_lookup(struct tmpfs_node *,
+ struct namecache *);
+int tmpfs_dir_getdotdent(struct tmpfs_node *, struct uio *);
+int tmpfs_dir_getdotdotdent(struct tmpfs_node *, struct uio *);
+struct tmpfs_dirent *tmpfs_dir_lookupbycookie(struct tmpfs_node *, off_t);
+int tmpfs_dir_getdents(struct tmpfs_node *, struct uio *, off_t *);
+int tmpfs_reg_resize(struct vnode *, off_t);
+//size_t tmpfs_mem_info(bool);
+int tmpfs_chflags(struct vnode *, int, struct ucred *);
+int tmpfs_chmod(struct vnode *, mode_t, struct ucred *);
+int tmpfs_chown(struct vnode *, uid_t, gid_t, struct ucred *);
+int tmpfs_chsize(struct vnode *, u_quad_t, struct ucred *);
+int tmpfs_chtimes(struct vnode *, const struct timespec *,
+ const struct timespec *, const struct timespec *, int, struct ucred *);
+void tmpfs_itimes(struct vnode *, const struct timespec *,
+ const struct timespec *, const struct timespec *);
+
+void tmpfs_update(struct vnode *, const struct timespec *,
+ const struct timespec *, const struct timespec *, int);
+int tmpfs_truncate(struct vnode *, off_t);
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Convenience macros to simplify some logical expressions.
+ */
+#define IMPLIES(a, b) (!(a) || (b))
+#define IFF(a, b) (IMPLIES(a, b) && IMPLIES(b, a))
+
+/* --------------------------------------------------------------------- */
+
/*
 * Checks that the directory entry pointed to by 'de' matches the name
 * 'name' with a length of 'len'.  All arguments are parenthesized so
 * that arbitrary expressions may be passed (the original '(uint16_t)len'
 * mis-bound for compound length expressions).  Note that 'de' is
 * evaluated more than once.
 */
#define TMPFS_DIRENT_MATCHES(de, name, len)				\
	((de)->td_namelen == (uint16_t)(len) &&				\
	memcmp((de)->td_name, (name), (de)->td_namelen) == 0)
+
+/* --------------------------------------------------------------------- */
+
/*
 * Asserts that the node pointed to by 'node' is a directory whose size
 * and readdir cache are internally consistent.  Wrapped in do/while(0)
 * so the macro expands to a single statement and is safe inside
 * unbraced if/else bodies (the bare statement list it replaced was not).
 */
#define TMPFS_VALIDATE_DIR(node)					\
do {									\
	KKASSERT((node)->tn_type == VDIR);				\
	KKASSERT((node)->tn_size % sizeof(struct tmpfs_dirent) == 0);	\
	KKASSERT((node)->tn_spec.tn_dir.tn_readdir_lastp == NULL ||	\
	    tmpfs_dircookie((node)->tn_spec.tn_dir.tn_readdir_lastp) ==	\
	    (node)->tn_spec.tn_dir.tn_readdir_lastn);			\
} while (0)
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Memory management stuff.
+ */
+
+/* Amount of memory pages to reserve for the system (e.g., to not use by
+ * tmpfs).
+ * XXX: Should this be tunable through sysctl, for instance? */
+#define TMPFS_PAGES_RESERVED (4 * 1024 * 1024 / PAGE_SIZE)
+
+/*
+ * Returns information about the number of available memory pages,
+ * including physical and virtual ones.
+ *
+ * If 'total' is TRUE, the value returned is the total amount of memory
+ * pages configured for the system (either in use or free).
+ * If it is FALSE, the value returned is the amount of free memory pages.
+ *
+ * Remember to remove TMPFS_PAGES_RESERVED from the returned value to avoid
+ * excessive memory usage.
+ *
+ */
+
+#define swap_pager_avail swap_pager_full
+
+static __inline size_t
+tmpfs_mem_info(void)
+{
+	size_t size;
+
+	/*
+	 * Rough count of memory pages available to tmpfs: swap plus
+	 * free plus inactive physical pages, minus wired pages
+	 * (clamped at zero by the second line).  NOTE(review):
+	 * swap_pager_avail is #defined above to swap_pager_full,
+	 * which by its name looks like a flag rather than a page
+	 * count -- verify against the swap pager.
+	 */
+	size = swap_pager_avail + vmstats.v_free_count + vmstats.v_inactive_count;
+	size -= size > vmstats.v_wire_count ? vmstats.v_wire_count : size;
+	return size;
+}
+
+/* Returns the maximum size allowed for a tmpfs file system. This macro
+ * must be used instead of directly retrieving the value from tm_pages_max.
+ * The reason is that the size of a tmpfs file system is dynamic: it lets
+ * the user store files as long as there is enough free memory (including
+ * physical memory and swap space). Therefore, the amount of memory to be
+ * used is either the limit imposed by the user during mount time or the
+ * amount of available memory, whichever is lower. To avoid consuming all
+ * the memory for a given mount point, the system will always reserve a
+ * minimum of TMPFS_PAGES_RESERVED pages, which is also taken into account
+ * by this macro (see above). */
+static __inline size_t
+TMPFS_PAGES_MAX(struct tmpfs_mount *tmp)
+{
+ size_t freepages;
+
+ freepages = tmpfs_mem_info();//false);
+ if (freepages < TMPFS_PAGES_RESERVED)
+ freepages = 0;
+ else
+ freepages -= TMPFS_PAGES_RESERVED;
+
+ return MIN(tmp->tm_pages_max, freepages + tmp->tm_pages_used);
+}
+
+/* Returns the available space for the given file system. */
+#define TMPFS_PAGES_AVAIL(tmp) \
+ ((ssize_t)(TMPFS_PAGES_MAX(tmp) - (tmp)->tm_pages_used))
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Macros/functions to convert from generic data structures to tmpfs
+ * specific ones.
+ */
+
+static __inline
+struct tmpfs_mount *
+VFS_TO_TMPFS(struct mount *mp)
+{
+ struct tmpfs_mount *tmp;
+
+#ifdef KKASSERT
+ KKASSERT((mp) != NULL && (mp)->mnt_data != NULL);
+#endif
+ tmp = (struct tmpfs_mount *)(mp)->mnt_data;
+ return tmp;
+}
+
+#endif /* defined(_KERNEL) */
+
+static __inline
+struct tmpfs_node *
+VP_TO_TMPFS_NODE(struct vnode *vp)
+{
+ struct tmpfs_node *node;
+
+#ifdef KKASSERT
+ KKASSERT((vp) != NULL && (vp)->v_data != NULL);
+#endif
+ node = (struct tmpfs_node *)vp->v_data;
+ return node;
+}
+
+#if defined(_KERNEL)
+
/*
 * Like VP_TO_TMPFS_NODE() but additionally validates, under KKASSERT,
 * that the node is a consistent directory.
 */
static __inline
struct tmpfs_node *
VP_TO_TMPFS_DIR(struct vnode *vp)
{
	struct tmpfs_node *dnode = VP_TO_TMPFS_NODE(vp);

#ifdef KKASSERT
	TMPFS_VALIDATE_DIR(dnode);
#endif
	return dnode;
}
+
+#endif /* defined(_KERNEL) */
+#endif /* _FS_TMPFS_TMPFS_H_ */
diff --git a/sys/vfs/tmpfs/tmpfs_args.h b/sys/vfs/tmpfs/tmpfs_args.h
new file mode 100755
index 0000000..ba3e9b9
--- /dev/null
+++ b/sys/vfs/tmpfs/tmpfs_args.h
@@ -0,0 +1,54 @@
+/* $NetBSD: tmpfs_args.h,v 1.3 2008/07/29 09:10:09 pooka Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
+ * 2005 program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FS_TMPFS_TMPFS_ARGS_H_
+#define _FS_TMPFS_TMPFS_ARGS_H_
+
+/*
+ * This structure is used to communicate mount parameters between userland
+ * and kernel space.
+ */
+#define TMPFS_ARGS_VERSION 1
+struct tmpfs_args {
+	/* Interface version; userland sets this to TMPFS_ARGS_VERSION. */
+	int ta_version;
+
+	/* Size counters. */
+	ino_t ta_nodes_max;	/* upper bound on number of nodes */
+	off_t ta_size_max;	/* upper bound on file system size */
+
+	/* Root node attributes. */
+	uid_t ta_root_uid;	/* owner of the root node */
+	gid_t ta_root_gid;	/* group of the root node */
+	mode_t ta_root_mode;	/* permission bits of the root node */
+};
+
+#endif /* _FS_TMPFS_TMPFS_ARGS_H_ */
diff --git a/sys/vfs/tmpfs/tmpfs_pool.h b/sys/vfs/tmpfs/tmpfs_pool.h
new file mode 100755
index 0000000..017c7d8
--- /dev/null
+++ b/sys/vfs/tmpfs/tmpfs_pool.h
@@ -0,0 +1,121 @@
+/* $NetBSD: tmpfs_pool.h,v 1.7 2008/04/28 20:24:02 martin Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
+ * 2005 program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FS_TMPFS_TMPFS_POOL_H_
+#define _FS_TMPFS_TMPFS_POOL_H_
+
+/*
+ * Minimal stand-in for NetBSD's pool(9) allocator.  NOTE(review): the
+ * backing implementation is not visible in this header; the fields
+ * below look like an item size plus alloc/free counters -- confirm in
+ * tmpfs_pool.c.
+ */
+struct pool
+{
+	int p_size;
+	int p_nallocs;
+	int p_nfrees;
+};
+
+/* Allocate / release one item from 'pp'.  NOTE(review): prototypes
+ * only -- 'flags' semantics must be confirmed in tmpfs_pool.c. */
+void *pool_get(struct pool* pp, int flags);
+void pool_put(struct pool* pp, void *p);
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * tmpfs_pool is an extension of regular system pools to also hold data
+ * specific to tmpfs. More specifically, we want a pointer to the
+ * tmpfs_mount structure using the pool so that we can update its memory
+ * usage statistics.
+ */
+struct tmpfs_pool {
+ struct pool tp_pool;
+
+ /* Reference to the mount point that holds the pool. This is used
+ * by the tmpfs_pool_allocator to access and modify the memory
+ * accounting variables for the mount point. */
+ struct tmpfs_mount * tp_mount;
+
+ /* The pool's name. Used as the wait channel. */
+ char tp_name[64];
+};
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * tmpfs uses variable-length strings to store file names and to store
+ * link targets. Reserving a fixed-size buffer for each of them is
+ * inefficient because it will consume a lot more memory than is really
+ * necessary. However, managing variable-sized buffers is difficult as
+ * regards memory allocation and very inefficient in computation time.
+ * This is why tmpfs provides a hybrid scheme to store strings: string
+ * pools.
+ *
+ * A string pool is a collection of memory pools, each one with elements
+ * of a fixed size. In tmpfs's case, a string pool contains independent
+ * memory pools for 16-byte, 32-byte, 64-byte, 128-byte, 256-byte,
+ * 512-byte and 1024-byte long objects. Whenever an object is requested
+ * from the pool, the new object's size is rounded to the closest upper
+ * match and an item from the corresponding pool is returned.
+ */
+struct tmpfs_str_pool {
+ struct tmpfs_pool tsp_pool_16;
+ struct tmpfs_pool tsp_pool_32;
+ struct tmpfs_pool tsp_pool_64;
+ struct tmpfs_pool tsp_pool_128;
+ struct tmpfs_pool tsp_pool_256;
+ struct tmpfs_pool tsp_pool_512;
+ struct tmpfs_pool tsp_pool_1024;
+};
+
+/* --------------------------------------------------------------------- */
+#ifdef _KERNEL
+
+/*
+ * Convenience functions and macros to manipulate a tmpfs_pool.
+ */
+
+void tmpfs_pool_init(struct tmpfs_pool *tpp, size_t size,
+ const char *what, struct tmpfs_mount *tmp);
+void tmpfs_pool_destroy(struct tmpfs_pool *tpp);
+
+#define TMPFS_POOL_GET(tpp, flags) pool_get((struct pool *)(tpp), flags)
+#define TMPFS_POOL_PUT(tpp, v) pool_put((struct pool *)(tpp), v)
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Functions to manipulate a tmpfs_str_pool.
+ */
+
+void tmpfs_str_pool_init(struct tmpfs_str_pool *, struct tmpfs_mount *);
+void tmpfs_str_pool_destroy(struct tmpfs_str_pool *);
+char * tmpfs_str_pool_get(struct tmpfs_str_pool *, size_t, int);
+void tmpfs_str_pool_put(struct tmpfs_str_pool *, char *, size_t);
+
+#endif
+
+#endif /* _FS_TMPFS_TMPFS_POOL_H_ */
diff --git a/sys/vfs/tmpfs/tmpfs_subr.c b/sys/vfs/tmpfs/tmpfs_subr.c
new file mode 100755
index 0000000..820c1b0
--- /dev/null
+++ b/sys/vfs/tmpfs/tmpfs_subr.c
@@ -0,0 +1,1375 @@
+/* $NetBSD: tmpfs_subr.c,v 1.48 2008/06/19 19:03:44 christos Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
+ * 2005 program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Efficient memory file system supporting functions.
+ */
+
+#include <sys/cdefs.h>
+//__KERNEL_RCSID(0, "$NetBSD: tmpfs_subr.c,v 1.48 2008/06/19 19:03:44 christos Exp $");
+
+#include <sys/param.h>
+#include <sys/dirent.h>
+#include <sys/event.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/namei.h>
+#include <sys/time.h>
+#include <sys/stat.h>
+#include <sys/systm.h>
+#include <sys/vnode.h>
+#include <sys/proc.h>
+
+#include <machine/atomic.h>
+
+#include <vm/vm_extern.h>
+#include <vm/swap_pager.h>
+
+#include <vfs/tmpfs/tmpfs.h>
+#include <vfs/tmpfs/tmpfs_vnops.h>
+
+#ifndef VT_TMPFS
+#define VT_TMPFS (VT_HAMMER + 1)
+#endif
+
+
+MALLOC_DEFINE(M_TMPFS, "tmpfs", "tmpfs data");
+
+#define atomic_inc_uint_nv(p) atomic_add_int(p, 1)
+#define atomic_dec_uint(p) atomic_subtract_int(p, 1)
+
+#define uvm_vnp_setsize(vp, size)
+
+/*
+ * Stub replacement for the NetBSD VN_KNOTE() macro: kqueue/knote
+ * notification is not wired up in this port yet, so all calls are
+ * intentional no-ops.  The original locking and KNOTE() call are kept
+ * commented out as a reminder of what needs to be restored.
+ */
+static __inline void
+VN_KNOTE(struct vnode *vp, long hint)
+{
+ //mutex_enter(&vp->v_interlock);
+ //KNOTE(&vp->v_klist, hint);
+ //mutex_exit(&vp->v_interlock);
+}
+
+/*
+ * Debugging helper: dump the vnode reference count, the tmpfs node's
+ * link count and the symbolic names of every v_flag bit set on 'vp'.
+ * Output goes through the DP() debug macro; no state is modified.
+ */
+void
+print_links(struct vnode *vp, struct tmpfs_node *node)
+{
+ DP("l v%x n%x f", vp->v_sysref.refcnt, node->tn_links);
+ if (vp->v_flag & VCACHED) DP("VCACHED "); /* No active references but has cache value */
+ if (vp->v_flag & VOBJBUF) DP("VOBJBUF "); /* Allocate buffers in VM object */
+ if (vp->v_flag & VINACTIVE) DP("VINACTIVE "); /* The vnode is inactive (did VOP_INACTIVE) */
+ if (vp->v_flag & VAGE) DP("VAGE "); /* Insert vnode at head of free list */
+ if (vp->v_flag & VOLOCK) DP("VOLOCK "); /* vnode is locked waiting for an object */
+ if (vp->v_flag & VOWANT) DP("VOWANT "); /* a process is waiting for VOLOCK */
+ if (vp->v_flag & VRECLAIMED) DP("VRECLAIMED "); /* This vnode has been destroyed */
+ if (vp->v_flag & VFREE) DP("VFREE "); /* This vnode is on the freelist */
+ /* open for business 0x100000 */
+ if (vp->v_flag & VONWORKLST) DP("VONWORKLST "); /* On syncer work-list */
+ if (vp->v_flag & VMOUNT) DP("VMOUNT "); /* Mount in progress */
+ if (vp->v_flag & VOBJDIRTY) DP("VOBJDIRTY ");
+ DP("\n");
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Allocates a new node of type 'type' inside the 'tmp' mount point, with
+ * its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
+ * using the credentials of the process 'p'.
+ *
+ * If the node type is set to 'VDIR', then the parent parameter must point
+ * to the parent directory of the node being created. It may only be NULL
+ * while allocating the root node.
+ *
+ * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
+ * specifies the device the node represents.
+ *
+ * If the node type is set to 'VLNK', then the parameter target specifies
+ * the file name of the target file for the symbolic link that is being
+ * created.
+ *
+ * Note that new nodes are retrieved from the available list if it has
+ * items or, if it is empty, from the node pool as long as there is enough
+ * space to create them.
+ *
+ * Returns zero on success or an appropriate error code on failure.
+ */
+
+int
+tmpfs_alloc_node(struct tmpfs_mount *tmp, enum vtype type,
+ uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
+ char *target, int rmajor, int rminor, struct tmpfs_node **node)
+{
+ struct tmpfs_node *nnode;
+
+ DP("tmpfs_alloc_node %d %d parent %p t %s\n", type, mode, parent, target );
+
+ /* If the root directory of the 'tmp' file system is not yet
+ * allocated, this must be the request to do it. */
+ KKASSERT(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
+ KKASSERT(IFF(type == VLNK, target != NULL));
+ /*
+ * IFF() takes exactly two arguments; the device numbers must be
+ * combined into a single expression, not passed as a third argument.
+ */
+ KKASSERT(IFF(type == VBLK || type == VCHR,
+     rmajor != VNOVAL && rminor != VNOVAL));
+ KKASSERT(uid != VNOVAL && gid != VNOVAL && mode != VNOVAL);
+ nnode = NULL;
+
+ /*
+ * Reserve a node slot first, then verify the limit.  NOTE(review):
+ * the increment and the comparison are not one atomic step, so two
+ * racing allocators may both momentarily exceed tm_nodes_max; the
+ * count itself stays consistent because the loser backs out below.
+ */
+ atomic_add_int(&tmp->tm_nodes_cnt, 1);
+ if (tmp->tm_nodes_cnt >= tmp->tm_nodes_max) {
+ atomic_subtract_int(&tmp->tm_nodes_cnt, 1);
+ return ENOSPC;
+ }
+
+ nnode = (struct tmpfs_node *)TMPFS_POOL_GET(&tmp->tm_node_pool, 0);
+ if (nnode == NULL) {
+ atomic_subtract_int(&tmp->tm_nodes_cnt, 1);
+ return ENOSPC;
+ }
+
+ /*
+ * XXX Where the pool is backed by a map larger than (4GB *
+ * sizeof(*nnode)), this may produce duplicate inode numbers
+ * for applications that do not understand 64-bit ino_t.
+ */
+ nnode->tn_id = (ino_t)((uintptr_t)nnode / sizeof(*nnode));
+ nnode->tn_gen = karc4random();
+
+ /* Generic initialization. */
+ nnode->tn_type = type;
+ nnode->tn_size = 0;
+ nnode->tn_status = 0;
+ nnode->tn_flags = 0;
+ nnode->tn_links = 0;
+ getnanotime(&nnode->tn_atime);
+ nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
+ nnode->tn_atime;
+ nnode->tn_uid = uid;
+ nnode->tn_gid = gid;
+ nnode->tn_mode = mode;
+ nnode->tn_vnode = NULL;
+
+ /* Type-specific initialization. */
+ switch (nnode->tn_type) {
+ case VBLK:
+ case VCHR:
+ nnode->tn_spec.tn_dev.tn_rmajor = rmajor;
+ nnode->tn_spec.tn_dev.tn_rminor = rminor;
+ break;
+
+ case VDIR:
+ TAILQ_INIT(&nnode->tn_spec.tn_dir.tn_dir);
+ /* The root directory is its own parent. */
+ nnode->tn_spec.tn_dir.tn_parent =
+ (parent == NULL) ? nnode : parent;
+ nnode->tn_spec.tn_dir.tn_readdir_lastn = 0;
+ nnode->tn_spec.tn_dir.tn_readdir_lastp = NULL;
+ nnode->tn_links++;
+ break;
+
+ case VFIFO: /* FALLTHROUGH */
+ case VSOCK:
+ break;
+
+ case VLNK:
+ KKASSERT(strlen(target) < MAXPATHLEN);
+ nnode->tn_size = strlen(target);
+ /* Symlink targets live in the shared string pool. */
+ nnode->tn_spec.tn_lnk.tn_link =
+ tmpfs_str_pool_get(&tmp->tm_str_pool, nnode->tn_size, 0);
+ if (nnode->tn_spec.tn_lnk.tn_link == NULL) {
+ atomic_subtract_int(&tmp->tm_nodes_cnt, 1);
+ TMPFS_POOL_PUT(&tmp->tm_node_pool, nnode);
+ return ENOSPC;
+ }
+ memcpy(nnode->tn_spec.tn_lnk.tn_link, target, nnode->tn_size);
+ break;
+
+ case VREG:
+ /* Regular file data is backed by an anonymous swap object. */
+ nnode->tn_spec.tn_reg.tn_aobj =
+ vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0);
+ nnode->tn_spec.tn_reg.tn_aobj_pages = 0;
+ //nnode->tn_spec.tn_reg.tn_aobj =
+ // uao_create(INT32_MAX - PAGE_SIZE, 0);
+ //nnode->tn_spec.tn_reg.tn_aobj_pages = 0;
+ break;
+
+ default:
+ KKASSERT(0);
+ }
+
+ mutex_init(&nnode->tn_vlock, MUTEX_DEFAULT, IPL_NONE);
+
+ /* Publish the new node on the per-mount node list. */
+ mutex_enter(&tmp->tm_lock);
+ LIST_INSERT_HEAD(&tmp->tm_nodes, nnode, tn_entries);
+ mutex_exit(&tmp->tm_lock);
+
+ *node = nnode;
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Destroys the node pointed to by node from the file system 'tmp'.
+ * If the node does not belong to the given mount point, the results are
+ * unpredicted.
+ *
+ * If the node references a directory, it must contain no entries, because
+ * their removal could need a recursive algorithm, something forbidden in
+ * kernel space. Furthermore, there is no need to provide such
+ * functionality (recursive removal) because the only primitives offered
+ * to the user are the removal of empty directories and the deletion of
+ * individual files.
+ *
+ * Note that nodes are not really deleted; in fact, when a node has been
+ * allocated, it cannot be deleted during the whole life of the file
+ * system. Instead, they are moved to the available list and remain there
+ * until reused.
+ */
+void
+tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
+{
+ /* Pages to return to the mount's usage counter; only VREG owns any. */
+ size_t pages = 0;
+ DP("tmpfs_free_node tmp %p node %p\n", tmp, node );
+
+ /* Unaccount the node and unlink it from the per-mount node list. */
+ atomic_dec_uint(&tmp->tm_nodes_cnt);
+ mutex_enter(&tmp->tm_lock);
+ LIST_REMOVE(node, tn_entries);
+ mutex_exit(&tmp->tm_lock);
+
+ switch (node->tn_type) {
+ case VLNK:
+ /* Return the symlink target string to the string pool. */
+ tmpfs_str_pool_put(&tmp->tm_str_pool,
+ node->tn_spec.tn_lnk.tn_link, node->tn_size);
+ break;
+
+ case VREG:
+ /* Drop the backing anonymous VM object. */
+ vm_object_deallocate(node->tn_spec.tn_reg.tn_aobj);
+ pages = node->tn_spec.tn_reg.tn_aobj_pages;
+ // XXXX if (node->tn_spec.tn_reg.tn_aobj != NULL)
+ //uao_detach(node->tn_spec.tn_reg.tn_aobj);
+ break;
+
+ default:
+ break;
+ }
+ mutex_destroy(&node->tn_vlock);
+ TMPFS_POOL_PUT(&tmp->tm_node_pool, node);
+
+ atomic_subtract_int(&tmp->tm_pages_used, pages);
+ DP("tmpfs_free_node used %d\n", tmp->tm_pages_used);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Allocates a new directory entry for the node node with a name of name.
+ * The new directory entry is returned in *de.
+ *
+ * The link count of node is increased by one to reflect the new object
+ * referencing it. This takes care of notifying kqueue listeners about
+ * this change.
+ *
+ * Returns zero on success or an appropriate error code on failure.
+ */
+int
+tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
+ const char *name, uint16_t len, struct tmpfs_dirent **de)
+{
+ struct tmpfs_dirent *nde;
+ DP("tmpfs_alloc_dirent tmp %p n %p %*.*s\n",tmp,node,len,len,name);
+ nde = (struct tmpfs_dirent *) TMPFS_POOL_GET(&tmp->tm_dirent_pool, 0);
+ if (nde == NULL)
+ return ENOSPC;
+
+ /* The entry name is stored in the string pool (not NUL-terminated;
+ * td_namelen carries the length). */
+ nde->td_name = tmpfs_str_pool_get(&tmp->tm_str_pool, len, 0);
+ if (nde->td_name == NULL) {
+ TMPFS_POOL_PUT(&tmp->tm_dirent_pool, nde);
+ return ENOSPC;
+ }
+ nde->td_namelen = len;
+ memcpy(nde->td_name, name, len);
+ nde->td_node = node;
+
+ /* The new entry references the node: bump its link count and tell
+ * kqueue listeners when this is an additional hard link. */
+ node->tn_links++;
+ if (node->tn_links > 1 && node->tn_vnode != NULL)
+ VN_KNOTE(node->tn_vnode, NOTE_LINK);
+ *de = nde;
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Frees a directory entry. It is the caller's responsibility to destroy
+ * the node referenced by it if needed.
+ *
+ * The link count of node is decreased by one to reflect the removal of an
+ * object that referenced it. This only happens if 'node_exists' is true;
+ * otherwise the function will not access the node referred to by the
+ * directory entry, as it may already have been released from the outside.
+ *
+ * Interested parties (kqueue) are notified of the link count change; note
+ * that this can include both the node pointed to by the directory entry
+ * as well as its parent.
+ */
+void
+tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de,
+ boolean_t node_exists)
+{
+ DP("tmpfs_free_dirent tmp %p dirent %p \n", tmp, de );
+ if (node_exists) {
+ struct tmpfs_node *node;
+ node = de->td_node;
+
+ /* Drop the link this entry held on the node and notify:
+ * NOTE_DELETE when the last link goes away, NOTE_LINK
+ * otherwise; a directory's parent is notified too. */
+ KKASSERT(node->tn_links > 0);
+ node->tn_links--;
+ if (node->tn_vnode != NULL)
+ VN_KNOTE(node->tn_vnode, node->tn_links == 0 ?
+ NOTE_DELETE : NOTE_LINK);
+ if (node->tn_type == VDIR)
+ VN_KNOTE(node->tn_spec.tn_dir.tn_parent->tn_vnode,
+ NOTE_LINK);
+ }
+ /* Return the name string and the dirent itself to their pools. */
+ tmpfs_str_pool_put(&tmp->tm_str_pool, de->td_name, de->td_namelen);
+ TMPFS_POOL_PUT(&tmp->tm_dirent_pool, de);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Allocates a new vnode for the node node or returns a new reference to
+ * an existing one if the node had already a vnode referencing it. The
+ * resulting locked vnode is returned in *vpp.
+ *
+ * Returns zero on success or an appropriate error code on failure.
+ */
+int
+tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, struct vnode **vpp)
+{
+ int error;
+ struct vnode *vp;
+ DP("tmpfs_alloc_vp tmp %p node %p\n", mp, node );
+ /* If there is already a vnode, then lock it.  vget() may return
+ * ENOENT when the vnode is reclaimed out from under us, in which
+ * case we retry; any other vget() error is returned to the caller. */
+ for (;;) {
+ mutex_enter(&node->tn_vlock);
+ if ((vp = node->tn_vnode) != NULL) {
+ /*XXX*///mutex_enter(&vp->v_interlock);
+ error = vget(vp, LK_EXCLUSIVE);// | LK_INTERLOCK);
+ mutex_exit(&node->tn_vlock);
+ if (error == ENOENT) {
+ /* vnode was reclaimed. */
+ continue;
+ }
+ *vpp = vp;
+ DP("tmpfs_alloc_vp ret2 %p\n", vp);
+ return error;
+ }
+ break;
+ }
+ /* Get a new vnode and associate it with our node.  tn_vlock is
+ * still held here so no one else can race the association. */
+ error = getnewvnode(VT_TMPFS, mp, &vp, 0, 0);
+ if (error != 0) {
+ mutex_exit(&node->tn_vlock);
+ return error;
+ }
+ vp->v_type = node->tn_type;
+ /* Type-specific initialization. */
+ switch (node->tn_type) {
+ case VBLK: /* FALLTHROUGH */
+ case VCHR:
+ /* Device nodes use the spec-vnode ops and a device alias. */
+ vp->v_ops = &mp->mnt_vn_spec_ops;
+ addaliasu(vp, node->tn_spec.tn_dev.tn_rmajor,
+ node->tn_spec.tn_dev.tn_rminor);
+ break;
+ case VDIR:
+ /*XXX vp->v_flag |= node->tn_spec.tn_dir.tn_parent == node
+ ? V_ROOT : 0; */
+ break;
+ case VFIFO:
+ vp->v_ops = &mp->mnt_vn_fifo_ops;
+ break;
+ case VREG:
+ /* Enable VM-object-backed buffer I/O for regular files. */
+ vinitvmio(vp, node->tn_size);
+ break;
+ case VLNK: /* FALLTHROUGH */
+ case VSOCK:
+ break;
+ default:
+ KKASSERT(0);
+ }
+ /* XXX uvm_vnp_setsize(vp, node->tn_size); */
+ vp->v_data = node;
+ node->tn_vnode = vp;
+ mutex_exit(&node->tn_vlock);
+ *vpp = vp;
+ KKASSERT(IFF(error == 0, *vpp != NULL && vn_islocked(*vpp)));
+ KKASSERT(*vpp == node->tn_vnode);
+ DP("tmpfs_alloc_vp ret %p\t", vp);
+ print_links(vp, node);
+ return error;
+}
+/* --------------------------------------------------------------------- */
+
+/*
+ * Destroys the association between the vnode vp and the node it
+ * references.
+ */
+void
+tmpfs_free_vp(struct vnode *vp)
+{
+ struct tmpfs_node *node;
+ DP("tmpfs_free_vp vp %p\n", vp);
+
+ node = VP_TO_TMPFS_NODE(vp);
+
+ /* Break both directions of the vnode <-> node association; tn_vlock
+ * serializes against tmpfs_alloc_vp(). */
+ mutex_enter(&node->tn_vlock);
+ node->tn_vnode = NULL;
+ mutex_exit(&node->tn_vlock);
+ vp->v_data = NULL;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Allocates a new file of type 'type' and adds it to the parent directory
+ * 'dvp'; this addition is done using the component name given in 'cnp'.
+ * The ownership of the new file is automatically assigned based on the
+ * credentials of the caller (through 'cnp'), the group is set based on
+ * the parent directory and the mode is determined from the 'vap' argument.
+ * If successful, *vpp holds a vnode to the newly created file and zero
+ * is returned. Otherwise *vpp is NULL and the function returns an
+ * appropriate error code.
+ */
+int
+tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
+ struct namecache *ncp, struct ucred *cred, char *target)
+{
+ int error;
+ struct tmpfs_dirent *de;
+ struct tmpfs_mount *tmp;
+ struct tmpfs_node *dnode;
+ struct tmpfs_node *node;
+ struct tmpfs_node *parent;
+
+ DP("tmpfs_alloc_file dvp %p vtype %d\n", dvp, vap->va_type);
+ KKASSERT(vn_islocked(dvp));
+
+ tmp = VFS_TO_TMPFS(dvp->v_mount);
+ dnode = VP_TO_TMPFS_DIR(dvp);
+ *vpp = NULL;
+ /* If the entry we are creating is a directory, we cannot overflow
+ * the number of links of its parent, because it will get a new
+ * link. */
+ if (vap->va_type == VDIR) {
+ /* Ensure that we do not overflow the maximum number of links
+ * imposed by the system. */
+ KKASSERT(dnode->tn_links <= LINK_MAX);
+ if (dnode->tn_links == LINK_MAX) {
+ error = EMLINK;
+ goto out;
+ }
+
+ parent = dnode;
+ } else
+ parent = NULL;
+
+ /* Allocate a node that represents the new file. */
+ error = tmpfs_alloc_node(tmp, vap->va_type,
+ /* XXX kauth_cred_geteuid(cred), */
+ cred->cr_uid, dnode->tn_gid,
+ vap->va_mode, parent, target,
+ vap->va_rmajor, vap->va_rminor,
+ &node);
+ if (error != 0)
+ goto out;
+
+ /* Allocate a directory entry that points to the new file. */
+ error = tmpfs_alloc_dirent(tmp, node, ncp->nc_name, ncp->nc_nlen, &de);
+ if (error != 0) {
+ tmpfs_free_node(tmp, node);
+ goto out;
+ }
+
+ /* Allocate a vnode for the new file. */
+ error = tmpfs_alloc_vp(dvp->v_mount, node, vpp);
+ if (error != 0) {
+ /* Unwind in reverse order of allocation. */
+ tmpfs_free_dirent(tmp, de, TRUE);
+ tmpfs_free_node(tmp, node);
+ goto out;
+ }
+
+ /* Now that all required items are allocated, we can proceed to
+ * insert the new node into the directory, an operation that
+ * cannot fail. */
+ tmpfs_dir_attach(dvp, de);
+ if (vap->va_type == VDIR) {
+ /* The child's ".." entry is an extra link on the parent. */
+ VN_KNOTE(dvp, NOTE_LINK);
+ dnode->tn_links++;
+ KKASSERT(dnode->tn_links <= LINK_MAX);
+ }
+out:
+
+ KKASSERT(IFF(error == 0, *vpp != NULL));
+ return error;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Attaches the directory entry de to the directory represented by vp.
+ * Note that this does not change the link count of the node pointed by
+ * the directory entry, as this is done by tmpfs_alloc_dirent.
+ *
+ * As the "parent" directory changes, interested parties are notified of
+ * a write to it.
+ */
+void
+tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
+{
+ struct tmpfs_node *dnode;
+ DP("tmpfs_dir_attach dvp %p dirent %p\n", vp, de);
+ dnode = VP_TO_TMPFS_DIR(vp);
+ /* Append the entry and grow the directory's logical size; a tmpfs
+ * directory's size is a multiple of sizeof(struct tmpfs_dirent). */
+ TAILQ_INSERT_TAIL(&dnode->tn_spec.tn_dir.tn_dir, de, td_entries);
+ dnode->tn_size += sizeof(struct tmpfs_dirent);
+ dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
+ TMPFS_NODE_MODIFIED;
+ uvm_vnp_setsize(vp, dnode->tn_size);
+ VN_KNOTE(vp, NOTE_WRITE);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Detaches the directory entry de from the directory represented by vp.
+ * Note that this does not change the link count of the node pointed by
+ * the directory entry, as this is done by tmpfs_free_dirent.
+ *
+ * As the "parent" directory changes, interested parties are notified of
+ * a write to it.
+ */
+void
+tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
+{
+ struct tmpfs_node *dnode;
+ DP("tmpfs_dir_detach vp %p dirent %p\n", vp, de);
+ KKASSERT(vn_islocked(vp));
+ dnode = VP_TO_TMPFS_DIR(vp);
+ /* Invalidate the cached readdir position if it pointed at the
+ * entry being removed. */
+ if (dnode->tn_spec.tn_dir.tn_readdir_lastp == de) {
+ dnode->tn_spec.tn_dir.tn_readdir_lastn = 0;
+ dnode->tn_spec.tn_dir.tn_readdir_lastp = NULL;
+ }
+ TAILQ_REMOVE(&dnode->tn_spec.tn_dir.tn_dir, de, td_entries);
+ dnode->tn_size -= sizeof(struct tmpfs_dirent);
+ dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
+ TMPFS_NODE_MODIFIED;
+ uvm_vnp_setsize(vp, dnode->tn_size);
+ VN_KNOTE(vp, NOTE_WRITE);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Looks for a directory entry in the directory represented by node.
+ * 'cnp' describes the name of the entry to look for. Note that the .
+ * and .. components are not allowed as they do not physically exist
+ * within directories.
+ *
+ * Returns a pointer to the entry when found, otherwise NULL.
+ */
+struct tmpfs_dirent *
+tmpfs_dir_lookup(struct tmpfs_node *node, struct namecache *ncp)
+{
+ boolean_t found;
+ struct tmpfs_dirent *de;
+ DP("tmpfs_dir_lookup node %p ncp %p\n", node, ncp);
+ /* "." and ".." are never stored as physical entries; assert the
+ * caller filtered them out.  (The first assert previously compared
+ * the nc_name pointer against 1 instead of testing nc_nlen.) */
+ KKASSERT(IMPLIES(ncp->nc_nlen == 1, ncp->nc_name[0] != '.'));
+ KKASSERT(IMPLIES(ncp->nc_nlen == 2, !(ncp->nc_name[0] == '.' &&
+ ncp->nc_name[1] == '.')));
+ TMPFS_VALIDATE_DIR(node);
+ node->tn_status |= TMPFS_NODE_ACCESSED;
+ found = 0;
+ /* Linear scan: compare length first, then the (unterminated) name. */
+ TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
+ KKASSERT(ncp->nc_nlen < 0xffff);
+ if (de->td_namelen == (uint16_t)ncp->nc_nlen &&
+ memcmp(de->td_name, ncp->nc_name, de->td_namelen) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ return found ? de : NULL;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Helper function for tmpfs_readdir. Creates a '.' entry for the given
+ * directory and returns it in the uio space. The function returns 0
+ * on success, -1 if there was not enough space in the uio structure to
+ * hold the directory entry or an appropriate error code if another
+ * error happens.
+ */
+int
+tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
+{
+ int error;
+ struct dirent dent;
+ int reclen;
+
+ DP("tmpfs_dir_getdotdent node %p uio %p\n", node, uio);
+ TMPFS_VALIDATE_DIR(node);
+ KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);
+
+ /* Synthesize the "." entry on the stack. */
+ dent.d_ino = node->tn_id;
+ dent.d_type = DT_DIR;
+ dent.d_namlen = 1;
+ dent.d_name[0] = '.';
+ dent.d_name[1] = '\0';
+ /* XXX dentp->d_reclen */
+ reclen = _DIRENT_DIRSIZ(&dent);
+
+ /* -1 signals "no room left" to the caller, not a real errno. */
+ if (/*dent.d_*/reclen > uio->uio_resid)
+ error = -1;
+ else {
+ error = uiomove((caddr_t)&dent, /*dent.d_*/reclen, uio);
+ if (error == 0)
+ uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
+ }
+
+ node->tn_status |= TMPFS_NODE_ACCESSED;
+
+ return error;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Helper function for tmpfs_readdir. Creates a '..' entry for the given
+ * directory and returns it in the uio space. The function returns 0
+ * on success, -1 if there was not enough space in the uio structure to
+ * hold the directory entry or an appropriate error code if another
+ * error happens.
+ */
+int
+tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
+{
+ int error, reclen;
+ struct dirent dent;
+
+ DP("tmpfs_dir_getdotdotdent node %p uio %p\n", node, uio);
+ TMPFS_VALIDATE_DIR(node);
+ KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);
+
+ /* Synthesize the ".." entry; for the root, tn_parent is the node
+ * itself (set up in tmpfs_alloc_node). */
+ dent.d_ino = node->tn_spec.tn_dir.tn_parent->tn_id;
+ dent.d_type = DT_DIR;
+ dent.d_namlen = 2;
+ dent.d_name[0] = '.';
+ dent.d_name[1] = '.';
+ dent.d_name[2] = '\0';
+ /*XXX dent.d_reclen = _DIRENT_SIZE(dentp);*/
+ reclen = _DIRENT_DIRSIZ(&dent);
+
+ /* -1 signals "no room left" to the caller, not a real errno. */
+ if (/*dent.d_*/reclen > uio->uio_resid)
+ error = -1;
+ else {
+ error = uiomove((caddr_t)&dent, /*dent.d_*/reclen, uio);
+ if (error == 0) {
+ struct tmpfs_dirent *de;
+
+ /* Advance the cookie to the first real entry, or EOF
+ * for an empty directory. */
+ de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir);
+ if (de == NULL)
+ uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
+ else
+ uio->uio_offset = tmpfs_dircookie(de);
+ }
+ }
+ node->tn_status |= TMPFS_NODE_ACCESSED;
+
+ return error;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Lookup a directory entry by its associated cookie.
+ */
+struct tmpfs_dirent *
+tmpfs_dir_lookupbycookie(struct tmpfs_node *node, off_t cookie)
+{
+ struct tmpfs_dirent *de;
+ DP("tmpfs_dir_lookupbycookie node %p cookie %d\n", node, (int)cookie);
+ /* Fast path: the last readdir position is cached on the node. */
+ if (cookie == node->tn_spec.tn_dir.tn_readdir_lastn &&
+ node->tn_spec.tn_dir.tn_readdir_lastp != NULL) {
+ return node->tn_spec.tn_dir.tn_readdir_lastp;
+ }
+ /* Slow path: linear scan; returns NULL if the loop runs off the
+ * end (TAILQ_FOREACH leaves de == NULL). */
+ TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
+ if (tmpfs_dircookie(de) == cookie) {
+ break;
+ }
+ }
+ return de;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Helper function for tmpfs_readdir. Returns as much directory entries
+ * as can fit in the uio space. The read starts at uio->uio_offset.
+ * The function returns 0 on success, -1 if there was not enough space
+ * in the uio structure to hold the directory entry or an appropriate
+ * error code if another error happens.
+ */
+int
+tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
+{
+
+ int error;
+ off_t startcookie;
+ struct dirent dent;
+ struct tmpfs_dirent *de;
+ int reclen;
+
+ DP("tmpfs_dir_getdents node %p uio %p\n", node, uio);
+ TMPFS_VALIDATE_DIR(node);
+
+ /* Locate the first directory entry we have to return. We have cached
+ * the last readdir in the node, so use those values if appropriate.
+ * Otherwise do a linear scan to find the requested entry. */
+ startcookie = uio->uio_offset;
+ KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
+ KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);
+ if (startcookie == TMPFS_DIRCOOKIE_EOF) {
+ return 0;
+ } else {
+ de = tmpfs_dir_lookupbycookie(node, startcookie);
+ }
+ if (de == NULL) {
+ /* Stale cookie: the entry was removed since the last call. */
+ return EINVAL;
+ }
+
+ /* Read as many entries as possible; i.e., until we reach the end of
+ * the directory or we exhaust uio space. */
+ do {
+ /* Create a dirent structure representing the current
+ * tmpfs_node and fill it. */
+ dent.d_ino = de->td_node->tn_id;
+ switch (de->td_node->tn_type) {
+ case VBLK:
+ dent.d_type = DT_BLK;
+ break;
+
+ case VCHR:
+ dent.d_type = DT_CHR;
+ break;
+
+ case VDIR:
+ dent.d_type = DT_DIR;
+ break;
+
+ case VFIFO:
+ dent.d_type = DT_FIFO;
+ break;
+
+ case VLNK:
+ dent.d_type = DT_LNK;
+ break;
+
+ case VREG:
+ dent.d_type = DT_REG;
+ break;
+
+ case VSOCK:
+ dent.d_type = DT_SOCK;
+ break;
+
+ default:
+ KKASSERT(0);
+ }
+ /* Entry names are not NUL-terminated in the pool; terminate
+ * the stack copy here. */
+ dent.d_namlen = de->td_namelen;
+ KKASSERT(de->td_namelen < sizeof(dent.d_name));
+ (void)memcpy(dent.d_name, de->td_name, de->td_namelen);
+ dent.d_name[de->td_namelen] = '\0';
+ /* XXX dentp->d_reclen */
+ reclen = _DIRENT_DIRSIZ(&dent);
+
+ /* Stop reading if the directory entry we are treating is
+ * bigger than the amount of data that can be returned.
+ * -1 means "out of space", not a real errno. */
+ if (/*dent.d_*/reclen > uio->uio_resid) {
+ error = -1;
+ break;
+ }
+
+ /* Copy the new dirent structure into the output buffer and
+ * advance pointers. */
+ error = uiomove((caddr_t)&dent, /*dent.d_*/reclen, uio);
+
+ (*cntp)++;
+ de = TAILQ_NEXT(de, td_entries);
+ } while (error == 0 && uio->uio_resid > 0 && de != NULL);
+
+ /* Update the offset and cache so the next call resumes here. */
+ if (de == NULL) {
+ uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
+ node->tn_spec.tn_dir.tn_readdir_lastn = 0;
+ node->tn_spec.tn_dir.tn_readdir_lastp = NULL;
+ } else {
+ node->tn_spec.tn_dir.tn_readdir_lastn = uio->uio_offset =
+ tmpfs_dircookie(de);
+ node->tn_spec.tn_dir.tn_readdir_lastp = de;
+ }
+
+ node->tn_status |= TMPFS_NODE_ACCESSED;
+
+ return error;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Resizes the aobj associated to the regular file pointed to by vp to
+ * the size newsize. 'vp' must point to a vnode that represents a regular
+ * file. 'newsize' must be positive.
+ *
+ * If the file is extended, the appropriate kevent is raised. This does
+ * not raise a write event though because resizing is not the same as
+ * writing.
+ *
+ * Returns zero on success or an appropriate error code on failure.
+ */
+int
+tmpfs_reg_resize(struct vnode *vp, off_t newsize)
+{
+ int error;
+ size_t newpages, oldpages;
+ struct tmpfs_mount *tmp;
+ struct tmpfs_node *node;
+ off_t oldsize;
+
+ DP("tmpfs_reg_resize vp %p newsize %d\n", vp, (int)newsize);
+ KKASSERT(vp->v_type == VREG);
+ KKASSERT(newsize >= 0);
+
+ node = VP_TO_TMPFS_NODE(vp);
+ tmp = VFS_TO_TMPFS(vp->v_mount);
+
+ /* Convert the old and new sizes to the number of pages needed to
+ * store them. It may happen that we do not need to do anything
+ * because the last allocated page can accommodate the change on
+ * its own. */
+ oldsize = node->tn_size;
+ oldpages = round_page(oldsize) / PAGE_SIZE;
+ KKASSERT(oldpages == node->tn_spec.tn_reg.tn_aobj_pages);
+ newpages = round_page(newsize) / PAGE_SIZE;
+
+ /* Refuse growth that would exceed the mount's page budget. */
+ if (newpages > oldpages &&
+ newpages - oldpages > TMPFS_PAGES_AVAIL(tmp)) {
+ error = ENOSPC;
+ goto out;
+ }
+
+ node->tn_spec.tn_reg.tn_aobj_pages = newpages;
+
+ TMPFS_LOCK(tmp);
+ tmp->tm_pages_used += (newpages - oldpages);
+ TMPFS_UNLOCK(tmp);
+
+ node->tn_size = newsize;
+ vnode_pager_setsize(vp, newsize);
+ if (newsize < oldsize) {
+ /* Bytes from newsize to the end of its page that must be
+ * zeroed so stale data cannot reappear on later growth. */
+ size_t zerolen = round_page(newsize) - newsize;
+ vm_object_t uobj = node->tn_spec.tn_reg.tn_aobj;
+ vm_page_t m;
+
+ /*
+ * free "backing store"
+ */
+ /* XXX */
+ VM_OBJECT_LOCK(uobj);
+ if (newpages < oldpages) {
+ swap_pager_freespace(uobj,
+ newpages, oldpages - newpages);
+ vm_object_page_remove(uobj,
+ OFF_TO_IDX(newsize + PAGE_MASK), 0, FALSE);
+ }
+
+ /*
+ * zero out the truncated part of the last page.
+ */
+
+ if (zerolen > 0) {
+ m = vm_page_grab(uobj, OFF_TO_IDX(newsize),
+ VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+ /* NOTE(review): this casts the vm_page_t pointer
+ * itself to a physical address; it likely should be
+ * VM_PAGE_TO_PHYS(m) — confirm against the
+ * pmap_zero_page_area() contract. */
+ pmap_zero_page_area((vm_paddr_t)(vm_offset_t)m,
+ PAGE_SIZE - zerolen, zerolen);
+ vm_page_wakeup(m);
+ }
+ VM_OBJECT_UNLOCK(uobj);
+
+ }
+ error = 0;
+out:
+ return error;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Returns information about the number of available memory pages,
+ * including physical and virtual ones.
+ *
+ * If 'total' is true, the value returned is the total amount of memory
+ * pages configured for the system (either in use or free).
+ * If it is FALSE, the value returned is the amount of free memory pages.
+ *
+ * Remember to remove TMPFS_PAGES_RESERVED from the returned value to avoid
+ * excessive memory usage.
+ *
+ */
+/*
+size_t
+tmpfs_mem_info(boolean_t total)
+{
+ size_t size;
+
+ DP("tmpfs_mem_info \n");
+ size = 0;
+ vmmeter
+ size += uvmexp.swpgavail;
+ if (!total) {
+ size -= uvmexp.swpgonly;
+ }
+ size += uvmexp.free;
+ size += uvmexp.filepages;
+ if (size > uvmexp.wired) {
+ size -= uvmexp.wired;
+ } else {
+ size = 0;
+ }
+
+ return size;
+}
+*/
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Change flags of the given vnode.
+ * Caller should execute tmpfs_update on vp after a successful execution.
+ * The vnode must be locked on entry and remain locked on exit.
+ */
+int
+tmpfs_chflags(struct vnode *vp, int flags, struct ucred *cred)
+{
+ struct tmpfs_node *node;
+
+ DP("tmpfs_chflags vp %p\n", vp);
+ KKASSERT(vn_islocked(vp));
+ node = VP_TO_TMPFS_NODE(vp);
+
+ /* Disallow this operation if the file system is mounted read-only. */
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return EROFS;
+
+ /* XXX: The following comes from UFS code, and can be found in
+ * several other file systems. Shouldn't this be centralized
+ * somewhere?  The kauth-based super-user path is not ported yet;
+ * every caller is treated as a regular user for now. */
+ //if (kauth_cred_geteuid(cred) != node->tn_uid &&
+ // (error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
+ // NULL)))
+ // return error;
+ //if (kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL) == 0) {
+ /* The super-user is only allowed to change flags if the file
+ * wasn't protected before and the securelevel is zero. */
+ // if ((node->tn_flags & (SF_IMMUTABLE | SF_APPEND)) &&
+ // kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CHSYSFLAGS,
+ // 0, NULL, NULL, NULL))
+ // return EPERM;
+ // node->tn_flags = flags;
+ //} else {
+ /* Regular users can change flags provided they only want to
+ * change user-specific ones, not those reserved for the
+ * super-user. */
+ if ((node->tn_flags & (SF_IMMUTABLE | SF_APPEND)) ||
+ (flags & UF_SETTABLE) != flags)
+ return EPERM;
+ if ((node->tn_flags & SF_SETTABLE) != (flags & SF_SETTABLE))
+ return EPERM;
+ /* Keep super-user flags, replace the user-settable ones. */
+ node->tn_flags &= SF_SETTABLE;
+ node->tn_flags |= (flags & UF_SETTABLE);
+ //}
+
+ node->tn_status |= TMPFS_NODE_CHANGED;
+ VN_KNOTE(vp, NOTE_ATTRIB);
+
+ KKASSERT(VOP_ISLOCKED(vp));
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Change access mode on the given vnode.
+ * Caller should execute tmpfs_update on vp after a successful execution.
+ * The vnode must be locked on entry and remain locked on exit.
+ */
+int
+tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred)
+{
+ struct tmpfs_node *node;
+
+ DP("tmpfs_chmod vp %p\n", vp);
+ KKASSERT(VOP_ISLOCKED(vp));
+ node = VP_TO_TMPFS_NODE(vp);
+
+ /* Disallow this operation if the file system is mounted read-only. */
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return EROFS;
+
+ /* Immutable or append-only files cannot be modified, either. */
+ if (node->tn_flags & (IMMUTABLE | APPEND))
+ return EPERM;
+
+ /* XXX: The following comes from UFS code, and can be found in
+ * several other file systems. Shouldn't this be centralized
+ * somewhere?  The kauth permission checks are not ported yet, so
+ * no ownership/group validation happens here for now. */
+ //if (kauth_cred_geteuid(cred) != node->tn_uid &&
+ // (error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
+ // NULL)))
+ // return error;
+ //if (kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL) != 0) {
+ // if (vp->v_type != VDIR && (mode & S_ISTXT))
+ // return EFTYPE;
+
+ // if ((kauth_cred_ismember_gid(cred, node->tn_gid,
+ // &ismember) != 0 || !ismember) && (mode & S_ISGID))
+ // return EPERM;
+ //}
+
+ /* Only the permission bits are stored; type bits are ignored. */
+ node->tn_mode = (mode & ALLPERMS);
+ node->tn_status |= TMPFS_NODE_CHANGED;
+ VN_KNOTE(vp, NOTE_ATTRIB);
+ KKASSERT(VOP_ISLOCKED(vp));
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Change ownership of the given vnode. At least one of uid or gid must
+ * be different than VNOVAL. If one is set to that value, the attribute
+ * is unchanged.
+ * Caller should execute tmpfs_update on vp after a successful execution.
+ * The vnode must be locked on entry and remain locked on exit.
+ */
+int
+tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred)
+{
+ /* error/ismember are referenced only by the disabled kauth check
+  * below. */
+ int error, ismember = 0;
+ struct tmpfs_node *node;
+
+ DP("tmpfs_chown vp %p\n", vp);
+ KKASSERT(VOP_ISLOCKED(vp));
+ node = VP_TO_TMPFS_NODE(vp);
+ /* Assign default values if they are unknown. */
+ KKASSERT(uid != VNOVAL || gid != VNOVAL);
+ if (uid == VNOVAL)
+ uid = node->tn_uid;
+ if (gid == VNOVAL)
+ gid = node->tn_gid;
+ KKASSERT(uid != VNOVAL && gid != VNOVAL);
+
+ /* Disallow this operation if the file system is mounted read-only. */
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return EROFS;
+
+ /* Immutable or append-only files cannot be modified, either. */
+ if (node->tn_flags & (IMMUTABLE | APPEND))
+ return EPERM;
+
+ /* XXX: The following comes from UFS code, and can be found in
+ * several other file systems. Shouldn't this be centralized
+ * somewhere? */
+ /* NOTE(review): the ownership/privilege check is disabled, so any
+  * caller may change uid/gid — confirm before shipping. */
+ //if ((kauth_cred_geteuid(cred) != node->tn_uid || uid != node->tn_uid ||
+ // (gid != node->tn_gid && !(kauth_cred_getegid(cred) == node->tn_gid ||
+ // (kauth_cred_ismember_gid(cred, gid, &ismember) == 0 && ismember)))) &&
+ // ((error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
+ // NULL)) != 0))
+ // return error;
+ node->tn_uid = uid;
+ node->tn_gid = gid;
+
+ node->tn_status |= TMPFS_NODE_CHANGED;
+ VN_KNOTE(vp, NOTE_ATTRIB);
+ KKASSERT(VOP_ISLOCKED(vp));
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Change size of the given vnode.
+ * Caller should execute tmpfs_update on vp after a successful execution.
+ * The vnode must be locked on entry and remain locked on exit.
+ */
+int
+tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred)
+{
+ int error;
+ struct tmpfs_node *node;
+
+ DP("tmpfs_chsize vp %p size %d\n", vp, (int)size);
+ KKASSERT(VOP_ISLOCKED(vp));
+ node = VP_TO_TMPFS_NODE(vp);
+ /* Decide whether this is a valid operation based on the file type. */
+ error = 0;
+ switch (vp->v_type) {
+ case VDIR:
+ return EISDIR;
+ case VREG:
+ /* Regular files are the only type whose size can really change;
+  * refuse on a read-only mount. */
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return EROFS;
+ break;
+ case VBLK: /* FALLTHROUGH */
+ case VCHR: /* FALLTHROUGH */
+ case VFIFO:
+ /* Allow modifications of special files even if in the file
+ * system is mounted read-only (we are not modifying the
+ * files themselves, but the objects they represent). */
+ return 0;
+ default:
+ /* Anything else is unsupported. */
+ return EOPNOTSUPP;
+ }
+
+ /* Immutable or append-only files cannot be modified, either. */
+ if (node->tn_flags & (IMMUTABLE | APPEND))
+ return EPERM;
+
+ /* Delegate the actual resize; note size is narrowed from u_quad_t
+  * to the off_t parameter of tmpfs_truncate. */
+ error = tmpfs_truncate(vp, size);
+ /* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
+ * for us, as will update tn_status; no need to do that here. */
+ KKASSERT(VOP_ISLOCKED(vp));
+ return error;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Change access and modification times of the given vnode.
+ * Caller should execute tmpfs_update on vp after a successful execution.
+ * The vnode must be locked on entry and remain locked on exit.
+ */
+int
+tmpfs_chtimes(struct vnode *vp, const struct timespec *atime,
+ const struct timespec *mtime, const struct timespec *btime,
+ int vaflags, struct ucred *cred)
+{
+ /* error is referenced only by the disabled kauth check below. */
+ int error;
+ struct tmpfs_node *node;
+
+ DP("tmpfs_chtimes vp %p\n", vp);
+ KKASSERT(VOP_ISLOCKED(vp));
+ node = VP_TO_TMPFS_NODE(vp);
+ /* Disallow this operation if the file system is mounted read-only. */
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return EROFS;
+
+ /* Immutable or append-only files cannot be modified, either. */
+ if (node->tn_flags & (IMMUTABLE | APPEND))
+ return EPERM;
+
+ /* XXX: The following comes from UFS code, and can be found in
+ * several other file systems. Shouldn't this be centralized
+ * somewhere? */
+ //if (kauth_cred_geteuid(cred) != node->tn_uid &&
+ // (error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
+ // NULL)) && ((vaflags & VA_UTIMES_NULL) == 0 ||
+ // (error = VOP_ACCESS(vp, VWRITE, cred))))
+ // return error;
+
+ /* A time is considered "set" only when BOTH of its fields differ
+  * from VNOVAL (same convention as the NetBSD original). */
+ if (atime->tv_sec != VNOVAL && atime->tv_nsec != VNOVAL)
+ node->tn_status |= TMPFS_NODE_ACCESSED;
+
+ if (mtime->tv_sec != VNOVAL && mtime->tv_nsec != VNOVAL)
+ node->tn_status |= TMPFS_NODE_MODIFIED;
+
+ /* An unset birthtime is passed to tmpfs_update as NULL so the
+  * stored tn_birthtime is left alone. */
+ if (btime->tv_sec == VNOVAL && btime->tv_nsec == VNOVAL)
+ btime = NULL;
+
+ tmpfs_update(vp, atime, mtime, btime, 0);
+ VN_KNOTE(vp, NOTE_ATTRIB);
+ KKASSERT(VOP_ISLOCKED(vp));
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/* Sync timestamps: fold any pending ACCESSED/MODIFIED/CHANGED status
+ * bits into the node's stored timespecs.  A NULL acc/mod means "use the
+ * current time"; the time is fetched at most once via getnanotime().
+ * birth, when non-NULL, overwrites tn_birthtime unconditionally (but
+ * only if some status bit was pending — see the early return). */
+void
+tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
+ const struct timespec *mod, const struct timespec *birth)
+{
+ struct timespec now, *nowp = NULL;
+ struct tmpfs_node *node;
+
+ DP("tmpfs_itimes vp %p\n", vp);
+ node = VP_TO_TMPFS_NODE(vp);
+
+ /* Nothing pending: avoid touching the node at all. */
+ if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
+ TMPFS_NODE_CHANGED)) == 0)
+ return;
+
+ if (birth != NULL)
+ node->tn_birthtime = *birth;
+
+ if (node->tn_status & TMPFS_NODE_ACCESSED) {
+ if (acc == NULL) {
+ if (nowp == NULL)
+ getnanotime(nowp = &now);
+ acc = nowp;
+ }
+ node->tn_atime = *acc;
+ }
+ if (node->tn_status & TMPFS_NODE_MODIFIED) {
+ if (mod == NULL) {
+ if (nowp == NULL)
+ getnanotime(nowp = &now);
+ mod = nowp;
+ }
+ node->tn_mtime = *mod;
+ }
+ if (node->tn_status & TMPFS_NODE_CHANGED) {
+ if (nowp == NULL)
+ getnanotime(nowp = &now);
+ node->tn_ctime = *nowp;
+ }
+
+ /* All pending timestamp work is done; clear the status bits. */
+ node->tn_status &=
+ ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Bring the node's timestamps up to date (thin wrapper around
+ * tmpfs_itimes).  The vnode must be locked on entry and remains locked.
+ * 'flags' is currently unused; the UPDATE_CLOSE handling is stubbed out.
+ */
+void
+tmpfs_update(struct vnode *vp, const struct timespec *acc,
+ const struct timespec *mod, const struct timespec *birth, int flags)
+{
+ struct tmpfs_node *node;
+ DP("tmpfs_update vp %p\n", vp);
+ KKASSERT(VOP_ISLOCKED(vp));
+ /* node is fetched only for its side-effect-free assertion value in
+  * this port; tmpfs_itimes re-derives it itself. */
+ node = VP_TO_TMPFS_NODE(vp);
+#if 0
+ if (flags & UPDATE_CLOSE)
+ ; /* XXX Need to do anything special? */
+#endif
+ tmpfs_itimes(vp, acc, mod, birth);
+ KKASSERT(VOP_ISLOCKED(vp));
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Set the size of the regular file backed by vp to 'length'.
+ *
+ * Returns EINVAL for negative lengths, 0 when the size is unchanged,
+ * otherwise the result of tmpfs_reg_resize().  On success the node is
+ * marked CHANGED|MODIFIED; tmpfs_update() is always run on the way out
+ * so pending timestamps are flushed even on error.
+ */
+int
+tmpfs_truncate(struct vnode *vp, off_t length)
+{
+	int error;
+	struct tmpfs_node *node;
+
+	DP("tmpfs_truncate vp %p l %d\n", vp, (int)length);
+	node = VP_TO_TMPFS_NODE(vp);
+
+	/* Reject negative sizes up front. */
+	if (length < 0) {
+		error = EINVAL;
+		goto out;
+	}
+
+	/* Nothing to do when the size does not actually change. */
+	if (node->tn_size == length) {
+		error = 0;
+		goto out;
+	}
+
+	/* tmpfs_reg_resize raises NOTE_EXTEND/NOTE_ATTRIB itself. */
+	error = tmpfs_reg_resize(vp, length);
+	if (error == 0)
+		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
+
+out:
+	tmpfs_update(vp, NULL, NULL, NULL, 0);
+	return error;
+}
+
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Initialize a tmpfs object pool.  In this DragonFly port the "pool"
+ * is only a statistics wrapper around kmalloc: it records the object
+ * size and zeroes the alloc/free counters.  The 'what' tag and owning
+ * 'tmp' mount are currently unused — kept for NetBSD API compatibility.
+ */
+void
+tmpfs_pool_init(struct tmpfs_pool *tpp, size_t size,
+ const char *what, struct tmpfs_mount *tmp)
+{
+ DP("tmpfs_pool_init pool %p size %d\n", tpp, size);
+ tpp->tp_pool.p_size = size;
+ tpp->tp_pool.p_nallocs = 0;
+ tpp->tp_pool.p_nfrees = 0;
+}
+
+/*
+ * Tear down a tmpfs object pool.  Only logs the alloc/free counters;
+ * a mismatch in the debug output indicates leaked objects.
+ */
+void
+tmpfs_pool_destroy(struct tmpfs_pool *tpp)
+{
+ DP("tmpfs_pool_destroy pool %p allocs %d frees %d\n", tpp,
+ tpp->tp_pool.p_nallocs,
+ tpp->tp_pool.p_nfrees);
+}
+
+/*
+ * Allocate one zeroed object from the pool.  May block (M_WAITOK);
+ * the pool itself only keeps an allocation count in this port.
+ * 'flags' is accepted for API compatibility and not consulted.
+ */
+void *
+pool_get(struct pool* pp, int flags)
+{
+	void *obj;
+
+	obj = kmalloc(pp->p_size, M_TMPFS, M_WAITOK | M_ZERO);
+	++pp->p_nallocs;
+	return obj;
+}
+
+/*
+ * Return an object to the pool: free the memory and count the release.
+ */
+void
+pool_put(struct pool* pp, void *p)
+{
+	kfree(p, M_TMPFS);
+	++pp->p_nfrees;
+}
+
+/* String-pool constructor: intentionally a no-op in this port (name
+ * buffers come straight from kmalloc); kept so NetBSD call sites need
+ * no changes. */
+void tmpfs_str_pool_init(struct tmpfs_str_pool *a, struct tmpfs_mount *b)
+{
+}
+
+/* String-pool destructor: no-op counterpart of tmpfs_str_pool_init. */
+void tmpfs_str_pool_destroy(struct tmpfs_str_pool *a)
+{
+}
+
+/*
+ * Allocate a zeroed name buffer of 'size' bytes.  'flags' is accepted
+ * for NetBSD API compatibility but ignored: the allocation always
+ * waits (M_WAITOK).
+ */
+char *tmpfs_str_pool_get(struct tmpfs_str_pool *pool, size_t size, int flags)
+{
+	char *str;
+
+	str = kmalloc(size, M_TMPFS, M_WAITOK | M_ZERO);
+	return str;
+}
+
+/* Release a name buffer obtained from tmpfs_str_pool_get; 'size' is
+ * unused here since kfree does not need it. */
+void tmpfs_str_pool_put(struct tmpfs_str_pool *pool, char *p, size_t size)
+{
+ kfree(p, M_TMPFS);
+}
diff --git a/sys/vfs/tmpfs/tmpfs_vfsops.c b/sys/vfs/tmpfs/tmpfs_vfsops.c
new file mode 100755
index 0000000..e89dcef
--- /dev/null
+++ b/sys/vfs/tmpfs/tmpfs_vfsops.c
@@ -0,0 +1,477 @@
+/* $NetBSD: tmpfs_vfsops.c,v 1.44 2008/07/29 09:10:09 pooka Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
+ * 2005 program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Efficient memory file system.
+ *
+ * tmpfs is a file system that uses NetBSD's virtual memory sub-system
+ * (the well-known UVM) to store file data and metadata in an efficient
+ * way. This means that it does not follow the structure of an on-disk
+ * file system because it simply does not need to. Instead, it uses
+ * memory-specific data structures and algorithms to automatically
+ * allocate and release resources.
+ */
+
+#include <sys/cdefs.h>
+//__KERNEL_RCSID(0, "$NetBSD: tmpfs_vfsops.c,v 1.44 2008/07/29 09:10:09 pooka Exp $");
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/systm.h>
+#include <sys/vnode.h>
+#include <sys/proc.h>
+#include <sys/module.h>
+#include <sys/file.h>
+#include <machine/atomic.h>
+
+#include <vfs/tmpfs/tmpfs.h>
+#include <vfs/tmpfs/tmpfs_args.h>
+#include <vfs/tmpfs/tmpfs_vnops.h>
+//#include <vfs/tmpfs/tmpfs_pool.h>
+
+/* --------------------------------------------------------------------- */
+
+static int tmpfs_mount(struct mount *, char *, caddr_t, struct ucred *);
+static int tmpfs_start(struct mount *, int);
+static int tmpfs_unmount(struct mount *, int);
+static int tmpfs_root(struct mount *, struct vnode **);
+static int tmpfs_vget(struct mount *, ino_t, struct vnode **);
+static int tmpfs_fhtovp(struct mount *, struct fid *, struct vnode **);
+static int tmpfs_vptofh(struct vnode *, struct fid *);//, size_t *);
+static int tmpfs_statfs(struct mount *, struct statfs *, struct ucred *);
+static int tmpfs_statvfs(struct mount *, struct statvfs *, struct ucred *cred);
+static int tmpfs_sync(struct mount *, int);//, kauth_cred_t);
+static int tmpfs_init(struct vfsconf *conf);
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * VFS_MOUNT for tmpfs: copy in the user's tmpfs_args, size the mount
+ * (page and node limits), allocate the tmpfs_mount, register vnode ops
+ * and create the root directory node.  MNT_UPDATE is not supported.
+ */
+static int
+tmpfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
+{
+ int error;
+ ino_t nodes;
+ size_t pages;
+ struct tmpfs_mount *tmp;
+ struct tmpfs_node *root;
+ struct tmpfs_args args;
+
+ DP("tmpfs_mount %s mnt=%p fl=%x\n", path, mp, mp->mnt_flag);
+
+ error = copyin(data, (caddr_t)&args, sizeof(struct tmpfs_args));
+ if (error)
+ return (error);
+
+ DP("tmpfs_mount %d %d %d \t\t %d %d %x\n",
+ args.ta_version, (int)args.ta_nodes_max, (int)args.ta_size_max,
+ args.ta_root_uid, args.ta_root_gid, args.ta_root_mode);
+
+ /* Handle retrieval of mount point arguments. */
+ if (mp->mnt_flag & MNT_GETARGS) {
+ DP("tmpfs_mount MNT_GETARGS\n");
+ if (mp->mnt_data == NULL)
+ return EIO;
+ tmp = VFS_TO_TMPFS(mp);
+
+ /* NOTE(review): the filled-in 'args' is local and is never
+  * copied back to userland here (no copyout) — confirm whether
+  * DragonFly expects the fs to copyout for MNT_GETARGS. */
+ args.ta_version = TMPFS_ARGS_VERSION;
+ args.ta_nodes_max = tmp->tm_nodes_max;
+ args.ta_size_max = tmp->tm_pages_max * PAGE_SIZE;
+
+ root = tmp->tm_root;
+ args.ta_root_uid = root->tn_uid;
+ args.ta_root_gid = root->tn_gid;
+ args.ta_root_mode = root->tn_mode;
+
+ return 0;
+ }
+
+ if (mp->mnt_flag & MNT_UPDATE) {
+ /* XXX: There is no support yet to update file system
+ * settings. Should be added. */
+ return EOPNOTSUPP;
+ }
+
+ if (args.ta_version != TMPFS_ARGS_VERSION)
+ return EINVAL;
+
+ DP("tmpfs_mount 2\n");
+ /* Do not allow mounts if we do not have enough memory to preserve
+ * the minimum reserved pages. */
+
+ /* XXX */
+ //if (tmpfs_mem_info(TRUE) < TMPFS_PAGES_RESERVED)
+ // return EINVAL;
+
+ /* Get the maximum number of memory pages this file system is
+ * allowed to use, based on the maximum size the user passed in
+ * the mount structure. A value of zero is treated as if the
+ * maximum available space was requested. */
+ if (args.ta_size_max < PAGE_SIZE || args.ta_size_max >= SIZE_MAX)
+ pages = SIZE_MAX;
+ else
+ pages = args.ta_size_max / PAGE_SIZE +
+ (args.ta_size_max % PAGE_SIZE == 0 ? 0 : 1);
+ if (pages > INT_MAX)
+ pages = INT_MAX;
+ KKASSERT(pages > 0);
+
+ /* Node limit defaults to 3 + one node per KiB of data space. */
+ if (args.ta_nodes_max <= 3)
+ nodes = 3 + pages * PAGE_SIZE / 1024;
+ else
+ nodes = args.ta_nodes_max;
+ if (nodes > INT_MAX)
+ nodes = INT_MAX;
+ KKASSERT(nodes >= 3);
+
+ /* Allocate the tmpfs mount structure and fill it. */
+ tmp = kmalloc(sizeof(struct tmpfs_mount), M_TMPFS, M_WAITOK | M_ZERO);
+ /* NOTE(review): kmalloc with M_WAITOK does not return NULL on
+  * DragonFly, so this ENOMEM path is believed dead — harmless. */
+ if (tmp == NULL)
+ return ENOMEM;
+
+ DP("tmp mount = %p\n", tmp);
+
+ tmp->tm_nodes_max = nodes;
+ tmp->tm_nodes_cnt = 0;
+ LIST_INIT(&tmp->tm_nodes);
+
+ mutex_init(&tmp->tm_lock, MUTEX_DEFAULT, IPL_NONE);
+
+ tmp->tm_pages_max = pages;
+ tmp->tm_pages_used = 0;
+ tmpfs_pool_init(&tmp->tm_dirent_pool, sizeof(struct tmpfs_dirent),
+ "dirent", tmp);
+ tmpfs_pool_init(&tmp->tm_node_pool, sizeof(struct tmpfs_node),
+ "node", tmp);
+ tmpfs_str_pool_init(&tmp->tm_str_pool, tmp);
+
+ /* Register the vnode operations vectors with this mount. */
+ vfs_add_vnodeops(mp, &tmpfs_vnode_vops, &mp->mnt_vn_norm_ops);
+ vfs_add_vnodeops(mp, &tmpfs_spec_vops, &mp->mnt_vn_spec_ops);
+ vfs_add_vnodeops(mp, &tmpfs_fifo_vops, &mp->mnt_vn_fifo_ops);
+
+ DP("tmpfs_mount alloc_node\n");
+ /* Allocate the root node. */
+ error = tmpfs_alloc_node(tmp, VDIR, args.ta_root_uid,
+ args.ta_root_gid, args.ta_root_mode & ALLPERMS, NULL, NULL,
+ VNOVAL, VNOVAL, &root);
+ DP("tmpfs_mount root = %p\n", root);
+ KKASSERT(error == 0 && root != NULL);
+ /* The root keeps an extra link so it can never be freed. */
+ root->tn_links++;
+ tmp->tm_root = root;
+
+ mp->mnt_data = (qaddr_t) tmp;
+ mp->mnt_flag |= MNT_LOCAL;
+ /* XXX */
+ //mp->mnt_stat.f_namemax = MAXNAMLEN;
+ //mp->mnt_fs_bshift = PAGE_SHIFT;
+ //mp->mnt_dev_bshift = DEV_BSHIFT;
+ //mp->mnt_iflag |= IMNT_MPSAFE;
+ vfs_getnewfsid(mp);
+
+ /* XXX */
+ strcpy(mp->mnt_stat.f_mntfromname, "tmpfs");
+ //copyinstr(args.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
+ //bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
+
+ /* Prime the statfs data for this mount. */
+ tmpfs_statfs(mp, &mp->mnt_stat, cred);
+
+ //TMPFSDEBUG("nullfs_mount: lower %s, alias at %s\n",
+ // mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntfromname);
+ //return set_statvfs_info(path, UIO_USERSPACE, "tmpfs", UIO_SYSSPACE,
+ // mp->mnt_op->vfs_name, mp, l);
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/* VFS_START: nothing to do for tmpfs; always succeeds. */
+static int
+tmpfs_start(struct mount *mp, int flags)
+{
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/* ARGSUSED2 */
+/*
+ * VFS_UNMOUNT: flush all vnodes (honouring MNT_FORCE), then walk the
+ * node list freeing every node and, for directories, every directory
+ * entry, before destroying the pools and the tmpfs_mount itself.
+ */
+static int
+tmpfs_unmount(struct mount *mp, int mntflags)
+{
+ int error;
+ int flags = 0;
+ struct tmpfs_mount *tmp;
+ struct tmpfs_node *node;
+
+ DP("tmpfs_unmount %p %d\n", mp, mntflags);
+
+ /* Handle forced unmounts. */
+ if (mntflags & MNT_FORCE)
+ flags |= FORCECLOSE;
+
+ /* Finalize all pending I/O. XXX */
+ error = vflush(mp, 0, flags);
+ if (error != 0)
+ return error;
+
+ tmp = VFS_TO_TMPFS(mp);
+
+ /* Free all associated data. The loop iterates over the linked list
+ * we have containing all used nodes. For each of them that is
+ * a directory, we free all its directory entries. Note that after
+ * freeing a node, it will automatically go to the available list,
+ * so we will later have to iterate over it to release its items. */
+ node = LIST_FIRST(&tmp->tm_nodes);
+ while (node != NULL) {
+ struct tmpfs_node *next;
+
+ if (node->tn_type == VDIR) {
+ struct tmpfs_dirent *de;
+
+ de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir);
+ while (de != NULL) {
+ struct tmpfs_dirent *nde;
+
+ /* Grab the successor before the entry is freed. */
+ nde = TAILQ_NEXT(de, td_entries);
+ tmpfs_free_dirent(tmp, de, FALSE);
+ de = nde;
+ node->tn_size -= sizeof(struct tmpfs_dirent);
+ }
+ }
+
+ next = LIST_NEXT(node, tn_entries);
+ tmpfs_free_node(tmp, node);
+ node = next;
+ }
+
+ tmpfs_pool_destroy(&tmp->tm_dirent_pool);
+ tmpfs_pool_destroy(&tmp->tm_node_pool);
+ tmpfs_str_pool_destroy(&tmp->tm_str_pool);
+
+ /* All data pages must have been released with their nodes. */
+ KKASSERT(tmp->tm_pages_used == 0);
+
+ /* Throw away the tmpfs_mount structure. */
+ mutex_destroy(&tmp->tm_lock);
+ kfree(tmp, M_TMPFS);
+ mp->mnt_data = NULL;
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * VFS_ROOT: hand back a vnode for the root directory of this mount.
+ */
+static int
+tmpfs_root(struct mount *mp, struct vnode **vpp)
+{
+	struct tmpfs_mount *tmp = VFS_TO_TMPFS(mp);
+
+	return tmpfs_alloc_vp(mp, tmp->tm_root, vpp);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * VFS_VGET (lookup by inode number) is not implemented for tmpfs;
+ * nodes are reached through the namecache instead.
+ */
+static int
+tmpfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
+{
+	kprintf("tmpfs_vget called; need for it unknown yet\n");
+	return EOPNOTSUPP;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * VFS_FHTOVP: translate an NFS file handle into a vnode by scanning
+ * the node list for a matching (id, generation) pair.
+ */
+static int
+tmpfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
+{
+ boolean_t found;
+ struct tmpfs_fid tfh;
+ struct tmpfs_mount *tmp;
+ struct tmpfs_node *node;
+
+ DP("tmpfs_fhtovp\n");
+ tmp = VFS_TO_TMPFS(mp);
+
+ if (fhp->fid_len != sizeof(struct tmpfs_fid))
+ return EINVAL;
+
+ memcpy(&tfh, fhp, sizeof(struct tmpfs_fid));
+
+ if (tfh.tf_id >= tmp->tm_nodes_max)
+ return EINVAL;
+
+ /* Linear scan under the mount lock; the generation check rejects
+  * stale handles for recycled ids. */
+ found = FALSE;
+ mutex_enter(&tmp->tm_lock);
+ LIST_FOREACH(node, &tmp->tm_nodes, tn_entries) {
+ if (node->tn_id == tfh.tf_id &&
+ node->tn_gen == tfh.tf_gen) {
+ found = TRUE;
+ break;
+ }
+ }
+ mutex_exit(&tmp->tm_lock);
+
+ /* XXXAD nothing to prevent 'node' from being removed. */
+ return found ? tmpfs_alloc_vp(mp, node, vpp) : EINVAL;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * VFS_VPTOFH: build an NFS file handle (id + generation) for vp.
+ * NOTE(review): the NetBSD fh_size bounds check is commented out below,
+ * so this assumes the caller's fid buffer holds at least
+ * sizeof(struct tmpfs_fid) bytes — confirm against DragonFly's fid.
+ */
+static int
+tmpfs_vptofh(struct vnode *vp, struct fid *fhp)
+{
+ struct tmpfs_fid tfh;
+ struct tmpfs_node *node;
+ DP("tmpfs_vptofh\n");
+/*
+ if (*fh_size < sizeof(struct tmpfs_fid)) {
+ *fh_size = sizeof(struct tmpfs_fid);
+ return E2BIG;
+ }
+
+ *fh_size = sizeof(struct tmpfs_fid);
+*/
+ node = VP_TO_TMPFS_NODE(vp);
+
+ memset(&tfh, 0, sizeof(tfh));
+ tfh.tf_len = sizeof(struct tmpfs_fid);
+ tfh.tf_gen = node->tn_gen;
+ tfh.tf_id = node->tn_id;
+ memcpy(fhp, &tfh, sizeof(tfh));
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Fill in statfs data: block counts are derived from the page limits,
+ * file counts from the node limit capped by remaining page space.
+ * Updates mp->mnt_stat in place, then copies it to sbp.
+ */
+static int
+tmpfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
+{
+ fsfilcnt_t freenodes;
+ struct tmpfs_mount *tmp;
+ struct statfs *sp = &mp->mnt_stat;
+
+ DP("tmpfs_statfs\n");
+ tmp = VFS_TO_TMPFS(mp);
+
+ sp->f_iosize = sp->f_bsize = PAGE_SIZE;
+
+ sp->f_blocks = TMPFS_PAGES_MAX(tmp);
+ sp->f_bavail = sp->f_bfree = TMPFS_PAGES_AVAIL(tmp);
+
+ /* Free nodes are limited both by the configured node cap and by
+  * how many node structures would fit in the remaining pages. */
+ freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt,
+ TMPFS_PAGES_AVAIL(tmp) * PAGE_SIZE / sizeof(struct tmpfs_node));
+
+ sp->f_files = tmp->tm_nodes_cnt + freenodes;
+ sp->f_ffree = freenodes;
+
+ *sbp = mp->mnt_stat;
+ return 0;
+}
+
+/* ARGSUSED2 */
+/*
+ * VFS_STATVFS: statvfs twin of tmpfs_statfs, operating on mnt_vstat.
+ */
+static int
+tmpfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
+{
+ fsfilcnt_t freenodes;
+ struct tmpfs_mount *tmp;
+ struct statvfs *sp = &mp->mnt_vstat;
+
+ DP("tmpfs_statvfs\n");
+ tmp = VFS_TO_TMPFS(mp);
+
+ sp->f_frsize = sp->f_bsize = PAGE_SIZE;
+
+ sp->f_blocks = TMPFS_PAGES_MAX(tmp);
+ sp->f_bavail = sp->f_bfree = TMPFS_PAGES_AVAIL(tmp);
+
+ /* Same node-count computation as tmpfs_statfs. */
+ freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_cnt,
+ TMPFS_PAGES_AVAIL(tmp) * PAGE_SIZE / sizeof(struct tmpfs_node));
+
+ sp->f_files = tmp->tm_nodes_cnt + freenodes;
+ sp->f_favail = sp->f_ffree = freenodes;
+
+ *sbp = mp->mnt_vstat;
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/* ARGSUSED0 */
+/* VFS_SYNC: tmpfs holds everything in memory, nothing to write back. */
+static int
+tmpfs_sync(struct mount *mp, int waitfor)
+{
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/* VFS_INIT: no global state to set up for tmpfs. */
+static int
+tmpfs_init(struct vfsconf *conf)
+{
+ return 0;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * tmpfs vfs operations.
+ */
+
+/* NOTE(review): .vfs_statfs is not set even though tmpfs_statfs exists
+ * (it is only called directly from tmpfs_mount) — confirm whether the
+ * statfs entry point should also be wired up here. */
+struct vfsops tmpfs_vfsops = {
+ .vfs_mount = tmpfs_mount,
+ .vfs_start = tmpfs_start,
+ .vfs_unmount = tmpfs_unmount,
+ .vfs_root = tmpfs_root,
+ .vfs_statvfs = tmpfs_statvfs,
+ .vfs_sync = tmpfs_sync,
+ .vfs_vget = tmpfs_vget,
+ .vfs_fhtovp = tmpfs_fhtovp,
+ .vfs_vptofh = tmpfs_vptofh,
+ .vfs_init = tmpfs_init,
+ .vfs_extattrctl = vfs_stdextattrctl
+ /*(void *)eopnotsupp, vfs_quotactl */
+};
+
+/*
+static int
+tmpfs_modcmd(modcmd_t cmd, void *arg)
+{
+ switch (cmd) {
+ case MODULE_CMD_INIT:
+ return vfs_attach(&tmpfs_vfsops);
+ case MODULE_CMD_FINI:
+ return vfs_detach(&tmpfs_vfsops);
+ default:
+ return ENOTTY;
+ }
+}
+*/
+
+VFS_SET(tmpfs_vfsops, tmpfs, VFCF_SYNTHETIC);
diff --git a/sys/vfs/tmpfs/tmpfs_vnops.c b/sys/vfs/tmpfs/tmpfs_vnops.c
new file mode 100755
index 0000000..7d96952
--- /dev/null
+++ b/sys/vfs/tmpfs/tmpfs_vnops.c
@@ -0,0 +1,1798 @@
+/* $NetBSD: tmpfs_vnops.c,v 1.52 2008/11/26 20:17:33 pooka Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
+ * 2005 program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * tmpfs vnode interface.
+ */
+
+#include <sys/cdefs.h>
+//__KERNEL_RCSID(0, "$NetBSD: tmpfs_vnops.c,v 1.52 2008/11/26 20:17:33 pooka Exp $");
+
+#include <sys/param.h>
+#include <sys/dirent.h>
+#include <sys/fcntl.h>
+#include <sys/event.h>
+#include <sys/malloc.h>
+#include <sys/namei.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/uio.h>
+#include <sys/unistd.h>
+#include <sys/namecache.h>
+#include <sys/vnode.h>
+#include <sys/lockf.h>
+#include <sys/ucred.h>
+#include <sys/vfsops.h>
+//#include <sys/kauth.h>
+
+#include <machine/pmap.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_map.h>
+#include <vm/vnode_pager.h>
+#include <vm/vm_extern.h>
+
+#include <sys/sched.h>
+#include <sys/sfbuf.h>
+
+#include <vfs/fifofs/fifo.h>
+#include <vfs/tmpfs/tmpfs_vnops.h>
+#include <vfs/tmpfs/tmpfs.h>
+
+
+int tmpfs_open (struct vop_open_args *);
+int tmpfs_close (struct vop_close_args *);
+int tmpfs_access (struct vop_access_args *);
+int tmpfs_getattr (struct vop_getattr_args *);
+int tmpfs_setattr (struct vop_setattr_args *);
+int tmpfs_read (struct vop_read_args *);
+int tmpfs_write (struct vop_write_args *);
+int tmpfs_fsync (struct vop_fsync_args *);
+int tmpfs_readdir (struct vop_readdir_args *);
+int tmpfs_readlink (struct vop_readlink_args *);
+int tmpfs_inactive (struct vop_inactive_args *);
+int tmpfs_reclaim (struct vop_reclaim_args *);
+int tmpfs_print (struct vop_print_args *v);
+int tmpfs_pathconf (struct vop_pathconf_args *);
+int tmpfs_advlock (struct vop_advlock_args *);
+int tmpfs_getpages (struct vop_getpages_args *);
+int tmpfs_putpages (struct vop_putpages_args *);
+
+int tmpfs_nresolve (struct vop_nresolve_args *);
+int tmpfs_nlookupdotdot (struct vop_nlookupdotdot_args *);
+int tmpfs_ncreate (struct vop_ncreate_args *);
+int tmpfs_nmkdir (struct vop_nmkdir_args *);
+int tmpfs_nmknod (struct vop_nmknod_args *);
+int tmpfs_nrmdir (struct vop_nrmdir_args *);
+int tmpfs_nremove (struct vop_nremove_args *);
+int tmpfs_nrename (struct vop_nrename_args *);
+int tmpfs_nsymlink (struct vop_nsymlink_args *);
+int tmpfs_nlink (struct vop_nlink_args *);
+
+/*
+int tmpfs_lookup (struct vop_old_lookup_args *);
+int tmpfs_create (struct vop_old_create_args *);
+int tmpfs_mknod (struct vop_old_mknod_args *);
+int tmpfs_remove (struct vop_old_remove_args *);
+int tmpfs_link (struct vop_old_link_args *);
+int tmpfs_rename (struct vop_old_rename_args *);
+int tmpfs_mkdir (struct vop_old_mkdir_args *);
+int tmpfs_rmdir (struct vop_old_rmdir_args *);
+int tmpfs_symlink (struct vop_old_symlink_args *);
+*/
+
+/* Vnode operations vector for regular tmpfs vnodes (files, dirs,
+ * symlinks).  Unimplemented operations fall through to vop_defaultop. */
+struct vop_ops tmpfs_vnode_vops = {
+ .vop_default = vop_defaultop,
+ .vop_open = tmpfs_open,
+ .vop_close = tmpfs_close,
+ .vop_access = tmpfs_access,
+ .vop_getattr = tmpfs_getattr,
+ .vop_setattr = tmpfs_setattr,
+ .vop_read = tmpfs_read,
+ .vop_write = tmpfs_write,
+ //.vop_mmap = vop_eopnotsupp,
+ .vop_fsync = tmpfs_fsync,
+ .vop_readdir = tmpfs_readdir,
+ .vop_readlink = tmpfs_readlink,
+ .vop_inactive = tmpfs_inactive,
+ .vop_reclaim = tmpfs_reclaim,
+ //.vop_bmap = vop_eopnotsupp,
+ .vop_print = tmpfs_print,
+ .vop_pathconf = tmpfs_pathconf,
+ .vop_advlock = tmpfs_advlock,
+ .vop_getpages = tmpfs_getpages,//vop_stdgetpages,
+ .vop_putpages = vop_stdputpages,//tmpfs_putpages,
+
+ .vop_nresolve = tmpfs_nresolve,
+ .vop_nlookupdotdot=tmpfs_nlookupdotdot,
+ .vop_ncreate = tmpfs_ncreate,
+ .vop_nmknod = tmpfs_nmknod,
+ .vop_nmkdir = tmpfs_nmkdir,
+ .vop_nrmdir = tmpfs_nrmdir,
+ .vop_nremove = tmpfs_nremove,
+ .vop_nsymlink = tmpfs_nsymlink,
+ .vop_nrename = tmpfs_nrename
+
+ /*.vop_old_lookup=tmpfs_lookup,
+ .vop_old_create=tmpfs_create,
+ .vop_old_mknod =tmpfs_mknod,
+ .vop_old_remove=tmpfs_remove,
+ .vop_old_link = tmpfs_link,
+ .vop_old_rename=tmpfs_rename,
+ .vop_old_mkdir =tmpfs_mkdir,
+ .vop_old_rmdir =tmpfs_rmdir,
+ .vop_old_symlink=tmpfs_symlink, */
+};
+
+int tmpfs_fifo_write(struct vop_write_args *ap);
+int tmpfs_fifo_read(struct vop_read_args *ap);
+int tmpfs_fifo_close(struct vop_close_args *ap);
+
+/* Vnode operations vector for FIFOs on tmpfs: attribute handling stays
+ * with tmpfs, I/O falls through to the fifofs layer. */
+struct vop_ops tmpfs_fifo_vops = {
+ .vop_default = fifo_vnoperate,
+ .vop_inactive = tmpfs_inactive,
+ .vop_reclaim = tmpfs_reclaim,
+ .vop_access = tmpfs_access,
+ .vop_getattr = tmpfs_getattr,
+ .vop_setattr = tmpfs_setattr,
+ .vop_read = tmpfs_fifo_read,
+ .vop_write = tmpfs_fifo_write,
+ .vop_close = tmpfs_fifo_close,
+ /* XXX .vop_kqfilter = tmpfs_fifo_kqfilter */
+};
+
+int tmpfs_spec_write(struct vop_write_args *ap);
+int tmpfs_spec_read(struct vop_read_args *ap);
+int tmpfs_spec_close(struct vop_close_args *ap);
+
+/* Vnode operations vector for device special files on tmpfs: attribute
+ * handling stays with tmpfs, device I/O goes through specfs. */
+struct vop_ops tmpfs_spec_vops = {
+ .vop_default = spec_vnoperate,
+ .vop_inactive = tmpfs_inactive,
+ .vop_reclaim = tmpfs_reclaim,
+ .vop_access = tmpfs_access,
+ .vop_getattr = tmpfs_getattr,
+ .vop_setattr = tmpfs_setattr,
+ .vop_read = tmpfs_spec_read,
+ .vop_write = tmpfs_spec_write,
+ .vop_close = tmpfs_spec_close
+};
+
+/*
+ * VOP_NRESOLVE: resolve the name in the namecache handle against
+ * directory dvp and record the result with cache_setvp() (NULL for a
+ * negative hit).  Returns 0 on success, ENOENT if the name is absent.
+ */
+int
+tmpfs_nresolve(struct vop_nresolve_args *ap)
+{
+ struct nchandle *nch = ap->a_nch;
+ struct namecache *ncp = nch->ncp;
+ struct vnode *dvp = ap->a_dvp;
+ /*struct ucred *cred = ap->a_cred;*/
+
+ int error;
+ struct tmpfs_dirent *de;
+ struct tmpfs_node *dnode;
+ struct vnode *vp = NULL;
+
+ /*KKASSERT(vn_islocked(dvp));*/
+ dnode = VP_TO_TMPFS_DIR(dvp);
+ DP("tmpfs_nresolve dvp%p n%p %*.*s\n",
+ (void *)dvp, (void *)dnode,
+ ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
+
+ if (ncp->nc_nlen == 1 && ncp->nc_name[0] == '.') {
+ /* "." resolves to the directory itself.
+  * NOTE(review): the VREF is commented out — confirm that
+  * cache_setvp() takes its own reference so dvp's refcount
+  * stays balanced here. */
+ /*XXX*///VREF(dvp);
+ vp = dvp;
+ cache_setvp(nch, dvp);
+ error = 0;
+ } else {
+
+ de = tmpfs_dir_lookup(dnode, ncp);
+
+ if (de == NULL) {
+ /* Record a negative namecache entry. */
+ error = ENOENT;
+ cache_setvp(nch, NULL);
+ } else {
+ /* The entry was found, so get its associated tmpfs_node.
+ * Allocate a new vnode on the matching entry. */
+ error = tmpfs_alloc_vp(dvp->v_mount, de->td_node, &vp);
+ if (error == 0) {
+ /* The namecache keeps its own hold; drop ours. */
+ vn_unlock(vp);
+ cache_setvp(nch, vp);
+ vrele(vp);
+ }
+ }
+ }
+ return error;
+}
+
+/*
+ * VOP_NLOOKUPDOTDOT: return a locked vnode for the parent of dvp.
+ * dvp is temporarily unlocked (held via vhold) while the parent vnode
+ * is instantiated, then relocked at its original lock type.
+ */
+int
+tmpfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
+{
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode **vpp = ap->a_vpp;
+
+ int error;
+ struct tmpfs_node *dnode;
+ int ltype;
+
+ dnode = VP_TO_TMPFS_DIR(dvp);
+ DP("tmpfs_nlookupdotdot dvp%p n%p\n", (void *)dvp, (void *)dnode);
+
+ /* XXX */
+ ltype = vn_islocked(dvp);
+ vhold(dvp);
+ vn_unlock(dvp);
+
+ /* Allocate a new vnode on the matching entry. */
+ error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_spec.tn_dir.tn_parent, vpp);
+
+ vn_lock(dvp, ltype | LK_RETRY);
+ vdrop(dvp);
+
+ KKASSERT(IFF(error == 0, *vpp != NULLVP && vn_islocked(*vpp)));
+ return error;
+}
+
+/*
+ * Common helper for the n-create family: lock dvp, allocate the new
+ * node/vnode via tmpfs_alloc_file (with 'target' as symlink text, or
+ * NULL), and on success resolve the namecache entry to the new vnode.
+ * Returns with *vpp set (locked) on success.
+ */
+static int
+tmpfs_create_file(struct vnode *dvp, struct vnode **vpp,
+ struct vattr *vap, struct nchandle *nch,
+ struct ucred *cred, char *target)
+{
+ int error;
+
+ *vpp = NULL;
+
+ if ((error = vget(dvp, LK_EXCLUSIVE | LK_RETRY)) != 0)
+ return error;
+
+ error = tmpfs_alloc_file(dvp, vpp, vap, nch->ncp, cred, target);
+
+ vput(dvp);
+
+ if (error == 0) {
+ /* Resolve the pending namecache entry to the new vnode. */
+ cache_setunresolved(nch);
+ cache_setvp(nch, *vpp);
+ }
+
+ return error;
+}
+
+/*
+ * VOP_NCREATE: create a regular file (or local-domain socket) named by
+ * the namecache handle in directory dvp.  Delegates to
+ * tmpfs_create_file with no symlink target.
+ */
+int
+tmpfs_ncreate(struct vop_ncreate_args *ap)
+{
+	/* Fixed mislabeled debug message (was "tmpfs_nmkdir"). */
+	DP("tmpfs_ncreate dvp %p\n %*.*s\n", (void *)ap->a_dvp,
+	    ap->a_nch->ncp->nc_nlen, ap->a_nch->ncp->nc_nlen, ap->a_nch->ncp->nc_name);
+
+	KKASSERT(ap->a_vap->va_type == VREG || ap->a_vap->va_type == VSOCK);
+
+	return tmpfs_create_file(ap->a_dvp, ap->a_vpp,
+	    ap->a_vap, ap->a_nch, ap->a_cred, NULL);
+}
+
+/*
+ * VOP_NMKDIR: create a directory; the actual work is shared with the
+ * other create entry points via tmpfs_create_file.
+ */
+int
+tmpfs_nmkdir(struct vop_nmkdir_args *ap)
+{
+ DP("tmpfs_nmkdir dvp %p\n %*.*s\n", (void *)ap->a_dvp,
+ ap->a_nch->ncp->nc_nlen, ap->a_nch->ncp->nc_nlen, ap->a_nch->ncp->nc_name);
+
+ KKASSERT(ap->a_vap->va_type == VDIR);
+
+ return tmpfs_create_file(ap->a_dvp, ap->a_vpp,
+ ap->a_vap, ap->a_nch, ap->a_cred, NULL);
+}
+
+/*
+ * VOP_NSYMLINK: create a symbolic link pointing at ap->a_target.
+ * Forces va_type to VLNK, then delegates to tmpfs_create_file().
+ */
+int
+tmpfs_nsymlink(struct vop_nsymlink_args *ap)
+{
+	DP("tmpfs_nsymlink dvp %p\n", (void *)ap->a_dvp);
+
+	ap->a_vap->va_type = VLNK;
+
+	return tmpfs_create_file(ap->a_dvp, ap->a_vpp,
+	    ap->a_vap, ap->a_nch, ap->a_cred, ap->a_target);
+}
+
+
+/*
+ * VOP_NMKNOD: create a device node or fifo.  Rejects any type other
+ * than VBLK/VCHR/VFIFO, then delegates to tmpfs_create_file().
+ */
+int
+tmpfs_nmknod(struct vop_nmknod_args *ap)
+{
+	DP("tmpfs_nmknod\n");
+
+	if (ap->a_vap->va_type != VBLK && ap->a_vap->va_type != VCHR &&
+	    ap->a_vap->va_type != VFIFO)
+		return EINVAL;
+
+	return tmpfs_create_file(ap->a_dvp, ap->a_vpp,
+	    ap->a_vap, ap->a_nch, ap->a_cred, NULL);
+}
+
+/*
+ * VOP_NRMDIR: remove the (empty) directory named by the namecache
+ * entry.  Both dvp and the target vnode are locked for the duration;
+ * the node itself is only freed when the vnode is reclaimed.
+ */
+int
+tmpfs_nrmdir(struct vop_nrmdir_args *ap)
+{
+	struct vnode *dvp = ap->a_dvp;
+	struct vnode *vp;
+
+	int error;
+	struct tmpfs_dirent *de;
+	struct tmpfs_mount *tmp;
+	struct tmpfs_node *dnode;
+	struct tmpfs_node *node;
+
+	/* Resolve and lock the victim directory's vnode. */
+	error = cache_vget(ap->a_nch, ap->a_cred, LK_EXCLUSIVE, &vp);
+	if (error)
+		return error;
+	/* XXX vget ? */
+	error = vget(dvp, LK_EXCLUSIVE);
+	if (error) {
+		vput(vp);
+		return error;
+	}
+
+	/* Log only now: the old code printed vp before cache_vget()
+	 * initialized it, which was undefined behavior. */
+	DP("tmpfs_nrmdir dvp %p vp %p\n", dvp, vp);
+
+	KKASSERT(vn_islocked(dvp));
+	KKASSERT(vn_islocked(vp));
+
+	tmp = VFS_TO_TMPFS(dvp->v_mount);
+	dnode = VP_TO_TMPFS_DIR(dvp);
+	node = VP_TO_TMPFS_DIR(vp);
+	error = 0;
+
+	/* Directories with more than two entries ('.' and '..') cannot be
+	 * removed. */
+	if (node->tn_size > 0) {
+		error = ENOTEMPTY;
+		goto out;
+	}
+
+	/* This invariant holds only if we are not trying to remove "..".
+	 * We checked for that above so this is safe now. */
+	KKASSERT(node->tn_spec.tn_dir.tn_parent == dnode);
+
+	/* Get the directory entry associated with node (vp). */
+	de = tmpfs_dir_lookup(dnode, ap->a_nch->ncp);
+	if (de == NULL) {
+		error = ENOENT;
+		goto out;
+	}
+	KKASSERT(de->td_node == node);
+
+	/* Check flags to see if we are allowed to remove the directory. */
+	if (dnode->tn_flags & APPEND || node->tn_flags & (IMMUTABLE | APPEND)) {
+		error = EPERM;
+		goto out;
+	}
+
+	/* Detach the directory entry from the directory (dnode). */
+	tmpfs_dir_detach(dvp, de);
+
+	/* Update link counts and timestamps on both the victim and its
+	 * parent (the victim's ".." no longer references the parent). */
+	node->tn_links--;
+	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
+	    TMPFS_NODE_MODIFIED;
+	node->tn_spec.tn_dir.tn_parent->tn_links--;
+	node->tn_spec.tn_dir.tn_parent->tn_status |= TMPFS_NODE_ACCESSED | \
+	    TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
+
+	/* Free the directory entry we just deleted. Note that the node
+	 * referred by it will not be removed until the vnode is really
+	 * reclaimed. */
+	tmpfs_free_dirent(tmp, de, TRUE);
+
+	KKASSERT(node->tn_links == 0);
+
+	/* Invalidate the namecache entry and the victim's own topology. */
+	cache_setunresolved(ap->a_nch);
+	cache_setvp(ap->a_nch, NULL);
+	/* XXX locking */
+	cache_inval_vp(vp, CINV_DESTROY);
+
+out:
+	/* Release the nodes. */
+	vput(dvp);
+	vput(vp);
+
+	return error;
+}
+
+/*
+ * VOP_NREMOVE: unlink the non-directory file named by the namecache
+ * entry.  The directory entry is detached and freed; the node itself
+ * persists until the vnode is reclaimed (tmpfs_reclaim).
+ */
+int
+tmpfs_nremove(struct vop_nremove_args *ap)
+{
+	struct vnode *dvp = ap->a_dvp;
+
+	struct vnode *vp;
+	struct tmpfs_dirent *de;
+	struct tmpfs_mount *tmp;
+	struct tmpfs_node *dnode;
+	struct tmpfs_node *node;
+
+	int error;
+
+	DP("tmpfs_nremove dvp %p ", dvp);
+
+	/* Resolve and lock the victim vnode via the namecache. */
+	error = cache_vget(ap->a_nch, ap->a_cred, LK_EXCLUSIVE, &vp);
+	if (error)
+		return error;
+	DP("vp %p\n", vp);
+	/* XXX vget ? */
+	error = vget(dvp, LK_EXCLUSIVE);
+	if (error) {
+		vput(vp);
+		return error;
+	}
+
+	KKASSERT(vn_islocked(dvp));
+	KKASSERT(vn_islocked(vp));
+
+	/* Directories are removed with rmdir(2), not unlink(2). */
+	if (vp->v_type == VDIR) {
+		error = EISDIR;
+		goto out;
+	}
+
+	dnode = VP_TO_TMPFS_DIR(dvp);
+	node = VP_TO_TMPFS_NODE(vp);
+	tmp = VFS_TO_TMPFS(vp->v_mount);
+
+	de = tmpfs_dir_lookup(dnode, ap->a_nch->ncp);
+	if (de == NULL) {
+		error = ENOENT;
+		goto out;
+	}
+	KKASSERT(de->td_node == node);
+
+	/* Files marked as immutable or append-only cannot be deleted. */
+	if (node->tn_flags & (IMMUTABLE | APPEND)) {
+		error = EPERM;
+		goto out;
+	}
+
+	/* Remove the entry from the directory; as it is a file, we do not
+	 * have to change the number of hard links of the directory. */
+	tmpfs_dir_detach(dvp, de);
+
+	/* Free the directory entry we just deleted. Note that the node
+	 * referred by it will not be removed until the vnode is really
+	 * reclaimed. */
+	tmpfs_free_dirent(tmp, de, TRUE);
+
+	/* Drop the (now stale) namecache resolution for this name. */
+	cache_setunresolved(ap->a_nch);
+	cache_setvp(ap->a_nch, NULL);
+	/* XXX locking */
+	cache_inval_vp(vp, CINV_DESTROY);
+
+	error = 0;
+out:
+	/* Release the nodes. */
+	vput(vp);
+	vput(dvp);
+
+	return error;
+}
+
+/*
+ * VOP_NRENAME: rename fnch (under fdvp) to tnch (under tdvp).
+ *
+ * Handles three sub-cases: moving between directories (adjusting the
+ * victim directory's ".." parent pointer and link counts), replacing an
+ * existing target entry, and renaming in place (swapping the entry's
+ * stored name).  Locking here is known-incomplete; see the XXX blocks.
+ */
+int
+tmpfs_nrename(struct vop_nrename_args *ap)
+{
+	struct vnode *fdvp = ap->a_fdvp;
+	struct vnode *fvp;
+
+	struct vnode *tdvp = ap->a_tdvp;
+	struct vnode *tvp = NULL;
+
+	struct nchandle *fnch = ap->a_fnch;
+	struct nchandle *tnch = ap->a_tnch;
+
+	char *newname = NULL;
+	int error;
+	struct tmpfs_dirent *fde, *tde;
+	struct tmpfs_mount *tmp;
+	struct tmpfs_node *fnode;
+	struct tmpfs_node *fdnode;
+	struct tmpfs_node *tnode;
+	struct tmpfs_node *tdnode;
+	size_t namelen;
+
+	DP("tmpfs_nrename\n");
+
+	/* Disallow cross-device renames. */
+	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
+		return (EXDEV);
+	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
+		return (EXDEV);
+
+	fdnode = VP_TO_TMPFS_DIR(fdvp);
+	tdnode = VP_TO_TMPFS_DIR(tdvp);
+	tmp = VFS_TO_TMPFS(tdvp->v_mount);
+
+#if 0
+	error = vget(fdvp, LK_EXCLUSIVE | LK_RETRY);
+	if (error)
+		return error;
+
+	if (tdvp != fdvp) {
+		error = vget(tdvp, LK_EXCLUSIVE | LK_RETRY);
+		if (error) {
+			vput(fdvp);
+			return error;
+		}
+	}
+	else
+		vref(tdvp);
+#endif
+
+	/* XXX */
+	//cache_vget(fnch, ap->a_cred, LK_EXCLUSIVE, &fvp);
+	//cache_vget(tnch, ap->a_cred, LK_EXCLUSIVE, &tvp);
+	fvp = fnch->ncp->nc_vp;
+	tvp = tnch->ncp->nc_vp;
+
+	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
+	fnode = VP_TO_TMPFS_NODE(fvp);
+
+	/* If we need to move the directory between entries, lock the
+	 * source so that we can safely operate on it. */
+
+	/* XXX: this is a potential locking order violation! */
+	/*if (fdnode != tdnode) {
+		error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);
+		if (error != 0)
+			goto out_unlocked;
+	}*/
+
+	fde = tmpfs_dir_lookup(fdnode, ap->a_fnch->ncp);
+	/* The source entry must exist -- it was just resolved through the
+	 * namecache.  (The previous assertion tested '== NULL' and lacked
+	 * a semicolon, which both inverted the check and broke the build.) */
+	KKASSERT(fde != NULL);
+	KKASSERT(fde->td_node == fnode);
+
+	/* If replacing an existing entry, ensure we can do the operation. */
+	if (tvp != NULL) {
+		KKASSERT(tnode != NULL);
+
+		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
+			if (tnode->tn_size > 0) {
+				error = ENOTEMPTY;
+				goto out;
+			}
+		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
+			error = ENOTDIR;
+			goto out;
+		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
+			error = EISDIR;
+			goto out;
+		}
+	}
+
+	/* Ensure that we have enough memory to hold the new name, if it
+	 * has to be changed. */
+	namelen = tnch->ncp->nc_nlen;
+	if (fnch->ncp->nc_nlen != tnch->ncp->nc_nlen ||
+	    memcmp(fnch->ncp->nc_name, tnch->ncp->nc_name,
+	    fnch->ncp->nc_nlen) != 0) {
+		newname = tmpfs_str_pool_get(&tmp->tm_str_pool, namelen, 0);
+		if (newname == NULL) {
+			error = ENOSPC;
+			goto out;
+		}
+	}
+	else
+		newname = NULL;
+
+	/* If the node is being moved to another directory,
+	 * we have to do the move. */
+	if (fdnode != tdnode) {
+		/* In case we are moving a directory, we have to adjust its
+		 * parent to point to the new parent. */
+		if (fde->td_node->tn_type == VDIR) {
+			struct tmpfs_node *n;
+
+			/* Ensure the target directory is not a child of the
+			 * directory being moved. Otherwise, we'd end up
+			 * with stale nodes. */
+			n = tdnode;
+			/* Walk up until the self-parented root is reached. */
+			while (n != n->tn_spec.tn_dir.tn_parent) {
+				if (n == fnode) {
+					error = EINVAL;
+					goto out;
+				}
+				n = n->tn_spec.tn_dir.tn_parent;
+			}
+
+			/* Adjust the parent pointer. */
+			TMPFS_VALIDATE_DIR(fnode);
+			fde->td_node->tn_spec.tn_dir.tn_parent = tdnode;
+
+			/* As a result of changing the target of the '..'
+			 * entry, the link count of the source and target
+			 * directories has to be adjusted. */
+			fdnode->tn_links--;
+			tdnode->tn_links++;
+		}
+
+		/* Do the move: just remove the entry from the source directory
+		 * and insert it into the target one. */
+		tmpfs_dir_detach(fdvp, fde);
+		tmpfs_dir_attach(tdvp, fde);
+	}
+
+	/* If we are overwriting an entry, we have to remove the old one
+	 * from the target directory. */
+	if (tvp != NULL) {
+		KKASSERT(tnode != NULL);
+
+		/* Remove the old entry from the target directory.
+		 * Note! This relies on tmpfs_dir_attach() putting the new
+		 * node on the end of the target's node list. */
+		tde = tmpfs_dir_lookup(tdnode, tnch->ncp);
+		KKASSERT(tde != NULL);
+		KKASSERT(tde->td_node == tnode);
+		tmpfs_dir_detach(tdvp, tde);
+
+		/* Free the directory entry we just deleted. Note that the
+		 * node referred by it will not be removed until the vnode is
+		 * really reclaimed. */
+		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde, TRUE);
+	}
+
+	/* If the name has changed, we need to make it effective by changing
+	 * it in the directory entry. */
+	if (newname != NULL) {
+		KKASSERT(tnch->ncp->nc_nlen < MAXNAMLEN);
+		KKASSERT(tnch->ncp->nc_nlen < 0xffff);
+
+		/* Swap the entry's name buffer for the freshly filled one;
+		 * clearing 'newname' passes ownership to the dirent so the
+		 * cleanup below does not free it. */
+		tmpfs_str_pool_put(&tmp->tm_str_pool, fde->td_name, fde->td_namelen);
+		fde->td_namelen = (uint16_t)namelen;
+		memcpy(newname, tnch->ncp->nc_name, namelen);
+		fde->td_name = newname;
+		newname = NULL;
+
+		fnode->tn_status |= TMPFS_NODE_CHANGED;
+		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
+	}
+
+	/* Move the namecache topology from the source to the target name. */
+	cache_rename(fnch, tnch);
+	error = 0;
+
+out:
+	/* if (fdnode != tdnode)
+		vn_unlock(fdvp, 0); */
+
+out_unlocked:	/* XXX label kept for the commented-out locking path above */
+	/* Release target nodes. */
+	//if (tdvp == tvp)
+	//	vrele(tdvp);
+	//else
+	//	vput(tdvp);
+	//if (tvp != NULL)
+	//	vput(tvp);
+
+	/* Release source nodes. */
+	////vrele(fdvp);
+	////vrele(fvp);
+
+	//vrele(fdvp);
+	//vput(fvp);
+
+	/* Only reached with a non-NULL newname on the error paths. */
+	if (newname != NULL)
+		tmpfs_str_pool_put(&tmp->tm_str_pool, newname, namelen);
+
+	return error;
+}
+
+
+/*
+ * VOP_NLINK: create a hard link to vp under dvp with the name carried
+ * by the namecache entry.  Rejects directories, cross-device links,
+ * link-count overflow, and immutable/append-only nodes.
+ */
+int
+tmpfs_nlink(struct vop_nlink_args *ap)
+{
+	struct vnode *dvp = ap->a_dvp;
+	struct vnode *vp = ap->a_vp;
+
+	int error;
+	struct tmpfs_dirent *de;
+	struct tmpfs_node *dnode;
+	struct tmpfs_node *node;
+
+	DP("tmpfs_nlink\n");
+
+	error = vget(dvp, LK_EXCLUSIVE);
+	if (error)
+		return error;
+
+	KKASSERT(vn_islocked(dvp));
+	/*XXX*///KKASSERT(cnp->cn_flags & HASBUF);
+	KKASSERT(dvp != vp); /* XXX When can this be false? */
+
+	dnode = VP_TO_TMPFS_DIR(dvp);
+	node = VP_TO_TMPFS_NODE(vp);
+
+	/* Lock vp because we will need to run tmpfs_update over it, which
+	 * needs the vnode to be locked. */
+	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+	if (error != 0)
+		goto out1;
+
+	/* XXX: Why aren't the following two tests done by the caller? */
+
+	/* Hard links of directories are forbidden. */
+	if (vp->v_type == VDIR) {
+		error = EISDIR;
+		goto out;
+	}
+
+	/* Cannot create cross-device links. */
+	if (dvp->v_mount != vp->v_mount) {
+		error = EXDEV;
+		goto out;
+	}
+
+	/* Ensure that we do not overflow the maximum number of links imposed
+	 * by the system. */
+	KKASSERT(node->tn_links <= LINK_MAX);
+	if (node->tn_links == LINK_MAX) {
+		error = EMLINK;
+		goto out;
+	}
+
+	/* We cannot create links of files marked immutable or append-only. */
+	if (node->tn_flags & (IMMUTABLE | APPEND)) {
+		error = EPERM;
+		goto out;
+	}
+
+	/* Allocate a new directory entry to represent the node. */
+	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
+	    ap->a_nch->ncp->nc_name, ap->a_nch->ncp->nc_nlen, &de);
+	if (error != 0)
+		goto out;
+
+	/* Insert the new directory entry into the appropriate directory. */
+	tmpfs_dir_attach(dvp, de);
+
+	/* vp link count has changed, so update node times. */
+	node->tn_status |= TMPFS_NODE_CHANGED;
+	tmpfs_update(vp, NULL, NULL, NULL, 0);
+
+	error = 0;
+
+out:
+	vn_unlock(vp);
+out1:
+	/*XXX*///PNBUF_PUT(cnp->cn_pnbuf);
+	vput(dvp);
+	return error;
+}
+
+/*
+ * VOP_OPEN: validate that the node is still live and hand off to
+ * vop_stdopen().  The append-only write check is still disabled (XXX).
+ */
+int
+tmpfs_open(struct vop_open_args *a)
+{
+	struct vnode *vp = a->a_vp;
+	int mode = a->a_mode;
+	int error;
+	struct tmpfs_node *node;
+
+	DP("tmpfs_open vp %p mode %x ", vp, mode);
+
+	KKASSERT(vn_islocked(vp));
+	node = VP_TO_TMPFS_NODE(vp);
+
+	print_links(vp, node);
+	/* The file is still active but all its names have been removed
+	 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
+	 * it is about to die. */
+	if (node->tn_links < 1) {
+		error = ENOENT;
+		goto out;
+	}
+	/* If the file is marked append-only, deny write requests. */
+/*XXX*///if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
+/*XXX*///	error = EPERM;
+/*XXX*///else
+	error = 0;
+out:
+	KKASSERT(vn_islocked(vp));
+	/* Propagate the error: the old code dropped it and opened dead
+	 * nodes anyway. */
+	if (error != 0)
+		return error;
+	return (vop_stdopen(a));
+}
+
+/*
+ * VOP_CLOSE: flush pending timestamp updates (unless the node has been
+ * fully unlinked) and delegate to vop_stdclose().
+ */
+int
+tmpfs_close(struct vop_close_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct tmpfs_node *node;
+
+	DP("tmpfs_close vp %p ", vp);
+	KKASSERT(vn_islocked(vp));
+	node = VP_TO_TMPFS_NODE(vp);
+	print_links(vp, node);
+	if (node->tn_links > 0) {
+		/* Update node times. No need to do it if the node has
+		 * been deleted, because it will vanish after we return. */
+		tmpfs_update(vp, NULL, NULL, NULL, UPDATE_CLOSE);
+	}
+	return (vop_stdclose(ap));
+}
+
+/*
+ * VOP_ACCESS: permission check.  Currently only enforces the read-only
+ * mount and IMMUTABLE restrictions; the uid/gid/mode check via
+ * vaccess() is still disabled (XXX), so everything else is permitted.
+ */
+int
+tmpfs_access(struct vop_access_args *a)
+{
+	struct vnode *vp = a->a_vp;
+	int mode = a->a_mode;
+	/*XXX*///kauth_cred_t cred = a->a_cred;
+	int error;
+	struct tmpfs_node *node;
+
+	DP("tmpfs_access vp %p\t", vp);
+
+	KKASSERT(vn_islocked(vp));
+	node = VP_TO_TMPFS_NODE(vp);
+	print_links(vp, node);
+	switch (vp->v_type) {
+	case VDIR: /* FALLTHROUGH */
+	case VLNK: /* FALLTHROUGH */
+	case VREG:
+		/* Writes to data-bearing types fail on read-only mounts. */
+		if (mode & VWRITE && vp->v_mount->mnt_flag & MNT_RDONLY) {
+			error = EROFS;
+			goto out;
+		}
+		break;
+	case VBLK: /* FALLTHROUGH */
+	case VCHR: /* FALLTHROUGH */
+	case VSOCK: /* FALLTHROUGH */
+	case VFIFO:
+		/* Device/special nodes are not affected by MNT_RDONLY. */
+		break;
+	default:
+		error = EINVAL;
+		goto out;
+	}
+	if (mode & VWRITE && node->tn_flags & IMMUTABLE) {
+		error = EPERM;
+		goto out;
+	}
+	/*XXX*///error = vaccess(vp->v_type, node->tn_mode, node->tn_uid,
+	/*XXX*///    node->tn_gid, mode, cred);
+	error = 0;
+out:
+	KKASSERT(vn_islocked(vp));
+	return error;
+}
+
+/*
+ * VOP_GETATTR: fill *vap from the in-core tmpfs_node.  Timestamps are
+ * folded in via tmpfs_itimes() before the copy.
+ */
+int
+tmpfs_getattr(struct vop_getattr_args *a)
+{
+	struct vnode *vp = a->a_vp;
+	struct vattr *vap = a->a_vap;
+	struct tmpfs_node *tnp;
+
+	DP("tmpfs_getattr vp %p\t", vp);
+
+	tnp = VP_TO_TMPFS_NODE(vp);
+
+	/* Start from a clean slate, then mirror the node field by field. */
+	VATTR_NULL(vap);
+	tmpfs_itimes(vp, NULL, NULL, NULL);
+
+	vap->va_type = vp->v_type;
+	vap->va_mode = tnp->tn_mode;
+	vap->va_nlink = tnp->tn_links;
+	vap->va_uid = tnp->tn_uid;
+	vap->va_gid = tnp->tn_gid;
+	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
+	vap->va_fileid = tnp->tn_id;
+	vap->va_size = tnp->tn_size;
+	vap->va_blocksize = PAGE_SIZE;
+	vap->va_atime = tnp->tn_atime;
+	vap->va_mtime = tnp->tn_mtime;
+	vap->va_ctime = tnp->tn_ctime;
+	/* XXX vap->va_birthtime = tnp->tn_birthtime; */
+	vap->va_gen = tnp->tn_gen;
+	vap->va_flags = tnp->tn_flags;
+	if (vp->v_type == VBLK || vp->v_type == VCHR) {
+		vap->va_rmajor = tnp->tn_spec.tn_dev.tn_rmajor;
+		vap->va_rminor = tnp->tn_spec.tn_dev.tn_rminor;
+	} else {
+		vap->va_rmajor = VNOVAL;
+		vap->va_rminor = VNOVAL;
+	}
+	vap->va_bytes = round_page(tnp->tn_size);
+	vap->va_filerev = VNOVAL;
+	vap->va_vaflags = 0;
+	vap->va_spare = VNOVAL; /* XXX */
+	return 0;
+}
+
+/* True when the timespec carries a real value (not the VNOVAL sentinel). */
+#define GOODTIME(tv) ((tv)->tv_sec != VNOVAL || (tv)->tv_nsec != VNOVAL)
+/* XXX Should this operation be atomic? I think it should, but code in
+ * other places (e.g., ufs) doesn't seem to be... */
+/*
+ * VOP_SETATTR: apply the settable attributes in *vap in a fixed order
+ * (flags, size, ownership, mode, times), stopping at the first error.
+ */
+int
+tmpfs_setattr(struct vop_setattr_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct vattr *vap = ap->a_vap;
+	struct ucred *cred = ap->a_cred;
+
+	int error;
+
+	DP("tmpfs_setattr\n");
+	KKASSERT(vn_islocked(vp));
+	error = 0;
+
+	/* Abort if any unsettable attribute is given. */
+	if (vap->va_type != VNON ||
+	    vap->va_nlink != VNOVAL ||
+	    vap->va_fsid != VNOVAL ||
+	    vap->va_fileid != VNOVAL ||
+	    vap->va_blocksize != VNOVAL ||
+	    GOODTIME(&vap->va_ctime) ||
+	    vap->va_gen != VNOVAL ||
+	    vap->va_rmajor != VNOVAL ||
+	    vap->va_rminor != VNOVAL ||
+	    vap->va_bytes != VNOVAL)
+		error = EINVAL;
+
+	if (error == 0 && (vap->va_flags != VNOVAL))
+		error = tmpfs_chflags(vp, vap->va_flags, cred);
+
+	if (error == 0 && (vap->va_size != VNOVAL))
+		error = tmpfs_chsize(vp, vap->va_size, cred);
+
+	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
+		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
+
+	if (error == 0 && (vap->va_mode != (u_short)VNOVAL))
+		error = tmpfs_chmod(vp, vap->va_mode, cred);
+
+	/* NOTE(review): on a successful tmpfs_chtimes() this returns 0
+	 * immediately, skipping the tmpfs_update() below -- confirm that
+	 * is intended and not an accidental early return. */
+	if (error == 0 &&
+	    (GOODTIME(&vap->va_atime)
+	    || GOODTIME(&vap->va_mtime)
+	    || GOODTIME(&vap->va_ctime)
+	    ))
+		if ((error = tmpfs_chtimes(vp,
+		    &vap->va_atime, &vap->va_mtime, &vap->va_ctime,
+		    vap->va_vaflags, cred)) == 0)
+			return 0;
+
+	/* Update the node times. We give preference to the error codes
+	 * generated by this function rather than the ones that may arise
+	 * from tmpfs_update. */
+	tmpfs_update(vp, NULL, NULL, NULL, 0);
+	KKASSERT(vn_islocked(vp));
+	return error;
+}
+
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Copy up to one page of file data to the caller's uio.
+ *
+ * vobj - the vnode's VM object (page cache); may be NULL or empty.
+ * tobj - the tmpfs backing anonymous object holding the file data.
+ *
+ * If a fully valid page exists in vobj it is copied from there
+ * (cache hit); otherwise the page is pulled from/created in tobj.
+ * At most PAGE_SIZE - (offset within page) bytes are transferred;
+ * the caller loops.
+ */
+static int
+tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
+{
+	vm_pindex_t idx;
+	vm_page_t m;
+	struct sf_buf *sf;
+	off_t offset, addr;
+	size_t tlen;
+	caddr_t va;
+	int error;
+
+	/* NOTE(review): %d with a size_t argument -- confirm format. */
+	DP("tmpfs_mapped_read len %d\n", len);
+	addr = uio->uio_offset;
+	idx = OFF_TO_IDX(addr);
+	offset = addr & PAGE_MASK;
+	tlen = MIN(PAGE_SIZE - offset, len);
+
+	if ((vobj == NULL) || (vobj->resident_page_count == 0))
+		goto nocache;
+
+	VM_OBJECT_LOCK(vobj);
+lookupvpg:
+	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
+	    vm_page_is_valid(m, offset, tlen)) {
+		/* Wait out a busy page, then retry the lookup from scratch. */
+		if (vm_page_sleep_busy(m, FALSE, "tmfsmr"))
+			goto lookupvpg;
+		vm_page_busy(m);
+		VM_OBJECT_UNLOCK(vobj);
+		/* Map the page into KVA via an sf_buf and copy out. */
+		/* XXX sched_pin(); */
+		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
+		va = (caddr_t)sf_buf_kva(sf);
+		error = uiomove(va + offset, tlen, uio);
+		sf_buf_free(sf);
+		/* XXX sched_unpin(); */
+		VM_OBJECT_LOCK(vobj);
+		vm_page_wakeup(m);
+		VM_OBJECT_UNLOCK(vobj);
+		return (error);
+	}
+	VM_OBJECT_UNLOCK(vobj);
+nocache:
+	/* Cache miss: fetch (or zero-fill) the page from the aobj. */
+	VM_OBJECT_LOCK(tobj);
+	vm_object_pip_add(tobj, 1);
+	m = vm_page_grab(tobj, idx,
+	    /* XXX VM_ALLOC_WIRED |*/
+	    VM_ALLOC_ZERO | VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+
+	vm_page_wire(m);
+	if (m->valid != VM_PAGE_BITS_ALL) {
+		int behind, ahead;
+		if (vm_pager_has_page(tobj, idx, &behind, &ahead)) {
+			error = vm_pager_get_pages(tobj, &m, 1, 0);
+			if (error != 0) {
+				kprintf("tmpfs get pages from pager error [read]\n");
+				goto out;
+			}
+		} else
+			vm_page_zero_invalid(m, TRUE);
+	}
+	VM_OBJECT_UNLOCK(tobj);
+	/* XXX sched_pin(); */
+	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
+	va = (caddr_t)sf_buf_kva(sf);
+	error = uiomove(va + offset, tlen, uio);
+	sf_buf_free(sf);
+	/* XXX sched_unpin(); */
+	VM_OBJECT_LOCK(tobj);
+out:
+	/* Unwire and reactivate the page, drop the pip reference. */
+	vm_page_lock_queues();
+	vm_page_unwire(m, 0);
+	vm_page_activate(m);
+	vm_page_unlock_queues();
+	vm_page_wakeup(m);
+	vm_object_pip_subtract(tobj, 1);
+	VM_OBJECT_UNLOCK(tobj);
+
+	return (error);
+}
+
+/*
+ * VOP_READ: read from a regular file, one page at a time via
+ * tmpfs_mappedread(), until the uio is satisfied or EOF is reached.
+ */
+int
+tmpfs_read(struct vop_read_args *v)
+{
+	struct vnode *vp = v->a_vp;
+	struct uio *uio = v->a_uio;
+
+	struct tmpfs_node *node;
+	vm_object_t uobj;
+	size_t len;
+	int resid;
+
+	int error;
+
+	DP("tmpfs_read off %d s %d\n", (int)uio->uio_offset, uio->uio_resid);
+	node = VP_TO_TMPFS_NODE(vp);
+
+	if (vp->v_type != VREG) {
+		error = EISDIR;
+		goto out;
+	}
+
+	if (uio->uio_offset < 0) {
+		error = EINVAL;
+		goto out;
+	}
+
+	node->tn_status |= TMPFS_NODE_ACCESSED;
+
+	uobj = node->tn_spec.tn_reg.tn_aobj;
+	while ((resid = uio->uio_resid) > 0) {
+		error = 0;
+		/* Stop at EOF. */
+		if (node->tn_size <= uio->uio_offset)
+			break;
+		len = MIN(node->tn_size - uio->uio_offset, resid);
+		if (len == 0)
+			break;
+		error = tmpfs_mappedread(vp->v_object, uobj, len, uio);
+		/* Bail on error or if no progress was made (resid unchanged). */
+		if ((error != 0) || (resid == uio->uio_resid))
+			break;
+	}
+out:
+	return error;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Copy up to one page of data from the caller's uio into the file.
+ *
+ * vobj - the vnode's VM object (page cache); may be NULL or empty.
+ * tobj - the tmpfs backing anonymous object.
+ *
+ * If a fully valid cache page exists in vobj, the data is first copied
+ * into it and control then FALLS THROUGH into 'nocache', where the
+ * whole cache page is copied onto the backing page -- the fall-through
+ * is intentional, keeping both copies coherent.
+ */
+static int
+tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
+{
+	vm_pindex_t idx;
+	vm_page_t vpg, tpg;
+	struct sf_buf *sf;
+	off_t offset, addr;
+	size_t tlen;
+	caddr_t va;
+	int error;
+
+	/* NOTE(review): %d with a size_t argument -- confirm format. */
+	DP("tmpfs_mapped_write len %d\n", len);
+	addr = uio->uio_offset;
+	idx = OFF_TO_IDX(addr);
+	offset = addr & PAGE_MASK;
+	tlen = MIN(PAGE_SIZE - offset, len);
+
+	if ((vobj == NULL) || (vobj->resident_page_count == 0)) {
+		vpg = NULL;
+		goto nocache;
+	}
+
+	VM_OBJECT_LOCK(vobj);
+lookupvpg:
+	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
+	    vm_page_is_valid(vpg, offset, tlen)) {
+		if (vm_page_sleep_busy(vpg, FALSE, "tmfsmw"))
+			goto lookupvpg;
+		vm_page_busy(vpg);
+		vm_page_lock_queues();
+		vm_page_undirty(vpg);
+		vm_page_unlock_queues();
+		VM_OBJECT_UNLOCK(vobj);
+
+		/* Copy the user data into the cache page. */
+		/* XXX sched_pin(); */
+		sf = sf_buf_alloc(vpg, SFB_CPUPRIVATE);
+		va = (caddr_t)sf_buf_kva(sf);
+		error = uiomove(va + offset, tlen, uio);
+		sf_buf_free(sf);
+		/* XXX sched_unpin(); */
+	} else {
+		VM_OBJECT_UNLOCK(vobj);
+		vpg = NULL;
+	}
+nocache:
+	/* Grab (or create) the backing page in the aobj. */
+	VM_OBJECT_LOCK(tobj);
+	vm_object_pip_add(tobj, 1);
+	tpg = vm_page_grab(tobj, idx,
+	    /* XXX VM_ALLOC_WIRED | */
+	    VM_ALLOC_ZERO | VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+
+	vm_page_wire(tpg);
+	if (tpg->valid != VM_PAGE_BITS_ALL) {
+		int behind, ahead;
+		if (vm_pager_has_page(tobj, idx, &behind, &ahead)) {
+			error = vm_pager_get_pages(tobj, &tpg, 1, 0);
+			if (error != 0) {
+				kprintf("tmpfs get pages from pager error [write]\n");
+				goto out;
+			}
+		} else
+			vm_page_zero_invalid(tpg, TRUE);
+	}
+	VM_OBJECT_UNLOCK(tobj);
+	if (vpg == NULL) {
+		/* No cache page: copy the user data straight in. */
+		/* XXX sched_pin(); */
+		sf = sf_buf_alloc(tpg, SFB_CPUPRIVATE);
+		va = (caddr_t)sf_buf_kva(sf);
+		error = uiomove(va + offset, tlen, uio);
+		sf_buf_free(sf);
+		/* XXX sched_unpin(); */
+	} else {
+		/* Mirror the whole cache page onto the backing page.
+		 * NOTE(review): pmap_copy_page() is handed vm_page_t
+		 * pointers cast to vm_paddr_t -- it looks like physical
+		 * addresses were intended; confirm against the pmap API. */
+		KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
+		pmap_copy_page((vm_paddr_t)(vm_offset_t)vpg, (vm_paddr_t)(vm_offset_t)tpg);
+	}
+	VM_OBJECT_LOCK(tobj);
+out:
+	if (vobj != NULL)
+		VM_OBJECT_LOCK(vobj);
+	vm_page_lock_queues();
+	if (error == 0) {
+		vm_page_set_validclean(tpg, offset, tlen);
+		vm_page_zero_invalid(tpg, TRUE);
+		vm_page_dirty(tpg);
+	}
+	vm_page_unwire(tpg, 0);
+	vm_page_activate(tpg);
+	vm_page_unlock_queues();
+	vm_page_wakeup(tpg);
+	if (vpg != NULL)
+		vm_page_wakeup(vpg);
+	if (vobj != NULL)
+		VM_OBJECT_UNLOCK(vobj);
+	vm_object_pip_subtract(tobj, 1);
+	VM_OBJECT_UNLOCK(tobj);
+
+	return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * VOP_WRITE: write to a regular file.  Handles O_APPEND, extends the
+ * file first when the write reaches past EOF, then copies data one
+ * page at a time via tmpfs_mappedwrite().  On error the previous file
+ * size is restored.
+ */
+int
+tmpfs_write(struct vop_write_args *v)
+{
+	struct vnode *vp = v->a_vp;
+	struct uio *uio = v->a_uio;
+	int ioflag = v->a_ioflag;
+
+	boolean_t extended;
+	int error = 0;
+	off_t oldsize;
+	struct tmpfs_node *node;
+	vm_object_t uobj;
+	size_t len;
+	int resid;
+
+	DP("tmpfs_write %p\n", vp);
+	node = VP_TO_TMPFS_NODE(vp);
+	oldsize = node->tn_size;
+
+	if (uio->uio_offset < 0 || vp->v_type != VREG) {
+		error = EINVAL;
+		goto out;
+	}
+
+	if (uio->uio_resid == 0) {
+		error = 0;
+		goto out;
+	}
+
+	/* O_APPEND always writes at the current end of file. */
+	if (ioflag & IO_APPEND)
+		uio->uio_offset = node->tn_size;
+
+	/*XXX*/
+	//if (uio->uio_offset + uio->uio_resid >
+	//    VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
+	//	return (EFBIG);
+#if 0
+	if (vp->v_type == VREG && td != NULL) {
+		/*XXX*/PROC_LOCK(uio->uio_td->td_proc);
+		if (uio->uio_offset + uio->uio_resid >
+		    lim_cur(uio->uio_td->td_proc, RLIMIT_FSIZE)) {
+			psignal(uio->uio_td->td_proc, SIGXFSZ);
+			PROC_UNLOCK(uio->uio_td->td_proc);
+			return (EFBIG);
+		}
+		/*XXX*/PROC_UNLOCK(uio->uio_td->td_proc);
+	}
+#endif
+	/* Grow the file up front so the copy loop never writes past EOF. */
+	extended = uio->uio_offset + uio->uio_resid > node->tn_size;
+	if (extended) {
+		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid);
+		if (error != 0)
+			goto out;
+	}
+
+	uobj = node->tn_spec.tn_reg.tn_aobj;
+	while ((resid = uio->uio_resid) > 0) {
+		if (node->tn_size <= uio->uio_offset)
+			break;
+		len = MIN(node->tn_size - uio->uio_offset, resid);
+		if (len == 0)
+			break;
+		error = tmpfs_mappedwrite(vp->v_object, uobj, len, uio);
+		/* Bail on error or if no progress was made. */
+		if ((error != 0) || (resid == uio->uio_resid))
+			break;
+	}
+
+	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
+	    (extended ? TMPFS_NODE_CHANGED : 0);
+
+	/* XXX */
+	//if (node->tn_mode & (S_ISUID | S_ISGID)) {
+	//	if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID, 0))
+	//		node->tn_mode &= ~(S_ISUID | S_ISGID);
+	//}
+
+	/* Undo the extension if the write failed. */
+	if (error != 0)
+		(void)tmpfs_reg_resize(vp, oldsize);
+
+out:
+	KKASSERT(IMPLIES(error == 0, uio->uio_resid == 0));
+	KKASSERT(IMPLIES(error != 0, oldsize == node->tn_size));
+	return error;
+}
+
+/*
+ * VOP_FSYNC: tmpfs keeps everything in memory, so syncing only means
+ * flushing pending timestamp updates into the node.
+ */
+int
+tmpfs_fsync(struct vop_fsync_args *a)
+{
+	DP("tmpfs_fsync\n");
+	KKASSERT(vn_islocked(a->a_vp));
+	tmpfs_update(a->a_vp, NULL, NULL, NULL, 0);
+	return 0;
+}
+
+
+/*
+ * VOP_READDIR: emit "." and ".." (encoded as the special cookies
+ * TMPFS_DIRCOOKIE_DOT/DOTDOT), then the real entries via
+ * tmpfs_dir_getdents().  The helpers return -1 to signal "buffer
+ * full", which is mapped to success here.  If the caller asked for
+ * NFS cookies, the cookie list is reconstructed by walking the entry
+ * list again from the starting offset.
+ */
+int
+tmpfs_readdir(struct vop_readdir_args *a)
+{
+	struct vnode *vp = a->a_vp;
+	struct uio *uio = a->a_uio;
+	int *eofflag = a->a_eofflag;
+	off_t **cookies = a->a_cookies;
+	int *ncookies = a->a_ncookies;
+
+	int error;
+	off_t startoff;
+	off_t cnt;
+	struct tmpfs_node *node;
+
+	DP("tmpfs_readdir\n");
+	/* This operation only makes sense on directory nodes. */
+	if (vp->v_type != VDIR) {
+		error = ENOTDIR;
+		goto out;
+	}
+	node = VP_TO_TMPFS_DIR(vp);
+	startoff = uio->uio_offset;
+
+	/* cnt counts the entries emitted, for the cookie array below. */
+	cnt = 0;
+	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
+		error = tmpfs_dir_getdotdent(node, uio);
+		if (error == -1) {
+			error = 0;
+			goto outok;
+		} else if (error != 0)
+			goto outok;
+		cnt++;
+	}
+
+	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
+		error = tmpfs_dir_getdotdotdent(node, uio);
+		if (error == -1) {
+			error = 0;
+			goto outok;
+		} else if (error != 0)
+			goto outok;
+		cnt++;
+	}
+
+	error = tmpfs_dir_getdents(node, uio, &cnt);
+	if (error == -1)
+		error = 0;
+	KKASSERT(error >= 0);
+
+outok:
+	/* This label assumes that startoff has been
+	 * initialized. If the compiler didn't spit out warnings, we'd
+	 * simply make this one be 'out' and drop 'outok'. */
+
+	if (eofflag != NULL)
+		*eofflag =
+		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
+
+	/* Update NFS-related variables. */
+	if (error == 0 && cookies != NULL && ncookies != NULL) {
+		off_t i;
+		off_t off = startoff;
+		struct tmpfs_dirent *de = NULL;
+
+		*ncookies = cnt;
+		*cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
+
+		/* Re-walk the entries just emitted, recording the cookie
+		 * (offset) that follows each one. */
+		for (i = 0; i < cnt; i++) {
+			KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
+			if (off == TMPFS_DIRCOOKIE_DOT) {
+				off = TMPFS_DIRCOOKIE_DOTDOT;
+			} else {
+				if (off == TMPFS_DIRCOOKIE_DOTDOT) {
+					de = TAILQ_FIRST(&node->tn_spec.
+					    tn_dir.tn_dir);
+				} else if (de != NULL) {
+					de = TAILQ_NEXT(de, td_entries);
+				} else {
+					de = tmpfs_dir_lookupbycookie(node,
+					    off);
+					KKASSERT(de != NULL);
+					de = TAILQ_NEXT(de, td_entries);
+				}
+				if (de == NULL) {
+					off = TMPFS_DIRCOOKIE_EOF;
+				} else {
+					off = tmpfs_dircookie(de);
+				}
+			}
+
+			(*cookies)[i] = off;
+		}
+		KKASSERT(uio->uio_offset == off);
+	}
+
+out:
+	return error;
+}
+
+/*
+ * VOP_READLINK: copy the symlink target into the caller's uio,
+ * truncated to whichever is smaller: the link length or the buffer.
+ */
+int
+tmpfs_readlink(struct vop_readlink_args *a)
+{
+	struct vnode *vp = a->a_vp;
+	struct tmpfs_node *lnode;
+	int error;
+
+	DP("tmpfs_readlink\n");
+	KKASSERT(vn_islocked(vp));
+	KKASSERT(a->a_uio->uio_offset == 0);
+	KKASSERT(vp->v_type == VLNK);
+
+	lnode = VP_TO_TMPFS_NODE(vp);
+	error = uiomove(lnode->tn_spec.tn_lnk.tn_link,
+	    MIN(lnode->tn_size, a->a_uio->uio_resid), a->a_uio);
+	/* Reading the link counts as an access. */
+	lnode->tn_status |= TMPFS_NODE_ACCESSED;
+
+	KKASSERT(vn_islocked(vp));
+	return error;
+}
+
+/*
+ * VOP_INACTIVE: last reference to the vnode has been dropped.  If the
+ * node has no names left, ask for immediate recycling so tmpfs_reclaim
+ * can free it; always return with the vnode unlocked.
+ */
+int
+tmpfs_inactive(struct vop_inactive_args *a)
+{
+	struct vnode *vp = a->a_vp;
+
+	DP("tmpfs_inactive vp %p\n", vp);
+	KKASSERT(vn_islocked(vp));
+
+	if (VP_TO_TMPFS_NODE(vp)->tn_links == 0)
+		vrecycle(vp);
+	vn_unlock(vp);
+	return 0;
+}
+
+/*
+ * VOP_RECLAIM: detach the tmpfs_node from the dying vnode; if the node
+ * was already unlinked (no names left), free it for good.
+ */
+int
+tmpfs_reclaim(struct vop_reclaim_args *a)
+{
+	struct vnode *vp = a->a_vp;
+	struct tmpfs_node *node;
+	struct tmpfs_mount *tmp;
+
+	DP("tmpfs_reclaim vp %p\n", vp);
+
+	/* Nothing to do for a vnode that was never associated. */
+	if (vp->v_data == NULL)
+		return 0;
+
+	node = VP_TO_TMPFS_NODE(vp);
+	tmp = VFS_TO_TMPFS(vp->v_mount);
+
+	tmpfs_free_vp(vp);
+
+	/* If the node referenced by this vnode was deleted by the user,
+	 * we must free its associated data structures (now that the
+	 * vnode is being reclaimed). */
+	if (node->tn_links == 0)
+		tmpfs_free_node(tmp, node);
+
+	KKASSERT(vp->v_data == NULL);
+	return 0;
+}
+
+/*
+ * VOP_PRINT: dump the node's state for debugging (e.g. ddb).
+ *
+ * The previous code redefined PRIdMAX to "LX", producing the invalid
+ * conversion "%LX" for a uintmax_t argument; "%ju" matches the
+ * (uintmax_t) cast directly.
+ */
+int
+tmpfs_print(struct vop_print_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	struct tmpfs_node *node;
+
+	node = VP_TO_TMPFS_NODE(vp);
+
+	kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n",
+	    node, node->tn_flags, node->tn_links);
+	kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n",
+	    node->tn_mode, node->tn_uid, node->tn_gid,
+	    (uintmax_t)node->tn_size, (u_int)node->tn_status);
+	if (vp->v_type == VFIFO)
+		fifo_printinfo(vp);
+	kprintf("\n");
+
+	return 0;
+}
+
+/*
+ * VOP_PATHCONF: report filesystem limits.  Returns EINVAL for any
+ * name that tmpfs does not implement.
+ */
+int
+tmpfs_pathconf(struct vop_pathconf_args *ap)
+{
+	register_t *retval = ap->a_retval;
+
+	DP("tmpfs_pathconf\n");
+
+	switch (ap->a_name) {
+	case _PC_LINK_MAX:
+		*retval = LINK_MAX;
+		return 0;
+	case _PC_NAME_MAX:
+		*retval = NAME_MAX;
+		return 0;
+	case _PC_PATH_MAX:
+		*retval = PATH_MAX;
+		return 0;
+	case _PC_PIPE_BUF:
+		*retval = PIPE_BUF;
+		return 0;
+	case _PC_CHOWN_RESTRICTED:
+		*retval = 1;
+		return 0;
+	case _PC_NO_TRUNC:
+		*retval = 1;
+		return 0;
+#if 0
+	case _PC_SYNC_IO:
+		*retval = 1;
+		return 0;
+	case _PC_FILESIZEBITS:
+		*retval = 0; /* XXX Don't know which value should I return. */
+		return 0;
+#endif
+	default:
+		return EINVAL;
+	}
+}
+
+/*
+ * VOP_ADVLOCK: POSIX advisory byte-range locking, delegated to the
+ * generic lockf machinery anchored in the node.
+ */
+int
+tmpfs_advlock(struct vop_advlock_args *ap)
+{
+	struct tmpfs_node *node;
+
+	DP("tmpfs_advlock\n");
+	node = VP_TO_TMPFS_NODE(ap->a_vp);
+	return (lf_advlock(ap, &node->tn_lockf, (u_quad_t)node->tn_size));
+}
+
+
+
+/*
+ * VOP_GETPAGES: satisfy a page fault on a tmpfs file by copying the
+ * requested page from the backing anonymous object into the vnode's
+ * page.  Only the requested page is filled; the commented-out loop for
+ * the surrounding pages is not yet enabled.
+ *
+ * Removed from the old version: an unused runtime-sized array
+ * 'vm_page_t ma[ap->a_count]' (a VLA eating kernel stack), the unused
+ * 'rv', and the no-effect statement 'vm_page_queues[0];'.
+ */
+int
+tmpfs_getpages(struct vop_getpages_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	vm_ooffset_t offset = ap->a_offset;
+	struct vm_page **m = ap->a_m;
+	int count = ap->a_count;
+	int reqpage = ap->a_reqpage;
+
+	int error = 0;
+	int i;
+	struct tmpfs_node *node;
+	struct vm_object *tobj;
+	vm_pindex_t idx;
+	vm_page_t p;
+
+	DP("tmpfs_getpages\n");
+	KKASSERT(vp->v_type == VREG);
+	KKASSERT(mutex_owned(&vp->v_interlock));
+
+	node = VP_TO_TMPFS_NODE(vp);
+	tobj = node->tn_spec.tn_reg.tn_aobj;
+
+	idx = m[reqpage]->pindex;
+
+	crit_enter();
+
+	/* Grab (or create) the backing page and make it fully valid. */
+	p = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+
+	vm_page_wire(p);
+
+	if (p->valid != VM_PAGE_BITS_ALL) {
+		int behind, ahead;
+		if (vm_pager_has_page(tobj, idx, &behind, &ahead)) {
+			error = vm_pager_get_pages(tobj, &p, 1, 0);
+			if (error != 0) {
+				kprintf("tmpfs.gp get pages from pager error [read]\n");
+				goto out;
+			}
+		} else
+			vm_page_zero_invalid(p, TRUE);
+	}
+
+	/* Copy the backing page into the faulting page. */
+	vm_page_copy(p, m[reqpage]);
+
+	m[reqpage]->valid = VM_PAGE_BITS_ALL;
+	vm_page_undirty(m[reqpage]);
+
+out:
+	vm_page_unwire(p, 0);
+	vm_page_activate(p);
+	vm_page_wakeup(p);
+	crit_exit();
+/*
+	for (i = 0; i < count; i++) {
+		if (i != reqpage) {
+			if (!error) {
+				if (m[i]->flags & PG_WANTED)
+					vm_page_activate(m[i]);
+				else
+					vm_page_deactivate(m[i]);
+				vm_page_wakeup(m[i]);
+			} else {
+				vnode_pager_freepage(m[i]);
+			}
+		}
+	}
+*/
+	//error = vm_pager_get_pages(uobj, m, count, reqpage);
+
+	/* We currently don't rely on PGO_PASTEOF. */
+#if 0
+	if (vp->v_size <= offset + (centeridx << PAGE_SHIFT)) {
+		if ((flags & PGO_LOCKED) == 0)
+			mutex_exit(&vp->v_interlock);
+		return EINVAL;
+	}
+
+	if (vp->v_size < offset + (npages << PAGE_SHIFT)) {
+		npages = (round_page(vp->v_size) - offset) >> PAGE_SHIFT;
+	}
+
+	if ((flags & PGO_LOCKED) != 0)
+		return EBUSY;
+
+	if ((flags & PGO_NOTIMESTAMP) == 0) {
+		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
+			node->tn_status |= TMPFS_NODE_ACCESSED;
+
+		if ((access_type & VM_PROT_WRITE) != 0)
+			node->tn_status |= TMPFS_NODE_MODIFIED;
+	}
+
+	mutex_exit(&vp->v_interlock);
+
+	/*
+	 * Make sure that the array on which we will store the
+	 * gotten pages is clean. Otherwise uao_get (pointed to by
+	 * the pgo_get below) gets confused and does not return the
+	 * appropriate pages.
+	 *
+	 * XXX This shall be revisited when kern/32166 is addressed
+	 * because the loop to clean m[i] will most likely be redundant
+	 * as well as the PGO_ALLPAGES flag.
+	 */
+	if (m != NULL)
+		for (i = 0; i < npages; i++)
+			m[i] = NULL;
+	mutex_enter(&uobj->vmobjlock);
+
+	error = (*uobj->pgops->pgo_get)(uobj, offset, m, &npages, centeridx,
+	    access_type, advice, flags | PGO_ALLPAGES);
+#endif
+
+#if defined(DEBUG)
+	{
+	/* Make sure that all the pages we return are valid.
+	 * NOTE(review): 'npages' is not defined in this function; this
+	 * block will not compile with DEBUG -- confirm/fix before use. */
+	int dbgi;
+	if (error == 0 && m != NULL)
+		for (dbgi = 0; dbgi < npages; dbgi++)
+			KKASSERT(m[dbgi] != NULL);
+	}
+#endif
+
+	return error;
+}
+#if 1
+/*
+ * VOP_PUTPAGES: flush dirty pages for a tmpfs file by handing them to
+ * the backing anonymous object's pager.  Per-page status is returned
+ * through rtvals.
+ */
+int
+tmpfs_putpages(struct vop_putpages_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+	vm_ooffset_t offset = ap->a_offset;
+	struct vm_page **m = ap->a_m;
+	int count = ap->a_count;
+	int *rtvals = ap->a_rtvals;
+	int sync = ap->a_sync;
+
+	struct tmpfs_node *node;
+	struct vm_object *uobj;
+
+	DP("tmpfs_putpages\n");
+	KKASSERT(vp->v_type == VREG);
+	//KKASSERT(mutex_owned(&vp->v_interlock));
+
+	node = VP_TO_TMPFS_NODE(vp);
+	uobj = node->tn_spec.tn_reg.tn_aobj;
+
+	vm_pager_put_pages(uobj, m, count, sync, rtvals);
+	return 0;
+}
+
+#endif
+
+
+
+/* --------------------------------------------------------------------- */
+/* tmpfs_fifo_ops */
+
+
+/*
+ * kqueue filter attach for tmpfs fifos: mark the node accessed or
+ * modified according to the filter being registered, then pass the
+ * request to the generic fifo code.
+ */
+static int
+tmpfs_fifo_kqfilter(struct vop_kqfilter_args *ap)
+{
+	struct vnode *vp;
+	struct tmpfs_node *node;
+
+	vp = ap->a_vp;
+	node = VP_TO_TMPFS_NODE(vp);
+
+	switch (ap->a_kn->kn_filter){
+	case EVFILT_READ:
+		node->tn_status |= TMPFS_NODE_ACCESSED;
+		break;
+	case EVFILT_WRITE:
+		node->tn_status |= TMPFS_NODE_MODIFIED;
+		break;
+	default:
+		/* Other filters: no timestamp effect, just delegate. */
+		break;
+	}
+
+	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
+}
+
+/*
+ * Close a tmpfs fifo: flush pending timestamp updates, then let the
+ * generic fifo code perform the actual close.
+ */
+int
+tmpfs_fifo_close(struct vop_close_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+
+	DP("tmpfs_fifo_close\n");
+	tmpfs_update(vp, NULL, NULL, NULL, UPDATE_CLOSE);
+	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
+}
+
+/*
+ * Read from a tmpfs fifo: record the access on the node, then delegate
+ * the data transfer to the generic fifo code.
+ */
+int
+tmpfs_fifo_read(struct vop_read_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+
+	DP("tmpfs_fifo_read\n");
+	VP_TO_TMPFS_NODE(vp)->tn_status |= TMPFS_NODE_ACCESSED;
+	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
+}
+
+
+/*
+ * Write to a tmpfs fifo: record the modification on the node, then
+ * delegate the data transfer to the generic fifo code.
+ */
+int
+tmpfs_fifo_write(struct vop_write_args *ap)
+{
+	struct vnode *vp = ap->a_vp;
+
+	DP("tmpfs_fifo_write\n");
+	VP_TO_TMPFS_NODE(vp)->tn_status |= TMPFS_NODE_MODIFIED;
+	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
+}
+
+/* --------------------------------------------------------------------- */
+/* tmpfs_spec_ops */
+
+/*
+ * Close a tmpfs device node: flush pending timestamp updates, then let
+ * the generic special-file code perform the actual close.
+ */
+int
+tmpfs_spec_close(struct vop_close_args *ap)
+{
+	DP("tmpfs_spec_close\n");
+	tmpfs_update(ap->a_vp, NULL, NULL, NULL, UPDATE_CLOSE);
+	return (VOCALL(&spec_vnode_vops, &ap->a_head));
+}
+
+
+/*
+ * Read from a tmpfs device node: record the access, then hand the
+ * transfer off to the device layer.
+ */
+int
+tmpfs_spec_read(struct vop_read_args *ap)
+{
+	DP("tmpfs_spec_read\n");
+	VP_TO_TMPFS_NODE(ap->a_vp)->tn_status |= TMPFS_NODE_ACCESSED;
+	return (VOCALL(&spec_vnode_vops, &ap->a_head));
+}
+
+
+/*
+ * Write to a tmpfs device node: record the modification, then hand the
+ * transfer off to the device layer.
+ */
+int
+tmpfs_spec_write(struct vop_write_args *ap)
+{
+	DP("tmpfs_spec_write\n");
+	VP_TO_TMPFS_NODE(ap->a_vp)->tn_status |= TMPFS_NODE_MODIFIED;
+	return (VOCALL(&spec_vnode_vops, &ap->a_head));
+}
diff --git a/sys/vfs/tmpfs/tmpfs_vnops.h b/sys/vfs/tmpfs/tmpfs_vnops.h
new file mode 100755
index 0000000..4287789
--- /dev/null
+++ b/sys/vfs/tmpfs/tmpfs_vnops.h
@@ -0,0 +1,48 @@
+/* $NetBSD: tmpfs_vnops.h,v 1.11 2008/04/28 20:24:02 martin Exp $ */
+
+/*
+ * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
+ * 2005 program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VFS_TMPFS_TMPFS_VNOPS_H_
+#define _VFS_TMPFS_TMPFS_VNOPS_H_
+
+#if !defined(_KERNEL)
+#error not supposed to be exposed to userland.
+#endif
+
+/*
+ * Declarations for tmpfs_vnops.c.
+ */
+
+/* Vnode operations vector for regular tmpfs vnodes. */
+extern struct vop_ops tmpfs_vnode_vops;
+/* Vnode operations vector for special-device tmpfs vnodes. */
+extern struct vop_ops tmpfs_spec_vops;
+/* Vnode operations vector for fifo tmpfs vnodes. */
+extern struct vop_ops tmpfs_fifo_vops;
+
+#endif /* _VFS_TMPFS_TMPFS_VNOPS_H_ */
[
Date Prev][
Date Next]
[
Thread Prev][
Thread Next]
[
Date Index][
Thread Index]