#ifndef KexecTypes_H
#define KexecTypes_H

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
#define MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED)

/*
 * Value stored in mutex->mtx_lock to denote a destroyed mutex.
 */
#define MTX_DESTROYED	(MTX_CONTESTED | MTX_UNOWNED)
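/*
 * Illustrative sketch, not part of the original header: for MTX_DEF
 * mutexes the owning thread pointer and the state bits above share the
 * single mtx_lock word, so the owner can be recovered by masking off the
 * flag bits. The helper name is hypothetical.
 */
#if 0
static __inline uintptr_t
mtx_owner_ptr(volatile uintptr_t mtx_lock)
{
	/* Bits outside MTX_FLAGMASK hold the owning thread pointer. */
	return (mtx_lock & ~MTX_FLAGMASK);
}
#endif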

typedef __int64_t sbintime_t;

struct knote {
	SLIST_ENTRY(knote) kn_link;	/* for kq */
	SLIST_ENTRY(knote) kn_selnext;	/* for struct selinfo */
	struct knlist *kn_knlist;	/* f_attach populated */
	TAILQ_ENTRY(knote) kn_tqe;
	struct kqueue *kn_kq;		/* which queue we are on */
	struct kevent kn_kevent;
	int kn_status;			/* protected by kq lock */
#define KN_ACTIVE	0x01		/* event has been triggered */
#define KN_QUEUED	0x02		/* event is on queue */
#define KN_DISABLED	0x04		/* event is disabled */
#define KN_DETACHED	0x08		/* knote is detached */
#define KN_INFLUX	0x10		/* knote is in flux */
#define KN_MARKER	0x20		/* ignore this knote */
#define KN_KQUEUE	0x40		/* this knote belongs to a kq */
#define KN_HASKQLOCK	0x80		/* for _inevent */
	int kn_sfflags;			/* saved filter flags */
	intptr_t kn_sdata;		/* saved data field */
	union {
		struct file *p_fp;	/* file data pointer */
		struct proc *p_proc;	/* proc pointer */
		struct aiocblist *p_aio;	/* AIO job pointer */
		struct aioliojob *p_lio;	/* LIO job pointer */
	} kn_ptr;
	struct filterops *kn_fop;
	void *kn_hook;
	int kn_hookid;

#define kn_id		kn_kevent.ident
#define kn_filter	kn_kevent.filter
#define kn_flags	kn_kevent.flags
#define kn_fflags	kn_kevent.fflags
#define kn_data		kn_kevent.data
#define kn_fp		kn_ptr.p_fp
};
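/*
 * Usage sketch, illustrative and not from the original header: the kn_*
 * shorthand macros above let filter code touch kevent fields as if they
 * were direct knote members. The filter function below is hypothetical.
 */
#if 0
static int
filt_example(struct knote *kn, long hint)
{
	(void)hint;
	kn->kn_data = 0;		/* expands to kn->kn_kevent.data */
	return (kn->kn_fflags != 0);	/* expands to kn->kn_kevent.fflags */
}
#endif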

struct pctrie {
	uintptr_t pt_root;
};

TAILQ_HEAD(buflists, buf);

struct rwlock {
	struct lock_object lock_object;
	volatile uintptr_t rw_lock;
};

struct bufv {
	struct buflists bv_hd;		/* Sorted blocklist */
	struct pctrie bv_root;		/* Buf trie */
	int bv_cnt;			/* Number of buffers */
};

struct rl_q_entry {
	TAILQ_ENTRY(rl_q_entry) rl_q_link;
	off_t rl_q_start, rl_q_end;
	int rl_q_flags;
};

struct rangelock {
	TAILQ_HEAD(, rl_q_entry) rl_waiters;
	struct rl_q_entry *rl_currdep;
};

struct bufobj {
	struct rwlock bo_lock;		/* Lock which protects "i" things */
	struct buf_ops *bo_ops;		/* - Buffer operations */
	struct vm_object *bo_object;	/* v Place to store VM object */
	LIST_ENTRY(bufobj) bo_synclist;	/* S dirty vnode list */
	void *bo_private;		/* private pointer */
	struct vnode *__bo_vnode;	/*
					 * XXX: This vnode pointer is here
					 * XXX: only to keep the syncer working
					 * XXX: for now.
					 */
	struct bufv bo_clean;		/* i Clean buffers */
	struct bufv bo_dirty;		/* i Dirty buffers */
	long bo_numoutput;		/* i Writes in progress */
	u_int bo_flag;			/* i Flags */
	int bo_bsize;			/* - Block size for i/o */
};

struct lock {
	struct lock_object lock_object;
	volatile uintptr_t lk_lock;
	u_int lk_exslpfail;
	int lk_timo;
	int lk_pri;
#ifdef DEBUG_LOCKS
	struct stack lk_stack;
#endif
};

enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO, VBAD,
	     VMARKER };
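/*
 * Locking legend for the single-letter prefixes in the vnode field
 * comments below, condensed from FreeBSD's sys/vnode.h (not part of the
 * original excerpt, reproduced here for reference):
 *	c - namecache mutex
 *	f - freelist mutex
 *	i - interlock (v_interlock)
 *	m - mount point interlock
 *	u - only a reference to the vnode is needed to read
 *	v - vnode lock
 *	I - updated with atomics
 */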

struct vnode {
	/*
	 * Fields which define the identity of the vnode. These fields are
	 * owned by the filesystem (XXX: and vgone() ?)
	 */
	const char *v_tag;		/* u type of underlying data */
	struct vop_vector *v_op;	/* u vnode operations vector */
	void *v_data;			/* u private data for fs */

	/*
	 * Filesystem instance stuff
	 */
	struct mount *v_mount;		/* u ptr to vfs we are in */
	TAILQ_ENTRY(vnode) v_nmntvnodes;	/* m vnodes for mount point */

	/*
	 * Type specific fields, only one applies to any given vnode.
	 * See #defines below for renaming to v_* namespace.
	 */
	union {
		struct mount *vu_mount;	/* v ptr to mountpoint (VDIR) */
		struct socket *vu_socket;	/* v unix domain net (VSOCK) */
		struct cdev *vu_cdev;	/* v device (VCHR, VBLK) */
		struct fifoinfo *vu_fifoinfo;	/* v fifo (VFIFO) */
	} v_un;
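/*
 * The v_* renaming macros the comment above refers to, as found in
 * FreeBSD's sys/vnode.h (restored here since the excerpt omits them):
 */
#define v_mountedhere	v_un.vu_mount
#define v_socket	v_un.vu_socket
#define v_rdev		v_un.vu_cdev
#define v_fifoinfo	v_un.vu_fifoinfo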

	/*
	 * vfs_hash: (mount + inode) -> vnode hash. The hash value
	 * itself is grouped with other int fields, to avoid padding.
	 */
	LIST_ENTRY(vnode) v_hashlist;

	/*
	 * VFS_namecache stuff
	 */
	LIST_HEAD(, namecache) v_cache_src;	/* c Cache entries from us */
	TAILQ_HEAD(, namecache) v_cache_dst;	/* c Cache entries to us */
	struct namecache *v_cache_dd;	/* c Cache entry for .. vnode */

	/*
	 * Locking
	 */
	struct lock v_lock;		/* u (if fs doesn't have one) */
	struct mtx v_interlock;		/* lock for "i" things */
	struct lock *v_vnlock;		/* u pointer to vnode lock */

	/*
	 * The machinery of being a vnode
	 */
	TAILQ_ENTRY(vnode) v_actfreelist;	/* f vnode active/free lists */
	struct bufobj v_bufobj;		/* * Buffer cache object */

	/*
	 * Hooks for various subsystems and features.
	 */
	struct vpollinfo *v_pollinfo;	/* i Poll events, p for *v_pi */
	struct label *v_label;		/* MAC label for vnode */
	struct lockf *v_lockf;		/* Byte-level advisory lock list */
	struct rangelock v_rl;		/* Byte-range lock */

	/*
	 * clustering stuff
	 */
	daddr_t v_cstart;		/* v start block of cluster */
	daddr_t v_lasta;		/* v last allocation */
	daddr_t v_lastw;		/* v last write */
	int v_clen;			/* v length of cur. cluster */

	u_int v_holdcnt;		/* I prevents recycling. */
	u_int v_usecount;		/* I ref count of users */
	u_int v_iflag;			/* i vnode flags (see below) */
	u_int v_vflag;			/* v vnode flags */
	int v_writecount;		/* v ref count of writers */
	u_int v_hash;
	enum vtype v_type;		/* u vnode type */
};

struct file {
	void *f_data;			/* file descriptor specific data */
	struct fileops *f_ops;		/* File operations */
	struct ucred *f_cred;		/* associated credentials. */
	struct vnode *f_vnode;		/* NULL or applicable vnode */
	short f_type;			/* descriptor type */
	short f_vnread_flags;		/* (f) Sleep lock for f_offset */
	volatile u_int f_flag;		/* see fcntl.h */
	volatile u_int f_count;		/* reference count */
	/*
	 * DTYPE_VNODE specific fields.
	 */
	int f_seqcount;			/* Count of sequential accesses. */
	off_t f_nextoff;		/* next expected read/write offset. */
	struct cdev_privdata *f_cdevpriv;	/* (d) Private data for the cdev. */
	/*
	 * DFLAG_SEEKABLE specific fields
	 */
	off_t f_offset;
	/*
	 * Mandatory Access control information.
	 */
	void *f_label;			/* Place-holder for MAC label. */
};

typedef uint32_t seq_t;

struct filecaps {
	cap_rights_t fc_rights;		/* per-descriptor capability rights */
	u_long *fc_ioctls;		/* per-descriptor allowed ioctls */
	int16_t fc_nioctls;		/* fc_ioctls array size */
	uint32_t fc_fcntls;		/* per-descriptor allowed fcntls */
};

struct filedescent {
	struct file *fde_file;		/* file structure for open file */
	struct filecaps fde_caps;	/* per-descriptor rights */
	uint8_t fde_flags;		/* per-process open file flags */
	seq_t fde_seq;			/* keep file and caps in sync */
};

#define IOCPARM_SHIFT	13		/* number of bits for ioctl size */
#define IOCPARM_MASK	((1 << IOCPARM_SHIFT) - 1)	/* parameter length mask */
#define IOCPARM_LEN(x)	(((x) >> 16) & IOCPARM_MASK)
#define IOCBASECMD(x)	((x) & ~(IOCPARM_MASK << 16))
#define IOCGROUP(x)	(((x) >> 8) & 0xff)

#define IOCPARM_MAX	(1 << IOCPARM_SHIFT)	/* max size of ioctl */
#define IOC_VOID	0x20000000	/* no parameters */
#define IOC_OUT		0x40000000	/* copy out parameters */
#define IOC_IN		0x80000000	/* copy in parameters */
#define IOC_INOUT	(IOC_IN|IOC_OUT)
#define IOC_DIRMASK	(IOC_VOID|IOC_OUT|IOC_IN)

#define _IOC(inout,group,num,len) ((unsigned long) \
    ((inout) | (((len) & IOCPARM_MASK) << 16) | ((group) << 8) | (num)))
#define _IO(g,n)	_IOC(IOC_VOID, (g), (n), 0)
#define _IOWINT(g,n)	_IOC(IOC_VOID, (g), (n), sizeof(int))
#define _IOR(g,n,t)	_IOC(IOC_OUT, (g), (n), sizeof(t))
#define _IOW(g,n,t)	_IOC(IOC_IN, (g), (n), sizeof(t))
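/*
 * Usage sketch, illustrative only; the command names and argument struct
 * are hypothetical: drivers encode ioctl commands with the macros above,
 * packing direction, argument size, group and number into one word.
 */
#if 0
struct example_args {
	int ea_value;
};
#define EXAMPLEIOC_GET	_IOR('E', 1, struct example_args)	/* copies out */
#define EXAMPLEIOC_SET	_IOW('E', 2, struct example_args)	/* copies in */
/* IOCPARM_LEN(EXAMPLEIOC_GET) recovers sizeof(struct example_args). */
#endif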

typedef void task_fn_t(void *context, int pending);

struct task {
	STAILQ_ENTRY(task) ta_link;	/* (q) link for queue */
	u_short ta_pending;		/* (q) count times queued */
	u_short ta_priority;		/* (c) Priority */
	task_fn_t *ta_func;		/* (c) task handler */
	void *ta_context;		/* (c) argument for handler */
};
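/*
 * Initialization sketch, illustrative only (FreeBSD normally uses the
 * TASK_INIT() macro from <sys/taskqueue.h>; the handler name below is
 * hypothetical):
 */
#if 0
static void
example_task_fn(void *context, int pending)
{
	/* context is ta_context; pending counts enqueues since last run. */
	(void)context;
	(void)pending;
}

static struct task example_task = {
	.ta_pending = 0,
	.ta_priority = 0,		/* priority within the queue */
	.ta_func = example_task_fn,
	.ta_context = NULL,
};
#endif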

struct kqueue {
	struct mtx kq_lock;
	int kq_refcnt;
	TAILQ_ENTRY(kqueue) kq_list;
	TAILQ_HEAD(, knote) kq_head;	/* list of pending events */
	int kq_count;			/* number of pending events */
	struct selinfo kq_sel;
	struct sigio *kq_sigio;
	struct filedesc *kq_fdp;
	int kq_state;
#define KQ_SEL		0x01
#define KQ_SLEEP	0x02
#define KQ_FLUXWAIT	0x04		/* waiting for an in-flux kn */
#define KQ_ASYNC	0x08
#define KQ_CLOSING	0x10
#define KQ_TASKSCHED	0x20		/* task scheduled */
#define KQ_TASKDRAIN	0x40		/* waiting for task to drain */
	int kq_knlistsize;		/* size of knlist */
	struct klist *kq_knlist;	/* list of knotes */
	u_long kq_knhashmask;		/* size of knhash */
	struct klist *kq_knhash;	/* hash table for knotes */
	struct task kq_task;
	struct ucred *kq_cred;
};

#endif