/* $NetBSD: ufs_quota2.c,v 1.41 2016/11/20 21:21:26 riastradh Exp $ */
/*-
 * Copyright (c) 2010 Manuel Bouyer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_quota2.c,v 1.41 2016/11/20 21:21:26 riastradh Exp $");

#include <sys/buf.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fstrans.h>
#include <sys/kauth.h>
#include <sys/wapbl.h>
#include <sys/quota.h>
#include <sys/quotactl.h>
#include <sys/timevar.h>

#include <ufs/ufs/quota2.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_quota.h>
#include <ufs/ufs/ufs_wapbl.h>

/*
 * LOCKING:
 * Data in the entries is protected by the associated struct dquot's
 * dq_interlock (this means we can't read or change a quota entry without
 * grabbing a dquot for it).
 * The header and lists (including pointers in the data entries, and q2e_uid)
 * are protected by the global dqlock.
 * The locking order is dq_interlock -> dqlock.
 */

static int quota2_bwrite(struct mount *, struct buf *);
static int getinoquota2(struct inode *, bool, bool, struct buf **,
    struct quota2_entry **);
static int getq2h(struct ufsmount *, int, struct buf **,
    struct quota2_header **, int);
static int getq2e(struct ufsmount *, int, daddr_t, int, struct buf **,
    struct quota2_entry **, int);
static int quota2_walk_list(struct ufsmount *, struct buf *, int,
    uint64_t *, int, void *,
    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *,
      uint64_t, void *));

static const char *limnames[] = INITQLNAMES;

static void
quota2_dict_update_q2e_limits(int objtype, const struct quotaval *val,
    struct quota2_entry *q2e)
{
	/* make sure we can index q2e_val[] by the fs-independent objtype */
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	q2e->q2e_val[objtype].q2v_hardlimit = val->qv_hardlimit;
	q2e->q2e_val[objtype].q2v_softlimit = val->qv_softlimit;
	q2e->q2e_val[objtype].q2v_grace = val->qv_grace;
}

/*
 * Convert internal representation to FS-independent representation.
 * (Note that while the two types are currently identical, the
 * internal representation is an on-disk struct and the FS-independent
 * representation is not, and they might diverge in the future.)
 */
static void
q2val_to_quotaval(struct quota2_val *q2v, struct quotaval *qv)
{
	qv->qv_softlimit = q2v->q2v_softlimit;
	qv->qv_hardlimit = q2v->q2v_hardlimit;
	qv->qv_usage = q2v->q2v_cur;
	qv->qv_expiretime = q2v->q2v_time;
	qv->qv_grace = q2v->q2v_grace;
}

/*
 * Convert a quota2entry and default-flag to the FS-independent
 * representation.
 */
static void
q2e_to_quotaval(struct quota2_entry *q2e, int def,
    id_t *id, int objtype, struct quotaval *ret)
{
	if (def) {
		*id = QUOTA_DEFAULTID;
	} else {
		*id = q2e->q2e_uid;
	}

	KASSERT(objtype >= 0 && objtype < N_QL);
	q2val_to_quotaval(&q2e->q2e_val[objtype], ret);
}

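/*
 * Write a quota buffer: synchronously if the filesystem is mounted
 * MNT_SYNCHRONOUS, otherwise as a delayed write.
 */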
static int
quota2_bwrite(struct mount *mp, struct buf *bp)
{
	if (mp->mnt_flag & MNT_SYNCHRONOUS)
		return bwrite(bp);
	else {
		bdwrite(bp);
		return 0;
	}
}

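/*
 * Read the header block of the quota file for the given quota type and
 * return the buffer plus a pointer to the header within it.  Called with
 * dqlock held.
 */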
static int
getq2h(struct ufsmount *ump, int type,
    struct buf **bpp, struct quota2_header **q2hp, int flags)
{
	const int needswap = UFS_MPNEEDSWAP(ump);
	int error;
	struct buf *bp;
	struct quota2_header *q2h;

	KASSERT(mutex_owned(&dqlock));
	error = bread(ump->um_quotas[type], 0, ump->umq2_bsize,
	    flags, &bp);
	if (error)
		return error;
	if (bp->b_resid != 0)
		panic("dq2get: %s quota file truncated", quotatypes[type]);

	q2h = (void *)bp->b_data;
	if (ufs_rw32(q2h->q2h_magic_number, needswap) != Q2_HEAD_MAGIC ||
	    q2h->q2h_type != type)
		panic("dq2get: corrupted %s quota header", quotatypes[type]);
	*bpp = bp;
	*q2hp = q2h;
	return 0;
}

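/*
 * Read the quota file block containing the entry at (lblkno, blkoffset)
 * and return the buffer along with a pointer to the entry within it.
 */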
static int
getq2e(struct ufsmount *ump, int type, daddr_t lblkno, int blkoffset,
    struct buf **bpp, struct quota2_entry **q2ep, int flags)
{
	int error;
	struct buf *bp;

	if (blkoffset & (sizeof(uint64_t) - 1)) {
		panic("dq2get: %s quota file corrupted",
		    quotatypes[type]);
	}
	error = bread(ump->um_quotas[type], lblkno, ump->umq2_bsize,
	    flags, &bp);
	if (error)
		return error;
	if (bp->b_resid != 0) {
		panic("dq2get: %s quota file corrupted",
		    quotatypes[type]);
	}
	*q2ep = (void *)((char *)bp->b_data + blkoffset);
	*bpp = bp;
	return 0;
}

/* walk a quota entry list, calling the callback for each entry */
#define Q2WL_ABORT 0x10000000

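/*
 * The callback is invoked once per entry.  It may unlink the entry by
 * rewriting *offp, in which case the walk continues from the new value;
 * returning a nonzero value stops the walk, and a return of Q2WL_ABORT
 * stops it without being reported as an error to the caller.
 */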
static int
quota2_walk_list(struct ufsmount *ump, struct buf *hbp, int type,
    uint64_t *offp, int flags, void *a,
    int (*func)(struct ufsmount *, uint64_t *, struct quota2_entry *, uint64_t, void *))
{
	const int needswap = UFS_MPNEEDSWAP(ump);
	daddr_t off = ufs_rw64(*offp, needswap);
	struct buf *bp, *obp = hbp;
	int ret = 0, ret2 = 0;
	struct quota2_entry *q2e;
	daddr_t lblkno, blkoff, olblkno = 0;

	KASSERT(mutex_owned(&dqlock));

	while (off != 0) {
		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
		blkoff = (off & ump->umq2_bmask);
		if (lblkno == 0) {
			/* in the header block */
			bp = hbp;
		} else if (lblkno == olblkno) {
			/* still in the same buf */
			bp = obp;
		} else {
			ret = bread(ump->um_quotas[type], lblkno,
			    ump->umq2_bsize, flags, &bp);
			if (ret)
				return ret;
			if (bp->b_resid != 0) {
				panic("quota2_walk_list: %s quota file corrupted",
				    quotatypes[type]);
			}
		}
		q2e = (void *)((char *)(bp->b_data) + blkoff);
		ret = (*func)(ump, offp, q2e, off, a);
		if (off != ufs_rw64(*offp, needswap)) {
			/* callback changed parent's pointer, redo */
			off = ufs_rw64(*offp, needswap);
			if (bp != hbp && bp != obp)
				ret2 = bwrite(bp);
		} else {
			/* parent is now current */
			if (obp != bp && obp != hbp) {
				if (flags & B_MODIFY)
					ret2 = bwrite(obp);
				else
					brelse(obp, 0);
			}
			obp = bp;
			olblkno = lblkno;
			offp = &(q2e->q2e_next);
			off = ufs_rw64(*offp, needswap);
		}
		if (ret)
			break;
		if (ret2) {
			ret = ret2;
			break;
		}
	}
	if (obp != hbp) {
		if (flags & B_MODIFY)
			ret2 = bwrite(obp);
		else
			brelse(obp, 0);
	}
	if (ret & Q2WL_ABORT)
		return 0;
	if (ret == 0)
		return ret2;
	return ret;
}

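/*
 * Close the quota files when the filesystem is unmounted.
 */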
int
quota2_umount(struct mount *mp, int flags)
{
	int i, error;
	struct ufsmount *ump = VFSTOUFS(mp);

	if ((ump->um_flags & UFS_QUOTA2) == 0)
		return 0;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (ump->um_quotas[i] != NULLVP) {
			error = vn_close(ump->um_quotas[i], FREAD|FWRITE,
			    ump->um_cred[i]);
			if (error) {
				printf("quota2_umount failed: close(%p) %d\n",
				    ump->um_quotas[i], error);
				return error;
			}
		}
		ump->um_quotas[i] = NULLVP;
	}
	return 0;
}

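/*
 * Allocate an on-disk quota entry for the given id: take it from the
 * free list (growing the quota file by one block first if the free list
 * is empty) and link it into the id's hash chain.  Called with both
 * dq_interlock and dqlock held.
 */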
static int
quota2_q2ealloc(struct ufsmount *ump, int type, uid_t uid, struct dquot *dq)
{
	int error, error2;
	struct buf *hbp, *bp;
	struct quota2_header *q2h;
	struct quota2_entry *q2e;
	daddr_t offset;
	u_long hash_mask;
	const int needswap = UFS_MPNEEDSWAP(ump);

	KASSERT(mutex_owned(&dq->dq_interlock));
	KASSERT(mutex_owned(&dqlock));
	error = getq2h(ump, type, &hbp, &q2h, B_MODIFY);
	if (error)
		return error;
	offset = ufs_rw64(q2h->q2h_free, needswap);
	if (offset == 0) {
		struct vnode *vp = ump->um_quotas[type];
		struct inode *ip = VTOI(vp);
		uint64_t size = ip->i_size;
		/* need to allocate a new disk block */
		error = UFS_BALLOC(vp, size, ump->umq2_bsize,
		    ump->um_cred[type], B_CLRBUF | B_SYNC, &bp);
		if (error) {
			brelse(hbp, 0);
			return error;
		}
		KASSERT((ip->i_size % ump->umq2_bsize) == 0);
		ip->i_size += ump->umq2_bsize;
		DIP_ASSIGN(ip, size, ip->i_size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		uvm_vnp_setsize(vp, ip->i_size);
		quota2_addfreeq2e(q2h, bp->b_data, size, ump->umq2_bsize,
		    needswap);
		error = bwrite(bp);
		error2 = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
		if (error || error2) {
			brelse(hbp, 0);
			if (error)
				return error;
			return error2;
		}
		offset = ufs_rw64(q2h->q2h_free, needswap);
		KASSERT(offset != 0);
	}
	dq->dq2_lblkno = (offset >> ump->um_mountp->mnt_fs_bshift);
	dq->dq2_blkoff = (offset & ump->umq2_bmask);
	if (dq->dq2_lblkno == 0) {
		bp = hbp;
		q2e = (void *)((char *)bp->b_data + dq->dq2_blkoff);
	} else {
		error = getq2e(ump, type, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bp, &q2e, B_MODIFY);
		if (error) {
			brelse(hbp, 0);
			return error;
		}
	}
	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	/* remove from free list */
	q2h->q2h_free = q2e->q2e_next;

	memcpy(q2e, &q2h->q2h_defentry, sizeof(*q2e));
	q2e->q2e_uid = ufs_rw32(uid, needswap);
	/* insert in hash list */
	q2e->q2e_next = q2h->q2h_entries[uid & hash_mask];
	q2h->q2h_entries[uid & hash_mask] = ufs_rw64(offset, needswap);
	if (hbp != bp) {
		bwrite(hbp);
	}
	bwrite(bp);
	return 0;
}

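/*
 * Get the on-disk quota entries for an inode: grab each dquot's
 * dq_interlock, then return the buffer and entry pointer for each quota
 * type, allocating the on-disk entry first when 'alloc' is true.
 * Entries with no on-disk record are returned as NULL when not allocating.
 */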
static int
getinoquota2(struct inode *ip, bool alloc, bool modify, struct buf **bpp,
    struct quota2_entry **q2ep)
{
	int error;
	int i;
	struct dquot *dq;
	struct ufsmount *ump = ip->i_ump;
	u_int32_t ino_ids[MAXQUOTAS];

	error = getinoquota(ip);
	if (error)
		return error;

	if (alloc) {
		UFS_WAPBL_JLOCK_ASSERT(ump->um_mountp);
	}
	ino_ids[USRQUOTA] = ip->i_uid;
	ino_ids[GRPQUOTA] = ip->i_gid;
	/* first get the interlock for all dquot */
	for (i = 0; i < MAXQUOTAS; i++) {
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
	}
	/* now get the corresponding quota entry */
	for (i = 0; i < MAXQUOTAS; i++) {
		bpp[i] = NULL;
		q2ep[i] = NULL;
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		if (__predict_false(ump->um_quotas[i] == NULL)) {
			/*
			 * quotas have been turned off. This can happen
			 * at umount time.
			 */
			mutex_exit(&dq->dq_interlock);
			dqrele(NULLVP, dq);
			ip->i_dquot[i] = NULL;
			continue;
		}

		if ((dq->dq2_lblkno | dq->dq2_blkoff) == 0) {
			if (!alloc) {
				continue;
			}
			/* need to allocate a new on-disk quota entry */
			mutex_enter(&dqlock);
			error = quota2_q2ealloc(ump, i, ino_ids[i], dq);
			mutex_exit(&dqlock);
			if (error)
				return error;
		}
		KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
		error = getq2e(ump, i, dq->dq2_lblkno,
		    dq->dq2_blkoff, &bpp[i], &q2ep[i],
		    modify ? B_MODIFY : 0);
		if (error)
			return error;
	}
	return 0;
}

__inline static int __unused
quota2_check_limit(struct quota2_val *q2v, uint64_t change, time_t now)
{
	return quota_check_limit(q2v->q2v_cur, change, q2v->q2v_softlimit,
	    q2v->q2v_hardlimit, q2v->q2v_time, now);
}

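/*
 * Common code for chkdq2() and chkiq2(): apply a usage change of 'change'
 * units of kind 'vtype' (blocks or files) to the inode's quota entries.
 * Usage decreases are always applied; for allocations the soft and hard
 * limits are enforced unless FORCE is set or the caller is exempt.
 */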
static int
quota2_check(struct inode *ip, int vtype, int64_t change, kauth_cred_t cred,
    int flags)
{
	int error;
	struct buf *bp[MAXQUOTAS];
	struct quota2_entry *q2e[MAXQUOTAS];
	struct quota2_val *q2vp;
	struct dquot *dq;
	uint64_t ncurblks;
	struct ufsmount *ump = ip->i_ump;
	struct mount *mp = ump->um_mountp;
	const int needswap = UFS_MPNEEDSWAP(ump);
	int i;

	if ((error = getinoquota2(ip, change > 0, change != 0, bp, q2e)) != 0)
		return error;
	if (change == 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = ip->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			if (bp[i])
				brelse(bp[i], 0);
			mutex_exit(&dq->dq_interlock);
		}
		return 0;
	}
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = ip->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			if (q2e[i] == NULL) {
				mutex_exit(&dq->dq_interlock);
				continue;
			}
			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
			if (ncurblks < -change)
				ncurblks = 0;
			else
				ncurblks += change;
			q2vp->q2v_cur = ufs_rw64(ncurblks, needswap);
			quota2_bwrite(mp, bp[i]);
			mutex_exit(&dq->dq_interlock);
		}
		return 0;
	}
	/* see if the allocation is allowed */
	for (i = 0; i < MAXQUOTAS; i++) {
		struct quota2_val q2v;
		int ql_stat;
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		KASSERT(q2e[i] != NULL);
		quota2_ufs_rwq2v(&q2e[i]->q2e_val[vtype], &q2v, needswap);
		ql_stat = quota2_check_limit(&q2v, change, time_second);

		if ((flags & FORCE) == 0 &&
		    kauth_authorize_system(cred, KAUTH_SYSTEM_FS_QUOTA,
		    KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT,
		    KAUTH_ARG(i), KAUTH_ARG(vtype), NULL) != 0) {
			/* enforce this limit */
			switch(QL_STATUS(ql_stat)) {
			case QL_S_DENY_HARD:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    "limit reached\n",
					    mp->mnt_stat.f_mntonname,
					    quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				error = EDQUOT;
				break;
			case QL_S_DENY_GRACE:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: write failed, %s %s "
					    "limit reached\n",
					    mp->mnt_stat.f_mntonname,
					    quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				error = EDQUOT;
				break;
			case QL_S_ALLOW_SOFT:
				if ((dq->dq_flags & DQ_WARN(vtype)) == 0) {
					uprintf("\n%s: warning, %s %s "
					    "quota exceeded\n",
					    mp->mnt_stat.f_mntonname,
					    quotatypes[i], limnames[vtype]);
					dq->dq_flags |= DQ_WARN(vtype);
				}
				break;
			}
		}
		/*
		 * Always do this; we don't know yet whether the allocation
		 * will succeed in the end.  If we don't do the allocation,
		 * q2v_time will be ignored anyway.
		 */
		if (ql_stat & QL_F_CROSS) {
			q2v.q2v_time = time_second + q2v.q2v_grace;
			quota2_ufs_rwq2v(&q2v, &q2e[i]->q2e_val[vtype],
			    needswap);
		}
	}

	/* now do the allocation if allowed */
	for (i = 0; i < MAXQUOTAS; i++) {
		dq = ip->i_dquot[i];
		if (dq == NODQUOT)
			continue;
		KASSERT(q2e[i] != NULL);
		if (error == 0) {
			q2vp = &q2e[i]->q2e_val[vtype];
			ncurblks = ufs_rw64(q2vp->q2v_cur, needswap);
			q2vp->q2v_cur = ufs_rw64(ncurblks + change, needswap);
			quota2_bwrite(mp, bp[i]);
		} else
			brelse(bp[i], 0);
		mutex_exit(&dq->dq_interlock);
	}
	return error;
}

int
chkdq2(struct inode *ip, int64_t change, kauth_cred_t cred, int flags)
{
	return quota2_check(ip, QL_BLOCK, change, cred, flags);
}

int
chkiq2(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
{
	return quota2_check(ip, QL_FILE, change, cred, flags);
}

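/*
 * Set the limits for a (idtype, id, objtype) key, allocating the on-disk
 * entry if needed.  QUOTA_DEFAULTID updates the default entry stored in
 * the quota file header instead.
 */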
int
quota2_handle_cmd_put(struct ufsmount *ump, const struct quotakey *key,
    const struct quotaval *val)
{
	int error;
	struct dquot *dq;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *bp;
	const int needswap = UFS_MPNEEDSWAP(ump);

	/* make sure we can index by the fs-independent idtype */
	CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
	CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);

	if (ump->um_quotas[key->qk_idtype] == NULLVP)
		return ENODEV;
	error = UFS_WAPBL_BEGIN(ump->um_mountp);
	if (error)
		return error;

	if (key->qk_id == QUOTA_DEFAULTID) {
		mutex_enter(&dqlock);
		error = getq2h(ump, key->qk_idtype, &bp, &q2h, B_MODIFY);
		if (error) {
			mutex_exit(&dqlock);
			goto out_wapbl;
		}
		quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
		quota2_ufs_rwq2e(&q2e, &q2h->q2h_defentry, needswap);
		mutex_exit(&dqlock);
		quota2_bwrite(ump->um_mountp, bp);
		goto out_wapbl;
	}

	error = dqget(NULLVP, key->qk_id, ump, key->qk_idtype, &dq);
	if (error)
		goto out_wapbl;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* need to allocate a new on-disk quota entry */
		mutex_enter(&dqlock);
		error = quota2_q2ealloc(ump, key->qk_idtype, key->qk_id, dq);
		mutex_exit(&dqlock);
		if (error)
			goto out_il;
	}
	KASSERT(dq->dq2_lblkno != 0 || dq->dq2_blkoff != 0);
	error = getq2e(ump, key->qk_idtype, dq->dq2_lblkno,
	    dq->dq2_blkoff, &bp, &q2ep, B_MODIFY);
	if (error)
		goto out_il;

	quota2_ufs_rwq2e(q2ep, &q2e, needswap);
	/*
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	if (val->qv_softlimit &&
	    q2e.q2e_val[key->qk_objtype].q2v_cur >= val->qv_softlimit &&
	    (q2e.q2e_val[key->qk_objtype].q2v_softlimit == 0 ||
	     q2e.q2e_val[key->qk_objtype].q2v_cur < q2e.q2e_val[key->qk_objtype].q2v_softlimit))
		q2e.q2e_val[key->qk_objtype].q2v_time = time_second + val->qv_grace;
	quota2_dict_update_q2e_limits(key->qk_objtype, val, &q2e);
	quota2_ufs_rwq2e(&q2e, q2ep, needswap);
	quota2_bwrite(ump->um_mountp, bp);

out_il:
	mutex_exit(&dq->dq_interlock);
	dqrele(NULLVP, dq);
out_wapbl:
	UFS_WAPBL_END(ump->um_mountp);
	return error;
}

struct dq2clear_callback {
	uid_t id;
	struct dquot *dq;
	struct quota2_header *q2h;
};

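/*
 * quota2_walk_list() callback for quota2_handle_cmd_del(): when the entry
 * for the id being deleted is found, unlink it from its hash chain, put it
 * on the free list, clear the dquot's on-disk location and abort the walk.
 */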
static int
dq2clear_callback(struct ufsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
{
	struct dq2clear_callback *c = v;
	const int needswap = UFS_MPNEEDSWAP(ump);
	uint64_t myoff;

	if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
		KASSERT(mutex_owned(&c->dq->dq_interlock));
		c->dq->dq2_lblkno = 0;
		c->dq->dq2_blkoff = 0;
		myoff = *offp;
		/* remove from hash list */
		*offp = q2e->q2e_next;
		/* add to free list */
		q2e->q2e_next = c->q2h->q2h_free;
		c->q2h->q2h_free = myoff;
		return Q2WL_ABORT;
	}
	return 0;
}
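
/*
 * Delete the stored limits for a (idtype, id, objtype) key by resetting
 * them to the default values; if the entry then carries no information
 * beyond the defaults, unlink it and return it to the free list.
 */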
int
quota2_handle_cmd_del(struct ufsmount *ump, const struct quotakey *qk)
{
	int idtype;
	id_t id;
	int objtype;
	int error, i, canfree;
	struct dquot *dq;
	struct quota2_header *q2h;
	struct quota2_entry q2e, *q2ep;
	struct buf *hbp, *bp;
	u_long hash_mask;
	struct dq2clear_callback c;

	idtype = qk->qk_idtype;
	id = qk->qk_id;
	objtype = qk->qk_objtype;

	if (ump->um_quotas[idtype] == NULLVP)
		return ENODEV;
	if (id == QUOTA_DEFAULTID)
		return EOPNOTSUPP;

	/* get the default entry before locking the entry's buffer */
	mutex_enter(&dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error) {
		mutex_exit(&dqlock);
		return error;
	}
	/* we'll copy to another disk entry, so no need to swap */
	memcpy(&q2e, &q2h->q2h_defentry, sizeof(q2e));
	mutex_exit(&dqlock);
	brelse(hbp, 0);

	error = dqget(NULLVP, id, ump, idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		/* already clear, nothing to do */
		error = ENOENT;
		goto out_il;
	}
	error = UFS_WAPBL_BEGIN(ump->um_mountp);
	if (error)
		goto out_dq;

	error = getq2e(ump, idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, B_MODIFY);
	if (error)
		goto out_wapbl;

	/* make sure we can index by the objtype passed in */
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	/* clear the requested objtype by copying from the default entry */
	q2ep->q2e_val[objtype].q2v_softlimit =
	    q2e.q2e_val[objtype].q2v_softlimit;
	q2ep->q2e_val[objtype].q2v_hardlimit =
	    q2e.q2e_val[objtype].q2v_hardlimit;
	q2ep->q2e_val[objtype].q2v_grace =
	    q2e.q2e_val[objtype].q2v_grace;
	q2ep->q2e_val[objtype].q2v_time = 0;

	/* if this entry now contains no information, we can free it */
	canfree = 1;
	for (i = 0; i < N_QL; i++) {
		if (q2ep->q2e_val[i].q2v_cur != 0 ||
		    (q2ep->q2e_val[i].q2v_softlimit !=
		     q2e.q2e_val[i].q2v_softlimit) ||
		    (q2ep->q2e_val[i].q2v_hardlimit !=
		     q2e.q2e_val[i].q2v_hardlimit) ||
		    (q2ep->q2e_val[i].q2v_grace !=
		     q2e.q2e_val[i].q2v_grace)) {
			canfree = 0;
			break;
		}
		/* note: do not need to check q2v_time */
	}

	if (canfree == 0) {
		quota2_bwrite(ump->um_mountp, bp);
		goto out_wapbl;
	}
	/* we can free it. release bp so we can walk the list */
	brelse(bp, 0);
	mutex_enter(&dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error)
		goto out_dqlock;

	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	c.dq = dq;
	c.id = id;
	c.q2h = q2h;
	error = quota2_walk_list(ump, hbp, idtype,
	    &q2h->q2h_entries[id & hash_mask], B_MODIFY, &c,
	    dq2clear_callback);

	bwrite(hbp);

out_dqlock:
	mutex_exit(&dqlock);
out_wapbl:
	UFS_WAPBL_END(ump->um_mountp);
out_il:
	mutex_exit(&dq->dq_interlock);
out_dq:
	dqrele(NULLVP, dq);
	return error;
}

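/*
 * Fetch the on-disk quota entry for the given key into *ret, byte-swapped
 * to host order.  Returns ENOENT if the id has no on-disk entry.
 */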
static int
quota2_fetch_q2e(struct ufsmount *ump, const struct quotakey *qk,
    struct quota2_entry *ret)
{
	struct dquot *dq;
	int error;
	struct quota2_entry *q2ep;
	struct buf *bp;
	const int needswap = UFS_MPNEEDSWAP(ump);

	error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		mutex_exit(&dq->dq_interlock);
		dqrele(NULLVP, dq);
		return ENOENT;
	}
	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, 0);
	if (error) {
		mutex_exit(&dq->dq_interlock);
		dqrele(NULLVP, dq);
		return error;
	}
	quota2_ufs_rwq2e(q2ep, ret, needswap);
	brelse(bp, 0);
	mutex_exit(&dq->dq_interlock);
	dqrele(NULLVP, dq);

	return 0;
}

static int
quota2_fetch_quotaval(struct ufsmount *ump, const struct quotakey *qk,
    struct quotaval *ret)
{
	struct dquot *dq;
	int error;
	struct quota2_entry *q2ep, q2e;
	struct buf *bp;
	const int needswap = UFS_MPNEEDSWAP(ump);
	id_t id2;

	error = dqget(NULLVP, qk->qk_id, ump, qk->qk_idtype, &dq);
	if (error)
		return error;

	mutex_enter(&dq->dq_interlock);
	if (dq->dq2_lblkno == 0 && dq->dq2_blkoff == 0) {
		mutex_exit(&dq->dq_interlock);
		dqrele(NULLVP, dq);
		return ENOENT;
	}
	error = getq2e(ump, qk->qk_idtype, dq->dq2_lblkno, dq->dq2_blkoff,
	    &bp, &q2ep, 0);
	if (error) {
		mutex_exit(&dq->dq_interlock);
		dqrele(NULLVP, dq);
		return error;
	}
	quota2_ufs_rwq2e(q2ep, &q2e, needswap);
	brelse(bp, 0);
	mutex_exit(&dq->dq_interlock);
	dqrele(NULLVP, dq);

	q2e_to_quotaval(&q2e, 0, &id2, qk->qk_objtype, ret);
	KASSERT(id2 == qk->qk_id);
	return 0;
}

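/*
 * Look up the quotaval for a single key; QUOTA_DEFAULTID returns the
 * default entry stored in the quota file header.
 */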
int
quota2_handle_cmd_get(struct ufsmount *ump, const struct quotakey *qk,
    struct quotaval *qv)
{
	int error;
	struct quota2_header *q2h;
	struct quota2_entry q2e;
	struct buf *bp;
	const int needswap = UFS_MPNEEDSWAP(ump);
	id_t id2;

	/*
	 * Make sure the FS-independent codes match the internal ones,
	 * so we can use the passed-in objtype without having to
	 * convert it explicitly to QL_BLOCK/QL_FILE.
	 */
	CTASSERT(QL_BLOCK == QUOTA_OBJTYPE_BLOCKS);
	CTASSERT(QL_FILE == QUOTA_OBJTYPE_FILES);
	CTASSERT(N_QL == 2);

	if (qk->qk_objtype < 0 || qk->qk_objtype >= N_QL) {
		return EINVAL;
	}

	if (ump->um_quotas[qk->qk_idtype] == NULLVP)
		return ENODEV;
	if (qk->qk_id == QUOTA_DEFAULTID) {
		mutex_enter(&dqlock);
		error = getq2h(ump, qk->qk_idtype, &bp, &q2h, 0);
		if (error) {
			mutex_exit(&dqlock);
			return error;
		}
		quota2_ufs_rwq2e(&q2h->q2h_defentry, &q2e, needswap);
		mutex_exit(&dqlock);
		brelse(bp, 0);
		q2e_to_quotaval(&q2e, qk->qk_id == QUOTA_DEFAULTID, &id2,
		    qk->qk_objtype, qv);
		(void)id2;
	} else
		error = quota2_fetch_quotaval(ump, qk, qv);

	return error;
}

/*
 * Cursor structure we use.
 *
 * This will get stored in userland between calls so we must not assume
 * it isn't arbitrarily corrupted.
 */
struct ufsq2_cursor {
	uint32_t q2c_magic;	/* magic number */
	int q2c_hashsize;	/* size of hash table at last go */

	int q2c_users_done;	/* true if we've returned all user data */
	int q2c_groups_done;	/* true if we've returned all group data */
	int q2c_defaults_done;	/* true if we've returned the default values */
	int q2c_hashpos;	/* slot to start at in hash table */
	int q2c_uidpos;		/* number of ids we've handled */
	int q2c_blocks_done;	/* true if we've returned the blocks value */
};

/*
 * State of a single cursorget call, or at least the part of it that
 * needs to be passed around.
 */
struct q2cursor_state {
	/* data return pointers */
	struct quotakey *keys;
	struct quotaval *vals;

	/* key/value counters */
	unsigned maxkeyvals;
	unsigned numkeys;	/* number of keys assigned */

	/* ID to key/value conversion state */
	int skipfirst;		/* if true skip first key/value */
	int skiplast;		/* if true skip last key/value */

	/* ID counters */
	unsigned maxids;	/* maximum number of IDs to handle */
	unsigned numids;	/* number of IDs handled */
};

/*
 * Additional structure for getids callback.
 */
struct q2cursor_getids {
	struct q2cursor_state *state;
	int idtype;
	unsigned skip;		/* number of ids to skip over */
	unsigned new_skip;	/* number of ids to skip over next time */
	unsigned skipped;	/* number skipped so far */
	int stopped;		/* true if we stopped quota2_walk_list early */
};

/*
 * Cursor-related functions
 */

/* magic number */
#define Q2C_MAGIC (0xbeebe111)

/* extract cursor from caller form */
#define Q2CURSOR(qkc) ((struct ufsq2_cursor *)&qkc->u.qkc_space[0])

/*
 * Check that a cursor we're handed is at least superficially valid. If
 * someone munges it and it still passes these checks, they'll get
 * partial or odd results back but won't break anything.
 */
static int
q2cursor_check(struct ufsq2_cursor *cursor)
{
	if (cursor->q2c_magic != Q2C_MAGIC) {
		return EINVAL;
	}
	if (cursor->q2c_hashsize < 0) {
		return EINVAL;
	}

	if (cursor->q2c_users_done != 0 && cursor->q2c_users_done != 1) {
		return EINVAL;
	}
	if (cursor->q2c_groups_done != 0 && cursor->q2c_groups_done != 1) {
		return EINVAL;
	}
	if (cursor->q2c_defaults_done != 0 && cursor->q2c_defaults_done != 1) {
		return EINVAL;
	}
	if (cursor->q2c_hashpos < 0 || cursor->q2c_uidpos < 0) {
		return EINVAL;
	}
	if (cursor->q2c_blocks_done != 0 && cursor->q2c_blocks_done != 1) {
		return EINVAL;
	}
	return 0;
}

/*
 * Set up the q2cursor state.
 */
static void
q2cursor_initstate(struct q2cursor_state *state, struct quotakey *keys,
    struct quotaval *vals, unsigned maxkeyvals, int blocks_done)
{
	state->keys = keys;
	state->vals = vals;

	state->maxkeyvals = maxkeyvals;
	state->numkeys = 0;

	/*
	 * For each ID there are two quotavals to return. If the
	 * maximum number of entries to return is odd, we might want
	 * to skip the first quotaval of the first ID, or the last
	 * quotaval of the last ID, but not both. So the number of IDs
	 * we want is (up to) half the number of return slots we have,
	 * rounded up.
	 */

	state->maxids = (state->maxkeyvals + 1) / 2;
	state->numids = 0;
	if (state->maxkeyvals % 2) {
		if (blocks_done) {
			state->skipfirst = 1;
			state->skiplast = 0;
		} else {
			state->skipfirst = 0;
			state->skiplast = 1;
		}
	} else {
		state->skipfirst = 0;
		state->skiplast = 0;
	}
}

/*
 * Choose which idtype we're going to work on. If doing a full
 * iteration, we do users first, then groups, but either might be
 * disabled or marked to skip via cursorsetidtype(), so don't make
 * silly assumptions.
 */
static int
q2cursor_pickidtype(struct ufsq2_cursor *cursor, int *idtype_ret)
{
	if (cursor->q2c_users_done == 0) {
		*idtype_ret = QUOTA_IDTYPE_USER;
	} else if (cursor->q2c_groups_done == 0) {
		*idtype_ret = QUOTA_IDTYPE_GROUP;
	} else {
		return EAGAIN;
	}
	return 0;
}

/*
 * Add an ID to the current state. Sets up either one or two keys to
 * refer to it, depending on whether it's first/last and the setting
 * of skipfirst. (skiplast does not need to be explicitly tested)
 */
static void
q2cursor_addid(struct q2cursor_state *state, int idtype, id_t id)
{
	KASSERT(state->numids < state->maxids);
	KASSERT(state->numkeys < state->maxkeyvals);

	if (!state->skipfirst || state->numkeys > 0) {
		state->keys[state->numkeys].qk_idtype = idtype;
		state->keys[state->numkeys].qk_id = id;
		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_BLOCKS;
		state->numkeys++;
	}
	if (state->numkeys < state->maxkeyvals) {
		state->keys[state->numkeys].qk_idtype = idtype;
		state->keys[state->numkeys].qk_id = id;
		state->keys[state->numkeys].qk_objtype = QUOTA_OBJTYPE_FILES;
		state->numkeys++;
	} else {
		KASSERT(state->skiplast);
	}
	state->numids++;
}

/*
 * Callback function for getting IDs. Update counting and call addid.
 */
static int
q2cursor_getids_callback(struct ufsmount *ump, uint64_t *offp,
    struct quota2_entry *q2ep, uint64_t off, void *v)
{
	struct q2cursor_getids *gi = v;
	id_t id;
	const int needswap = UFS_MPNEEDSWAP(ump);

	if (gi->skipped < gi->skip) {
		gi->skipped++;
		return 0;
	}
	id = ufs_rw32(q2ep->q2e_uid, needswap);
	q2cursor_addid(gi->state, gi->idtype, id);
	gi->new_skip++;
	if (gi->state->numids >= gi->state->maxids) {
		/* got enough ids, stop now */
		gi->stopped = 1;
		return Q2WL_ABORT;
	}
	return 0;
}

/*
 * Fill in a batch of quotakeys by scanning one or more hash chains.
 */
static int
q2cursor_getkeys(struct ufsmount *ump, int idtype, struct ufsq2_cursor *cursor,
    struct q2cursor_state *state,
    int *hashsize_ret, struct quota2_entry *default_q2e_ret)
{
	const int needswap = UFS_MPNEEDSWAP(ump);
	struct buf *hbp;
	struct quota2_header *q2h;
	int quota2_hash_size;
	struct q2cursor_getids gi;
	uint64_t offset;
	int error;

	/*
	 * Read the header block.
	 */

	mutex_enter(&dqlock);
	error = getq2h(ump, idtype, &hbp, &q2h, 0);
	if (error) {
		mutex_exit(&dqlock);
		return error;
	}

	/* if the table size has changed, make the caller start over */
	quota2_hash_size = ufs_rw16(q2h->q2h_hash_size, needswap);
	if (cursor->q2c_hashsize == 0) {
		cursor->q2c_hashsize = quota2_hash_size;
	} else if (cursor->q2c_hashsize != quota2_hash_size) {
		error = EDEADLK;
		goto scanfail;
	}

	/* grab the entry with the default values out of the header */
	quota2_ufs_rwq2e(&q2h->q2h_defentry, default_q2e_ret, needswap);

	/* If we haven't done the defaults yet, that goes first. */
	if (cursor->q2c_defaults_done == 0) {
		q2cursor_addid(state, idtype, QUOTA_DEFAULTID);
		/* if we read both halves, mark it done */
		if (state->numids < state->maxids || !state->skiplast) {
			cursor->q2c_defaults_done = 1;
		}
	}

	gi.state = state;
	gi.idtype = idtype;

	while (state->numids < state->maxids) {
		if (cursor->q2c_hashpos >= quota2_hash_size) {
			/* nothing more left */
			break;
		}

		/* scan this hash chain */
		gi.skip = cursor->q2c_uidpos;
		gi.new_skip = gi.skip;
		gi.skipped = 0;
		gi.stopped = 0;
		offset = q2h->q2h_entries[cursor->q2c_hashpos];

		error = quota2_walk_list(ump, hbp, idtype, &offset, 0, &gi,
		    q2cursor_getids_callback);
		KASSERT(error != Q2WL_ABORT);
		if (error) {
			break;
		}
		if (gi.stopped) {
			/* callback stopped before reading whole chain */
			cursor->q2c_uidpos = gi.new_skip;
			/* if we didn't get both halves, back up */
			if (state->numids == state->maxids && state->skiplast) {
				KASSERT(cursor->q2c_uidpos > 0);
				cursor->q2c_uidpos--;
			}
		} else {
			/* read whole chain */
			/* if we got both halves of the last id, advance */
			if (state->numids < state->maxids || !state->skiplast) {
				cursor->q2c_uidpos = 0;
				cursor->q2c_hashpos++;
			}
		}
	}

scanfail:
	mutex_exit(&dqlock);
	brelse(hbp, 0);
	if (error)
		return error;

	*hashsize_ret = quota2_hash_size;
	return 0;
}

/*
 * Fetch the quotavals for the quotakeys.
 */
static int
q2cursor_getvals(struct ufsmount *ump, struct q2cursor_state *state,
    const struct quota2_entry *default_q2e)
{
	int hasid;
	id_t loadedid, id;
	unsigned pos;
	struct quota2_entry q2e;
	int objtype;
	int error;

	hasid = 0;
	loadedid = 0;
	for (pos = 0; pos < state->numkeys; pos++) {
		id = state->keys[pos].qk_id;
		if (!hasid || id != loadedid) {
			hasid = 1;
			loadedid = id;
			if (id == QUOTA_DEFAULTID) {
				q2e = *default_q2e;
			} else {
				error = quota2_fetch_q2e(ump,
				    &state->keys[pos],
				    &q2e);
				if (error == ENOENT) {
					/* something changed - start over */
					error = EDEADLK;
				}
				if (error) {
					return error;
				}
			}
		}

		objtype = state->keys[pos].qk_objtype;
		KASSERT(objtype >= 0 && objtype < N_QL);
		q2val_to_quotaval(&q2e.q2e_val[objtype], &state->vals[pos]);
	}

	return 0;
}

/*
 * Handle cursorget.
 *
 * We can't just read keys and values directly, because we can't walk
 * the list with dqlock and grab dq_interlock to read the entries at
 * the same time. So we're going to do two passes: one to figure out
 * which IDs we want and fill in the keys, and then a second to use
 * the keys to fetch the values.
 */
int
quota2_handle_cmd_cursorget(struct ufsmount *ump, struct quotakcursor *qkc,
    struct quotakey *keys, struct quotaval *vals, unsigned maxreturn,
    unsigned *ret)
{
	int error;
	struct ufsq2_cursor *cursor;
	struct ufsq2_cursor newcursor;
	struct q2cursor_state state;
	struct quota2_entry default_q2e;
	int idtype;
	int quota2_hash_size = 0;	/* XXX: sh3 gcc 4.8 -Wuninitialized */

	/*
	 * Convert and validate the cursor.
	 */
	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);
	if (error) {
		return error;
	}

	/*
	 * Make sure our on-disk codes match the values of the
	 * FS-independent ones. This avoids the need for explicit
	 * conversion (which would be a NOP anyway and thus easily
	 * left out or called in the wrong places...)
	 */
	CTASSERT(QUOTA_IDTYPE_USER == USRQUOTA);
	CTASSERT(QUOTA_IDTYPE_GROUP == GRPQUOTA);
	CTASSERT(QUOTA_OBJTYPE_BLOCKS == QL_BLOCK);
	CTASSERT(QUOTA_OBJTYPE_FILES == QL_FILE);

	/*
	 * If some of the idtypes aren't configured/enabled, arrange
	 * to skip over them.
	 */
	if (cursor->q2c_users_done == 0 &&
	    ump->um_quotas[USRQUOTA] == NULLVP) {
		cursor->q2c_users_done = 1;
	}
	if (cursor->q2c_groups_done == 0 &&
	    ump->um_quotas[GRPQUOTA] == NULLVP) {
		cursor->q2c_groups_done = 1;
	}

	/* Loop over, potentially, both idtypes */
	while (1) {

		/* Choose id type */
		error = q2cursor_pickidtype(cursor, &idtype);
		if (error == EAGAIN) {
			/* nothing more to do, return 0 */
			*ret = 0;
			return 0;
		}
		KASSERT(ump->um_quotas[idtype] != NULLVP);

		/*
		 * Initialize the per-call iteration state. Copy the
		 * cursor state so we can update it in place but back
		 * out on error.
		 */
		q2cursor_initstate(&state, keys, vals, maxreturn,
		    cursor->q2c_blocks_done);
		newcursor = *cursor;

		/* Assign keys */
		error = q2cursor_getkeys(ump, idtype, &newcursor, &state,
		    &quota2_hash_size, &default_q2e);
		if (error) {
			return error;
		}

		/* Now fill in the values. */
		error = q2cursor_getvals(ump, &state, &default_q2e);
		if (error) {
			return error;
		}

		/*
		 * Now that we aren't going to fail and lose what we
		 * did so far, we can update the cursor state.
		 */

		if (newcursor.q2c_hashpos >= quota2_hash_size) {
			if (idtype == QUOTA_IDTYPE_USER)
				cursor->q2c_users_done = 1;
			else
				cursor->q2c_groups_done = 1;

			/* start over on another id type */
			cursor->q2c_hashsize = 0;
			cursor->q2c_defaults_done = 0;
			cursor->q2c_hashpos = 0;
			cursor->q2c_uidpos = 0;
			cursor->q2c_blocks_done = 0;
		} else {
			*cursor = newcursor;
			cursor->q2c_blocks_done = state.skiplast;
		}

		/*
		 * If we have something to return, return it.
		 * Otherwise, continue to the other idtype, if any,
		 * and only return zero at end of iteration.
		 */
		if (state.numkeys > 0) {
			break;
		}
	}

	*ret = state.numkeys;
	return 0;
}

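/*
 * Handle cursoropen: initialize a cursor to the start of the iteration.
 */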
int
quota2_handle_cmd_cursoropen(struct ufsmount *ump, struct quotakcursor *qkc)
{
	struct ufsq2_cursor *cursor;

	CTASSERT(sizeof(*cursor) <= sizeof(qkc->u.qkc_space));
	cursor = Q2CURSOR(qkc);

	cursor->q2c_magic = Q2C_MAGIC;
	cursor->q2c_hashsize = 0;

	cursor->q2c_users_done = 0;
	cursor->q2c_groups_done = 0;
	cursor->q2c_defaults_done = 0;
	cursor->q2c_hashpos = 0;
	cursor->q2c_uidpos = 0;
	cursor->q2c_blocks_done = 0;
	return 0;
}

int
quota2_handle_cmd_cursorclose(struct ufsmount *ump, struct quotakcursor *qkc)
{
	struct ufsq2_cursor *cursor;
	int error;

	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);
	if (error) {
		return error;
	}

	/* nothing to do */

	return 0;
}

int
quota2_handle_cmd_cursorskipidtype(struct ufsmount *ump,
    struct quotakcursor *qkc, int idtype)
{
	struct ufsq2_cursor *cursor;
	int error;

	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);
	if (error) {
		return error;
	}

	switch (idtype) {
	case QUOTA_IDTYPE_USER:
		cursor->q2c_users_done = 1;
		break;
	case QUOTA_IDTYPE_GROUP:
		cursor->q2c_groups_done = 1;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

int
quota2_handle_cmd_cursoratend(struct ufsmount *ump, struct quotakcursor *qkc,
    int *ret)
{
	struct ufsq2_cursor *cursor;
	int error;

	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);
	if (error) {
		return error;
	}

	*ret = (cursor->q2c_users_done && cursor->q2c_groups_done);
	return 0;
}

int
quota2_handle_cmd_cursorrewind(struct ufsmount *ump, struct quotakcursor *qkc)
{
	struct ufsq2_cursor *cursor;
	int error;

	cursor = Q2CURSOR(qkc);
	error = q2cursor_check(cursor);
	if (error) {
		return error;
	}

	cursor->q2c_hashsize = 0;

	cursor->q2c_users_done = 0;
	cursor->q2c_groups_done = 0;
	cursor->q2c_defaults_done = 0;
	cursor->q2c_hashpos = 0;
	cursor->q2c_uidpos = 0;
	cursor->q2c_blocks_done = 0;

	return 0;
}

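/*
 * Quota file sync: nothing to do for quota2, since entries are updated
 * through the buffer cache as changes are made.
 */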
int
q2sync(struct mount *mp)
{
	return 0;
}

struct dq2get_callback {
	uid_t id;
	struct dquot *dq;
};

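/*
 * quota2_walk_list() callback for dq2get(): when the entry matching the
 * wanted id is found, record its on-disk location in the dquot and abort
 * the walk.
 */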
static int
dq2get_callback(struct ufsmount *ump, uint64_t *offp, struct quota2_entry *q2e,
    uint64_t off, void *v)
{
	struct dq2get_callback *c = v;
	daddr_t lblkno;
	int blkoff;
	const int needswap = UFS_MPNEEDSWAP(ump);

	if (ufs_rw32(q2e->q2e_uid, needswap) == c->id) {
		KASSERT(mutex_owned(&c->dq->dq_interlock));
		lblkno = (off >> ump->um_mountp->mnt_fs_bshift);
		blkoff = (off & ump->umq2_bmask);
		c->dq->dq2_lblkno = lblkno;
		c->dq->dq2_blkoff = blkoff;
		return Q2WL_ABORT;
	}
	return 0;
}

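/*
 * Locate the on-disk quota entry for an id by searching the appropriate
 * hash chain, and record its location in the dquot.  It is not an error
 * if no entry exists yet.
 */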
int
dq2get(struct vnode *dqvp, u_long id, struct ufsmount *ump, int type,
    struct dquot *dq)
{
	struct buf *bp;
	struct quota2_header *q2h;
	int error;
	daddr_t offset;
	u_long hash_mask;
	struct dq2get_callback c = {
		.id = id,
		.dq = dq
	};

	KASSERT(mutex_owned(&dq->dq_interlock));
	mutex_enter(&dqlock);
	error = getq2h(ump, type, &bp, &q2h, 0);
	if (error)
		goto out_mutex;
	/* look for our entry */
	hash_mask = ((1 << q2h->q2h_hash_shift) - 1);
	offset = q2h->q2h_entries[id & hash_mask];
	error = quota2_walk_list(ump, bp, type, &offset, 0, (void *)&c,
	    dq2get_callback);
	brelse(bp, 0);
out_mutex:
	mutex_exit(&dqlock);
	return error;
}

int
dq2sync(struct vnode *vp, struct dquot *dq)
{
	return 0;
}