File: fs/xfs/xfs_itable.c
Warning: line 161, column 6: Copies out a struct with untouched element(s): bs_cowextsize
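
The flagged copy-out is the copy_to_user() at line 161 in xfs_bulkstat_one_fmt(). The only conditionally written field in the copied struct is bs_cowextsize, which xfs_bulkstat_one_int() assigns only when di_version == 3 and XFS_DIFLAG2_COWEXTSIZE is set (lines 114-118). Because buf is allocated with kmem_zalloc() at line 69, the field is in fact zero on all other paths, so this looks like a checker limitation rather than a real information leak. A minimal sketch of one way to make the initialization explicit and silence the warning (an illustrative change, not necessarily the upstream fix) is:

	/* Sketch: initialize bs_cowextsize on every path so the later
	 * copy_to_user() never copies an untouched element.  buf is
	 * already zeroed by kmem_zalloc(), so this only makes the
	 * default value explicit for the analyzer.
	 */
	buf->bs_cowextsize = 0;
	if (dic->di_version == 3) {
		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize = dic->di_cowextsize <<
					mp->m_sb.sb_blocklog;
	}
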
1 | /*
2 |  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 |  * All Rights Reserved.
4 |  *
5 |  * This program is free software; you can redistribute it and/or
6 |  * modify it under the terms of the GNU General Public License as
7 |  * published by the Free Software Foundation.
8 |  *
9 |  * This program is distributed in the hope that it would be useful,
10 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 |  * GNU General Public License for more details.
13 |  *
14 |  * You should have received a copy of the GNU General Public License
15 |  * along with this program; if not, write the Free Software Foundation,
16 |  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 |  */
18 | #include "xfs.h"
19 | #include "xfs_fs.h"
20 | #include "xfs_shared.h"
21 | #include "xfs_format.h"
22 | #include "xfs_log_format.h"
23 | #include "xfs_trans_resv.h"
24 | #include "xfs_mount.h"
25 | #include "xfs_inode.h"
26 | #include "xfs_btree.h"
27 | #include "xfs_ialloc.h"
28 | #include "xfs_ialloc_btree.h"
29 | #include "xfs_itable.h"
30 | #include "xfs_error.h"
31 | #include "xfs_trace.h"
32 | #include "xfs_icache.h"
33 |
34 | STATIC int
35 | xfs_internal_inum(
36 | 	xfs_mount_t	*mp,
37 | 	xfs_ino_t	ino)
38 | {
39 | 	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
40 | 		(xfs_sb_version_hasquota(&mp->m_sb) &&
41 | 		 xfs_is_quota_inode(&mp->m_sb, ino)));
42 | }
43 |
44 | /*
45 |  * Return stat information for one inode.
46 |  * Return 0 if ok, else errno.
47 |  */
48 | int
49 | xfs_bulkstat_one_int(
50 | 	struct xfs_mount	*mp,		/* mount point for filesystem */
51 | 	xfs_ino_t		ino,		/* inode to get data for */
52 | 	void __user		*buffer,	/* buffer to place output in */
53 | 	int			ubsize,		/* size of buffer */
54 | 	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
55 | 	int			*ubused,	/* bytes used by me */
56 | 	int			*stat)		/* BULKSTAT_RV_... */
57 | {
58 | 	struct xfs_icdinode	*dic;		/* dinode core info pointer */
59 | 	struct xfs_inode	*ip;		/* incore inode pointer */
60 | 	struct inode		*inode;
61 | 	struct xfs_bstat	*buf;		/* return buffer */
62 | 	int			error = 0;	/* error value */
63 |
64 | 	*stat = BULKSTAT_RV_NOTHING;
65 |
66 | 	if (!buffer || xfs_internal_inum(mp, ino))
67 | 		return -EINVAL;
68 |
69 | 	buf = kmem_zalloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
70 | 	if (!buf)
71 | 		return -ENOMEM;
72 |
73 | 	error = xfs_iget(mp, NULL, ino,
74 | 			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
75 | 			 XFS_ILOCK_SHARED, &ip);
76 | 	if (error)
77 | 		goto out_free;
78 |
79 | 	ASSERT(ip != NULL);
80 | 	ASSERT(ip->i_imap.im_blkno != 0);
81 | 	inode = VFS_I(ip);
82 |
83 | 	dic = &ip->i_d;
84 |
85 | 	/* xfs_iget returns the following without needing
86 | 	 * further change.
87 | 	 */
88 | 	buf->bs_projid_lo = dic->di_projid_lo;
89 | 	buf->bs_projid_hi = dic->di_projid_hi;
90 | 	buf->bs_ino = ino;
91 | 	buf->bs_uid = dic->di_uid;
92 | 	buf->bs_gid = dic->di_gid;
93 | 	buf->bs_size = dic->di_size;
94 |
95 | 	buf->bs_nlink = inode->i_nlink;
96 | 	buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
97 | 	buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
98 | 	buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
99 | 	buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
100 | 	buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
101 | 	buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
102 | 	buf->bs_gen = inode->i_generation;
103 | 	buf->bs_mode = inode->i_mode;
104 |
105 | 	buf->bs_xflags = xfs_ip2xflags(ip);
106 | 	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
107 | 	buf->bs_extents = dic->di_nextents;
108 | 	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
109 | 	buf->bs_dmevmask = dic->di_dmevmask;
110 | 	buf->bs_dmstate = dic->di_dmstate;
111 | 	buf->bs_aextents = dic->di_anextents;
112 | 	buf->bs_forkoff = XFS_IFORK_BOFF(ip);
113 |
114 | 	if (dic->di_version == 3) {
115 | 		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
116 | 			buf->bs_cowextsize = dic->di_cowextsize <<
117 | 					mp->m_sb.sb_blocklog;
118 | 	}
119 |
120 | 	switch (dic->di_format) {
121 | 	case XFS_DINODE_FMT_DEV:
122 | 		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
123 | 		buf->bs_blksize = BLKDEV_IOSIZE;
124 | 		buf->bs_blocks = 0;
125 | 		break;
126 | 	case XFS_DINODE_FMT_LOCAL:
127 | 	case XFS_DINODE_FMT_UUID:
128 | 		buf->bs_rdev = 0;
129 | 		buf->bs_blksize = mp->m_sb.sb_blocksize;
130 | 		buf->bs_blocks = 0;
131 | 		break;
132 | 	case XFS_DINODE_FMT_EXTENTS:
133 | 	case XFS_DINODE_FMT_BTREE:
134 | 		buf->bs_rdev = 0;
135 | 		buf->bs_blksize = mp->m_sb.sb_blocksize;
136 | 		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
137 | 		break;
138 | 	}
139 | 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
140 | 	IRELE(ip);
141 |
142 | 	error = formatter(buffer, ubsize, ubused, buf);
143 | 	if (!error)
144 | 		*stat = BULKSTAT_RV_DIDONE;
145 |
146 |  out_free:
147 | 	kmem_free(buf);
148 | 	return error;
149 | }
150 |
151 | /* Return 0 on success or a negative error */
152 | STATIC int
153 | xfs_bulkstat_one_fmt(
154 | 	void			__user *ubuffer,
155 | 	int			ubsize,
156 | 	int			*ubused,
157 | 	const xfs_bstat_t	*buffer)
158 | {
159 | 	if (ubsize < sizeof(*buffer))
160 | 		return -ENOMEM;
161 | 	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
162 | 		return -EFAULT;
163 | 	if (ubused)
164 | 		*ubused = sizeof(*buffer);
165 | 	return 0;
166 | }
167 |
168 | int
169 | xfs_bulkstat_one(
170 | 	xfs_mount_t	*mp,		/* mount point for filesystem */
171 | 	xfs_ino_t	ino,		/* inode number to get data for */
172 | 	void		__user *buffer,	/* buffer to place output in */
173 | 	int		ubsize,		/* size of buffer */
174 | 	int		*ubused,	/* bytes used by me */
175 | 	int		*stat)		/* BULKSTAT_RV_... */
176 | {
177 | 	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
178 | 				    xfs_bulkstat_one_fmt, ubused, stat);
179 | }
180 |
181 | /*
182 |  * Loop over all clusters in a chunk for a given incore inode allocation btree
183 |  * record.  Do a readahead if there are any allocated inodes in that cluster.
184 |  */
185 | STATIC void
186 | xfs_bulkstat_ichunk_ra(
187 | 	struct xfs_mount		*mp,
188 | 	xfs_agnumber_t			agno,
189 | 	struct xfs_inobt_rec_incore	*irec)
190 | {
191 | 	xfs_agblock_t			agbno;
192 | 	struct blk_plug			plug;
193 | 	int				blks_per_cluster;
194 | 	int				inodes_per_cluster;
195 | 	int				i;	/* inode chunk index */
196 |
197 | 	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
198 | 	blks_per_cluster = xfs_icluster_size_fsb(mp);
199 | 	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
200 |
201 | 	blk_start_plug(&plug);
202 | 	for (i = 0; i < XFS_INODES_PER_CHUNK;
203 | 	     i += inodes_per_cluster, agbno += blks_per_cluster) {
204 | 		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
205 | 			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
206 | 					     &xfs_inode_buf_ops);
207 | 		}
208 | 	}
209 | 	blk_finish_plug(&plug);
210 | }
211 |
212 | /*
213 |  * Lookup the inode chunk that the given inode lives in and then get the record
214 |  * if we found the chunk.  If the inode was not the last in the chunk and there
215 |  * are some left allocated, update the data for the pointed-to record as well as
216 |  * return the count of grabbed inodes.
217 |  */
218 | STATIC int
219 | xfs_bulkstat_grab_ichunk(
220 | 	struct xfs_btree_cur		*cur,	/* btree cursor */
221 | 	xfs_agino_t			agino,	/* starting inode of chunk */
222 | 	int				*icount,/* return # of inodes grabbed */
223 | 	struct xfs_inobt_rec_incore	*irec)	/* btree record */
224 | {
225 | 	int				idx;	/* index into inode chunk */
226 | 	int				stat;
227 | 	int				error = 0;
228 |
229 | 	/* Lookup the inode chunk that this inode lives in */
230 | 	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
231 | 	if (error)
232 | 		return error;
233 | 	if (!stat) {
234 | 		*icount = 0;
235 | 		return error;
236 | 	}
237 |
238 | 	/* Get the record, should always work */
239 | 	error = xfs_inobt_get_rec(cur, irec, &stat);
240 | 	if (error)
241 | 		return error;
242 | 	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);
243 |
244 | 	/* Check if the record contains the inode in request */
245 | 	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
246 | 		*icount = 0;
247 | 		return 0;
248 | 	}
249 |
250 | 	idx = agino - irec->ir_startino + 1;
251 | 	if (idx < XFS_INODES_PER_CHUNK &&
252 | 	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
253 | 		int	i;
254 |
255 | 		/* We got a right chunk with some left inodes allocated at it.
256 | 		 * Grab the chunk record.  Mark all the uninteresting inodes
257 | 		 * free -- because they're before our start point.
258 | 		 */
259 | 		for (i = 0; i < idx; i++) {
260 | 			if (XFS_INOBT_MASK(i) & ~irec->ir_free)
261 | 				irec->ir_freecount++;
262 | 		}
263 |
264 | 		irec->ir_free |= xfs_inobt_maskn(0, idx);
265 | 		*icount = irec->ir_count - irec->ir_freecount;
266 | 	}
267 |
268 | 	return 0;
269 | }
270 |
271 | #define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
272 |
273 | struct xfs_bulkstat_agichunk {
274 | 	char		__user **ac_ubuffer;/* pointer into user's buffer */
275 | 	int		ac_ubleft;	/* bytes left in user's buffer */
276 | 	int		ac_ubelem;	/* spaces used in user's buffer */
277 | };
278 |
279 | /*
280 |  * Process inodes in chunk with a pointer to a formatter function
281 |  * that will iget the inode and fill in the appropriate structure.
282 |  */
283 | static int
284 | xfs_bulkstat_ag_ichunk(
285 | 	struct xfs_mount		*mp,
286 | 	xfs_agnumber_t			agno,
287 | 	struct xfs_inobt_rec_incore	*irbp,
288 | 	bulkstat_one_pf			formatter,
289 | 	size_t				statstruct_size,
290 | 	struct xfs_bulkstat_agichunk	*acp,
291 | 	xfs_agino_t			*last_agino)
292 | {
293 | 	char				__user **ubufp = acp->ac_ubuffer;
294 | 	int				chunkidx;
295 | 	int				error = 0;
296 | 	xfs_agino_t			agino = irbp->ir_startino;
297 |
298 | 	for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
299 | 	     chunkidx++, agino++) {
300 | 		int		fmterror;
301 | 		int		ubused;
302 |
303 | 		/* inode won't fit in buffer, we are done */
304 | 		if (acp->ac_ubleft < statstruct_size)
305 | 			break;
306 |
307 | 		/* Skip if this inode is free */
308 | 		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
309 | 			continue;
310 |
311 | 		/* Get the inode and fill in a single buffer */
312 | 		ubused = statstruct_size;
313 | 		error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
314 | 				  *ubufp, acp->ac_ubleft, &ubused, &fmterror);
315 |
316 | 		if (fmterror == BULKSTAT_RV_GIVEUP ||
317 | 		    (error && error != -ENOENT && error != -EINVAL)) {
318 | 			acp->ac_ubleft = 0;
319 | 			ASSERT(error);
320 | 			break;
321 | 		}
322 |
323 | 		/* be careful not to leak error if at end of chunk */
324 | 		if (fmterror == BULKSTAT_RV_NOTHING || error) {
325 | 			error = 0;
326 | 			continue;
327 | 		}
328 |
329 | 		*ubufp += ubused;
330 | 		acp->ac_ubleft -= ubused;
331 | 		acp->ac_ubelem++;
332 | 	}
333 |
334 | 	/*
335 | 	 * Post-update *last_agino.  At this point, agino will always point one
336 | 	 * inode past the last inode we processed successfully.  Hence we
337 | 	 * subtract that inode when setting the *last_agino cursor so that we
338 | 	 * return the correct cookie to userspace.  On the next bulkstat call,
339 | 	 * the inode under the lastino cookie will be skipped as we have already
340 | 	 * processed it here.
341 | 	 */
342 | 	*last_agino = agino - 1;
343 |
344 | 	return error;
345 | }
346 |
347 | /*
348 |  * Return stat information in bulk (by-inode) for the filesystem.
349 |  */
350 | int					/* error status */
351 | xfs_bulkstat(
352 | 	xfs_mount_t		*mp,	/* mount point for filesystem */
353 | 	xfs_ino_t		*lastinop, /* last inode returned */
354 | 	int			*ubcountp, /* size of buffer/count returned */
355 | 	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
356 | 	size_t			statstruct_size, /* sizeof struct filling */
357 | 	char			__user *ubuffer, /* buffer with inode stats */
358 | 	int			*done)	/* 1 if there are more stats to get */
359 | {
360 | 	xfs_buf_t		*agbp;	/* agi header buffer */
361 | 	xfs_agino_t		agino;	/* inode # in allocation group */
362 | 	xfs_agnumber_t		agno;	/* allocation group number */
363 | 	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
364 | 	size_t			irbsize; /* size of irec buffer in bytes */
365 | 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
366 | 	int			nirbuf;	/* size of irbuf */
367 | 	int			ubcount; /* size of user's buffer */
368 | 	struct xfs_bulkstat_agichunk ac;
369 | 	int			error = 0;
370 |
371 | 	/*
372 | 	 * Get the last inode value, see if there's nothing to do.
373 | 	 */
374 | 	agno = XFS_INO_TO_AGNO(mp, *lastinop);
375 | 	agino = XFS_INO_TO_AGINO(mp, *lastinop);
376 | 	if (agno >= mp->m_sb.sb_agcount ||
377 | 	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
378 | 		*done = 1;
379 | 		*ubcountp = 0;
380 | 		return 0;
381 | 	}
382 |
383 | 	ubcount = *ubcountp; /* statstruct's */
384 | 	ac.ac_ubuffer = &ubuffer;
385 | 	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */
386 | 	ac.ac_ubelem = 0;
387 |
388 | 	*ubcountp = 0;
389 | 	*done = 0;
390 |
391 | 	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
392 | 	if (!irbuf)
393 | 		return -ENOMEM;
394 |
395 | 	nirbuf = irbsize / sizeof(*irbuf);
396 |
397 | 	/*
398 | 	 * Loop over the allocation groups, starting from the last
399 | 	 * inode returned; 0 means start of the allocation group.
400 | 	 */
401 | 	while (agno < mp->m_sb.sb_agcount) {
402 | 		struct xfs_inobt_rec_incore *irbp = irbuf;
403 | 		struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf;
404 | 		bool	end_of_ag = false;
405 | 		int	icount = 0;
406 | 		int	stat;
407 |
408 | 		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
409 | 		if (error)
410 | 			break;
411 | 		/*
412 | 		 * Allocate and initialize a btree cursor for ialloc btree.
413 | 		 */
414 | 		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
415 | 					    XFS_BTNUM_INO);
416 | 		if (agino > 0) {
417 | 			/*
418 | 			 * In the middle of an allocation group, we need to get
419 | 			 * the remainder of the chunk we're in.
420 | 			 */
421 | 			struct xfs_inobt_rec_incore	r;
422 |
423 | 			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
424 | 			if (error)
425 | 				goto del_cursor;
426 | 			if (icount) {
427 | 				irbp->ir_startino = r.ir_startino;
428 | 				irbp->ir_holemask = r.ir_holemask;
429 | 				irbp->ir_count = r.ir_count;
430 | 				irbp->ir_freecount = r.ir_freecount;
431 | 				irbp->ir_free = r.ir_free;
432 | 				irbp++;
433 | 			}
434 | 			/* Increment to the next record */
435 | 			error = xfs_btree_increment(cur, 0, &stat);
436 | 		} else {
437 | 			/* Start of ag.  Lookup the first inode chunk */
438 | 			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
439 | 		}
440 | 		if (error || stat == 0) {
441 | 			end_of_ag = true;
442 | 			goto del_cursor;
443 | 		}
444 |
445 | 		/*
446 | 		 * Loop through inode btree records in this ag,
447 | 		 * until we run out of inodes or space in the buffer.
448 | 		 */
449 | 		while (irbp < irbufend && icount < ubcount) {
450 | 			struct xfs_inobt_rec_incore	r;
451 |
452 | 			error = xfs_inobt_get_rec(cur, &r, &stat);
453 | 			if (error || stat == 0) {
454 | 				end_of_ag = true;
455 | 				goto del_cursor;
456 | 			}
457 |
458 | 			/*
459 | 			 * If this chunk has any allocated inodes, save it.
460 | 			 * Also start read-ahead now for this chunk.
461 | 			 */
462 | 			if (r.ir_freecount < r.ir_count) {
463 | 				xfs_bulkstat_ichunk_ra(mp, agno, &r);
464 | 				irbp->ir_startino = r.ir_startino;
465 | 				irbp->ir_holemask = r.ir_holemask;
466 | 				irbp->ir_count = r.ir_count;
467 | 				irbp->ir_freecount = r.ir_freecount;
468 | 				irbp->ir_free = r.ir_free;
469 | 				irbp++;
470 | 				icount += r.ir_count - r.ir_freecount;
471 | 			}
472 | 			error = xfs_btree_increment(cur, 0, &stat);
473 | 			if (error || stat == 0) {
474 | 				end_of_ag = true;
475 | 				goto del_cursor;
476 | 			}
477 | 			cond_resched();
478 | 		}
479 |
480 | 		/*
481 | 		 * Drop the btree buffers and the agi buffer as we can't hold any
482 | 		 * of the locks these represent when calling iget.  If there is a
483 | 		 * pending error, then we are done.
484 | 		 */
485 | del_cursor:
486 | 		xfs_btree_del_cursor(cur, error ?
487 | 					  XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
488 | 		xfs_buf_relse(agbp);
489 | 		if (error)
490 | 			break;
491 | 		/*
492 | 		 * Now format all the good inodes into the user's buffer.  The
493 | 		 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
494 | 		 * for the next loop iteration.
495 | 		 */
496 | 		irbufend = irbp;
497 | 		for (irbp = irbuf;
498 | 		     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
499 | 		     irbp++) {
500 | 			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
501 | 					formatter, statstruct_size, &ac,
502 | 					&agino);
503 | 			if (error)
504 | 				break;
505 |
506 | 			cond_resched();
507 | 		}
508 |
509 | 		/*
510 | 		 * If we've run out of space or had a formatting error, we
511 | 		 * are now done
512 | 		 */
513 | 		if (ac.ac_ubleft < statstruct_size || error)
514 | 			break;
515 |
516 | 		if (end_of_ag) {
517 | 			agno++;
518 | 			agino = 0;
519 | 		}
520 | 	}
521 | 	/*
522 | 	 * Done, we're either out of filesystem or space to put the data.
523 | 	 */
524 | 	kmem_free(irbuf);
525 | 	*ubcountp = ac.ac_ubelem;
526 |
527 | 	/*
528 | 	 * We found some inodes, so clear the error status and return them.
529 | 	 * The lastino pointer will point directly at the inode that triggered
530 | 	 * any error that occurred, so on the next call the error will be
531 | 	 * triggered again and propagated to userspace as there will be no
532 | 	 * formatted inodes in the buffer.
533 | 	 */
534 | 	if (ac.ac_ubelem)
535 | 		error = 0;
536 |
537 | 	/*
538 | 	 * If we ran out of filesystem, lastino will point off the end of
539 | 	 * the filesystem so the next call will return immediately.
540 | 	 */
541 | 	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
542 | 	if (agno >= mp->m_sb.sb_agcount)
543 | 		*done = 1;
544 |
545 | 	return error;
546 | }
547 |
548 | int
549 | xfs_inumbers_fmt(
550 | 	void			__user *ubuffer, /* buffer to write to */
551 | 	const struct xfs_inogrp	*buffer,	/* buffer to read from */
552 | 	long			count,		/* # of elements to read */
553 | 	long			*written)	/* # of bytes written */
554 | {
555 | 	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
556 | 		return -EFAULT;
557 | 	*written = count * sizeof(*buffer);
558 | 	return 0;
559 | }
560 |
561 | /*
562 |  * Return inode number table for the filesystem.
563 |  */
564 | int					/* error status */
565 | xfs_inumbers(
566 | 	struct xfs_mount	*mp,/* mount point for filesystem */
567 | 	xfs_ino_t		*lastino,/* last inode returned */
568 | 	int			*count,/* size of buffer/count returned */
569 | 	void			__user *ubuffer,/* buffer with inode descriptions */
570 | 	inumbers_fmt_pf		formatter)
571 | {
572 | 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, *lastino);
573 | 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, *lastino);
574 | 	struct xfs_btree_cur	*cur = NULL;
575 | 	struct xfs_buf		*agbp = NULL;
576 | 	struct xfs_inogrp	*buffer;
577 | 	int			bcount;
578 | 	int			left = *count;
579 | 	int			bufidx = 0;
580 | 	int			error = 0;
581 |
582 | 	*count = 0;
583 | 	if (agno >= mp->m_sb.sb_agcount ||
584 | 	    *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
585 | 		return error;
586 |
587 | 	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
588 | 	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
589 | 	do {
590 | 		struct xfs_inobt_rec_incore	r;
591 | 		int				stat;
592 |
593 | 		if (!agbp) {
594 | 			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
595 | 			if (error)
596 | 				break;
597 |
598 | 			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
599 | 						    XFS_BTNUM_INO);
600 | 			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
601 | 						 &stat);
602 | 			if (error)
603 | 				break;
604 | 			if (!stat)
605 | 				goto next_ag;
606 | 		}
607 |
608 | 		error = xfs_inobt_get_rec(cur, &r, &stat);
609 | 		if (error)
610 | 			break;
611 | 		if (!stat)
612 | 			goto next_ag;
613 |
614 | 		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
615 | 		buffer[bufidx].xi_startino =
616 | 			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
617 | 		buffer[bufidx].xi_alloccount = r.ir_count - r.ir_freecount;
618 | 		buffer[bufidx].xi_allocmask = ~r.ir_free;
619 | 		if (++bufidx == bcount) {
620 | 			long	written;
621 |
622 | 			error = formatter(ubuffer, buffer, bufidx, &written);
623 | 			if (error)
624 | 				break;
625 | 			ubuffer += written;
626 | 			*count += bufidx;
627 | 			bufidx = 0;
628 | 		}
629 | 		if (!--left)
630 | 			break;
631 |
632 | 		error = xfs_btree_increment(cur, 0, &stat);
633 | 		if (error)
634 | 			break;
635 | 		if (stat)
636 | 			continue;
637 |
638 | next_ag:
639 | 		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
640 | 		cur = NULL;
641 | 		xfs_buf_relse(agbp);
642 | 		agbp = NULL;
643 | 		agino = 0;
644 | 		agno++;
645 | 	} while (agno < mp->m_sb.sb_agcount);
646 |
647 | 	if (!error) {
648 | 		if (bufidx) {
649 | 			long	written;
650 |
651 | 			error = formatter(ubuffer, buffer, bufidx, &written);
652 | 			if (!error)
653 | 				*count += bufidx;
654 | 		}
655 | 		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
656 | 	}
657 |
658 | 	kmem_free(buffer);
659 | 	if (cur)
660 | 		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
661 | 					   XFS_BTREE_NOERROR));
662 | 	if (agbp)
663 | 		xfs_buf_relse(agbp);
664 |
665 | 	return error;
666 | }