Bug Summary

File: fs/dlm/user.c
Warning: line 748, column 6
Copies out a struct with untouched element(s): unused

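What the warning means: copy_to_user() at fs/dlm/user.c:748 copies an on-stack
structure to userspace while at least one of its members, the unused[] array, was
never written. On the compat path, resultptr points at a struct dlm_lock_result32
that compat_output() fills field by field; unused[3] is left untouched, so whatever
stale kernel stack bytes happen to sit there are disclosed to the calling process.
The general shape of the defect, as a reduced hypothetical example (not code from
the file below), is:

    struct reply {
        __u32 value;
        __u8 unused[3];    /* never assigned below */
    };

    static int send_reply(void __user *buf, __u32 value)
    {
        struct reply r;    /* uninitialized stack memory */

        r.value = value;   /* r.unused[] still holds stale bytes */
        return copy_to_user(buf, &r, sizeof(r)) ? -EFAULT : 0;
    }
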
Annotated Source Code

1/*
2 * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#include <linux/miscdevice.h>
10#include <linux/init.h>
11#include <linux/wait.h>
12#include <linux/module.h>
13#include <linux/file.h>
14#include <linux/fs.h>
15#include <linux/poll.h>
16#include <linux/signal.h>
17#include <linux/spinlock.h>
18#include <linux/dlm.h>
19#include <linux/dlm_device.h>
20#include <linux/slab.h>
21
22#include "dlm_internal.h"
23#include "lockspace.h"
24#include "lock.h"
25#include "lvb_table.h"
26#include "user.h"
27#include "ast.h"
28
29static const char name_prefix[] = "dlm";
30static const struct file_operations device_fops;
31static atomic_t dlm_monitor_opened;
32static int dlm_monitor_unused = 1;
33
34#ifdef CONFIG_COMPAT
35
36struct dlm_lock_params32 {
37 __u8 mode;
38 __u8 namelen;
39 __u16 unused;
40 __u32 flags;
41 __u32 lkid;
42 __u32 parent;
43 __u64 xid;
44 __u64 timeout;
45 __u32 castparam;
46 __u32 castaddr;
47 __u32 bastparam;
48 __u32 bastaddr;
49 __u32 lksb;
50 char lvb[DLM_USER_LVB_LEN];
51 char name[0];
52};
53
54struct dlm_write_request32 {
55 __u32 version[3];
56 __u8 cmd;
57 __u8 is64bit;
58 __u8 unused[2];
59
60 union {
61 struct dlm_lock_params32 lock;
62 struct dlm_lspace_params lspace;
63 struct dlm_purge_params purge;
64 } i;
65};
66
67struct dlm_lksb32 {
68 __u32 sb_status;
69 __u32 sb_lkid;
70 __u8 sb_flags;
71 __u32 sb_lvbptr;
72};
73
74struct dlm_lock_result32 {
75 __u32 version[3];
76 __u32 length;
77 __u32 user_astaddr;
78 __u32 user_astparam;
79 __u32 user_lksb;
80 struct dlm_lksb32 lksb;
81 __u8 bast_mode;
82 __u8 unused[3];
83 /* Offsets may be zero if no data is present */
84 __u32 lvb_offset;
85};
86
87static void compat_input(struct dlm_write_request *kb,
88 struct dlm_write_request32 *kb32,
89 int namelen)
90{
91 kb->version[0] = kb32->version[0];
92 kb->version[1] = kb32->version[1];
93 kb->version[2] = kb32->version[2];
94
95 kb->cmd = kb32->cmd;
96 kb->is64bit = kb32->is64bit;
97 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
98 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
99 kb->i.lspace.flags = kb32->i.lspace.flags;
100 kb->i.lspace.minor = kb32->i.lspace.minor;
101 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
102 } else if (kb->cmd == DLM_USER_PURGE) {
103 kb->i.purge.nodeid = kb32->i.purge.nodeid;
104 kb->i.purge.pid = kb32->i.purge.pid;
105 } else {
106 kb->i.lock.mode = kb32->i.lock.mode;
107 kb->i.lock.namelen = kb32->i.lock.namelen;
108 kb->i.lock.flags = kb32->i.lock.flags;
109 kb->i.lock.lkid = kb32->i.lock.lkid;
110 kb->i.lock.parent = kb32->i.lock.parent;
111 kb->i.lock.xid = kb32->i.lock.xid;
112 kb->i.lock.timeout = kb32->i.lock.timeout;
113 kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
114 kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
115 kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
116 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
117 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
118 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
119 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
120 }
121}
122
123static void compat_output(struct dlm_lock_result *res,
124 struct dlm_lock_result32 *res32)
125{
126 res32->version[0] = res->version[0];
127 res32->version[1] = res->version[1];
128 res32->version[2] = res->version[2];
129
130 res32->user_astaddr = (__u32)(long)res->user_astaddr;
131 res32->user_astparam = (__u32)(long)res->user_astparam;
132 res32->user_lksb = (__u32)(long)res->user_lksb;
133 res32->bast_mode = res->bast_mode;
134
135 res32->lvb_offset = res->lvb_offset;
136 res32->length = res->length;
137
138 res32->lksb.sb_status = res->lksb.sb_status;
139 res32->lksb.sb_flags = res->lksb.sb_flags;
140 res32->lksb.sb_lkid = res->lksb.sb_lkid;
141 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
142}
143#endif
144
145/* Figure out if this lock is at the end of its life and no longer
146 available for the application to use. The lkb still exists until
147 the final ast is read. A lock becomes EOL in three situations:
148 1. a noqueue request fails with EAGAIN
149 2. an unlock completes with EUNLOCK
150 3. a cancel of a waiting request completes with ECANCEL/EDEADLK
151 An EOL lock needs to be removed from the process's list of locks.
152 And we can't allow any new operation on an EOL lock. This is
153 not related to the lifetime of the lkb struct which is managed
154 entirely by refcount. */
155
156static int lkb_is_endoflife(int mode, int status)
157{
158 switch (status) {
159 case -DLM_EUNLOCK:
160 return 1;
161 case -DLM_ECANCEL:
162 case -ETIMEDOUT:
163 case -EDEADLK:
164 case -EAGAIN:
165 if (mode == DLM_LOCK_IV)
166 return 1;
167 break;
168 }
169 return 0;
170}
171
172/* we could possibly check if the cancel of an orphan has resulted in the lkb
173 being removed and then remove that lkb from the orphans list and free it */
174
175void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
176 int status, uint32_t sbflags, uint64_t seq)
177{
178 struct dlm_ls *ls;
179 struct dlm_user_args *ua;
180 struct dlm_user_proc *proc;
181 int rv;
182
183 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
184 return;
185
186 ls = lkb->lkb_resource->res_ls;
187 mutex_lock(&ls->ls_clear_proc_locks);
188
189 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
190 can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
191 lkb->ua so we can't try to use it. This second check is necessary
192 for cases where a completion ast is received for an operation that
193 began before clear_proc_locks did its cancel/unlock. */
194
195 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
196 goto out;
197
198 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
199 ua = lkb->lkb_ua;
200 proc = ua->proc;
201
202 if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
203 goto out;
204
205 if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
206 lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
207
208 spin_lock(&proc->asts_spin);
209
210 rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
211 if (rv < 0) {
212 spin_unlock(&proc->asts_spin);
213 goto out;
214 }
215
216 if (list_empty(&lkb->lkb_cb_list)) {
217 kref_get(&lkb->lkb_ref);
218 list_add_tail(&lkb->lkb_cb_list, &proc->asts);
219 wake_up_interruptible(&proc->wait);
220 }
221 spin_unlock(&proc->asts_spin);
222
223 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
224 /* N.B. spin_lock locks_spin, not asts_spin */
225 spin_lock(&proc->locks_spin);
226 if (!list_empty(&lkb->lkb_ownqueue)) {
227 list_del_init(&lkb->lkb_ownqueue);
228 dlm_put_lkb(lkb);
229 }
230 spin_unlock(&proc->locks_spin);
231 }
232 out:
233 mutex_unlock(&ls->ls_clear_proc_locks);
234}
235
236static int device_user_lock(struct dlm_user_proc *proc,
237 struct dlm_lock_params *params)
238{
239 struct dlm_ls *ls;
240 struct dlm_user_args *ua;
241 uint32_t lkid;
242 int error = -ENOMEM;
243
244 ls = dlm_find_lockspace_local(proc->lockspace);
245 if (!ls)
246 return -ENOENT;
247
248 if (!params->castaddr || !params->lksb) {
249 error = -EINVAL;
250 goto out;
251 }
252
253 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
254 if (!ua)
255 goto out;
256 ua->proc = proc;
257 ua->user_lksb = params->lksb;
258 ua->castparam = params->castparam;
259 ua->castaddr = params->castaddr;
260 ua->bastparam = params->bastparam;
261 ua->bastaddr = params->bastaddr;
262 ua->xid = params->xid;
263
264 if (params->flags & DLM_LKF_CONVERT) {
265 error = dlm_user_convert(ls, ua,
266 params->mode, params->flags,
267 params->lkid, params->lvb,
268 (unsigned long) params->timeout);
269 } else if (params->flags & DLM_LKF_ORPHAN) {
270 error = dlm_user_adopt_orphan(ls, ua,
271 params->mode, params->flags,
272 params->name, params->namelen,
273 (unsigned long) params->timeout,
274 &lkid);
275 if (!error)
276 error = lkid;
277 } else {
278 error = dlm_user_request(ls, ua,
279 params->mode, params->flags,
280 params->name, params->namelen,
281 (unsigned long) params->timeout);
282 if (!error)
283 error = ua->lksb.sb_lkid;
284 }
285 out:
286 dlm_put_lockspace(ls);
287 return error;
288}
289
290static int device_user_unlock(struct dlm_user_proc *proc,
291 struct dlm_lock_params *params)
292{
293 struct dlm_ls *ls;
294 struct dlm_user_args *ua;
295 int error = -ENOMEM;
296
297 ls = dlm_find_lockspace_local(proc->lockspace);
298 if (!ls)
299 return -ENOENT;
300
301 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
302 if (!ua)
303 goto out;
304 ua->proc = proc;
305 ua->user_lksb = params->lksb;
306 ua->castparam = params->castparam;
307 ua->castaddr = params->castaddr;
308
309 if (params->flags & DLM_LKF_CANCEL)
310 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
311 else
312 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
313 params->lvb);
314 out:
315 dlm_put_lockspace(ls);
316 return error;
317}
318
319static int device_user_deadlock(struct dlm_user_proc *proc,
320 struct dlm_lock_params *params)
321{
322 struct dlm_ls *ls;
323 int error;
324
325 ls = dlm_find_lockspace_local(proc->lockspace);
326 if (!ls)
327 return -ENOENT;
328
329 error = dlm_user_deadlock(ls, params->flags, params->lkid);
330
331 dlm_put_lockspace(ls);
332 return error;
333}
334
335static int dlm_device_register(struct dlm_ls *ls, char *name)
336{
337 int error, len;
338
339 /* The device is already registered. This happens when the
340 lockspace is created multiple times from userspace. */
341 if (ls->ls_device.name)
342 return 0;
343
344 error = -ENOMEM;
345 len = strlen(name) + strlen(name_prefix) + 2;
346 ls->ls_device.name = kzalloc(len, GFP_NOFS);
347 if (!ls->ls_device.name)
348 goto fail;
349
350 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
351 name);
352 ls->ls_device.fops = &device_fops;
353 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
354
355 error = misc_register(&ls->ls_device);
356 if (error) {
357 kfree(ls->ls_device.name);
358 }
359fail:
360 return error;
361}
362
363int dlm_device_deregister(struct dlm_ls *ls)
364{
365 /* The device is not registered. This happens when the lockspace
366 was never used from userspace, or when device_create_lockspace()
367 calls dlm_release_lockspace() after the register fails. */
368 if (!ls->ls_device.name)
369 return 0;
370
371 misc_deregister(&ls->ls_device);
372 kfree(ls->ls_device.name);
373 return 0;
374}
375
376static int device_user_purge(struct dlm_user_proc *proc,
377 struct dlm_purge_params *params)
378{
379 struct dlm_ls *ls;
380 int error;
381
382 ls = dlm_find_lockspace_local(proc->lockspace);
383 if (!ls)
384 return -ENOENT;
385
386 error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
387
388 dlm_put_lockspace(ls);
389 return error;
390}
391
392static int device_create_lockspace(struct dlm_lspace_params *params)
393{
394 dlm_lockspace_t *lockspace;
395 struct dlm_ls *ls;
396 int error;
397
398 if (!capable(CAP_SYS_ADMIN))
399 return -EPERM;
400
401 error = dlm_new_lockspace(params->name, NULL, params->flags,
402 DLM_USER_LVB_LEN, NULL, NULL, NULL,
403 &lockspace);
404 if (error)
405 return error;
406
407 ls = dlm_find_lockspace_local(lockspace);
408 if (!ls)
409 return -ENOENT;
410
411 error = dlm_device_register(ls, params->name);
412 dlm_put_lockspace(ls);
413
414 if (error)
415 dlm_release_lockspace(lockspace, 0);
416 else
417 error = ls->ls_device.minor;
418
419 return error;
420}
421
422static int device_remove_lockspace(struct dlm_lspace_params *params)
423{
424 dlm_lockspace_t *lockspace;
425 struct dlm_ls *ls;
426 int error, force = 0;
427
428 if (!capable(CAP_SYS_ADMIN))
429 return -EPERM;
430
431 ls = dlm_find_lockspace_device(params->minor);
432 if (!ls)
433 return -ENOENT;
434
435 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
436 force = 2;
437
438 lockspace = ls->ls_local_handle;
439 dlm_put_lockspace(ls);
440
441 /* The final dlm_release_lockspace waits for references to go to
442 zero, so all processes will need to close their device for the
443 ls before the release will proceed. release also calls the
444 device_deregister above. Converting a positive return value
445 from release to zero means that userspace won't know when its
446 release was the final one, but it shouldn't need to know. */
447
448 error = dlm_release_lockspace(lockspace, force);
449 if (error > 0)
450 error = 0;
451 return error;
452}
453
454/* Check the user's version matches ours */
455static int check_version(struct dlm_write_request *req)
456{
457 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
458 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
459 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
460
461 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
462 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
463 current->comm,
464 task_pid_nr(current),
465 req->version[0],
466 req->version[1],
467 req->version[2],
468 DLM_DEVICE_VERSION_MAJOR,
469 DLM_DEVICE_VERSION_MINOR,
470 DLM_DEVICE_VERSION_PATCH);
471 return -EINVAL;
472 }
473 return 0;
474}
475
476/*
477 * device_write
478 *
479 * device_user_lock
480 * dlm_user_request -> request_lock
481 * dlm_user_convert -> convert_lock
482 *
483 * device_user_unlock
484 * dlm_user_unlock -> unlock_lock
485 * dlm_user_cancel -> cancel_lock
486 *
487 * device_create_lockspace
488 * dlm_new_lockspace
489 *
490 * device_remove_lockspace
491 * dlm_release_lockspace
492 */
493
494/* a write to a lockspace device is a lock or unlock request, a write
495 to the control device is to create/remove a lockspace */
496
497static ssize_t device_write(struct file *file, const char __user *buf,
498 size_t count, loff_t *ppos)
499{
500 struct dlm_user_proc *proc = file->private_data;
501 struct dlm_write_request *kbuf;
502 int error;
503
504#ifdef CONFIG_COMPAT
505 if (count < sizeof(struct dlm_write_request32))
506#else
507 if (count < sizeof(struct dlm_write_request))
508#endif
509 return -EINVAL;
510
511 /*
512 * can't compare against COMPAT/dlm_write_request32 because
513 * we don't yet know if is64bit is zero
514 */
515 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
516 return -EINVAL;
517
518 kbuf = memdup_user_nul(buf, count);
519 if (IS_ERR(kbuf))
520 return PTR_ERR(kbuf);
521
522 if (check_version(kbuf)) {
523 error = -EBADE;
524 goto out_free;
525 }
526
527#ifdef CONFIG_COMPAT
528 if (!kbuf->is64bit) {
529 struct dlm_write_request32 *k32buf;
530 int namelen = 0;
531
532 if (count > sizeof(struct dlm_write_request32))
533 namelen = count - sizeof(struct dlm_write_request32);
534
535 k32buf = (struct dlm_write_request32 *)kbuf;
536
537 /* add 1 after namelen so that the name string is terminated */
538 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
539 GFP_NOFS);
540 if (!kbuf) {
541 kfree(k32buf);
542 return -ENOMEM;
543 }
544
545 if (proc)
546 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
547
548 compat_input(kbuf, k32buf, namelen);
549 kfree(k32buf);
550 }
551#endif
552
553 /* do we really need this? can a write happen after a close? */
554 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
555 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
556 error = -EINVAL;
557 goto out_free;
558 }
559
560 error = -EINVAL;
561
562 switch (kbuf->cmd)
563 {
564 case DLM_USER_LOCK:
565 if (!proc) {
566 log_print("no locking on control device");
567 goto out_free;
568 }
569 error = device_user_lock(proc, &kbuf->i.lock);
570 break;
571
572 case DLM_USER_UNLOCK:
573 if (!proc) {
574 log_print("no locking on control device");
575 goto out_free;
576 }
577 error = device_user_unlock(proc, &kbuf->i.lock);
578 break;
579
580 case DLM_USER_DEADLOCK:
581 if (!proc) {
582 log_print("no locking on control device");
583 goto out_free;
584 }
585 error = device_user_deadlock(proc, &kbuf->i.lock);
586 break;
587
588 case DLM_USER_CREATE_LOCKSPACE:
589 if (proc) {
590 log_print("create/remove only on control device");
591 goto out_free;
592 }
593 error = device_create_lockspace(&kbuf->i.lspace);
594 break;
595
596 case DLM_USER_REMOVE_LOCKSPACE:
597 if (proc) {
598 log_print("create/remove only on control device");
599 goto out_free;
600 }
601 error = device_remove_lockspace(&kbuf->i.lspace);
602 break;
603
604 case DLM_USER_PURGE:
605 if (!proc) {
606 log_print("no locking on control device");
607 goto out_free;
608 }
609 error = device_user_purge(proc, &kbuf->i.purge);
610 break;
611
612 default:
613 log_print("Unknown command passed to DLM device : %d\n",
614 kbuf->cmd);
615 }
616
617 out_free:
618 kfree(kbuf);
619 return error;
620}
621
622/* Every process that opens the lockspace device has its own "proc" structure
623 hanging off the open file that's used to keep track of locks owned by the
624 process and asts that need to be delivered to the process. */
625
626static int device_open(struct inode *inode, struct file *file)
627{
628 struct dlm_user_proc *proc;
629 struct dlm_ls *ls;
630
631 ls = dlm_find_lockspace_device(iminor(inode));
632 if (!ls)
633 return -ENOENT;
634
635 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
636 if (!proc) {
637 dlm_put_lockspace(ls);
638 return -ENOMEM;
639 }
640
641 proc->lockspace = ls->ls_local_handle;
642 INIT_LIST_HEAD(&proc->asts);
643 INIT_LIST_HEAD(&proc->locks);
644 INIT_LIST_HEAD(&proc->unlocking);
645 spin_lock_init(&proc->asts_spin);
646 spin_lock_init(&proc->locks_spin);
647 init_waitqueue_head(&proc->wait);
648 file->private_data = proc;
649
650 return 0;
651}
652
653static int device_close(struct inode *inode, struct file *file)
654{
655 struct dlm_user_proc *proc = file->private_data;
656 struct dlm_ls *ls;
657
658 ls = dlm_find_lockspace_local(proc->lockspace);
659 if (!ls)
660 return -ENOENT;
661
662 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
663
664 dlm_clear_proc_locks(ls, proc);
665
666 /* at this point no more lkb's should exist for this lockspace,
667 so there's no chance of dlm_user_add_ast() being called and
668 looking for lkb->ua->proc */
669
670 kfree(proc);
671 file->private_data = NULL;
672
673 dlm_put_lockspace(ls);
674 dlm_put_lockspace(ls); /* for the find in device_open() */
675
676 /* FIXME: AUTOFREE: if this ls is no longer used do
677 device_remove_lockspace() */
678
679 return 0;
680}
681
682static int copy_result_to_user(struct dlm_user_args *ua, int compat,
683 uint32_t flags, int mode, int copy_lvb,
684 char __user *buf, size_t count)
685{
686#ifdef CONFIG_COMPAT
687 struct dlm_lock_result32 result32;
688#endif
689 struct dlm_lock_result result;
690 void *resultptr;
691 int error=0;
692 int len;
693 int struct_len;
694
695 memset(&result, 0, sizeof(struct dlm_lock_result));
696 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
697 result.version[1] = DLM_DEVICE_VERSION_MINOR;
698 result.version[2] = DLM_DEVICE_VERSION_PATCH;
699 memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
700 result.user_lksb = ua->user_lksb;
701
702 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
703 in a conversion unless the conversion is successful. See code
704 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
705 notes that a new blocking AST address and parameter are set even if
706 the conversion fails, so maybe we should just do that. */
707
708 if (flags & DLM_CB_BAST) {
14
Taking false branch
709 result.user_astaddr = ua->bastaddr;
710 result.user_astparam = ua->bastparam;
711 result.bast_mode = mode;
712 } else {
713 result.user_astaddr = ua->castaddr;
714 result.user_astparam = ua->castparam;
715 }
716
717#ifdef CONFIG_COMPAT
718 if (compat)
15
Taking true branch
719 len = sizeof(struct dlm_lock_result32);
720 else
721#endif
722 len = sizeof(struct dlm_lock_result);
723 struct_len = len;
724
725 /* copy lvb to userspace if there is one, it's been updated, and
726 the user buffer has space for it */
727
728 if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
729 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
730 DLM_USER_LVB_LEN)) {
731 error = -EFAULT;
732 goto out;
733 }
734
735 result.lvb_offset = len;
736 len += DLM_USER_LVB_LEN;
737 }
738
739 result.length = len;
740 resultptr = &result;
741#ifdef CONFIG_COMPAT
742 if (compat) {
16
Taking true branch
743 compat_output(&result, &result32);
744 resultptr = &result32;
745 }
746#endif
747
748 if (copy_to_user(buf, resultptr, struct_len))
17
Copies out a struct with untouched element(s): unused
749 error = -EFAULT;
750 else
751 error = len;
752 out:
753 return error;
754}
755
756static int copy_version_to_user(char __user *buf, size_t count)
757{
758 struct dlm_device_version ver;
759
760 memset(&ver, 0, sizeof(struct dlm_device_version));
761 ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
762 ver.version[1] = DLM_DEVICE_VERSION_MINOR;
763 ver.version[2] = DLM_DEVICE_VERSION_PATCH;
764
765 if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
766 return -EFAULT;
767 return sizeof(struct dlm_device_version);
768}
769
770/* a read returns a single ast described in a struct dlm_lock_result */
771
772static ssize_t device_read(struct file *file, char __user *buf, size_t count,
773 loff_t *ppos)
774{
775 struct dlm_user_proc *proc = file->private_data;
776 struct dlm_lkb *lkb;
777 DECLARE_WAITQUEUE(wait, current);
778 struct dlm_callback cb;
779 int rv, resid, copy_lvb = 0;
780 int old_mode, new_mode;
781
782 if (count == sizeof(struct dlm_device_version)) {
1
Taking false branch
783 rv = copy_version_to_user(buf, count);
784 return rv;
785 }
786
787 if (!proc) {
2
Assuming 'proc' is non-null
3
Taking false branch
788 log_print("non-version read from control device %zu", count);
789 return -EINVAL;
790 }
791
792#ifdef CONFIG_COMPAT
793 if (count < sizeof(struct dlm_lock_result32))
4
Taking false branch
794#else
795 if (count < sizeof(struct dlm_lock_result))
796#endif
797 return -EINVAL;
798
799 try_another:
800
801 /* do we really need this? can a read happen after a close? */
802 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
5
Taking false branch
803 return -EINVAL;
804
805 spin_lock(&proc->asts_spin);
806 if (list_empty(&proc->asts)) {
6
Taking false branch
807 if (file->f_flags & O_NONBLOCK) {
808 spin_unlock(&proc->asts_spin);
809 return -EAGAIN;
810 }
811
812 add_wait_queue(&proc->wait, &wait);
813
814 repeat:
815 set_current_state(TASK_INTERRUPTIBLE);
816 if (list_empty(&proc->asts) && !signal_pending(current)) {
817 spin_unlock(&proc->asts_spin);
818 schedule();
819 spin_lock(&proc->asts_spin);
820 goto repeat;
821 }
822 set_current_state(TASK_RUNNING);
823 remove_wait_queue(&proc->wait, &wait);
824
825 if (signal_pending(current)) {
826 spin_unlock(&proc->asts_spin);
827 return -ERESTARTSYS;
828 }
829 }
830
831 /* if we empty lkb_callbacks, we don't want to unlock the spinlock
832 without removing lkb_cb_list; so empty lkb_cb_list is always
833 consistent with empty lkb_callbacks */
834
835 lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
836
837 /* rem_lkb_callback sets a new lkb_last_cast */
838 old_mode = lkb->lkb_last_cast.mode;
839
840 rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
841 if (rv < 0) {
7
Assuming 'rv' is >= 0
8
Taking false branch
842 /* this shouldn't happen; lkb should have been removed from
843 list when resid was zero */
844 log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
845 list_del_init(&lkb->lkb_cb_list);
846 spin_unlock(&proc->asts_spin);
847 /* removes ref for proc->asts, may cause lkb to be freed */
848 dlm_put_lkb(lkb);
849 goto try_another;
850 }
851 if (!resid)
9
Assuming 'resid' is not equal to 0
10
Taking false branch
852 list_del_init(&lkb->lkb_cb_list);
853 spin_unlock(&proc->asts_spin);
854
855 if (cb.flags & DLM_CB_SKIP) {
11
Taking false branch
856 /* removes ref for proc->asts, may cause lkb to be freed */
857 if (!resid)
858 dlm_put_lkb(lkb);
859 goto try_another;
860 }
861
862 if (cb.flags & DLM_CB_CAST) {
12
Taking false branch
863 new_mode = cb.mode;
864
865 if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
866 dlm_lvb_operations[old_mode + 1][new_mode + 1])
867 copy_lvb = 1;
868
869 lkb->lkb_lksb->sb_status = cb.sb_status;
870 lkb->lkb_lksb->sb_flags = cb.sb_flags;
871 }
872
873 rv = copy_result_to_user(lkb->lkb_ua,
13
Calling 'copy_result_to_user'
874 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
875 cb.flags, cb.mode, copy_lvb, buf, count);
876
877 /* removes ref for proc->asts, may cause lkb to be freed */
878 if (!resid)
879 dlm_put_lkb(lkb);
880
881 return rv;
882}
883
884static unsigned int device_poll(struct file *file, poll_table *wait)
885{
886 struct dlm_user_proc *proc = file->private_data;
887
888 poll_wait(file, &proc->wait, wait);
889
890 spin_lock(&proc->asts_spin);
891 if (!list_empty(&proc->asts)) {
892 spin_unlock(&proc->asts_spin);
893 return POLLIN | POLLRDNORM;
894 }
895 spin_unlock(&proc->asts_spin);
896 return 0;
897}
898
899int dlm_user_daemon_available(void)
900{
901 /* dlm_controld hasn't started (or, has started, but not
902 properly populated configfs) */
903
904 if (!dlm_our_nodeid())
905 return 0;
906
907 /* This is to deal with versions of dlm_controld that don't
908 know about the monitor device. We assume that if the
909 dlm_controld was started (above), but the monitor device
910 was never opened, that it's an old version. dlm_controld
911 should open the monitor device before populating configfs. */
912
913 if (dlm_monitor_unused)
914 return 1;
915
916 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
917}
918
919static int ctl_device_open(struct inode *inode, struct file *file)
920{
921 file->private_data = NULL;
922 return 0;
923}
924
925static int ctl_device_close(struct inode *inode, struct file *file)
926{
927 return 0;
928}
929
930static int monitor_device_open(struct inode *inode, struct file *file)
931{
932 atomic_inc(&dlm_monitor_opened);
933 dlm_monitor_unused = 0;
934 return 0;
935}
936
937static int monitor_device_close(struct inode *inode, struct file *file)
938{
939 if (atomic_dec_and_test(&dlm_monitor_opened))
940 dlm_stop_lockspaces();
941 return 0;
942}
943
944static const struct file_operations device_fops = {
945 .open = device_open,
946 .release = device_close,
947 .read = device_read,
948 .write = device_write,
949 .poll = device_poll,
950 .owner = THIS_MODULE,
951 .llseek = noop_llseek,
952};
953
954static const struct file_operations ctl_device_fops = {
955 .open = ctl_device_open,
956 .release = ctl_device_close,
957 .read = device_read,
958 .write = device_write,
959 .owner = THIS_MODULE,
960 .llseek = noop_llseek,
961};
962
963static struct miscdevice ctl_device = {
964 .name = "dlm-control",
965 .fops = &ctl_device_fops,
966 .minor = MISC_DYNAMIC_MINOR,
967};
968
969static const struct file_operations monitor_device_fops = {
970 .open = monitor_device_open,
971 .release = monitor_device_close,
972 .owner = THIS_MODULE,
973 .llseek = noop_llseek,
974};
975
976static struct miscdevice monitor_device = {
977 .name = "dlm-monitor",
978 .fops = &monitor_device_fops,
979 .minor = MISC_DYNAMIC_MINOR,
980};
981
982int __init dlm_user_init(void)
983{
984 int error;
985
986 atomic_set(&dlm_monitor_opened, 0);
987
988 error = misc_register(&ctl_device);
989 if (error) {
990 log_print("misc_register failed for control device");
991 goto out;
992 }
993
994 error = misc_register(&monitor_device);
995 if (error) {
996 log_print("misc_register failed for monitor device");
997 misc_deregister(&ctl_device);
998 }
999 out:
1000 return error;
1001}
1002
1003void dlm_user_exit(void)
1004{
1005 misc_deregister(&ctl_device);
1006 misc_deregister(&monitor_device);
1007}
1008
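
A minimal sketch of one way to address the report, assuming the goal is simply that
no uninitialized byte of the compat result ever reaches copy_to_user(): zero the
on-stack struct dlm_lock_result32 before compat_output() populates it. This
illustrates the approach only and is not necessarily the patch that was applied
upstream:

    static void compat_output(struct dlm_lock_result *res,
                              struct dlm_lock_result32 *res32)
    {
        /* Clear unused[3] and any padding holes up front so the later
           copy_to_user() in copy_result_to_user() cannot leak stack data. */
        memset(res32, 0, sizeof(*res32));

        res32->version[0] = res->version[0];
        /* ... remaining field-by-field assignments as in the listing above ... */
    }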