#include <linux/slab.h>
#include <linux/time.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <asm/uaccess.h>
#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid)	ipc_checkid(&(sma)->sem_perm, semid)
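/*
 * The sem_* wrappers map semaphore-set operations onto the generic
 * SysV IPC helpers: sem_ids() picks the semaphore id table out of the
 * ipc namespace, and sem_unlock()/sem_checkid() forward to the common
 * ipc_unlock()/ipc_checkid() routines via the embedded kern_ipc_perm.
 */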
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#define SEMMSL_FAST	256	/* 512 bytes on stack */
#define SEMOPM_FAST	64	/* ~ 372 bytes on stack */
#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]
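/*
 * sem_ctls[] holds the classic SysV tunables, in the order exported by
 * the "sem" sysctl: SEMMSL (max semaphores per set), SEMMNS (max
 * semaphores system-wide), SEMOPM (max operations per semop() call)
 * and SEMMNI (max number of semaphore sets).
 */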
189 " key semid perms nsems uid gid cuid cgid otime ctime\n",
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
	ipc_rcu_getref(sma);
	ipc_unlock(&(sma)->sem_perm);
}
static inline void sem_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	ipc_unlock(&(sma)->sem_perm);
}
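/*
 * sem_getref_and_unlock()/sem_lock_and_putref() bracket code that must
 * drop the per-set spinlock (e.g. to allocate with GFP_KERNEL): the
 * extra reference keeps the sem_array from being freed while unlocked,
 * and every caller rechecks sma->sem_perm.deleted after relocking in
 * case the set was removed by IPC_RMID in the meantime.
 */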
int nsems = params->u.nsems;
int semflg = params->flg;
if (ns->used_sems + nsems > ns->sc_semmns)
size = sizeof(*sma) + nsems * sizeof(struct sem);
for (i = 0; i < nsems; i++)
	INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
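/*
 * newary() gives every semaphore in the new set its own sem_pending
 * list. Simple (single-sop) waiters queue on the list of the semaphore
 * they block on, so a wakeup scan only has to walk the waiters of the
 * semaphore that actually changed; complex, multi-sop operations use
 * the array-wide pending list instead.
 */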
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				  struct ipc_params *params)
if (nsems < 0 || nsems > ns->sc_semmsl)
sem_params.flg = semflg;
static int try_atomic_semop(struct sem_array *sma, struct sembuf *sops,
			    int nsops, struct sem_undo *un, int pid)
for (sop = sops; sop < sops + nsops; sop++) {
if (!sem_op && result)
while (sop >= sops) {	/* success path: commit sempid and SEM_UNDO adjustments */
while (sop >= sops) {	/* failure path: roll back the values already applied */
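/*
 * try_atomic_semop() applies the whole sops[] vector tentatively, then
 * either commits it (first loop above: record sempid and the SEM_UNDO
 * adjustments) or rolls every value change back (second loop), so the
 * vector takes effect all-or-nothing. It returns 0 on success, 1 if
 * the caller must sleep, and a negative errno on failure.
 */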
static void wake_up_sem_queue_prepare(struct list_head *pt,
				      struct sem_queue *q, int error)
if (list_empty(pt)) {
static void wake_up_sem_queue_do(struct list_head *pt)
	did_something = !list_empty(pt);
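/*
 * Wakeups are done in two phases: wake_up_sem_queue_prepare() only
 * links the lucky waiter onto a caller-provided list (pt) while the
 * semaphore spinlock is still held; wake_up_sem_queue_do() later walks
 * that list and performs the actual wake-ups after the lock has been
 * dropped, keeping the lock hold time short.
 */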
if (h->sops[0].sem_op == 0)
int semop_completed = 0;
pending_list = &sma->sem_base[semnum].sem_pending;
walk = pending_list->next;
while (walk != pending_list) {
if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
		q->alter)
	break;
error = try_atomic_semop(sma, q->sops, q->nsops,
			 q->undo, q->pid);
unlink_queue(sma, q);
restart = check_restart(sma, q);
wake_up_sem_queue_prepare(pt, q, error);
return semop_completed;
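/*
 * do_smart_update() runs after the array was modified. With no hint
 * about what changed (sops == NULL) it rescans every pending queue via
 * update_queue(sma, -1, ...); otherwise it only rescans the
 * per-semaphore queues of entries that were incremented, or
 * decremented down to zero, since only those events can unblock a
 * sleeper.
 */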
static void do_smart_update(struct sem_array *sma, struct sembuf *sops,
			    int nsops, int otime, struct list_head *pt)
if (update_queue(sma, -1, pt))
for (i = 0; i < nsops; i++) {
	if (sops[i].sem_op > 0 ||
	    (sops[i].sem_op < 0 &&
	     sma->sem_base[sops[i].sem_num].semval == 0))
		if (update_queue(sma, sops[i].sem_num, pt))
int nsops = q->nsops;
for (i = 0; i < nsops; i++)
	if (sops[i].sem_num == semnum
	    && (sops[i].sem_op < 0)
	    && !(sops[i].sem_flg & IPC_NOWAIT))
int nsops = q->nsops;
for (i = 0; i < nsops; i++)
	if (sops[i].sem_num == semnum
	    && (sops[i].sem_op == 0)
	    && !(sops[i].sem_flg & IPC_NOWAIT))
spin_lock(&un->ulp->lock);
un->semid = -1;
list_del_rcu(&un->list_proc);
spin_unlock(&un->ulp->lock);
INIT_LIST_HEAD(&tasks);
unlink_queue(sma, q);
wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
wake_up_sem_queue_do(&tasks);
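/*
 * freeary() runs under the set's lock when the array is removed
 * (IPC_RMID): it marks every undo entry as stale (semid = -1), wakes
 * all sleepers with -EIDRM via the two-phase wakeup above, and finally
 * drops the array itself.
 */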
static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, union semun arg)
return (max_id < 0) ? 0 : max_id;
sma = sem_lock(ns, semid);
sma = sem_lock_check(ns, semid);
memset(&tbuf, 0, sizeof(tbuf));
if (copy_semid_to_user(arg.buf, &tbuf, version))
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		       int cmd, int version, union semun arg)
ushort *sem_io = fast_sem_io;
sma = sem_lock_check(ns, semid);
INIT_LIST_HEAD(&tasks);
nsems = sma->sem_nsems;
sem_getref_and_unlock(sma);
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
for (i = 0; i < sma->sem_nsems; i++)
	sem_io[i] = sma->sem_base[i].semval;
sem_getref_and_unlock(sma);
for (i = 0; i < nsems; i++) {
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
for (i = 0; i < nsems; i++)
	sma->sem_base[i].semval = sem_io[i];
for (i = 0; i < nsems; i++)
do_smart_update(sma, NULL, 0, 0, &tasks);
if (semnum < 0 || semnum >= nsems)
curr = &sma->sem_base[semnum];
err = count_semncnt(sma, semnum);
err = count_semzcnt(sma, semnum);
if (val > SEMVMX || val < 0)
curr->sempid = task_tgid_vnr(current);
do_smart_update(sma, NULL, 0, 0, &tasks);
wake_up_sem_queue_do(&tasks);
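/*
 * Both SETALL and SETVAL end with do_smart_update() followed by
 * wake_up_sem_queue_do(): overwriting semaphore values may satisfy
 * sleeping operations, so the pending queues are rescanned and the
 * resulting wake-ups issued once the lock is released.
 */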
if (sem_io != fast_sem_io)
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
out->sem_perm.uid = tbuf_old.sem_perm.uid;
out->sem_perm.gid = tbuf_old.sem_perm.gid;
out->sem_perm.mode = tbuf_old.sem_perm.mode;
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, union semun arg)
if (copy_semid_from_user(&semid64, arg.buf, version))
ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd,
		       &semid64.sem_perm, 0);
return PTR_ERR(ipcp);
ns = current->nsproxy->ipc_ns;
err = semctl_nolock(ns, semid, cmd, version, arg);
err = semctl_main(ns, semid, semnum, cmd, version, arg);
err = semctl_down(ns, semid, cmd, version, arg);
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
static inline int get_undo_list(struct sem_undo_list **undo_listp)
undo_list = current->sysvsem.undo_list;
undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
if (undo_list == NULL)
current->sysvsem.undo_list = undo_list;
*undo_listp = undo_list;
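/*
 * get_undo_list() allocates the per-task undo list lazily: the first
 * semop() with SEM_UNDO kzalloc()s it and caches it in
 * current->sysvsem.undo_list; later calls just hand back the cached
 * pointer.
 */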
if (un->semid == semid)
un = __lookup_undo(ulp, semid);
error = get_undo_list(&ulp);
return ERR_PTR(error);
spin_lock(&ulp->lock);
un = lookup_undo(ulp, semid);
spin_unlock(&ulp->lock);
sma = sem_lock_check(ns, semid);
return ERR_CAST(sma);
sem_getref_and_unlock(sma);
sem_lock_and_putref(sma);
un = ERR_PTR(-EIDRM);
spin_lock(&ulp->lock);
un = lookup_undo(ulp, semid);
new->semadj = (short *) &new[1];
list_add_rcu(&new->list_proc, &ulp->list_proc);
list_add(&new->list_id, &sma->list_id);
spin_unlock(&ulp->lock);
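/*
 * find_alloc_undo() has to allocate with GFP_KERNEL, which may sleep,
 * so it uses the getref/putref dance: drop the set's lock around the
 * allocation, then relock, recheck sem_perm.deleted (the set may have
 * been removed) and repeat lookup_undo() in case another task raced in
 * and installed an undo entry for the same semid first.
 */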
static int get_queue_result(struct sem_queue *q)
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf *sops = fast_sops, *sop;
int undos = 0, alter = 0, max;
unsigned long jiffies_left = 0;
ns = current->nsproxy->ipc_ns;
if (nsops < 1 || semid < 0)
if (nsops > ns->sc_semopm)
if (nsops > SEMOPM_FAST) {
for (sop = sops; sop < sops + nsops; sop++) {
	if (sop->sem_num >= max)
if (sop->sem_op != 0)
un = find_alloc_undo(ns, semid);
error = PTR_ERR(un);
INIT_LIST_HEAD(&tasks);
sma = sem_lock_check(ns, semid);
error = PTR_ERR(sma);
if (un->semid == -1) {
	goto out_unlock_free;
goto out_unlock_free;
goto out_unlock_free;
goto out_unlock_free;
error = try_atomic_semop(sma, sops, nsops, un, task_tgid_vnr(current));
if (alter && error == 0)
	do_smart_update(sma, sops, nsops, 1, &tasks);
goto out_unlock_free;
queue.nsops = nsops;
queue.pid = task_tgid_vnr(current);
queue.alter = alter;
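/*
 * If try_atomic_semop() returned 1 the operation cannot proceed yet:
 * semtimedop() fills in a sem_queue, links it on the appropriate
 * pending list (per-semaphore for single-sop operations) and sleeps.
 * queue.status starts out as -EINTR; a waker overwrites it, so if
 * get_queue_result() still sees -EINTR after wakeup, nobody woke us
 * and the sleep ended on a signal or timeout.
 */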
error = get_queue_result(&queue);
if (error != -EINTR) {
sma = sem_lock(ns, semid);
error = get_queue_result(&queue);
if (error != -EINTR) {
	goto out_unlock_free;
if (timeout && jiffies_left == 0)
if (error == -EINTR && !signal_pending(current))
unlink_queue(sma, &queue);
wake_up_sem_queue_do(&tasks);
if (sops != fast_sops)
error = get_undo_list(&undo_list);
tsk->sysvsem.undo_list = undo_list;
tsk->sysvsem.undo_list = NULL;
ulp = tsk->sysvsem.undo_list;
tsk->sysvsem.undo_list = NULL;
un = list_entry_rcu(ulp->list_proc.next,
		    struct sem_undo, list_proc);
un = __lookup_undo(ulp, semid);
spin_lock(&ulp->lock);
list_del_rcu(&un->list_proc);
spin_unlock(&ulp->lock);
if (semaphore->semval < 0)
	semaphore->semval = 0; /* shouldn't happen */
semaphore->sempid = task_tgid_vnr(current);
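/*
 * At task exit, exit_sem() walks the task's undo list and applies each
 * accumulated SEM_UNDO adjustment, clamping the result to the valid
 * [0, SEMVMX] range and recording the exiting task as the semaphore's
 * last updater before waking anyone the adjustment unblocked.
 */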
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
wake_up_sem_queue_do(&tasks);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
"%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",