/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting BE scheduling class with prio 2 is done ala:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(IOPRIO_WHO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 *
 */
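/*
 * For illustration only, a minimal userspace sketch of the above (assuming
 * the syscall is invoked through syscall(2), since glibc provides no
 * wrapper; the helper name set_be_ioprio below is made up for this example):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int set_be_ioprio(pid_t pid, int level)
 *	{
 *		unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | level;
 *
 *		return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, pid, prio);
 *	}
 */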
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/cred.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/sched/user.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>

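/*
 * set_task_ioprio - change the io priority stored in a task's io_context.
 *
 * The caller must either match the target task's uid with its own uid/euid
 * or hold CAP_SYS_NICE, and the security hook must not object.  An
 * io_context is allocated on demand if the task does not have one yet.
 */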
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc->ioprio = ioprio;
		put_io_context(ioc);
	}

	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);

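/*
 * ioprio_set(2) - set the io priority of one or more tasks.
 *
 * @which selects how @who is interpreted: IOPRIO_WHO_PROCESS (a single
 * thread id), IOPRIO_WHO_PGRP (a process group) or IOPRIO_WHO_USER (every
 * task owned by that uid); @who == 0 means the calling thread, its process
 * group or its user respectively.  The RT class requires CAP_SYS_ADMIN,
 * and RT/BE priority data must lie in [0, IOPRIO_BE_NR).
 */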
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret;

	switch (class) {
		case IOPRIO_CLASS_RT:
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			/* fall through */
			/* rt has prio field too */
		case IOPRIO_CLASS_BE:
			if (data >= IOPRIO_BE_NR || data < 0)
				return -EINVAL;

			break;
		case IOPRIO_CLASS_IDLE:
			break;
		case IOPRIO_CLASS_NONE:
			if (data)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

	ret = -ESRCH;
	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = set_task_ioprio(p, ioprio);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					break;
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			uid = make_kuid(current_user_ns(), who);
			if (!uid_valid(uid))
				break;
			if (!who)
				user = current_user();
			else
				user = find_user(uid);

			if (!user)
				break;

			for_each_process_thread(g, p) {
				if (!uid_eq(task_uid(p), uid) ||
				    !task_pid_vnr(p))
					continue;
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					goto free_uid;
			}
free_uid:
			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}

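/*
 * get_task_ioprio - read a task's io priority under task_lock.
 *
 * Tasks without an io_context report the IOPRIO_CLASS_NONE/IOPRIO_NORM
 * default; a negative return value is an error from the security hook.
 */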
static int get_task_ioprio(struct task_struct *p)
{
	int ret;

	ret = security_task_getioprio(p);
	if (ret)
		goto out;
	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	task_lock(p);
	if (p->io_context)
		ret = p->io_context->ioprio;
	task_unlock(p);
out:
	return ret;
}

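/*
 * ioprio_best - return the stronger of two io priorities.
 *
 * Invalid values are treated as the BE/IOPRIO_NORM default.  Since a lower
 * composite value means a higher priority (RT < BE < IDLE, and prio 0 is
 * the highest level within a class), min() picks the stronger one.
 */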
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	if (!ioprio_valid(aprio))
		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	if (!ioprio_valid(bprio))
		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	return min(aprio, bprio);
}

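/*
 * ioprio_get(2) - query the io priority of one or more tasks.
 *
 * For IOPRIO_WHO_PGRP and IOPRIO_WHO_USER the strongest priority found
 * among the matching threads is returned (folded via ioprio_best);
 * -ESRCH is returned when nothing matched.
 */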
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret = -ESRCH;
	int tmpio;

	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = get_task_ioprio(p);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			uid = make_kuid(current_user_ns(), who);
			if (!who)
				user = current_user();
			else
				user = find_user(uid);

			if (!user)
				break;

			for_each_process_thread(g, p) {
				if (!uid_eq(task_uid(p), user->uid) ||
				    !task_pid_vnr(p))
					continue;
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			}

			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}