/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution. Hence, we get something of a weak CPU binding, though
     it is still not clear whether this results in better locality.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */
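
/*
 * A minimal usage sketch (illustrative only; the enum value and handler
 * below are hypothetical, not part of this file): a subsystem registers
 * its handler once at boot and raises the softirq later, typically from
 * hard interrupt context.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// runs with interrupts enabled and bottom halves disabled
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// MY_SOFTIRQ: hypothetical
 *	raise_softirq(MY_SOFTIRQ);			// marks it pending on this CPU
 */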

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
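
/*
 * Worked example of the bookkeeping above (a sketch; the authoritative
 * bit layout lives in <linux/preempt.h>):
 * - local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET, so softirq_count()
 *   is non-zero and in_softirq() is true, but in_serving_softirq() is
 *   not: bottom halves are merely disabled.
 * - __do_softirq() adds only SOFTIRQ_OFFSET, so in_serving_softirq() is
 *   true while a handler actually runs.
 */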

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
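
/*
 * Typical usage outside this file (a sketch; the per-CPU variable is
 * hypothetical): local_bh_disable()/local_bh_enable() bracket code that
 * must not race with softirq handlers on this CPU, e.g. data also
 * touched from a timer or NET_RX handler.
 *
 *	local_bh_disable();
 *	__this_cpu_inc(my_percpu_counter);	// hypothetical per-CPU data
 *	local_bh_enable();			// may run pending softirqs
 */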

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
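
/*
 * Worked example (a sketch, assuming HZ is high enough that
 * msecs_to_jiffies(2) is at least one jiffy): __do_softirq() keeps
 * rescanning the pending mask until about 2 ms have elapsed,
 * need_resched() becomes true, or it has restarted MAX_SOFTIRQ_RESTART
 * (10) times, whichever comes first; any remaining work is then
 * deferred to ksoftirqd.
 */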

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
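
/*
 * A minimal driver-side usage sketch (names are illustrative only, using
 * the unsigned-long-data tasklet API implemented in this file):
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work, runs in softirq (TASKLET_SOFTIRQ) context
 *	}
 *
 *	tasklet_init(&dev->t, my_tasklet_fn, (unsigned long)dev);
 *	tasklet_schedule(&dev->t);	// typically from the hard irq handler
 *	...
 *	tasklet_kill(&dev->t);		// before dev is freed
 */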

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
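
/*
 * A usage sketch (illustrative names): initialize the combo once, then
 * arm the embedded hrtimer as usual; the supplied callback runs from
 * HI_SOFTIRQ context instead of hard interrupt context.
 *
 *	tasklet_hrtimer_init(&dev->th, my_hrtimer_cb,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_start(&dev->th.timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */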

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}