/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H

#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/mutex.h>

/*
 * Core internal functions to deal with irq descriptors
 */

struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
struct irq_domain;
struct pt_regs;

20 21
/**
 * struct irq_desc - interrupt descriptor
22
 * @irq_common_data:	per irq and chip data passed down to chip functions
23
 * @kstat_irqs:		irq stats per cpu
24 25
 * @handle_irq:		highlevel irq-events handler
 * @preflow_handler:	handler called before the flow handler (currently used by sparc)
26 27
 * @action:		the irq action chain
 * @status:		status information
28
 * @core_internal_state__do_not_mess_with_it: core internal status information
29
 * @depth:		disable-depth, for nested irq_disable() calls
30
 * @wake_depth:		enable depth, for multiple irq_set_irq_wake() callers
31 32 33
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
34 35
 * @threads_handled:	stats field for deferred spurious detection of threaded handlers
 * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
36
 * @lock:		locking for SMP
37
 * @affinity_hint:	hint to user space for preferred irq affinity
38
 * @affinity_notify:	context for notification of affinity changes
39
 * @pending_mask:	pending rebalanced interrupts
40
 * @threads_oneshot:	bitfield to handle shared oneshot threads
41 42
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
43 44 45 46 47
 * @nr_actions:		number of installed actions on this descriptor
 * @no_suspend_depth:	number of irqactions on a irq descriptor with
 *			IRQF_NO_SUSPEND set
 * @force_resume_depth:	number of irqactions on a irq descriptor with
 *			IRQF_FORCE_RESUME set
48
 * @rcu:		rcu head for delayed free
49
 * @kobj:		kobject used to represent this struct in sysfs
50
 * @request_mutex:	mutex to protect request/free before locking desc->lock
51
 * @dir:		/proc/irq/ procfs entry
52
 * @debugfs_file:	dentry for the debugfs file
53 54 55
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {
56
	struct irq_common_data	irq_common_data;
57
	struct irq_data		irq_data;
58
	unsigned int __percpu	*kstat_irqs;
59
	irq_flow_handler_t	handle_irq;
60 61 62
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
	irq_preflow_handler_t	preflow_handler;
#endif
63
	struct irqaction	*action;	/* IRQ action list */
64
	unsigned int		status_use_accessors;
65
	unsigned int		core_internal_state__do_not_mess_with_it;
66 67 68 69 70
	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
71 72
	atomic_t		threads_handled;
	int			threads_handled_last;
73
	raw_spinlock_t		lock;
74
	struct cpumask		*percpu_enabled;
75
	const struct cpumask	*percpu_affinity;
76 77
#ifdef CONFIG_SMP
	const struct cpumask	*affinity_hint;
78
	struct irq_affinity_notify *affinity_notify;
79 80 81 82
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
83
	unsigned long		threads_oneshot;
84 85
	atomic_t		threads_active;
	wait_queue_head_t       wait_for_threads;
86 87 88
#ifdef CONFIG_PM_SLEEP
	unsigned int		nr_actions;
	unsigned int		no_suspend_depth;
89
	unsigned int		cond_suspend_depth;
90 91
	unsigned int		force_resume_depth;
#endif
92 93
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
94
#endif
95 96
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	struct dentry		*debugfs_file;
97
	const char		*dev_name;
98
#endif
99 100
#ifdef CONFIG_SPARSE_IRQ
	struct rcu_head		rcu;
101
	struct kobject		kobj;
102
#endif
103
	struct mutex		request_mutex;
104
	int			parent_irq;
105
	struct module		*owner;
106 107 108
	const char		*name;
} ____cacheline_internodealigned_in_smp;

109 110 111 112 113 114
#ifdef CONFIG_SPARSE_IRQ
extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);
#else
static inline void irq_lock_sparse(void) { }
static inline void irq_unlock_sparse(void) { }
115 116 117
extern struct irq_desc irq_desc[NR_IRQS];
#endif

118 119
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
{
120
	return container_of(data->common, struct irq_desc, irq_common_data);
121 122
}

123 124 125 126 127
static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
{
	return desc->irq_data.irq;
}

128 129 130 131 132
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
	return &desc->irq_data;
}

133 134 135 136 137 138 139 140 141 142 143 144
static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
	return desc->irq_data.chip;
}

static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
	return desc->irq_data.chip_data;
}

static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
145
	return desc->irq_common_data.handler_data;
146 147
}

148 149
/*
 * Architectures call this to let the generic IRQ layer
150
 * handle an interrupt.
151
 */
152
static inline void generic_handle_irq_desc(struct irq_desc *desc)
153
{
154
	desc->handle_irq(desc);
155 156
}

int generic_handle_irq(unsigned int irq);

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/*
 * Convert a HW interrupt number to a logical one using a IRQ domain,
 * and handle the result interrupt number. Return -EINVAL if
 * conversion failed. Providing a NULL domain indicates that the
 * conversion has already been done.
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs);

/* Convenience wrapper: always perform the domain lookup for @hwirq */
static inline int handle_domain_irq(struct irq_domain *domain,
				    unsigned int hwirq, struct pt_regs *regs)
{
	return __handle_domain_irq(domain, hwirq, true, regs);
}
#endif

176
/* Test to see if a driver has successfully requested an irq */
177
static inline int irq_desc_has_action(struct irq_desc *desc)
178 179 180 181
{
	return desc->action != NULL;
}

/* Test by irq number whether an action is installed on the descriptor */
static inline int irq_has_action(unsigned int irq)
{
	return irq_desc_has_action(irq_to_desc(irq));
}

187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227
/**
 * irq_set_handler_locked - Set irq handler from a locked region
 * @data:	Pointer to the irq_data structure which identifies the irq
 * @handler:	Flow control handler function for this interrupt
 *
 * Sets the handler in the irq descriptor associated to @data.
 *
 * Must be called with irq_desc locked and valid parameters. Typical
 * call site is the irq_set_type() callback.
 */
static inline void irq_set_handler_locked(struct irq_data *data,
					  irq_flow_handler_t handler)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	desc->handle_irq = handler;
}

/**
 * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
 * @data:	Pointer to the irq_data structure for which the chip is set
 * @chip:	Pointer to the new irq chip
 * @handler:	Flow control handler function for this interrupt
 * @name:	Name of the interrupt
 *
 * Installs @chip at this hierarchy level of @data and updates the flow
 * handler and name in the descriptor which embeds @data.
 *
 * The caller must hold the irq_desc lock and supply valid parameters.
 */
static inline void
irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
				 irq_flow_handler_t handler, const char *name)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	data->chip = chip;
	desc->handle_irq = handler;
	desc->name = name;
}

228
static inline bool irq_balancing_disabled(unsigned int irq)
229 230 231 232
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
233
	return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
234
}
235

236
static inline bool irq_is_percpu(unsigned int irq)
237 238 239 240 241 242 243
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status_use_accessors & IRQ_PER_CPU;
}

244
static inline bool irq_is_percpu_devid(unsigned int irq)
245 246 247 248 249 250 251
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status_use_accessors & IRQ_PER_CPU_DEVID;
}

252
static inline void
253 254
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
		      struct lock_class_key *request_class)
255 256 257
{
	struct irq_desc *desc = irq_to_desc(irq);

258 259 260 261
	if (desc) {
		lockdep_set_class(&desc->lock, lock_class);
		lockdep_set_class(&desc->request_mutex, request_class);
	}
262 263
}

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Install @handler to be called before the flow handler of @irq */
static inline void
__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->preflow_handler = handler;
}
#endif

#endif