A Look Inside the Linux Kernel Implementation --- Hard Interrupts

        Interrupt handling is a fairly important part of Linux driver work. To really understand it, rather than memorizing rules other people have summarized, it is better to see how it is actually implemented, so today I will study how the Linux kernel handles interrupts by reading the source. The initialization of the interrupt subsystem is only touched on lightly here; I will dig into it later when I have time. By tracing how Linux responds to an interrupt we can see the concrete details of interrupt handling.

Interrupt response flow

The rough sequence of events when an interrupt fires, as commonly summarized, is:

  1. The CPSR at the time of the interrupt is saved into SPSR_irq.
  2. The CPSR is modified to put the CPU into IRQ mode, i.e. the M field of CPSR is set to IRQ mode.
  3. The hardware automatically disables IRQ (or FIQ) by setting the I (or F) bit in CPSR to 1.
  4. The return address is saved into LR_irq.
  5. The hardware automatically jumps to the IRQ entry of the vector table; from this point on we are in software territory.

The Linux source defines the following code, which is the interrupt vector table. Via the linker script it is placed in its own section and, during boot, copied to a specific address in memory; the CPU is told where it lives through the CP15 system-control registers.

.section .vectors, "ax", %progbits
__vectors_start:
    W(b)    vector_rst
    W(b)    vector_und
    W(ldr)    pc, __vectors_start + 0x1000
    W(b)    vector_pabt
    W(b)    vector_dabt
    W(b)    vector_addrexcptn
    W(b)    vector_irq
    W(b)    vector_fiq

/*
 * Interrupt dispatcher
 */
    @------------------------------------------------ vector_irq is defined by the vector_stub macro below
    vector_stub    irq, IRQ_MODE, 4

    .long    __irq_usr            @  0  (USR_26 / USR_32)
    .long    __irq_invalid            @  1  (FIQ_26 / FIQ_32)
    .long    __irq_invalid            @  2  (IRQ_26 / IRQ_32)
    @---------------------------- SVC mode is 0b10011; ANDed with 0xf that gives 3
    .long    __irq_svc            @  3  (SVC_26 / SVC_32)
    .long    __irq_invalid            @  4
    .long    __irq_invalid            @  5
    .long    __irq_invalid            @  6
    .long    __irq_invalid            @  7
    .long    __irq_invalid            @  8
    .long    __irq_invalid            @  9
    .long    __irq_invalid            @  a
    .long    __irq_invalid            @  b
    .long    __irq_invalid            @  c
    .long    __irq_invalid            @  d
    .long    __irq_invalid            @  e
    .long    __irq_invalid            @  f   
       
    @------------------------------------ definition of the vector_stub macro
    .macro vector_stub, name, mode, correction=0
    .align 5
vector_\name:
    .if \correction
    sub    lr, lr, #\correction
    .endif

    @
    @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
    @ (parent CPSR)
    @
    stmia    sp, {r0, lr}        @ save r0, lr
    mrs    lr, spsr
    str    lr, [sp, #8]        @ save spsr

    @
    @ Prepare for SVC32 mode.  IRQs remain disabled.
    @
    mrs    r0, cpsr
    @ flip the mode bits of CPSR to SVC so the interrupt is handled in SVC mode
    eor    r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
    msr    spsr_cxsf, r0

    @
    @ the branch table must immediately follow this code
    @
    @ the low 4 bits of the saved SPSR give the mode the CPU was running in
    @ before the interrupt: 0 for USR, 3 for SVC
    and    lr, lr, #0x0f
 THUMB(    adr    r0, 1f            )
    @ index the branch table with that mode, loading lr with the address of
    @ __irq_usr or __irq_svc
 THUMB(    ldr    lr, [r0, lr, lsl #2]    )
    mov    r0, sp
    @ in ARM state the table entry is loaded PC-relative, e.g. ".long __irq_svc"
 ARM(    ldr    lr, [pc, lr, lsl #2]    )
    @ move lr into pc to branch to __irq_usr or __irq_svc
    movs    pc, lr            @ branch to handler in SVC mode
ENDPROC(vector_\name)
    .endm

Pay attention here to how the assembler macro expands; once that is clear, the rest follows. The "vector_stub irq, IRQ_MODE, 4" line above is what defines vector_irq, the interrupt entry stub. It saves a few registers and then, depending on the mode the CPU was in when the interrupt arrived, branches to __irq_usr or __irq_svc. For now we only look at the case where the interrupt hit while in kernel (SVC) mode, so the branch goes to __irq_svc. Since it is called directly from the exception entry, it still executes in interrupt context. Next, __irq_svc:

__irq_svc:
    svc_entry
    irq_handler
    @ after the interrupt itself has been handled, this is where kernel preemption may happen
#ifdef CONFIG_PREEMPT
    get_thread_info tsk
    @ read thread_info->preempt_count: 0 means the current task may be preempted, non-zero means it must not be
    ldr    r8, [tsk, #TI_PREEMPT]        @ get preempt count
    ldr    r0, [tsk, #TI_FLAGS]        @ get flags
    teq    r8, #0                @ if preempt count != 0
    movne    r0, #0                @ force flags to 0
    @ check whether the _TIF_NEED_RESCHED flag is set
    tst    r0, #_TIF_NEED_RESCHED
    blne    svc_preempt
#endif

    svc_exit r5, irq = 1            @ return from exception
 UNWIND(.fnend        )
ENDPROC(__irq_svc)

__irq_svc handles interrupts that arrive while running in kernel space. svc_entry saves the interrupted context, irq_handler does the actual interrupt handling, and if kernel preemption is enabled a preemption check follows (skipped here for now); finally svc_exit performs the return from the exception. irq_handler is where the flow enters the generic interrupt subsystem written in C. Straight to the source:

.macro    irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
    ldr    r1, =handle_arch_irq
    mov    r0, sp
    @ set up the return address
    adr    lr, BSYM(9997f)
    @ jump to the handler stored in handle_arch_irq; using ldr into pc (rather than a call) leaves lr untouched, so the return address set above survives
    ldr    pc, [r1]
#else
    arch_irq_handler_default
#endif
9997:
    .endm


handle_arch_irq is bound during interrupt-subsystem initialization; with a GIC interrupt controller it ends up pointing at gic_handle_irq (the binding itself is sketched below). gic_handle_irq then distinguishes two cases, interrupt numbers greater than 15 and those less than or equal to 15, as its source shows:
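
For reference, on ARM with CONFIG_MULTI_IRQ_HANDLER the binding looks roughly like this (a sketch from kernels of that era, not the verbatim source of any particular version); the GIC driver hands gic_handle_irq to set_handle_irq() while it initializes:

/* arch/arm/kernel/irq.c (roughly): the pointer the irq_handler macro loads into pc */
void (*handle_arch_irq)(struct pt_regs *) __read_mostly;

void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
    /* only the first registration wins; later calls are ignored */
    if (handle_arch_irq)
        return;
    handle_arch_irq = handle_irq;
}

/* drivers/irqchip/irq-gic.c, during GIC init (roughly): */
set_handle_irq(gic_handle_irq);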

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
    u32 irqstat, irqnr;
    struct gic_chip_data *gic = &gic_data[0];
    void __iomem *cpu_base = gic_data_cpu_base(gic);

    do {
        /* read the IAR (interrupt acknowledge) register */
        irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
        /* GICC_IAR_INT_ID_MASK is 0x3ff (the low 10 bits), so interrupt IDs range from 0 to 1023 */
        irqnr = irqstat & GICC_IAR_INT_ID_MASK;

        if (likely(irqnr > 15 && irqnr < 1021)) {
            handle_domain_irq(gic->domain, irqnr, regs);
            continue;
        }
        /* SGIs are used for inter-core communication; they only matter with CONFIG_SMP */
        if (irqnr < 16) {
            /* write the EOI register directly to signal end of interrupt */
            writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
            handle_IPI(irqnr, regs);
#endif
            continue;
        }
        break;
    } while (1);
}

All pending hardware interrupts are handled in one loop. By the GIC's design, interrupt IDs below 16 are SGIs used for inter-core communication, so they only matter on SMP systems. The common case therefore takes the irqnr > 15 && irqnr < 1021 branch and calls handle_domain_irq(gic->domain, irqnr, regs), where irqnr is the hardware interrupt number, regs is the context saved by svc_entry, and domain is the irq_domain that was set up for this interrupt controller when the interrupt subsystem was initialized. As the source shows, handle_domain_irq simply calls __handle_domain_irq, passing lookup as true.
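
The wrapper itself is roughly the following (from include/linux/irqdesc.h in kernels of that era):

static inline int handle_domain_irq(struct irq_domain *domain,
                    unsigned int hwirq, struct pt_regs *regs)
{
    /* lookup = true: translate the hardware irq number through the domain */
    return __handle_domain_irq(domain, hwirq, true, regs);
}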

Now look at how __handle_domain_irq processes the interrupt:

int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
            bool lookup, struct pt_regs *regs)
{
    struct pt_regs *old_regs = set_irq_regs(regs);
    unsigned int irq = hwirq;
    int ret = 0;

    /* bump the hardirq count, telling the kernel we have entered interrupt context */
    irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
    if (lookup)
        /* translate the hardware interrupt number into the Linux (virtual) irq number */
        irq = irq_find_mapping(domain, hwirq);
#endif

    /*
     * Some hardware gives randomly wrong interrupts.  Rather
     * than crashing, do something sensible.
     */
    if (unlikely(!irq || irq >= nr_irqs)) {
        ack_bad_irq(irq);
        ret = -EINVAL;
    } else {
        /* handle this specific interrupt; irq is now a Linux irq number */
        generic_handle_irq(irq);
    }

    /* leave interrupt context */
    irq_exit();
    set_irq_regs(old_regs);
    return ret;
}

The function that actually dispatches the interrupt is generic_handle_irq. It takes the irq number, uses irq_to_desc() to find the corresponding struct irq_desc, and then calls generic_handle_irq_desc(irq, desc), which in turn invokes desc->handle_irq for this interrupt.

int generic_handle_irq(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    if (!desc)
        return -EINVAL;
    generic_handle_irq_desc(irq, desc);
    return 0;
}

static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
   desc->handle_irq(irq, desc);
}

 What matters here is that the handle_irq callback in the interrupt descriptor is initialized differently depending on the interrupt. When each interrupt is mapped, gic_irq_domain_map picks the flow handler from the hardware interrupt number: for hwirq below 32 it installs handle_percpu_devid_irq, otherwise handle_fasteoi_irq. Driver code normally deals with external (shared peripheral) interrupts rather than the per-CPU ones, i.e. hardware interrupt numbers of 32 and above, so in that case handle_irq = handle_fasteoi_irq.
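
The mapping decision looks roughly like this in the GIC driver of that era (a simplified sketch; the details differ between kernel versions):

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                  irq_hw_number_t hw)
{
    if (hw < 32) {
        /* SGIs and PPIs: per-CPU interrupts */
        irq_set_percpu_devid(irq);
        irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                    handle_percpu_devid_irq, NULL, NULL);
    } else {
        /* SPIs: shared peripheral interrupts */
        irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                    handle_fasteoi_irq, NULL, NULL);
    }
    return 0;
}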

void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
    /* chip wraps the low-level (controller-specific) operations for this interrupt */
    struct irq_chip *chip = desc->irq_data.chip;

    raw_spin_lock(&desc->lock);

    if (!irq_may_run(desc))
        goto out;

    desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    kstat_incr_irqs_this_cpu(irq, desc);

    /*
     * If its disabled or no action available
     * then mask it and get out of here:
     */
     /* if no action is installed for this irq, or it has been disabled (IRQD_IRQ_DISABLED), mark it IRQS_PENDING and mask it with mask_irq() */
    if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
        desc->istate |= IRQS_PENDING;
        mask_irq(desc);
        goto out;
    }
    /* IRQS_ONESHOT interrupts do not allow nesting, so mask the interrupt source with mask_irq() */
    if (desc->istate & IRQS_ONESHOT)
        mask_irq(desc);

    preflow_handler(desc);
    /* the actual interrupt handling */
    handle_irq_event(desc);
    /* depending on the state, either unmask_irq() to unmask the line, or call irq_chip->irq_eoi to send EOI, telling the GIC that handling is complete */
    cond_unmask_eoi_irq(desc, chip);

    raw_spin_unlock(&desc->lock);
    return;
out:
    if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
        chip->irq_eoi(&desc->irq_data);
    raw_spin_unlock(&desc->lock);
}

handle_irq_event calls handle_irq_event_percpu, which runs action->handler() and, when needed, wakes the kernel interrupt thread to run action->thread_fn. The source:

irqreturn_t handle_irq_event(struct irq_desc *desc)
{
    struct irqaction *action = desc->action;
    irqreturn_t ret;
    /* clear the IRQS_PENDING flag */
    desc->istate &= ~IRQS_PENDING;
    /* set IRQD_IRQ_INPROGRESS: the hard interrupt is being handled */
    irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    raw_spin_unlock(&desc->lock);

    ret = handle_irq_event_percpu(desc, action);

    raw_spin_lock(&desc->lock);
    /* clear IRQD_IRQ_INPROGRESS: handling of the hard interrupt is done */
    irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    return ret;
}

handle_irq_event_percpu walks every action registered on this interrupt (there may be several for a shared interrupt) in turn:

irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
    irqreturn_t retval = IRQ_NONE;
    unsigned int flags = 0, irq = desc->irq_data.irq;
    /* walk the descriptor's action list, running each action's primary handler (action->handler) in turn */
    do {
        irqreturn_t res;

        trace_irq_handler_entry(irq, action);
        /* run the irqaction's handler: the interrupt service routine supplied when the irq was requested */
        res = action->handler(irq, action->dev_id);
        trace_irq_handler_exit(irq, action, res);

        if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
                  irq, action->handler))
            local_irq_disable();
        /* act on the return value of the interrupt service routine */
        switch (res) {
        /* the handler asks for the threaded handler: wake the kernel interrupt thread */
        case IRQ_WAKE_THREAD:
            /*
             * Catch drivers which return WAKE_THREAD but
             * did not set up a thread function
             */
            if (unlikely(!action->thread_fn)) {
                /* print a warning: no thread function was registered */
                warn_no_thread(irq, action);
                break;
            }
            /* wake the kernel thread bound to this interrupt */
            __irq_wake_thread(desc, action);

            /* Fall through to add to randomness */
        case IRQ_HANDLED:
            /* the interrupt has been handled completely */
            flags |= action->flags;
            break;

        default:
            break;
        }

        retval |= res;
        action = action->next;
    } while (action);

    add_interrupt_randomness(irq, flags);

    if (!noirqdebug)
        note_interrupt(irq, desc, retval);
    return retval;
}
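
What __irq_wake_thread() does is roughly the following (a condensed sketch of kernel/irq/handle.c; the real function carries additional memory-barrier reasoning):

void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
    /* if the thread crashed and is exiting, pretend the irq was handled */
    if (action->thread->flags & PF_EXITING)
        return;

    /* if IRQTF_RUNTHREAD was already set, a wakeup is already pending */
    if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
        return;

    /* record that this oneshot thread is outstanding, then wake it up */
    desc->threads_oneshot |= action->thread_mask;
    atomic_inc(&desc->threads_active);
    wake_up_process(action->thread);
}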

At this point most of the flow should be clear: after an interrupt fires, the arch-specific code eventually produces a Linux irq number, which leads to the corresponding interrupt descriptor; the driver has already registered its action on that descriptor via request_irq, so handling the interrupt is just a matter of finding and calling the registered handler. If the handler returns IRQ_WAKE_THREAD, the kernel thread created at registration time is woken; that thread is covered later in the registration section. Note that a handler registered with request_irq runs in interrupt context here, but in practice the kernel can, to improve latency, force such handlers to be threaded; that is also covered later. First, though, a quick look at handle_percpu_devid_irq(), the flow handler used for hardware interrupt numbers below 32.

void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
    struct irq_chip *chip = irq_desc_get_chip(desc);
    struct irqaction *action = desc->action;
    void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
    irqreturn_t res;

    kstat_incr_irqs_this_cpu(irq, desc);

    if (chip->irq_ack)
        chip->irq_ack(&desc->irq_data);

    trace_irq_handler_entry(irq, action);
    res = action->handler(irq, dev_id);
    trace_irq_handler_exit(irq, action, res);

    if (chip->irq_eoi)
        /* with the GIC this ends up in gic_eoi_irq() */
        chip->irq_eoi(&desc->irq_data);
}

The flow here is: the interrupt is acknowledged, the handler runs (the handler in this case is one registered by the kernel itself, not the kind of handler drivers normally register; at least that is my understanding for now), and finally EOI is sent. Its flow is clearly different from handle_fasteoi_irq.
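
The GIC's EOI callback mentioned in the comment above is essentially one register write, roughly (a sketch of drivers/irqchip/irq-gic.c of that era):

static void gic_eoi_irq(struct irq_data *d)
{
    /* write the hardware irq number to the EOI register to signal end of interrupt */
    writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}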

Interrupt registration flow

  The interface kernel drivers usually call to request an interrupt is request_irq. As shown below, it is only a thin wrapper around another interface, so the next step is to look at request_threaded_irq.

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
        const char *name, void *dev)
{
    return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
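
As an illustration of how a driver typically calls it (all of the names below are hypothetical and not from the original text; this is only a sketch):

/* hypothetical device structure */
struct my_dev {
    int irq;
    void __iomem *regs;
};

static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
    struct my_dev *dev = dev_id;

    /* read and clear the device's interrupt status here */
    if (!readl(dev->regs))          /* placeholder "is it ours?" check */
        return IRQ_NONE;            /* matters for shared interrupts */

    return IRQ_HANDLED;             /* or IRQ_WAKE_THREAD to defer to a thread */
}

static int my_dev_setup_irq(struct my_dev *dev)
{
    /* dev is passed as dev_id, so it comes back as the handler's argument */
    return request_irq(dev->irq, my_dev_isr, IRQF_SHARED, "my_dev", dev);
}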

 request_threaded_irq is the real registration interface; request_irq is just a wrapper that passes NULL for thread_fn. Why it is structured this way becomes clear from the implementation below.

int request_threaded_irq(unsigned int irq, irq_handler_t handler,
             irq_handler_t thread_fn, unsigned long irqflags,
             const char *devname, void *dev_id)
{
    struct irqaction *action;
    struct irq_desc *desc;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if ((irqflags & IRQF_SHARED) && !dev_id)
        return -EINVAL;

    desc = irq_to_desc(irq);
    if (!desc)
        return -EINVAL;

    if (!irq_settings_can_request(desc) ||
        WARN_ON(irq_settings_is_per_cpu_devid(desc)))
        return -EINVAL;
    // a handler of some kind is mandatory, but the primary handler may be omitted as long as thread_fn is given
    if (!handler) {
        if (!thread_fn)
            return -EINVAL;
        handler = irq_default_primary_handler;
    }
    // allocate an irqaction; it will be linked into the interrupt descriptor later so it can be found when the interrupt fires
    action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    if (!action)
        return -ENOMEM;
    // fill in the newly allocated action
    action->handler = handler;
    action->thread_fn = thread_fn;
    action->flags = irqflags;
    action->name = devname;
    action->dev_id = dev_id;
    // the call below is the heart of interrupt registration
    chip_bus_lock(desc);
    retval = __setup_irq(irq, desc, action);
    chip_bus_sync_unlock(desc);

    if (retval)
        kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
    if (!retval && (irqflags & IRQF_SHARED)) {
        /*
         * It's a shared IRQ -- the driver ought to be prepared for it
         * to happen immediately, so let's make sure....
         * We disable the irq to make sure that a 'real' IRQ doesn't
         * run in parallel with our fake.
         */
        unsigned long flags;

        disable_irq(irq);
        local_irq_save(flags);

        handler(irq, dev_id);

        local_irq_restore(flags);
        enable_irq(irq);
    }
#endif
    return retval;
}

The function below is the key to interrupt registration, and it also shows how today's kernel really handles interrupts, including the latency-oriented optimization (forced threading of handlers) mentioned earlier.

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
    struct irqaction *old, **old_ptr;
    unsigned long flags, thread_mask = 0;
    int ret, nested, shared = 0;
    cpumask_var_t mask;

    if (!desc)
        return -EINVAL;

    if (desc->irq_data.chip == &no_irq_chip)
        return -ENOSYS;
    if (!try_module_get(desc->owner))
        return -ENODEV;

    /*
     * Check whether the interrupt nests into another interrupt
     * thread.
     */
    // descriptors marked as nested (_IRQ_NESTED_THREAD) must supply a thread_fn
    nested = irq_settings_is_nested_thread(desc);
    if (nested) {
        if (!new->thread_fn) {
            ret = -EINVAL;
            goto out_mput;
        }
        /*
         * Replace the primary handler which was provided from
         * the driver for non nested interrupt handling by the
         * dummy function which warns when called.
         */
        // for a nested interrupt the primary handler supplied by the driver is
        // discarded and replaced with a stub that only prints a warning, i.e. a
        // nested interrupt does not get a real primary handler
        new->handler = irq_nested_primary_handler;
    } else {
        // force-thread the handler: unless the irq is marked as non-threadable (e.g. IRQF_NO_THREAD), it gets threaded
        if (irq_settings_can_thread(desc))
            irq_setup_forced_threading(new);
    }

    /*
     * Create a handler thread when a thread function is supplied
     * and the interrupt does not nest into another interrupt
     * thread.
     */
     // create the kernel thread once the handler has been set up as threaded
    if (new->thread_fn && !nested) {
        struct task_struct *t;
        static const struct sched_param param = {
            .sched_priority = MAX_USER_RT_PRIO/2,
        };

        t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                   new->name);
        if (IS_ERR(t)) {
            ret = PTR_ERR(t);
            goto out_mput;
        }
        // schedule the irq thread with the SCHED_FIFO policy
        sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

        /*
         * We keep the reference to the task struct even if
         * the thread dies to avoid that the interrupt code
         * references an already freed task_struct.
         */
        get_task_struct(t);
        new->thread = t;
        /*
         * Tell the thread to set its affinity. This is
         * important for shared interrupt handlers as we do
         * not invoke setup_affinity() for the secondary
         * handlers as everything is already set up. Even for
         * interrupts marked with IRQF_NO_BALANCE this is
         * correct as we want the thread to move to the cpu(s)
         * on which the requesting code placed the interrupt.
         */
         // tell the thread to set its CPU affinity
        set_bit(IRQTF_AFFINITY, &new->thread_flags);
    }

    if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
        ret = -ENOMEM;
        goto out_thread;
    }

    /*
     * Drivers are often written to work w/o knowledge about the
     * underlying irq chip implementation, so a request for a
     * threaded irq without a primary hard irq context handler
     * requires the ONESHOT flag to be set. Some irq chips like
     * MSI based interrupts are per se one shot safe. Check the
     * chip flags, so we can avoid the unmask dance at the end of
     * the threaded handler for those.
     */
    if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
        new->flags &= ~IRQF_ONESHOT;

    /*
     * The following block of code has to be executed atomically
     */
    // a shared interrupt may carry several actions; walk the existing list so the new one can be appended at the end
    raw_spin_lock_irqsave(&desc->lock, flags);
    old_ptr = &desc->action;
    old = *old_ptr;
    if (old) {
        /*
         * Can't share interrupts unless both agree to and are
         * the same type (level, edge, polarity). So both flag
         * fields must have IRQF_SHARED set and the bits which
         * set the trigger type must match. Also all must
         * agree on ONESHOT.
         */
        if (!((old->flags & new->flags) & IRQF_SHARED) ||
            ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
            ((old->flags ^ new->flags) & IRQF_ONESHOT))
            goto mismatch;

        /* All handlers must agree on per-cpuness */
        if ((old->flags & IRQF_PERCPU) !=
            (new->flags & IRQF_PERCPU))
            goto mismatch;

        /* add new interrupt at end of irq queue */
        do {
            /*
             * Or all existing action->thread_mask bits,
             * so we can find the next zero bit for this
             * new action.
             */
            thread_mask |= old->thread_mask;
            old_ptr = &old->next;
            old = *old_ptr;
        } while (old);
        shared = 1;
    }

    /*
     * Setup the thread mask for this irqaction for ONESHOT. For
     * !ONESHOT irqs the thread mask is 0 so we can avoid a
     * conditional in irq_wake_thread().
     */
     // thread_mask bookkeeping for IRQF_ONESHOT interrupts
    if (new->flags & IRQF_ONESHOT) {
        /*
         * Unlikely to have 32 resp 64 irqs sharing one line,
         * but who knows.
         */
        if (thread_mask == ~0UL) {
            ret = -EBUSY;
            goto out_mask;
        }
        /*
         * The thread_mask for the action is or'ed to
         * desc->thread_active to indicate that the
         * IRQF_ONESHOT thread handler has been woken, but not
         * yet finished. The bit is cleared when a thread
         * completes. When all threads of a shared interrupt
         * line have completed desc->threads_active becomes
         * zero and the interrupt line is unmasked. See
         * handle.c:irq_wake_thread() for further information.
         *
         * If no thread is woken by primary (hard irq context)
         * interrupt handlers, then desc->threads_active is
         * also checked for zero to unmask the irq line in the
         * affected hard irq flow handlers
         * (handle_[fasteoi|level]_irq).
         *
         * The new action gets the first zero bit of
         * thread_mask assigned. See the loop above which or's
         * all existing action->thread_mask bits.
         */
        new->thread_mask = 1 << ffz(thread_mask);

    /*
     * If the handler is the default irq_default_primary_handler() (the driver
     * passed handler = NULL) and the chip does not support hardware oneshot,
     * IRQF_ONESHOT must be set explicitly: a level-triggered interrupt that is
     * never acked in the primary handler would otherwise cause an interrupt storm.
     */

    } else if (new->handler == irq_default_primary_handler &&
           !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
        /*
         * The interrupt was requested with handler = NULL, so
         * we use the default primary handler for it. But it
         * does not have the oneshot flag set. In combination
         * with level interrupts this is deadly, because the
         * default primary handler just wakes the thread, then
         * the irq lines is reenabled, but the device still
         * has the level irq asserted. Rinse and repeat....
         *
         * While this works for edge type interrupts, we play
         * it safe and reject unconditionally because we can't
         * say for sure which type this interrupt really
         * has. The type flags are unreliable as the
         * underlying chip implementation can override them.
         */
        pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
               irq);
        ret = -EINVAL;
        goto out_mask;
    }
    // first registration on this irq (not shared yet)
    if (!shared) {
        ret = irq_request_resources(desc);
        if (ret) {
            pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
                   new->name, irq, desc->irq_data.chip->name);
            goto out_mask;
        }
        // initialize the wait queue used for threaded handlers
        init_waitqueue_head(&desc->wait_for_threads);
        // configure the trigger type (level/edge, polarity) if one was requested
        /* Setup the type (level, edge polarity) if configured: */
        if (new->flags & IRQF_TRIGGER_MASK) {
            ret = __irq_set_trigger(desc, irq,
                    new->flags & IRQF_TRIGGER_MASK);

            if (ret)
                goto out_mask;
        }
        // clear stale state flags
        desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | 
                  IRQS_ONESHOT | IRQS_WAITING);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

        if (new->flags & IRQF_PERCPU) {
            irqd_set(&desc->irq_data, IRQD_PER_CPU);
            irq_settings_set_per_cpu(desc);
        }

        if (new->flags & IRQF_ONESHOT)
            desc->istate |= IRQS_ONESHOT;
        // enable the interrupt right away if auto-enable is allowed (the default)
        if (irq_settings_can_autoenable(desc))
            irq_startup(desc, true);
        else
            /* Undo nested disables: */
            desc->depth = 1;

        /* Exclude IRQ from balancing if requested */
        if (new->flags & IRQF_NOBALANCING) {
            irq_settings_set_no_balancing(desc);
            irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
        }

        /* Set default affinity mask once everything is setup */
        setup_affinity(irq, desc, mask);

    } else if (new->flags & IRQF_TRIGGER_MASK) {
        unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
        unsigned int omsk = irq_settings_get_trigger_mask(desc);

        if (nmsk != omsk)
            /* hope the handler works with current  trigger mode */
            pr_warning("irq %d uses trigger mode %u; requested %u\n",
                   irq, nmsk, omsk);
    }

    new->irq = irq;
    *old_ptr = new;

    irq_pm_install_action(desc, new);

    /* Reset broken irq detection when installing new handler */
    desc->irq_count = 0;
    desc->irqs_unhandled = 0;

    /*
     * Check whether we disabled the irq via the spurious handler
     * before. Reenable it and give it another chance.
     */
    if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
        desc->istate &= ~IRQS_SPURIOUS_DISABLED;
        __enable_irq(desc, irq);
    }

    raw_spin_unlock_irqrestore(&desc->lock, flags);

    /*
     * Strictly no need to wake it up, but hung_task complains
     * when no hard interrupt wakes the thread up.
     */
    // wake the irq thread; it will immediately block where it waits for interrupts
    if (new->thread)
        wake_up_process(new->thread);
    // create the /proc entries for this irq
    register_irq_proc(irq, desc);
    new->dir = NULL;
    register_handler_proc(irq, new);
    free_cpumask_var(mask);

    return 0;

mismatch:
    if (!(new->flags & IRQF_PROBE_SHARED)) {
        pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
               irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
        dump_stack();
#endif
    }
    ret = -EBUSY;

out_mask:
    raw_spin_unlock_irqrestore(&desc->lock, flags);
    free_cpumask_var(mask);

out_thread:
    if (new->thread) {
        struct task_struct *t = new->thread;

        new->thread = NULL;
        kthread_stop(t);
        put_task_struct(t);
    }
out_mput:
    module_put(desc->owner);
    return ret;
}

The overall flow of this function: if the interrupt is of the nested kind, the primary handler is simply replaced with irq_nested_primary_handler (explained below). If it is not nested and has not been explicitly marked as non-threadable, the handler may be force-threaded through irq_setup_forced_threading() (also explained below). For a threaded handler a kernel thread is created for the interrupt, scheduled as SCHED_FIFO and given the appropriate CPU affinity. After that come the descriptor bookkeeping, enabling the interrupt, and creating the entries under /proc. That is the whole registration flow; the helpers mentioned above are examined next.

irq_setup_forced_threading

static void irq_setup_forced_threading(struct irqaction *new)
{
    if (!force_irqthreads)
        return;
    if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
        return;

    new->flags |= IRQF_ONESHOT;

    if (!new->thread_fn) {
        set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
        new->thread_fn = new->handler;
        new->handler = irq_default_primary_handler;
    }
}

The source makes it clear: the function first checks whether forced threading applies to this interrupt at all, and then only performs the swap when thread_fn has not been set. The reason is simple: if the driver already supplied a thread function, its handlers must not be thrown away, otherwise that would be a bug. Forced threading therefore means that an interrupt registered without a thread_fn gets the built-in irq_thread kernel thread: the driver's original handler is moved into thread_fn so it runs in that thread, and the primary handler is replaced with irq_default_primary_handler. That function looks very much like the irq_nested_primary_handler seen earlier, except that irq_default_primary_handler simply returns IRQ_WAKE_THREAD (so the thread is always woken), while irq_nested_primary_handler only prints a warning and returns IRQ_NONE:

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
    return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
    WARN(1, "Primary handler called for nested irq %d\n", irq);
    return IRQ_NONE;
}
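
As a purely hypothetical illustration of why it matters which context a handler runs in (none of these names come from the kernel or from the original text):

/* hard-IRQ context: must not sleep, so only atomic allocations are allowed */
static irqreturn_t demo_primary_handler(int irq, void *dev_id)
{
    void *scratch = kmalloc(64, GFP_ATOMIC);

    kfree(scratch);
    return IRQ_WAKE_THREAD;     /* defer the real work to the thread below */
}

/* process context (the irq kthread): sleeping calls such as GFP_KERNEL are fine */
static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
    void *buf = kmalloc(4096, GFP_KERNEL);

    kfree(buf);
    return IRQ_HANDLED;
}

/* registered with something like:
 * request_threaded_irq(irq, demo_primary_handler, demo_thread_fn,
 *                      IRQF_ONESHOT, "demo", dev);
 */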

So by now it should be clear that an interrupt service routine registered through request_irq may be threaded by the kernel: if registration explicitly forbids threading, the handler runs in interrupt context; otherwise, with forced threading in effect, it ends up running in process context. This is exactly what I originally wanted to find out, because some concurrency APIs must be used differently in interrupt context than in process context. Finally, let us look at how the kernel thread that runs the threaded handler executes.

static int irq_thread(void *data)
{
    struct callback_head on_exit_work;
    struct irqaction *action = data;
    struct irq_desc *desc = irq_to_desc(action->irq);
    irqreturn_t (*handler_fn)(struct irq_desc *desc,
            struct irqaction *action);

    if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                    &action->thread_flags))
        handler_fn = irq_forced_thread_fn;
    else
        handler_fn = irq_thread_fn;

    init_task_work(&on_exit_work, irq_thread_dtor);
    task_work_add(current, &on_exit_work, false);

    irq_thread_check_affinity(desc, action);

    while (!irq_wait_for_interrupt(action)) {
        irqreturn_t action_ret;

        irq_thread_check_affinity(desc, action);
        // run the thread-side handler function for this interrupt
        action_ret = handler_fn(desc, action);
        if (action_ret == IRQ_HANDLED)
            // bump the threads_handled counter
            atomic_inc(&desc->threads_handled);
        // wake up anyone sleeping on the wait_for_threads queue
        wake_threads_waitq(desc);
    }

    /*
     * This is the regular exit path. __free_irq() is stopping the
     * thread via kthread_stop() after calling
     * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
     * oneshot mask bit can be set. We cannot verify that as we
     * cannot touch the oneshot mask at this point anymore as
     * __setup_irq() might have given out currents thread_mask
     * again.
     */
    task_work_cancel(current, irq_thread_dtor);
    return 0;
}
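
The place where the thread blocks is irq_wait_for_interrupt(); roughly (a sketch of kernel/irq/manage.c from kernels of that era):

static int irq_wait_for_interrupt(struct irqaction *action)
{
    set_current_state(TASK_INTERRUPTIBLE);

    while (!kthread_should_stop()) {
        /* IRQTF_RUNTHREAD is set by __irq_wake_thread() in hard-IRQ context */
        if (test_and_clear_bit(IRQTF_RUNTHREAD,
                       &action->thread_flags)) {
            __set_current_state(TASK_RUNNING);
            return 0;
        }
        schedule();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return -1;
}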

The thread takes a struct irqaction as its argument. Once running, it blocks in irq_wait_for_interrupt() until the interrupt-context code wakes it: after the primary handler has run, its return value is checked, and if it is IRQ_WAKE_THREAD, __irq_wake_thread(desc, action) wakes the handler thread. Forced threading simply binds the primary handler to a stub that always returns IRQ_WAKE_THREAD, so the thread is guaranteed to be woken. And that is the whole of my current understanding of hard interrupts.

Reference blogs:

https://www.cnblogs.com/sky-heaven/p/11096462.html

https://www.cnblogs.com/arnoldlu/p/8659981.html

 
